diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..3f4ac27e --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "benchmarks"] + path = benchmarks + url = ../itsi-server-benchmarks diff --git a/CHANGELOG.md b/CHANGELOG.md index 37917b88..7326448c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +## [0.2.17] - 2025-05-31 +- Enabled vectorized writes in IoStream +- Replaced all usage of heap-allocated BoxBody with HttpBody enums +- Add 5 threads as default for rack/handler +- Reserve header size ahead of time in rack interface +- Avoid intermediate array allocation when populating Rack env headers. +- Rewrite synchronous thread worker to avoid excessive GVL acquisition +- Revert to default write_ev behaviour for http1 +- Switch to service_fn from service struct to avoid one additional pinned future +- Worker pinning accepts ruby workers too +- Fixed ordering incompatibility in etag forwarding from static file server +- Added embedded benchmark suite + ## [0.2.16] - 2025-05-02 - Optimized static error responses - Optimized rate limit middleware diff --git a/Cargo.lock b/Cargo.lock index 76796d73..a086a600 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1644,7 +1644,7 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "itsi-scheduler" -version = "0.2.16" +version = "0.2.17" dependencies = [ "bytes", "derive_more", @@ -1662,7 +1662,7 @@ dependencies = [ [[package]] name = "itsi-server" -version = "0.2.16" +version = "0.2.17" dependencies = [ "argon2", "async-channel", @@ -1678,6 +1678,7 @@ dependencies = [ "either", "fs2", "futures", + "futures-util", "globset", "http 1.3.1", "http-body-util", @@ -1713,6 +1714,7 @@ dependencies = [ "serde_magnus", "sha-crypt", "sha2", + "smallvec", "socket2", "sysinfo", "tempfile", diff --git a/Gemfile.lock b/Gemfile.lock index fa660d67..3b727690 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -1,20 +1,20 @@ PATH remote: . 
specs: - itsi (0.2.16) - itsi-scheduler (~> 0.2.16) - itsi-server (~> 0.2.16) + itsi (0.2.17) + itsi-scheduler (~> 0.2.17) + itsi-server (~> 0.2.17) PATH remote: gems/scheduler specs: - itsi-scheduler (0.2.16) + itsi-scheduler (0.2.17) rb_sys (~> 0.9.91) PATH remote: gems/server specs: - itsi-server (0.2.16) + itsi-server (0.2.17) json (~> 2) prism (~> 1.4) rack (>= 1.6) diff --git a/benchmarks b/benchmarks new file mode 160000 index 00000000..c99d6305 --- /dev/null +++ b/benchmarks @@ -0,0 +1 @@ +Subproject commit c99d63051dd96f533b9aa9976e3c112dd73ca52e diff --git a/crates/itsi_acme/Cargo.toml b/crates/itsi_acme/Cargo.toml index 7cc9c8a8..b79a3d76 100644 --- a/crates/itsi_acme/Cargo.toml +++ b/crates/itsi_acme/Cargo.toml @@ -2,7 +2,7 @@ name = "itsi_acme" version = "0.1.0" authors = [ - "wouterkem ", + "wouterken ", "dignifiedquire ", "Florian Uekermann ", ] diff --git a/crates/itsi_scheduler/Cargo.toml b/crates/itsi_scheduler/Cargo.toml index 356af752..f6f7b438 100644 --- a/crates/itsi_scheduler/Cargo.toml +++ b/crates/itsi_scheduler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "itsi-scheduler" -version = "0.2.16" +version = "0.2.17" edition = "2021" authors = ["Wouter Coppieters "] license = "MIT" diff --git a/crates/itsi_server/Cargo.toml b/crates/itsi_server/Cargo.toml index ed9f793b..ed6fbb8b 100644 --- a/crates/itsi_server/Cargo.toml +++ b/crates/itsi_server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "itsi-server" -version = "0.2.16" +version = "0.2.17" edition = "2021" authors = ["Wouter Coppieters "] license = "MIT" @@ -90,3 +90,5 @@ argon2 = "0.5.3" core_affinity = "0.8.3" memchr = "2.7.4" quick_cache = "0.6.13" +smallvec = "1.15.0" +futures-util = "0.3.31" diff --git a/crates/itsi_server/src/lib.rs b/crates/itsi_server/src/lib.rs index 7cc9940c..d9567b7b 100644 --- a/crates/itsi_server/src/lib.rs +++ b/crates/itsi_server/src/lib.rs @@ -58,6 +58,7 @@ fn init(ruby: &Ruby) -> Result<()> { request.define_method("rack_protocol", 
method!(ItsiHttpRequest::rack_protocol, 0))?; request.define_method("host", method!(ItsiHttpRequest::host, 0))?; request.define_method("headers", method!(ItsiHttpRequest::headers, 0))?; + request.define_method("each_header", method!(ItsiHttpRequest::each_header, 0))?; request.define_method("uri", method!(ItsiHttpRequest::uri, 0))?; request.define_method("header", method!(ItsiHttpRequest::header, 1))?; request.define_method("[]", method!(ItsiHttpRequest::header, 1))?; @@ -71,6 +72,7 @@ fn init(ruby: &Ruby) -> Result<()> { request.define_method("url_encoded?", method!(ItsiHttpRequest::is_url_encoded, 0))?; request.define_method("multipart?", method!(ItsiHttpRequest::is_multipart, 0))?; request.define_method("url_params", method!(ItsiHttpRequest::url_params, 0))?; + request.define_method("server_error", method!(ItsiHttpRequest::error, 1))?; let body_proxy = ruby.get_inner(&ITSI_BODY_PROXY); body_proxy.define_method("gets", method!(ItsiBodyProxy::gets, 0))?; @@ -80,6 +82,10 @@ fn init(ruby: &Ruby) -> Result<()> { let response = ruby.get_inner(&ITSI_RESPONSE); response.define_method("[]=", method!(ItsiHttpResponse::add_header, 2))?; + response.define_method( + "reserve_headers", + method!(ItsiHttpResponse::reserve_headers, 1), + )?; response.define_method("add_header", method!(ItsiHttpResponse::add_header, 2))?; response.define_method("add_headers", method!(ItsiHttpResponse::add_headers, 1))?; response.define_method("status=", method!(ItsiHttpResponse::set_status, 1))?; @@ -87,7 +93,6 @@ fn init(ruby: &Ruby) -> Result<()> { response.define_method("<<", method!(ItsiHttpResponse::send_frame, 1))?; response.define_method("write", method!(ItsiHttpResponse::send_frame, 1))?; response.define_method("read", method!(ItsiHttpResponse::recv_frame, 0))?; - response.define_method("flush", method!(ItsiHttpResponse::flush, 0))?; response.define_method("closed?", method!(ItsiHttpResponse::is_closed, 0))?; response.define_method( "send_and_close", diff --git 
a/crates/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs b/crates/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs index 0877496f..5ce01d6c 100644 --- a/crates/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +++ b/crates/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs @@ -25,6 +25,7 @@ pub struct ItsiBodyProxy { pub enum ItsiBody { Buffered(BigBytes), Stream(ItsiBodyProxy), + Empty, } impl ItsiBody { @@ -32,6 +33,7 @@ impl ItsiBody { match self { ItsiBody::Buffered(bytes) => bytes.as_value(), ItsiBody::Stream(proxy) => Some(proxy.clone().into_value()), + ItsiBody::Empty => None, } } } diff --git a/crates/itsi_server/src/ruby_types/itsi_grpc_call.rs b/crates/itsi_server/src/ruby_types/itsi_grpc_call.rs index 23370a29..508bd981 100644 --- a/crates/itsi_server/src/ruby_types/itsi_grpc_call.rs +++ b/crates/itsi_server/src/ruby_types/itsi_grpc_call.rs @@ -1,6 +1,6 @@ use super::itsi_grpc_response_stream::ItsiGrpcResponseStream; use crate::prelude::*; -use crate::server::http_message_types::{HttpRequest, HttpResponse}; +use crate::server::http_message_types::{HttpBody, HttpRequest, HttpResponse}; use crate::server::{byte_frame::ByteFrame, request_job::RequestJob}; use crate::services::itsi_http_service::HttpRequestContext; use async_compression::futures::bufread::{GzipDecoder, GzipEncoder, ZlibDecoder, ZlibEncoder}; @@ -8,7 +8,7 @@ use bytes::Bytes; use derive_more::Debug; use futures::{executor::block_on, io::Cursor, AsyncReadExt}; use http::{request::Parts, Response, StatusCode}; -use http_body_util::{combinators::BoxBody, BodyExt, Empty}; +use http_body_util::BodyExt; use itsi_error::CLIENT_CONNECTION_CLOSED; use itsi_rb_helpers::{print_rb_backtrace, HeapValue}; use itsi_tracing::debug; @@ -139,7 +139,7 @@ impl ItsiGrpcCall { { Err(err) => { error!("Error occurred: {}", err); - let mut response = Response::new(BoxBody::new(Empty::new())); + let mut response = Response::new(HttpBody::empty()); *response.status_mut() = StatusCode::BAD_REQUEST; Ok(response) } @@ 
-147,7 +147,7 @@ impl ItsiGrpcCall { Some(first_frame) => Ok(response_stream .build_response(first_frame, receiver, shutdown_channel) .await), - None => Ok(Response::new(BoxBody::new(Empty::new()))), + None => Ok(Response::new(HttpBody::empty())), }, } } diff --git a/crates/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs b/crates/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs index 9bdec5ed..f34a5c98 100644 --- a/crates/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +++ b/crates/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs @@ -1,6 +1,6 @@ use super::itsi_grpc_call::CompressionAlgorithm; use crate::prelude::*; -use crate::server::http_message_types::HttpResponse; +use crate::server::http_message_types::{HttpBody, HttpResponse}; use crate::server::size_limited_incoming::SizeLimitedIncoming; use crate::server::{byte_frame::ByteFrame, serve_strategy::single_mode::RunningPhase}; use bytes::Bytes; @@ -11,8 +11,8 @@ use http::{ header::{HeaderName, HeaderValue}, HeaderMap, Response, }; -use http_body_util::{combinators::BoxBody, BodyDataStream, BodyExt, Empty, Full, StreamBody}; -use hyper::body::{Frame, Incoming}; +use http_body_util::BodyDataStream; +use hyper::body::Incoming; use magnus::error::Result as MagnusResult; use nix::unistd::pipe; use parking_lot::Mutex; @@ -161,7 +161,7 @@ impl ItsiGrpcResponseStream { response_headers, incoming_reader: Some(pipe_read), response_sender, - response: Some(Response::new(BoxBody::new(Empty::new()))), + response: Some(Response::new(HttpBody::empty())), trailer_tx, trailer_rx: Some(trailer_rx), })), @@ -207,12 +207,12 @@ impl ItsiGrpcResponseStream { let rx = self.inner.lock().trailer_rx.take().unwrap(); *response.version_mut() = Version::HTTP_2; *response.headers_mut() = self.inner.lock().response_headers.clone(); - *response.body_mut() = if matches!(first_frame, ByteFrame::Empty) { - BoxBody::new(Empty::new()) + let body_with_trailers = if matches!(first_frame, 
ByteFrame::Empty) { + HttpBody::empty() } else if matches!(first_frame, ByteFrame::End(_)) { - BoxBody::new(Full::new(first_frame.into())) + HttpBody::full(first_frame.into()) } else { - let initial_frame = tokio_stream::once(Ok(Frame::data(Bytes::from(first_frame)))); + let initial_frame = tokio_stream::once(Ok(Bytes::from(first_frame))); let frame_stream = unfold( (ReceiverStream::new(receiver), shutdown_rx), |(mut receiver, mut shutdown_rx)| async move { @@ -224,7 +224,7 @@ impl ItsiGrpcResponseStream { maybe_bytes = receiver.next() => { match maybe_bytes { Some(ByteFrame::Data(bytes)) | Some(ByteFrame::End(bytes)) => { - return Some((Ok(Frame::data(bytes)), (receiver, shutdown_rx))); + return Some((Ok(bytes), (receiver, shutdown_rx))); } _ => { return None; @@ -234,7 +234,7 @@ impl ItsiGrpcResponseStream { _ = shutdown_rx.changed() => { match *shutdown_rx.borrow() { RunningPhase::ShutdownPending => { - warn!("Disconnecting streaming client."); + debug!("Disconnecting streaming client."); return None; }, _ => continue, @@ -246,15 +246,16 @@ impl ItsiGrpcResponseStream { ); let combined_stream = initial_frame.chain(frame_stream); - BoxBody::new(StreamBody::new(combined_stream)) + HttpBody::stream(combined_stream) } .with_trailers(async move { match rx.await { Ok(trailers) => Some(Ok(trailers)), Err(_err) => None, } - }) - .boxed(); + }); + + *response.body_mut() = body_with_trailers; response } diff --git a/crates/itsi_server/src/ruby_types/itsi_http_request.rs b/crates/itsi_server/src/ruby_types/itsi_http_request.rs index c52fd591..801102ce 100644 --- a/crates/itsi_server/src/ruby_types/itsi_http_request.rs +++ b/crates/itsi_server/src/ruby_types/itsi_http_request.rs @@ -1,31 +1,30 @@ use derive_more::Debug; use futures::StreamExt; -use http::{header::CONTENT_LENGTH, request::Parts, Response, StatusCode, Version}; -use http_body_util::{combinators::BoxBody, BodyExt, Empty}; +use http::{header::CONTENT_LENGTH, request::Parts, HeaderValue, Response, StatusCode, 
Version}; +use http_body_util::BodyExt; use itsi_error::CLIENT_CONNECTION_CLOSED; -use itsi_rb_helpers::{print_rb_backtrace, HeapValue}; -use itsi_tracing::{debug, error}; +use itsi_rb_helpers::{funcall_no_ret, print_rb_backtrace, HeapValue}; +use itsi_tracing::debug; use magnus::{ - block::Proc, + block::{yield_values, Proc}, error::{ErrorType, Result as MagnusResult}, - Error, RHash, Symbol, + Error, IntoValue, RHash, Symbol, }; use magnus::{ value::{LazyId, ReprValue}, Ruby, Value, }; use std::{fmt, io::Write, sync::Arc, time::Instant}; -use tokio::sync::mpsc::{self}; +use tracing::error; use super::{ itsi_body_proxy::{big_bytes::BigBytes, ItsiBody, ItsiBodyProxy}, - itsi_http_response::ItsiHttpResponse, + itsi_http_response::{ItsiHttpResponse, ResponseFrame}, }; use crate::{ default_responses::{INTERNAL_SERVER_ERROR_RESPONSE, SERVICE_UNAVAILABLE_RESPONSE}, server::{ - byte_frame::ByteFrame, - http_message_types::{HttpRequest, HttpResponse}, + http_message_types::{HttpBody, HttpRequest, HttpResponse}, request_job::RequestJob, size_limited_incoming::MaxBodySizeReached, }, @@ -33,11 +32,13 @@ use crate::{ }; static ID_MESSAGE: LazyId = LazyId::new("message"); +static ID_CALL: LazyId = LazyId::new("call"); +static ZERO_HEADER_VALUE: HeaderValue = HeaderValue::from_static("0"); #[derive(Debug)] #[magnus::wrap(class = "Itsi::HttpRequest", free_immediately, size)] pub struct ItsiHttpRequest { - pub parts: Parts, + pub parts: Arc, #[debug(skip)] pub body: ItsiBody, pub version: Version, @@ -148,8 +149,10 @@ impl ItsiHttpRequest { pub fn process(self, ruby: &Ruby, app_proc: Arc>) -> magnus::error::Result<()> { let response = self.response.clone(); - let result = app_proc.call::<_, Value>((self,)); - if let Err(err) = result { + + if let Err(err) = + funcall_no_ret(app_proc.as_value(), *ID_CALL, [self.into_value_with(ruby)]) + { Self::internal_error(ruby, response, err); } Ok(()) @@ -158,7 +161,7 @@ impl ItsiHttpRequest { pub fn internal_error(ruby: &Ruby, response: 
ItsiHttpResponse, err: Error) { if Self::is_connection_closed_err(ruby, &err) { debug!("Connection closed by client"); - response.close(); + response.close().ok(); } else if let Some(rb_err) = err.value() { print_rb_backtrace(rb_err); response.internal_server_error(err.to_string()); @@ -167,7 +170,7 @@ impl ItsiHttpRequest { } } - pub fn error(self, message: String) { + pub fn error(&self, message: String) { self.response.internal_server_error(message); } @@ -179,9 +182,7 @@ impl ItsiHttpRequest { nonblocking: bool, ) -> itsi_error::Result { match ItsiHttpRequest::new(hyper_request, context, script_name).await { - Ok((request, mut receiver)) => { - let shutdown_channel = context.service.shutdown_receiver.clone(); - let response = request.response.clone(); + Ok((request, receiver)) => { let sender = if nonblocking { &context.nonblocking_sender } else { @@ -192,20 +193,30 @@ impl ItsiHttpRequest { async_channel::TrySendError::Full(_) => Ok(SERVICE_UNAVAILABLE_RESPONSE .to_http_response(context.accept) .await), - async_channel::TrySendError::Closed(err) => { - error!("Error occurred: {:?}", err); + async_channel::TrySendError::Closed(_) => { + error!("Channel closed while sending request job"); Ok(INTERNAL_SERVER_ERROR_RESPONSE .to_http_response(context.accept) .await) } }, - _ => match receiver.recv().await { - Some(first_frame) => Ok(response - .build(first_frame, receiver, shutdown_channel) - .await), - None => Ok(response - .build(ByteFrame::Empty, receiver, shutdown_channel) - .await), + Ok(_) => match receiver.await { + Ok(ResponseFrame::HttpResponse(response)) => Ok(response), + Ok(ResponseFrame::HijackedResponse(response)) => { + match response.process_hijacked_response().await { + Ok(result) => Ok(result), + Err(e) => { + error!("Error processing hijacked response: {}", e); + Ok(Response::new(HttpBody::empty())) + } + } + } + Err(_) => { + error!("Failed to receive response from receiver"); + Ok(INTERNAL_SERVER_ERROR_RESPONSE + 
.to_http_response(context.accept) + .await) + } }, } } @@ -217,9 +228,18 @@ impl ItsiHttpRequest { request: HttpRequest, context: &HttpRequestContext, script_name: String, - ) -> Result<(ItsiHttpRequest, mpsc::Receiver), HttpResponse> { + ) -> Result< + ( + ItsiHttpRequest, + tokio::sync::oneshot::Receiver, + ), + HttpResponse, + > { let (parts, body) = request.into_parts(); - let body = if context.server_params.streamable_body { + let parts = Arc::new(parts); + let body = if parts.headers.get(CONTENT_LENGTH) == Some(&ZERO_HEADER_VALUE) { + ItsiBody::Empty + } else if context.server_params.streamable_body { ItsiBody::Stream(ItsiBodyProxy::new(body)) } else { let mut body_bytes = BigBytes::new(); @@ -228,7 +248,7 @@ impl ItsiHttpRequest { match chunk { Ok(byte_array) => body_bytes.write_all(&byte_array).unwrap(), Err(e) => { - let mut err_resp = Response::new(BoxBody::new(Empty::new())); + let mut err_resp = Response::new(HttpBody::empty()); if e.downcast_ref::().is_some() { *err_resp.status_mut() = StatusCode::PAYLOAD_TOO_LARGE; } @@ -238,18 +258,22 @@ impl ItsiHttpRequest { } ItsiBody::Buffered(body_bytes) }; - let response_channel = mpsc::channel::(100); + let (sender, receiver) = tokio::sync::oneshot::channel::(); Ok(( Self { context: context.clone(), version: parts.version, - response: ItsiHttpResponse::new(parts.clone(), response_channel.0), + response: ItsiHttpResponse::new( + parts.clone(), + sender, + context.service.shutdown_receiver.clone(), + ), start: Instant::now(), script_name, body, parts, }, - response_channel.1, + receiver, )) } @@ -328,6 +352,13 @@ impl ItsiHttpRequest { .collect::>()) } + pub(crate) fn each_header(&self) -> MagnusResult<()> { + self.parts.headers.iter().for_each(|(hn, hv)| { + yield_values::<_, Value>((hn.as_str(), hv.to_str().unwrap_or(""))).ok(); + }); + Ok(()) + } + pub(crate) fn uri(&self) -> MagnusResult { Ok(self.parts.uri.to_string()) } diff --git a/crates/itsi_server/src/ruby_types/itsi_http_response.rs 
b/crates/itsi_server/src/ruby_types/itsi_http_response.rs index e8c17cba..5bfa32f5 100644 --- a/crates/itsi_server/src/ruby_types/itsi_http_response.rs +++ b/crates/itsi_server/src/ruby_types/itsi_http_response.rs @@ -1,4 +1,9 @@ -use bytes::{Buf, Bytes, BytesMut}; +use crate::server::{ + frame_stream::{BufferedStream, FrameStream}, + http_message_types::{HttpBody, HttpResponse}, + serve_strategy::single_mode::RunningPhase, +}; +use bytes::{Buf, Bytes}; use derive_more::Debug; use futures::stream::{unfold, StreamExt}; use http::{ @@ -6,10 +11,11 @@ use http::{ request::Parts, HeaderMap, HeaderName, HeaderValue, Request, Response, StatusCode, }; -use http_body_util::{combinators::BoxBody, Empty, Full, StreamBody}; -use hyper::{body::Frame, upgrade::Upgraded}; +use http_body_util::Empty; +use hyper::upgrade::Upgraded; use hyper_util::rt::TokioIo; use itsi_error::Result; +use itsi_rb_helpers::call_without_gvl; use itsi_tracing::error; use magnus::error::Result as MagnusResult; use memchr::{memchr, memchr_iter}; @@ -17,105 +23,66 @@ use parking_lot::RwLock; use std::{ collections::HashMap, io, + ops::Deref, os::{fd::FromRawFd, unix::net::UnixStream}, str::FromStr, sync::Arc, + time::Duration, }; use tokio::{ io::AsyncReadExt, net::UnixStream as TokioUnixStream, - sync::{ - mpsc::{self}, - watch, - }, + sync::{mpsc::Sender, oneshot::Sender as OneshotSender, watch}, }; -use tokio_stream::wrappers::ReceiverStream; use tokio_util::io::ReaderStream; -use tracing::warn; - -use crate::server::{ - byte_frame::ByteFrame, http_message_types::HttpResponse, - serve_strategy::single_mode::RunningPhase, -}; +use tracing::{info, warn}; #[magnus::wrap(class = "Itsi::HttpResponse", free_immediately, size)] #[derive(Debug, Clone)] pub struct ItsiHttpResponse { - pub data: Arc, + pub inner: Arc, +} + +impl Deref for ItsiHttpResponse { + type Target = Arc; + + fn deref(&self) -> &Self::Target { + &self.inner + } } #[derive(Debug)] -pub struct ResponseData { +pub struct ResponseInner { + 
pub frame_writer: RwLock>>, pub response: RwLock>, - pub response_writer: RwLock>>, - pub response_buffer: RwLock, pub hijacked_socket: RwLock>, - pub parts: Parts, + pub response_sender: RwLock>>, + pub shutdown_rx: watch::Receiver, + pub parts: Arc, +} + +#[derive(Debug)] +pub enum ResponseFrame { + HttpResponse(HttpResponse), + HijackedResponse(ItsiHttpResponse), } impl ItsiHttpResponse { - pub async fn build( - &self, - first_frame: ByteFrame, - receiver: mpsc::Receiver, + pub fn new( + parts: Arc, + response_sender: OneshotSender, shutdown_rx: watch::Receiver, - ) -> HttpResponse { - if self.is_hijacked() { - return match self.process_hijacked_response().await { - Ok(result) => result, - Err(e) => { - error!("Error processing hijacked response: {}", e); - Response::new(BoxBody::new(Empty::new())) - } - }; + ) -> Self { + Self { + inner: Arc::new(ResponseInner { + parts, + shutdown_rx, + response_sender: RwLock::new(Some(response_sender)), + frame_writer: RwLock::new(None), + response: RwLock::new(Some(Response::new(HttpBody::empty()))), + hijacked_socket: RwLock::new(None), + }), } - let mut response = self.data.response.write().take().unwrap(); - *response.body_mut() = if matches!(first_frame, ByteFrame::Empty) { - BoxBody::new(Empty::new()) - } else if matches!(first_frame, ByteFrame::End(_)) { - BoxBody::new(Full::new(first_frame.into())) - } else { - let initial_frame = tokio_stream::once(Ok(Frame::data(Bytes::from(first_frame)))); - let frame_stream = unfold( - (ReceiverStream::new(receiver), shutdown_rx), - |(mut receiver, mut shutdown_rx)| async move { - if let RunningPhase::ShutdownPending = *shutdown_rx.borrow() { - return None; - } - loop { - tokio::select! 
{ - maybe_bytes = receiver.next() => { - match maybe_bytes { - Some(ByteFrame::Data(bytes)) | Some(ByteFrame::End(bytes)) => { - return Some((Ok(Frame::data(bytes)), (receiver, shutdown_rx))); - } - _ => { - return None; - } - } - }, - _ = shutdown_rx.changed() => { - match *shutdown_rx.borrow() { - RunningPhase::ShutdownPending => { - warn!("Disconnecting streaming client."); - return None; - }, - _ => continue, - } - } - } - } - }, - ); - - let combined_stream = initial_frame.chain(frame_stream); - BoxBody::new(StreamBody::new(combined_stream)) - }; - response - } - - pub fn close(&self) { - self.data.response_writer.write().take(); } async fn two_way_bridge(upgraded: Upgraded, local: TokioUnixStream) -> io::Result<()> { @@ -163,8 +130,7 @@ impl ItsiHttpResponse { &self, ) -> Result<(HeaderMap, StatusCode, bool, TokioUnixStream)> { let hijacked_socket = - self.data - .hijacked_socket + self.hijacked_socket .write() .take() .ok_or(itsi_error::ItsiError::InvalidInput( @@ -191,9 +157,9 @@ impl ItsiHttpResponse { pub async fn process_hijacked_response(&self) -> Result { let (headers, status, requires_upgrade, reader) = self.read_hijacked_headers().await?; let mut response = if requires_upgrade { - let parts = self.data.parts.clone(); + let parts = self.parts.clone(); tokio::spawn(async move { - let mut req = Request::from_parts(parts, Empty::::new()); + let mut req = Request::from_parts((*parts).clone(), Empty::::new()); match hyper::upgrade::on(&mut req).await { Ok(upgraded) => { Self::two_way_bridge(upgraded, reader) @@ -203,14 +169,14 @@ impl ItsiHttpResponse { Err(e) => eprintln!("upgrade error: {:?}", e), } }); - Response::new(BoxBody::new(Empty::new())) + Response::new(HttpBody::empty()) } else { let stream = ReaderStream::new(reader); let boxed_body = if headers .get(TRANSFER_ENCODING) .is_some_and(|h| h == "chunked") { - BoxBody::new(StreamBody::new(unfold( + HttpBody::stream(unfold( (stream, Vec::new()), |(mut stream, mut buf)| async move { loop { @@ -231,7 
+197,7 @@ impl ItsiHttpResponse { if buf.starts_with(b"\r\n") { buf.drain(..2); } - return Some((Ok(Frame::data(Bytes::from(data))), (stream, buf))); + return Some((Ok(Bytes::from(data)), (stream, buf))); } match stream.next().await { Some(Ok(chunk)) => buf.extend_from_slice(&chunk), @@ -239,15 +205,11 @@ impl ItsiHttpResponse { } } }, - ))) + )) } else { - BoxBody::new(StreamBody::new(stream.map( - |result: std::result::Result| { - result - .map(Frame::data) - .map_err(|e| unreachable!("unexpected io error: {:?}", e)) - }, - ))) + HttpBody::stream(stream.map(|result: std::result::Result| { + result.map_err(|e| unreachable!("unexpected io error: {:?}", e)) + })) }; Response::new(boxed_body) }; @@ -259,59 +221,97 @@ impl ItsiHttpResponse { pub fn internal_server_error(&self, message: String) { error!(message); - self.data.response_writer.write().take(); - if let Some(ref mut response) = *self.data.response.write() { + self.close_write().ok(); + if let Some(mut response) = self.response.write().take() { *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; + if let Some(sender) = self.response_sender.write().take() { + sender.send(ResponseFrame::HttpResponse(response)).ok(); + } } } - pub fn send_frame(&self, frame: Bytes) -> MagnusResult<()> { - self.send_frame_into(ByteFrame::Data(frame), &self.data.response_writer) + pub fn service_unavailable(&self) { + self.close_write().ok(); + if let Some(mut response) = self.response.write().take() { + *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE; + if let Some(sender) = self.response_sender.write().take() { + sender.send(ResponseFrame::HttpResponse(response)).ok(); + } + } } - pub fn recv_frame(&self) { - // not implemented + pub fn send_frame(&self, frame: Bytes) -> MagnusResult<()> { + { + if self.frame_writer.read().is_none() && self.response.read().is_some() { + if let Some(mut response) = self.response.write().take() { + let (writer, reader) = tokio::sync::mpsc::channel::(256); + let shutdown_rx = 
self.shutdown_rx.clone(); + let frame_stream = FrameStream::new(reader, shutdown_rx.clone()); + + let buffered = + BufferedStream::new(frame_stream, 32 * 1024, Duration::from_millis(10)); + *response.body_mut() = HttpBody::stream(buffered); + self.frame_writer.write().replace(writer); + if let Some(sender) = self.response_sender.write().take() { + sender.send(ResponseFrame::HttpResponse(response)).ok(); + } + } else { + info!("No response!"); + } + } + } + if let Some(frame_writer) = self.frame_writer.read().as_ref() { + call_without_gvl(|| frame_writer.blocking_send(frame)) + .map_err(|_| itsi_error::ItsiError::ClientConnectionClosed)?; + } + Ok(()) } - pub fn flush(&self) { - // no-op + pub fn send_and_close(&self, frame: Bytes) -> MagnusResult<()> { + if self.frame_writer.read().is_some() { + self.send_frame(frame)?; + self.close()?; + return Ok(()); + } + if let Some(mut response) = self.response.write().take() { + if frame.is_empty() { + *response.body_mut() = HttpBody::empty(); + } else { + *response.body_mut() = HttpBody::full(frame); + } + if let Some(sender) = self.response_sender.write().take() { + sender.send(ResponseFrame::HttpResponse(response)).ok(); + } + } + + Ok(()) } - pub fn is_closed(&self) -> bool { - self.data.response_writer.write().is_none() + pub fn close_write(&self) -> MagnusResult { + self.frame_writer.write().take(); + Ok(true) } - pub fn send_and_close(&self, frame: Bytes) -> MagnusResult<()> { - let result = self.send_frame_into(ByteFrame::End(frame), &self.data.response_writer); - self.data.response_writer.write().take(); - result + pub fn recv_frame(&self) { + // not implemented } - pub fn send_frame_into( - &self, - frame: ByteFrame, - writer: &RwLock>>, - ) -> MagnusResult<()> { - if let Some(writer) = writer.write().as_ref() { - return Ok(writer - .blocking_send(frame) - .map_err(|_| itsi_error::ItsiError::ClientConnectionClosed)?); - } - Ok(()) + pub fn is_closed(&self) -> bool { + self.response.read().is_none() && 
self.frame_writer.read().is_none() } pub fn is_hijacked(&self) -> bool { - self.data.hijacked_socket.read().is_some() + self.hijacked_socket.read().is_some() } - pub fn close_write(&self) -> MagnusResult { - self.data.response_writer.write().take(); - Ok(true) + pub fn close(&self) -> MagnusResult<()> { + self.close_write()?; + self.close_read()?; + Ok(()) } pub fn accept_str(&self) -> &str { - self.data - .parts + self.parts .headers .get(ACCEPT) .and_then(|hv| hv.to_str().ok()) // handle invalid utf-8 @@ -330,25 +330,9 @@ impl ItsiHttpResponse { Ok(true) } - pub fn new(parts: Parts, response_writer: mpsc::Sender) -> Self { - Self { - data: Arc::new(ResponseData { - response: RwLock::new(Some(Response::new(BoxBody::new(Empty::new())))), - response_writer: RwLock::new(Some(response_writer)), - response_buffer: RwLock::new(BytesMut::new()), - hijacked_socket: RwLock::new(None), - parts, - }), - } - } - - pub fn add_header(&self, name: Bytes, value: Bytes) -> MagnusResult<()> { - let header_name: HeaderName = HeaderName::from_bytes(&name).map_err(|e| { - itsi_error::ItsiError::InvalidInput(format!("Invalid header name {:?}: {:?}", name, e)) - })?; - if let Some(ref mut resp) = *self.data.response.write() { - let headers_mut = resp.headers_mut(); - self.insert_header(headers_mut, &header_name, value); + pub fn reserve_headers(&self, header_count: usize) -> MagnusResult<()> { + if let Some(ref mut resp) = *self.response.write() { + resp.headers_mut().try_reserve(header_count).ok(); } Ok(()) } @@ -394,8 +378,22 @@ impl ItsiHttpResponse { } } + pub fn add_header(&self, header_name: Bytes, value: Bytes) -> MagnusResult<()> { + if let Some(ref mut resp) = *self.response.write() { + let headers_mut = resp.headers_mut(); + let header_name = HeaderName::from_bytes(&header_name).map_err(|e| { + itsi_error::ItsiError::InvalidInput(format!( + "Invalid header name {:?}: {:?}", + header_name, e + )) + })?; + self.insert_header(headers_mut, &header_name, value); + } + Ok(()) + } + 
pub fn add_headers(&self, headers: HashMap>) -> MagnusResult<()> { - if let Some(ref mut resp) = *self.data.response.write() { + if let Some(ref mut resp) = *self.response.write() { let headers_mut = resp.headers_mut(); for (name, values) in headers { let header_name = HeaderName::from_bytes(&name).map_err(|e| { @@ -414,7 +412,7 @@ impl ItsiHttpResponse { } pub fn set_status(&self, status: u16) -> MagnusResult<()> { - if let Some(ref mut resp) = *self.data.response.write() { + if let Some(ref mut resp) = *self.response.write() { *resp.status_mut() = StatusCode::from_u16(status).map_err(|e| { itsi_error::ItsiError::InvalidInput(format!( "Invalid status code {:?}: {:?}", @@ -428,13 +426,14 @@ impl ItsiHttpResponse { pub fn hijack(&self, fd: i32) -> MagnusResult<()> { let stream = unsafe { UnixStream::from_raw_fd(fd) }; - *self.data.hijacked_socket.write() = Some(stream); - if let Some(writer) = self.data.response_writer.write().as_ref() { - writer - .blocking_send(ByteFrame::Empty) - .map_err(|_| itsi_error::ItsiError::ClientConnectionClosed)? 
+ *self.hijacked_socket.write() = Some(stream); + if let Some(sender) = self.response_sender.write().take() { + sender + .send(ResponseFrame::HijackedResponse(self.clone())) + .ok(); } - self.close(); + + self.close()?; Ok(()) } } diff --git a/crates/itsi_server/src/ruby_types/itsi_server.rs b/crates/itsi_server/src/ruby_types/itsi_server.rs index 54e2c2aa..88b6c0ee 100644 --- a/crates/itsi_server/src/ruby_types/itsi_server.rs +++ b/crates/itsi_server/src/ruby_types/itsi_server.rs @@ -64,7 +64,7 @@ impl ItsiServer { Ok(if server_config.server_params.read().workers > 1 { ServeStrategy::Cluster(Arc::new(ClusterMode::new(server_config.clone()))) } else { - ServeStrategy::Single(Arc::new(SingleMode::new(server_config.clone())?)) + ServeStrategy::Single(Arc::new(SingleMode::new(server_config.clone(), 0)?)) }) } diff --git a/crates/itsi_server/src/ruby_types/itsi_server/file_watcher.rs b/crates/itsi_server/src/ruby_types/itsi_server/file_watcher.rs index 8ceeb5f9..08219cc0 100644 --- a/crates/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +++ b/crates/itsi_server/src/ruby_types/itsi_server/file_watcher.rs @@ -32,18 +32,8 @@ struct PatternGroup { /// component that contains a wildcard character. fn extract_and_canonicalize_base_dir(pattern: &str) -> PathBuf { if !(pattern.contains("*") || pattern.contains("?") || pattern.contains('[')) { - if let Ok(metadata) = fs::metadata(pattern) { - if metadata.is_dir() { - return fs::canonicalize(pattern).unwrap(); - } - if metadata.is_file() { - return fs::canonicalize(pattern) - .unwrap() - .parent() - .unwrap() - .to_path_buf(); - } - } + let base = PathBuf::from("."); + return fs::canonicalize(&base).unwrap_or(base); } let path = Path::new(pattern); @@ -63,12 +53,11 @@ fn extract_and_canonicalize_base_dir(pattern: &str) -> PathBuf { base }; - // Canonicalize to get the absolute path. 
fs::canonicalize(&base).unwrap_or(base) } /// Minimum time between triggering the same pattern group (debounce time) -const DEBOUNCE_DURATION: Duration = Duration::from_millis(500); +const DEBOUNCE_DURATION: Duration = Duration::from_millis(2000); pub fn watch_groups(pattern_groups: Vec<(String, Vec>)>) -> Result> { let (r_fd, w_fd): (OwnedFd, OwnedFd) = pipe().map_err(|e| { @@ -145,13 +134,14 @@ pub fn watch_groups(pattern_groups: Vec<(String, Vec>)>) -> Result>)>) -> Result, pub middleware_loader: HeapValue, pub middleware: OnceLock, + pub pipeline_flush: bool, + pub writev: Option, + pub max_concurrent_streams: Option, + pub max_local_error_reset_streams: Option, + pub max_header_list_size: u32, + pub max_send_buf_size: usize, pub binds: Vec, #[debug(skip)] pub(crate) listeners: Mutex>, @@ -178,19 +184,19 @@ impl ServerParams { } fn from_rb_hash(rb_param_hash: RHash) -> Result { + let num_cpus = num_cpus::get_physical() as u8; let workers = rb_param_hash .fetch::<_, Option>("workers")? - .unwrap_or(num_cpus::get() as u8); + .unwrap_or(num_cpus); let worker_memory_limit: Option = rb_param_hash.fetch("worker_memory_limit")?; let silence: bool = rb_param_hash.fetch("silence")?; let multithreaded_reactor: bool = rb_param_hash .fetch::<_, Option>("multithreaded_reactor")? - .unwrap_or(workers == 1); + .unwrap_or(workers <= (num_cpus / 3)); let pin_worker_cores: bool = rb_param_hash .fetch::<_, Option>("pin_worker_cores")? 
- .unwrap_or(true); + .unwrap_or(false); let shutdown_timeout: f64 = rb_param_hash.fetch("shutdown_timeout")?; - let hooks: Option = rb_param_hash.fetch("hooks")?; let hooks = hooks .map(|rhash| -> Result>> { @@ -281,6 +287,14 @@ impl ServerParams { set_target_filters(target_filters); } + let pipeline_flush: bool = rb_param_hash.fetch("pipeline_flush")?; + let writev: Option = rb_param_hash.fetch("writev")?; + let max_concurrent_streams: Option = rb_param_hash.fetch("max_concurrent_streams")?; + let max_local_error_reset_streams: Option = + rb_param_hash.fetch("max_local_error_reset_streams")?; + let max_header_list_size: u32 = rb_param_hash.fetch("max_header_list_size")?; + let max_send_buf_size: usize = rb_param_hash.fetch("max_send_buf_size")?; + let binds: Option> = rb_param_hash.fetch("binds")?; let binds = binds .unwrap_or_else(|| vec![DEFAULT_BIND.to_string()]) @@ -322,6 +336,12 @@ impl ServerParams { scheduler_class, ruby_thread_request_backlog_size, oob_gc_responses_threshold, + pipeline_flush, + writev, + max_concurrent_streams, + max_local_error_reset_streams, + max_header_list_size, + max_send_buf_size, binds, itsi_server_token_preference, socket_opts, @@ -437,7 +457,8 @@ impl ItsiServerConfig { let requires_exec = if !is_single_mode && !server_params.preload { // In cluster mode children are cycled during a reload // and if preload is disabled, will get a clean memory slate, - // so we don't need to exec. + // so we don't need to exec. We do need to rebind our listeners here. 
+ server_params.setup_listeners()?; false } else { // In non-cluster mode, or when preloading is enabled, we shouldn't try to diff --git a/crates/itsi_server/src/server/binds/listener.rs b/crates/itsi_server/src/server/binds/listener.rs index 6129bbc3..7af10c89 100644 --- a/crates/itsi_server/src/server/binds/listener.rs +++ b/crates/itsi_server/src/server/binds/listener.rs @@ -9,7 +9,7 @@ use super::bind_protocol::BindProtocol; use super::tls::ItsiTlsAcceptor; use itsi_error::{ItsiError, Result}; use itsi_tracing::info; -use socket2::{Domain, Protocol, Socket, Type}; +use socket2::{Domain, Protocol, SockRef, Socket, Type}; use std::fmt::Display; use std::net::{IpAddr, SocketAddr, TcpListener}; use std::os::fd::{AsRawFd, FromRawFd, RawFd}; @@ -274,15 +274,53 @@ impl Display for Listener { } impl Listener { - pub fn into_tokio_listener(self) -> TokioListener { + pub fn rebind_listener(listener: TcpListener) -> TcpListener { + let sock = SockRef::from(&listener); + let (reuse_address, reuse_port) = ( + sock.reuse_address().unwrap_or(true), + sock.reuse_port().unwrap_or(true), + ); + + if !reuse_address || !reuse_port { + return listener; + } + + let (ip, port) = sock + .local_addr() + .unwrap() + .as_socket() + .map(|addr| (addr.ip(), addr.port())) + .unwrap(); + + let socket_opts = SocketOpts { + reuse_address: sock.reuse_address().unwrap_or(true), // default: true + reuse_port: sock.reuse_port().unwrap_or(false), // default: false + nodelay: sock.nodelay().unwrap_or(false), // default: false + recv_buffer_size: sock.recv_buffer_size().unwrap_or(0), + send_buffer_size: sock.send_buffer_size().unwrap_or(0), + listen_backlog: 1024, // cannot query – pick sane default + }; + + connect_tcp_socket(ip, port, &socket_opts).unwrap() + } + + pub fn into_tokio_listener(self, no_rebind: bool) -> TokioListener { match self { - Listener::Tcp(listener) => { + Listener::Tcp(mut listener) => { + if cfg!(target_os = "linux") && !no_rebind { + listener = 
Listener::rebind_listener(listener); + } TokioListener::Tcp(TokioTcpListener::from_std(listener).unwrap()) } - Listener::TcpTls((listener, acceptor)) => TokioListener::TcpTls( - TokioTcpListener::from_std(listener).unwrap(), - acceptor.clone(), - ), + Listener::TcpTls((mut listener, acceptor)) => { + if cfg!(target_os = "linux") && !no_rebind { + listener = Listener::rebind_listener(listener); + } + TokioListener::TcpTls( + TokioTcpListener::from_std(listener).unwrap(), + acceptor.clone(), + ) + } Listener::Unix(listener) => { TokioListener::Unix(TokioUnixListener::from_std(listener).unwrap()) } diff --git a/crates/itsi_server/src/server/frame_stream.rs b/crates/itsi_server/src/server/frame_stream.rs new file mode 100644 index 00000000..e5afa38e --- /dev/null +++ b/crates/itsi_server/src/server/frame_stream.rs @@ -0,0 +1,142 @@ +use bytes::{Bytes, BytesMut}; +use futures::Stream; +use std::convert::Infallible; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; +use tokio::sync::mpsc::Receiver; +use tokio::sync::watch; +use tokio::time::{sleep, Sleep}; + +use super::serve_strategy::single_mode::RunningPhase; + +#[derive(Debug)] +pub struct FrameStream { + receiver: Receiver, + shutdown_rx: watch::Receiver, + drained: bool, +} + +impl FrameStream { + pub fn new(receiver: Receiver, shutdown_rx: watch::Receiver) -> Self { + Self { + receiver, + shutdown_rx, + drained: false, + } + } +} + +impl Stream for FrameStream { + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + if this.drained { + return Poll::Ready(None); + } + + match Pin::new(&mut this.receiver).poll_recv(cx) { + Poll::Ready(Some(bytes)) => Poll::Ready(Some(Ok(bytes))), + Poll::Ready(None) => { + this.drained = true; + Poll::Ready(None) + } + Poll::Pending => { + if this.shutdown_rx.has_changed().unwrap_or(false) + && *this.shutdown_rx.borrow() == RunningPhase::ShutdownPending + { 
+ while let Ok(bytes) = this.receiver.try_recv() { + return Poll::Ready(Some(Ok(bytes))); + } + this.drained = true; + return Poll::Ready(None); + } + + Poll::Pending + } + } + } +} + +/// BufferedStream wraps a stream of Bytes and coalesces chunks into a larger buffer, +/// flushing either after `max_flush_bytes` is reached or `max_flush_interval` elapses. +pub struct BufferedStream { + inner: S, + buffer: BytesMut, + max_flush_bytes: usize, + max_flush_interval: Duration, + flush_deadline: Option>>, +} + +impl BufferedStream { + pub fn new(inner: S, max_flush_bytes: usize, max_flush_interval: Duration) -> Self { + Self { + inner, + buffer: BytesMut::with_capacity(max_flush_bytes), + max_flush_bytes, + max_flush_interval, + flush_deadline: None, + } + } +} + +impl Stream for BufferedStream +where + S: Stream> + Unpin, +{ + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + loop { + // Flush on timer if needed + if let Some(deadline) = &mut this.flush_deadline { + if Pin::new(deadline).poll(cx).is_ready() && !this.buffer.is_empty() { + let flushed = this.buffer.split().freeze(); + this.flush_deadline = None; + return Poll::Ready(Some(Ok(flushed))); + } + } + + match Pin::new(&mut this.inner).poll_next(cx) { + Poll::Ready(Some(Ok(bytes))) => { + this.buffer.extend_from_slice(&bytes); + + if bytes.is_empty() || this.buffer.len() >= this.max_flush_bytes { + let flushed = this.buffer.split().freeze(); + this.flush_deadline = None; + return Poll::Ready(Some(Ok(flushed))); + } + + if this.flush_deadline.is_none() { + this.flush_deadline = Some(Box::pin(sleep(this.max_flush_interval))); + } + } + Poll::Ready(None) => { + if this.buffer.is_empty() { + return Poll::Ready(None); + } else { + let flushed = this.buffer.split().freeze(); + this.flush_deadline = None; + return Poll::Ready(Some(Ok(flushed))); + } + } + Poll::Pending => { + if let Some(deadline) = &mut this.flush_deadline { + let deadline = 
deadline.as_mut(); + if deadline.poll(cx).is_ready() && !this.buffer.is_empty() { + let flushed = this.buffer.split().freeze(); + this.flush_deadline = None; + return Poll::Ready(Some(Ok(flushed))); + } + } + return Poll::Pending; + } + } + } + } +} diff --git a/crates/itsi_server/src/server/http_message_types.rs b/crates/itsi_server/src/server/http_message_types.rs index 43e4cc88..363adfa1 100644 --- a/crates/itsi_server/src/server/http_message_types.rs +++ b/crates/itsi_server/src/server/http_message_types.rs @@ -1,13 +1,146 @@ -use std::convert::Infallible; - use bytes::Bytes; -use http::{Request, Response}; -use http_body_util::combinators::BoxBody; -use hyper::body::Incoming; +use core::fmt; +use futures::Stream; +use futures_util::TryStreamExt; +use http::Request; +use http_body_util::{combinators::WithTrailers, BodyExt, Either, Empty, Full, StreamBody}; +use hyper::body::{Body, Frame, Incoming, SizeHint}; +use std::{ + convert::Infallible, + pin::Pin, + task::{Context, Poll}, +}; use super::size_limited_incoming::SizeLimitedIncoming; -pub type HttpResponse = Response>; +type Inner = Either, Empty>; + +type BoxStream = + Pin, Infallible>> + Send + Sync + 'static>>; + +pub struct PlainBody(Either, Inner>); + +impl fmt::Debug for PlainBody { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + Either::Left(_) => f.write_str("PlainBody::Stream(..)"), + Either::Right(inner) => match inner { + Either::Left(full) => f.debug_tuple("PlainBody::Full").field(full).finish(), + Either::Right(_) => f.write_str("PlainBody::Empty"), + }, + } + } +} +type DynErr = Box; + +impl Body for PlainBody { + type Data = Bytes; + type Error = DynErr; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + unsafe { self.map_unchecked_mut(|s| &mut s.0) }.poll_frame(cx) + } + + fn size_hint(&self) -> SizeHint { + self.0.size_hint() + } +} + +impl PlainBody { + fn stream(s: S) -> Self + where + S: Stream> + Send + Sync 
+ 'static, + { + let boxed: BoxStream = Box::pin(s.map_ok(Frame::data)); + Self(Either::Left(StreamBody::new(boxed))) + } + + fn full(bytes: Bytes) -> Self { + Self(Either::Right(Either::Left(Full::new(bytes)))) + } + + fn empty() -> Self { + Self(Either::Right(Either::Right(Empty::new()))) + } +} + +type BoxTrailers = Pin< + Box>> + Send + Sync>, +>; + +pub enum HttpBody { + Plain(PlainBody), + WithT(WithTrailers), +} + +impl fmt::Debug for HttpBody { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + HttpBody::Plain(b) => f.debug_tuple("HttpBody::Plain").field(b).finish(), + HttpBody::WithT(_) => f.write_str("HttpBody::WithT(..)"), + } + } +} + +impl Body for HttpBody { + type Data = Bytes; + type Error = DynErr; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + unsafe { + match self.get_unchecked_mut() { + HttpBody::Plain(b) => Pin::new_unchecked(b).poll_frame(cx), + HttpBody::WithT(b) => Pin::new_unchecked(b).poll_frame(cx), + } + } + } + + fn size_hint(&self) -> SizeHint { + match self { + HttpBody::Plain(b) => b.size_hint(), + HttpBody::WithT(b) => b.size_hint(), + } + } +} + +impl HttpBody { + pub fn stream(s: S) -> Self + where + S: Stream> + Send + Sync + 'static, + { + HttpBody::Plain(PlainBody::stream(s)) + } + + pub fn full(bytes: Bytes) -> Self { + HttpBody::Plain(PlainBody::full(bytes)) + } + + pub fn empty() -> Self { + HttpBody::Plain(PlainBody::empty()) + } + + pub fn with_trailers(self, fut: Fut) -> Self + where + Fut: std::future::Future>> + + Send + + Sync + + 'static, + { + let boxed: BoxTrailers = Box::pin(fut); + match self { + HttpBody::Plain(p) => HttpBody::WithT(p.with_trailers(boxed)), + already @ HttpBody::WithT(_) => already, + } + } +} + +pub type HttpResponse = http::Response; pub type HttpRequest = Request>; pub trait ConversionExt { @@ -64,7 +197,7 @@ impl PathExt for str { if self == "/" { self } else { - self.trim_end_matches("/") + 
self.trim_end_matches('/') } } } @@ -91,7 +224,7 @@ impl RequestExt for HttpRequest { fn query_param(&self, query_name: &str) -> Option<&str> { self.uri() .query() - .and_then(|query| query.split('&').find(|param| param.starts_with(query_name))) - .map(|param| param.split('=').nth(1).unwrap_or("")) + .and_then(|q| q.split('&').find(|p| p.starts_with(query_name))) + .map(|p| p.split('=').nth(1).unwrap_or("")) } } diff --git a/crates/itsi_server/src/server/io_stream.rs b/crates/itsi_server/src/server/io_stream.rs index d47b71d9..ea073f97 100644 --- a/crates/itsi_server/src/server/io_stream.rs +++ b/crates/itsi_server/src/server/io_stream.rs @@ -2,6 +2,7 @@ use pin_project::pin_project; use tokio::net::{TcpStream, UnixStream}; use tokio_rustls::server::TlsStream; +use std::io::{self, IoSlice}; use std::os::unix::io::{AsRawFd, RawFd}; use std::pin::Pin; use std::task::{Context, Poll}; @@ -34,12 +35,12 @@ pub enum IoStream { } impl IoStream { - pub fn addr(&self) -> SockAddr { + pub fn addr(&self) -> String { match self { - IoStream::Tcp { addr, .. } => addr.clone(), - IoStream::TcpTls { addr, .. } => addr.clone(), - IoStream::Unix { addr, .. } => addr.clone(), - IoStream::UnixTls { addr, .. } => addr.clone(), + IoStream::Tcp { addr, .. } => addr.to_string(), + IoStream::TcpTls { addr, .. } => addr.to_string(), + IoStream::Unix { addr, .. } => addr.to_string(), + IoStream::UnixTls { addr, .. } => addr.to_string(), } } } @@ -90,6 +91,28 @@ impl AsyncWrite for IoStream { IoStreamEnumProj::UnixTls { stream, .. } => stream.poll_shutdown(cx), } } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[IoSlice<'_>], + ) -> Poll> { + match self.project() { + IoStreamEnumProj::Tcp { stream, .. } => stream.poll_write_vectored(cx, bufs), + IoStreamEnumProj::TcpTls { stream, .. } => stream.poll_write_vectored(cx, bufs), + IoStreamEnumProj::Unix { stream, .. } => stream.poll_write_vectored(cx, bufs), + IoStreamEnumProj::UnixTls { stream, .. 
} => stream.poll_write_vectored(cx, bufs), + } + } + + fn is_write_vectored(&self) -> bool { + match self { + IoStream::Tcp { stream, .. } => stream.is_write_vectored(), + IoStream::TcpTls { stream, .. } => stream.is_write_vectored(), + IoStream::Unix { stream, .. } => stream.is_write_vectored(), + IoStream::UnixTls { stream, .. } => stream.is_write_vectored(), + } + } } impl AsRawFd for IoStream { diff --git a/crates/itsi_server/src/server/lifecycle_event.rs b/crates/itsi_server/src/server/lifecycle_event.rs index 18cb99ff..5a258f4a 100644 --- a/crates/itsi_server/src/server/lifecycle_event.rs +++ b/crates/itsi_server/src/server/lifecycle_event.rs @@ -1,4 +1,4 @@ -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum LifecycleEvent { Start, Shutdown, diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs index 91e773c1..ae59b64c 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs @@ -3,7 +3,6 @@ use base64::{engine::general_purpose, Engine}; use bytes::Bytes; use either::Either; use http::{Response, StatusCode}; -use http_body_util::{combinators::BoxBody, Full}; use magnus::error::Result; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -11,7 +10,7 @@ use std::str; use tracing::debug; use crate::{ - server::http_message_types::{HttpRequest, HttpResponse, RequestExt}, + server::http_message_types::{HttpBody, HttpRequest, HttpResponse, RequestExt}, services::{itsi_http_service::HttpRequestContext, password_hasher::verify_password_hash}, }; @@ -34,7 +33,7 @@ impl AuthBasic { "WWW-Authenticate", format!("Basic realm=\"{}\"", self.realm), ) - .body(BoxBody::new(Full::new(Bytes::from("Unauthorized")))) + .body(HttpBody::full(Bytes::from("Unauthorized"))) .unwrap() } } diff --git 
a/crates/itsi_server/src/server/middleware_stack/middlewares/compression.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/compression.rs index d55f0c3e..fd690c3f 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/compression.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/compression.rs @@ -1,5 +1,5 @@ use crate::{ - server::http_message_types::{HttpRequest, HttpResponse}, + server::http_message_types::{HttpBody, HttpRequest, HttpResponse}, services::itsi_http_service::HttpRequestContext, }; @@ -20,8 +20,8 @@ use http::{ header::{GetAll, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_TYPE}, HeaderValue, Response, }; -use http_body_util::{combinators::BoxBody, BodyExt, Full, StreamBody}; -use hyper::body::{Body, Frame}; +use http_body_util::{BodyExt, StreamBody}; +use hyper::body::Body; use magnus::error::Result; use serde::{Deserialize, Serialize}; use std::convert::Infallible; @@ -126,15 +126,13 @@ impl MimeType { } } -fn stream_encode(encoder: R) -> BoxBody +fn stream_encode(encoder: R) -> HttpBody where R: AsyncRead + Unpin + Sync + Send + 'static, { - let encoded_stream = ReaderStream::new(encoder).map(|res| { - res.map(Frame::data) - .map_err(|_| -> Infallible { unreachable!("We handle IO errors above") }) - }); - BoxBody::new(StreamBody::new(encoded_stream)) + let encoded_stream = ReaderStream::new(encoder) + .map(|res| res.map_err(|_| -> Infallible { unreachable!("We handle IO errors above") })); + HttpBody::stream(StreamBody::new(encoded_stream)) } fn update_content_encoding(parts: &mut http::response::Parts, new_encoding: HeaderValue) { @@ -293,7 +291,7 @@ impl MiddlewareLayer for Compression { } CompressionAlgorithm::Identity => unreachable!(), }; - BoxBody::new(Full::new(Bytes::from(compressed_bytes))) + HttpBody::full(Bytes::from(compressed_bytes)) } else { let stream = body .into_data_stream() diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/cors.rs 
b/crates/itsi_server/src/server/middleware_stack/middlewares/cors.rs index 0e649f8f..152fecca 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/cors.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/cors.rs @@ -1,12 +1,11 @@ use super::{FromValue, MiddlewareLayer}; use crate::{ - server::http_message_types::{HttpRequest, HttpResponse, RequestExt}, + server::http_message_types::{HttpBody, HttpRequest, HttpResponse, RequestExt}, services::itsi_http_service::HttpRequestContext, }; use async_trait::async_trait; use http::{HeaderMap, Method, Response}; -use http_body_util::{combinators::BoxBody, Empty}; use itsi_error::ItsiError; use magnus::error::Result; use serde::Deserialize; @@ -273,7 +272,7 @@ impl MiddlewareLayer for Cors { let mut response_builder = Response::builder().status(204); *response_builder.headers_mut().unwrap() = headers; let response = response_builder - .body(BoxBody::new(Empty::new())) + .body(HttpBody::empty()) .map_err(ItsiError::new)?; return Ok(either::Either::Right(response)); } diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/csp.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/csp.rs index 000b203e..523efaa9 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/csp.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/csp.rs @@ -1,6 +1,6 @@ use super::FromValue; use crate::{ - server::http_message_types::{HttpRequest, HttpResponse}, + server::http_message_types::{HttpBody, HttpRequest, HttpResponse}, services::itsi_http_service::HttpRequestContext, }; use async_trait::async_trait; @@ -8,7 +8,7 @@ use bytes::{Bytes, BytesMut}; use either::Either; use futures::TryStreamExt; use http::{HeaderValue, StatusCode}; -use http_body_util::{combinators::BoxBody, BodyExt, Empty}; +use http_body_util::BodyExt; use itsi_error::ItsiError; use serde::{Deserialize, Serialize}; use std::sync::Arc; @@ -164,7 +164,7 @@ impl super::MiddlewareLayer for 
Csp { } } - let mut resp = HttpResponse::new(BoxBody::new(Empty::new())); + let mut resp = HttpResponse::new(HttpBody::empty()); *resp.status_mut() = StatusCode::NO_CONTENT; return Ok(Either::Right(resp)); } diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/error_response.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/error_response.rs index c9654540..851a2bc9 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/error_response.rs @@ -1,13 +1,11 @@ use bytes::Bytes; use http::header::CONTENT_TYPE; use http::Response; -use http_body_util::{combinators::BoxBody, Full}; use serde::{Deserialize, Deserializer}; -use std::convert::Infallible; use std::path::PathBuf; use tracing::warn; -use crate::server::http_message_types::{HttpResponse, ResponseFormat}; +use crate::server::http_message_types::{HttpBody, HttpResponse, ResponseFormat}; use crate::services::static_file_server::ROOT_STATIC_FILE_SERVER; mod default_responses; @@ -19,7 +17,7 @@ pub enum ContentSource { File(PathBuf), #[serde(rename(deserialize = "static"))] #[serde(skip_deserializing)] - Static(Full), + Static(Bytes), } #[derive(Debug, Clone, Deserialize, Default)] @@ -144,13 +142,13 @@ impl ErrorResponse { code: u16, source: &Option, accept: ResponseFormat, - ) -> BoxBody { + ) -> HttpBody { match source { Some(ContentSource::Inline(text)) => { - return BoxBody::new(Full::new(Bytes::from(text.clone()))); + return HttpBody::full(Bytes::from(text.clone())); } Some(ContentSource::Static(text)) => { - return BoxBody::new(text.clone()); + return HttpBody::full(text.clone()); } Some(ContentSource::File(path)) => { // Convert the PathBuf to a &str (assumes valid UTF-8). 
diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs index 187530ad..0329e8c3 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs @@ -1,91 +1,89 @@ use super::{ContentSource, DefaultFormat, ErrorResponse}; -use crate::server::http_message_types::ResponseFormat; +use crate::server::http_message_types::{HttpBody, ResponseFormat}; use bytes::Bytes; -use http_body_util::{combinators::BoxBody, Full}; -use std::convert::Infallible; impl DefaultFormat { pub fn response_for_code(&self, code: u16) -> ContentSource { match self { DefaultFormat::Plaintext => match code { - 500 => ContentSource::Static(Full::new("500 Internal Error".into())), - 404 => ContentSource::Static(Full::new("404 Not Found".into())), - 401 => ContentSource::Static(Full::new("401 Unauthorized".into())), - 403 => ContentSource::Static(Full::new("403 Forbidden".into())), - 413 => ContentSource::Static(Full::new("413 Payload Too Large".into())), - 429 => ContentSource::Static(Full::new("429 Too Many Requests".into())), - 502 => ContentSource::Static(Full::new("502 Bad Gateway".into())), - 503 => ContentSource::Static(Full::new("503 Service Unavailable".into())), - 504 => ContentSource::Static(Full::new("504 Gateway Timeout".into())), - _ => ContentSource::Static(Full::new("Unexpected Error".into())), + 500 => ContentSource::Static("500 Internal Error".into()), + 404 => ContentSource::Static("404 Not Found".into()), + 401 => ContentSource::Static("401 Unauthorized".into()), + 403 => ContentSource::Static("403 Forbidden".into()), + 413 => ContentSource::Static("413 Payload Too Large".into()), + 429 => ContentSource::Static("429 Too Many Requests".into()), + 502 => ContentSource::Static("502 Bad 
Gateway".into()), + 503 => ContentSource::Static("503 Service Unavailable".into()), + 504 => ContentSource::Static("504 Gateway Timeout".into()), + _ => ContentSource::Static("Unexpected Error".into()), }, DefaultFormat::Html => match code { - 500 => ContentSource::Static(Full::new( + 500 => ContentSource::Static( include_str!("../../../../default_responses/html/500.html").into(), - )), - 404 => ContentSource::Static(Full::new( + ), + 404 => ContentSource::Static( include_str!("../../../../default_responses/html/404.html").into(), - )), - 401 => ContentSource::Static(Full::new( + ), + 401 => ContentSource::Static( include_str!("../../../../default_responses/html/401.html").into(), - )), - 403 => ContentSource::Static(Full::new( + ), + 403 => ContentSource::Static( include_str!("../../../../default_responses/html/403.html").into(), - )), - 413 => ContentSource::Static(Full::new( + ), + 413 => ContentSource::Static( include_str!("../../../../default_responses/html/413.html").into(), - )), - 429 => ContentSource::Static(Full::new( + ), + 429 => ContentSource::Static( include_str!("../../../../default_responses/html/429.html").into(), - )), - 502 => ContentSource::Static(Full::new( + ), + 502 => ContentSource::Static( include_str!("../../../../default_responses/html/502.html").into(), - )), - 503 => ContentSource::Static(Full::new( + ), + 503 => ContentSource::Static( include_str!("../../../../default_responses/html/503.html").into(), - )), - 504 => ContentSource::Static(Full::new( + ), + 504 => ContentSource::Static( include_str!("../../../../default_responses/html/504.html").into(), - )), - _ => ContentSource::Static(Full::new( + ), + _ => ContentSource::Static( include_str!("../../../../default_responses/html/500.html").into(), - )), + ), }, DefaultFormat::Json => match code { - 500 => ContentSource::Static(Full::new( + 500 => ContentSource::Static( include_str!("../../../../default_responses/json/500.json").into(), - )), - 404 => ContentSource::Static(Full::new( + 
), + 404 => ContentSource::Static( include_str!("../../../../default_responses/json/404.json").into(), - )), - 401 => ContentSource::Static(Full::new( + ), + 401 => ContentSource::Static( include_str!("../../../../default_responses/json/401.json").into(), - )), - 403 => ContentSource::Static(Full::new( + ), + 403 => ContentSource::Static( include_str!("../../../../default_responses/json/403.json").into(), - )), - 413 => ContentSource::Static(Full::new( + ), + 413 => ContentSource::Static( include_str!("../../../../default_responses/json/413.json").into(), - )), - 429 => ContentSource::Static(Full::new( + ), + 429 => ContentSource::Static( include_str!("../../../../default_responses/json/429.json").into(), - )), - 502 => ContentSource::Static(Full::new( + ), + 502 => ContentSource::Static( include_str!("../../../../default_responses/json/502.json").into(), - )), - 503 => ContentSource::Static(Full::new( + ), + 503 => ContentSource::Static( include_str!("../../../../default_responses/json/503.json").into(), - )), - 504 => ContentSource::Static(Full::new( + ), + 504 => ContentSource::Static( include_str!("../../../../default_responses/json/504.json").into(), - )), - _ => ContentSource::Static(Full::new("Unexpected Error".into())), + ), + _ => ContentSource::Static("Unexpected Error".into()), }, } } } impl ErrorResponse { - pub fn fallback_body_for(code: u16, accept: ResponseFormat) -> BoxBody { + pub fn fallback_body_for(code: u16, accept: ResponseFormat) -> HttpBody { let source = match accept { ResponseFormat::TEXT => DefaultFormat::Plaintext.response_for_code(code), ResponseFormat::HTML => DefaultFormat::Html.response_for_code(code), @@ -93,9 +91,9 @@ impl ErrorResponse { ResponseFormat::UNKNOWN => ContentSource::Inline("Unexpected Error".to_owned()), }; match source { - ContentSource::Inline(bytes) => BoxBody::new(Full::new(Bytes::from(bytes))), - ContentSource::Static(bytes) => BoxBody::new(bytes), - ContentSource::File(_) => 
BoxBody::new(Full::new(Bytes::from("Unexpected error"))), + ContentSource::Inline(bytes) => HttpBody::full(Bytes::from(bytes)), + ContentSource::Static(bytes) => HttpBody::full(bytes), + ContentSource::File(_) => HttpBody::full(Bytes::from("Unexpected error")), } } pub fn internal_server_error() -> Self { diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/etag.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/etag.rs index 74242fad..6921098f 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/etag.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/etag.rs @@ -1,5 +1,5 @@ use crate::{ - server::http_message_types::{HttpRequest, HttpResponse}, + server::http_message_types::{HttpBody, HttpRequest, HttpResponse}, services::itsi_http_service::HttpRequestContext, }; @@ -10,7 +10,7 @@ use bytes::{Bytes, BytesMut}; use either::Either; use futures::TryStreamExt; use http::{header, HeaderValue, Response, StatusCode}; -use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full}; +use http_body_util::BodyExt; use hyper::body::Body; use magnus::error::Result; use serde::Deserialize; @@ -113,7 +113,7 @@ impl MiddlewareLayer for ETag { .await { Ok(bytes_mut) => bytes_mut.freeze(), - Err(_) => return Response::from_parts(parts, BoxBody::new(Empty::new())), + Err(_) => return Response::from_parts(parts, HttpBody::empty()), }; let computed_etag = match self.algorithm { @@ -139,14 +139,14 @@ impl MiddlewareLayer for ETag { parts.headers.insert(header::ETAG, value); } - body = Full::new(full_bytes).boxed(); + body = HttpBody::full(full_bytes); formatted_etag }; if let Some(if_none_match) = context.get_if_none_match() { if if_none_match == etag_value || if_none_match == "*" { // Return 304 Not Modified without the body - let mut not_modified = Response::new(BoxBody::new(Empty::new())); + let mut not_modified = Response::new(HttpBody::empty()); *not_modified.status_mut() = StatusCode::NOT_MODIFIED; // Copy 
headers we want to preserve for (name, value) in parts.headers.iter() { diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/proxy.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/proxy.rs index e2e8f2a3..c391a498 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/proxy.rs @@ -14,7 +14,7 @@ use super::{string_rewrite::StringRewrite, ErrorResponse, FromValue, MiddlewareL use crate::{ server::{ binds::bind::{Bind, BindAddress}, - http_message_types::{HttpRequest, HttpResponse, RequestExt, ResponseFormat}, + http_message_types::{HttpBody, HttpRequest, HttpResponse, RequestExt, ResponseFormat}, size_limited_incoming::MaxBodySizeReached, }, services::itsi_http_service::HttpRequestContext, @@ -24,8 +24,7 @@ use bytes::{Bytes, BytesMut}; use either::Either; use futures::TryStreamExt; use http::{HeaderMap, Method, Response, StatusCode}; -use http_body_util::{combinators::BoxBody, BodyExt, Empty, StreamBody}; -use hyper::body::Frame; +use http_body_util::BodyExt; use magnus::error::Result; use rand::Rng; use reqwest::{ @@ -373,19 +372,17 @@ impl MiddlewareLayer for Proxy { for (hn, hv) in response.headers() { builder = builder.header(hn, hv); } - let response = builder.body(BoxBody::new(StreamBody::new( - response - .bytes_stream() - .map_ok(Frame::data) - .map_err(|_| -> Infallible { unreachable!("We handle IO errors above") }), - ))); + let response = + builder.body(HttpBody::stream(response.bytes_stream().map_err( + |_| -> Infallible { unreachable!("We handle IO errors above") }, + ))); response.unwrap_or(error_response) } Err(e) => { debug!(target: "middleware::proxy", "Error {:?} received", e); if let Some(inner) = e.source() { if inner.downcast_ref::().is_some() { - let mut max_body_response = Response::new(BoxBody::new(Empty::new())); + let mut max_body_response = Response::new(HttpBody::empty()); *max_body_response.status_mut() = 
StatusCode::PAYLOAD_TOO_LARGE; return Ok(Either::Right(max_body_response)); } diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/redirect.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/redirect.rs index f790e120..e5a899ec 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/redirect.rs @@ -1,7 +1,7 @@ use super::{string_rewrite::StringRewrite, FromValue, MiddlewareLayer}; use crate::{ server::{ - http_message_types::{HttpRequest, HttpResponse}, + http_message_types::{HttpBody, HttpRequest, HttpResponse}, redirect_type::RedirectType, }, services::itsi_http_service::HttpRequestContext, @@ -9,7 +9,6 @@ use crate::{ use async_trait::async_trait; use either::Either; use http::Response; -use http_body_util::{combinators::BoxBody, Empty}; use magnus::error::Result; use serde::Deserialize; use tracing::debug; @@ -39,7 +38,7 @@ impl Redirect { req: &HttpRequest, context: &mut HttpRequestContext, ) -> Result { - let mut response = Response::new(BoxBody::new(Empty::new())); + let mut response = Response::new(HttpBody::empty()); *response.status_mut() = self.redirect_type.status_code(); let destination = self.to.rewrite_request(req, context).parse().map_err(|e| { magnus::Error::new( diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs index fee00d0a..44d6e613 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs @@ -134,8 +134,7 @@ impl MiddlewareLayer for StaticAssets { let file_server = self.file_server.get().unwrap(); let encodings: &[HeaderValue] = context .supported_encoding_set() - .map(Vec::as_slice) - .unwrap_or(&[] as &[HeaderValue]); + .map_or(&[], |set| set.as_slice()); let response = file_server .serve( 
&req, diff --git a/crates/itsi_server/src/server/middleware_stack/middlewares/static_response.rs b/crates/itsi_server/src/server/middleware_stack/middlewares/static_response.rs index ec32fa1e..b5131c3f 100644 --- a/crates/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +++ b/crates/itsi_server/src/server/middleware_stack/middlewares/static_response.rs @@ -1,15 +1,13 @@ use std::sync::OnceLock; use super::{FromValue, MiddlewareLayer}; -use crate::server::http_message_types::{HttpRequest, HttpResponse}; +use crate::server::http_message_types::{HttpBody, HttpRequest, HttpResponse}; use crate::services::itsi_http_service::HttpRequestContext; use async_trait::async_trait; use bytes::Bytes; use derive_more::Debug; use either::Either; use http::{HeaderMap, HeaderName, HeaderValue, Response, StatusCode}; -use http_body_util::combinators::BoxBody; -use http_body_util::Full; use itsi_error::ItsiError; use magnus::error::Result; use serde::Deserialize; @@ -22,7 +20,7 @@ pub struct StaticResponse { #[serde(skip)] header_map: OnceLock, #[serde(skip)] - body_bytes: OnceLock>, + body_bytes: OnceLock, #[serde(skip)] status_code: OnceLock, } @@ -40,7 +38,7 @@ impl MiddlewareLayer for StaticResponse { .set(header_map) .map_err(|_| ItsiError::new("Failed to set headers"))?; self.body_bytes - .set(Full::new(Bytes::from(self.body.clone()))) + .set(Bytes::from(self.body.clone())) .map_err(|_| ItsiError::new("Failed to set body bytes"))?; self.status_code .set(StatusCode::from_u16(self.code).unwrap_or(StatusCode::OK)) @@ -53,7 +51,7 @@ impl MiddlewareLayer for StaticResponse { _req: HttpRequest, _context: &mut HttpRequestContext, ) -> Result> { - let mut resp = Response::new(BoxBody::new(self.body_bytes.get().unwrap().clone())); + let mut resp = Response::new(HttpBody::full(self.body_bytes.get().unwrap().clone())); *resp.status_mut() = *self.status_code.get().unwrap(); *resp.headers_mut() = self.header_map.get().unwrap().clone(); diff --git 
a/crates/itsi_server/src/server/mod.rs b/crates/itsi_server/src/server/mod.rs index 02fa6e0a..d27af45c 100644 --- a/crates/itsi_server/src/server/mod.rs +++ b/crates/itsi_server/src/server/mod.rs @@ -1,5 +1,6 @@ pub mod binds; pub mod byte_frame; +pub mod frame_stream; pub mod http_message_types; pub mod io_stream; pub mod lifecycle_event; diff --git a/crates/itsi_server/src/server/process_worker.rs b/crates/itsi_server/src/server/process_worker.rs index 11bae63d..cfbc6198 100644 --- a/crates/itsi_server/src/server/process_worker.rs +++ b/crates/itsi_server/src/server/process_worker.rs @@ -79,7 +79,7 @@ impl ProcessWorker { ) { error!("Failed to set process group ID: {}", e); } - match SingleMode::new(cluster_template.server_config.clone()) { + match SingleMode::new(cluster_template.server_config.clone(), self.worker_id) { Ok(single_mode) => { if cluster_template .server_config @@ -88,7 +88,7 @@ impl ProcessWorker { .pin_worker_cores { core_affinity::set_for_current( - CORE_IDS[self.worker_id % CORE_IDS.len()], + CORE_IDS[(2 * self.worker_id) % CORE_IDS.len()], ); } Arc::new(single_mode).run().ok(); @@ -166,7 +166,7 @@ impl ProcessWorker { } pub(crate) fn boot_if_dead(&self, cluster_template: Arc) -> bool { - if !self.is_alive() { + if !self.is_alive() && self.child_pid.lock().is_some() { if self.just_started() { error!( "Worker in crash loop {:?}. 
Refusing to restart", @@ -202,7 +202,6 @@ impl ProcessWorker { let child_pid = *self.child_pid.lock(); if let Some(pid) = child_pid { if self.is_alive() { - info!("Worker still alive, sending SIGKILL {}", pid); if let Err(e) = kill(pid, SIGKILL) { error!("Failed to force kill process {}: {}", pid, e); } diff --git a/crates/itsi_server/src/server/serve_strategy/acceptor.rs b/crates/itsi_server/src/server/serve_strategy/acceptor.rs index 371d0409..e73f73a0 100644 --- a/crates/itsi_server/src/server/serve_strategy/acceptor.rs +++ b/crates/itsi_server/src/server/serve_strategy/acceptor.rs @@ -1,6 +1,5 @@ -use std::{ops::Deref, pin::Pin, sync::Arc, time::Duration}; - use hyper_util::rt::TokioIo; +use std::{ops::Deref, pin::Pin, sync::Arc, time::Duration}; use tokio::task::JoinSet; use tracing::debug; @@ -40,17 +39,21 @@ impl Acceptor { let io: TokioIo>> = TokioIo::new(Box::pin(stream)); let mut shutdown_channel = self.shutdown_receiver.clone(); let acceptor_args = self.acceptor_args.clone(); + let service = ItsiHttpService { + inner: Arc::new(ItsiHttpServiceInner { + acceptor_args: acceptor_args.clone(), + addr, + }), + }; + self.join_set.spawn(async move { let executor = &acceptor_args.strategy.executor; - let mut serve = Box::pin(executor.serve_connection_with_upgrades( - io, - ItsiHttpService { - inner: Arc::new(ItsiHttpServiceInner { - acceptor_args: acceptor_args.clone(), - addr: addr.to_string(), - }), - }, - )); + let svc = hyper::service::service_fn(move |req| { + let service = service.clone(); + async move { service.handle_request(req).await } + }); + + let mut serve = Box::pin(executor.serve_connection_with_upgrades(io, svc)); tokio::select! { // Await the connection finishing naturally. @@ -63,7 +66,6 @@ impl Acceptor { debug!("Connection closed abruptly: {:?}", res); } } - serve.as_mut().graceful_shutdown(); }, // A lifecycle event triggers shutdown. 
_ = shutdown_channel.changed() => { @@ -81,6 +83,7 @@ impl Acceptor { pub async fn join(&mut self) { // Join all acceptor tasks with timeout + let deadline = tokio::time::Instant::now() + Duration::from_secs_f64(self.server_params.shutdown_timeout); let sleep_until = tokio::time::sleep_until(deadline); @@ -89,6 +92,7 @@ impl Acceptor { while (self.join_set.join_next().await).is_some() {} } => {}, _ = sleep_until => { + self.join_set.abort_all(); debug!("Shutdown timeout reached; abandoning remaining acceptor tasks."); } } diff --git a/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs b/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs index 06a76c6a..dfb8ad65 100644 --- a/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs +++ b/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs @@ -1,5 +1,5 @@ use crate::ruby_types::itsi_server::itsi_server_config::ItsiServerConfig; -use crate::server::signal::SIGNAL_HANDLER_CHANNEL; +use crate::server::signal::{subscribe_runtime_to_signals, unsubscribe_runtime}; use crate::server::{lifecycle_event::LifecycleEvent, process_worker::ProcessWorker}; use itsi_error::{ItsiError, Result}; use itsi_rb_helpers::{call_with_gvl, call_without_gvl, create_ruby_thread}; @@ -7,31 +7,32 @@ use itsi_tracing::{error, info, warn}; use magnus::Value; use nix::{libc::exit, unistd::Pid}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::{ - sync::{atomic::AtomicUsize, Arc}, + sync::Arc, time::{Duration, Instant}, }; use tokio::{ runtime::{Builder as RuntimeBuilder, Runtime}, - sync::{broadcast, watch, Mutex}, + sync::{watch, Mutex}, time::{self, sleep}, }; use tracing::{debug, instrument}; pub(crate) struct ClusterMode { pub server_config: Arc, pub process_workers: parking_lot::Mutex>, - pub lifecycle_channel: broadcast::Sender, } -static WORKER_ID: AtomicUsize = AtomicUsize::new(0); static CHILD_SIGNAL_SENDER: parking_lot::Mutex>> = parking_lot::Mutex::new(None); +static RELOAD_IN_PROGRESS: AtomicBool = 
AtomicBool::new(false); + impl ClusterMode { pub fn new(server_config: Arc) -> Self { let process_workers = (0..server_config.server_params.read().workers) - .map(|_| ProcessWorker { - worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed), + .map(|id| ProcessWorker { + worker_id: id as usize, ..Default::default() }) .collect(); @@ -39,7 +40,6 @@ impl ClusterMode { Self { server_config, process_workers: parking_lot::Mutex::new(process_workers), - lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(), } } @@ -60,6 +60,26 @@ impl ClusterMode { } } + fn next_worker_id(&self) -> usize { + let mut ids: Vec = self + .process_workers + .lock() + .iter() + .map(|w| w.worker_id) + .collect(); + self.next_available_id_in(&mut ids) + } + + fn next_available_id_in(&self, list: &mut [usize]) -> usize { + list.sort_unstable(); + for (expected, &id) in list.iter().enumerate() { + if id != expected { + return expected; + } + } + list.len() + } + #[allow(clippy::await_holding_lock)] pub async fn handle_lifecycle_event( self: Arc, @@ -97,40 +117,56 @@ impl ClusterMode { self.shutdown().await.ok(); self.server_config.reload_exec()?; } - let mut workers_to_load = self.server_config.server_params.read().workers; - let mut next_workers = Vec::new(); - for worker in self.process_workers.lock().drain(..) 
{ - if workers_to_load == 0 { - worker.graceful_shutdown(self.clone()).await - } else { - workers_to_load -= 1; - worker.reboot(self.clone()).await?; - next_workers.push(worker); - } + + if RELOAD_IN_PROGRESS + .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) + .is_err() + { + warn!("Reload already in progress, ignoring request"); + return Ok(()); } - self.process_workers.lock().extend(next_workers); - while workers_to_load > 0 { - let mut workers = self.process_workers.lock(); + let workers_to_load = self.server_config.server_params.read().workers; + let mut next_workers = Vec::new(); + let mut old_workers = self.process_workers.lock().drain(..).collect::>(); + + // Spawn new workers + for i in 0..workers_to_load { let worker = ProcessWorker { - worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed), + worker_id: i as usize, ..Default::default() }; let worker_clone = worker.clone(); let self_clone = self.clone(); - create_ruby_thread(move || { - call_without_gvl(move || { - worker_clone.boot(self_clone).ok(); - }) + + call_with_gvl(|_| { + create_ruby_thread(move || { + call_without_gvl(move || match worker_clone.boot(self_clone) { + Err(err) => error!("Worker boot failed {:?}", err), + _ => {} + }) + }); }); - workers.push(worker); - workers_to_load -= 1 + + next_workers.push(worker); + + if let Some(old) = old_workers.pop() { + old.graceful_shutdown(self.clone()).await; + } } + + for worker in old_workers { + worker.graceful_shutdown(self.clone()).await; + } + + self.process_workers.lock().extend(next_workers); + RELOAD_IN_PROGRESS.store(false, Ordering::SeqCst); + Ok(()) } LifecycleEvent::IncreaseWorkers => { let mut workers = self.process_workers.lock(); let worker = ProcessWorker { - worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed), + worker_id: self.next_worker_id(), ..Default::default() }; let worker_clone = worker.clone(); @@ -171,6 +207,10 @@ impl ClusterMode { unsafe { exit(0) }; } 
LifecycleEvent::ChildTerminated => { + if RELOAD_IN_PROGRESS.load(Ordering::SeqCst) { + warn!("Reload already in progress, ignoring child signal"); + return Ok(()); + } CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| { i.send(()).ok(); }); @@ -275,18 +315,29 @@ impl ClusterMode { pub fn run(self: Arc) -> Result<()> { info!("Starting in Cluster mode"); self.invoke_hook("before_fork"); + self.process_workers .lock() .iter() .try_for_each(|worker| worker.boot(Arc::clone(&self)))?; + if cfg!(target_os = "linux") { + self.server_config + .server_params + .write() + .listeners + .lock() + .drain(..); + }; + let (sender, mut receiver) = watch::channel(()); *CHILD_SIGNAL_SENDER.lock() = Some(sender); - let mut lifecycle_rx = self.lifecycle_channel.subscribe(); let self_ref = self.clone(); self.build_runtime().block_on(async { + let mut lifecycle_rx = subscribe_runtime_to_signals(); + let self_ref = self_ref.clone(); let memory_check_duration = if self_ref.server_config.server_params.read().worker_memory_limit.is_some(){ time::Duration::from_secs(15) @@ -338,11 +389,16 @@ impl ClusterMode { } }, - Err(e) => error!("Error receiving lifecycle_event: {:?}", e), + Err(e) => { + debug!("Lifecycle channel closed: {:?}, exiting cluster monitor loop", e); + break + }, } } } }); + + unsubscribe_runtime(); self.server_config .server_params .write() diff --git a/crates/itsi_server/src/server/serve_strategy/single_mode.rs b/crates/itsi_server/src/server/serve_strategy/single_mode.rs index 1eb6df9b..2595dfaf 100644 --- a/crates/itsi_server/src/server/serve_strategy/single_mode.rs +++ b/crates/itsi_server/src/server/serve_strategy/single_mode.rs @@ -4,7 +4,10 @@ use crate::{ lifecycle_event::LifecycleEvent, request_job::RequestJob, serve_strategy::acceptor::{Acceptor, AcceptorArgs}, - signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL}, + signal::{ + send_lifecycle_event, subscribe_runtime_to_signals, unsubscribe_runtime, + SHUTDOWN_REQUESTED, + }, thread_worker::{build_thread_workers, 
ThreadWorker}, }, }; @@ -29,22 +32,20 @@ use std::{ }; use tokio::{ runtime::{Builder as RuntimeBuilder, Runtime}, - sync::{ - broadcast, - watch::{self}, - }, + sync::watch::{self}, task::JoinSet, }; use tracing::instrument; pub struct SingleMode { + pub worker_id: usize, pub executor: Builder, pub server_config: Arc, - pub(crate) lifecycle_channel: broadcast::Sender, pub restart_requested: AtomicBool, pub status: RwLock>, } +#[derive(PartialEq, Debug)] pub enum RunningPhase { Running, ShutdownPending, @@ -53,31 +54,45 @@ pub enum RunningPhase { impl SingleMode { #[instrument(parent=None, skip_all)] - pub fn new(server_config: Arc) -> Result { + pub fn new(server_config: Arc, worker_id: usize) -> Result { server_config.server_params.read().preload_ruby()?; - let mut executor = Builder::new(TokioExecutor::new()); - executor - .http1() - .header_read_timeout(server_config.server_params.read().header_read_timeout) - .writev(true) - .timer(TokioTimer::new()); - executor - .http2() - .max_concurrent_streams(100) - .max_local_error_reset_streams(100) - .enable_connect_protocol() - .max_header_list_size(10 * 1024 * 1024) - .max_send_buf_size(16 * 1024 * 1024); + let executor = { + let mut executor = Builder::new(TokioExecutor::new()); + let server_params = server_config.server_params.read(); + let mut http1_executor = executor.http1(); + + http1_executor + .header_read_timeout(server_params.header_read_timeout) + .pipeline_flush(server_params.pipeline_flush) + .timer(TokioTimer::new()); + + if let Some(writev) = server_params.writev { + http1_executor.writev(writev); + } + + executor + .http2() + .max_concurrent_streams(server_params.max_concurrent_streams) + .max_local_error_reset_streams(server_params.max_local_error_reset_streams) + .max_header_list_size(server_params.max_header_list_size) + .max_send_buf_size(server_params.max_send_buf_size) + .enable_connect_protocol(); + executor + }; Ok(Self { + worker_id, executor, server_config, - lifecycle_channel: 
SIGNAL_HANDLER_CHANNEL.0.clone(), restart_requested: AtomicBool::new(false), status: RwLock::new(HashMap::new()), }) } + pub fn is_zero_worker(&self) -> bool { + self.worker_id == 0 + } + pub fn build_runtime(&self) -> Runtime { let mut builder: RuntimeBuilder = if self .server_config @@ -103,7 +118,7 @@ impl SingleMode { pub fn stop(&self) -> Result<()> { SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst); - self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok(); + send_lifecycle_event(LifecycleEvent::Shutdown); Ok(()) } @@ -182,7 +197,7 @@ impl SingleMode { .unwrap(); let receiver = self.clone(); monitor_runtime.block_on({ - let mut lifecycle_rx = receiver.lifecycle_channel.subscribe(); + let mut lifecycle_rx = subscribe_runtime_to_signals(); let receiver = receiver.clone(); let thread_workers = thread_workers.clone(); async move { @@ -201,18 +216,19 @@ impl SingleMode { } lifecycle_event = lifecycle_rx.recv() => { match lifecycle_event { - Ok(LifecycleEvent::Restart) => { + Ok(LifecycleEvent::Restart) | Ok(LifecycleEvent::Reload) => { receiver.restart().await.ok(); } - Ok(LifecycleEvent::Reload) => { - receiver.reload().await.ok(); - } Ok(LifecycleEvent::Shutdown) => { break; } Ok(LifecycleEvent::PrintInfo) => { receiver.print_info(thread_workers.clone()).await.ok(); } + Err(e) => { + debug!("Lifecycle channel closed: {:?}, exiting single mode monitor loop", e); + break; + } _ => {} } } @@ -227,13 +243,15 @@ impl SingleMode { #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))] pub fn run(self: Arc) -> Result<()> { - let (thread_workers, job_sender, nonblocking_sender) = - build_thread_workers(self.server_config.server_params.read().clone(), Pid::this()) - .inspect_err(|e| { - if let Some(err_val) = e.value() { - print_rb_backtrace(err_val); - } - })?; + let (thread_workers, job_sender, nonblocking_sender) = build_thread_workers( + self.server_config.server_params.read().clone(), + self.worker_id, + ) 
+ .inspect_err(|e| { + if let Some(err_val) = e.value() { + print_rb_backtrace(err_val); + } + })?; let worker_count = thread_workers.len(); info!( @@ -244,6 +262,7 @@ impl SingleMode { let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout; let (shutdown_sender, _) = watch::channel(RunningPhase::Running); let monitor_thread = self.clone().start_monitors(thread_workers.clone()); + let is_zero_worker = self.is_zero_worker(); if monitor_thread.is_none() { error!("Failed to start monitor thread"); return Err(ItsiError::new("Failed to start monitor thread")); @@ -253,106 +272,123 @@ impl SingleMode { return Ok(()); } let runtime = self.build_runtime(); - let result = runtime.block_on( - async { - let mut listener_task_set = JoinSet::new(); - let server_params = self.server_config.server_params.read().clone(); - if let Err(err) = server_params.initialize_middleware().await { - error!("Failed to initialize middleware: {}", err); - return Err(ItsiError::new("Failed to initialize middleware")) - } - let tokio_listeners = server_params.listeners.lock() - .drain(..) 
- .map(|list| { - Arc::new(list.into_tokio_listener()) - }) - .collect::>(); - - tokio_listeners.iter().cloned().for_each(|listener| { - let shutdown_sender = shutdown_sender.clone(); - let job_sender = job_sender.clone(); - let nonblocking_sender = nonblocking_sender.clone(); - - let mut lifecycle_rx = self.lifecycle_channel.subscribe(); - let mut shutdown_receiver = shutdown_sender.subscribe(); - let mut acceptor = Acceptor{ - acceptor_args: Arc::new( - AcceptorArgs{ - strategy: self.clone(), - listener_info: listener.listener_info(), - shutdown_receiver: shutdown_sender.subscribe(), - job_sender: job_sender.clone(), - nonblocking_sender: nonblocking_sender.clone(), - server_params: server_params.clone() - } - ), - join_set: JoinSet::new() - }; - - let shutdown_rx_for_acme_task = shutdown_receiver.clone(); - let acme_task_listener_clone = listener.clone(); - listener_task_set.spawn(async move { - acme_task_listener_clone.spawn_acme_event_task(shutdown_rx_for_acme_task).await; - }); - - listener_task_set.spawn(async move { - loop { - tokio::select! 
{ - accept_result = listener.accept() => { - match accept_result { - Ok(accepted) => acceptor.serve_connection(accepted).await, - Err(e) => debug!("Listener.accept failed: {:?}", e) - } - }, - _ = shutdown_receiver.changed() => { - debug!("Shutdown requested via receiver"); - break; - }, - lifecycle_event = lifecycle_rx.recv() => { - match lifecycle_event { - Ok(LifecycleEvent::Shutdown) => { - debug!("Received LifecycleEvent::Shutdown"); - let _ = shutdown_sender.send(RunningPhase::ShutdownPending); - for _ in 0..worker_count { - let _ = job_sender.send(RequestJob::Shutdown).await; - let _ = nonblocking_sender.send(RequestJob::Shutdown).await; - } - break; - }, - Err(e) => error!("Error receiving lifecycle event: {:?}", e), - _ => () + let result = runtime.block_on(async { + let mut listener_task_set = JoinSet::new(); + let server_params = self.server_config.server_params.read().clone(); + if let Err(err) = server_params.initialize_middleware().await { + error!("Failed to initialize middleware: {}", err); + return Err(ItsiError::new("Failed to initialize middleware")); + } + let tokio_listeners = server_params + .listeners + .lock() + .drain(..) 
+ .map(|list| Arc::new(list.into_tokio_listener(is_zero_worker))) + .collect::>(); + + tokio_listeners.iter().cloned().for_each(|listener| { + let shutdown_sender = shutdown_sender.clone(); + let job_sender = job_sender.clone(); + let nonblocking_sender = nonblocking_sender.clone(); + + let mut lifecycle_rx = subscribe_runtime_to_signals(); + let mut shutdown_receiver = shutdown_sender.subscribe(); + let mut acceptor = Acceptor { + acceptor_args: Arc::new(AcceptorArgs { + strategy: self.clone(), + listener_info: listener.listener_info(), + shutdown_receiver: shutdown_sender.subscribe(), + job_sender: job_sender.clone(), + nonblocking_sender: nonblocking_sender.clone(), + server_params: server_params.clone(), + }), + join_set: JoinSet::new(), + }; + + let shutdown_rx_for_acme_task = shutdown_receiver.clone(); + let acme_task_listener_clone = listener.clone(); + + let mut after_accept_wait: Option = None::; + + if cfg!(target_os = "macos") { + after_accept_wait = if server_params.workers > 1 { + Some(Duration::from_nanos(10 * server_params.workers as u64)) + } else { + None + }; + }; + + listener_task_set.spawn(async move { + acme_task_listener_clone + .spawn_acme_event_task(shutdown_rx_for_acme_task) + .await; + }); + + listener_task_set.spawn(async move { + loop { + // Process any pending signals before select + tokio::select! 
{ + accept_result = listener.accept() => { + match accept_result { + Ok(accepted) => acceptor.serve_connection(accepted).await, + Err(e) => debug!("Listener.accept failed: {:?}", e) + } + if cfg!(target_os = "macos") { + if let Some(after_accept_wait) = after_accept_wait{ + tokio::time::sleep(after_accept_wait).await; } - } - } - } - acceptor.join().await; - }); - }); - - if self.is_single_mode() { + } + }, + _ = shutdown_receiver.changed() => { + debug!("Shutdown requested via receiver"); + break; + }, + lifecycle_event = lifecycle_rx.recv() => { + match lifecycle_event { + Ok(LifecycleEvent::Shutdown) => { + debug!("Received LifecycleEvent::Shutdown"); + let _ = shutdown_sender.send(RunningPhase::ShutdownPending); + break; + }, + Err(e) => { + debug!("Lifecycle channel closed: {:?}, exiting accept loop", e); + break + }, + _ => () + } + } + } + } + acceptor.join().await; + }); + }); + + if self.is_single_mode() { self.invoke_hook("after_start"); - } + } - while let Some(_res) = listener_task_set.join_next().await {} - drop(tokio_listeners); + while let Some(_res) = listener_task_set.join_next().await {} + drop(tokio_listeners); - Ok::<(), ItsiError>(()) - }); + Ok::<(), ItsiError>(()) + }); debug!("Single mode runtime exited."); + for _i in 0..thread_workers.len() { + job_sender.send_blocking(RequestJob::Shutdown).unwrap(); + nonblocking_sender + .send_blocking(RequestJob::Shutdown) + .unwrap(); + } if result.is_err() { - for _i in 0..thread_workers.len() { - job_sender.send_blocking(RequestJob::Shutdown).unwrap(); - nonblocking_sender - .send_blocking(RequestJob::Shutdown) - .unwrap(); - } - self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok(); + send_lifecycle_event(LifecycleEvent::Shutdown); } shutdown_sender.send(RunningPhase::Shutdown).ok(); runtime.shutdown_timeout(Duration::from_millis(100)); + unsubscribe_runtime(); + debug!("Shutdown timeout finished."); let deadline = Instant::now() + Duration::from_secs_f64(shutdown_timeout); @@ -384,26 +420,6 @@ 
impl SingleMode { pub fn is_single_mode(&self) -> bool { self.server_config.server_params.read().workers == 1 } - /// Attempts to reload the config "live" - /// Not that when running in single mode this will not unload - /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead - pub async fn reload(&self) -> Result<()> { - if !self.server_config.check_config().await { - return Ok(()); - } - let should_reexec = self.server_config.clone().reload(false)?; - if should_reexec { - if self.is_single_mode() { - self.invoke_hook("before_restart"); - } - self.server_config.dup_fds()?; - self.server_config.reload_exec()?; - } - self.restart_requested.store(true, Ordering::SeqCst); - self.stop()?; - self.server_config.server_params.read().preload_ruby()?; - Ok(()) - } pub fn invoke_hook(&self, hook_name: &str) { if let Some(hook) = self.server_config.server_params.read().hooks.get(hook_name) { diff --git a/crates/itsi_server/src/server/signal.rs b/crates/itsi_server/src/server/signal.rs index 539cd16f..a1bf07e5 100644 --- a/crates/itsi_server/src/server/signal.rs +++ b/crates/itsi_server/src/server/signal.rs @@ -1,22 +1,50 @@ -use std::sync::{ - atomic::{AtomicBool, AtomicI8}, - LazyLock, +use std::{ + collections::VecDeque, + sync::atomic::{AtomicBool, AtomicI8}, }; use nix::libc::{self, sighandler_t}; -use tokio::sync::{self, broadcast}; +use parking_lot::Mutex; +use tokio::sync::broadcast; use super::lifecycle_event::LifecycleEvent; pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0); pub static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false); -pub static SIGNAL_HANDLER_CHANNEL: LazyLock<( - broadcast::Sender, - broadcast::Receiver, -)> = LazyLock::new(|| sync::broadcast::channel(5)); +pub static SIGNAL_HANDLER_CHANNEL: Mutex>> = + Mutex::new(None); + +pub static PENDING_QUEUE: Mutex> = Mutex::new(VecDeque::new()); + +pub fn subscribe_runtime_to_signals() -> broadcast::Receiver { + let mut guard = SIGNAL_HANDLER_CHANNEL.lock(); + if let 
Some(sender) = guard.as_ref() { + return sender.subscribe(); + } + let (sender, receiver) = broadcast::channel(5); + let sender_clone = sender.clone(); + std::thread::spawn(move || { + std::thread::sleep(std::time::Duration::from_millis(50)); + for event in PENDING_QUEUE.lock().drain(..) { + sender_clone.send(event).ok(); + } + }); + + guard.replace(sender); + + receiver +} + +pub fn unsubscribe_runtime() { + SIGNAL_HANDLER_CHANNEL.lock().take(); +} pub fn send_lifecycle_event(event: LifecycleEvent) { - SIGNAL_HANDLER_CHANNEL.0.send(event).ok(); + if let Some(sender) = SIGNAL_HANDLER_CHANNEL.lock().as_ref() { + sender.send(event).ok(); + } else { + PENDING_QUEUE.lock().push_back(event); + } } fn receive_signal(signum: i32, _: sighandler_t) { diff --git a/crates/itsi_server/src/server/thread_worker.rs b/crates/itsi_server/src/server/thread_worker.rs index ad177636..0ae37e9b 100644 --- a/crates/itsi_server/src/server/thread_worker.rs +++ b/crates/itsi_server/src/server/thread_worker.rs @@ -3,13 +3,12 @@ use itsi_error::ItsiError; use itsi_rb_helpers::{ call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue, }; -use itsi_tracing::{debug, error, warn}; +use itsi_tracing::{debug, error}; use magnus::{ error::Result, value::{InnerValue, Lazy, LazyId, Opaque, ReprValue}, Module, RClass, Ruby, Thread, Value, }; -use nix::unistd::Pid; use parking_lot::{Mutex, RwLock}; use std::{ ops::Deref, @@ -17,8 +16,7 @@ use std::{ atomic::{AtomicBool, AtomicU64, Ordering}, Arc, }, - thread, - time::{Duration, Instant, SystemTime, UNIX_EPOCH}, + time::{Instant, SystemTime, UNIX_EPOCH}, }; use tokio::{runtime::Builder as RuntimeBuilder, sync::watch}; use tracing::instrument; @@ -35,7 +33,7 @@ use super::request_job::RequestJob; pub struct ThreadWorker { pub params: Arc, pub id: u8, - pub name: String, + pub worker_id: usize, pub request_id: AtomicU64, pub current_request_start: AtomicU64, pub receiver: Arc>, @@ -64,8 +62,11 @@ type ThreadWorkerBuildResult = Result<( 
Sender, )>; -#[instrument(name = "boot", parent=None, skip(params, pid))] -pub fn build_thread_workers(params: Arc, pid: Pid) -> ThreadWorkerBuildResult { +#[instrument(name = "boot", parent=None, skip(params, worker_id))] +pub fn build_thread_workers( + params: Arc, + worker_id: usize, +) -> ThreadWorkerBuildResult { let blocking_thread_count = params.threads; let nonblocking_thread_count = params.scheduler_threads; let ruby_thread_request_backlog_size: usize = params @@ -83,7 +84,7 @@ pub fn build_thread_workers(params: Arc, pid: Pid) -> ThreadWorker ThreadWorker::new( params.clone(), id, - format!("{:?}#{:?}", pid, id), + worker_id, blocking_receiver_ref.clone(), blocking_sender_ref.clone(), if nonblocking_thread_count.is_some() { @@ -106,7 +107,7 @@ pub fn build_thread_workers(params: Arc, pid: Pid) -> ThreadWorker workers.push(ThreadWorker::new( params.clone(), id, - format!("{:?}#{:?}", pid, id), + worker_id, nonblocking_receiver_ref.clone(), nonblocking_sender_ref.clone(), Some(scheduler_class), @@ -141,7 +142,7 @@ impl ThreadWorker { pub fn new( params: Arc, id: u8, - name: String, + worker_id: usize, receiver: Arc>, sender: Sender, scheduler_class: Option>, @@ -149,9 +150,9 @@ impl ThreadWorker { let worker = Arc::new(Self { params, id, + worker_id, request_id: AtomicU64::new(0), current_request_start: AtomicU64::new(0), - name, receiver, sender, thread: RwLock::new(None), @@ -181,24 +182,24 @@ impl ThreadWorker { } pub fn run(self: Arc) -> Result<()> { - let name = self.name.clone(); let receiver = self.receiver.clone(); let terminated = self.terminated.clone(); let scheduler_class = self.scheduler_class; let params = self.params.clone(); let self_ref = self.clone(); - let id = self.id; + let worker_id = self.worker_id; call_with_gvl(|_| { *self.thread.write() = Some( create_ruby_thread(move || { if params.pin_worker_cores { - core_affinity::set_for_current(CORE_IDS[(id as usize) % CORE_IDS.len()]); + core_affinity::set_for_current( + CORE_IDS[((2 * 
worker_id) + 1) % CORE_IDS.len()], + ); } debug!("Ruby thread worker started"); if let Some(scheduler_class) = scheduler_class { if let Err(err) = self_ref.fiber_accept_loop( params, - name, receiver, scheduler_class, terminated, @@ -206,7 +207,7 @@ impl ThreadWorker { error!("Error in fiber_accept_loop: {:?}", err); } } else { - self_ref.accept_loop(params, name, receiver, terminated); + self_ref.accept_loop(params, receiver, terminated); } }) .ok_or_else(|| { @@ -262,9 +263,14 @@ impl ThreadWorker { } } } + for _ in 0..MAX_BATCH_SIZE { if let Ok(req) = receiver.try_recv() { + let should_break = matches!(req, RequestJob::Shutdown); batch.push(req); + if should_break { + break; + } } else { break; } @@ -307,7 +313,9 @@ impl ThreadWorker { ItsiGrpcCall::internal_error(ruby, response, err) } } - RequestJob::Shutdown => return true, + RequestJob::Shutdown => { + return true; + } } } false @@ -339,15 +347,14 @@ impl ThreadWorker { if yield_result.is_err() { break; } - }) + }); }) } - #[instrument(skip_all, fields(thread_worker=name))] + #[instrument(skip_all, fields(thread_worker=format!("{}:{}", self.id, self.worker_id)))] pub fn fiber_accept_loop( self: Arc, params: Arc, - name: String, receiver: Arc>, scheduler_class: Opaque, terminated: Arc, @@ -422,68 +429,76 @@ impl ThreadWorker { }); } - #[instrument(skip_all, fields(thread_worker=id))] + #[instrument(skip_all, fields(thread_worker=format!("{}:{}", self.id, self.worker_id)))] pub fn accept_loop( self: Arc, params: Arc, - id: String, receiver: Arc>, terminated: Arc, ) { - let ruby = Ruby::get().unwrap(); let mut idle_counter = 0; - let self_ref = self.clone(); call_without_gvl(|| loop { - if receiver.is_empty() { - if let Some(oob_gc_threshold) = params.oob_gc_responses_threshold { - idle_counter = (idle_counter + 1) % oob_gc_threshold; - if idle_counter == 0 { - call_with_gvl(|_ruby| { - ruby.gc_start(); - }); - } - }; - } match receiver.recv_blocking() { - Ok(RequestJob::ProcessHttpRequest(request, app_proc)) 
=> { - self_ref.request_id.fetch_add(1, Ordering::Relaxed); - self_ref.current_request_start.store( - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(), - Ordering::Relaxed, - ); - call_with_gvl(|_ruby| { - request.process(&ruby, app_proc).ok(); - }); - if terminated.load(Ordering::Relaxed) { - break; + Err(_) => break, + Ok(RequestJob::Shutdown) => break, + Ok(request_job) => call_with_gvl(|ruby| { + self.process_one(&ruby, request_job, &terminated); + while let Ok(request_job) = receiver.try_recv() { + if matches!(request_job, RequestJob::Shutdown) { + terminated.store(true, Ordering::Relaxed); + break; + } + self.process_one(&ruby, request_job, &terminated); } - } - Ok(RequestJob::ProcessGrpcRequest(request, app_proc)) => { - self_ref.request_id.fetch_add(1, Ordering::Relaxed); - self_ref.current_request_start.store( - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(), - Ordering::Relaxed, - ); - call_with_gvl(|_ruby| { - request.process(&ruby, app_proc).ok(); - }); - if terminated.load(Ordering::Relaxed) { - break; + if let Some(thresh) = params.oob_gc_responses_threshold { + idle_counter = (idle_counter + 1) % thresh; + if idle_counter == 0 { + ruby.gc_start(); + } } + }), + }; + if terminated.load(Ordering::Relaxed) { + break; + } + }); + } + + fn process_one(self: &Arc, ruby: &Ruby, job: RequestJob, terminated: &Arc) { + match job { + RequestJob::ProcessHttpRequest(request, app_proc) => { + if terminated.load(Ordering::Relaxed) { + request.response().unwrap().service_unavailable(); + return; } - Ok(RequestJob::Shutdown) => { - break; - } - Err(_) => { - thread::sleep(Duration::from_micros(1)); + self.request_id.fetch_add(1, Ordering::Relaxed); + self.current_request_start.store( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(), + Ordering::Relaxed, + ); + request.process(ruby, app_proc).ok(); + } + + RequestJob::ProcessGrpcRequest(request, app_proc) => { + if 
terminated.load(Ordering::Relaxed) { + request.stream().unwrap().close().ok(); + return; } + self.request_id.fetch_add(1, Ordering::Relaxed); + self.current_request_start.store( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(), + Ordering::Relaxed, + ); + request.process(ruby, app_proc).ok(); } - }); + + RequestJob::Shutdown => unreachable!(), + } } } diff --git a/crates/itsi_server/src/services/itsi_http_service.rs b/crates/itsi_server/src/services/itsi_http_service.rs index a641d984..ce7a6fbb 100644 --- a/crates/itsi_server/src/services/itsi_http_service.rs +++ b/crates/itsi_server/src/services/itsi_http_service.rs @@ -6,22 +6,20 @@ use crate::server::http_message_types::{ use crate::server::lifecycle_event::LifecycleEvent; use crate::server::middleware_stack::MiddlewareLayer; use crate::server::serve_strategy::acceptor::AcceptorArgs; -use crate::server::signal::send_lifecycle_event; +use crate::server::signal::{send_lifecycle_event, SHUTDOWN_REQUESTED}; use chrono::{self, DateTime, Local}; use either::Either; use http::header::ACCEPT_ENCODING; use http::{HeaderValue, Request}; use hyper::body::Incoming; -use hyper::service::Service; -use itsi_error::ItsiError; use regex::Regex; +use smallvec::SmallVec; +use std::ops::Deref; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::OnceLock; +use std::sync::{Arc, OnceLock}; use std::time::{Duration, Instant}; -use tracing::error; - -use std::{future::Future, ops::Deref, pin::Pin, sync::Arc}; use tokio::time::timeout; +use tracing::error; #[derive(Clone)] pub struct ItsiHttpService { @@ -80,12 +78,14 @@ pub struct RequestContextInner { pub request_start_time: OnceLock>, pub start_instant: Instant, pub if_none_match: OnceLock>, - pub supported_encoding_set: OnceLock>, + pub supported_encoding_set: OnceLock, pub is_ruby_request: Arc, } +type AcceptEncodingSet = SmallVec<[HeaderValue; 2]>; + impl HttpRequestContext { - fn new( + pub fn new( service: ItsiHttpService, matching_pattern: 
Option>, accept: ResponseFormat, @@ -109,12 +109,14 @@ impl HttpRequestContext { } pub fn set_supported_encoding_set(&self, req: &HttpRequest) { - self.inner.supported_encoding_set.get_or_init(move || { - req.headers() - .get_all(ACCEPT_ENCODING) - .into_iter() - .cloned() - .collect::>() + self.inner.supported_encoding_set.get_or_init(|| { + let mut set: AcceptEncodingSet = SmallVec::new(); + + for hv in req.headers().get_all(ACCEPT_ENCODING) { + set.push(hv.clone()); // clone ≈ 16 B struct copy + } + + set }); } @@ -164,7 +166,7 @@ impl HttpRequestContext { self.inner.response_format.get().unwrap() } - pub fn supported_encoding_set(&self) -> Option<&Vec> { + pub fn supported_encoding_set(&self) -> Option<&AcceptEncodingSet> { self.inner.supported_encoding_set.get() } } @@ -173,13 +175,8 @@ const SERVER_TOKEN_VERSION: HeaderValue = HeaderValue::from_static(concat!("Itsi/", env!("CARGO_PKG_VERSION"))); const SERVER_TOKEN_NAME: HeaderValue = HeaderValue::from_static("Itsi"); -impl Service> for ItsiHttpService { - type Response = HttpResponse; - type Error = ItsiError; - type Future = Pin> + Send>>; - - fn call(&self, req: Request) -> Self::Future { - let self_clone = self.clone(); +impl ItsiHttpService { + pub async fn handle_request(&self, req: Request) -> itsi_error::Result { let mut req = req.limit(); let accept: ResponseFormat = req.accept().into(); let is_single_mode = self.server_params.workers == 1; @@ -191,7 +188,7 @@ impl Service> for ItsiHttpService { let token_preference = self.server_params.itsi_server_token_preference; let service_future = async move { - let middleware_stack = self_clone + let middleware_stack = self .server_params .middleware .get() @@ -202,7 +199,7 @@ impl Service> for ItsiHttpService { let mut resp: Option = None; let mut context = - HttpRequestContext::new(self_clone.clone(), matching_pattern, accept, irr_clone); + HttpRequestContext::new(self.clone(), matching_pattern, accept, irr_clone); let mut depth = 0; for (index, elm) in 
stack.iter().enumerate() { @@ -243,28 +240,31 @@ impl Service> for ItsiHttpService { }; if let Some(timeout_duration) = request_timeout { - Box::pin(async move { - match timeout(timeout_duration, service_future).await { - Ok(result) => result, - Err(_) => { - // If we're still running Ruby at this point, we can't just kill the - // thread as it might be in a critical section. - // Instead we must ask the worker to hot restart. - if is_ruby_request.load(Ordering::Relaxed) { - if is_single_mode { - // If we're in single mode, re-exec the whole process - send_lifecycle_event(LifecycleEvent::Restart); - } else { - // Otherwise we can shutdown the worker and rely on the master to restart it - send_lifecycle_event(LifecycleEvent::Shutdown); - } + match timeout(timeout_duration, service_future).await { + Ok(result) => result, + Err(_) => { + // If we're still running Ruby at this point, we can't just kill the + // thread as it might be in a critical section. + // Instead we must ask the worker to hot restart. 
+ // But only if we're not already shutting down + if is_ruby_request.load(Ordering::Relaxed) + && !SHUTDOWN_REQUESTED.load(Ordering::SeqCst) + { + // When we've detected a timeout, use the safer send_lifecycle_event + // which will properly handle signal-safe state transitions + if is_single_mode { + // If we're in single mode, re-exec the whole process + send_lifecycle_event(LifecycleEvent::Restart); + } else { + // Otherwise we can shutdown the worker and rely on the master to restart it + send_lifecycle_event(LifecycleEvent::Shutdown); } - Ok(TIMEOUT_RESPONSE.to_http_response(accept).await) } + Ok(TIMEOUT_RESPONSE.to_http_response(accept).await) } - }) + } } else { - Box::pin(service_future) + service_future.await } } } diff --git a/crates/itsi_server/src/services/static_file_server.rs b/crates/itsi_server/src/services/static_file_server.rs index ad00447d..1dda1cf2 100644 --- a/crates/itsi_server/src/services/static_file_server.rs +++ b/crates/itsi_server/src/services/static_file_server.rs @@ -2,7 +2,7 @@ use crate::{ default_responses::NOT_FOUND_RESPONSE, prelude::*, server::{ - http_message_types::{HttpRequest, HttpResponse, RequestExt, ResponseFormat}, + http_message_types::{HttpBody, HttpRequest, HttpResponse, RequestExt, ResponseFormat}, middleware_stack::ErrorResponse, redirect_type::RedirectType, }, @@ -16,7 +16,6 @@ use http::{ }, HeaderName, HeaderValue, Response, StatusCode, }; -use http_body_util::{combinators::BoxBody, Full}; use itsi_error::Result; use parking_lot::{Mutex, RwLock}; use percent_encoding::percent_decode_str; @@ -28,7 +27,6 @@ use std::{ borrow::Cow, cmp::Ordering, collections::HashMap, - convert::Infallible, fs::Metadata, ops::Deref, path::{Path, PathBuf}, @@ -324,7 +322,7 @@ impl StaticFileServer { }) => Response::builder() .status(StatusCode::MOVED_PERMANENTLY) .header(header::LOCATION, redirect_to) - .body(BoxBody::new(Full::new(Bytes::new()))) + .body(HttpBody::empty()) .unwrap(), Err(not_found_behavior) => match 
not_found_behavior { NotFoundBehavior::Error(error_response) => { @@ -340,7 +338,7 @@ impl StaticFileServer { NotFoundBehavior::Redirect(redirect) => Response::builder() .status(redirect.r#type.status_code()) .header(header::LOCATION, redirect.to) - .body(BoxBody::new(Full::new(Bytes::new()))) + .body(HttpBody::empty()) .unwrap(), }, }) @@ -407,7 +405,7 @@ impl StaticFileServer { Response::builder() .status(StatusCode::NOT_FOUND) - .body(BoxBody::new(Full::new(Bytes::new()))) + .body(HttpBody::empty()) .unwrap() } @@ -648,15 +646,8 @@ impl StaticFileServer { Err(nf) } - async fn stream_file_range( - &self, - path: PathBuf, - start: u64, - end: u64, - ) -> Option> { + async fn stream_file_range(&self, path: PathBuf, start: u64, end: u64) -> Option { use futures::TryStreamExt; - use http_body_util::StreamBody; - use hyper::body::Frame; use tokio::io::AsyncSeekExt; use tokio_util::io::ReaderStream; @@ -687,32 +678,25 @@ impl StaticFileServer { let range_length = end - start + 1; let limited_reader = tokio::io::AsyncReadExt::take(file, range_length); let path_clone = path.clone(); - let stream = ReaderStream::with_capacity(limited_reader, 64 * 1024) - .map_ok(Frame::data) - .map_err(move |e| { - warn!("Error streaming file {}: {}", path_clone.display(), e); - unreachable!("We handle IO errors above") - }); - - Some(BoxBody::new(StreamBody::new(stream))) + let stream = ReaderStream::with_capacity(limited_reader, 64 * 1024).map_err(move |e| { + warn!("Error streaming file {}: {}", path_clone.display(), e); + unreachable!("We handle IO errors above") + }); + Some(HttpBody::stream(stream)) } - async fn stream_file(&self, path: PathBuf) -> Option> { + async fn stream_file(&self, path: PathBuf) -> Option { use futures::TryStreamExt; - use http_body_util::StreamBody; - use hyper::body::Frame; use tokio_util::io::ReaderStream; match File::open(&path).await { Ok(file) => { let path_clone = path.clone(); - let stream = ReaderStream::with_capacity(file, 64 * 1024) - 
.map_ok(Frame::data) - .map_err(move |e| { - warn!("Error streaming file {}: {}", path_clone.display(), e); - unreachable!("We handle IO errors above") - }); - Some(BoxBody::new(StreamBody::new(stream))) + let stream = ReaderStream::with_capacity(file, 64 * 1024).map_err(move |e| { + warn!("Error streaming file {}: {}", path_clone.display(), e); + unreachable!("We handle IO errors above") + }); + Some(HttpBody::stream(stream)) } Err(e) => { warn!( @@ -749,7 +733,7 @@ impl StaticFileServer { return Response::builder() .status(StatusCode::RANGE_NOT_SATISFIABLE) .header("Content-Range", format!("bytes */{}", content_length)) - .body(BoxBody::new(Full::new(Bytes::new()))) + .body(HttpBody::empty()) .unwrap(); } @@ -795,7 +779,7 @@ impl StaticFileServer { builder = builder.header("Content-Range", range); } - return builder.body(BoxBody::new(Full::new(Bytes::new()))).unwrap(); + return builder.body(HttpBody::empty()).unwrap(); } // For GET requests, prepare the actual content @@ -829,10 +813,7 @@ impl StaticFileServer { } } - fn serve_cached_content( - &self, - serve_cache_args: ServeCacheArgs, - ) -> http::Response> { + fn serve_cached_content(&self, serve_cache_args: ServeCacheArgs) -> HttpResponse { let ServeCacheArgs( cache_entry, start, @@ -855,7 +836,7 @@ impl StaticFileServer { return Response::builder() .status(StatusCode::RANGE_NOT_SATISFIABLE) .header("Content-Range", format!("bytes */{}", content_length)) - .body(BoxBody::new(Full::new(Bytes::new()))) + .body(HttpBody::empty()) .unwrap(); } @@ -904,7 +885,7 @@ impl StaticFileServer { builder = builder.header("Content-Range", range); } - return builder.body(BoxBody::new(Full::new(Bytes::new()))).unwrap(); + return builder.body(HttpBody::empty()).unwrap(); } if is_range_request { @@ -920,7 +901,7 @@ impl StaticFileServer { cache_entry.last_modified_http_date.clone(), content_range, &self.headers, - BoxBody::new(Full::new(range_bytes)), + HttpBody::full(range_bytes), ) } else { // Return the full content @@ 
-987,15 +968,15 @@ fn format_http_date_header(time: SystemTime) -> HeaderValue { .unwrap() } -fn build_ok_body(bytes: Arc) -> BoxBody { - BoxBody::new(Full::new(bytes.as_ref().clone())) +fn build_ok_body(bytes: Arc) -> HttpBody { + HttpBody::full(bytes.as_ref().clone()) } // Helper function to handle not modified responses -fn build_not_modified_response() -> http::Response> { +fn build_not_modified_response() -> HttpResponse { Response::builder() .status(StatusCode::NOT_MODIFIED) - .body(BoxBody::new(Full::new(Bytes::new()))) + .body(HttpBody::empty()) .unwrap() } @@ -1009,8 +990,8 @@ fn build_file_response( last_modified_http_date: HeaderValue, range_header: Option, headers: &Option>, - body: BoxBody, -) -> http::Response> { + body: HttpBody, +) -> HttpResponse { let mut response = Response::new(body); *response.status_mut() = status; diff --git a/docs/benchmark-dashboard/.gitignore b/docs/benchmark-dashboard/.gitignore new file mode 100644 index 00000000..f650315f --- /dev/null +++ b/docs/benchmark-dashboard/.gitignore @@ -0,0 +1,27 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules + +# next.js +/.next/ +/out/ + +# production +/build + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* + +# env files +.env* + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts \ No newline at end of file diff --git a/docs/benchmark-dashboard/app/api/benchmarks/route.ts b/docs/benchmark-dashboard/app/api/benchmarks/route.ts new file mode 100644 index 00000000..17468956 --- /dev/null +++ b/docs/benchmark-dashboard/app/api/benchmarks/route.ts @@ -0,0 +1,22 @@ +import { NextResponse } from "next/server" + +// This would be your actual API endpoint to fetch benchmark data +export async function GET() { + try { + // In a real implementation, you would: + // 1. Scan the directory structure + // 2. Read and parse the JSON files + // 3. 
Return the aggregated data + + // For demo purposes, we're returning a mock response + return NextResponse.json({ + success: true, + data: [ + // Your benchmark data would go here + ], + }) + } catch (error) { + console.error("Error fetching benchmark data:", error) + return NextResponse.json({ success: false, error: "Failed to fetch benchmark data" }, { status: 500 }) + } +} diff --git a/docs/benchmark-dashboard/app/globals.css b/docs/benchmark-dashboard/app/globals.css new file mode 100644 index 00000000..ac684423 --- /dev/null +++ b/docs/benchmark-dashboard/app/globals.css @@ -0,0 +1,94 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +body { + font-family: Arial, Helvetica, sans-serif; +} + +@layer utilities { + .text-balance { + text-wrap: balance; + } +} + +@layer base { + :root { + --background: 0 0% 100%; + --foreground: 0 0% 3.9%; + --card: 0 0% 100%; + --card-foreground: 0 0% 3.9%; + --popover: 0 0% 100%; + --popover-foreground: 0 0% 3.9%; + --primary: 0 0% 9%; + --primary-foreground: 0 0% 98%; + --secondary: 0 0% 96.1%; + --secondary-foreground: 0 0% 9%; + --muted: 0 0% 96.1%; + --muted-foreground: 0 0% 45.1%; + --accent: 0 0% 96.1%; + --accent-foreground: 0 0% 9%; + --destructive: 0 84.2% 60.2%; + --destructive-foreground: 0 0% 98%; + --border: 0 0% 89.8%; + --input: 0 0% 89.8%; + --ring: 0 0% 3.9%; + --chart-1: 12 76% 61%; + --chart-2: 173 58% 39%; + --chart-3: 197 37% 24%; + --chart-4: 43 74% 66%; + --chart-5: 27 87% 67%; + --radius: 0.5rem; + --sidebar-background: 0 0% 98%; + --sidebar-foreground: 240 5.3% 26.1%; + --sidebar-primary: 240 5.9% 10%; + --sidebar-primary-foreground: 0 0% 98%; + --sidebar-accent: 240 4.8% 95.9%; + --sidebar-accent-foreground: 240 5.9% 10%; + --sidebar-border: 220 13% 91%; + --sidebar-ring: 217.2 91.2% 59.8%; + } + .dark { + --background: 0 0% 3.9%; + --foreground: 0 0% 98%; + --card: 0 0% 3.9%; + --card-foreground: 0 0% 98%; + --popover: 0 0% 3.9%; + --popover-foreground: 0 0% 98%; + --primary: 0 0% 98%; 
+ --primary-foreground: 0 0% 9%; + --secondary: 0 0% 14.9%; + --secondary-foreground: 0 0% 98%; + --muted: 0 0% 14.9%; + --muted-foreground: 0 0% 63.9%; + --accent: 0 0% 14.9%; + --accent-foreground: 0 0% 98%; + --destructive: 0 62.8% 30.6%; + --destructive-foreground: 0 0% 98%; + --border: 0 0% 14.9%; + --input: 0 0% 14.9%; + --ring: 0 0% 83.1%; + --chart-1: 220 70% 50%; + --chart-2: 160 60% 45%; + --chart-3: 30 80% 55%; + --chart-4: 280 65% 60%; + --chart-5: 340 75% 55%; + --sidebar-background: 240 5.9% 10%; + --sidebar-foreground: 240 4.8% 95.9%; + --sidebar-primary: 224.3 76.3% 48%; + --sidebar-primary-foreground: 0 0% 100%; + --sidebar-accent: 240 3.7% 15.9%; + --sidebar-accent-foreground: 240 4.8% 95.9%; + --sidebar-border: 240 3.7% 15.9%; + --sidebar-ring: 217.2 91.2% 59.8%; + } +} + +@layer base { + * { + @apply border-border; + } + body { + @apply bg-background text-foreground; + } +} diff --git a/docs/benchmark-dashboard/app/layout.tsx b/docs/benchmark-dashboard/app/layout.tsx new file mode 100644 index 00000000..17b2ce8c --- /dev/null +++ b/docs/benchmark-dashboard/app/layout.tsx @@ -0,0 +1,20 @@ +import type { Metadata } from 'next' +import './globals.css' + +export const metadata: Metadata = { + title: 'v0 App', + description: 'Created with v0', + generator: 'v0.dev', +} + +export default function RootLayout({ + children, +}: Readonly<{ + children: React.ReactNode +}>) { + return ( + + {children} + + ) +} diff --git a/docs/benchmark-dashboard/app/page.tsx b/docs/benchmark-dashboard/app/page.tsx new file mode 100644 index 00000000..ba99c42f --- /dev/null +++ b/docs/benchmark-dashboard/app/page.tsx @@ -0,0 +1,252 @@ +"use client" + +import { useState, useEffect } from "react" +import { BenchmarkDashboard } from "@/components/benchmark-dashboard" +import { LoadingSpinner } from "@/components/ui/loading-spinner" + +// Updated sample data with the new group hierarchy and longer version strings +const sampleHierarchicalData = [ + { + cpu: "apple_m1_pro", + 
groups: [ + { + group: "rack", + tests: [ + { + test: "chunked", + servers: [ + { + server: "agoo", + results: [ + { + server: "agoo", + test_case: "chunked", + version: "Agoo v1.2.3 (Ruby 3.2.0) with experimental HTTP/2 support", + threads: 1, + workers: 1, + http2: false, + concurrency: 10, + rss_mb: 1.77, + results: { + successRate: 1.0, + total: 20000.0, + slowest: 5000000.0, + fastest: 200000.0, + average: 2500000.0, + requestsPerSec: 6500.0, + totalData: 0, + sizePerRequest: 0.0, + sizePerSec: 0.0, + errorDistribution: {}, + p95_latency: 4.8, + }, + timestamp: "2025-05-24T10:41:17Z", + }, + { + server: "agoo", + test_case: "chunked", + version: "Agoo v1.2.3 (Ruby 3.2.0) with experimental HTTP/2 support", + threads: 1, + workers: 1, + http2: false, + concurrency: 50, + rss_mb: 1.77, + results: { + successRate: 0.95, + total: 28000.0, + slowest: 12000000.0, + fastest: 400000.0, + average: 6000000.0, + requestsPerSec: 9500.0, + totalData: 0, + sizePerRequest: 0.0, + sizePerSec: 0.0, + errorDistribution: { + timeout: 50, + }, + p95_latency: 10.2, + }, + timestamp: "2025-05-24T10:41:17Z", + }, + ], + }, + { + server: "puma", + results: [ + { + server: "puma", + test_case: "chunked", + version: + "Puma version 6.6.0+h2o version 2.3.0-DEV@87e2aa634 (Ruby 3.2.2) with HTTP/2 support enabled", + threads: 1, + workers: 1, + http2: true, + concurrency: 10, + results: { + successRate: 1.0, + total: 15000.0, + slowest: 4000000.0, + fastest: 200000.0, + average: 2000000.0, + requestsPerSec: 5000.0, + totalData: 150000000, + sizePerRequest: 10000.0, + sizePerSec: 50000000.0, + errorDistribution: {}, + p95_latency: 3.8, + }, + }, + { + server: "puma", + test_case: "chunked", + version: + "Puma version 6.6.0+h2o version 2.3.0-DEV@87e2aa634 (Ruby 3.2.2) with HTTP/2 support enabled", + threads: 1, + workers: 1, + http2: true, + concurrency: 50, + results: { + successRate: 1.0, + total: 22000.0, + slowest: 12000000.0, + fastest: 400000.0, + average: 6000000.0, + requestsPerSec: 
7500.0, + totalData: 220000000, + sizePerRequest: 10000.0, + sizePerSec: 75000000.0, + errorDistribution: {}, + p95_latency: 11.2, + }, + }, + ], + }, + ], + }, + { + test: "io_party", + servers: [ + { + server: "itsi", + results: [ + { + server: "itsi", + test_case: "io_party", + version: + "ITSI v0.9.1-beta.3 (Experimental) with advanced IO processing and HTTP/2 multiplexing support", + threads: 1, + workers: 1, + http2: true, + concurrency: 10, + rss_mb: 64.11, + results: { + successRate: 1.0, + total: 48591.0, + slowest: 2850375.0, + fastest: 116292.0, + average: 486975.0, + requestsPerSec: 16196.49, + totalData: 0, + sizePerRequest: 0.0, + sizePerSec: 0.0, + errorDistribution: {}, + p95_latency: 0.977999, + }, + timestamp: "2025-05-15T03:43:50Z", + }, + ], + }, + ], + }, + ], + }, + { + group: "sinatra", + tests: [ + { + test: "hello_world", + servers: [ + { + server: "falcon", + results: [ + { + server: "falcon", + test_case: "hello_world", + version: + "Falcon v0.51.1 (Ruby 3.3.0-preview1) with HTTP/2 and WebSocket support, running on Async::HTTP::Protocol::HTTP2 implementation", + threads: 2, + workers: 2, + http2: true, + concurrency: 10, + results: { + successRate: 1.0, + total: 30000.0, + slowest: 10000000.0, + fastest: 400000.0, + average: 5000000.0, + requestsPerSec: 10000.0, + totalData: 0, + sizePerRequest: 0.0, + sizePerSec: 0.0, + errorDistribution: {}, + p95_latency: 8.5, + }, + }, + ], + }, + ], + }, + ], + }, + ], + }, +] + +export default function Home() { + const [isLoading, setIsLoading] = useState(true) + const [benchmarkData, setBenchmarkData] = useState([]) + const [error, setError] = useState(null) + + useEffect(() => { + const fetchData = async () => { + try { + // In a real app, this would fetch from your API endpoint + // For demo purposes, we're using the sample hierarchical data + setBenchmarkData(sampleHierarchicalData) + setIsLoading(false) + } catch (err) { + console.error("Error fetching benchmark data:", err) + setError("Failed 
to load benchmark data. Please try again.") + setIsLoading(false) + } + } + + fetchData() + }, []) + + if (isLoading) { + return ( +
+ + Loading benchmark data... +
+ ) + } + + if (error) { + return ( +
+
+

Error

+

{error}

+
+
+ ) + } + + return ( +
+ +
+ ) +} diff --git a/docs/benchmark-dashboard/components.json b/docs/benchmark-dashboard/components.json new file mode 100644 index 00000000..d9ef0ae5 --- /dev/null +++ b/docs/benchmark-dashboard/components.json @@ -0,0 +1,21 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "default", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "tailwind.config.ts", + "css": "app/globals.css", + "baseColor": "neutral", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + }, + "iconLibrary": "lucide" +} \ No newline at end of file diff --git a/docs/benchmark-dashboard/components/benchmark-dashboard.tsx b/docs/benchmark-dashboard/components/benchmark-dashboard.tsx new file mode 100644 index 00000000..65d4bdb9 --- /dev/null +++ b/docs/benchmark-dashboard/components/benchmark-dashboard.tsx @@ -0,0 +1,1663 @@ +"use client"; + +import type React from "react"; + +import { useState, useMemo, useCallback, useEffect } from "react"; +import { + Bar, + XAxis, + YAxis, + CartesianGrid, + ResponsiveContainer, + BarChart, +} from "recharts"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, + SelectGroup, + SelectLabel, +} from "@/components/ui/select"; +import { Label } from "@/components/ui/label"; +import { Badge } from "@/components/ui/badge"; +import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"; +import { + InfoIcon, + TrendingUp, + TrendingDown, + TagIcon, + TrophyIcon, +} from "lucide-react"; + +// Updated types to match the new hierarchical structure with groups +type BenchmarkResult = { + server: string; + version?: string; + test_case: string; + threads: number; + workers: number; + http2: boolean; + concurrency: number; + rss_mb?: number; + results: { + successRate: number; + total: number; + 
slowest: number; + fastest: number; + average: number; + requestsPerSec: number; + totalData: number; + sizePerRequest: number; + sizePerSec: number; + errorDistribution: Record; + p95_latency: number; + }; + timestamp?: string; +}; + +type ServerData = { + server: string; + results: BenchmarkResult[]; +}; + +type TestData = { + test: string; + servers: ServerData[]; +}; + +type GroupData = { + group: string; + tests: TestData[]; +}; + +type CpuData = { + cpu: string; + groups: GroupData[]; +}; + +type HierarchicalBenchmarkData = CpuData[]; + +type FilterOptions = { + cpus: string[]; + testCases: string[]; + servers: string[]; + threads: number[]; + workers: number[]; + concurrencyLevels: number[]; + http2Options: (boolean | "all")[]; +}; + +type FilterState = { + cpu: string; + testCase: string; + threads: number; + workers: number; + concurrency: number; + http2: boolean | "all"; + xAxis: string; + metric: string; + visibleServers: string[]; +}; + +type BenchmarkDashboardProps = { + data: HierarchicalBenchmarkData; +}; + +export function BenchmarkDashboard({ data }: BenchmarkDashboardProps) { + // Helper function to format server names (replace __ with +) + const formatServerName = useCallback((serverName: string): string => { + return serverName.replace(/__/g, "+"); + }, []); + + // Flatten the hierarchical data into the format we need for processing + const flattenedData = useMemo(() => { + const flattened: BenchmarkResult[] = []; + + if (!data || !Array.isArray(data)) { + return flattened; + } + + data.forEach((cpuData) => { + if (!cpuData?.groups || !Array.isArray(cpuData.groups)) { + return; + } + + cpuData.groups.forEach((groupData) => { + if (!groupData?.tests || !Array.isArray(groupData.tests)) { + return; + } + + groupData.tests.forEach((testData) => { + if (!testData?.servers || !Array.isArray(testData.servers)) { + return; + } + + testData.servers.forEach((serverData) => { + if (!serverData?.results || !Array.isArray(serverData.results)) { + return; + 
} + + serverData.results.forEach((result) => { + // Add CPU and group information to each result + flattened.push({ + ...result, + cpu: cpuData.cpu, + group: groupData.group, + } as BenchmarkResult & { cpu: string; group: string }); + }); + }); + }); + }); + }); + + return flattened; + }, [data]); + + // Extract all possible filter options from hierarchical data + const allFilterOptions: FilterOptions = useMemo(() => { + const defaultOptions: FilterOptions = { + cpus: [], + testCases: [], + servers: [], + threads: [], + workers: [], + concurrencyLevels: [], + http2Options: ["all"], + }; + + if (!data || !Array.isArray(data) || data.length === 0) { + return defaultOptions; + } + + return { + cpus: data.map((cpuData) => cpuData.cpu).filter(Boolean), + testCases: [ + ...new Set(flattenedData.map((item) => item.test_case).filter(Boolean)), + ], + servers: [ + ...new Set(flattenedData.map((item) => item.server).filter(Boolean)), + ], + threads: [ + ...new Set( + flattenedData + .map((item) => item.threads) + .filter((t) => typeof t === "number"), + ), + ].sort((a, b) => a - b), + workers: [ + ...new Set( + flattenedData + .map((item) => item.workers) + .filter((w) => typeof w === "number"), + ), + ].sort((a, b) => a - b), + concurrencyLevels: [ + ...new Set( + flattenedData + .map((item) => item.concurrency) + .filter((c) => typeof c === "number"), + ), + ].sort((a, b) => a - b), + http2Options: [ + "all", + ...new Set( + flattenedData + .map((item) => item.http2) + .filter((h) => typeof h === "boolean"), + ), + ], + }; + }, [data, flattenedData]); + + // Generate consistent colors for all servers upfront + const serverColors = useMemo(() => { + const itsiColor = "#ff7f0e"; + + const palette = [ + "#1f77b4", // blue + "#2ca02c", // green + "#d62728", // red (not orange) + "#9467bd", // purple + "#8c564b", // brown + "#e377c2", // pink + "#7f7f7f", // gray + "#bcbd22", // lime + "#17becf", // cyan + "#393b79", // indigo + "#a55194", // magenta + ]; + + const colorMap: 
Record = {}; + const servers = [...allFilterOptions.servers]; + + const itsiIndex = servers.findIndex((s) => s === "itsi"); + if (itsiIndex !== -1) { + colorMap["itsi"] = itsiColor; + servers.splice(itsiIndex, 1); + } + + servers.forEach((server, index) => { + colorMap[server] = palette[index % palette.length]; + }); + + return colorMap; + }, [allFilterOptions.servers]); + + // Helper function to parse URL search parameters + const parseUrlParams = useCallback((): Partial | null => { + if (typeof window === "undefined") return null; + + try { + const urlParams = new URLSearchParams(window.location.search); + const filters: Partial = {}; + + // Parse each parameter + const cpu = urlParams.get("cpu"); + const testCase = urlParams.get("testCase"); + const threads = urlParams.get("threads"); + const workers = urlParams.get("workers"); + const concurrency = urlParams.get("concurrency"); + const http2 = urlParams.get("http2"); + const xAxis = urlParams.get("xAxis"); + const metric = urlParams.get("metric"); + + if (cpu) filters.cpu = cpu; + if (testCase) filters.testCase = testCase; + if (threads) filters.threads = Number.parseInt(threads); + if (workers) filters.workers = Number.parseInt(workers); + if (concurrency) filters.concurrency = Number.parseInt(concurrency); + if (http2) { + if (http2 === "all") { + filters.http2 = "all"; + } else { + filters.http2 = http2 === "true"; + } + } + if (xAxis) filters.xAxis = xAxis; + if (metric) filters.metric = metric; + + const visibleServersParam = urlParams.get("visibleServers"); + if (visibleServersParam) { + try { + filters.visibleServers = visibleServersParam + .split(",") + .filter(Boolean); + } catch (e) { + // Ignore parsing errors + } + } + + return Object.keys(filters).length > 0 ? 
filters : null; + } catch (error) { + console.warn("Failed to parse URL parameters:", error); + } + + return null; + }, []); + + // Helper function to update URL search parameters + const updateUrlParams = useCallback((filters: FilterState) => { + if (typeof window === "undefined") return; + + try { + const url = new URL(window.location.href); + + // Set each parameter + url.searchParams.set("cpu", filters.cpu); + url.searchParams.set("testCase", filters.testCase); + url.searchParams.set("threads", filters.threads.toString()); + url.searchParams.set("workers", filters.workers.toString()); + url.searchParams.set("concurrency", filters.concurrency.toString()); + url.searchParams.set("http2", filters.http2.toString()); + url.searchParams.set("xAxis", filters.xAxis); + url.searchParams.set("metric", filters.metric); + + // Set visible servers as comma-separated list + if (filters.visibleServers && filters.visibleServers.length > 0) { + url.searchParams.set( + "visibleServers", + filters.visibleServers.join(","), + ); + } else { + url.searchParams.delete("visibleServers"); + } + + // Update the URL without triggering a page reload + window.history.replaceState(null, "", url.toString()); + } catch (error) { + console.warn("Failed to update URL parameters:", error); + } + }, []); + + // Helper function to validate and sanitize filter state from URL + const validateAndSanitizeFilters = useCallback( + (urlFilters: Partial): FilterState => { + const defaultFilters: FilterState = { + cpu: allFilterOptions.cpus[0] || "", + testCase: allFilterOptions.testCases[0] || "", + threads: allFilterOptions.threads[0] || 1, + workers: allFilterOptions.workers[0] || 1, + concurrency: allFilterOptions.concurrencyLevels[0] || 10, + http2: "all", // Default to "all" + xAxis: "concurrency", + metric: "rps", + visibleServers: allFilterOptions.servers, // Default to all servers visible + }; + + // Validate each field and fall back to defaults if invalid + const validatedFilters: FilterState = { 
+ cpu: allFilterOptions.cpus.includes(urlFilters.cpu || "") + ? urlFilters.cpu! + : defaultFilters.cpu, + testCase: allFilterOptions.testCases.includes(urlFilters.testCase || "") + ? urlFilters.testCase! + : defaultFilters.testCase, + threads: allFilterOptions.threads.includes(urlFilters.threads || 0) + ? urlFilters.threads! + : defaultFilters.threads, + workers: allFilterOptions.workers.includes(urlFilters.workers || 0) + ? urlFilters.workers! + : defaultFilters.workers, + concurrency: allFilterOptions.concurrencyLevels.includes( + urlFilters.concurrency || 0, + ) + ? urlFilters.concurrency! + : defaultFilters.concurrency, + http2: allFilterOptions.http2Options.includes(urlFilters.http2 as any) + ? (urlFilters.http2 as boolean | "all") + : defaultFilters.http2, + xAxis: ["concurrency", "threads", "workers"].includes( + urlFilters.xAxis || "", + ) + ? urlFilters.xAxis! + : defaultFilters.xAxis, + metric: ["rps", "p95_latency", "errorRate"].includes( + urlFilters.metric || "", + ) + ? urlFilters.metric! + : defaultFilters.metric, + visibleServers: Array.isArray(urlFilters.visibleServers) + ? urlFilters.visibleServers.filter((server) => + allFilterOptions.servers.includes(server), + ) + : defaultFilters.visibleServers, + }; + + return validatedFilters; + }, + [allFilterOptions], + ); + + // Initialize filter state with URL parameters or defaults + const [filters, setFilters] = useState(() => { + const urlFilters = parseUrlParams(); + if (urlFilters && allFilterOptions.cpus.length > 0) { + return validateAndSanitizeFilters(urlFilters); + } + + const preferredTestCase = allFilterOptions.testCases.includes("hello_world") + ? 
"hello_world" + : allFilterOptions.testCases[0] || ""; + + return { + cpu: allFilterOptions.cpus[0] || "", + testCase: preferredTestCase || "", + threads: allFilterOptions.threads[0] || 1, + workers: allFilterOptions.workers[0] || 1, + concurrency: allFilterOptions.concurrencyLevels[0] || 10, + http2: "all", // Default to "all" + xAxis: "concurrency", + metric: "rps", + visibleServers: allFilterOptions.servers, + }; + }); + + // Track which servers are visible + const [visibleServers, setVisibleServers] = useState>( + () => { + const initialVisibleServers: Record = {}; + allFilterOptions.servers.forEach((server) => { + initialVisibleServers[server] = true; + }); + + const urlFilters = parseUrlParams(); + if ( + urlFilters?.visibleServers && + Array.isArray(urlFilters.visibleServers) + ) { + allFilterOptions.servers.forEach((server) => { + initialVisibleServers[server] = + urlFilters.visibleServers!.includes(server); + }); + } + + return initialVisibleServers; + }, + ); + + // Currently hovered data point + const [hoveredPoint, setHoveredPoint] = useState( + null, + ); + const [activeDataKey, setActiveDataKey] = useState(null); + + // Track legend interactions to disable animations + // const [isLegendInteracting, setIsLegendInteracting] = useState(false) + + // Update URL parameters when filters change + useEffect(() => { + updateUrlParams(filters); + }, [filters, updateUrlParams]); + + // Handle initial load from URL parameters after data is available + useEffect(() => { + if (allFilterOptions.cpus.length > 0) { + const urlFilters = parseUrlParams(); + if (urlFilters) { + const validatedFilters = validateAndSanitizeFilters(urlFilters); + setFilters(validatedFilters); + + // Update visibleServers based on URL params after filters are set + const initialVisibleServers: Record = {}; + allFilterOptions.servers.forEach((server) => { + initialVisibleServers[server] = true; + }); + + if ( + urlFilters?.visibleServers && + Array.isArray(urlFilters.visibleServers) + ) { + 
allFilterOptions.servers.forEach((server) => { + initialVisibleServers[server] = + urlFilters.visibleServers!.includes(server); + }); + } + setVisibleServers(initialVisibleServers); + } + } + }, [allFilterOptions, parseUrlParams, validateAndSanitizeFilters]); + + // Get dynamic filter options based on selected CPU and test case + const filterOptions = useMemo(() => { + // First filter by CPU + const cpuFilteredData = flattenedData.filter( + (item) => (item as any).cpu === filters.cpu, + ); + + // Get available test cases for the selected CPU + const availableTestCases = [ + ...new Set(cpuFilteredData.map((item) => item.test_case).filter(Boolean)), + ]; + + // Then filter by test case to get the remaining filter options + const testCaseFilteredData = cpuFilteredData.filter( + (item) => item.test_case === filters.testCase, + ); + + return { + cpus: allFilterOptions.cpus, + testCases: availableTestCases, + servers: [ + ...new Set( + testCaseFilteredData.map((item) => item.server).filter(Boolean), + ), + ], + threads: [ + ...new Set( + testCaseFilteredData + .map((item) => item.threads) + .filter((t) => typeof t === "number"), + ), + ].sort((a, b) => a - b), + workers: [ + ...new Set( + testCaseFilteredData + .map((item) => item.workers) + .filter((w) => typeof w === "number"), + ), + ].sort((a, b) => a - b), + concurrencyLevels: [ + ...new Set( + testCaseFilteredData + .map((item) => item.concurrency) + .filter((c) => typeof c === "number"), + ), + ].sort((a, b) => a - b), + http2Options: [ + "all", + ...new Set( + testCaseFilteredData + .map((item) => item.http2) + .filter((h) => typeof h === "boolean"), + ), + ], + }; + }, [flattenedData, filters.cpu, filters.testCase, allFilterOptions.cpus]); + + // Get grouped test cases for the dropdown + const groupedTestCases = useMemo(() => { + // First filter by CPU to get available data + const cpuFilteredData = flattenedData.filter( + (item) => (item as any).cpu === filters.cpu, + ); + + // Group test cases by their group + 
const groupedTests: Record = {}; + cpuFilteredData.forEach((item) => { + const group = (item as any).group; + if (!group || !item.test_case) return; + + if (!groupedTests[group]) { + groupedTests[group] = []; + } + if (!groupedTests[group].includes(item.test_case)) { + groupedTests[group].push(item.test_case); + } + }); + + // Sort test cases within each group + Object.keys(groupedTests).forEach((group) => { + groupedTests[group].sort(); + }); + + // Sort groups: "rack" first, then alphabetically + const sortedGroups = Object.keys(groupedTests).sort((a, b) => { + if (a === "rack") return -1; + if (b === "rack") return 1; + return a.localeCompare(b); + }); + + return { groupedTests, sortedGroups }; + }, [flattenedData, filters.cpu]); + + // Update filters when CPU or test case changes to ensure valid selections + useEffect(() => { + setFilters((prev) => { + const newFilters = { ...prev }; + + // If test case is not available for selected CPU, select first available + if (!filterOptions.testCases.includes(prev.testCase)) { + newFilters.testCase = filterOptions.testCases[0] || ""; + } + + // Update other filters with first available value if current value is not valid + // BUT skip the filter that matches the current X-axis + if ( + prev.xAxis !== "threads" && + !filterOptions.threads.includes(prev.threads) + ) { + newFilters.threads = filterOptions.threads[0] || 1; + } + + if ( + prev.xAxis !== "workers" && + !filterOptions.workers.includes(prev.workers) + ) { + newFilters.workers = filterOptions.workers[0] || 1; + } + + if ( + prev.xAxis !== "concurrency" && + !filterOptions.concurrencyLevels.includes(prev.concurrency) + ) { + newFilters.concurrency = filterOptions.concurrencyLevels[0] || 10; + } + + if (!filterOptions.http2Options.includes(prev.http2)) { + newFilters.http2 = filterOptions.http2Options[0] || "all"; + } + + return newFilters; + }); + }, [filters.cpu, filterOptions]); + + // Apply filters to flattened data + const filteredData = useMemo(() => { + 
return flattenedData.filter((item) => { + const itemWithCpu = item as any; + if (itemWithCpu.cpu !== filters.cpu) return false; + if (item.test_case !== filters.testCase) return false; + + // Don't filter by the parameter that's being used as X-axis + if (filters.xAxis !== "threads" && item.threads !== filters.threads) + return false; + if (filters.xAxis !== "workers" && item.workers !== filters.workers) + return false; + if ( + filters.xAxis !== "concurrency" && + item.concurrency !== filters.concurrency + ) + return false; + + // Handle "all" protocol option + if (filters.http2 !== "all" && item.http2 !== filters.http2) return false; + + return true; + }); + }, [flattenedData, filters]); + + // Prepare data for chart based on selected x-axis + const chartData = useMemo(() => { + // Group data by the selected x-axis + const groupedByXAxis: Record = {}; + + filteredData.forEach((item) => { + const xAxisValue = String(item[filters.xAxis as keyof BenchmarkResult]); + if (!groupedByXAxis[xAxisValue]) { + groupedByXAxis[xAxisValue] = []; + } + groupedByXAxis[xAxisValue].push(item); + }); + + // Convert to format suitable for chart + return Object.entries(groupedByXAxis) + .map(([xAxisValue, items]) => { + const point: Record = { [filters.xAxis]: xAxisValue }; + + // Group items by server and protocol when "all" is selected + items.forEach((item) => { + if (visibleServers[item.server]) { + // Single metric based on selection + let metricValue: number; + switch (filters.metric) { + case "rps": + metricValue = item.results.requestsPerSec; + break; + case "p95_latency": + metricValue = item.results.p95_latency; + break; + case "errorRate": + metricValue = 1 - item.results.successRate; + break; + default: + metricValue = item.results.requestsPerSec; + } + + // Create unique key for server+protocol combination when showing all protocols + let dataKey: string; + if (filters.http2 === "all") { + const protocolSuffix = item.http2 ? 
" (HTTP/2)" : " (HTTP/1.1)"; + dataKey = `${formatServerName(item.server)}${protocolSuffix}`; + } else { + dataKey = formatServerName(item.server); + } + + point[dataKey] = metricValue; + // Store the full item for hover details + point[`${dataKey}_data`] = item; + } + }); + + return point; + }) + .sort((a, b) => { + // Sort numerically if the x-axis is a number + const aVal = a[filters.xAxis]; + const bVal = b[filters.xAxis]; + if (!isNaN(Number(aVal)) && !isNaN(Number(bVal))) { + return Number(aVal) - Number(bVal); + } + // Otherwise sort alphabetically + return String(aVal).localeCompare(String(bVal)); + }); + }, [filteredData, filters, visibleServers, formatServerName]); + + // Get metric info for display + const metricInfo = useMemo(() => { + // Helper function to format large numbers compactly + const formatCompact = (value: number, decimals = 1): string => { + if (value >= 1000000) { + return `${(value / 1000000).toFixed(decimals)}M`; + } else if (value >= 1000) { + return `${(value / 1000).toFixed(decimals)}K`; + } + return value.toFixed(decimals); + }; + + switch (filters.metric) { + case "rps": + return { + label: "Requests per Second", + isBetter: "higher", + icon: TrendingUp, + formatter: (value: number) => formatCompact(value, 1), + }; + case "p95_latency": + return { + label: "P95 Latency (ms)", + isBetter: "lower", + icon: TrendingDown, + formatter: (value: number) => + value >= 1000 ? 
formatCompact(value, 1) : value.toFixed(2), + }; + case "errorRate": + return { + label: "Error Rate", + isBetter: "lower", + icon: TrendingDown, + formatter: (value: number) => `${(value * 100).toFixed(1)}%`, + }; + default: + return { + label: "Requests per Second", + isBetter: "higher", + icon: TrendingUp, + formatter: (value: number) => formatCompact(value, 1), + }; + } + }, [filters.metric]); + + // Handle filter changes + const handleFilterChange = (key: keyof FilterState, value: any) => { + setFilters((prev) => ({ ...prev, [key]: value })); + }; + + // Toggle server visibility when clicking on legend + const handleLegendClick = useCallback( + (server: string, event?: React.MouseEvent) => { + // Disable animations during legend interaction + // setIsLegendInteracting(true) + + // Check if Ctrl (Windows/Linux) or Cmd (Mac) key is pressed + const isExclusiveMode = event?.ctrlKey || event?.metaKey; + + if (isExclusiveMode) { + // Ctrl/Cmd + click: Show only this server (hide all others) + const newVisibleServers: Record = {}; + allFilterOptions.servers.forEach((s) => { + newVisibleServers[s] = s === server; + }); + setVisibleServers(newVisibleServers); + } else { + // Normal click: Toggle this server + setVisibleServers((prev) => ({ + ...prev, + [server]: !prev[server], + })); + } + + // Re-enable animations after a short delay + // setTimeout(() => { + // setIsLegendInteracting(false) + // }, 100) + }, + [allFilterOptions.servers], + ); + + // Calculate summary statistics + const summaryStats = useMemo(() => { + if (filteredData.length === 0) return null; + + const values = filteredData.map((item) => { + switch (filters.metric) { + case "rps": + return item.results.requestsPerSec; + case "p95_latency": + return item.results.p95_latency; + case "errorRate": + return 1 - item.results.successRate; + default: + return item.results.requestsPerSec; + } + }); + + return { + count: filteredData.length, + min: Math.min(...values), + max: Math.max(...values), + avg: 
values.reduce((sum, val) => sum + val, 0) / values.length, + }; + }, [filteredData, filters.metric]); + + const topPerformers = useMemo(() => { + if (filteredData.length === 0) return []; + + const currentGroup = (filteredData[0] as any).group; + if (!currentGroup) return []; + + const performanceMap: Record< + string, + Record> + > = {}; + + flattenedData + .filter((item) => { + const itemWithCpu = item as any; + return ( + itemWithCpu.cpu === filters.cpu && itemWithCpu.group === currentGroup + ); + }) + .forEach((item) => { + const key = `${item.test_case}_${item.threads}_${item.workers}_${item.concurrency}`; + + if (!performanceMap[key]) { + performanceMap[key] = {}; + } + + if (!performanceMap[key][item.server]) { + performanceMap[key][item.server] = { true: [], false: [] }; + } + + performanceMap[key][item.server][item.http2].push( + item.results.requestsPerSec, + ); + }); + + const winCounts: Record = {}; + + Object.values(performanceMap).forEach((serverResults) => { + const allScores: [string, number][] = []; + + for (const [server, variants] of Object.entries(serverResults)) { + for (const [http2, values] of Object.entries(variants)) { + const parsedHttp2 = http2 === "true"; // keys are string + if (values.length > 0) { + const avg = values.reduce((sum, v) => sum + v, 0) / values.length; + allScores.push([`${server}___${parsedHttp2}`, avg]); + } + } + } + + if (allScores.length === 0) return; + + const bestScore = Math.max(...allScores.map(([, v]) => v)); + const winningServers = new Set( + allScores + .filter(([, v]) => v === bestScore) + .map(([k]) => k.split("___")[0]), // extract server + ); + + for (const server of winningServers) { + winCounts[server] = (winCounts[server] || 0) + 1; + } + }); + + return Object.entries(winCounts) + .sort(([, a], [, b]) => b - a) + .slice(0, 8) + .map(([server, count]) => ({ server, count })); + }, [flattenedData, filters.cpu, filteredData]); + + // Handle chart hover + const handleChartHover = (props: any) => { + if 
(props.activePayload && props.activePayload.length > 0) { + const { dataKey, payload } = props.activePayload[0]; + const serverData = payload[`${dataKey}_data`]; + + if (serverData) { + setHoveredPoint(serverData); + setActiveDataKey(dataKey); + return; + } + } + + setHoveredPoint(null); + setActiveDataKey(null); + }; + + // Get visible servers and their data keys for the chart + const visibleDataKeys = useMemo(() => { + const keys: string[] = []; + + filterOptions.servers.forEach((server) => { + if (visibleServers[server]) { + if (filters.http2 === "all") { + // Check if this server has both HTTP/1.1 and HTTP/2 data + const serverData = filteredData.filter( + (item) => item.server === server, + ); + const hasHttp1 = serverData.some((item) => !item.http2); + const hasHttp2 = serverData.some((item) => item.http2); + + if (hasHttp1) keys.push(`${formatServerName(server)} (HTTP/1.1)`); + if (hasHttp2) keys.push(`${formatServerName(server)} (HTTP/2)`); + } else { + keys.push(formatServerName(server)); + } + } + }); + + return keys; + }, [ + filterOptions.servers, + visibleServers, + filters.http2, + filteredData, + formatServerName, + ]); + + // Get protocol display text + const getProtocolDisplay = () => { + if (filters.http2 === "all") return "All Protocols"; + return filters.http2 ? "HTTP/2" : "HTTP/1.1"; + }; + + // Show loading state if no data + const visibleServersForUrl = useMemo(() => { + return Object.keys(visibleServers).filter( + (server) => visibleServers[server], + ); + }, [visibleServers]); + + // Update URL parameters when visible servers change + useEffect(() => { + updateUrlParams({ ...filters, visibleServers: visibleServersForUrl }); + }, [visibleServersForUrl, filters, updateUrlParams]); + + if (!data || !Array.isArray(data) || data.length === 0) { + return ( +
+

No benchmark data available

+
+ ); + } + + return ( +
+
+ {/* Left column: Filters */} +
+ + + Filters + + +
+ {/* CPU filter */} +
+ + +
+ + {/* Test case filter with groups */} +
+ + +
+ +
+ {/* Threads filter - disabled if xAxis is threads */} +
+ + +
+ + {/* Workers filter - disabled if xAxis is workers */} +
+ + +
+
+ +
+ {/* Concurrency filter - disabled if xAxis is concurrency */} +
+ + +
+ + {/* HTTP2 filter */} +
+ + +
+
+ + {/* X-Axis selection */} +
+ + +
+ + {/* Metric selection */} +
+ + + handleFilterChange("metric", value) + } + className="w-full" + > + + + RPS + + + P95 + + + Errors + + + + + {/* Better indicator */} +
+ + {metricInfo.isBetter} is better +
+
+ + {/* Compact Summary Stats */} + {summaryStats && ( +
+
+ Results: + {summaryStats.count} +
+
+ Min: + + {metricInfo.formatter(summaryStats.min)} + +
+
+ Max: + + {metricInfo.formatter(summaryStats.max)} + +
+
+ Avg: + + {metricInfo.formatter(summaryStats.avg)} + +
+
+ )} +
+
+
+
+ + {/* Right column: Chart and Error Distribution */} +
+
+ {/* Main Chart */} + + + + {metricInfo.label}: {filters.cpu} - {filters.testCase} + + {filters.xAxis !== "threads" && + `Threads: ${filters.threads}, `} + {filters.xAxis !== "workers" && + `Workers: ${filters.workers}, `} + {filters.xAxis !== "concurrency" && + `Concurrency: ${filters.concurrency}, `} + Protocol: {getProtocolDisplay()} + + + + + {chartData.length === 0 ? ( +
+

+ No data available for the selected filters +

+
+ ) : ( + <> +
+ + + + + + + + {/* Render bars for each visible data key */} + {visibleDataKeys.map((dataKey, index) => ( + + handleChartHover({ + activePayload: [ + { dataKey, payload: data.payload }, + ], + }) + } + onMouseLeave={() => { + setHoveredPoint(null); + setActiveDataKey(null); + }} + /> + ))} + + {/* Add invisible bars for zero values to enable hovering */} + {visibleDataKeys.map((dataKey, index) => ( + { + if (data.payload[dataKey] === 0) { + handleChartHover({ + activePayload: [ + { dataKey, payload: data.payload }, + ], + }); + } + }} + onMouseLeave={() => { + setHoveredPoint(null); + setActiveDataKey(null); + }} + /> + ))} + + +
+ + {/* Server Toggle Legend */} +
+ {filterOptions.servers.map((server) => ( +
handleLegendClick(server, event)} + title={`Click to toggle, ${navigator.platform.includes("Mac") ? "Cmd" : "Ctrl"}+click to show only this server`} + > +
+ + {formatServerName(server)} + +
+ ))} +
+ + )} + + + + {/* Top Performers and Benchmark Details in horizontal layout */} +
+
0 ? "lg:col-span-7" : "lg:col-span-12" + } + > + + + + {hoveredPoint ? "Benchmark Details" : "Hover Details"} + {!hoveredPoint && ( + + + Hover over chart bars to see details + + )} + + + + {hoveredPoint ? ( +
+ {/* Header with server and test case */} +
+ +
+ {formatServerName(hoveredPoint.server)} + + + {hoveredPoint.test_case} + + + {filters.xAxis}:{" "} + {String( + hoveredPoint[ + filters.xAxis as keyof BenchmarkResult + ], + )} + + + {hoveredPoint.http2 ? "HTTP/2" : "HTTP/1.1"} + +
+ + {/* Version information in a separate row */} + {hoveredPoint.version && ( +
+ + + {hoveredPoint.version} + +
+ )} + + {/* Performance metrics */} +
+
+ RPS: + + {hoveredPoint.results.requestsPerSec.toFixed(2)} + +
+
+ + Success Rate: + + + {(hoveredPoint.results.successRate * 100).toFixed( + 2, + )} + % + +
+
+
+
+
+ + P95 Latency: + + + {hoveredPoint.results.p95_latency != null + ? `${hoveredPoint.results.p95_latency.toFixed(2)} ms` + : "N/A"} + +
+
+ + Avg Latency: + + + {hoveredPoint.results.average != null + ? `${hoveredPoint.results.average.toFixed(2)} ms` + : "N/A"} + +
+
+ + {/* Error distribution */} + {Object.keys( + hoveredPoint.results.errorDistribution || {}, + ).length > 0 ? ( +
+
+ Error Distribution: +
+
+ {Object.entries( + hoveredPoint.results.errorDistribution || {}, + ).map(([errorType, count]) => ( +
+ {errorType} + + {count} + +
+ ))} +
+
+ ) : ( +

+ No errors reported +

+ )} +
+ ) : ( +

+ Hover over a data point to see benchmark details +

+ )} + + +
+ {topPerformers.length > 0 && ( +
+ + + + Top Performers{" "} + {filteredData[0] && (filteredData[0] as any).group && ( + + ({(filteredData[0] as any).group}) + + )} + + + +
+ {topPerformers.map(({ server, count }) => ( +
+
+
+ + {formatServerName(server)} + +
+ + {count} + +
+ ))} +
+ {(filteredData[0] as any).group == "rack" && ( +

+ Note: Some servers (e.g. Unicorn, Agoo) don’t + participate in multi-threaded test cases so will + appear less frequently in these results. +

+ )} + + +
+ )} +
+
+
+
+
+ ); +} diff --git a/docs/benchmark-dashboard/components/theme-provider.tsx b/docs/benchmark-dashboard/components/theme-provider.tsx new file mode 100644 index 00000000..55c2f6eb --- /dev/null +++ b/docs/benchmark-dashboard/components/theme-provider.tsx @@ -0,0 +1,11 @@ +'use client' + +import * as React from 'react' +import { + ThemeProvider as NextThemesProvider, + type ThemeProviderProps, +} from 'next-themes' + +export function ThemeProvider({ children, ...props }: ThemeProviderProps) { + return {children} +} diff --git a/docs/benchmark-dashboard/components/ui/accordion.tsx b/docs/benchmark-dashboard/components/ui/accordion.tsx new file mode 100644 index 00000000..24c788c2 --- /dev/null +++ b/docs/benchmark-dashboard/components/ui/accordion.tsx @@ -0,0 +1,58 @@ +"use client" + +import * as React from "react" +import * as AccordionPrimitive from "@radix-ui/react-accordion" +import { ChevronDown } from "lucide-react" + +import { cn } from "@/lib/utils" + +const Accordion = AccordionPrimitive.Root + +const AccordionItem = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AccordionItem.displayName = "AccordionItem" + +const AccordionTrigger = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + + svg]:rotate-180", + className + )} + {...props} + > + {children} + + + +)) +AccordionTrigger.displayName = AccordionPrimitive.Trigger.displayName + +const AccordionContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + +
{children}
+
+)) + +AccordionContent.displayName = AccordionPrimitive.Content.displayName + +export { Accordion, AccordionItem, AccordionTrigger, AccordionContent } diff --git a/docs/benchmark-dashboard/components/ui/alert-dialog.tsx b/docs/benchmark-dashboard/components/ui/alert-dialog.tsx new file mode 100644 index 00000000..25e7b474 --- /dev/null +++ b/docs/benchmark-dashboard/components/ui/alert-dialog.tsx @@ -0,0 +1,141 @@ +"use client" + +import * as React from "react" +import * as AlertDialogPrimitive from "@radix-ui/react-alert-dialog" + +import { cn } from "@/lib/utils" +import { buttonVariants } from "@/components/ui/button" + +const AlertDialog = AlertDialogPrimitive.Root + +const AlertDialogTrigger = AlertDialogPrimitive.Trigger + +const AlertDialogPortal = AlertDialogPrimitive.Portal + +const AlertDialogOverlay = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName + +const AlertDialogContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + + +)) +AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName + +const AlertDialogHeader = ({ + className, + ...props +}: React.HTMLAttributes) => ( +
+) +AlertDialogHeader.displayName = "AlertDialogHeader" + +const AlertDialogFooter = ({ + className, + ...props +}: React.HTMLAttributes) => ( +
+) +AlertDialogFooter.displayName = "AlertDialogFooter" + +const AlertDialogTitle = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName + +const AlertDialogDescription = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogDescription.displayName = + AlertDialogPrimitive.Description.displayName + +const AlertDialogAction = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName + +const AlertDialogCancel = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName + +export { + AlertDialog, + AlertDialogPortal, + AlertDialogOverlay, + AlertDialogTrigger, + AlertDialogContent, + AlertDialogHeader, + AlertDialogFooter, + AlertDialogTitle, + AlertDialogDescription, + AlertDialogAction, + AlertDialogCancel, +} diff --git a/docs/benchmark-dashboard/components/ui/alert.tsx b/docs/benchmark-dashboard/components/ui/alert.tsx new file mode 100644 index 00000000..41fa7e05 --- /dev/null +++ b/docs/benchmark-dashboard/components/ui/alert.tsx @@ -0,0 +1,59 @@ +import * as React from "react" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const alertVariants = cva( + "relative w-full rounded-lg border p-4 [&>svg~*]:pl-7 [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground", + { + variants: { + variant: { + default: "bg-background text-foreground", + destructive: + "border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive", + }, + }, + defaultVariants: { + variant: "default", + }, 
+ } +) + +const Alert = React.forwardRef< + HTMLDivElement, + React.HTMLAttributes & VariantProps +>(({ className, variant, ...props }, ref) => ( +
+)) +Alert.displayName = "Alert" + +const AlertTitle = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)) +AlertTitle.displayName = "AlertTitle" + +const AlertDescription = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)) +AlertDescription.displayName = "AlertDescription" + +export { Alert, AlertTitle, AlertDescription } diff --git a/docs/benchmark-dashboard/components/ui/aspect-ratio.tsx b/docs/benchmark-dashboard/components/ui/aspect-ratio.tsx new file mode 100644 index 00000000..d6a5226f --- /dev/null +++ b/docs/benchmark-dashboard/components/ui/aspect-ratio.tsx @@ -0,0 +1,7 @@ +"use client" + +import * as AspectRatioPrimitive from "@radix-ui/react-aspect-ratio" + +const AspectRatio = AspectRatioPrimitive.Root + +export { AspectRatio } diff --git a/docs/benchmark-dashboard/components/ui/avatar.tsx b/docs/benchmark-dashboard/components/ui/avatar.tsx new file mode 100644 index 00000000..51e507ba --- /dev/null +++ b/docs/benchmark-dashboard/components/ui/avatar.tsx @@ -0,0 +1,50 @@ +"use client" + +import * as React from "react" +import * as AvatarPrimitive from "@radix-ui/react-avatar" + +import { cn } from "@/lib/utils" + +const Avatar = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +Avatar.displayName = AvatarPrimitive.Root.displayName + +const AvatarImage = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AvatarImage.displayName = AvatarPrimitive.Image.displayName + +const AvatarFallback = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName + +export { Avatar, AvatarImage, AvatarFallback } diff --git a/docs/benchmark-dashboard/components/ui/badge.tsx b/docs/benchmark-dashboard/components/ui/badge.tsx new file mode 100644 index 00000000..f000e3ef --- /dev/null +++ b/docs/benchmark-dashboard/components/ui/badge.tsx @@ -0,0 +1,36 @@ +import * as React from "react" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const badgeVariants = cva( + "inline-flex 
items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2", + { + variants: { + variant: { + default: + "border-transparent bg-primary text-primary-foreground hover:bg-primary/80", + secondary: + "border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80", + destructive: + "border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80", + outline: "text-foreground", + }, + }, + defaultVariants: { + variant: "default", + }, + } +) + +export interface BadgeProps + extends React.HTMLAttributes, + VariantProps {} + +function Badge({ className, variant, ...props }: BadgeProps) { + return ( +
+ ) +} + +export { Badge, badgeVariants } diff --git a/docs/benchmark-dashboard/components/ui/breadcrumb.tsx b/docs/benchmark-dashboard/components/ui/breadcrumb.tsx new file mode 100644 index 00000000..60e6c96f --- /dev/null +++ b/docs/benchmark-dashboard/components/ui/breadcrumb.tsx @@ -0,0 +1,115 @@ +import * as React from "react" +import { Slot } from "@radix-ui/react-slot" +import { ChevronRight, MoreHorizontal } from "lucide-react" + +import { cn } from "@/lib/utils" + +const Breadcrumb = React.forwardRef< + HTMLElement, + React.ComponentPropsWithoutRef<"nav"> & { + separator?: React.ReactNode + } +>(({ ...props }, ref) =>