Upgrade to http/hyper v1.0 (#3726)

Co-authored-by: Salvador Girones <salvadorgirones@gmail.com>
Co-authored-by: Tobie Morgan Hitchcock <tobie@surrealdb.com>
Authored by Raphael Darley on 2024-08-07 11:42:25 +01:00; committed by GitHub
parent ebc140e8ad
commit 4f3b96334e
40 changed files with 965 additions and 801 deletions

2
.gitignore vendored
View file

@ -28,6 +28,7 @@ Temporary Items
**/*.rs.bk
*.db
*.sw?
*.skv
# -----------------------------------
# Folders
@ -54,3 +55,4 @@ Temporary Items
/store/
surreal
history.txt

770
Cargo.lock generated

File diff suppressed because it is too large

View file

@ -8,7 +8,13 @@ authors = ["Tobie Morgan Hitchcock <tobie@surrealdb.com>"]
[features]
# Public features
default = ["storage-mem", "storage-surrealkv", "storage-rocksdb", "scripting", "http"]
default = [
"storage-mem",
"storage-surrealkv",
"storage-rocksdb",
"scripting",
"http",
]
storage-mem = ["surrealdb/kv-mem"]
storage-rocksdb = ["surrealdb/kv-rocksdb"]
storage-tikv = ["surrealdb/kv-tikv"]
@ -33,9 +39,6 @@ members = [
"lib/examples/rocket",
]
[profile.make]
inherits = "dev"
[profile.release]
lto = true
strip = true
@ -46,11 +49,18 @@ codegen-units = 1
[profile.bench]
strip = false
[profile.make]
inherits = "dev"
[dependencies]
argon2 = "0.5.2"
axum = { version = "0.6.20", features = ["tracing", "ws", "headers"] }
axum-extra = { version = "0.7.7", features = ["query", "typed-routing"] }
axum-server = { version = "0.5.1", features = ["tls-rustls"] }
axum = { version = "0.7.4", features = ["tracing", "ws"] }
axum-extra = { version = "0.9.2", features = [
"query",
"typed-routing",
"typed-header",
] }
axum-server = { version = "0.7.1", features = ["tls-rustls-no-provider"] }
base64 = "0.21.5"
bytes = "1.5.0"
ciborium = "0.2.1"
@ -65,21 +75,32 @@ futures-util = "0.3.29"
geo = "0.28.0"
geo-types = "0.7.13"
glob = "0.3.1"
http = "0.2.11"
http-body = "0.4.5"
hyper = "0.14.27"
http = "1.1.0"
http-body = "1.0.0"
http-body-util = "0.1.1"
hyper = "1.2.0"
once_cell = "1.18.0"
opentelemetry = { version = "0.19", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.12.0", features = ["metrics"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
# OpenTelemetry versions are dependent on tracing-opentelemetry. See https://github.com/open-telemetry/opentelemetry-rust/issues/1571
# Make sure to update the versions of the OpenTelemetry crates if you update the version of tracing-opentelemetry.
# See matching versions here: https://github.com/tokio-rs/tracing-opentelemetry/blob/v0.1.x/Cargo.toml
tracing-opentelemetry = "0.25.0"
opentelemetry = { version = "0.24" }
opentelemetry_sdk = { version = "0.24", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.17.0", features = ["metrics"] }
pin-project-lite = "0.2.13"
pprof = { version = "0.13.0", features = [
"flamegraph",
"prost-codec",
], optional = true }
rand = "0.8.5"
reqwest = { version = "0.11.22", default-features = false, features = [
reqwest = { version = "0.12.5", default-features = false, features = [
"blocking",
"gzip",
"http2",
] }
revision = { version = "0.8.0", features = [
"chrono",
@ -105,7 +126,7 @@ thiserror = "1.0.50"
tokio = { version = "1.34.0", features = ["macros", "signal"] }
tokio-util = { version = "0.7.10", features = ["io"] }
tower = "0.4.13"
tower-http = { version = "0.4.4", features = [
tower-http = { version = "0.5.2", features = [
"trace",
"sensitive-headers",
"auth",
@ -118,11 +139,9 @@ tower-http = { version = "0.4.4", features = [
"add-extension",
"compression-full",
] }
tracing = "0.1"
tracing-opentelemetry = "0.19.0"
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
urlencoding = "2.1.3"
uuid = { version = "1.6.1", features = ["serde", "js", "v4", "v7"] }
tokio-tungstenite = "0.23.0"
[target.'cfg(unix)'.dependencies]
nix = { version = "0.27.1", features = ["user"] }
@ -138,12 +157,11 @@ jemallocator = "0.5.4"
[dev-dependencies]
assert_fs = "1.0.13"
chrono = { version = "0.4.31", features = ["serde"] }
chrono = "0.4.38"
env_logger = "0.10.1"
jsonwebtoken = { version = "8.3.0-surreal.1", package = "surrealdb-jsonwebtoken" }
opentelemetry-proto = { version = "0.2.0", features = [
jsonwebtoken = "9.3.0"
opentelemetry-proto = { version = "0.7.0", features = [
"gen-tonic",
"traces",
"metrics",
"logs",
] }
@ -152,10 +170,10 @@ serial_test = "2.0.0"
temp-env = { version = "0.3.6", features = ["async_closure"] }
test-log = { version = "0.2.13", features = ["trace"] }
tokio-stream = { version = "0.1", features = ["net"] }
tokio-tungstenite = { version = "0.20.1" }
tonic = "0.8.3"
tokio-tungstenite = { version = "0.23.0" }
tonic = "0.12.1"
ulid = "1.1.0"
wiremock = "0.5.22"
wiremock = "0.6.0"
[build-dependencies]
semver = "1.0.20"
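With axum 0.7 the TypedHeader extractor and the headers types moved out of axum into axum-extra, which is why the "typed-header" feature is enabled above. A minimal sketch of the new import paths (handler name and header choice are illustrative, not from this commit):

use axum_extra::headers::{authorization::Bearer, Authorization};
use axum_extra::TypedHeader;

// Hypothetical handler: extract an `Authorization: Bearer <token>` header
// using the extractor that now lives in axum-extra.
async fn whoami(TypedHeader(auth): TypedHeader<Authorization<Bearer>>) -> String {
    format!("token: {}", auth.token())
}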

View file

@ -1012,6 +1012,9 @@ allow_unsafe = true
[pkg.hyper-util]
allow_unsafe = true
allow_apis = [
"net",
]
[pkg.terminal_size]
allow_unsafe = true
@ -1365,3 +1368,9 @@ allow_unsafe = true
allow_apis = [
"fs",
]
[pkg.atomic-waker]
allow_unsafe = true
[pkg.rustls-pki-types]
allow_unsafe = true

View file

@ -116,7 +116,7 @@ rand = "0.8.5"
reblessive = { version = "0.4.0", features = ["tree"] }
regex = "1.10.2"
regex-syntax = { version = "0.8.2", optional = true, features = ["arbitrary"] }
reqwest = { version = "0.11.22", default-features = false, features = [
reqwest = { version = "0.12.5", default-features = false, features = [
"json",
"stream",
"multipart",
@ -157,7 +157,7 @@ test-log = { version = "0.2.13", features = ["trace"] }
time = { version = "0.3.36", features = ["serde"] }
tokio = { version = "1.34.0", features = ["macros", "sync", "rt-multi-thread"] }
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
wiremock = "0.5.22"
wiremock = "0.6.0"
[target.'cfg(target_arch = "wasm32")'.dependencies]
pharos = "0.5.3"
@ -183,7 +183,7 @@ tokio = { version = "1.34.0", default-features = false, features = [
"time",
"sync",
] }
tokio-tungstenite = { version = "0.20.1", optional = true }
tokio-tungstenite = { version = "0.21.0", optional = true }
uuid = { version = "1.6.1", features = ["serde", "v4", "v7"] }
[lib]

View file

@ -66,6 +66,11 @@ features = [
targets = []
[dependencies]
reqwest = { version = "0.12.5", default-features = false, features = [
"json",
"multipart",
"stream",
], optional = true }
bincode = "1.3.3"
channel = { version = "1.9.0", package = "async-channel" }
chrono = { version = "0.4.31", features = ["serde"] }
@ -76,11 +81,6 @@ indexmap = { version = "2.1.0", features = ["serde"] }
native-tls = { version = "0.2.11", optional = true }
once_cell = "1.18.0"
path-clean = "1.0.1"
reqwest = { version = "0.11.22", default-features = false, features = [
"json",
"stream",
"multipart",
], optional = true }
revision = { version = "0.8.0", features = [
"chrono",
"geo",
@ -90,7 +90,13 @@ revision = { version = "0.8.0", features = [
"uuid",
] }
rust_decimal = { version = "1.33.1", features = ["maths", "serde-str"] }
rustls = { version = "0.21.11", optional = true }
rustls = { version = "0.23.12", default-features = false, features = [
"ring",
"logging",
"std",
"tls12",
], optional = true }
rustls-pki-types = { version = "1.7.0", features = ["web"] }
semver = { version = "1.0.20", features = ["serde"] }
serde = { version = "1.0.193", features = ["derive"] }
serde_json = "1.0.108"
@ -119,7 +125,7 @@ time = { version = "0.3.36", features = ["serde"] }
tokio = { version = "1.34.0", features = ["macros", "sync", "rt-multi-thread"] }
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
ulid = { version = "1.1.0", features = ["serde"] }
wiremock = "0.5.22"
wiremock = "0.6.0"
[target.'cfg(target_arch = "wasm32")'.dependencies]
pharos = "0.5.3"
@ -145,7 +151,7 @@ tokio = { version = "1.34.0", default-features = false, features = [
"time",
"sync",
] }
tokio-tungstenite = { version = "0.20.1", optional = true }
tokio-tungstenite = { version = "0.23.1", optional = true, features = ["url"] }
uuid = { version = "1.6.1", features = ["serde", "v4", "v7"] }
[lib]
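rustls is now pinned to 0.23 with default features off and only the ring backend enabled. Under that configuration a process that builds its own TLS setup may need to install the crypto provider explicitly; a hedged sketch, not code from this commit:

// Select the ring provider as the process-wide default before building TLS configs.
fn install_tls_provider() {
    let _ = rustls::crypto::ring::default_provider().install_default();
}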

View file

@ -9,7 +9,6 @@ use crate::env;
use crate::err::Error;
use crate::net::{self, client_ip::ClientIp};
use clap::Args;
use opentelemetry::Context;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
@ -154,7 +153,7 @@ pub async fn init(
// Initialize opentelemetry and logging
crate::telemetry::builder().with_filter(log).init();
// Start metrics subsystem
crate::telemetry::metrics::init(&Context::current()).expect("failed to initialize metrics");
crate::telemetry::metrics::init().expect("failed to initialize metrics");
// Check if we should output a banner
if !no_banner {

View file

@ -1,8 +1,8 @@
use crate::cli::abstraction::auth::Error as SurrealAuthError;
use axum::extract::rejection::TypedHeaderRejection;
use axum::response::{IntoResponse, Response};
use axum::Error as AxumError;
use axum::Json;
use axum_extra::typed_header::TypedHeaderRejection;
use base64::DecodeError as Base64Error;
use http::{HeaderName, StatusCode};
use reqwest::Error as ReqwestError;

View file

@ -1,11 +1,10 @@
use axum::{
body::{boxed, Body, BoxBody},
headers::{
authorization::{Basic, Bearer},
Authorization, Origin,
},
Extension, RequestPartsExt, TypedHeader,
use axum::RequestPartsExt;
use axum::{body::Body, Extension};
use axum_extra::headers::{
authorization::{Basic, Bearer},
Authorization, Origin,
};
use axum_extra::TypedHeader;
use futures_util::future::BoxFuture;
use http::{request::Parts, StatusCode};
use hyper::{Request, Response};
@ -46,15 +45,12 @@ use super::{
#[derive(Clone, Copy)]
pub(super) struct SurrealAuth;
impl<B> AsyncAuthorizeRequest<B> for SurrealAuth
where
B: Send + Sync + 'static,
{
type RequestBody = B;
type ResponseBody = BoxBody;
type Future = BoxFuture<'static, Result<Request<B>, Response<Self::ResponseBody>>>;
impl AsyncAuthorizeRequest<Body> for SurrealAuth {
type RequestBody = Body;
type ResponseBody = Body;
type Future = BoxFuture<'static, Result<Request<Body>, Response<Self::ResponseBody>>>;
fn authorize(&mut self, request: Request<B>) -> Self::Future {
fn authorize(&mut self, request: Request<Body>) -> Self::Future {
Box::pin(async {
let (mut parts, body) = request.into_parts();
match check_auth(&mut parts).await {
@ -65,7 +61,7 @@ where
Err(err) => {
let unauthorized_response = Response::builder()
.status(StatusCode::UNAUTHORIZED)
.body(boxed(Body::from(err.to_string())))
.body(Body::new(err.to_string()))
.unwrap();
Err(unauthorized_response)
}
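The same migration reduced to a self-contained sketch: with tower-http 0.5 the authorizer is written against the concrete axum::body::Body rather than a generic body type, and rejection responses are built with Body::new. DemoAuth and the route below are hypothetical; only the trait wiring mirrors the change above.

use axum::{body::Body, routing::get, Router};
use futures_util::future::BoxFuture;
use http::{Request, Response, StatusCode};
use tower_http::auth::{AsyncAuthorizeRequest, AsyncRequireAuthorizationLayer};

#[derive(Clone, Copy)]
struct DemoAuth;

impl AsyncAuthorizeRequest<Body> for DemoAuth {
    type RequestBody = Body;
    type ResponseBody = Body;
    type Future = BoxFuture<'static, Result<Request<Body>, Response<Body>>>;

    fn authorize(&mut self, request: Request<Body>) -> Self::Future {
        Box::pin(async move {
            // Accept requests that carry an Authorization header; reject the rest.
            if request.headers().contains_key(http::header::AUTHORIZATION) {
                Ok(request)
            } else {
                Err(Response::builder()
                    .status(StatusCode::UNAUTHORIZED)
                    .body(Body::new("unauthorized".to_string()))
                    .unwrap())
            }
        })
    }
}

fn app() -> Router {
    Router::new()
        .route("/", get(|| async { "ok" }))
        .layer(AsyncRequireAuthorizationLayer::new(DemoAuth))
}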

View file

@ -2,13 +2,13 @@ use axum::async_trait;
use axum::extract::ConnectInfo;
use axum::extract::FromRef;
use axum::extract::FromRequestParts;
use axum::extract::Request;
use axum::middleware::Next;
use axum::response::Response;
use axum::Extension;
use axum::RequestPartsExt;
use clap::ValueEnum;
use http::request::Parts;
use http::Request;
use http::StatusCode;
use std::net::SocketAddr;
@ -69,6 +69,7 @@ impl ClientIp {
}
}
#[derive(Clone)]
pub(super) struct ExtractClientIP(pub Option<String>);
#[async_trait]
@ -116,13 +117,10 @@ where
}
}
pub(super) async fn client_ip_middleware<B>(
request: Request<B>,
next: Next<B>,
) -> Result<Response, StatusCode>
where
B: Send,
{
pub(super) async fn client_ip_middleware(
request: Request,
next: Next,
) -> Result<Response, StatusCode> {
let (mut parts, body) = request.into_parts();
if let Ok(Extension(state)) = parts.extract::<Extension<AppState>>().await {
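For reference, the shape of an axum 0.7 middleware after this change: Request and Next are concrete types rather than generics over the body. A minimal sketch with a hypothetical middleware (the real client_ip_middleware above does more work):

use axum::{extract::Request, middleware::Next, response::Response};

// Hypothetical pass-through middleware: note the non-generic Request and Next.
async fn log_request(request: Request, next: Next) -> Response {
    let path = request.uri().path().to_owned();
    let response = next.run(request).await;
    tracing::debug!(%path, status = %response.status(), "handled request");
    response
}
// Attached to a Router with axum::middleware::from_fn(log_request).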

View file

@ -1,21 +1,19 @@
use super::AppState;
use crate::err::Error;
use axum::body::Body;
use axum::response::IntoResponse;
use axum::routing::get;
use axum::Router;
use axum::{response::Response, Extension};
use bytes::Bytes;
use http::StatusCode;
use http_body::Body as HttpBody;
use hyper::body::Body;
use surrealdb::dbs::Session;
use surrealdb::iam::check::check_ns_db;
use surrealdb::iam::Action::View;
use surrealdb::iam::ResourceKind::Any;
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new().route("/export", get(handler))
@ -28,7 +26,8 @@ async fn handler(
// Get the datastore reference
let db = &state.datastore;
// Create a chunked response
let (mut chn, body) = Body::channel();
let (chn, body_stream) = surrealdb::channel::bounded::<Result<Bytes, Error>>(1);
let body = Body::from_stream(body_stream);
// Ensure a NS and DB are set
let (nsv, dbv) = check_ns_db(&session)?;
// Check the permissions level
@ -42,7 +41,7 @@ async fn handler(
// Process all chunk values
tokio::spawn(async move {
while let Ok(v) = rcv.recv().await {
let _ = chn.send_data(Bytes::from(v)).await;
let _ = chn.send(Ok(Bytes::from(v))).await;
}
});
// Return the chunked body
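hyper 1.0 removed Body::channel(), so the chunked export is now driven by a bounded channel whose receiver is wrapped with Body::from_stream. The same pattern as a self-contained sketch, here using a tokio mpsc channel and tokio-stream's ReceiverStream rather than the channel helper in the diff (names are illustrative):

use axum::body::Body;
use bytes::Bytes;
use tokio_stream::wrappers::ReceiverStream;

// Build a streaming body: whatever is sent on `tx` becomes a response chunk.
fn streaming_body() -> Body {
    let (tx, rx) = tokio::sync::mpsc::channel::<Result<Bytes, std::io::Error>>(1);
    tokio::spawn(async move {
        for chunk in ["chunk-1", "chunk-2"] {
            if tx.send(Ok(Bytes::from(chunk))).await.is_err() {
                break; // the client dropped the response body
            }
        }
    });
    Body::from_stream(ReceiverStream::new(rx))
}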

View file

@ -1,5 +1,5 @@
use axum::headers;
use axum::headers::Header;
use axum_extra::headers;
use axum_extra::headers::Header;
use http::HeaderName;
use http::HeaderValue;

View file

@ -1,5 +1,5 @@
use axum::headers;
use axum::headers::Header;
use axum_extra::headers;
use axum_extra::headers::Header;
use http::HeaderName;
use http::HeaderValue;
use surrealdb::headers::AUTH_DB;

View file

@ -1,5 +1,5 @@
use axum::headers;
use axum::headers::Header;
use axum_extra::headers;
use axum_extra::headers::Header;
use http::HeaderName;
use http::HeaderValue;
use surrealdb::headers::AUTH_NS;

View file

@ -1,5 +1,5 @@
use axum::headers;
use axum::headers::Header;
use axum_extra::headers;
use axum_extra::headers::Header;
use http::HeaderName;
use http::HeaderValue;

View file

@ -1,5 +1,5 @@
use axum::headers;
use axum::headers::Header;
use axum_extra::headers;
use axum_extra::headers::Header;
use http::HeaderName;
use http::HeaderValue;
use surrealdb::headers::DB;

View file

@ -1,5 +1,5 @@
use axum::headers;
use axum::headers::Header;
use axum_extra::headers;
use axum_extra::headers::Header;
use http::HeaderName;
use http::HeaderValue;
use surrealdb::headers::ID;

View file

@ -1,10 +1,9 @@
use crate::cnf::PKG_NAME;
use crate::cnf::PKG_VERSION;
use crate::err::Error;
use axum::extract::rejection::TypedHeaderRejection;
use axum::extract::rejection::TypedHeaderRejectionReason;
use axum::headers::Header;
use axum::TypedHeader;
use axum_extra::headers::Header;
use axum_extra::typed_header::{TypedHeaderRejection, TypedHeaderRejectionReason};
use axum_extra::TypedHeader;
use http::header::SERVER;
use http::HeaderValue;
use surrealdb::cnf::SERVER_NAME;

View file

@ -1,5 +1,5 @@
use axum::headers;
use axum::headers::Header;
use axum_extra::headers;
use axum_extra::headers::Header;
use http::HeaderName;
use http::HeaderValue;
use surrealdb::headers::NS;

View file

@ -4,12 +4,10 @@ use axum::response::IntoResponse;
use axum::routing::get;
use axum::Extension;
use axum::Router;
use http_body::Body as HttpBody;
use surrealdb::kvs::{LockType::*, TransactionType::*};
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new().route("/health", get(handler))

View file

@ -8,9 +8,8 @@ use axum::response::IntoResponse;
use axum::routing::post;
use axum::Extension;
use axum::Router;
use axum::TypedHeader;
use axum_extra::TypedHeader;
use bytes::Bytes;
use http_body::Body as HttpBody;
use surrealdb::dbs::Session;
use surrealdb::iam::Action::Edit;
use surrealdb::iam::ResourceKind::Any;
@ -18,11 +17,8 @@ use tower_http::limit::RequestBodyLimitLayer;
const MAX: usize = 1024 * 1024 * 1024 * 4; // 4 GiB
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: std::error::Error + Send + Sync + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new()

View file

@ -5,10 +5,10 @@ use crate::net::params::Params;
use axum::extract::{DefaultBodyLimit, Path};
use axum::response::IntoResponse;
use axum::routing::options;
use axum::{Extension, Router, TypedHeader};
use axum::{Extension, Router};
use axum_extra::extract::Query;
use axum_extra::TypedHeader;
use bytes::Bytes;
use http_body::Body as HttpBody;
use serde::Deserialize;
use std::str;
use surrealdb::dbs::Session;
@ -28,11 +28,8 @@ struct QueryOptions {
pub fields: Option<Vec<String>>,
}
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: std::error::Error + Send + Sync + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new()

View file

@ -2,7 +2,8 @@
use super::AppState;
use crate::err::Error;
use crate::net::output;
use axum::extract::{BodyStream, DefaultBodyLimit, Path};
use axum::body::Body;
use axum::extract::{DefaultBodyLimit, Path};
use axum::response::IntoResponse;
use axum::response::Response;
use axum::routing::{get, post};
@ -11,8 +12,6 @@ use axum::Router;
use bytes::Bytes;
use futures_util::StreamExt;
use http::StatusCode;
use http_body::Body as HttpBody;
use hyper::body::Body;
use surrealdb::dbs::Session;
use surrealdb::iam::check::check_ns_db;
use surrealdb::iam::Action::{Edit, View};
@ -25,11 +24,8 @@ use tower_http::limit::RequestBodyLimitLayer;
const MAX: usize = 1024 * 1024 * 1024 * 4; // 4 GiB
/// The router definition for the ML API endpoints.
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
B::Data: Send + Into<Bytes>,
B::Error: std::error::Error + Send + Sync + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new()
@ -43,8 +39,9 @@ where
async fn import(
Extension(state): Extension<AppState>,
Extension(session): Extension<Session>,
mut stream: BodyStream,
body: Body,
) -> Result<impl IntoResponse, impl IntoResponse> {
let mut stream = body.into_data_stream();
// Get the datastore reference
let db = &state.datastore;
// Ensure a NS and DB are set
@ -112,11 +109,12 @@ async fn export(
// Export the file data in to the store
let mut data = surrealdb::obs::stream(path).await?;
// Create a chunked response
let (mut chn, body) = Body::channel();
let (chn, body_stream) = surrealdb::channel::bounded::<Result<Bytes, Error>>(1);
let body = Body::from_stream(body_stream);
// Process all stream values
tokio::spawn(async move {
while let Some(Ok(v)) = data.next().await {
let _ = chn.send_data(v).await;
let _ = chn.send(Ok(v)).await;
}
});
// Return the streamed body
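axum 0.7 also removed the BodyStream extractor; a handler now takes the Body itself and calls into_data_stream() to consume the request as a stream of Bytes chunks. A minimal sketch (hypothetical handler, error handling elided):

use axum::body::Body;
use futures_util::StreamExt;

// Hypothetical handler: count the bytes of an incoming upload.
async fn count_upload(body: Body) -> String {
    let mut stream = body.into_data_stream();
    let mut total = 0usize;
    while let Some(chunk) = stream.next().await {
        match chunk {
            Ok(bytes) => total += bytes.len(),
            Err(_) => break, // the client went away or the body errored
        }
    }
    format!("received {total} bytes")
}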

View file

@ -158,7 +158,7 @@ pub async fn init(ds: Arc<Datastore>, ct: CancellationToken) -> Result<(), Error
.max_age(Duration::from_secs(86400)),
);
let axum_app = Router::<Arc<RpcState>, _>::new()
let axum_app = Router::<Arc<RpcState>>::new()
// Redirect until we provide a UI
.route("/", get(|| async { Redirect::temporary(cnf::APP_ENDPOINT) }))
.route("/status", get(|| async {}))

View file

@ -12,15 +12,14 @@ use crate::rpc::RpcState;
use axum::extract::State;
use axum::routing::get;
use axum::routing::post;
use axum::TypedHeader;
use axum::{
extract::ws::{WebSocket, WebSocketUpgrade},
response::IntoResponse,
Extension, Router,
};
use axum_extra::TypedHeader;
use bytes::Bytes;
use http::HeaderValue;
use http_body::Body as HttpBody;
use surrealdb::dbs::Session;
use surrealdb::kvs::Datastore;
use surrealdb::rpc::format::Format;
@ -35,12 +34,7 @@ use super::AppState;
use surrealdb::rpc::rpc_context::RpcContext;
pub(super) fn router<B>() -> Router<Arc<RpcState>, B>
where
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: std::error::Error + Send + Sync + 'static,
{
pub(super) fn router() -> Router<Arc<RpcState>> {
Router::new().route("/rpc", get(get_handler)).route("/rpc", post(post_handler))
}

View file

@ -6,9 +6,8 @@ use axum::response::IntoResponse;
use axum::routing::options;
use axum::Extension;
use axum::Router;
use axum::TypedHeader;
use axum_extra::TypedHeader;
use bytes::Bytes;
use http_body::Body as HttpBody;
use serde::Serialize;
use surrealdb::dbs::Session;
use surrealdb::sql::Value;
@ -36,11 +35,8 @@ impl Success {
}
}
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: std::error::Error + Send + Sync + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new()

View file

@ -4,9 +4,9 @@ use crate::net::output;
use axum::extract::DefaultBodyLimit;
use axum::response::IntoResponse;
use axum::routing::options;
use axum::{Extension, Router, TypedHeader};
use axum::{Extension, Router};
use axum_extra::TypedHeader;
use bytes::Bytes;
use http_body::Body as HttpBody;
use serde::Serialize;
use surrealdb::dbs::Session;
use surrealdb::sql::Value;
@ -34,11 +34,8 @@ impl Success {
}
}
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: std::error::Error + Send + Sync + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new()

View file

@ -11,10 +11,9 @@ use axum::response::IntoResponse;
use axum::routing::options;
use axum::Extension;
use axum::Router;
use axum::TypedHeader;
use axum_extra::TypedHeader;
use bytes::Bytes;
use futures::{SinkExt, StreamExt};
use http_body::Body as HttpBody;
use surrealdb::dbs::Session;
use tower_http::limit::RequestBodyLimitLayer;
@ -23,11 +22,8 @@ use super::AppState;
const MAX: usize = 1024 * 1024; // 1 MiB
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: std::error::Error + Send + Sync + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new()

View file

@ -1,11 +1,9 @@
use axum::response::IntoResponse;
use axum::routing::get;
use axum::Router;
use http_body::Body as HttpBody;
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new().route("/sync", get(save).post(load))

View file

@ -3,11 +3,9 @@ use crate::cnf::PKG_VERSION;
use axum::response::IntoResponse;
use axum::routing::get;
use axum::Router;
use http_body::Body as HttpBody;
pub(super) fn router<S, B>() -> Router<S, B>
pub(super) fn router<S>() -> Router<S>
where
B: HttpBody + Send + 'static,
S: Clone + Send + Sync + 'static,
{
Router::new().route("/version", get(handler))

View file

@ -1,72 +1,61 @@
pub(super) mod tower_layer;
use once_cell::sync::Lazy;
use opentelemetry::metrics::{Histogram, MetricsError, Unit, UpDownCounter};
use opentelemetry::Context as TelemetryContext;
use opentelemetry::global;
use opentelemetry::metrics::{Histogram, Meter, MetricsError, UpDownCounter};
use self::tower_layer::HttpCallMetricTracker;
use super::{METER_DURATION, METER_SIZE};
static METER: Lazy<Meter> = Lazy::new(|| global::meter("surrealdb.http"));
pub static HTTP_SERVER_DURATION: Lazy<Histogram<u64>> = Lazy::new(|| {
METER_DURATION
METER
.u64_histogram("http.server.duration")
.with_description("The HTTP server duration in milliseconds.")
.with_unit(Unit::new("ms"))
.with_unit("ms")
.init()
});
pub static HTTP_SERVER_ACTIVE_REQUESTS: Lazy<UpDownCounter<i64>> = Lazy::new(|| {
METER_DURATION
METER
.i64_up_down_counter("http.server.active_requests")
.with_description("The number of active HTTP requests.")
.init()
});
pub static HTTP_SERVER_REQUEST_SIZE: Lazy<Histogram<u64>> = Lazy::new(|| {
METER_SIZE
METER
.u64_histogram("http.server.request.size")
.with_description("Measures the size of HTTP request messages.")
.with_unit(Unit::new("mb"))
.with_unit("mb")
.init()
});
pub static HTTP_SERVER_RESPONSE_SIZE: Lazy<Histogram<u64>> = Lazy::new(|| {
METER_SIZE
METER
.u64_histogram("http.server.response.size")
.with_description("Measures the size of HTTP response messages.")
.with_unit(Unit::new("mb"))
.with_unit("mb")
.init()
});
fn observe_active_request(value: i64, tracker: &HttpCallMetricTracker) -> Result<(), MetricsError> {
let attrs = tracker.active_req_attrs();
HTTP_SERVER_ACTIVE_REQUESTS.add(&TelemetryContext::current(), value, &attrs);
HTTP_SERVER_ACTIVE_REQUESTS.add(value, &attrs);
Ok(())
}
fn record_request_duration(tracker: &HttpCallMetricTracker) {
// Record the duration of the request.
HTTP_SERVER_DURATION.record(
&TelemetryContext::current(),
tracker.duration().as_millis() as u64,
&tracker.request_duration_attrs(),
);
HTTP_SERVER_DURATION
.record(tracker.duration().as_millis() as u64, &tracker.request_duration_attrs());
}
fn record_request_size(tracker: &HttpCallMetricTracker, size: u64) {
HTTP_SERVER_REQUEST_SIZE.record(
&TelemetryContext::current(),
size,
&tracker.request_size_attrs(),
);
HTTP_SERVER_REQUEST_SIZE.record(size, &tracker.request_size_attrs());
}
fn record_response_size(tracker: &HttpCallMetricTracker, size: u64) {
HTTP_SERVER_RESPONSE_SIZE.record(
&TelemetryContext::current(),
size,
&tracker.response_size_attrs(),
);
HTTP_SERVER_RESPONSE_SIZE.record(size, &tracker.response_size_attrs());
}
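In opentelemetry 0.24 the meter comes from opentelemetry::global, with_unit takes a plain string, and instruments record values without a Context argument. A condensed sketch of defining and recording one such instrument (meter name and attributes are illustrative):

use once_cell::sync::Lazy;
use opentelemetry::global;
use opentelemetry::metrics::{Histogram, Meter};
use opentelemetry::KeyValue;

static METER: Lazy<Meter> = Lazy::new(|| global::meter("example.http"));

static DURATION: Lazy<Histogram<u64>> = Lazy::new(|| {
    METER
        .u64_histogram("http.server.duration")
        .with_description("The HTTP server duration in milliseconds.")
        .with_unit("ms")
        .init()
});

fn record_duration(duration_ms: u64) {
    // No TelemetryContext parameter any more: just the value plus attributes.
    DURATION.record(duration_ms, &[KeyValue::new("http.route", "/rpc")]);
}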

View file

@ -1,25 +1,16 @@
pub mod http;
pub mod ws;
use std::time::Duration;
use once_cell::sync::Lazy;
use opentelemetry::Context as TelemetryContext;
use opentelemetry::{
metrics::{Meter, MeterProvider, MetricsError},
runtime,
sdk::{
export::metrics::aggregation,
metrics::{
controllers::{self, BasicController},
processors, selectors,
},
},
};
use opentelemetry::metrics::MetricsError;
use opentelemetry::{global, Context as TelemetryContext};
use opentelemetry_otlp::MetricsExporterBuilder;
use opentelemetry_sdk::metrics::reader::{DefaultAggregationSelector, DefaultTemporalitySelector};
use opentelemetry_sdk::metrics::{
Aggregation, Instrument, PeriodicReader, SdkMeterProvider, Stream,
};
use opentelemetry_sdk::runtime;
pub use self::http::tower_layer::HttpMetricsLayer;
use self::ws::observe_active_connection;
use super::OTEL_DEFAULT_RESOURCE;
@ -48,39 +39,47 @@ const HISTOGRAM_BUCKETS_BYTES: &[f64] = &[
100.0 * MB, // 100 MB
];
fn build_controller(boundaries: &'static [f64]) -> BasicController {
fn build_controller() -> Result<SdkMeterProvider, MetricsError> {
let exporter = MetricsExporterBuilder::from(opentelemetry_otlp::new_exporter().tonic())
.build_metrics_exporter(Box::new(aggregation::cumulative_temporality_selector()))
.build_metrics_exporter(
Box::new(DefaultTemporalitySelector::new()),
Box::new(DefaultAggregationSelector::new()),
)
.unwrap();
let reader = PeriodicReader::builder(exporter, runtime::Tokio).build();
let builder = controllers::basic(processors::factory(
selectors::simple::histogram(boundaries),
aggregation::cumulative_temporality_selector(),
))
.with_push_timeout(Duration::from_secs(5))
.with_collect_period(Duration::from_secs(5))
.with_exporter(exporter)
.with_resource(OTEL_DEFAULT_RESOURCE.clone());
let histo_duration_view = {
let criteria = Instrument::new().name("*.duration");
let mask = Stream::new().aggregation(Aggregation::ExplicitBucketHistogram {
boundaries: HISTOGRAM_BUCKETS_MS.to_vec(),
record_min_max: true,
});
opentelemetry_sdk::metrics::new_view(criteria, mask)?
};
builder.build()
let histo_size_view = {
let criteria = Instrument::new().name("*.size");
let mask = Stream::new().aggregation(Aggregation::ExplicitBucketHistogram {
boundaries: HISTOGRAM_BUCKETS_BYTES.to_vec(),
record_min_max: true,
});
opentelemetry_sdk::metrics::new_view(criteria, mask)?
};
Ok(SdkMeterProvider::builder()
.with_reader(reader)
.with_resource(OTEL_DEFAULT_RESOURCE.clone())
.with_view(histo_duration_view)
.with_view(histo_size_view)
.build())
}
static METER_PROVIDER_DURATION: Lazy<BasicController> =
Lazy::new(|| build_controller(HISTOGRAM_BUCKETS_MS));
static METER_PROVIDER_SIZE: Lazy<BasicController> =
Lazy::new(|| build_controller(HISTOGRAM_BUCKETS_BYTES));
static METER_DURATION: Lazy<Meter> = Lazy::new(|| METER_PROVIDER_DURATION.meter("duration"));
static METER_SIZE: Lazy<Meter> = Lazy::new(|| METER_PROVIDER_SIZE.meter("size"));
/// Initialize the metrics subsystem
pub fn init(cx: &TelemetryContext) -> Result<(), MetricsError> {
METER_PROVIDER_DURATION.start(cx, runtime::Tokio)?;
METER_PROVIDER_SIZE.start(cx, runtime::Tokio)?;
observe_active_connection(0)?;
// Initialize the metrics subsystem
// Panics if initialization fails
pub fn init() -> Result<(), MetricsError> {
let meter_provider = build_controller()?;
global::set_meter_provider(meter_provider);
Ok(())
}
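The BasicController pipeline is replaced by an SdkMeterProvider assembled from a PeriodicReader and per-instrument views, where each view selects instruments by name pattern and overrides their histogram buckets. A reduced sketch of the view mechanism (the boundaries and the omitted reader are illustrative; the real build_controller above also attaches the OTLP reader and resource):

use opentelemetry::metrics::MetricsError;
use opentelemetry_sdk::metrics::{new_view, Aggregation, Instrument, SdkMeterProvider, Stream};

fn provider_with_duration_view() -> Result<SdkMeterProvider, MetricsError> {
    // Every instrument whose name ends in ".duration" gets explicit millisecond buckets.
    let duration_view = new_view(
        Instrument::new().name("*.duration"),
        Stream::new().aggregation(Aggregation::ExplicitBucketHistogram {
            boundaries: vec![5.0, 10.0, 50.0, 100.0, 500.0, 1000.0],
            record_min_max: true,
        }),
    )?;
    Ok(SdkMeterProvider::builder().with_view(duration_view).build())
}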

View file

@ -1,42 +1,43 @@
use std::time::Instant;
use once_cell::sync::Lazy;
use opentelemetry::KeyValue;
use opentelemetry::metrics::Meter;
use opentelemetry::{global, KeyValue};
use opentelemetry::{
metrics::{Histogram, MetricsError, Unit, UpDownCounter},
metrics::{Histogram, MetricsError, UpDownCounter},
Context as TelemetryContext,
};
use super::{METER_DURATION, METER_SIZE};
static METER: Lazy<Meter> = Lazy::new(|| global::meter("surrealdb.rpc"));
pub static RPC_SERVER_DURATION: Lazy<Histogram<u64>> = Lazy::new(|| {
METER_DURATION
METER
.u64_histogram("rpc.server.duration")
.with_description("Measures duration of inbound RPC requests in milliseconds.")
.with_unit(Unit::new("ms"))
.with_unit("ms")
.init()
});
pub static RPC_SERVER_ACTIVE_CONNECTIONS: Lazy<UpDownCounter<i64>> = Lazy::new(|| {
METER_DURATION
METER
.i64_up_down_counter("rpc.server.active_connections")
.with_description("The number of active WebSocket connections.")
.init()
});
pub static RPC_SERVER_REQUEST_SIZE: Lazy<Histogram<u64>> = Lazy::new(|| {
METER_SIZE
METER
.u64_histogram("rpc.server.request.size")
.with_description("Measures the size of HTTP request messages.")
.with_unit(Unit::new("mb"))
.with_unit("mb")
.init()
});
pub static RPC_SERVER_RESPONSE_SIZE: Lazy<Histogram<u64>> = Lazy::new(|| {
METER_SIZE
METER
.u64_histogram("rpc.server.response.size")
.with_description("Measures the size of HTTP response messages.")
.with_unit(Unit::new("mb"))
.with_unit("mb")
.init()
});
@ -57,7 +58,7 @@ pub fn on_disconnect() -> Result<(), MetricsError> {
pub(super) fn observe_active_connection(value: i64) -> Result<(), MetricsError> {
let attrs = otel_common_attrs();
RPC_SERVER_ACTIVE_CONNECTIONS.add(&TelemetryContext::current(), value, &attrs);
RPC_SERVER_ACTIVE_CONNECTIONS.add(value, &attrs);
Ok(())
}
@ -147,7 +148,7 @@ pub fn record_rpc(cx: &TelemetryContext, res_size: usize, is_error: bool) {
]);
};
RPC_SERVER_DURATION.record(cx, duration, &attrs);
RPC_SERVER_REQUEST_SIZE.record(cx, req_size, &attrs);
RPC_SERVER_RESPONSE_SIZE.record(cx, res_size as u64, &attrs);
RPC_SERVER_DURATION.record(duration, &attrs);
RPC_SERVER_REQUEST_SIZE.record(req_size, &attrs);
RPC_SERVER_RESPONSE_SIZE.record(res_size as u64, &attrs);
}

View file

@ -5,11 +5,12 @@ pub mod traces;
use crate::cli::validator::parser::env_filter::CustomEnvFilter;
use once_cell::sync::Lazy;
use opentelemetry::metrics::MetricsError;
use opentelemetry::sdk::resource::{
use opentelemetry::Context;
use opentelemetry::KeyValue;
use opentelemetry_sdk::resource::{
EnvResourceDetector, SdkProvidedResourceDetector, TelemetryResourceDetector,
};
use opentelemetry::sdk::Resource;
use opentelemetry::{Context, KeyValue};
use opentelemetry_sdk::Resource;
use std::time::Duration;
use tracing::{Level, Subscriber};
use tracing_subscriber::filter::ParseError;

View file

@ -2,6 +2,7 @@ use tracing::Subscriber;
use tracing_subscriber::Layer;
use crate::cli::validator::parser::env_filter::CustomEnvFilter;
use opentelemetry::trace::TracerProvider as _;
pub mod otlp;
pub mod rpc;
@ -22,8 +23,19 @@ where
}
// Init the registry with the OTLP tracer
"otlp" => {
debug!("Setup the OTLP tracer");
Some(otlp::new(filter))
// Create the OTLP tracer provider
let tracer_provider =
otlp::build_tracer_provider().expect("Failed to initialize OTLP tracer provider");
// Set it as the global tracer provider
let _ = opentelemetry::global::set_tracer_provider(tracer_provider.clone());
// Returns a tracing subscriber layer built with the selected tracer and filter.
// It will be used by the `tracing` crate to decide what spans to send to the global tracer provider
Some(
tracing_opentelemetry::layer()
.with_tracer(tracer_provider.tracer("surealdb"))
.with_filter(filter.0)
.boxed(),
)
}
tracer => {
panic!("unsupported tracer {tracer}");

View file

@ -1,26 +1,23 @@
use opentelemetry::sdk::trace::Tracer;
use opentelemetry::trace::TraceError;
use opentelemetry_otlp::WithExportConfig;
use tracing::Subscriber;
use tracing_subscriber::Layer;
// use opentelemetry::{
// trace::{Span, SpanBuilder, Tracer as _, TracerProvider as _},
// Context,
// };
use opentelemetry_otlp::SpanExporterBuilder;
use opentelemetry_sdk::trace::{Config, TracerProvider};
// use tracing_subscriber::prelude::*;
use crate::{
cli::validator::parser::env_filter::CustomEnvFilter, telemetry::OTEL_DEFAULT_RESOURCE,
};
use crate::telemetry::OTEL_DEFAULT_RESOURCE;
pub fn new<S>(filter: CustomEnvFilter) -> Box<dyn Layer<S> + Send + Sync>
where
S: Subscriber + for<'a> tracing_subscriber::registry::LookupSpan<'a> + Send + Sync,
{
tracing_opentelemetry::layer().with_tracer(tracer().unwrap()).with_filter(filter.0).boxed()
}
fn tracer() -> Result<Tracer, TraceError> {
opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(opentelemetry_otlp::new_exporter().tonic().with_env())
.with_trace_config(
opentelemetry::sdk::trace::config().with_resource(OTEL_DEFAULT_RESOURCE.clone()),
)
.install_batch(opentelemetry::runtime::Tokio)
pub(super) fn build_tracer_provider() -> Result<TracerProvider, TraceError> {
let exporter = opentelemetry_otlp::new_exporter().tonic();
let span_exporter = SpanExporterBuilder::Tonic(exporter).build_span_exporter()?;
let config = Config::default().with_resource(OTEL_DEFAULT_RESOURCE.clone());
let provider = TracerProvider::builder()
.with_batch_exporter(span_exporter, opentelemetry_sdk::runtime::Tokio)
.with_config(config)
.build();
Ok(provider)
}
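Putting the tracing pieces together: the provider built above is installed globally, and a tracing-opentelemetry layer created from one of its tracers is added to a tracing-subscriber registry. A hedged sketch of that wiring (the commit performs it inside the telemetry builder; the registry setup and tracer name here are illustrative):

use opentelemetry::trace::TracerProvider as _;
use tracing_subscriber::prelude::*;

fn init_tracing(provider: opentelemetry_sdk::trace::TracerProvider) {
    // Install the provider for code that talks to the OpenTelemetry API directly.
    let _ = opentelemetry::global::set_tracer_provider(provider.clone());
    // Bridge `tracing` spans into OpenTelemetry via a subscriber layer.
    let otel_layer = tracing_opentelemetry::layer().with_tracer(provider.tracer("example"));
    tracing_subscriber::registry().with(otel_layer).init();
}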

View file

@ -34,12 +34,66 @@ user-id = 145457 # Tobie Morgan Hitchcock (tobiemh)
start = "2022-01-27"
end = "2025-01-24"
[[trusted.h2]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-03-13"
end = "2025-08-06"
[[trusted.hashbrown]]
criteria = "safe-to-deploy"
user-id = 2915 # Amanieu d'Antras (Amanieu)
start = "2019-04-02"
end = "2025-05-02"
[[trusted.headers]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-09-09"
end = "2025-08-06"
[[trusted.headers-core]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-06-11"
end = "2025-08-06"
[[trusted.http]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-04-05"
end = "2025-08-06"
[[trusted.http-body-util]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2022-10-25"
end = "2025-08-06"
[[trusted.httparse]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-07-03"
end = "2025-08-06"
[[trusted.hyper]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-03-01"
end = "2025-08-06"
[[trusted.hyper-tls]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-03-19"
end = "2025-08-06"
[[trusted.hyper-util]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2022-01-15"
end = "2025-08-06"
[[trusted.indxdb]]
criteria = "safe-to-deploy"
user-id = 145457 # Tobie Morgan Hitchcock (tobiemh)
@ -52,12 +106,30 @@ user-id = 145457 # Tobie Morgan Hitchcock (tobiemh)
start = "2023-03-26"
end = "2025-01-24"
[[trusted.mime]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-09-09"
end = "2025-08-06"
[[trusted.num_cpus]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-06-10"
end = "2025-08-06"
[[trusted.psl-types]]
criteria = "safe-to-deploy"
user-id = 3987 # Rushmore Mushambi (rushmorem)
start = "2021-03-12"
end = "2025-01-24"
[[trusted.reqwest]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-03-04"
end = "2025-08-06"
[[trusted.revision]]
criteria = "safe-to-deploy"
user-id = 145457 # Tobie Morgan Hitchcock (tobiemh)
@ -124,6 +196,12 @@ user-id = 145457 # Tobie Morgan Hitchcock (tobiemh)
start = "2022-02-17"
end = "2025-01-24"
[[trusted.unicase]]
criteria = "safe-to-deploy"
user-id = 359 # Sean McArthur (seanmonstar)
start = "2019-03-05"
end = "2025-08-06"
[[trusted.vart]]
criteria = "safe-to-deploy"
user-id = 145457 # Tobie Morgan Hitchcock (tobiemh)

View file

@ -215,6 +215,10 @@ criteria = "safe-to-deploy"
version = "1.0.3"
criteria = "safe-to-deploy"
[[exemptions.atomic-waker]]
version = "1.1.2"
criteria = "safe-to-deploy"
[[exemptions.axum]]
version = "0.6.20"
criteria = "safe-to-deploy"
@ -232,15 +236,15 @@ version = "0.4.3"
criteria = "safe-to-deploy"
[[exemptions.axum-extra]]
version = "0.7.7"
version = "0.9.2"
criteria = "safe-to-deploy"
[[exemptions.axum-macros]]
version = "0.3.8"
version = "0.4.1"
criteria = "safe-to-deploy"
[[exemptions.axum-server]]
version = "0.5.1"
version = "0.7.1"
criteria = "safe-to-deploy"
[[exemptions.backtrace]]
@ -356,7 +360,7 @@ version = "2.4.2"
criteria = "safe-to-deploy"
[[exemptions.chrono]]
version = "0.4.37"
version = "0.4.38"
criteria = "safe-to-deploy"
[[exemptions.ciborium]]
@ -492,7 +496,7 @@ version = "2.5.0"
criteria = "safe-to-deploy"
[[exemptions.deadpool]]
version = "0.9.5"
version = "0.10.0"
criteria = "safe-to-run"
[[exemptions.deadpool-runtime]]
@ -639,10 +643,6 @@ criteria = "safe-to-deploy"
version = "0.1.5"
criteria = "safe-to-deploy"
[[exemptions.futures-lite]]
version = "1.13.0"
criteria = "safe-to-run"
[[exemptions.futures-lite]]
version = "2.3.0"
criteria = "safe-to-deploy"
@ -659,10 +659,6 @@ criteria = "safe-to-deploy"
version = "0.3.30"
criteria = "safe-to-deploy"
[[exemptions.futures-timer]]
version = "3.0.3"
criteria = "safe-to-run"
[[exemptions.futures-util]]
version = "0.3.30"
criteria = "safe-to-deploy"
@ -719,10 +715,6 @@ criteria = "safe-to-run"
version = "0.9.1"
criteria = "safe-to-run"
[[exemptions.h2]]
version = "0.3.26"
criteria = "safe-to-deploy"
[[exemptions.half]]
version = "2.4.0"
criteria = "safe-to-deploy"
@ -735,10 +727,6 @@ criteria = "safe-to-deploy"
version = "0.3.1"
criteria = "safe-to-deploy"
[[exemptions.headers]]
version = "0.3.9"
criteria = "safe-to-deploy"
[[exemptions.heapless]]
version = "0.7.17"
criteria = "safe-to-deploy"
@ -759,58 +747,22 @@ criteria = "safe-to-deploy"
version = "0.27.0"
criteria = "safe-to-deploy"
[[exemptions.http]]
version = "0.2.12"
criteria = "safe-to-deploy"
[[exemptions.http]]
version = "1.1.0"
criteria = "safe-to-deploy"
[[exemptions.http-body]]
version = "0.4.6"
criteria = "safe-to-deploy"
[[exemptions.http-body-util]]
version = "0.1.1"
criteria = "safe-to-deploy"
[[exemptions.http-types]]
version = "2.12.0"
criteria = "safe-to-run"
[[exemptions.httparse]]
version = "1.8.0"
criteria = "safe-to-deploy"
[[exemptions.humantime]]
version = "2.1.0"
criteria = "safe-to-deploy"
[[exemptions.hyper]]
version = "0.14.28"
criteria = "safe-to-deploy"
[[exemptions.hyper]]
version = "1.2.0"
criteria = "safe-to-deploy"
[[exemptions.hyper-rustls]]
version = "0.24.2"
version = "0.27.2"
criteria = "safe-to-deploy"
[[exemptions.hyper-timeout]]
version = "0.4.1"
criteria = "safe-to-deploy"
[[exemptions.hyper-tls]]
version = "0.5.0"
criteria = "safe-to-deploy"
[[exemptions.hyper-util]]
version = "0.1.3"
criteria = "safe-to-deploy"
[[exemptions.iana-time-zone]]
version = "0.1.60"
criteria = "safe-to-deploy"
@ -835,10 +787,6 @@ criteria = "safe-to-deploy"
version = "2.2.6"
criteria = "safe-to-deploy"
[[exemptions.infer]]
version = "0.2.3"
criteria = "safe-to-run"
[[exemptions.inferno]]
version = "0.11.19"
criteria = "safe-to-deploy"
@ -883,6 +831,10 @@ criteria = "safe-to-deploy"
version = "0.3.69"
criteria = "safe-to-deploy"
[[exemptions.jsonwebtoken]]
version = "9.3.0"
criteria = "safe-to-run"
[[exemptions.lalrpop]]
version = "0.20.2"
criteria = "safe-to-deploy"
@ -1003,10 +955,6 @@ criteria = "safe-to-deploy"
version = "0.1.39"
criteria = "safe-to-deploy"
[[exemptions.mime]]
version = "0.3.17"
criteria = "safe-to-deploy"
[[exemptions.mime_guess]]
version = "2.0.4"
criteria = "safe-to-deploy"
@ -1071,10 +1019,6 @@ criteria = "safe-to-deploy"
version = "0.4.4"
criteria = "safe-to-deploy"
[[exemptions.num_cpus]]
version = "1.16.0"
criteria = "safe-to-deploy"
[[exemptions.object]]
version = "0.32.2"
criteria = "safe-to-deploy"
@ -1100,23 +1044,19 @@ version = "0.9.102"
criteria = "safe-to-deploy"
[[exemptions.opentelemetry]]
version = "0.19.0"
version = "0.24.0"
criteria = "safe-to-deploy"
[[exemptions.opentelemetry-otlp]]
version = "0.12.0"
version = "0.17.0"
criteria = "safe-to-deploy"
[[exemptions.opentelemetry-proto]]
version = "0.2.0"
criteria = "safe-to-deploy"
[[exemptions.opentelemetry_api]]
version = "0.19.0"
version = "0.7.0"
criteria = "safe-to-deploy"
[[exemptions.opentelemetry_sdk]]
version = "0.19.0"
version = "0.24.1"
criteria = "safe-to-deploy"
[[exemptions.ort]]
@ -1271,6 +1211,10 @@ criteria = "safe-to-deploy"
version = "0.11.9"
criteria = "safe-to-deploy"
[[exemptions.prost]]
version = "0.13.1"
criteria = "safe-to-deploy"
[[exemptions.prost-build]]
version = "0.12.3"
criteria = "safe-to-deploy"
@ -1279,6 +1223,10 @@ criteria = "safe-to-deploy"
version = "0.11.9"
criteria = "safe-to-deploy"
[[exemptions.prost-derive]]
version = "0.13.1"
criteria = "safe-to-deploy"
[[exemptions.prost-types]]
version = "0.12.3"
criteria = "safe-to-deploy"
@ -1299,6 +1247,18 @@ criteria = "safe-to-deploy"
version = "0.26.0"
criteria = "safe-to-deploy"
[[exemptions.quinn]]
version = "0.11.2"
criteria = "safe-to-deploy"
[[exemptions.quinn-proto]]
version = "0.11.3"
criteria = "safe-to-deploy"
[[exemptions.quinn-udp]]
version = "0.5.2"
criteria = "safe-to-deploy"
[[exemptions.radium]]
version = "0.7.0"
criteria = "safe-to-deploy"
@ -1387,14 +1347,6 @@ criteria = "safe-to-deploy"
version = "0.4.2"
criteria = "safe-to-deploy"
[[exemptions.reqwest]]
version = "0.11.27"
criteria = "safe-to-deploy"
[[exemptions.retain_mut]]
version = "0.1.9"
criteria = "safe-to-run"
[[exemptions.rexie]]
version = "0.4.2"
criteria = "safe-to-deploy"
@ -1500,19 +1452,27 @@ version = "0.38.32"
criteria = "safe-to-deploy"
[[exemptions.rustls]]
version = "0.21.10"
version = "0.21.8"
criteria = "safe-to-deploy"
[[exemptions.rustls]]
version = "0.22.3"
criteria = "safe-to-deploy"
[[exemptions.rustls]]
version = "0.23.12"
criteria = "safe-to-deploy"
[[exemptions.rustls-pemfile]]
version = "1.0.4"
criteria = "safe-to-deploy"
[[exemptions.rustls-pemfile]]
version = "2.1.1"
criteria = "safe-to-deploy"
[[exemptions.rustls-pki-types]]
version = "1.4.1"
version = "1.7.0"
criteria = "safe-to-deploy"
[[exemptions.rustls-webpki]]
@ -1520,7 +1480,7 @@ version = "0.101.7"
criteria = "safe-to-deploy"
[[exemptions.rustls-webpki]]
version = "0.102.2"
version = "0.102.6"
criteria = "safe-to-deploy"
[[exemptions.rustyline]]
@ -1595,10 +1555,6 @@ criteria = "safe-to-deploy"
version = "0.1.16"
criteria = "safe-to-deploy"
[[exemptions.serde_qs]]
version = "0.8.5"
criteria = "safe-to-run"
[[exemptions.serde_spanned]]
version = "0.6.5"
criteria = "safe-to-deploy"
@ -1747,14 +1703,6 @@ criteria = "safe-to-deploy"
version = "1.0.0"
criteria = "safe-to-deploy"
[[exemptions.system-configuration]]
version = "0.5.1"
criteria = "safe-to-deploy"
[[exemptions.system-configuration-sys]]
version = "0.5.0"
criteria = "safe-to-deploy"
[[exemptions.tar]]
version = "0.4.40"
criteria = "safe-to-deploy"
@ -1819,8 +1767,16 @@ criteria = "safe-to-deploy"
version = "0.24.1"
criteria = "safe-to-deploy"
[[exemptions.tokio-rustls]]
version = "0.26.0"
criteria = "safe-to-deploy"
[[exemptions.tokio-tungstenite]]
version = "0.20.1"
version = "0.21.0"
criteria = "safe-to-deploy"
[[exemptions.tokio-tungstenite]]
version = "0.23.1"
criteria = "safe-to-deploy"
[[exemptions.tokio-util]]
@ -1848,11 +1804,11 @@ version = "0.22.9"
criteria = "safe-to-deploy"
[[exemptions.tonic]]
version = "0.8.3"
version = "0.10.2"
criteria = "safe-to-deploy"
[[exemptions.tonic]]
version = "0.10.2"
version = "0.12.0"
criteria = "safe-to-deploy"
[[exemptions.tower]]
@ -1860,7 +1816,7 @@ version = "0.4.13"
criteria = "safe-to-deploy"
[[exemptions.tower-http]]
version = "0.4.4"
version = "0.5.2"
criteria = "safe-to-deploy"
[[exemptions.tower-layer]]
@ -1883,20 +1839,12 @@ criteria = "safe-to-deploy"
version = "0.1.32"
criteria = "safe-to-deploy"
[[exemptions.tracing-futures]]
version = "0.2.5"
criteria = "safe-to-deploy"
[[exemptions.tracing-log]]
version = "0.1.4"
criteria = "safe-to-deploy"
[[exemptions.tracing-log]]
version = "0.2.0"
criteria = "safe-to-deploy"
[[exemptions.tracing-opentelemetry]]
version = "0.19.0"
version = "0.25.0"
criteria = "safe-to-deploy"
[[exemptions.try_map]]
@ -1904,7 +1852,11 @@ version = "0.3.1"
criteria = "safe-to-deploy"
[[exemptions.tungstenite]]
version = "0.20.1"
version = "0.21.0"
criteria = "safe-to-deploy"
[[exemptions.tungstenite]]
version = "0.23.0"
criteria = "safe-to-deploy"
[[exemptions.typenum]]
@ -1923,10 +1875,6 @@ criteria = "safe-to-deploy"
version = "0.9.10"
criteria = "safe-to-deploy"
[[exemptions.unicase]]
version = "2.7.0"
criteria = "safe-to-deploy"
[[exemptions.unicode-script]]
version = "0.5.6"
criteria = "safe-to-deploy"
@ -1963,10 +1911,6 @@ criteria = "safe-to-deploy"
version = "0.1.2"
criteria = "safe-to-deploy"
[[exemptions.waker-fn]]
version = "1.1.1"
criteria = "safe-to-run"
[[exemptions.walkdir]]
version = "2.5.0"
criteria = "safe-to-deploy"
@ -2124,11 +2068,11 @@ version = "0.6.5"
criteria = "safe-to-deploy"
[[exemptions.winreg]]
version = "0.50.0"
version = "0.52.0"
criteria = "safe-to-deploy"
[[exemptions.wiremock]]
version = "0.5.22"
version = "0.6.0"
criteria = "safe-to-run"
[[exemptions.ws_stream_wasm]]

View file

@ -79,6 +79,20 @@ user-id = 4484
user-login = "hsivonen"
user-name = "Henri Sivonen"
[[publisher.h2]]
version = "0.3.26"
when = "2024-04-03"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.h2]]
version = "0.4.5"
when = "2024-05-17"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.hashbrown]]
version = "0.14.5"
when = "2024-04-28"
@ -86,6 +100,76 @@ user-id = 2915
user-login = "Amanieu"
user-name = "Amanieu d'Antras"
[[publisher.headers]]
version = "0.4.0"
when = "2023-11-24"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.headers-core]]
version = "0.3.0"
when = "2023-11-24"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.http]]
version = "0.2.12"
when = "2024-03-04"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.http]]
version = "1.1.0"
when = "2024-03-04"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.http-body-util]]
version = "0.1.1"
when = "2024-03-11"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.httparse]]
version = "1.8.0"
when = "2022-08-30"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.hyper]]
version = "0.14.28"
when = "2023-12-18"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.hyper]]
version = "1.4.1"
when = "2024-07-09"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.hyper-tls]]
version = "0.6.0"
when = "2023-11-27"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.hyper-util]]
version = "0.1.6"
when = "2024-07-01"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.indxdb]]
version = "0.5.0"
when = "2024-06-10"
@ -100,6 +184,20 @@ user-id = 145457
user-login = "tobiemh"
user-name = "Tobie Morgan Hitchcock"
[[publisher.mime]]
version = "0.3.17"
when = "2023-03-20"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.num_cpus]]
version = "1.16.0"
when = "2023-06-29"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.psl-types]]
version = "2.0.11"
when = "2022-08-10"
@ -107,6 +205,13 @@ user-id = 3987
user-login = "rushmorem"
user-name = "Rushmore Mushambi"
[[publisher.reqwest]]
version = "0.12.5"
when = "2024-06-17"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.revision]]
version = "0.7.1"
when = "2024-06-19"
@ -198,6 +303,13 @@ user-id = 145457
user-login = "tobiemh"
user-name = "Tobie Morgan Hitchcock"
[[publisher.unicase]]
version = "2.7.0"
when = "2023-08-21"
user-id = 359
user-login = "seanmonstar"
user-name = "Sean McArthur"
[[publisher.unicode-normalization]]
version = "0.1.23"
when = "2024-02-20"
@ -231,7 +343,7 @@ who = "Nick Fitzgerald <fitzgen@gmail.com>"
criteria = "safe-to-deploy"
user-id = 696 # Nick Fitzgerald (fitzgen)
start = "2020-01-14"
end = "2024-04-21"
end = "2025-07-30"
notes = "I am an author of this crate."
[[audits.bytecode-alliance.wildcard-audits.bumpalo]]
@ -239,14 +351,14 @@ who = "Nick Fitzgerald <fitzgen@gmail.com>"
criteria = "safe-to-deploy"
user-id = 696 # Nick Fitzgerald (fitzgen)
start = "2019-03-16"
end = "2024-03-10"
end = "2025-07-30"
[[audits.bytecode-alliance.wildcard-audits.derive_arbitrary]]
who = "Nick Fitzgerald <fitzgen@gmail.com>"
criteria = "safe-to-deploy"
user-id = 696 # Nick Fitzgerald (fitzgen)
start = "2020-01-14"
end = "2024-04-27"
end = "2025-07-30"
notes = "I am an author of this crate"
[[audits.bytecode-alliance.audits.adler]]
@ -578,16 +690,6 @@ who = "Pat Hickey <phickey@fastly.com>"
criteria = "safe-to-deploy"
version = "0.3.0"
[[audits.bytecode-alliance.audits.webpki-roots]]
who = "Pat Hickey <phickey@fastly.com>"
criteria = "safe-to-deploy"
delta = "0.22.4 -> 0.23.0"
[[audits.bytecode-alliance.audits.webpki-roots]]
who = "Pat Hickey <phickey@fastly.com>"
criteria = "safe-to-deploy"
delta = "0.23.0 -> 0.25.2"
[[audits.embark-studios.audits.assert-json-diff]]
who = "Johan Andersson <opensource@embark-studios.com>"
criteria = "safe-to-run"
@ -666,12 +768,6 @@ criteria = "safe-to-deploy"
version = "0.1.0"
notes = "No unsafe usage or ambient capabilities, sane build script"
[[audits.embark-studios.audits.webpki-roots]]
who = "Johan Andersson <opensource@embark-studios.com>"
criteria = "safe-to-deploy"
version = "0.22.4"
notes = "Inspected it to confirm that it only contains data definitions and no runtime code"
[audits.fermyon.audits]
[[audits.google.audits.async-stream]]
@ -728,13 +824,6 @@ and nothing changed from the baseline audit of 1.1.0. Skimmed through the
'''
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
[[audits.google.audits.base64]]
who = "Adam Langley <agl@chromium.org>"
criteria = "safe-to-deploy"
version = "0.13.1"
notes = "Skimmed the uses of `std` to ensure that nothing untoward is happening. Code uses `forbid(unsafe_code)` and, indeed, there are no uses of `unsafe`"
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
[[audits.google.audits.bitflags]]
who = "Lukasz Anforowicz <lukasza@chromium.org>"
criteria = "safe-to-deploy"
@ -828,24 +917,12 @@ https://source.chromium.org/chromium/chromium/src/+/28841c33c77833cc30b286f9ae24
"""
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
[[audits.google.audits.http-range-header]]
who = "George Burgess IV <gbiv@google.com>"
criteria = "safe-to-deploy"
version = "0.3.1"
aggregated-from = "https://chromium.googlesource.com/chromiumos/third_party/rust_crates/+/refs/heads/main/cargo-vet/audits.toml?format=TEXT"
[[audits.google.audits.httpdate]]
who = "George Burgess IV <gbiv@google.com>"
criteria = "safe-to-deploy"
version = "1.0.3"
aggregated-from = "https://chromium.googlesource.com/chromiumos/third_party/rust_crates/+/refs/heads/main/cargo-vet/audits.toml?format=TEXT"
[[audits.google.audits.instant]]
who = "George Burgess IV <gbiv@google.com>"
criteria = "safe-to-run"
version = "0.1.12"
aggregated-from = "https://chromium.googlesource.com/chromiumos/third_party/rust_crates/+/refs/heads/main/cargo-vet/audits.toml?format=TEXT"
[[audits.google.audits.itoa]]
who = "Lukasz Anforowicz <lukasza@chromium.org>"
criteria = "safe-to-deploy"
@ -1462,13 +1539,6 @@ version = "0.12.3"
notes = "This version is used in rust's libstd, so effectively we're already trusting it"
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
[[audits.mozilla.audits.headers-core]]
who = "Bobby Holley <bobbyholley@gmail.com>"
criteria = "safe-to-deploy"
version = "0.2.0"
notes = "Trivial crate, no unsafe code."
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
[[audits.mozilla.audits.hex]]
who = "Simon Friedberger <simon@mozilla.com>"
criteria = "safe-to-deploy"
@ -1878,6 +1948,13 @@ criteria = "safe-to-deploy"
delta = "0.3.28 -> 0.3.30"
aggregated-from = "https://raw.githubusercontent.com/zcash/librustzcash/main/supply-chain/audits.toml"
[[audits.zcash.audits.hyper-timeout]]
who = "Jack Grigg <jack@electriccoin.co>"
criteria = "safe-to-deploy"
delta = "0.4.1 -> 0.5.1"
notes = "New uses of pin_project! look fine."
aggregated-from = "https://raw.githubusercontent.com/zcash/librustzcash/main/supply-chain/audits.toml"
[[audits.zcash.audits.inout]]
who = "Daira Hopwood <daira@jacaranda.org>"
criteria = "safe-to-deploy"
@ -1966,6 +2043,48 @@ be set correctly by `cargo`.
"""
aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
[[audits.zcash.audits.rustls]]
who = "Daira-Emma Hopwood <daira@jacaranda.org>"
criteria = "safe-to-deploy"
delta = "0.21.8 -> 0.21.12"
notes = """
A comment in get_sni_extension asks whether the behaviour of parsing an IPv4 or IPv6 address
in a host_name field of a server_name extension, but then ignoring the extension (because
'Literal IPv4 and IPv6 addresses are not permitted in \"HostName\"'), as the server, is
compliant with RFC 6066. As an original author of RFC 3546 which has very similar wording,
I can speak to the intent: yes this is fine. The client is clearly nonconformant in this
case, but the server isn't.
RFC 3546 said \"If the server understood the client hello extension but does not recognize
the server name, it SHOULD send an \"unrecognized_name\" alert (which MAY be fatal).\"
This wording was preserved in RFC 5746, and then updated in RFC 6066 to:
If the server understood the ClientHello extension but
does not recognize the server name, the server SHOULD take one of two
actions: either abort the handshake by sending a fatal-level
unrecognized_name(112) alert or continue the handshake. It is NOT
RECOMMENDED to send a warning-level unrecognized_name(112) alert,
because the client's behavior in response to warning-level alerts is
unpredictable. If there is a mismatch between the server name used
by the client application and the server name of the credential
chosen by the server, this mismatch will become apparent when the
client application performs the server endpoint identification, at
which point the client application will have to decide whether to
proceed with the communication.
To me it's clear that it is reasonable to consider an IP address as a name that the
server does not recognize. And so the server SHOULD *either* send a fatal unrecognized_name
alert, *or* continue the handshake and let the client application decide when it \"performs
the server endpoint identification\". There's no conformance requirement for the server to
take any notice of a host_name that is \"not permitted\". (It would have been clearer to
express this by specifying the allowed client and server behaviour separately, i.e. saying
that the client MUST NOT send an IP address in host_name, and then explicitly specifying
the server behaviour if it does so anyway. That's how I would write it now. But honestly
this extension was one of the most bikeshedded parts of RFC 3546, to a much greater extent
than I'd anticipated, and I was tired.)
"""
aggregated-from = "https://raw.githubusercontent.com/zcash/librustzcash/main/supply-chain/audits.toml"
[[audits.zcash.audits.semver]]
who = "Jack Grigg <jack@electriccoin.co>"
criteria = "safe-to-deploy"
@ -2143,6 +2262,13 @@ criteria = "safe-to-deploy"
delta = "0.1.14 -> 0.1.15"
aggregated-from = "https://raw.githubusercontent.com/zcash/librustzcash/main/supply-chain/audits.toml"
[[audits.zcash.audits.tonic]]
who = "Jack Grigg <jack@electriccoin.co>"
criteria = "safe-to-deploy"
delta = "0.12.0 -> 0.12.1"
notes = "Changes to generics bounds look fine"
aggregated-from = "https://raw.githubusercontent.com/zcash/librustzcash/main/supply-chain/audits.toml"
[[audits.zcash.audits.tracing-subscriber]]
who = "Jack Grigg <jack@electriccoin.co>"
criteria = "safe-to-deploy"
@ -2202,10 +2328,3 @@ who = "Daira-Emma Hopwood <daira@jacaranda.org>"
criteria = "safe-to-deploy"
delta = "0.2.89 -> 0.2.92"
aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
[[audits.zcash.audits.webpki-roots]]
who = "Daira-Emma Hopwood <daira@jacaranda.org>"
criteria = "safe-to-deploy"
delta = "0.25.2 -> 0.25.4"
notes = "I have not checked consistency with the Mozilla IncludedCACertificateReportPEMCSV report."
aggregated-from = "https://raw.githubusercontent.com/zcash/librustzcash/main/supply-chain/audits.toml"

View file

@ -6,7 +6,7 @@ mod ml_integration {
use super::*;
use http::{header, StatusCode};
use hyper::Body;
use reqwest::Body;
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;