Remove SpeeDB storage engine (#4171)

This commit is contained in:
Tobie Morgan Hitchcock 2024-06-12 10:40:48 +01:00 committed by GitHub
parent 9ffe94a02f
commit e1123ae6d6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
32 changed files with 41 additions and 935 deletions

View file

@ -550,29 +550,6 @@ jobs:
- name: Test rocksdb engine
run: cargo make ci-api-integration-rocksdb
speedb-engine:
name: SpeeDB engine
runs-on: ubuntu-latest
steps:
- name: Install stable toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- name: Checkout sources
uses: actions/checkout@v4
- name: Setup cache
uses: Swatinem/rust-cache@v2
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-make
run: cargo install --debug --locked cargo-make
- name: Test speedb engine
run: cargo make ci-api-integration-speedb
tikv-engine:
name: TiKV engine
runs-on: ubuntu-latest

27
Cargo.lock generated
View file

@ -3192,22 +3192,6 @@ dependencies = [
"zstd-sys",
]
[[package]]
name = "libspeedb-sys"
version = "0.0.4+2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14468e1c90e57078dcc625b045d9ad38ed53550d21282d886c48d521163ed05b"
dependencies = [
"bindgen 0.65.1",
"bzip2-sys",
"cc",
"glob",
"libc",
"libz-sys",
"lz4-sys",
"zstd-sys",
]
[[package]]
name = "libz-sys"
version = "1.1.16"
@ -5767,16 +5751,6 @@ dependencies = [
"smallvec",
]
[[package]]
name = "speedb"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78a8bab270d7d5a088d3dd9f1eb6ce0f0c5ee6261acff6b04b3e230bfafe8a1c"
dependencies = [
"libc",
"libspeedb-sys",
]
[[package]]
name = "spin"
version = "0.5.2"
@ -6109,7 +6083,6 @@ dependencies = [
"sha1",
"sha2",
"snap",
"speedb",
"storekey",
"surrealdb-derive",
"surrealdb-jsonwebtoken",

View file

@ -11,7 +11,6 @@ authors = ["Tobie Morgan Hitchcock <tobie@surrealdb.com>"]
default = ["storage-mem", "storage-rocksdb", "scripting", "http"]
storage-mem = ["surrealdb/kv-mem"]
storage-rocksdb = ["surrealdb/kv-rocksdb"]
storage-speedb = ["surrealdb/kv-speedb"]
storage-tikv = ["surrealdb/kv-tikv"]
storage-fdb = ["surrealdb/kv-fdb-7_1"]
storage-surrealkv = ["surrealdb/kv-surrealkv"]

View file

@ -17,7 +17,7 @@ args = ["check", "--locked", "--package", "surrealdb", "--features", "protocol-w
category = "CI - CHECK"
command = "cargo"
env = { RUSTFLAGS = "--cfg surrealdb_unstable" }
args = ["clippy", "--all-targets", "--features", "storage-mem,storage-rocksdb,storage-speedb,storage-tikv,storage-fdb,scripting,http,jwks", "--tests", "--benches", "--examples", "--bins", "--", "-D", "warnings"]
args = ["clippy", "--all-targets", "--features", "storage-mem,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks", "--tests", "--benches", "--examples", "--bins", "--", "-D", "warnings"]
#
# Integration Tests
@ -142,11 +142,6 @@ category = "CI - INTEGRATION TESTS"
env = { _TEST_API_ENGINE = "rocksdb", _TEST_FEATURES = "kv-rocksdb" }
run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = true }
[tasks.ci-api-integration-speedb]
category = "CI - INTEGRATION TESTS"
env = { _TEST_API_ENGINE = "speedb", _TEST_FEATURES = "kv-speedb" }
run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = true }
[tasks.ci-api-integration-fdb]
category = "CI - INTEGRATION TESTS"
env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb-7_1" }

View file

@ -18,7 +18,7 @@ dependencies = ["cargo-upgrade", "cargo-update"]
category = "LOCAL USAGE"
command = "cargo"
env = { RUSTDOCFLAGS = "--cfg surrealdb_unstable" }
args = ["doc", "--open", "--no-deps", "--package", "surrealdb", "--features", "rustls,native-tls,protocol-ws,protocol-http,kv-mem,kv-speedb,kv-rocksdb,kv-tikv,http,scripting,jwks"]
args = ["doc", "--open", "--no-deps", "--package", "surrealdb", "--features", "rustls,native-tls,protocol-ws,protocol-http,kv-mem,kv-rocksdb,kv-tikv,http,scripting,jwks"]
# Test
[tasks.test]

View file

@ -26,7 +26,6 @@ resolver = "2"
default = ["kv-mem"]
kv-mem = ["dep:echodb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
kv-indxdb = ["dep:indxdb"]
kv-speedb = ["dep:speedb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
kv-rocksdb = ["dep:rocksdb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
kv-tikv = ["dep:tikv", "dep:tempfile", "dep:ext-sort"]
kv-fdb-5_1 = ["foundationdb/fdb-5_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
@ -142,7 +141,6 @@ serde_json = "1.0.108"
sha1 = "0.10.6"
sha2 = "0.10.8"
snap = "1.1.0"
speedb = { version = "0.0.4", features = ["lz4", "snappy"], optional = true }
storekey = "0.5.0"
surrealkv = { version = "0.1.5", optional = true }
surrealml = { version = "0.1.1", optional = true, package = "surrealml-core" }

View file

@ -46,7 +46,6 @@ pub static INSECURE_FORWARD_RECORD_ACCESS_ERRORS: Lazy<bool> =
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
/// Specifies the buffer limit for external sorting.
/// If the environment variable is not present or cannot be parsed, a default value of 50,000 is used.

View file

@ -22,7 +22,6 @@ use std::fmt::{self, Debug};
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
use std::path::PathBuf;
use std::str::FromStr;
@ -73,7 +72,6 @@ pub struct Context<'a> {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
// The temporary directory
temporary_directory: Option<Arc<PathBuf>>,
@ -110,7 +108,6 @@ impl<'a> Context<'a> {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
temporary_directory: Option<Arc<PathBuf>>,
) -> Result<Context<'a>, Error> {
@ -132,7 +129,6 @@ impl<'a> Context<'a> {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
temporary_directory,
transaction: None,
@ -162,7 +158,6 @@ impl<'a> Context<'a> {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
temporary_directory: None,
transaction: None,
@ -189,7 +184,6 @@ impl<'a> Context<'a> {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
temporary_directory: parent.temporary_directory.clone(),
transaction: parent.transaction.clone(),
@ -328,7 +322,6 @@ impl<'a> Context<'a> {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
/// Return the location of the temporary directory if any
pub fn temporary_directory(&self) -> Option<&Arc<PathBuf>> {

View file

@ -301,7 +301,6 @@ impl Iterator {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
ctx,
stm,

View file

@ -8,7 +8,6 @@ use crate::dbs::plan::Explanation;
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
use crate::dbs::store::file_store::FileCollector;
use crate::dbs::store::MemoryCollector;
@ -27,7 +26,6 @@ pub(super) enum Results {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
File(Box<FileCollector>),
Groups(GroupsCollector),
@ -43,7 +41,6 @@ impl Results {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
ctx: &Context<'_>,
stm: &Statement<'_>,
@ -58,7 +55,6 @@ impl Results {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
if stm.tempfiles() {
if let Some(temp_dir) = ctx.temporary_directory() {
@ -88,7 +84,6 @@ impl Results {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
Self::File(e) => {
e.push(val)?;
@ -110,7 +105,6 @@ impl Results {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
Self::File(f) => f.sort(orders),
_ => {}
@ -128,7 +122,6 @@ impl Results {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
Self::File(f) => f.start_limit(start, limit),
Self::Groups(_) => {}
@ -146,7 +139,6 @@ impl Results {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
Self::File(e) => e.len(),
Self::Groups(g) => g.len(),
@ -163,7 +155,6 @@ impl Results {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
Self::File(f) => f.take_vec()?,
_ => vec![],
@ -183,7 +174,6 @@ impl Results {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
Self::File(e) => {
e.explain(exp);

View file

@ -242,7 +242,6 @@ impl<'a> Statement<'a> {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
pub fn tempfiles(&self) -> bool {
match self {

View file

@ -56,7 +56,6 @@ impl From<Vec<Value>> for MemoryCollector {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
pub(super) mod file_store {
use crate::cnf::EXTERNAL_SORTING_BUFFER_LIMIT;

View file

@ -18,7 +18,6 @@ use bincode::Error as BincodeError;
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
use ext_sort::SortError;
use fst::Error as FstError;
@ -1078,13 +1077,6 @@ impl From<tikv::Error> for Error {
}
}
#[cfg(feature = "kv-speedb")]
impl From<speedb::Error> for Error {
fn from(e: speedb::Error) -> Error {
Error::Tx(e.to_string())
}
}
#[cfg(feature = "kv-rocksdb")]
impl From<rocksdb::Error> for Error {
fn from(e: rocksdb::Error) -> Error {
@ -1125,7 +1117,6 @@ impl From<reqwest::Error> for Error {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
impl<S, D, I> From<SortError<S, D, I>> for Error
where

View file

@ -7,7 +7,6 @@ use std::fmt;
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
use std::path::PathBuf;
use std::sync::Arc;
@ -100,7 +99,6 @@ pub struct Datastore {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
// The temporary directory
temporary_directory: Option<Arc<PathBuf>>,
@ -117,8 +115,6 @@ pub(super) enum Inner {
Mem(super::mem::Datastore),
#[cfg(feature = "kv-rocksdb")]
RocksDB(super::rocksdb::Datastore),
#[cfg(feature = "kv-speedb")]
SpeeDB(super::speedb::Datastore),
#[cfg(feature = "kv-indxdb")]
IndxDB(super::indxdb::Datastore),
#[cfg(feature = "kv-tikv")]
@ -137,8 +133,6 @@ impl fmt::Display for Datastore {
Inner::Mem(_) => write!(f, "memory"),
#[cfg(feature = "kv-rocksdb")]
Inner::RocksDB(_) => write!(f, "rocksdb"),
#[cfg(feature = "kv-speedb")]
Inner::SpeeDB(_) => write!(f, "speedb"),
#[cfg(feature = "kv-indxdb")]
Inner::IndxDB(_) => write!(f, "indxdb"),
#[cfg(feature = "kv-tikv")]
@ -216,7 +210,6 @@ impl Datastore {
#[cfg(not(any(
feature = "kv-mem",
feature = "kv-rocksdb",
feature = "kv-speedb",
feature = "kv-indxdb",
feature = "kv-tikv",
feature = "kv-fdb",
@ -270,22 +263,6 @@ impl Datastore {
}
#[cfg(not(feature = "kv-rocksdb"))]
return Err(Error::Ds("Cannot connect to the `rocksdb` storage engine as it is not enabled in this build of SurrealDB".to_owned()));
}
// Parse and initiate an SpeeDB database
s if s.starts_with("speedb:") => {
#[cfg(feature = "kv-speedb")]
{
info!("Starting kvs store at {}", path);
let s = s.trim_start_matches("speedb://");
let s = s.trim_start_matches("speedb:");
let v = super::speedb::Datastore::new(s).await.map(Inner::SpeeDB);
info!("Started kvs store at {}", path);
let default_clock = Arc::new(SizedClock::System(SystemClock::new()));
let clock = clock_override.unwrap_or(default_clock);
Ok((v, clock))
}
#[cfg(not(feature = "kv-speedb"))]
return Err(Error::Ds("Cannot connect to the `speedb` storage engine as it is not enabled in this build of SurrealDB".to_owned()));
}
// Parse and initiate an IndxDB database
s if s.starts_with("indxdb:") => {
@ -382,7 +359,6 @@ impl Datastore {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
temporary_directory: None,
lq_cf_store: Arc::new(RwLock::new(LiveQueryTracker::new())),
@ -438,7 +414,6 @@ impl Datastore {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
pub fn with_temporary_directory(mut self, path: PathBuf) -> Self {
self.temporary_directory = Some(Arc::new(path));
@ -1037,11 +1012,6 @@ impl Datastore {
let tx = v.transaction(write, lock).await?;
super::tx::Inner::RocksDB(tx)
}
#[cfg(feature = "kv-speedb")]
Inner::SpeeDB(v) => {
let tx = v.transaction(write, lock).await?;
super::tx::Inner::SpeeDB(tx)
}
#[cfg(feature = "kv-indxdb")]
Inner::IndxDB(v) => {
let tx = v.transaction(write, lock).await?;
@ -1170,7 +1140,6 @@ impl Datastore {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
self.temporary_directory.clone(),
)?;

View file

@ -9,7 +9,6 @@
//! - `fdb`: [FoundationDB](https://github.com/apple/foundationdb/) a distributed database designed to handle large volumes of structured data across clusters of commodity servers
//! - `indxdb`: WASM based database to store data in the browser
//! - `rocksdb`: [RocksDB](https://github.com/facebook/rocksdb) an embeddable persistent key-value store for fast storage
//! - `speedb`: [Speedb](https://github.com/speedb-io/speedb) a fork of RocksDB that aims to be faster (Redis uses Speedb, but it does not provide ACID transactions)
//! - `tikv`: [TiKV](https://github.com/tikv/tikv) a distributed, and transactional key-value database
//! - `mem`: in-memory database
mod cache;
@ -20,7 +19,6 @@ mod indxdb;
mod kv;
mod mem;
mod rocksdb;
mod speedb;
mod surrealkv;
mod tikv;
mod tx;

View file

@ -1,29 +0,0 @@
use crate::{lazy_env_parse, lazy_env_parse_or_else};
use once_cell::sync::Lazy;
pub static SPEEDB_THREAD_COUNT: Lazy<i32> =
lazy_env_parse_or_else!("SURREAL_SPEEDB_THREAD_COUNT", i32, |_| num_cpus::get() as i32);
pub static SPEEDB_WRITE_BUFFER_SIZE: Lazy<usize> =
lazy_env_parse!("SURREAL_SPEEDB_WRITE_BUFFER_SIZE", usize, 256 * 1024 * 1024);
pub static SPEEDB_TARGET_FILE_SIZE_BASE: Lazy<u64> =
lazy_env_parse!("SURREAL_SPEEDB_TARGET_FILE_SIZE_BASE", u64, 512 * 1024 * 1024);
pub static SPEEDB_MAX_WRITE_BUFFER_NUMBER: Lazy<i32> =
lazy_env_parse!("SURREAL_SPEEDB_MAX_WRITE_BUFFER_NUMBER", i32, 32);
pub static SPEEDB_MIN_WRITE_BUFFER_NUMBER_TO_MERGE: Lazy<i32> =
lazy_env_parse!("SURREAL_SPEEDB_MIN_WRITE_BUFFER_NUMBER_TO_MERGE", i32, 4);
pub static SPEEDB_ENABLE_PIPELINED_WRITES: Lazy<bool> =
lazy_env_parse!("SURREAL_SPEEDB_ENABLE_PIPELINED_WRITES", bool, true);
pub static SPEEDB_ENABLE_BLOB_FILES: Lazy<bool> =
lazy_env_parse!("SURREAL_SPEEDB_ENABLE_BLOB_FILES", bool, true);
pub static SPEEDB_MIN_BLOB_SIZE: Lazy<u64> =
lazy_env_parse!("SURREAL_SPEEDB_MIN_BLOB_SIZE", u64, 4 * 1024);
pub static SPEEDB_KEEP_LOG_FILE_NUM: Lazy<usize> =
lazy_env_parse!("SURREAL_SPEEDB_KEEP_LOG_FILE_NUM", usize, 20);

View file

@ -1,482 +0,0 @@
#![cfg(feature = "kv-speedb")]
mod cnf;
use crate::err::Error;
use crate::key::error::KeyCategory;
use crate::kvs::Check;
use crate::kvs::Key;
use crate::kvs::Val;
use crate::vs::{try_to_u64_be, u64_to_versionstamp, Versionstamp};
use futures::lock::Mutex;
use speedb::{
DBCompactionStyle, DBCompressionType, LogLevel, OptimisticTransactionDB,
OptimisticTransactionOptions, Options, ReadOptions, WriteOptions,
};
use std::ops::Range;
use std::pin::Pin;
use std::sync::Arc;
#[derive(Clone)]
#[non_exhaustive]
pub struct Datastore {
db: Pin<Arc<OptimisticTransactionDB>>,
}
#[non_exhaustive]
pub struct Transaction {
// Is the transaction complete?
done: bool,
// Is the transaction writeable?
write: bool,
/// Should we check unhandled transactions?
check: Check,
/// The underlying datastore transaction
inner: Arc<Mutex<Option<speedb::Transaction<'static, OptimisticTransactionDB>>>>,
// The read options containing the Snapshot
ro: ReadOptions,
// The above, supposedly 'static transaction
// actually points here, so we need to ensure
// the memory is kept alive. This pointer must
// be declared last, so that it is dropped last
_db: Pin<Arc<OptimisticTransactionDB>>,
}
impl Drop for Transaction {
fn drop(&mut self) {
if !self.done && self.write {
// Check if already panicking
if std::thread::panicking() {
return;
}
// Handle the behaviour
match self.check {
Check::None => {
trace!("A transaction was dropped without being committed or cancelled");
}
Check::Warn => {
warn!("A transaction was dropped without being committed or cancelled");
}
Check::Panic => {
#[cfg(debug_assertions)]
{
let backtrace = std::backtrace::Backtrace::force_capture();
if let std::backtrace::BacktraceStatus::Captured = backtrace.status() {
println!("{}", backtrace);
}
}
panic!("A transaction was dropped without being committed or cancelled");
}
}
}
}
}
impl Datastore {
/// Open a new database
pub(crate) async fn new(path: &str) -> Result<Datastore, Error> {
// Configure custom options
let mut opts = Options::default();
// Ensure we use fdatasync
opts.set_use_fsync(false);
// Only use warning log level
opts.set_log_level(LogLevel::Warn);
// Set the number of log files to keep
opts.set_keep_log_file_num(*cnf::SPEEDB_KEEP_LOG_FILE_NUM);
// Create database if missing
opts.create_if_missing(true);
// Create column families if missing
opts.create_missing_column_families(true);
// Set the datastore compaction style
opts.set_compaction_style(DBCompactionStyle::Level);
// Increase the background thread count
opts.increase_parallelism(*cnf::SPEEDB_THREAD_COUNT);
// Set the maximum number of write buffers
opts.set_max_write_buffer_number(*cnf::SPEEDB_MAX_WRITE_BUFFER_NUMBER);
// Set the amount of data to build up in memory
opts.set_write_buffer_size(*cnf::SPEEDB_WRITE_BUFFER_SIZE);
// Set the target file size for compaction
opts.set_target_file_size_base(*cnf::SPEEDB_TARGET_FILE_SIZE_BASE);
// Set minimum number of write buffers to merge
opts.set_min_write_buffer_number_to_merge(*cnf::SPEEDB_MIN_WRITE_BUFFER_NUMBER_TO_MERGE);
// Use separate write thread queues
opts.set_enable_pipelined_write(*cnf::SPEEDB_ENABLE_PIPELINED_WRITES);
// Enable separation of keys and values
opts.set_enable_blob_files(*cnf::SPEEDB_ENABLE_BLOB_FILES);
// Store 4KB values separate from keys
opts.set_min_blob_size(*cnf::SPEEDB_MIN_BLOB_SIZE);
// Set specific compression levels
opts.set_compression_per_level(&[
DBCompressionType::None,
DBCompressionType::None,
DBCompressionType::Lz4hc,
DBCompressionType::Lz4hc,
DBCompressionType::Lz4hc,
]);
// Create the datastore
Ok(Datastore {
db: Arc::pin(OptimisticTransactionDB::open(&opts, path)?),
})
}
/// Start a new transaction
pub(crate) async fn transaction(&self, write: bool, _: bool) -> Result<Transaction, Error> {
// Set the transaction options
let mut to = OptimisticTransactionOptions::default();
to.set_snapshot(true);
// Set the write options
let mut wo = WriteOptions::default();
wo.set_sync(false);
// Create a new transaction
let inner = self.db.transaction_opt(&wo, &to);
// The database reference must always outlive
// the transaction. If it doesn't then this
// is undefined behaviour. This unsafe block
// ensures that the transaction reference is
// static, but will cause a crash if the
// datastore is dropped prematurely.
let inner = unsafe {
std::mem::transmute::<
speedb::Transaction<'_, OptimisticTransactionDB>,
speedb::Transaction<'static, OptimisticTransactionDB>,
>(inner)
};
let mut ro = ReadOptions::default();
ro.set_snapshot(&inner.snapshot());
ro.fill_cache(true);
// Specify the check level
#[cfg(not(debug_assertions))]
let check = Check::Warn;
#[cfg(debug_assertions)]
let check = Check::Panic;
// Create a new transaction
Ok(Transaction {
done: false,
check,
write,
inner: Arc::new(Mutex::new(Some(inner))),
ro,
_db: self.db.clone(),
})
}
}
impl Transaction {
/// Behaviour if unclosed
pub(crate) fn check_level(&mut self, check: Check) {
self.check = check;
}
/// Check if closed
pub(crate) fn closed(&self) -> bool {
self.done
}
/// Cancel a transaction
pub(crate) async fn cancel(&mut self) -> Result<(), Error> {
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Mark this transaction as done
self.done = true;
// Cancel this transaction
match self.inner.lock().await.take() {
Some(inner) => inner.rollback()?,
None => unreachable!(),
};
// Continue
Ok(())
}
/// Commit a transaction
pub(crate) async fn commit(&mut self) -> Result<(), Error> {
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Check to see if transaction is writable
if !self.write {
return Err(Error::TxReadonly);
}
// Mark this transaction as done
self.done = true;
// Cancel this transaction
match self.inner.lock().await.take() {
Some(inner) => inner.commit()?,
None => unreachable!(),
};
// Continue
Ok(())
}
/// Check if a key exists
pub(crate) async fn exi<K>(&mut self, key: K) -> Result<bool, Error>
where
K: Into<Key>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Check the key
let res =
self.inner.lock().await.as_ref().unwrap().get_opt(key.into(), &self.ro)?.is_some();
// Return result
Ok(res)
}
/// Fetch a key from the database
pub(crate) async fn get<K>(&mut self, key: K) -> Result<Option<Val>, Error>
where
K: Into<Key>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Get the key
let res = self.inner.lock().await.as_ref().unwrap().get_opt(key.into(), &self.ro)?;
// Return result
Ok(res)
}
/// Obtain a new change timestamp for a key
/// which is replaced with the current timestamp when the transaction is committed.
/// NOTE: This should be called when composing the change feed entries for this transaction,
/// which should be done immediately before the transaction commit.
/// That is to keep other transactions commit delay(pessimistic) or conflict(optimistic) as less as possible.
#[allow(unused)]
pub(crate) async fn get_timestamp<K>(&mut self, key: K) -> Result<Versionstamp, Error>
where
K: Into<Key>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Write the timestamp to the "last-write-timestamp" key
// to ensure that no other transactions can commit with older timestamps.
let k: Key = key.into();
let prev = self.inner.lock().await.as_ref().unwrap().get_opt(k.clone(), &self.ro)?;
let ver = match prev {
Some(prev) => {
let slice = prev.as_slice();
let res: Result<[u8; 10], Error> = match slice.try_into() {
Ok(ba) => Ok(ba),
Err(e) => Err(Error::Ds(e.to_string())),
};
let array = res?;
let prev = try_to_u64_be(array)?;
prev + 1
}
None => 1,
};
let verbytes = u64_to_versionstamp(ver);
self.inner.lock().await.as_ref().unwrap().put(k, verbytes)?;
// Return the uint64 representation of the timestamp as the result
Ok(verbytes)
}
/// Obtain a new key that is suffixed with the change timestamp
pub(crate) async fn get_versionstamped_key<K>(
&mut self,
ts_key: K,
prefix: K,
suffix: K,
) -> Result<Vec<u8>, Error>
where
K: Into<Key>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Check to see if transaction is writable
if !self.write {
return Err(Error::TxReadonly);
}
let ts = self.get_timestamp(ts_key).await?;
let mut k: Vec<u8> = prefix.into();
k.append(&mut ts.to_vec());
k.append(&mut suffix.into());
Ok(k)
}
/// Insert or update a key in the database
pub(crate) async fn set<K, V>(&mut self, key: K, val: V) -> Result<(), Error>
where
K: Into<Key>,
V: Into<Val>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Check to see if transaction is writable
if !self.write {
return Err(Error::TxReadonly);
}
// Set the key
self.inner.lock().await.as_ref().unwrap().put(key.into(), val.into())?;
// Return result
Ok(())
}
/// Insert a key if it doesn't exist in the database
pub(crate) async fn put<K, V>(
&mut self,
category: KeyCategory,
key: K,
val: V,
) -> Result<(), Error>
where
K: Into<Key>,
V: Into<Val>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Check to see if transaction is writable
if !self.write {
return Err(Error::TxReadonly);
}
// Get the transaction
let inner = self.inner.lock().await;
let inner = inner.as_ref().unwrap();
// Get the arguments
let key = key.into();
let val = val.into();
// Set the key if empty
match inner.get_opt(&key, &self.ro)? {
None => inner.put(key, val)?,
_ => return Err(Error::TxKeyAlreadyExistsCategory(category)),
};
// Return result
Ok(())
}
/// Insert a key if it doesn't exist in the database
pub(crate) async fn putc<K, V>(&mut self, key: K, val: V, chk: Option<V>) -> Result<(), Error>
where
K: Into<Key>,
V: Into<Val>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Check to see if transaction is writable
if !self.write {
return Err(Error::TxReadonly);
}
// Get the transaction
let inner = self.inner.lock().await;
let inner = inner.as_ref().unwrap();
// Get the arguments
let key = key.into();
let val = val.into();
let chk = chk.map(Into::into);
// Set the key if valid
match (inner.get_opt(&key, &self.ro)?, chk) {
(Some(v), Some(w)) if v == w => inner.put(key, val)?,
(None, None) => inner.put(key, val)?,
_ => return Err(Error::TxConditionNotMet),
};
// Return result
Ok(())
}
/// Delete a key
pub(crate) async fn del<K>(&mut self, key: K) -> Result<(), Error>
where
K: Into<Key>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Check to see if transaction is writable
if !self.write {
return Err(Error::TxReadonly);
}
// Remove the key
self.inner.lock().await.as_ref().unwrap().delete(key.into())?;
// Return result
Ok(())
}
/// Delete a key
pub(crate) async fn delc<K, V>(&mut self, key: K, chk: Option<V>) -> Result<(), Error>
where
K: Into<Key>,
V: Into<Val>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Check to see if transaction is writable
if !self.write {
return Err(Error::TxReadonly);
}
// Get the transaction
let inner = self.inner.lock().await;
let inner = inner.as_ref().unwrap();
// Get the arguments
let key = key.into();
let chk = chk.map(Into::into);
// Delete the key if valid
match (inner.get_opt(&key, &self.ro)?, chk) {
(Some(v), Some(w)) if v == w => inner.delete(key)?,
(None, None) => inner.delete(key)?,
_ => return Err(Error::TxConditionNotMet),
};
// Return result
Ok(())
}
/// Retrieve a range of keys from the databases
pub(crate) async fn scan<K>(
&mut self,
rng: Range<K>,
limit: u32,
) -> Result<Vec<(Key, Val)>, Error>
where
K: Into<Key>,
{
// Check to see if transaction is closed
if self.done {
return Err(Error::TxFinished);
}
// Get the transaction
let inner = self.inner.lock().await;
let inner = inner.as_ref().unwrap();
// Convert the range to bytes
let rng: Range<Key> = Range {
start: rng.start.into(),
end: rng.end.into(),
};
// Create result set
let mut res: Vec<(Key, Val)> = vec![];
// Set the key range
let beg = rng.start.as_slice();
let end = rng.end.as_slice();
// Set the ReadOptions with the snapshot
let mut ro = ReadOptions::default();
ro.set_snapshot(&inner.snapshot());
// Create the iterator
let mut iter = inner.raw_iterator_opt(ro);
// Seek to the start key
iter.seek(&rng.start);
// Scan the keys in the iterator
while iter.valid() {
// Check the scan limit
if res.len() < limit as usize {
// Get the key and value
let (k, v) = (iter.key(), iter.value());
// Check the key and value
if let (Some(k), Some(v)) = (k, v) {
if k >= beg && k < end {
res.push((k.to_vec(), v.to_vec()));
iter.next();
continue;
}
}
}
// Exit
break;
}
// Return result
Ok(res)
}
}

View file

@ -1,7 +1,6 @@
#![cfg(any(
feature = "kv-mem",
feature = "kv-rocksdb",
feature = "kv-speedb",
feature = "kv-indxdb",
feature = "kv-tikv",
feature = "kv-fdb",
@ -18,8 +17,6 @@ pub(crate) enum Kvs {
#[allow(dead_code)]
Rocksdb,
#[allow(dead_code)]
Speedb,
#[allow(dead_code)]
Tikv,
#[allow(dead_code)]
Fdb,
@ -125,53 +122,6 @@ mod rocksdb {
include!("tx_test.rs");
}
#[cfg(feature = "kv-speedb")]
mod speedb {
use crate::kvs::tests::{ClockType, Kvs};
use crate::kvs::Transaction;
use crate::kvs::{Datastore, LockType, TransactionType};
use serial_test::serial;
use temp_dir::TempDir;
async fn new_ds(node_id: Uuid, clock_override: ClockType) -> (Datastore, Kvs) {
let path = TempDir::new().unwrap().path().to_string_lossy().to_string();
(
Datastore::new_full(format!("speedb:{path}").as_str(), Some(clock_override))
.await
.unwrap()
.with_node_id(sql::Uuid::from(node_id)),
Kvs::Speedb,
)
}
async fn new_tx(write: TransactionType, lock: LockType) -> Transaction {
// Shared node id for one-off transactions
// We should delete this, node IDs should be known.
let new_tx_uuid = Uuid::parse_str("5877e580-12ac-49e4-95e1-3c407c4887f3").unwrap();
let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
new_ds(new_tx_uuid, clock).await.0.transaction(write, lock).await.unwrap()
}
include!("cluster_init.rs");
include!("hb.rs");
include!("helper.rs");
include!("lq.rs");
include!("nq.rs");
include!("raw.rs");
include!("snapshot.rs");
include!("tb.rs");
include!("multireader.rs");
include!("multiwriter_different_keys.rs");
include!("multiwriter_same_keys_conflict.rs");
include!("timestamp_to_versionstamp.rs");
include!("nd.rs");
include!("ndlq.rs");
include!("tblq.rs");
include!("tbnt.rs");
include!("tx_test.rs");
}
#[cfg(feature = "kv-tikv")]
mod tikv {

View file

@ -107,8 +107,6 @@ pub(super) enum Inner {
Mem(super::mem::Transaction),
#[cfg(feature = "kv-rocksdb")]
RocksDB(super::rocksdb::Transaction),
#[cfg(feature = "kv-speedb")]
SpeeDB(super::speedb::Transaction),
#[cfg(feature = "kv-indxdb")]
IndxDB(super::indxdb::Transaction),
#[cfg(feature = "kv-tikv")]
@ -149,8 +147,6 @@ impl fmt::Display for Transaction {
Inner::Mem(_) => write!(f, "memory"),
#[cfg(feature = "kv-rocksdb")]
Inner::RocksDB(_) => write!(f, "rocksdb"),
#[cfg(feature = "kv-speedb")]
Inner::SpeeDB(_) => write!(f, "speedb"),
#[cfg(feature = "kv-indxdb")]
Inner::IndxDB(_) => write!(f, "indxdb"),
#[cfg(feature = "kv-tikv")]
@ -213,11 +209,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.closed(),
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.closed(),
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -260,11 +251,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.cancel().await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.cancel().await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -307,11 +293,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.commit().await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.commit().await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -382,11 +363,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.del(key).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.del(key).await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -431,11 +407,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.exi(key).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.exi(key).await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -481,11 +452,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.get(key).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.get(key).await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -532,11 +498,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.set(key, val).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.set(key, val).await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -602,11 +563,6 @@ impl Transaction {
inner: Inner::FoundationDB(v),
..
} => v.get_timestamp().await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.get_timestamp(key).await,
#[cfg(feature = "kv-surrealkv")]
Transaction {
inner: Inner::SurrealKV(v),
@ -701,14 +657,6 @@ impl Transaction {
inner: Inner::FoundationDB(v),
..
} => v.set_versionstamped_key(prefix, suffix, val).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => {
let k = v.get_versionstamped_key(ts_key, prefix, suffix).await?;
v.set(k, val).await
}
#[cfg(feature = "kv-surrealkv")]
Transaction {
inner: Inner::SurrealKV(v),
@ -740,11 +688,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.put(category, key, val).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.put(category, key, val).await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -795,11 +738,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.scan(rng, limit).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.scan(rng, limit).await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -851,11 +789,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.scan(range, batch_limit).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.scan(range, batch_limit).await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -924,11 +857,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.putc(key, val, chk).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.putc(key, val, chk).await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -975,11 +903,6 @@ impl Transaction {
inner: Inner::RocksDB(v),
..
} => v.delc(key, chk).await,
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(v),
..
} => v.delc(key, chk).await,
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(v),
@ -2902,11 +2825,6 @@ impl Transaction {
inner: Inner::RocksDB(ref mut v),
..
} => v.check_level(check),
#[cfg(feature = "kv-speedb")]
Transaction {
inner: Inner::SpeeDB(ref mut v),
..
} => v.check_level(check),
#[cfg(feature = "kv-indxdb")]
Transaction {
inner: Inner::IndxDB(ref mut v),

View file

@ -28,7 +28,6 @@ protocol-http = ["dep:reqwest", "dep:tokio-util"]
protocol-ws = ["dep:tokio-tungstenite", "dep:trice", "tokio/time"]
kv-mem = ["surrealdb-core/kv-mem", "tokio/time"]
kv-indxdb = ["surrealdb-core/kv-indxdb"]
kv-speedb = ["surrealdb-core/kv-speedb", "tokio/time"]
kv-rocksdb = ["surrealdb-core/kv-rocksdb", "tokio/time"]
kv-tikv = ["surrealdb-core/kv-tikv"]
kv-fdb-5_1 = ["surrealdb-core/kv-fdb-5_1", "kv-fdb"]

View file

@ -1,8 +1,8 @@
//! Dynamic support for any engine
//!
//! SurrealDB supports various ways of storing and accessing your data. For storing data we support a number of
//! key value stores. These are RocksDB, SpeeDB, TiKV, FoundationDB and an in-memory store. We call these
//! local engines. RocksDB and SpeeDB are file-based, single node key value stores. TiKV and FoundationDB are
//! key value stores. These are SurrealKV, RocksDB, TiKV, FoundationDB and an in-memory store. We call these
//! local engines. SurrealKV and RocksDB are file-based, single node key value stores. TiKV and FoundationDB are
//! are distributed stores that can scale horizontally across multiple nodes. The in-memory store does not persist
//! your data, it only stores it in memory. All these can be embedded in your application, so you don't need to
//! spin up a SurrealDB server first in order to use them. We also support spinning up a server externally and then
@ -270,14 +270,14 @@ impl Surreal<Any> {
/// // Instantiate an in-memory instance
/// let db = connect("mem://").await?;
///
/// // Instantiate an file-backed instance (currently uses RocksDB)
/// // Instantiate a file-backed instance (currently uses RocksDB)
/// let db = connect("file://path/to/database-folder").await?;
///
/// /// // Instantiate an RocksDB-backed instance
/// // Instantiate a RocksDB-backed instance
/// let db = connect("rocksdb://path/to/database-folder").await?;
///
/// // Instantiate an SpeeDB-backed instance
/// let db = connect("speedb://path/to/database-folder").await?;
/// // Instantiate a SurrealKV-backed instance
/// let db = connect("surrealkv://path/to/database-folder").await?;
///
/// // Instantiate an IndxDB-backed instance
/// let db = connect("indxdb://DatabaseName").await?;

View file

@ -109,22 +109,6 @@ impl Connection for Any {
.into());
}
EndpointKind::SpeeDb => {
#[cfg(feature = "kv-speedb")]
{
features.insert(ExtraFeatures::Backup);
features.insert(ExtraFeatures::LiveQueries);
engine::local::native::router(address, conn_tx, route_rx);
conn_rx.into_recv_async().await??
}
#[cfg(not(feature = "kv-speedb"))]
return Err(DbError::Ds(
"Cannot connect to the `speedb` storage engine as it is not enabled in this build of SurrealDB".to_owned(),
)
.into());
}
EndpointKind::TiKv => {
#[cfg(feature = "kv-tikv")]
{

View file

@ -107,21 +107,6 @@ impl Connection for Any {
.into());
}
EndpointKind::SpeeDb => {
#[cfg(feature = "kv-speedb")]
{
features.insert(ExtraFeatures::LiveQueries);
engine::local::wasm::router(address, conn_tx, route_rx);
conn_rx.into_recv_async().await??;
}
#[cfg(not(feature = "kv-speedb"))]
return Err(DbError::Ds(
"Cannot connect to the `speedb` storage engine as it is not enabled in this build of SurrealDB".to_owned(),
)
.into());
}
EndpointKind::SurrealKV => {
#[cfg(feature = "kv-surrealkv")]
{

View file

@ -227,42 +227,6 @@ pub struct File;
#[derive(Debug)]
pub struct RocksDb;
/// SpeeDB database
///
/// # Examples
///
/// Instantiating a SpeeDB-backed instance
///
/// ```no_run
/// # #[tokio::main]
/// # async fn main() -> surrealdb::Result<()> {
/// use surrealdb::Surreal;
/// use surrealdb::engine::local::SpeeDb;
///
/// let db = Surreal::new::<SpeeDb>("path/to/database-folder").await?;
/// # Ok(())
/// # }
/// ```
///
/// Instantiating a SpeeDB-backed strict instance
///
/// ```no_run
/// # #[tokio::main]
/// # async fn main() -> surrealdb::Result<()> {
/// use surrealdb::opt::Config;
/// use surrealdb::Surreal;
/// use surrealdb::engine::local::SpeeDb;
///
/// let config = Config::default().strict();
/// let db = Surreal::new::<SpeeDb>(("path/to/database-folder", config)).await?;
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "kv-speedb")]
#[cfg_attr(docsrs, doc(cfg(feature = "kv-speedb")))]
#[derive(Debug)]
pub struct SpeeDb;
/// IndxDB database
///
/// # Examples
@ -371,6 +335,37 @@ pub struct TiKv;
#[derive(Debug)]
pub struct FDb;
/// SurrealKV database
///
/// # Examples
///
/// Instantiating a SurrealKV-backed instance
///
/// ```no_run
/// # #[tokio::main]
/// # async fn main() -> surrealdb::Result<()> {
/// use surrealdb::Surreal;
/// use surrealdb::engine::local::SurrealKV;
///
/// let db = Surreal::new::<SurrealKV>("path/to/database-folder").await?;
/// # Ok(())
/// # }
/// ```
///
/// Instantiating a SurrealKV-backed strict instance
///
/// ```no_run
/// # #[tokio::main]
/// # async fn main() -> surrealdb::Result<()> {
/// use surrealdb::opt::Config;
/// use surrealdb::Surreal;
/// use surrealdb::engine::local::SurrealKV;
///
/// let config = Config::default().strict();
/// let db = Surreal::new::<SurrealKV>(("path/to/database-folder", config)).await?;
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "kv-surrealkv")]
#[cfg_attr(docsrs, doc(cfg(feature = "kv-surrealkv")))]
#[derive(Debug)]

View file

@ -151,7 +151,6 @@ pub(crate) fn router(
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
let kvs = match address.config.temporary_directory {
Some(tmp_dir) => kvs.with_temporary_directory(tmp_dir),

View file

@ -5,7 +5,6 @@ pub mod any;
feature = "kv-mem",
feature = "kv-tikv",
feature = "kv-rocksdb",
feature = "kv-speedb",
feature = "kv-fdb",
feature = "kv-indxdb",
feature = "kv-surrealkv",

View file

@ -6,7 +6,6 @@ use crate::{dbs::Capabilities, iam::Level};
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
use std::path::PathBuf;
use std::time::Duration;
@ -34,7 +33,6 @@ pub struct Config {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
pub(crate) temporary_directory: Option<PathBuf>,
}
@ -132,7 +130,6 @@ impl Config {
feature = "kv-rocksdb",
feature = "kv-fdb",
feature = "kv-tikv",
feature = "kv-speedb"
))]
pub fn temporary_directory(mut self, path: Option<PathBuf>) -> Self {
self.temporary_directory = path;

View file

@ -11,8 +11,6 @@ mod indxdb;
mod mem;
#[cfg(feature = "kv-rocksdb")]
mod rocksdb;
#[cfg(feature = "kv-speedb")]
mod speedb;
#[cfg(feature = "kv-surrealkv")]
mod surrealkv;
#[cfg(feature = "kv-tikv")]
@ -130,7 +128,6 @@ pub enum EndpointKind {
Memory,
RocksDb,
File,
SpeeDb,
TiKv,
Unsupported(String),
SurrealKV,
@ -149,7 +146,6 @@ impl From<&str> for EndpointKind {
"mem" => Self::Memory,
"file" => Self::File,
"rocksdb" => Self::RocksDb,
"speedb" => Self::SpeeDb,
"tikv" => Self::TiKv,
"surrealkv" => Self::SurrealKV,
_ => Self::Unsupported(s.to_owned()),

View file

@ -1,40 +0,0 @@
use crate::api::engine::local::Db;
use crate::api::engine::local::SpeeDb;
use crate::api::opt::Config;
use crate::api::opt::Endpoint;
use crate::api::opt::IntoEndpoint;
use crate::api::Result;
use std::path::Path;
use std::path::PathBuf;
use url::Url;
macro_rules! endpoints {
($($name:ty),*) => {
$(
impl IntoEndpoint<SpeeDb> for $name {
type Client = Db;
fn into_endpoint(self) -> Result<Endpoint> {
let protocol = "speedb://";
let url = Url::parse(protocol)
.unwrap_or_else(|_| unreachable!("`{protocol}` should be static and valid"));
let mut endpoint = Endpoint::new(url);
endpoint.path = super::path_to_string(protocol, self);
Ok(endpoint)
}
}
impl IntoEndpoint<SpeeDb> for ($name, Config) {
type Client = Db;
fn into_endpoint(self) -> Result<Endpoint> {
let mut endpoint = IntoEndpoint::<SpeeDb>::into_endpoint(self.0)?;
endpoint.config = self.1;
Ok(endpoint)
}
}
)*
}
}
endpoints!(&str, &String, String, &Path, PathBuf);

View file

@ -341,41 +341,6 @@ mod api_integration {
include!("api/backup.rs");
}
#[cfg(feature = "kv-speedb")]
mod speedb {
use super::*;
use surrealdb::engine::local::Db;
use surrealdb::engine::local::SpeeDb;
async fn new_db() -> (SemaphorePermit<'static>, Surreal<Db>) {
let permit = PERMITS.acquire().await.unwrap();
let path = format!("/tmp/{}.db", Ulid::new());
let root = Root {
username: ROOT_USER,
password: ROOT_PASS,
};
let config = Config::new()
.user(root)
.tick_interval(TICK_INTERVAL)
.capabilities(Capabilities::all());
let db = Surreal::new::<SpeeDb>((path, config)).await.unwrap();
db.signin(root).await.unwrap();
(permit, db)
}
#[test_log::test(tokio::test)]
async fn any_engine_can_connect() {
let path = format!("{}.db", Ulid::new());
surrealdb::engine::any::connect(format!("speedb://{path}")).await.unwrap();
surrealdb::engine::any::connect(format!("speedb:///tmp/{path}")).await.unwrap();
tokio::fs::remove_dir_all(path).await.unwrap();
}
include!("api/mod.rs");
include!("api/live.rs");
include!("api/backup.rs");
}
#[cfg(feature = "kv-tikv")]
mod tikv {
use super::*;

View file

@ -14,7 +14,6 @@ pub(crate) fn path_valid(v: &str) -> Result<String, String> {
"memory" => Ok(v.to_string()),
v if v.starts_with("file:") => Ok(v.to_string()),
v if v.starts_with("rocksdb:") => Ok(v.to_string()),
v if v.starts_with("speedb:") => Ok(v.to_string()),
v if v.starts_with("surrealkv:") => Ok(v.to_string()),
v if v.starts_with("tikv:") => Ok(v.to_string()),
v if v.starts_with("fdb:") => Ok(v.to_string()),
@ -59,8 +58,8 @@ pub(crate) fn endpoint_valid(v: &str) -> Result<String, String> {
let scheme = split_endpoint(v).0;
match scheme {
"http" | "https" | "ws" | "wss" | "fdb" | "mem" | "rocksdb" | "speedb" | "surrealkv"
| "file" | "tikv" => Ok(v.to_string()),
"http" | "https" | "ws" | "wss" | "fdb" | "mem" | "rocksdb" | "surrealkv" | "file"
| "tikv" => Ok(v.to_string()),
_ => Err(String::from("Provide a valid database connection string")),
}
}

View file

@ -939,10 +939,6 @@ criteria = "safe-to-deploy"
version = "0.11.0+8.1.1"
criteria = "safe-to-deploy"
[[exemptions.libspeedb-sys]]
version = "0.0.4+2.7.0"
criteria = "safe-to-deploy"
[[exemptions.libz-sys]]
version = "1.1.16"
criteria = "safe-to-deploy"
@ -1699,10 +1695,6 @@ criteria = "safe-to-deploy"
version = "2.6.0"
criteria = "safe-to-deploy"
[[exemptions.speedb]]
version = "0.0.4"
criteria = "safe-to-deploy"
[[exemptions.spin]]
version = "0.5.2"
criteria = "safe-to-deploy"