Refactor transaction, caching, and key-value store interfaces (#4257)

Co-authored-by: Gerard Guillemas Martos <gerard.guillemas@surrealdb.com>
Tobie Morgan Hitchcock 2024-07-17 23:44:05 +01:00 committed by GitHub
parent 29b0df6060
commit bfc474e4d8
229 changed files with 7232 additions and 10884 deletions

View file

@ -102,14 +102,14 @@ jobs:
features: "kv-mem"
- target: "lib-rocksdb"
features: "kv-rocksdb"
- target: "lib-fdb"
features: "kv-fdb-7_1"
- target: "lib-surrealkv"
features: "kv-surrealkv"
- target: "sdk-mem"
features: "kv-mem"
- target: "sdk-rocksdb"
features: "kv-rocksdb"
- target: "sdk-fdb"
features: "kv-fdb-7_1"
- target: "sdk-surrealkv"
features: "kv-surrealkv"
# This one fails because the server consumes too much memory and the kernel kills it. I tried with instances up to 16GB of RAM.
# - target: "sdk-ws"
# features: "protocol-ws"
@ -143,7 +143,7 @@ jobs:
uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
if: ${{ matrix.target == 'lib-fdb' || matrix.target == 'sdk-fdb' }}
with:
version: "7.1.30"
version: "7.1.61"
# Run SurrealDB in the background if needed
- name: Build and start SurrealDB

View file

@ -571,16 +571,24 @@ jobs:
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Setup FoundationDB
uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
with:
version: "7.1.30"
- name: Install cargo-make
run: cargo install --debug --locked cargo-make
- name: Setup FoundationDB
uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
with:
version: "7.1.61"
- name: Test fdb engine
run: cargo make ci-api-integration-fdb
run: cargo make ci-api-integration-fdb-7_1
- name: Setup FoundationDB
uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
with:
version: "7.3.47"
- name: Test fdb engine
run: cargo make ci-api-integration-fdb-7_3
- name: Debug info
if: always()

Cargo.lock generated
View file

@ -872,7 +872,6 @@ dependencies = [
"clang-sys",
"lazy_static",
"lazycell",
"log",
"peeking_take_while",
"prettyplease",
"proc-macro2",
@ -881,7 +880,6 @@ dependencies = [
"rustc-hash",
"shlex",
"syn 2.0.58",
"which",
]
[[package]]
@ -1782,9 +1780,9 @@ dependencies = [
[[package]]
name = "echodb"
version = "0.6.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ac31e38aeac770dd01b9d6c9ab2a6d7f025815f71105911cf6de073a5db8ee1"
checksum = "1d1eccc44ff21b80ca7e883ff57423a12610965a33637d5d0bef4adebcd81749"
dependencies = [
"arc-swap",
"imbl",
@ -2082,9 +2080,9 @@ dependencies = [
[[package]]
name = "foundationdb"
version = "0.8.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8696fd1be198f101eb58aeecf0f504fc02b28c7afcc008b4e4a998a91b305108"
checksum = "020bf4ae7238dbdb1ff01e9f981db028515cf66883c461e29faedfea130b2728"
dependencies = [
"async-recursion 1.1.0",
"async-trait",
@ -2102,18 +2100,18 @@ dependencies = [
[[package]]
name = "foundationdb-gen"
version = "0.8.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62239700f01b041b6372aaeb847c52f960e1a69fd2b1025dc995ea3dd90e3308"
checksum = "36878d54a76a48e794d0fe89be2096ab5968b071e7ec25f7becfe7846f55fa77"
dependencies = [
"xml-rs",
]
[[package]]
name = "foundationdb-macros"
version = "0.2.0"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83c8d52fe8b46ab822b4decdcc0d6d85aeedfc98f0d52ba2bd4aec4a97807516"
checksum = "f8db6653cbc621a3810d95d55bd342be3e71181d6df21a4eb29ef986202d3f9c"
dependencies = [
"proc-macro2",
"quote",
@ -2123,11 +2121,12 @@ dependencies = [
[[package]]
name = "foundationdb-sys"
version = "0.8.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98e49545f5393d276b7b888c77e3f9519fd33727435f8244344be72c3284256f"
checksum = "ace2f49db8614b7d7e3b656a12e0059b5fbd0a4da3410b1797374bec3db269fa"
dependencies = [
"bindgen 0.65.1",
"bindgen 0.69.4",
"libc",
]
[[package]]
@ -2912,9 +2911,9 @@ dependencies = [
[[package]]
name = "indxdb"
version = "0.4.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1de97697bf90e30042ea4ae3260a976253e0bb1703fa339541bcc047cc994180"
checksum = "817e28ebe3466175be7e66f4eadfb9e6a221537db2f78b6be04e14b7051a56af"
dependencies = [
"js-sys",
"rexie",
@ -5432,9 +5431,9 @@ dependencies = [
[[package]]
name = "serde_bytes"
version = "0.11.14"
version = "0.11.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734"
checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a"
dependencies = [
"serde",
]
@ -6116,9 +6115,9 @@ dependencies = [
[[package]]
name = "surrealdb-tikv-client"
version = "0.2.0-surreal.2"
version = "0.3.0-surreal.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b79f921871d6ed67c970e8499b4aca3724115c189f99ab30f51b46c77bd19819"
checksum = "f9e204e84239374e8ba2dfabb88f5ac20f69baa09599eee225958445fb7e0a14"
dependencies = [
"async-recursion 0.3.2",
"async-trait",
@ -6130,15 +6129,17 @@ dependencies = [
"log",
"pin-project",
"prometheus",
"prost 0.11.9",
"prost 0.12.3",
"rand 0.8.5",
"regex",
"semver",
"serde",
"serde_derive",
"serde_json",
"take_mut",
"thiserror",
"tokio",
"tonic 0.9.2",
"tonic 0.10.2",
]
[[package]]
@ -6271,6 +6272,12 @@ dependencies = [
"libc",
]
[[package]]
name = "take_mut"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
[[package]]
name = "tap"
version = "1.0.1"
@ -6387,18 +6394,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.58"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.58"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
dependencies = [
"proc-macro2",
"quote",
@ -6482,9 +6489,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.37.0"
version = "1.38.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a"
dependencies = [
"backtrace",
"bytes",
@ -6511,9 +6518,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
version = "2.2.0"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
dependencies = [
"proc-macro2",
"quote",
@ -6673,17 +6680,15 @@ dependencies = [
[[package]]
name = "tonic"
version = "0.9.2"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a"
checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e"
dependencies = [
"async-stream",
"async-trait",
"axum 0.6.20",
"base64 0.21.7",
"bytes",
"futures-core",
"futures-util",
"h2",
"http 0.2.12",
"http-body 0.4.6",
@ -6691,7 +6696,8 @@ dependencies = [
"hyper-timeout",
"percent-encoding",
"pin-project",
"prost 0.11.9",
"prost 0.12.3",
"rustls 0.21.11",
"rustls-pemfile",
"tokio",
"tokio-rustls",

View file

@ -8,11 +8,11 @@ authors = ["Tobie Morgan Hitchcock <tobie@surrealdb.com>"]
[features]
# Public features
default = ["storage-mem", "storage-rocksdb", "scripting", "http"]
default = ["storage-mem", "storage-surrealkv", "storage-rocksdb", "scripting", "http"]
storage-mem = ["surrealdb/kv-mem"]
storage-rocksdb = ["surrealdb/kv-rocksdb"]
storage-tikv = ["surrealdb/kv-tikv"]
storage-fdb = ["surrealdb/kv-fdb-7_1"]
storage-fdb = ["surrealdb/kv-fdb"]
storage-surrealkv = ["surrealdb/kv-surrealkv"]
scripting = ["surrealdb/scripting"]
http = ["surrealdb/http"]
@ -20,6 +20,9 @@ http-compression = []
ml = ["surrealdb/ml"]
jwks = ["surrealdb/jwks"]
performance-profiler = ["dep:pprof"]
# Special features
storage-fdb-7_1 = ["surrealdb/kv-fdb-7_1"]
storage-fdb-7_3 = ["surrealdb/kv-fdb-7_3"]
[workspace]
members = [
@ -30,6 +33,9 @@ members = [
"lib/examples/rocket",
]
[profile.make]
inherits = "dev"
[profile.release]
lto = true
strip = true

View file

@ -45,10 +45,10 @@ serve: check-deps
sql: check-deps
cargo make sql
.PHONY: quick
quick: check-deps
cargo make quick
.PHONY: build
build: check-deps
cargo make build
.PHONY: release
release: check-deps
cargo make release

View file

@ -1,6 +1,6 @@
[tasks.ci-format]
category = "CI - CHECK"
dependencies = ["cargo-fmt", "cargo-fmt-unlinked"]
dependencies = ["cargo-fmt"]
[tasks.ci-check]
category = "CI - CHECK"
@ -15,7 +15,7 @@ args = ["check", "--locked", "--package", "surrealdb", "--features", "protocol-w
[tasks.ci-clippy]
category = "CI - CHECK"
command = "cargo"
args = ["clippy", "--all-targets", "--features", "storage-mem,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks", "--tests", "--benches", "--examples", "--bins", "--", "-D", "warnings"]
args = ["clippy", "--all-targets", "--features", "storage-mem,storage-surrealkv,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks,ml,storage-fdb-7_1", "--tests", "--benches", "--examples", "--bins", "--", "-D", "warnings"]
#
# Integration Tests
@ -151,11 +151,6 @@ category = "CI - INTEGRATION TESTS"
env = { _TEST_API_ENGINE = "rocksdb", _TEST_FEATURES = "kv-rocksdb" }
run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = true }
[tasks.ci-api-integration-fdb]
category = "CI - INTEGRATION TESTS"
env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb-7_1" }
run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = false }
[tasks.ci-api-integration-surrealkv]
category = "CI - INTEGRATION TESTS"
env = { _TEST_API_ENGINE = "surrealkv", _TEST_FEATURES = "kv-surrealkv" }
@ -166,6 +161,16 @@ category = "CI - INTEGRATION TESTS"
env = { _TEST_API_ENGINE = "tikv", _TEST_FEATURES = "kv-tikv" }
run_task = { name = ["start-tikv", "test-kvs", "test-api-integration", "stop-tikv"], fork = true, parallel = false }
[tasks.ci-api-integration-fdb-7_1]
category = "CI - INTEGRATION TESTS"
env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb,kv-fdb-7_1" }
run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = false }
[tasks.ci-api-integration-fdb-7_3]
category = "CI - INTEGRATION TESTS"
env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb,kv-fdb-7_3" }
run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = false }
#
# Services
@ -283,7 +288,7 @@ BENCH_WORKER_THREADS = { value = "1", condition = { env_not_set = ["BENCH_WORKER
BENCH_NUM_OPS = { value = "1000", condition = { env_not_set = ["BENCH_NUM_OPS"] } }
BENCH_DURATION = { value = "30", condition = { env_not_set = ["BENCH_DURATION"] } }
BENCH_SAMPLE_SIZE = { value = "10", condition = { env_not_set = ["BENCH_SAMPLE_SIZE"] } }
BENCH_FEATURES = { value = "protocol-ws,kv-mem,kv-rocksdb,kv-fdb-7_1,kv-surrealkv", condition = { env_not_set = ["BENCH_FEATURES"] } }
BENCH_FEATURES = { value = "protocol-ws,kv-mem,kv-rocksdb,kv-surrealkv", condition = { env_not_set = ["BENCH_FEATURES"] } }
[tasks.bench-target]
private = true
@ -301,11 +306,6 @@ category = "CI - BENCHMARK - SurrealDB Target"
env = { BENCH_DATASTORE_TARGET = "lib-rocksdb" }
run_task = { name = ["bench-target"] }
[tasks.bench-lib-fdb]
category = "CI - BENCHMARK - SurrealDB Target"
env = { BENCH_DATASTORE_TARGET = "lib-fdb" }
run_task = { name = ["bench-target"] }
[tasks.bench-sdk-mem]
category = "CI - BENCHMARK - SurrealDB Target"
env = { BENCH_DATASTORE_TARGET = "sdk-mem" }
@ -316,17 +316,12 @@ category = "CI - BENCHMARK - SurrealDB Target"
env = { BENCH_DATASTORE_TARGET = "sdk-rocksdb" }
run_task = { name = ["bench-target"] }
[tasks.bench-sdk-fdb]
[tasks.bench-lib-surrealkv]
category = "CI - BENCHMARK - SurrealDB Target"
env = { BENCH_DATASTORE_TARGET = "sdk-fdb" }
env = { BENCH_DATASTORE_TARGET = "lib-surrealkv" }
run_task = { name = ["bench-target"] }
[tasks.bench-sdk-ws]
category = "CI - BENCHMARK - SurrealDB Target"
env = { BENCH_DATASTORE_TARGET = "sdk-ws" }
run_task = { name = ["bench-target"] }
[tasks.bench-lib-surrealkv]
category = "CI - BENCHMARK - SurrealDB Target"
env = { BENCH_DATASTORE_TARGET = "lib-surrealkv" }
run_task = { name = ["bench-target"] }

View file

@ -24,35 +24,29 @@ args = ["doc", "--open", "--no-deps", "--package", "surrealdb", "--features", "r
category = "LOCAL USAGE"
command = "cargo"
env = { RUST_MIN_STACK={ value = "4194304", condition = { env_not_set = ["RUST_MIN_STACK"] } } }
args = ["test", "--workspace", "--no-fail-fast"]
# Check
[tasks.cargo-check]
category = "LOCAL USAGE"
command = "cargo"
args = ["check", "--workspace", "--features", "${DEV_FEATURES}"]
args = ["test", "--profile", "make", "--workspace", "--no-fail-fast"]
# Format
[tasks.cargo-fmt]
category = "LOCAL USAGE"
command = "cargo"
args = ["fmt", "--all", "--check"]
[tasks.cargo-fmt-unlinked]
# Check
[tasks.cargo-check]
category = "LOCAL USAGE"
script = """
set -e
cd ${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}/
cargo fmt --all --check -- ./lib/tests/**/*.rs ./core/src/kvs/tests/*.rs
"""
command = "cargo"
args = ["check", "--profile", "make", "--workspace", "--all-targets", "--features", "${ALL_FEATURES}"]
# Clippy
[tasks.cargo-clippy]
category = "LOCAL USAGE"
command = "cargo"
args = ["clippy", "--all-targets", "--all-features", "--", "-D", "warnings"]
args = ["clippy", "--profile", "make", "--workspace", "--all-targets", "--features", "${ALL_FEATURES}", "--", "-D", "warnings"]
[tasks.check]
category = "LOCAL USAGE"
dependencies = ["cargo-check", "cargo-fmt", "cargo-fmt-unlinked", "cargo-clippy"]
dependencies = ["cargo-fmt", "cargo-check", "cargo-clippy"]
[tasks.check-wasm]
category = "LOCAL USAGE"
@ -74,30 +68,30 @@ args = ["bench", "--package", "surrealdb", "--no-default-features", "--features"
[tasks.run]
category = "LOCAL USAGE"
command = "cargo"
args = ["run", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "${@}"]
args = ["run", "--profile", "make", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "${@}"]
# Serve
[tasks.serve]
category = "LOCAL USAGE"
command = "cargo"
args = ["run", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "start", "--allow-all", "${@}"]
args = ["run", "--profile", "make", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "start", "--allow-all", "${@}"]
# SQL
[tasks.sql]
category = "LOCAL USAGE"
command = "cargo"
args = ["run", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "sql", "--pretty", "${@}"]
# Quick
[tasks.quick]
category = "LOCAL USAGE"
command = "cargo"
args = ["build", "${@}"]
args = ["run", "--profile", "make", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "sql", "--pretty", "${@}"]
# Build
[tasks.build]
category = "LOCAL USAGE"
command = "cargo"
args = ["build", "--profile", "make", "${@}"]
# Release
[tasks.release]
category = "LOCAL USAGE"
command = "cargo"
args = ["build", "--release", "${@}"]
# Default

View file

@ -10,8 +10,9 @@ reduce_output = true
default_to_workspace = false
[env]
DEV_FEATURES={ value = "storage-mem,scripting,http,ml,jwks", condition = { env_not_set = ["DEV_FEATURES"] } }
SURREAL_LOG={ value = "trace", condition = { env_not_set = ["SURREAL_LOG"] } }
ALL_FEATURES={ value = "storage-mem,storage-surrealkv,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks,ml,storage-fdb-7_1", condition = { env_not_set = ["ALL_FEATURES"] } }
DEV_FEATURES={ value = "storage-mem,storage-surrealkv,scripting,http,jwks,ml", condition = { env_not_set = ["DEV_FEATURES"] } }
SURREAL_LOG={ value = "full", condition = { env_not_set = ["SURREAL_LOG"] } }
SURREAL_USER={ value = "root", condition = { env_not_set = ["SURREAL_USER"] } }
SURREAL_PASS={ value = "root", condition = { env_not_set = ["SURREAL_PASS"] } }
SURREAL_PATH={ value = "memory", condition = { env_not_set = ["SURREAL_PATH"] } }

View file

@ -20,6 +20,10 @@ include = [
"rustix::fs",
"tokio::fs",
]
exclude = [
"std::path::Path",
"std::path::PathBuf",
]
[api.net]
include = [
@ -30,10 +34,14 @@ include = [
"surreal::net",
"surrealdb",
"surrealdb_core",
"surrealkv",
"tokio::net",
"tracing",
"tracing_core",
]
exclude = [
"hashbrown::map",
]
#
# Crates Linking to Libraries
@ -308,10 +316,12 @@ build.allow_apis = [
"process",
]
allow_unsafe = true
allow_apis = [
"fs",
]
[pkg.proc-macro2]
build.allow_apis = [
"fs",
"process",
]
allow_unsafe = true
@ -435,6 +445,9 @@ allow_unsafe = true
build.allow_apis = [
"process",
]
build.allow_build_instructions = [
"cargo:rustc-check-cfg=*",
]
allow_apis = [
"fs",
]
@ -617,9 +630,6 @@ build.allow_build_instructions = [
[pkg.dirs-sys-next]
allow_unsafe = true
from.build.allow_apis = [
"fs",
]
[pkg.crunchy]
build.allow_apis = [
@ -637,7 +647,6 @@ allow_unsafe = true
[pkg.anyhow]
build.allow_apis = [
"fs",
"process",
]
allow_unsafe = true
@ -742,9 +751,6 @@ allow_unsafe = true
[pkg.dashmap]
allow_unsafe = true
allow_apis = [
"net",
]
[pkg.tokio-stream]
allow_unsafe = true
@ -762,9 +768,6 @@ allow_apis = [
"fs",
]
[pkg.atomic-waker]
allow_unsafe = true
[pkg.doc-comment]
build.allow_apis = [
"process",
@ -986,6 +989,9 @@ allow_unsafe = true
[pkg.crossbeam-deque]
allow_unsafe = true
[pkg.crossbeam-queue]
allow_unsafe = true
[pkg.anstream]
allow_unsafe = true
@ -1056,9 +1062,6 @@ allow_unsafe = true
[pkg.argon2]
allow_unsafe = true
[pkg.futures-concurrency]
allow_unsafe = true
[pkg.quick_cache]
allow_unsafe = true
allow_apis = [
@ -1211,6 +1214,7 @@ allow_apis = [
[pkg.axum-server]
allow_apis = [
"fs",
"net",
]
@ -1353,3 +1357,11 @@ allow_unsafe = true
[pkg.tendril]
allow_unsafe = true
[pkg.lru]
allow_unsafe = true
[pkg.surrealkv]
allow_apis = [
"fs",
]

View file

@ -27,15 +27,8 @@ default = ["kv-mem"]
kv-mem = ["dep:echodb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
kv-indxdb = ["dep:indxdb"]
kv-rocksdb = ["dep:rocksdb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
kv-tikv = ["dep:tikv", "dep:tempfile", "dep:ext-sort"]
kv-fdb-5_1 = ["foundationdb/fdb-5_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
kv-fdb-5_2 = ["foundationdb/fdb-5_2", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
kv-fdb-6_0 = ["foundationdb/fdb-6_0", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
kv-fdb-6_1 = ["foundationdb/fdb-6_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
kv-fdb-6_2 = ["foundationdb/fdb-6_2", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
kv-fdb-6_3 = ["foundationdb/fdb-6_3", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
kv-fdb-7_0 = ["foundationdb/fdb-7_0", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
kv-fdb-7_1 = ["foundationdb/fdb-7_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
kv-tikv = ["dep:tikv", "tokio/time", "dep:tempfile", "dep:ext-sort"]
kv-fdb = ["dep:foundationdb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
kv-surrealkv = ["dep:surrealkv", "tokio/time", "dep:tempfile", "dep:ext-sort"]
scripting = ["dep:js"]
http = ["dep:reqwest"]
@ -48,8 +41,9 @@ arbitrary = [
"geo-types/arbitrary",
"uuid/arbitrary",
]
# Private features
kv-fdb = ["tokio/time"]
# Special features
kv-fdb-7_1 = ["foundationdb/fdb-7_1"]
kv-fdb-7_3 = ["foundationdb/fdb-7_3"]
[package.metadata.docs.rs]
rustdoc-args = ["--cfg", "docsrs"]
@ -76,10 +70,10 @@ dashmap = "5.5.3"
derive = { version = "0.12.0", package = "surrealdb-derive" }
deunicode = "1.4.1"
dmp = "0.2.0"
echodb = { version = "0.6.0", optional = true }
echodb = { version = "0.7.0", optional = true }
executor = { version = "1.8.0", package = "async-executor" }
ext-sort = { version = "^0.1.4", optional = true }
foundationdb = { version = "0.8.0", default-features = false, features = [
foundationdb = { version = "0.9.0", default-features = false, features = [
"embedded-fdb-include",
], optional = true }
fst = "0.4.7"
@ -89,7 +83,7 @@ geo = { version = "0.27.0", features = ["use-serde"] }
geo-types = { version = "0.7.12", features = ["arbitrary"] }
hashbrown = { version = "0.14.5", features = ["serde"] }
hex = { version = "0.4.3" }
indxdb = { version = "0.4.0", optional = true }
indxdb = { version = "0.5.0", optional = true }
ipnet = "2.9.0"
js = { version = "0.6.2", package = "rquickjs", features = [
"array-buffer",
@ -146,7 +140,7 @@ surrealkv = { version = "0.3.0", optional = true }
surrealml = { version = "0.1.1", optional = true, package = "surrealml-core" }
tempfile = { version = "3.10.1", optional = true }
thiserror = "1.0.50"
tikv = { version = "0.2.0-surreal.2", default-features = false, package = "surrealdb-tikv-client", optional = true }
tikv = { version = "0.3.0-surreal.1", default-features = false, package = "surrealdb-tikv-client", optional = true }
tracing = "0.1.40"
trice = "0.4.0"
ulid = { version = "1.1.0", features = ["serde"] }

View file

@ -1,7 +1,7 @@
use crate::err::Error;
use crate::key::change;
#[cfg(debug_assertions)]
use crate::key::debug::sprint_key;
use crate::key::debug::sprint;
use crate::kvs::Transaction;
use crate::vs;
use crate::vs::Versionstamp;
@ -9,42 +9,36 @@ use std::str;
// gc_all_at deletes all change feed entries that become stale at the given timestamp.
#[allow(unused)]
pub async fn gc_all_at(tx: &mut Transaction, ts: u64, limit: Option<u32>) -> Result<(), Error> {
let nses = tx.all_ns().await?;
let nses = nses.as_ref();
for ns in nses {
gc_ns(tx, ns.name.as_str(), limit, ts).await?;
pub async fn gc_all_at(tx: &Transaction, ts: u64) -> Result<(), Error> {
// Fetch all namespaces
let nss = tx.all_ns().await?;
// Loop over each namespace
for ns in nss.as_ref() {
// Trace for debugging
#[cfg(debug_assertions)]
trace!("Performing garbage collection on {ns} for timestamp {ts}");
// Process the namespace
gc_ns(tx, ts, ns.name.as_str()).await?;
}
Ok(())
}
// gc_ns deletes all change feed entries in the given namespace that are older than the given watermark.
#[allow(unused)]
pub async fn gc_ns(
tx: &mut Transaction,
ns: &str,
limit: Option<u32>,
ts: u64,
) -> Result<(), Error> {
pub async fn gc_ns(tx: &Transaction, ts: u64, ns: &str) -> Result<(), Error> {
// Fetch all databases
let dbs = tx.all_db(ns).await?;
let dbs = dbs.as_ref();
for db in dbs {
// We get the expiration of the change feed defined on the database
let db_cf_expiry = match &db.changefeed {
None => 0,
Some(cf) => cf.expiry.as_secs(),
};
// Loop over each database
for db in dbs.as_ref() {
// Trace for debugging
#[cfg(debug_assertions)]
trace!(
"Performing garbage collection on ns {} db {} for ts {}. The cf expiration is {}",
ns,
db.name,
ts,
db_cf_expiry
);
let tbs = tx.all_tb(ns, db.name.as_str()).await?;
let tbs = tbs.as_ref();
let max_tb_cf_expiry = tbs.iter().fold(0, |acc, tb| match &tb.changefeed {
trace!("Performing garbage collection on {ns}:{db} for timestamp {ts}");
// Fetch all tables
let tbs = tx.all_tb(ns, &db.name).await?;
// Get the database changefeed expiration
let db_cf_expiry = db.changefeed.map(|v| v.expiry.as_secs()).unwrap_or_default();
// Get the maximum table changefeed expiration
let tb_cf_expiry = tbs.as_ref().iter().fold(0, |acc, tb| match &tb.changefeed {
None => acc,
Some(cf) => {
if cf.expiry.is_zero() {
@ -54,46 +48,47 @@ pub async fn gc_ns(
}
}
});
let cf_expiry = db_cf_expiry.max(max_tb_cf_expiry);
// Calculate the maximum changefeed expiration
let cf_expiry = db_cf_expiry.max(tb_cf_expiry);
// Ignore this database if the expiry is greater
if ts < cf_expiry {
continue;
}
// We only want to retain the expiry window, so we are going to delete everything before
// Calculate the watermark expiry window
let watermark_ts = ts - cf_expiry;
#[cfg(debug_assertions)]
trace!("The watermark is {} after removing {cf_expiry} from {ts}", watermark_ts);
let watermark_vs =
tx.get_versionstamp_from_timestamp(watermark_ts, ns, db.name.as_str(), true).await?;
// Calculate the watermark versionstamp
let watermark_vs = tx
.lock()
.await
.get_versionstamp_from_timestamp(watermark_ts, ns, &db.name, true)
.await?;
// If a versionstamp exists, then garbage collect
if let Some(watermark_vs) = watermark_vs {
gc_db(tx, ns, db.name.as_str(), watermark_vs, limit).await?;
gc_range(tx, ns, &db.name, watermark_vs).await?;
}
}
Ok(())
}
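// Worked example of the watermark arithmetic above (illustrative numbers only):
// with a garbage-collection timestamp ts = 100s and a combined changefeed expiry
// cf_expiry = 30s, watermark_ts = 100 - 30 = 70, so gc_range removes every
// changefeed entry recorded before the versionstamp that corresponds to t = 70s.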
// gc_range deletes all change feed entries in the given database that are older than the given watermark.
pub async fn gc_db(
tx: &mut Transaction,
pub async fn gc_range(
tx: &Transaction,
ns: &str,
db: &str,
watermark: Versionstamp,
limit: Option<u32>,
) -> Result<(), Error> {
let beg: Vec<u8> = change::prefix_ts(ns, db, vs::u64_to_versionstamp(0));
// Calculate the range
let beg = change::prefix_ts(ns, db, vs::u64_to_versionstamp(0));
let end = change::prefix_ts(ns, db, watermark);
// Trace for debugging
#[cfg(debug_assertions)]
trace!(
"DB GC: ns: {}, db: {}, watermark: {:?}, prefix: {}, end: {}",
ns,
db,
watermark,
sprint_key(&beg),
sprint_key(&end)
"Performing garbage collection on {ns}:{db} for watermark {watermark:?}, between {} and {}",
sprint(&beg),
sprint(&end)
);
let limit = limit.unwrap_or(100);
tx.delr(beg..end, limit).await?;
// Delete the entire range in grouped batches
tx.delr(beg..end).await?;
// Ok all good
Ok(())
}

View file

@ -2,8 +2,8 @@ use crate::cf::{ChangeSet, DatabaseMutation, TableMutations};
use crate::err::Error;
use crate::key::change;
#[cfg(debug_assertions)]
use crate::key::debug::sprint_key;
use crate::kvs::{Limit, ScanPage, Transaction};
use crate::key::debug::sprint;
use crate::kvs::Transaction;
use crate::sql::statements::show::ShowSince;
use crate::vs;
@ -16,18 +16,19 @@ use crate::vs;
// You can use this to read the change feed in chunks.
// The second call would start from the last versionstamp + 1 of the first call.
pub async fn read(
tx: &mut Transaction,
tx: &Transaction,
ns: &str,
db: &str,
tb: Option<&str>,
start: ShowSince,
limit: Option<u32>,
) -> Result<Vec<ChangeSet>, Error> {
// Calculate the start of the changefeed range
let beg = match start {
ShowSince::Versionstamp(x) => change::prefix_ts(ns, db, vs::u64_to_versionstamp(x)),
ShowSince::Timestamp(x) => {
let ts = x.0.timestamp() as u64;
let vs = tx.get_versionstamp_from_timestamp(ts, ns, db, true).await?;
let vs = tx.lock().await.get_versionstamp_from_timestamp(ts, ns, db, true).await?;
match vs {
Some(vs) => change::prefix_ts(ns, db, vs),
None => {
@ -38,63 +39,49 @@ pub async fn read(
}
}
};
// Calculate the end of the changefeed range
let end = change::suffix(ns, db);
let limit = limit.unwrap_or(100);
let scan = tx
.scan_paged(
ScanPage {
range: beg..end,
limit: Limit::Limited(limit),
},
limit,
)
.await?;
// Limit the changefeed results with a default
let limit = limit.unwrap_or(100).min(1000);
// Create an empty buffer for the versionstamp
let mut vs: Option<[u8; 10]> = None;
// Create an empty buffer for the table mutations
let mut buf: Vec<TableMutations> = Vec::new();
let mut r = Vec::<ChangeSet>::new();
// Create an empty buffer for the final changesets
let mut res = Vec::<ChangeSet>::new();
// Iterate over the scanned changefeed entries and decode them into changesets
for (k, v) in scan.values {
for (k, v) in tx.scan(beg..end, limit).await? {
#[cfg(debug_assertions)]
trace!("read change feed; {}", sprint_key(&k));
trace!("Reading change feed entry: {}", sprint(&k));
// Decode the changefeed entry key
let dec = crate::key::change::Cf::decode(&k).unwrap();
if let Some(tb) = tb {
if dec.tb != tb {
// Check the change is for the desired table
if tb.is_some_and(|tb| tb != dec.tb) {
continue;
}
}
let _tb = dec.tb;
let ts = dec.vs;
// Decode the byte array into a vector of operations
let tb_muts: TableMutations = v.into();
// Get the timestamp of the changefeed entry
match vs {
Some(x) => {
if ts != x {
if dec.vs != x {
let db_mut = DatabaseMutation(buf);
r.push(ChangeSet(x, db_mut));
res.push(ChangeSet(x, db_mut));
buf = Vec::new();
vs = Some(ts)
vs = Some(dec.vs)
}
}
None => {
vs = Some(ts);
vs = Some(dec.vs);
}
}
buf.push(tb_muts);
}
// Collect all mutations together
if !buf.is_empty() {
let db_mut = DatabaseMutation(buf);
r.push(ChangeSet(vs.unwrap(), db_mut));
res.push(ChangeSet(vs.unwrap(), db_mut));
}
Ok(r)
// Return the results
Ok(res)
}

View file

@ -153,7 +153,6 @@ mod tests {
use crate::cf::{ChangeSet, DatabaseMutation, TableMutation, TableMutations};
use crate::dbs::Session;
use crate::fflags::FFLAGS;
use crate::key::key_req::KeyRequirements;
use crate::kvs::{Datastore, LockType::*, Transaction, TransactionType::*};
use crate::sql::changefeed::ChangeFeed;
use crate::sql::id::Id;
@ -186,7 +185,7 @@ mod tests {
// Write things to the table.
//
let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap();
let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap().inner();
let thing_a = Thing {
tb: TB.to_owned(),
id: Id::String("A".to_string()),
@ -205,7 +204,7 @@ mod tests {
tx1.complete_changes(true).await.unwrap();
tx1.commit().await.unwrap();
let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap();
let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap().inner();
let thing_c = Thing {
tb: TB.to_owned(),
id: Id::String("C".to_string()),
@ -223,8 +222,7 @@ mod tests {
tx2.complete_changes(true).await.unwrap();
tx2.commit().await.unwrap();
let x = ds.transaction(Write, Optimistic).await;
let mut tx3 = x.unwrap();
let mut tx3 = ds.transaction(Write, Optimistic).await.unwrap().inner();
let thing_b = Thing {
tb: TB.to_owned(),
id: Id::String("B".to_string()),
@ -262,9 +260,8 @@ mod tests {
let start: u64 = 0;
let mut tx4 = ds.transaction(Write, Optimistic).await.unwrap();
let r =
crate::cf::read(&mut tx4, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
let tx4 = ds.transaction(Write, Optimistic).await.unwrap();
let r = crate::cf::read(&tx4, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
.await
.unwrap();
tx4.commit().await.unwrap();
@ -338,16 +335,15 @@ mod tests {
assert_eq!(r, want);
let mut tx5 = ds.transaction(Write, Optimistic).await.unwrap();
let tx5 = ds.transaction(Write, Optimistic).await.unwrap();
// gc_all needs to be committed before we can read the changes
crate::cf::gc_db(&mut tx5, NS, DB, vs::u64_to_versionstamp(4), Some(10)).await.unwrap();
crate::cf::gc_range(&tx5, NS, DB, vs::u64_to_versionstamp(4)).await.unwrap();
// We now commit tx5, which should persist the gc_all results
tx5.commit().await.unwrap();
// Now we should see the gc_all results
let mut tx6 = ds.transaction(Write, Optimistic).await.unwrap();
let r =
crate::cf::read(&mut tx6, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
let tx6 = ds.transaction(Write, Optimistic).await.unwrap();
let r = crate::cf::read(&tx6, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
.await
.unwrap();
tx6.commit().await.unwrap();
@ -387,8 +383,8 @@ mod tests {
// Now we should see the gc_all results
ds.tick_at((ts.0.timestamp() + 5).try_into().unwrap()).await.unwrap();
let mut tx7 = ds.transaction(Write, Optimistic).await.unwrap();
let r = crate::cf::read(&mut tx7, NS, DB, Some(TB), ShowSince::Timestamp(ts), Some(10))
let tx7 = ds.transaction(Write, Optimistic).await.unwrap();
let r = crate::cf::read(&tx7, NS, DB, Some(TB), ShowSince::Timestamp(ts), Some(10))
.await
.unwrap();
tx7.commit().await.unwrap();
@ -406,7 +402,7 @@ mod tests {
)
.await;
ds.tick_at(10).await.unwrap();
let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
let vs1 = tx.get_versionstamp_from_timestamp(5, NS, DB, false).await.unwrap().unwrap();
let vs2 = tx.get_versionstamp_from_timestamp(10, NS, DB, false).await.unwrap().unwrap();
tx.cancel().await.unwrap();
@ -511,18 +507,17 @@ mod tests {
assert_eq!(r, expected);
}
async fn change_feed_ts(mut tx: Transaction, ts: &Datetime) -> Vec<ChangeSet> {
let r =
crate::cf::read(&mut tx, NS, DB, Some(TB), ShowSince::Timestamp(ts.clone()), Some(10))
async fn change_feed_ts(tx: Transaction, ts: &Datetime) -> Vec<ChangeSet> {
let r = crate::cf::read(&tx, NS, DB, Some(TB), ShowSince::Timestamp(ts.clone()), Some(10))
.await
.unwrap();
tx.cancel().await.unwrap();
r
}
async fn change_feed_vs(mut tx: Transaction, vs: &Versionstamp) -> Vec<ChangeSet> {
async fn change_feed_vs(tx: Transaction, vs: &Versionstamp) -> Vec<ChangeSet> {
let r = crate::cf::read(
&mut tx,
&tx,
NS,
DB,
Some(TB),
@ -535,14 +530,14 @@ mod tests {
r
}
async fn record_change_feed_entry(mut tx: Transaction, id: String) -> Thing {
async fn record_change_feed_entry(tx: Transaction, id: String) -> Thing {
let thing = Thing {
tb: TB.to_owned(),
id: Id::String(id),
};
let value_a: Value = "a".into();
let previous = Cow::from(Value::None);
tx.record_change(
tx.lock().await.record_change(
NS,
DB,
TB,
@ -551,7 +546,7 @@ mod tests {
Cow::Borrowed(&value_a),
DONT_STORE_PREVIOUS,
);
tx.complete_changes(true).await.unwrap();
tx.lock().await.complete_changes(true).await.unwrap();
tx.commit().await.unwrap();
thing
}
@ -585,14 +580,14 @@ mod tests {
// work.
//
let mut tx0 = ds.transaction(Write, Optimistic).await.unwrap();
let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
let ns_root = crate::key::root::ns::new(NS);
tx0.put(ns_root.key_category(), &ns_root, dns).await.unwrap();
tx.put(&ns_root, dns).await.unwrap();
let db_root = crate::key::namespace::db::new(NS, DB);
tx0.put(db_root.key_category(), &db_root, ddb).await.unwrap();
tx.put(&db_root, ddb).await.unwrap();
let tb_root = crate::key::database::tb::new(NS, DB, TB);
tx0.put(tb_root.key_category(), &tb_root, dtb.clone()).await.unwrap();
tx0.commit().await.unwrap();
tx.put(&tb_root, dtb.clone()).await.unwrap();
tx.commit().await.unwrap();
ds
}
}

View file

@ -1,28 +1,5 @@
use once_cell::sync::Lazy;
#[cfg(not(target_arch = "wasm32"))]
#[allow(dead_code)]
/// Specifies how many concurrent jobs can be buffered in the worker channel.
pub const MAX_CONCURRENT_TASKS: usize = 64;
/// Specifies how deep various forms of computation will go before the query fails
/// with [`crate::err::Error::ComputationDepthExceeded`].
///
/// For reference, use ~15 per MiB of stack in release mode.
///
/// During query parsing, the total depth of calls to parse values (including arrays, expressions,
/// functions, objects, sub-queries), Javascript values, and geometry collections count against
/// this limit.
///
/// During query execution, all potentially-recursive code paths count against this limit. Whereas
/// parsing assigns equal weight to each recursion, certain expensive code paths are allowed to
/// count for more than one unit of depth during execution.
pub static MAX_COMPUTATION_DEPTH: Lazy<u32> =
lazy_env_parse!("SURREAL_MAX_COMPUTATION_DEPTH", u32, 120);
/// Specifies the names of parameters which can not be specified in a query.
pub const PROTECTED_PARAM_NAMES: &[&str] = &["access", "auth", "token", "session"];
/// The characters which are supported in server record IDs.
pub const ID_CHARS: [char; 36] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
@ -32,8 +9,31 @@ pub const ID_CHARS: [char; 36] = [
/// The publicly visible name of the server
pub const SERVER_NAME: &str = "SurrealDB";
/// Datastore processor batch size for scan operations
pub const PROCESSOR_BATCH_SIZE: u32 = 50;
/// Specifies the names of parameters which can not be specified in a query.
pub const PROTECTED_PARAM_NAMES: &[&str] = &["access", "auth", "token", "session"];
/// Specifies how many concurrent jobs can be buffered in the worker channel.
#[cfg(not(target_arch = "wasm32"))]
pub static MAX_CONCURRENT_TASKS: Lazy<usize> =
lazy_env_parse!("SURREAL_MAX_CONCURRENT_TASKS", usize, 64);
/// Specifies how deep recursive computation calls will go before an error is returned.
pub static MAX_COMPUTATION_DEPTH: Lazy<u32> =
lazy_env_parse!("SURREAL_MAX_COMPUTATION_DEPTH", u32, 120);
/// Specifies the number of items which can be cached within a single transaction.
pub static TRANSACTION_CACHE_SIZE: Lazy<usize> =
lazy_env_parse!("SURREAL_TRANSACTION_CACHE_SIZE", usize, 10_000);
/// The maximum number of keys that should be scanned at once in general queries.
pub static NORMAL_FETCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_NORMAL_FETCH_SIZE", u32, 50);
/// The maximum number of keys that should be scanned at once for export queries.
pub static EXPORT_BATCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_EXPORT_BATCH_SIZE", u32, 1000);
/// The maximum number of keys that should be fetched when streaming range scans in a Scanner.
pub static MAX_STREAM_BATCH_SIZE: Lazy<u32> =
lazy_env_parse!("SURREAL_MAX_STREAM_BATCH_SIZE", u32, 1000);
/// Forward all signup/signin query errors to a client performing record access. Do not use in production.
pub static INSECURE_FORWARD_RECORD_ACCESS_ERRORS: Lazy<bool> =
@ -50,6 +50,3 @@ pub static INSECURE_FORWARD_RECORD_ACCESS_ERRORS: Lazy<bool> =
/// If the environment variable is not present or cannot be parsed, a default value of 50,000 is used.
pub static EXTERNAL_SORTING_BUFFER_LIMIT: Lazy<usize> =
lazy_env_parse!("SURREAL_EXTERNAL_SORTING_BUFFER_LIMIT", usize, 50_000);
/// The number of records that should be fetched and grouped together in an INSERT statement when exporting.
pub static EXPORT_BATCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_EXPORT_BATCH_SIZE", u32, 1000);
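All of these limits are read lazily from environment variables with a compiled-in default. A minimal sketch of the pattern, assuming lazy_env_parse! simply reads the variable, parses it into the target type, and falls back to the default when it is unset or invalid (an illustrative expansion, not the macro's actual definition):
use once_cell::sync::Lazy;
use std::env;
pub static NORMAL_FETCH_SIZE: Lazy<u32> = Lazy::new(|| {
    env::var("SURREAL_NORMAL_FETCH_SIZE")
        .ok()
        .and_then(|v| v.parse::<u32>().ok())
        .unwrap_or(50)
});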

View file

@ -2,15 +2,14 @@ use crate::ctx::canceller::Canceller;
use crate::ctx::reason::Reason;
#[cfg(feature = "http")]
use crate::dbs::capabilities::NetTarget;
use crate::dbs::{Capabilities, Notification, Transaction};
use crate::dbs::{Capabilities, Notification};
use crate::err::Error;
use crate::idx::planner::executor::QueryExecutor;
use crate::idx::planner::{IterationStage, QueryPlanner};
use crate::idx::trees::store::IndexStores;
use crate::kvs;
use crate::kvs::Transaction;
use crate::sql::value::Value;
use channel::Sender;
use futures::lock::MutexLockFuture;
use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt::{self, Debug};
@ -72,7 +71,7 @@ pub struct Context<'a> {
// The temporary directory
temporary_directory: Option<Arc<PathBuf>>,
// An optional transaction
transaction: Option<Transaction>,
transaction: Option<Arc<Transaction>>,
}
impl<'a> Default for Context<'a> {
@ -81,6 +80,12 @@ impl<'a> Default for Context<'a> {
}
}
impl<'a> From<Transaction> for Context<'a> {
fn from(txn: Transaction) -> Self {
Context::background().with_transaction(Arc::new(txn))
}
}
impl<'a> Debug for Context<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Context")
@ -239,23 +244,19 @@ impl<'a> Context<'a> {
self.iteration_stage = Some(is);
}
pub(crate) fn set_transaction_mut(&mut self, txn: Transaction) {
pub(crate) fn set_transaction(&mut self, txn: Arc<Transaction>) {
self.transaction = Some(txn);
}
pub fn set_transaction(mut self, txn: Transaction) -> Self {
pub(crate) fn with_transaction(mut self, txn: Arc<Transaction>) -> Self {
self.transaction = Some(txn);
self
}
pub fn get_transaction(&self) -> Option<&Transaction> {
self.transaction.as_ref()
}
pub(crate) fn tx_lock(&self) -> MutexLockFuture<'_, kvs::Transaction> {
pub(crate) fn tx(&self) -> Arc<Transaction> {
self.transaction
.as_ref()
.map(|txn| txn.lock())
.map(Arc::clone)
.unwrap_or_else(|| unreachable!("The context was not associated with a transaction"))
}

View file

@ -1,27 +1,13 @@
use std::sync::Arc;
use channel::Receiver;
use futures::lock::Mutex;
use futures::StreamExt;
use reblessive::TreeStack;
#[cfg(not(target_arch = "wasm32"))]
use tokio::spawn;
use tracing::instrument;
use trice::Instant;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_futures::spawn_local as spawn;
use crate::ctx::Context;
use crate::dbs::response::Response;
use crate::dbs::Force;
use crate::dbs::Notification;
use crate::dbs::Options;
use crate::dbs::QueryType;
use crate::dbs::Transaction;
use crate::err::Error;
use crate::iam::Action;
use crate::iam::ResourceKind;
use crate::kvs::lq_structs::TrackedResult;
use crate::kvs::Transaction;
use crate::kvs::TransactionType;
use crate::kvs::{Datastore, LockType::*, TransactionType::*};
use crate::sql::paths::DB;
@ -30,11 +16,21 @@ use crate::sql::query::Query;
use crate::sql::statement::Statement;
use crate::sql::value::Value;
use crate::sql::Base;
use channel::Receiver;
use futures::StreamExt;
use reblessive::TreeStack;
use std::sync::Arc;
#[cfg(not(target_arch = "wasm32"))]
use tokio::spawn;
use tracing::instrument;
use trice::Instant;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_futures::spawn_local as spawn;
pub(crate) struct Executor<'a> {
err: bool,
kvs: &'a Datastore,
txn: Option<Transaction>,
txn: Option<Arc<Transaction>>,
}
impl<'a> Executor<'a> {
@ -46,7 +42,7 @@ impl<'a> Executor<'a> {
}
}
fn txn(&self) -> Transaction {
fn txn(&self) -> Arc<Transaction> {
self.txn.clone().expect("unreachable: txn was None after successful begin")
}
@ -60,7 +56,7 @@ impl<'a> Executor<'a> {
Some(_) => false,
None => match self.kvs.transaction(write, Optimistic).await {
Ok(v) => {
self.txn = Some(Arc::new(Mutex::new(v)));
self.txn = Some(Arc::new(v));
true
}
Err(_) => {
@ -81,37 +77,27 @@ impl<'a> Executor<'a> {
if local {
// Extract the transaction
if let Some(txn) = self.txn.take() {
// Lock the transaction
let mut txn = txn.lock().await;
// Check for any errors
if self.err {
// Cancel and ignore any error because the error flag was
// already set
let _ = txn.cancel().await;
} else {
let r = match txn.complete_changes(false).await {
Ok(_) => {
match txn.commit().await {
Ok(()) => {
// Commit succeeded, do post commit operations that do not matter to the tx
let lqs: Vec<TrackedResult> =
txn.consume_pending_live_queries();
// Track the live queries in the data store
self.kvs.handle_postprocessing_of_statements(&lqs).await?;
Ok(())
}
Err(e) => Err(e),
}
}
r => r,
};
if let Err(e) = r {
// Transaction failed to commit
//
// TODO: Not all commit errors definitively mean
// the transaction didn't commit. Detect that and tell
// the user.
if let Err(e) = txn.complete_changes(false).await {
// Rollback the transaction
let _ = txn.cancel().await;
// Return the error message
self.err = true;
return Err(e);
}
if let Err(e) = txn.commit().await {
// Rollback the transaction
let _ = txn.cancel().await;
// Return the error message
self.err = true;
return Err(e);
};
}
}
}
@ -122,7 +108,6 @@ impl<'a> Executor<'a> {
if local {
// Extract the transaction
if let Some(txn) = self.txn.take() {
let mut txn = txn.lock().await;
if txn.cancel().await.is_err() {
self.err = true;
}
@ -168,7 +153,6 @@ impl<'a> Executor<'a> {
/// Flush notifications from a buffer channel (live queries) to the committed notification channel.
/// This is because we don't want to broadcast notifications to the user for failed transactions.
/// TODO we can delete this once we migrate to lq v2
async fn flush(&self, ctx: &Context<'_>, mut rcv: Receiver<Notification>) {
let sender = ctx.notifications();
spawn(async move {
@ -182,17 +166,6 @@ impl<'a> Executor<'a> {
});
}
/// A transaction collects created live queries which can then be consumed when a transaction is committed
/// We use this function to get these transactions and send them to the invoker without channels
async fn consume_committed_live_query_registrations(&self) -> Option<Vec<TrackedResult>> {
if let Some(txn) = self.txn.as_ref() {
let txn = txn.lock().await;
Some(txn.consume_pending_live_queries())
} else {
None
}
}
async fn set_ns(&self, ctx: &mut Context<'_>, opt: &mut Options, ns: &str) {
let mut session = ctx.value("session").unwrap_or(&Value::None).clone();
session.put(NS.as_ref(), ns.to_owned().into());
@ -213,10 +186,9 @@ impl<'a> Executor<'a> {
mut ctx: Context<'_>,
opt: Options,
qry: Query,
) -> Result<(Vec<Response>, Vec<TrackedResult>), Error> {
) -> Result<Vec<Response>, Error> {
// The stack to run the executor in.
let mut stack = TreeStack::new();
// Create a notification channel
let (send, recv) = channel::unbounded();
// Set the notification channel
@ -225,7 +197,6 @@ impl<'a> Executor<'a> {
let mut buf: Vec<Response> = vec![];
// Initialise array of responses
let mut out: Vec<Response> = vec![];
let mut live_queries: Vec<TrackedResult> = vec![];
// Do we fast-forward a transaction?
// Set to true when we encounter a return statement in a transaction
let mut ff_txn = false;
@ -293,9 +264,6 @@ impl<'a> Executor<'a> {
let commit_error = self.commit(true).await.err();
buf = buf.into_iter().map(|v| self.buf_commit(v, &commit_error)).collect();
self.flush(&ctx, recv.clone()).await;
if let Some(lqs) = self.consume_committed_live_query_registrations().await {
live_queries.extend(lqs);
}
out.append(&mut buf);
debug_assert!(self.txn.is_none(), "commit(true) should have unset txn");
self.txn = None;
@ -322,7 +290,8 @@ impl<'a> Executor<'a> {
true => Err(Error::TxFailure),
// The transaction began successfully
false => {
ctx.set_transaction_mut(self.txn());
// ctx.set_transaction(txn)
ctx.set_transaction(self.txn());
// Check the statement
match stack
.enter(|stk| stm.compute(stk, &ctx, &opt, None))
@ -347,12 +316,6 @@ impl<'a> Executor<'a> {
Ok(_) => {
// Flush live query notifications
self.flush(&ctx, recv.clone()).await;
if let Some(lqs) = self
.consume_committed_live_query_registrations()
.await
{
live_queries.extend(lqs);
}
Ok(Value::None)
}
}
@ -395,7 +358,7 @@ impl<'a> Executor<'a> {
if let Err(err) = ctx.add_timeout(timeout) {
Err(err)
} else {
ctx.set_transaction_mut(self.txn());
ctx.set_transaction(self.txn());
// Process the statement
let res = stack
.enter(|stk| stm.compute(stk, &ctx, &opt, None))
@ -410,7 +373,7 @@ impl<'a> Executor<'a> {
}
// There is no timeout clause
None => {
ctx.set_transaction_mut(self.txn());
ctx.set_transaction(self.txn());
stack
.enter(|stk| stm.compute(stk, &ctx, &opt, None))
.finish()
@ -445,11 +408,6 @@ impl<'a> Executor<'a> {
} else {
// Flush the live query change notifications
self.flush(&ctx, recv.clone()).await;
if let Some(lqs) =
self.consume_committed_live_query_registrations().await
{
live_queries.extend(lqs);
}
res
}
} else {
@ -475,18 +433,8 @@ impl<'a> Executor<'a> {
e
}),
query_type: match (is_stm_live, is_stm_kill) {
(true, _) => {
if let Some(lqs) = self.consume_committed_live_query_registrations().await {
live_queries.extend(lqs);
}
QueryType::Live
}
(_, true) => {
if let Some(lqs) = self.consume_committed_live_query_registrations().await {
live_queries.extend(lqs);
}
QueryType::Kill
}
(true, _) => QueryType::Live,
(_, true) => QueryType::Kill,
_ => QueryType::Other,
},
};
@ -502,7 +450,7 @@ impl<'a> Executor<'a> {
}
}
// Return responses
Ok((out, live_queries))
Ok(out)
}
}

View file

@ -529,7 +529,7 @@ impl Iterator {
// Create a channel to shutdown
let (end, exit) = channel::bounded::<()>(1);
// Create an unbounded channel
let (chn, docs) = channel::bounded(crate::cnf::MAX_CONCURRENT_TASKS);
let (chn, docs) = channel::bounded(*crate::cnf::MAX_CONCURRENT_TASKS);
// Create an async closure for prepared values
let adocs = async {
// Process all prepared values
@ -553,7 +553,7 @@ impl Iterator {
drop(chn);
};
// Create an unbounded channel
let (chn, vals) = channel::bounded(crate::cnf::MAX_CONCURRENT_TASKS);
let (chn, vals) = channel::bounded(*crate::cnf::MAX_CONCURRENT_TASKS);
// Create an async closure for received values
let avals = async {
// Process all received values

View file

@ -15,7 +15,6 @@ mod result;
mod session;
mod statement;
mod store;
mod transaction;
mod variables;
pub mod capabilities;
@ -32,7 +31,6 @@ pub use self::session::*;
pub(crate) use self::executor::*;
pub(crate) use self::iterator::*;
pub(crate) use self::statement::*;
pub(crate) use self::transaction::*;
pub(crate) use self::variables::*;
#[doc(hidden)]

View file

@ -1,28 +1,117 @@
use crate::err::Error;
use crate::err::Error::TimestampOverflow;
use crate::sql::Duration;
use derive::{Key, Store};
use crate::sql::statements::info::InfoStructure;
use crate::sql::Value;
use derive::Store;
use revision::revisioned;
use revision::Error;
use serde::{Deserialize, Serialize};
use std::fmt::{self, Display};
use std::ops::{Add, Sub};
use std::time::Duration;
use uuid::Uuid;
// NOTE: This is not a statement, but as per layering, keeping it here till we
// have a better structure.
#[revisioned(revision = 1)]
#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, PartialOrd, Hash, Store)]
#[revisioned(revision = 2)]
#[derive(Clone, Debug, Default, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash, Store)]
#[non_exhaustive]
pub struct ClusterMembership {
pub struct Node {
#[revision(start = 2, default_fn = "default_id")]
pub id: Uuid,
#[revision(start = 2, default_fn = "default_hb")]
pub hb: Timestamp,
#[revision(start = 2, default_fn = "default_gc")]
pub gc: bool,
#[revision(end = 2, convert_fn = "convert_name")]
pub name: String,
// TiKV = TiKV TSO Timestamp as u64
// not TiKV = local nanos as u64
#[revision(end = 2, convert_fn = "convert_heartbeat")]
pub heartbeat: Timestamp,
}
impl Node {
/// Create a new Node entry
pub fn new(id: Uuid, hb: Timestamp, gc: bool) -> Self {
Self {
id,
hb,
gc,
..Default::default()
}
}
/// Mark this node as archived
pub fn archive(&self) -> Self {
Node {
gc: true,
..self.to_owned()
}
}
/// Return the id of this node
pub fn id(&self) -> Uuid {
self.id
}
/// Check if this node is active
pub fn is_active(&self) -> bool {
!self.gc
}
/// Check if this node is archived
pub fn is_archived(&self) -> bool {
self.gc
}
// Return the node id if archived
pub fn archived(&self) -> Option<Uuid> {
match self.is_archived() {
true => Some(self.id),
false => None,
}
}
// Sets the default id value for old nodes
fn default_id(_revision: u16) -> Uuid {
Uuid::default()
}
// Sets the default heartbeat value for old nodes
fn default_hb(_revision: u16) -> Timestamp {
Timestamp::default()
}
// Sets the default gc value for old nodes
fn default_gc(_revision: u16) -> bool {
true
}
// Converts the old name field into the new node id
fn convert_name(&mut self, _revision: u16, value: String) -> Result<(), Error> {
self.id = Uuid::parse_str(&value).unwrap();
Ok(())
}
// Converts the old heartbeat field into the new hb field
fn convert_heartbeat(&mut self, _revision: u16, value: Timestamp) -> Result<(), Error> {
self.hb = value;
Ok(())
}
}
impl Display for Node {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "NODE {} SEEN {}", self.id, self.hb)?;
match self.gc {
true => write!(f, " ARCHIVED")?,
false => write!(f, " ACTIVE")?,
};
Ok(())
}
}
impl InfoStructure for Node {
fn structure(self) -> Value {
Value::from(map! {
"id".to_string() => Value::from(self.id),
"seen".to_string() => self.hb.structure(),
"active".to_string() => Value::from(!self.gc),
})
}
}
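// For illustration, the INFO structure of an active node produced above is shaped
// roughly like: { id: <uuid>, seen: <timestamp>, active: true }.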
// This struct is meant to represent a timestamp that can be used to partially order
// events in a cluster. It should be derived from a timestamp oracle, such as the
// one available in TiKV via the client `TimestampExt` implementation.
#[revisioned(revision = 1)]
#[derive(
Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize, Ord, PartialOrd, Hash, Store, Default,
Clone, Copy, Default, Debug, Eq, PartialEq, PartialOrd, Deserialize, Serialize, Hash, Store,
)]
#[non_exhaustive]
pub struct Timestamp {
@ -30,62 +119,49 @@ pub struct Timestamp {
}
impl From<u64> for Timestamp {
fn from(ts: u64) -> Self {
fn from(value: u64) -> Self {
Timestamp {
value: ts,
value,
}
}
}
// This struct is to be used only when storing keys as the macro currently
// conflicts when you have Store and Key derive macros.
#[revisioned(revision = 1)]
#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, PartialOrd, Hash, Key)]
#[non_exhaustive]
pub struct KeyTimestamp {
pub value: u64,
}
impl From<&Timestamp> for KeyTimestamp {
fn from(ts: &Timestamp) -> Self {
KeyTimestamp {
value: ts.value,
}
}
}
impl Add<&Duration> for &Timestamp {
impl Add<Duration> for Timestamp {
type Output = Timestamp;
fn add(self, rhs: &Duration) -> Timestamp {
fn add(self, rhs: Duration) -> Self::Output {
Timestamp {
value: self.value + rhs.as_millis() as u64,
value: self.value.wrapping_add(rhs.as_millis() as u64),
}
}
}
impl Sub<&Duration> for &Timestamp {
type Output = Result<Timestamp, Error>;
fn sub(self, rhs: &Duration) -> Self::Output {
let millis = rhs.as_millis() as u64;
if self.value <= millis {
// Removing the duration from this timestamp will cause it to overflow
return Err(TimestampOverflow(format!(
"Failed to subtract {} from {}",
&millis, &self.value
)));
impl Sub<Duration> for Timestamp {
type Output = Timestamp;
fn sub(self, rhs: Duration) -> Self::Output {
Timestamp {
value: self.value.wrapping_sub(rhs.as_millis() as u64),
}
Ok(Timestamp {
value: self.value - millis,
})
}
}
impl Display for Timestamp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.value)
}
}
impl InfoStructure for Timestamp {
fn structure(self) -> Value {
self.value.into()
}
}
#[cfg(test)]
mod test {
use crate::dbs::node::Timestamp;
use crate::sql::Duration;
use chrono::prelude::Utc;
use chrono::TimeZone;
use std::time::Duration;
#[test]
fn timestamps_can_be_added_duration() {
@ -94,10 +170,10 @@ mod test {
value: t.timestamp_millis() as u64,
};
let hour = Duration(core::time::Duration::from_secs(60 * 60));
let ts = &ts + &hour;
let ts = &ts + &hour;
let ts = &ts + &hour;
let hour = Duration::from_secs(60 * 60);
let ts = ts + hour;
let ts = ts + hour;
let ts = ts + hour;
let end_time = Utc.timestamp_millis_opt(ts.value as i64).unwrap();
let expected_end_time = Utc.with_ymd_and_hms(2000, 1, 1, 15, 30, 0).unwrap();
@ -111,10 +187,10 @@ mod test {
value: t.timestamp_millis() as u64,
};
let hour = Duration(core::time::Duration::from_secs(60 * 60));
let ts = (&ts - &hour).unwrap();
let ts = (&ts - &hour).unwrap();
let ts = (&ts - &hour).unwrap();
let hour = Duration::from_secs(60 * 60);
let ts = ts - hour;
let ts = ts - hour;
let ts = ts - hour;
let end_time = Utc.timestamp_millis_opt(ts.value as i64).unwrap();
let expected_end_time = Utc.with_ymd_and_hms(2000, 1, 1, 9, 30, 0).unwrap();

View file

@ -57,16 +57,6 @@ pub enum Force {
Index(Arc<[DefineIndexStatement]>),
}
impl Force {
pub fn is_none(&self) -> bool {
matches!(self, Force::None)
}
pub fn is_forced(&self) -> bool {
!matches!(self, Force::None)
}
}
impl Default for Options {
fn default() -> Self {
Options::new()
@ -111,8 +101,9 @@ impl Options {
// --------------------------------------------------
/// Set all the required options from a single point.
/// The system expects these values to always be set, so this should be called for all
/// instances when there is doubt.
/// The system expects these values to always be set,
/// so this should be called for all instances when
/// there is doubt.
pub fn with_required(
mut self,
node_id: Uuid,
@ -334,21 +325,25 @@ impl Options {
// --------------------------------------------------
/// Get current Node ID
#[inline(always)]
pub fn id(&self) -> Result<Uuid, Error> {
self.id.ok_or(Error::Unreachable("Options::id"))
self.id.ok_or(Error::Unreachable("No Node ID is specified"))
}
/// Get currently selected NS
#[inline(always)]
pub fn ns(&self) -> Result<&str, Error> {
self.ns.as_ref().map(AsRef::as_ref).ok_or(Error::NsEmpty)
}
/// Get currently selected DB
#[inline(always)]
pub fn db(&self) -> Result<&str, Error> {
self.db.as_ref().map(AsRef::as_ref).ok_or(Error::DbEmpty)
}
/// Check whether this request supports realtime queries
#[inline(always)]
pub fn realtime(&self) -> Result<(), Error> {
if !self.live {
return Err(Error::RealtimeDisabled);
@ -357,6 +352,7 @@ impl Options {
}
// Validate Options for Namespace
#[inline(always)]
pub fn valid_for_ns(&self) -> Result<(), Error> {
if self.ns.is_none() {
return Err(Error::NsEmpty);
@ -365,9 +361,11 @@ impl Options {
}
// Validate Options for Database
#[inline(always)]
pub fn valid_for_db(&self) -> Result<(), Error> {
self.valid_for_ns()?;
if self.ns.is_none() {
return Err(Error::NsEmpty);
}
if self.db.is_none() {
return Err(Error::DbEmpty);
}

View file

@ -1,4 +1,4 @@
use crate::cnf::PROCESSOR_BATCH_SIZE;
use crate::cnf::NORMAL_FETCH_SIZE;
use crate::ctx::Context;
#[cfg(not(target_arch = "wasm32"))]
use crate::dbs::distinct::AsyncDistinct;
@ -8,12 +8,12 @@ use crate::err::Error;
use crate::idx::planner::iterators::{CollectorRecord, IteratorRef, ThingIterator};
use crate::idx::planner::IterationStage;
use crate::key::{graph, thing};
use crate::kvs;
use crate::kvs::ScanPage;
use crate::kvs::Transaction;
use crate::sql::dir::Dir;
use crate::sql::{Edges, Range, Table, Thing, Value};
#[cfg(not(target_arch = "wasm32"))]
use channel::Sender;
use futures::StreamExt;
use reblessive::tree::Stk;
use std::ops::Bound;
use std::vec;
@ -150,10 +150,10 @@ impl<'a> Processor<'a> {
self.process_index(stk, ctx, opt, stm, &t, irf).await?
}
Iterable::Mergeable(v, o) => {
self.process_mergeable(stk, ctx, opt, stm, v, o).await?
self.process_mergeable(stk, ctx, opt, stm, (v, o)).await?
}
Iterable::Relatable(f, v, w, o) => {
self.process_relatable(stk, ctx, opt, stm, f, v, w, o).await?
self.process_relatable(stk, ctx, opt, stm, (f, v, w, o)).await?
}
}
}
@ -178,6 +178,27 @@ impl<'a> Processor<'a> {
self.process(stk, ctx, opt, stm, pro).await
}
async fn process_defer(
&mut self,
stk: &mut Stk,
ctx: &Context<'_>,
opt: &Options,
stm: &Statement<'_>,
v: Thing,
) -> Result<(), Error> {
// Check that the table exists
ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
// Process the document record
let pro = Processed {
rid: Some(v),
ir: None,
val: Operable::Value(Value::None),
};
self.process(stk, ctx, opt, stm, pro).await?;
// Everything ok
Ok(())
}
async fn process_thing(
&mut self,
stk: &mut Stk,
@ -187,10 +208,10 @@ impl<'a> Processor<'a> {
v: Thing,
) -> Result<(), Error> {
// Check that the table exists
ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
// Fetch the data from the store
let key = thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
let val = ctx.tx_lock().await.get(key).await?;
let val = ctx.tx().get(key).await?;
// Parse the data from the store
let val = Operable::Value(match val {
Some(v) => Value::from(v),
@ -207,41 +228,19 @@ impl<'a> Processor<'a> {
Ok(())
}
async fn process_defer(
&mut self,
stk: &mut Stk,
ctx: &Context<'_>,
opt: &Options,
stm: &Statement<'_>,
v: Thing,
) -> Result<(), Error> {
// Check that the table exists
ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
// Process the document record
let pro = Processed {
rid: Some(v),
ir: None,
val: Operable::Value(Value::None),
};
self.process(stk, ctx, opt, stm, pro).await?;
// Everything ok
Ok(())
}
async fn process_mergeable(
&mut self,
stk: &mut Stk,
ctx: &Context<'_>,
opt: &Options,
stm: &Statement<'_>,
v: Thing,
o: Value,
(v, o): (Thing, Value),
) -> Result<(), Error> {
// Check that the table exists
ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
// Fetch the data from the store
let key = thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
let val = ctx.tx_lock().await.get(key).await?;
let val = ctx.tx().get(key).await?;
// Parse the data from the store
let x = match val {
Some(v) => Value::from(v),
@ -260,23 +259,19 @@ impl<'a> Processor<'a> {
Ok(())
}
#[allow(clippy::too_many_arguments)]
async fn process_relatable(
&mut self,
stk: &mut Stk,
ctx: &Context<'_>,
opt: &Options,
stm: &Statement<'_>,
f: Thing,
v: Thing,
w: Thing,
o: Option<Value>,
(f, v, w, o): (Thing, Thing, Thing, Option<Value>),
) -> Result<(), Error> {
// Check that the table exists
ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
// Fetch the data from the store
let key = thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
let val = ctx.tx_lock().await.get(key).await?;
let val = ctx.tx().get(key).await?;
// Parse the data from the store
let x = match val {
Some(v) => Value::from(v),
@ -303,33 +298,23 @@ impl<'a> Processor<'a> {
stm: &Statement<'_>,
v: &Table,
) -> Result<(), Error> {
// Get the transaction
let txn = ctx.tx();
// Check that the table exists
ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, v, opt.strict).await?;
txn.check_ns_db_tb(opt.ns()?, opt.db()?, v, opt.strict).await?;
// Prepare the start and end keys
let beg = thing::prefix(opt.ns()?, opt.db()?, v);
let end = thing::suffix(opt.ns()?, opt.db()?, v);
// Loop until no more keys
let mut next_page = Some(ScanPage::from(beg..end));
while let Some(page) = next_page {
// Create a new iterable range
let mut stream = txn.stream(beg..end);
// Loop until no more entries
while let Some(res) = stream.next().await {
// Check if the context is finished
if ctx.is_done() {
break;
}
// Get the next batch of key-value entries
let res = ctx.tx_lock().await.scan_paged(page, PROCESSOR_BATCH_SIZE).await?;
next_page = res.next_page;
let res = res.values;
// If no results then break
if res.is_empty() {
break;
}
// Loop over results
for (k, v) in res.into_iter() {
// Check the context
if ctx.is_done() {
break;
}
// Parse the data from the store
let (k, v) = res?;
let key: thing::Thing = (&k).into();
let val: Value = (&v).into();
let rid = Thing::from((key.tb, key.id));
@ -343,8 +328,6 @@ impl<'a> Processor<'a> {
};
self.process(stk, ctx, opt, stm, pro).await?;
}
continue;
}
// Everything ok
Ok(())
}
@ -357,8 +340,10 @@ impl<'a> Processor<'a> {
stm: &Statement<'_>,
v: Range,
) -> Result<(), Error> {
// Get the transaction
let txn = ctx.tx();
// Check that the table exists
ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
txn.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
// Prepare the range start key
let beg = match &v.beg {
Bound::Unbounded => thing::prefix(opt.ns()?, opt.db()?, &v.tb),
@ -379,28 +364,16 @@ impl<'a> Processor<'a> {
key
}
};
// Loop until no more keys
let mut next_page = Some(ScanPage::from(beg..end));
while let Some(page) = next_page {
// Create a new iterable range
let mut stream = txn.stream(beg..end);
// Loop until no more entries
while let Some(res) = stream.next().await {
// Check if the context is finished
if ctx.is_done() {
break;
}
let res = ctx.tx_lock().await.scan_paged(page, PROCESSOR_BATCH_SIZE).await?;
next_page = res.next_page;
// Get the next batch of key-value entries
let res = res.values;
// If there are key-value entries then fetch them
if res.is_empty() {
break;
}
// Loop over results
for (k, v) in res.into_iter() {
// Check the context
if ctx.is_done() {
break;
}
// Parse the data from the store
let (k, v) = res?;
let key: thing::Thing = (&k).into();
let val: Value = (&v).into();
let rid = Thing::from((key.tb, key.id));
@ -414,8 +387,6 @@ impl<'a> Processor<'a> {
};
self.process(stk, ctx, opt, stm, pro).await?;
}
continue;
}
// Everything ok
Ok(())
}
@ -496,34 +467,27 @@ impl<'a> Processor<'a> {
.collect::<Vec<_>>(),
},
};
//
for (beg, end) in keys.iter() {
// Loop until no more keys
let mut next_page = Some(ScanPage::from(beg.clone()..end.clone()));
while let Some(page) = next_page {
// Get the transaction
let txn = ctx.tx();
// Check that the table exists
txn.check_ns_db_tb(opt.ns()?, opt.db()?, tb, opt.strict).await?;
// Loop over the chosen edge types
for (beg, end) in keys.into_iter() {
// Create a new iterable range
let mut stream = txn.stream(beg..end);
// Loop until no more entries
while let Some(res) = stream.next().await {
// Check if the context is finished
if ctx.is_done() {
break;
}
// Get the next batch of key-value entries
let res = ctx.tx_lock().await.scan_paged(page, PROCESSOR_BATCH_SIZE).await?;
next_page = res.next_page;
let res = res.values;
// If there are key-value entries then fetch them
if res.is_empty() {
break;
}
// Loop over results
for (k, _) in res.into_iter() {
// Check the context
if ctx.is_done() {
break;
}
// Parse the key from the result
let key = res?.0;
// Parse the data from the store
let gra: graph::Graph = graph::Graph::decode(&k)?;
let gra: graph::Graph = graph::Graph::decode(&key)?;
// Fetch the data from the store
let key = thing::new(opt.ns()?, opt.db()?, gra.ft, &gra.fk);
let val = ctx.tx_lock().await.get(key).await?;
let val = txn.get(key).await?;
let rid = Thing::from((gra.ft, gra.fk));
// Parse the data from the store
let val = Operable::Value(match val {
@ -538,8 +502,6 @@ impl<'a> Processor<'a> {
};
self.process(stk, ctx, opt, stm, pro).await?;
}
continue;
}
}
// Everything ok
Ok(())
@ -555,7 +517,7 @@ impl<'a> Processor<'a> {
irf: IteratorRef,
) -> Result<(), Error> {
// Check that the table exists
ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &table.0, opt.strict).await?;
ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &table.0, opt.strict).await?;
if let Some(exe) = ctx.get_query_executor() {
if let Some(mut iterator) = exe.new_iterator(opt, irf).await? {
// Get the first batch
@ -592,9 +554,9 @@ impl<'a> Processor<'a> {
opt: &Options,
iterator: &mut ThingIterator,
) -> Result<Vec<Processed>, Error> {
let mut tx = ctx.tx_lock().await;
let txn = ctx.tx();
let records: Vec<CollectorRecord> =
iterator.next_batch(ctx, &mut tx, PROCESSOR_BATCH_SIZE).await?;
iterator.next_batch(ctx, &txn, *NORMAL_FETCH_SIZE).await?;
let mut to_process = Vec::with_capacity(records.len());
for r in records {
let v = if let Some(v) = r.2 {
@ -602,7 +564,7 @@ impl<'a> Processor<'a> {
v
} else {
// Otherwise we have to fetch the record
Iterable::fetch_thing(&mut tx, opt, &r.0).await?
Iterable::fetch_thing(&txn, opt, &r.0).await?
};
let p = Processed {
rid: Some(r.0),
@ -618,14 +580,14 @@ impl<'a> Processor<'a> {
impl Iterable {
/// Returns the value from the store, or Value::None if the value does not exist.
pub(crate) async fn fetch_thing(
tx: &mut kvs::Transaction,
txn: &Transaction,
opt: &Options,
thg: &Thing,
) -> Result<Value, Error> {
// Fetch the data from the store
let key = thing::new(opt.ns()?, opt.db()?, &thg.tb, &thg.id);
// Fetch and parse the data from the store
let val = tx.get(key).await?.map(Value::from).unwrap_or(Value::None);
let val = txn.get(key).await?.map(Value::from).unwrap_or(Value::None);
// Return the result
Ok(val)
}
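As a rough sketch of the streaming pattern that replaces the paged scans above (illustrative only; assumes a `Context` named `ctx` with a transaction attached, and `beg`/`end` key bounds for a single table):
// Stream a key range instead of fetching pages in a loop.
let txn = ctx.tx();
let mut stream = txn.stream(beg..end);
while let Some(res) = stream.next().await {
    // Stop early if the surrounding context has been cancelled
    if ctx.is_done() {
        break;
    }
    // Each item is a Result over a single key-value pair
    let (k, v) = res?;
    // ... decode the key and value, then process the record ...
}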

View file

@ -8,7 +8,8 @@ use std::sync::Arc;
pub async fn mock<'a>() -> (Context<'a>, Options) {
let opt = Options::default().with_auth(Arc::new(Auth::for_root(Role::Owner)));
let kvs = Datastore::new("memory").await.unwrap();
let txn = kvs.transaction(Write, Optimistic).await.unwrap().rollback_and_ignore().enclose();
let ctx = Context::default().set_transaction(txn);
let txn = kvs.transaction(Write, Optimistic).await.unwrap();
let txn = txn.rollback_and_ignore().await.enclose();
let ctx = Context::default().with_transaction(txn);
(ctx, opt)
}

View file

@ -1,5 +0,0 @@
use crate::kvs;
use futures::lock::Mutex;
use std::sync::Arc;
pub(crate) type Transaction = Arc<Mutex<kvs::Transaction>>;

View file

@ -15,23 +15,20 @@ impl<'a> Document<'a> {
if !self.changed() {
return Ok(());
}
//
// Get the table
let tb = self.tb(ctx, opt).await?;
// Claim transaction
let mut run = ctx.tx_lock().await;
// Get the transaction
let txn = ctx.tx();
// Get the database and the table for the record
let db = run.add_and_cache_db(opt.ns()?, opt.db()?, opt.strict).await?;
let db = txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?;
// Check if changefeeds are enabled
if let Some(cf) = db.as_ref().changefeed.as_ref().or(tb.as_ref().changefeed.as_ref()) {
// Get the arguments
let tb = tb.name.as_str();
let id = self.id.as_ref().unwrap();
// Create the changefeed entry
run.record_change(
txn.lock().await.record_change(
opt.ns()?,
opt.db()?,
tb,
id,
tb.name.as_str(),
self.id.unwrap(),
self.initial.doc.clone(),
self.current.doc.clone(),
cf.store_diff,

View file

@ -52,7 +52,7 @@ impl<'a> Document<'a> {
Err(Error::RetryWithId(v)) => {
// Fetch the data from the store
let key = crate::key::thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
let val = ctx.tx_lock().await.get(key).await?;
let val = ctx.tx().get(key).await?;
// Parse the data from the store
let val = match val {
Some(v) => Value::from(v),

View file

@ -93,25 +93,6 @@ impl<'a> Document<'a> {
}
}
/// Create a new document that is not going through the standard lifecycle of documents
///
/// This allows for it to be crafted without needing statements to operate on it
#[doc(hidden)]
pub fn new_artificial(
id: Option<&'a Thing>,
ir: Option<&'a IteratorRecord>,
val: Cow<'a, Value>,
initial: Cow<'a, Value>,
extras: Workable,
) -> Self {
Document {
id,
extras,
current: CursorDoc::new(id, ir, val),
initial: CursorDoc::new(id, ir, initial),
}
}
/// Get the current document, as it is being modified
#[allow(unused)]
pub(crate) fn current_doc(&self) -> &Value {
@ -136,23 +117,18 @@ impl<'a> Document<'a> {
self.initial.doc.is_none() && self.current.doc.is_some()
}
/// Check if document is being deleted
pub fn is_delete(&self) -> bool {
self.current.doc.is_none()
}
/// Get the table for this document
pub async fn tb(
&self,
ctx: &Context<'a>,
opt: &Options,
) -> Result<Arc<DefineTableStatement>, Error> {
// Claim transaction
let mut run = ctx.tx_lock().await;
// Get transaction
let txn = ctx.tx();
// Get the record id
let rid = self.id.as_ref().unwrap();
// Get the table definition
let tb = run.get_and_cache_tb(opt.ns()?, opt.db()?, &rid.tb).await;
let tb = txn.get_tb(opt.ns()?, opt.db()?, &rid.tb).await;
// Return the table or attempt to define it
match tb {
// The table doesn't exist
@ -162,9 +138,7 @@ impl<'a> Document<'a> {
// Allowed to run?
opt.is_allowed(Action::Edit, ResourceKind::Table, &Base::Db)?;
// We can create the table automatically
run.add_and_cache_ns(opt.ns()?, opt.strict).await?;
run.add_and_cache_db(opt.ns()?, opt.db()?, opt.strict).await?;
run.add_and_cache_tb(opt.ns()?, opt.db()?, &rid.tb, opt.strict).await
txn.ensure_ns_db_tb(opt.ns()?, opt.db()?, &rid.tb, opt.strict).await
}
// There was an error
Err(err) => Err(err),
@ -181,7 +155,7 @@ impl<'a> Document<'a> {
// Get the record id
let id = self.id.as_ref().unwrap();
// Get the table definitions
ctx.tx_lock().await.all_tb_views(opt.ns()?, opt.db()?, &id.tb).await
ctx.tx().all_tb_views(opt.ns()?, opt.db()?, &id.tb).await
}
/// Get the events for this document
pub async fn ev(
@ -192,7 +166,7 @@ impl<'a> Document<'a> {
// Get the record id
let id = self.id.as_ref().unwrap();
// Get the event definitions
ctx.tx_lock().await.all_tb_events(opt.ns()?, opt.db()?, &id.tb).await
ctx.tx().all_tb_events(opt.ns()?, opt.db()?, &id.tb).await
}
/// Get the fields for this document
pub async fn fd(
@ -203,7 +177,7 @@ impl<'a> Document<'a> {
// Get the record id
let id = self.id.as_ref().unwrap();
// Get the field definitions
ctx.tx_lock().await.all_tb_fields(opt.ns()?, opt.db()?, &id.tb).await
ctx.tx().all_tb_fields(opt.ns()?, opt.db()?, &id.tb).await
}
/// Get the indexes for this document
pub async fn ix(
@ -214,7 +188,7 @@ impl<'a> Document<'a> {
// Get the record id
let id = self.id.as_ref().unwrap();
// Get the index definitions
ctx.tx_lock().await.all_tb_indexes(opt.ns()?, opt.db()?, &id.tb).await
ctx.tx().all_tb_indexes(opt.ns()?, opt.db()?, &id.tb).await
}
// Get the lives for this document
pub async fn lv(
@ -225,6 +199,6 @@ impl<'a> Document<'a> {
// Get the record id
let id = self.id.as_ref().unwrap();
// Get the table definition
ctx.tx_lock().await.all_tb_lives(opt.ns()?, opt.db()?, &id.tb).await
ctx.tx().all_tb_lives(opt.ns()?, opt.db()?, &id.tb).await
}
}
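A small illustrative sketch (not part of the diff; `rid` is a placeholder record id) of how the definition lookups above now all go through the shared transaction on the context:
// All metadata getters are reached via ctx.tx().
let txn = ctx.tx();
let fields = txn.all_tb_fields(opt.ns()?, opt.db()?, &rid.tb).await?;
let events = txn.all_tb_events(opt.ns()?, opt.db()?, &rid.tb).await?;
let indexes = txn.all_tb_indexes(opt.ns()?, opt.db()?, &rid.tb).await?;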

View file

@ -21,8 +21,10 @@ impl<'a> Document<'a> {
if self.tb(ctx, opt).await?.drop {
return Ok(());
}
// Claim transaction
let mut run = ctx.tx_lock().await;
// Get the transaction
let txn = ctx.tx();
// Lock the transaction
let mut txn = txn.lock().await;
// Get the record id
let rid = self.id.as_ref().unwrap();
// Store the record edges
@ -31,16 +33,16 @@ impl<'a> Document<'a> {
let (ref o, ref i) = (Dir::Out, Dir::In);
// Store the left pointer edge
let key = crate::key::graph::new(opt.ns()?, opt.db()?, &l.tb, &l.id, o, rid);
run.set(key, vec![]).await?;
txn.set(key, vec![]).await?;
// Store the left inner edge
let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, i, l);
run.set(key, vec![]).await?;
txn.set(key, vec![]).await?;
// Store the right inner edge
let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, o, r);
run.set(key, vec![]).await?;
txn.set(key, vec![]).await?;
// Store the right pointer edge
let key = crate::key::graph::new(opt.ns()?, opt.db()?, &r.tb, &r.id, i, rid);
run.set(key, vec![]).await?;
txn.set(key, vec![]).await?;
// Store the edges on the record
self.current.doc.to_mut().put(&*EDGE, Value::Bool(true));
self.current.doc.to_mut().put(&*IN, l.clone().into());

View file

@ -280,13 +280,16 @@ impl<'a> IndexOperation<'a> {
}
async fn index_unique(&mut self, ctx: &Context<'_>) -> Result<(), Error> {
let mut run = ctx.tx_lock().await;
// Get the transaction
let txn = ctx.tx();
// Lock the transaction
let mut txn = txn.lock().await;
// Delete the old index data
if let Some(o) = self.o.take() {
let i = Indexable::new(o, self.ix);
for o in i {
let key = self.get_unique_index_key(&o)?;
match run.delc(key, Some(self.rid)).await {
match txn.delc(key, Some(self.rid)).await {
Err(Error::TxConditionNotMet) => Ok(()),
Err(e) => Err(e),
Ok(v) => Ok(v),
@ -299,9 +302,9 @@ impl<'a> IndexOperation<'a> {
for n in i {
if !n.is_all_none_or_null() {
let key = self.get_unique_index_key(&n)?;
if run.putc(key, self.rid, None).await.is_err() {
if txn.putc(key, self.rid, None).await.is_err() {
let key = self.get_unique_index_key(&n)?;
let val = run.get(key).await?.unwrap();
let val = txn.get(key).await?.unwrap();
let rid: Thing = val.into();
return self.err_index_exists(rid, n);
}
@ -312,13 +315,16 @@ impl<'a> IndexOperation<'a> {
}
async fn index_non_unique(&mut self, ctx: &Context<'_>) -> Result<(), Error> {
let mut run = ctx.tx_lock().await;
// Get the transaction
let txn = ctx.tx();
// Lock the transaction
let mut txn = txn.lock().await;
// Delete the old index data
if let Some(o) = self.o.take() {
let i = Indexable::new(o, self.ix);
for o in i {
let key = self.get_non_unique_index_key(&o)?;
match run.delc(key, Some(self.rid)).await {
match txn.delc(key, Some(self.rid)).await {
Err(Error::TxConditionNotMet) => Ok(()),
Err(e) => Err(e),
Ok(v) => Ok(v),
@ -330,9 +336,9 @@ impl<'a> IndexOperation<'a> {
let i = Indexable::new(n, self.ix);
for n in i {
let key = self.get_non_unique_index_key(&n)?;
if run.putc(key, self.rid, None).await.is_err() {
if txn.putc(key, self.rid, None).await.is_err() {
let key = self.get_non_unique_index_key(&n)?;
let val = run.get(key).await?.unwrap();
let val = txn.get(key).await?.unwrap();
let rid: Thing = val.into();
return self.err_index_exists(rid, n);
}
@ -376,20 +382,19 @@ impl<'a> IndexOperation<'a> {
ctx: &Context<'_>,
p: &MTreeParams,
) -> Result<(), Error> {
let mut tx = ctx.tx_lock().await;
let txn = ctx.tx();
let ikb = IndexKeyBase::new(self.opt.ns()?, self.opt.db()?, self.ix)?;
let mut mt =
MTreeIndex::new(ctx.get_index_stores(), &mut tx, ikb, p, TransactionType::Write)
.await?;
MTreeIndex::new(ctx.get_index_stores(), &txn, ikb, p, TransactionType::Write).await?;
// Delete the old index data
if let Some(o) = self.o.take() {
mt.remove_document(stk, &mut tx, self.rid, &o).await?;
mt.remove_document(stk, &txn, self.rid, &o).await?;
}
// Create the new index data
if let Some(n) = self.n.take() {
mt.index_document(stk, &mut tx, self.rid, &n).await?;
mt.index_document(stk, &txn, self.rid, &n).await?;
}
mt.finish(&mut tx).await
mt.finish(&txn).await
}
async fn index_hnsw(&mut self, ctx: &Context<'_>, p: &HnswParams) -> Result<(), Error> {

View file

@ -6,19 +6,15 @@ use crate::dbs::Statement;
use crate::doc::CursorDoc;
use crate::doc::Document;
use crate::err::Error;
use crate::fflags::FFLAGS;
use crate::sql::paths::AC;
use crate::sql::paths::META;
use crate::sql::paths::RD;
use crate::sql::paths::TK;
use crate::sql::permission::Permission;
use crate::sql::statements::LiveStatement;
use crate::sql::Value;
use channel::Sender;
use reblessive::tree::Stk;
use std::ops::Deref;
use std::sync::Arc;
use uuid::Uuid;
impl<'a> Document<'a> {
pub async fn lives(
@ -28,27 +24,145 @@ impl<'a> Document<'a> {
opt: &Options,
stm: &Statement<'_>,
) -> Result<(), Error> {
// Check import
if opt.import {
return Ok(());
}
// Check if changed
if !self.changed() {
return Ok(());
}
// Under the new mechanism, live query notifications only come from polling the change feed
// This check can be moved up the call stack, as this entire method will become unnecessary
if FFLAGS.change_feed_live_queries.enabled() {
return Ok(());
}
// Check if we can send notifications
if let Some(chn) = &opt.sender {
// Get all live queries for this table
let lvs = self.lv(ctx, opt).await?;
// Loop through all index statements
let lq_stms = self.lv(ctx, opt).await?;
let borrows = lq_stms.iter().collect::<Vec<_>>();
self.check_lqs_and_send_notifications(stk, ctx, opt, stm, borrows.as_slice(), chn)
for lv in lvs.iter() {
// Create a new statement
let lq = Statement::from(lv);
// Get the event action
let met = if stm.is_delete() {
Value::from("DELETE")
} else if self.is_new() {
Value::from("CREATE")
} else {
Value::from("UPDATE")
};
// Check if this is a delete statement
let doc = match stm.is_delete() {
true => &self.initial,
false => &self.current,
};
// Ensure that a session exists on the LIVE query
let sess = match lv.session.as_ref() {
Some(v) => v,
None => continue,
};
// Ensure that auth info exists on the LIVE query
let auth = match lv.auth.clone() {
Some(v) => v,
None => continue,
};
// We need to create a new context which we will
// use for processing this LIVE query statement.
// This ensures that we are using the session
// of the user who created the LIVE query.
let mut lqctx = Context::background();
// Set the current transaction on the new LIVE
// query context to prevent unreachable behaviour
// and ensure that queries can be executed.
lqctx.set_transaction(ctx.tx());
// Add the session params to this LIVE query, so
// that queries can use these within field
// projections and WHERE clauses.
lqctx.add_value("access", sess.pick(AC.as_ref()));
lqctx.add_value("auth", sess.pick(RD.as_ref()));
lqctx.add_value("token", sess.pick(TK.as_ref()));
lqctx.add_value("session", sess);
// Add $before, $after, $value, and $event params
// to this LIVE query so the user can use these
// within field projections and WHERE clauses.
lqctx.add_value("event", met);
lqctx.add_value("value", self.current.doc.deref());
lqctx.add_value("after", self.current.doc.deref());
lqctx.add_value("before", self.initial.doc.deref());
// We need to create a new options which we will
// use for processing this LIVE query statement.
// This ensures that we are using the auth data
// of the user who created the LIVE query.
let lqopt = opt.new_with_perms(true).with_auth(Arc::from(auth));
// First of all, let's check to see if the WHERE
// clause of the LIVE query is matched by this
// document. If it is then we can continue.
match self.lq_check(stk, &lqctx, &lqopt, &lq, doc).await {
Err(Error::Ignore) => continue,
Err(e) => return Err(e),
Ok(_) => (),
}
// Secondly, let's check to see if any PERMISSIONS
// clause for this table allows this document to
// be viewed by the user who created this LIVE
// query. If it does, then we can continue.
match self.lq_allow(stk, &lqctx, &lqopt, &lq, doc).await {
Err(Error::Ignore) => continue,
Err(e) => return Err(e),
Ok(_) => (),
}
// Finally, let's check what type of statement
// caused this LIVE query to run, and send the
// relevant notification based on the statement.
if stm.is_delete() {
// Send a DELETE notification
if opt.id()? == lv.node.0 {
chn.send(Notification {
id: lv.id,
action: Action::Delete,
result: {
// Ensure futures are run
let lqopt: &Options = &lqopt.new_with_futures(true);
// Output the full document before any changes were applied
let mut value =
doc.doc.compute(stk, &lqctx, lqopt, Some(doc)).await?;
// Remove metadata fields on output
value.del(stk, &lqctx, lqopt, &*META).await?;
// Output result
value
},
})
.await?;
} else {
// TODO: Send to message broker
}
} else if self.is_new() {
// Send a CREATE notification
if opt.id()? == lv.node.0 {
chn.send(Notification {
id: lv.id,
action: Action::Create,
result: self.pluck(stk, &lqctx, &lqopt, &lq).await?,
})
.await?;
} else {
// TODO: Send to message broker
}
} else {
// Send an UPDATE notification
if opt.id()? == lv.node.0 {
chn.send(Notification {
id: lv.id,
action: Action::Update,
result: self.pluck(stk, &lqctx, &lqopt, &lq).await?,
})
.await?;
} else {
// TODO: Send to message broker
}
};
}
}
// Carry on
Ok(())
}
/// Check the WHERE clause for a LIVE query
async fn lq_check(
&self,
@ -69,7 +183,6 @@ impl<'a> Document<'a> {
// Carry on
Ok(())
}
/// Check any PERMISSIONS for a LIVE query
async fn lq_allow(
&self,
@ -100,176 +213,4 @@ impl<'a> Document<'a> {
// Carry on
Ok(())
}
/// Process live query for notifications
pub(crate) async fn check_lqs_and_send_notifications(
&self,
stk: &mut Stk,
ctx: &Context<'_>,
opt: &Options,
stm: &Statement<'_>,
live_statements: &[&LiveStatement],
sender: &Sender<Notification>,
) -> Result<(), Error> {
trace!(
"Called check_lqs_and_send_notifications with {} live statements",
live_statements.len()
);
// Technically this isn't the condition - the `lives` function is passing in the currently evaluated statement
// but the ds.rs invocation of this function is reconstructing this statement
let is_delete = match FFLAGS.change_feed_live_queries.enabled() {
true => self.is_delete(),
false => stm.is_delete(),
};
for lv in live_statements {
// Create a new statement
let lq = Statement::from(*lv);
// Get the event action
let evt = if stm.is_delete() {
Value::from("DELETE")
} else if self.is_new() {
Value::from("CREATE")
} else {
Value::from("UPDATE")
};
// Check if this is a delete statement
let doc = match is_delete {
true => &self.initial,
false => &self.current,
};
// Ensure that a session exists on the LIVE query
let sess = match lv.session.as_ref() {
Some(v) => v,
None => {
trace!("live query did not have a session, skipping");
continue;
}
};
// Ensure that auth info exists on the LIVE query
let auth = match lv.auth.clone() {
Some(v) => v,
None => {
trace!("live query did not have auth info, skipping");
continue;
}
};
// We need to create a new context which we will
// use for processing this LIVE query statement.
// This ensures that we are using the session
// of the user who created the LIVE query.
let lqctx = Context::background();
let mut lqctx =
lqctx.set_transaction(ctx.get_transaction().cloned().unwrap_or_else(|| {
unreachable!("Expected transaction to be available in parent context")
}));
lqctx.add_value("access", sess.pick(AC.as_ref()));
lqctx.add_value("auth", sess.pick(RD.as_ref()));
lqctx.add_value("token", sess.pick(TK.as_ref()));
lqctx.add_value("session", sess);
// We need to create a new options which we will
// use for processing this LIVE query statement.
// This ensures that we are using the auth data
// of the user who created the LIVE query.
let lqopt = opt.new_with_perms(true).with_auth(Arc::from(auth));
// Add $before, $after, $value, and $event params
// to this LIVE query so that the user can use these
// within field projections and WHERE clauses.
lqctx.add_value("event", evt);
lqctx.add_value("value", self.current.doc.deref());
lqctx.add_value("after", self.current.doc.deref());
lqctx.add_value("before", self.initial.doc.deref());
// First of all, let's check to see if the WHERE
// clause of the LIVE query is matched by this
// document. If it is then we can continue.
match self.lq_check(stk, &lqctx, &lqopt, &lq, doc).await {
Err(Error::Ignore) => {
trace!("live query did not match the where clause, skipping");
continue;
}
Err(e) => return Err(e),
Ok(_) => (),
}
// Secondly, let's check to see if any PERMISSIONS
// clause for this table allows this document to
// be viewed by the user who created this LIVE
// query. If it does, then we can continue.
match self.lq_allow(stk, &lqctx, &lqopt, &lq, doc).await {
Err(Error::Ignore) => {
trace!("live query did not have permission to view this document, skipping");
continue;
}
Err(e) => return Err(e),
Ok(_) => (),
}
// Finally, let's check what type of statement
// caused this LIVE query to run, and send the
// relevant notification based on the statement.
let default_node_id = Uuid::default();
let node_id = opt.id().unwrap_or(default_node_id);
// This bool is deprecated since lq v2 on cf
// We check against defaults because clients register live queries with their local node id
// But the cf scanner uses the server node id, which is different from the client
let node_matches_live_query =
node_id == default_node_id || lv.node.0 == default_node_id || node_id == lv.node.0;
trace!(
"Notification node matches live query: {} ({} != {})",
node_matches_live_query,
node_id,
lv.node.0
);
if is_delete {
// Send a DELETE notification
if node_matches_live_query {
sender
.send(Notification {
id: lv.id,
action: Action::Delete,
result: {
// Ensure futures are run
let lqopt: &Options = &lqopt.new_with_futures(true);
// Output the full document before any changes were applied
let mut value =
doc.doc.compute(stk, &lqctx, lqopt, Some(doc)).await?;
// TODO(SUR-349): We need an empty object instead of Value::None for serialisation
if value.is_none() {
value = Value::Object(Default::default());
}
// Remove metadata fields on output
value.del(stk, &lqctx, lqopt, &*META).await?;
// Output result
value
},
})
.await?;
}
} else if self.is_new() {
// Send a CREATE notification
if node_matches_live_query {
trace!("Sending lq create notification");
sender
.send(Notification {
id: lv.id,
action: Action::Create,
result: self.pluck(stk, &lqctx, &lqopt, &lq).await?,
})
.await?;
}
} else {
// Send an UPDATE notification
if node_matches_live_query {
trace!("Sending lq update notification");
sender
.send(Notification {
id: lv.id,
action: Action::Update,
result: self.pluck(stk, &lqctx, &lqopt, &lq).await?,
})
.await?;
}
};
}
trace!("Ended check_lqs_and_send_notifications");
Ok(())
}
}
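For clarity, a minimal sketch of the per-query context setup that the comments above describe (illustrative only; `sess`, `auth` and `met` are placeholders for the session value, the auth data and the event action):
// Run each LIVE query with the session and auth of the user who created it.
let mut lqctx = Context::background();
lqctx.set_transaction(ctx.tx());
lqctx.add_value("session", sess);
lqctx.add_value("event", met);
let lqopt = opt.new_with_perms(true).with_auth(Arc::from(auth));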

View file

@ -46,7 +46,7 @@ impl<'a> Document<'a> {
Err(Error::RetryWithId(v)) => {
// Fetch the data from the store
let key = crate::key::thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
let val = ctx.tx_lock().await.get(key).await?;
let val = ctx.tx().get(key).await?;
// Parse the data from the store
let val = match val {
Some(v) => Value::from(v),

View file

@ -25,13 +25,15 @@ impl<'a> Document<'a> {
if !self.changed() {
return Ok(());
}
// Claim transaction
let mut run = ctx.tx_lock().await;
// Get the transaction
let txn = ctx.tx();
// Lock the transaction
let mut txn = txn.lock().await;
// Get the record id
if let Some(rid) = self.id {
// Purge the record data
let key = crate::key::thing::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id);
run.del(key).await?;
txn.del(key).await?;
// Purge the record edges
match (
self.initial.doc.pick(&*EDGE),
@ -43,20 +45,20 @@ impl<'a> Document<'a> {
let (ref o, ref i) = (Dir::Out, Dir::In);
// Purge the left pointer edge
let key = crate::key::graph::new(opt.ns()?, opt.db()?, &l.tb, &l.id, o, rid);
run.del(key).await?;
txn.del(key).await?;
// Purge the left inner edge
let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, i, l);
run.del(key).await?;
txn.del(key).await?;
// Purge the right inner edge
let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, o, r);
run.del(key).await?;
txn.del(key).await?;
// Purge the right pointer edge
let key = crate::key::graph::new(opt.ns()?, opt.db()?, &r.tb, &r.id, i, rid);
run.del(key).await?;
txn.del(key).await?;
}
_ => {
// Release the transaction
drop(run);
drop(txn);
// Setup the delete statement
let stm = DeleteStatement {
what: Values(vec![Value::from(Edges {

View file

@ -3,7 +3,6 @@ use crate::dbs::Options;
use crate::dbs::Statement;
use crate::doc::Document;
use crate::err::Error;
use crate::key::key_req::KeyRequirements;
impl<'a> Document<'a> {
pub async fn store(
@ -20,18 +19,18 @@ impl<'a> Document<'a> {
if self.tb(ctx, opt).await?.drop {
return Ok(());
}
// Claim transaction
let mut run = ctx.tx_lock().await;
// Get the transaction
let txn = ctx.tx();
// Get the record id
let rid = self.id.as_ref().unwrap();
// Store the record data
let key = crate::key::thing::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id);
//
// Match the statement type
match stm {
// This is a CREATE statement so try to insert the key
Statement::Create(_) => match run.put(key.key_category(), key, self).await {
Statement::Create(_) => match txn.put(key, self).await {
// The key already exists, so return an error
Err(Error::TxKeyAlreadyExistsCategory(_)) => Err(Error::RecordExists {
Err(Error::TxKeyAlreadyExists) => Err(Error::RecordExists {
thing: rid.to_string(),
}),
// Return any other received error
@ -40,7 +39,7 @@ impl<'a> Document<'a> {
Ok(v) => Ok(v),
},
// This is not a CREATE statement, so update the key
_ => run.set(key, self).await,
_ => txn.set(key, self).await,
}?;
// Carry on
Ok(())
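A hedged sketch of the create-versus-update distinction above (illustrative only; `key` and `doc` are placeholders): `put` refuses to overwrite an existing key, while `set` writes unconditionally.
// CREATE: put() fails with TxKeyAlreadyExists if the record already exists.
match txn.put(key, doc).await {
    Err(Error::TxKeyAlreadyExists) => { /* surface as Error::RecordExists */ }
    Err(e) => return Err(e),
    Ok(_) => { /* record created */ }
}
// Any other statement type simply upserts the record:
// txn.set(key, doc).await?;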

View file

@ -1,7 +1,6 @@
use crate::iam::Error as IamError;
use crate::idx::ft::MatchRef;
use crate::idx::trees::vector::SharedVector;
use crate::key::error::KeyCategory;
use crate::sql::idiom::Idiom;
use crate::sql::index::Distance;
use crate::sql::thing::Thing;
@ -92,7 +91,6 @@ pub enum Error {
/// The key being inserted in the transaction already exists
#[error("The key being inserted already exists")]
#[deprecated(note = "Use TxKeyAlreadyExistsCategory")]
TxKeyAlreadyExists,
/// The key exceeds a limit set by the KV store
@ -388,6 +386,12 @@ pub enum Error {
value: String,
},
/// The requested record does not exist
#[error("The record '{value}' does not exist")]
IdNotFound {
value: String,
},
#[error("Unsupported distance: {0}")]
UnsupportedDistance(Distance),
@ -810,10 +814,6 @@ pub enum Error {
#[error("Auth token is missing the '{0}' claim")]
MissingTokenClaim(String),
/// The key being inserted in the transaction already exists
#[error("The key being inserted already exists: {0}")]
TxKeyAlreadyExistsCategory(KeyCategory),
/// The db is running without an available storage engine
#[error("The db is running without an available storage engine")]
MissingStorageEngine,
@ -921,10 +921,6 @@ pub enum Error {
#[error("A node task has failed: {0}")]
NodeAgent(&'static str),
/// An error related to live query occurred
#[error("Failed to process Live Query: {0}")]
LiveQueryError(LiveQueryCause),
/// The supplied type could not be serialized into `sql::Value`
#[error("Serialization error: {0}")]
Serialization(String),
@ -1041,9 +1037,7 @@ impl From<regex::Error> for Error {
impl From<echodb::err::Error> for Error {
fn from(e: echodb::err::Error) -> Error {
match e {
echodb::err::Error::KeyAlreadyExists => {
Error::TxKeyAlreadyExistsCategory(crate::key::error::KeyCategory::Unknown)
}
echodb::err::Error::KeyAlreadyExists => Error::TxKeyAlreadyExists,
echodb::err::Error::ValNotExpectedValue => Error::TxConditionNotMet,
_ => Error::Tx(e.to_string()),
}
@ -1054,9 +1048,7 @@ impl From<echodb::err::Error> for Error {
impl From<indxdb::err::Error> for Error {
fn from(e: indxdb::err::Error) -> Error {
match e {
indxdb::err::Error::KeyAlreadyExists => {
Error::TxKeyAlreadyExistsCategory(crate::key::error::KeyCategory::Unknown)
}
indxdb::err::Error::KeyAlreadyExists => Error::TxKeyAlreadyExists,
indxdb::err::Error::ValNotExpectedValue => Error::TxConditionNotMet,
_ => Error::Tx(e.to_string()),
}
@ -1067,9 +1059,7 @@ impl From<indxdb::err::Error> for Error {
impl From<tikv::Error> for Error {
fn from(e: tikv::Error) -> Error {
match e {
tikv::Error::DuplicateKeyInsertion => {
Error::TxKeyAlreadyExistsCategory(crate::key::error::KeyCategory::Unknown)
}
tikv::Error::DuplicateKeyInsertion => Error::TxKeyAlreadyExists,
tikv::Error::KeyError(ke) if ke.abort.contains("KeyTooLarge") => Error::TxKeyTooLarge,
tikv::Error::RegionError(re) if re.raft_entry_too_large.is_some() => Error::TxTooLarge,
_ => Error::Tx(e.to_string()),
@ -1091,6 +1081,20 @@ impl From<surrealkv::Error> for Error {
}
}
#[cfg(feature = "kv-fdb")]
impl From<foundationdb::FdbError> for Error {
fn from(e: foundationdb::FdbError) -> Error {
Error::Ds(e.to_string())
}
}
#[cfg(feature = "kv-fdb")]
impl From<foundationdb::TransactionCommitError> for Error {
fn from(e: foundationdb::TransactionCommitError) -> Error {
Error::Tx(e.to_string())
}
}
impl From<channel::RecvError> for Error {
fn from(e: channel::RecvError) -> Error {
Error::Channel(e.to_string())
@ -1136,14 +1140,3 @@ impl Serialize for Error {
serializer.serialize_str(self.to_string().as_str())
}
}
#[derive(Error, Debug)]
#[non_exhaustive]
pub enum LiveQueryCause {
#[doc(hidden)]
#[error("The Live Query must have a change feed for it it work")]
MissingChangeFeed,
#[doc(hidden)]
#[error("The Live Query must have a change feed that includes relative changes")]
ChangeFeedNoOriginal,
}

View file

@ -32,17 +32,17 @@ where
I::Item: TryFuture,
{
#[cfg(target_arch = "wasm32")]
const LIMIT: usize = 1;
let limit: usize = 1;
#[cfg(not(target_arch = "wasm32"))]
const LIMIT: usize = crate::cnf::MAX_CONCURRENT_TASKS;
let limit: usize = *crate::cnf::MAX_CONCURRENT_TASKS;
let mut input = iter.into_iter();
let (lo, hi) = input.size_hint();
let initial_capacity = hi.unwrap_or(lo);
let mut active = FuturesOrdered::new();
while active.len() < LIMIT {
while active.len() < limit {
if let Some(next) = input.next() {
active.push_back(TryFutureExt::into_future(next));
} else {

View file

@ -13,8 +13,8 @@ pub async fn analyze(
(az, val): (Value, Value),
) -> Result<Value, Error> {
if let (Some(opt), Value::Strand(az), Value::Strand(val)) = (opt, az, val) {
let az: Analyzer =
ctx.tx_lock().await.get_db_analyzer(opt.ns()?, opt.db()?, az.as_str()).await?.into();
// TODO: @emmanuel-keller this `into()` is expensive and clones the value
let az: Analyzer = ctx.tx().get_db_analyzer(opt.ns()?, opt.db()?, &az).await?.into();
az.analyze(stk, ctx, opt, val.0).await
} else {
Ok(Value::None)

View file

@ -20,7 +20,6 @@ pub async fn signin(kvs: &Datastore, session: &mut Session, vars: Object) -> Res
let ns = vars.get("NS").or_else(|| vars.get("ns"));
let db = vars.get("DB").or_else(|| vars.get("db"));
let ac = vars.get("AC").or_else(|| vars.get("ac"));
// Check if the parameters exist
match (ns, db, ac) {
// DB signin with access method
@ -102,7 +101,7 @@ pub async fn db_access(
vars: Object,
) -> Result<String, Error> {
// Create a new readonly transaction
let mut tx = kvs.transaction(Read, Optimistic).await?;
let tx = kvs.transaction(Read, Optimistic).await?;
// Fetch the specified access method from storage
let access = tx.get_db_access(&ns, &db, &ac).await;
// Ensure that the transaction is cancelled
@ -114,7 +113,7 @@ pub async fn db_access(
// All access method types are supported except for JWT
// The JWT access method is the one that is internal to SurrealDB
// The equivalent of signing in with JWT is to authenticate it
match av.kind {
match av.kind.clone() {
AccessType::Record(at) => {
// Check if the record access method supports issuing tokens
let iss = match at.jwt.issue {

View file

@ -47,7 +47,7 @@ pub async fn db_access(
vars: Object,
) -> Result<Option<String>, Error> {
// Create a new readonly transaction
let mut tx = kvs.transaction(Read, Optimistic).await?;
let tx = kvs.transaction(Read, Optimistic).await?;
// Fetch the specified access method from storage
let access = tx.get_db_access(&ns, &db, &ac).await;
// Ensure that the transaction is cancelled
@ -57,7 +57,7 @@ pub async fn db_access(
Ok(av) => {
// Check the access method type
// Currently, only the record access method supports signup
match av.kind {
match av.kind.clone() {
AccessType::Record(at) => {
// Check if the record access method supports issuing tokens
let iss = match at.jwt.issue {

View file

@ -15,60 +15,47 @@ use once_cell::sync::Lazy;
use std::str::{self, FromStr};
use std::sync::Arc;
fn config(alg: Algorithm, key: String) -> Result<(DecodingKey, Validation), Error> {
fn config(alg: Algorithm, key: &[u8]) -> Result<(DecodingKey, Validation), Error> {
match alg {
Algorithm::Hs256 => Ok((
DecodingKey::from_secret(key.as_ref()),
Validation::new(jsonwebtoken::Algorithm::HS256),
)),
Algorithm::Hs384 => Ok((
DecodingKey::from_secret(key.as_ref()),
Validation::new(jsonwebtoken::Algorithm::HS384),
)),
Algorithm::Hs512 => Ok((
DecodingKey::from_secret(key.as_ref()),
Validation::new(jsonwebtoken::Algorithm::HS512),
)),
Algorithm::EdDSA => Ok((
DecodingKey::from_ed_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::EdDSA),
)),
Algorithm::Es256 => Ok((
DecodingKey::from_ec_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::ES256),
)),
Algorithm::Es384 => Ok((
DecodingKey::from_ec_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::ES384),
)),
Algorithm::Es512 => Ok((
DecodingKey::from_ec_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::ES384),
)),
Algorithm::Ps256 => Ok((
DecodingKey::from_rsa_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::PS256),
)),
Algorithm::Ps384 => Ok((
DecodingKey::from_rsa_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::PS384),
)),
Algorithm::Ps512 => Ok((
DecodingKey::from_rsa_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::PS512),
)),
Algorithm::Rs256 => Ok((
DecodingKey::from_rsa_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::RS256),
)),
Algorithm::Rs384 => Ok((
DecodingKey::from_rsa_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::RS384),
)),
Algorithm::Rs512 => Ok((
DecodingKey::from_rsa_pem(key.as_ref())?,
Validation::new(jsonwebtoken::Algorithm::RS512),
)),
Algorithm::Hs256 => {
Ok((DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS256)))
}
Algorithm::Hs384 => {
Ok((DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS384)))
}
Algorithm::Hs512 => {
Ok((DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS512)))
}
Algorithm::EdDSA => {
Ok((DecodingKey::from_ed_pem(key)?, Validation::new(jsonwebtoken::Algorithm::EdDSA)))
}
Algorithm::Es256 => {
Ok((DecodingKey::from_ec_pem(key)?, Validation::new(jsonwebtoken::Algorithm::ES256)))
}
Algorithm::Es384 => {
Ok((DecodingKey::from_ec_pem(key)?, Validation::new(jsonwebtoken::Algorithm::ES384)))
}
Algorithm::Es512 => {
Ok((DecodingKey::from_ec_pem(key)?, Validation::new(jsonwebtoken::Algorithm::ES384)))
}
Algorithm::Ps256 => {
Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::PS256)))
}
Algorithm::Ps384 => {
Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::PS384)))
}
Algorithm::Ps512 => {
Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::PS512)))
}
Algorithm::Rs256 => {
Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::RS256)))
}
Algorithm::Rs384 => {
Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::RS384)))
}
Algorithm::Rs512 => {
Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::RS512)))
}
}
}
@ -92,7 +79,6 @@ pub async fn basic(
) -> Result<(), Error> {
// Log the authentication type
trace!("Attempting basic authentication");
// Check if the parameters exist
match (ns, db) {
// DB signin
@ -163,16 +149,18 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
// Log the decoded authentication claims
trace!("Authenticating with record access method `{}`", ac);
// Create a new readonly transaction
let mut tx = kvs.transaction(Read, Optimistic).await?;
let tx = kvs.transaction(Read, Optimistic).await?;
// Parse the record id
let mut rid = syn::thing(&id)?;
// Get the database access method
let de = tx.get_db_access(&ns, &db, &ac).await?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Obtain the configuration to verify the token based on the access method
let (au, cf) = match de.kind {
let (au, cf) = match de.kind.clone() {
AccessType::Record(at) => {
let cf = match at.jwt.verify.clone() {
JwtAccessVerify::Key(key) => config(key.alg, key.key),
JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
#[cfg(feature = "jwks")]
JwtAccessVerify::Jwks(jwks) => {
if let Some(kid) = token_data.header.kid {
@ -244,15 +232,17 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
// Log the decoded authentication claims
trace!("Authenticating to database `{}` with access method `{}`", db, ac);
// Create a new readonly transaction
let mut tx = kvs.transaction(Read, Optimistic).await?;
let tx = kvs.transaction(Read, Optimistic).await?;
// Get the database access method
let de = tx.get_db_access(&ns, &db, &ac).await?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Obtain the configuration to verify the token based on the access method
match de.kind {
match de.kind.clone() {
// If the access type is Jwt, this is database access
AccessType::Jwt(at) => {
let cf = match at.verify {
JwtAccessVerify::Key(key) => config(key.alg, key.key),
JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
#[cfg(feature = "jwks")]
JwtAccessVerify::Jwks(jwks) => {
if let Some(kid) = token_data.header.kid {
@ -300,7 +290,7 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
Some(au) => {
trace!("Access method `{}` is record access with authenticate clause", ac);
let cf = match at.jwt.verify {
JwtAccessVerify::Key(key) => config(key.alg, key.key),
JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
#[cfg(feature = "jwks")]
JwtAccessVerify::Jwks(jwks) => {
if let Some(kid) = token_data.header.kid {
@ -366,13 +356,16 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
// Log the decoded authentication claims
trace!("Authenticating to database `{}` with user `{}`", db, id);
// Create a new readonly transaction
let mut tx = kvs.transaction(Read, Optimistic).await?;
let tx = kvs.transaction(Read, Optimistic).await?;
// Get the database user
let de = tx.get_db_user(&ns, &db, &id).await.map_err(|e| {
trace!("Error while authenticating to database `{db}`: {e}");
Error::InvalidAuth
})?;
let cf = config(Algorithm::Hs512, de.code)?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Check the algorithm
let cf = config(Algorithm::Hs512, de.code.as_bytes())?;
// Verify the token
decode::<Claims>(token, &cf.0, &cf.1)?;
// Log the success
@ -398,13 +391,15 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
// Log the decoded authentication claims
trace!("Authenticating to namespace `{}` with access method `{}`", ns, ac);
// Create a new readonly transaction
let mut tx = kvs.transaction(Read, Optimistic).await?;
let tx = kvs.transaction(Read, Optimistic).await?;
// Get the namespace access method
let de = tx.get_ns_access(&ns, &ac).await?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Obtain the configuration to verify the token based on the access method
let cf = match de.kind {
let cf = match de.kind.clone() {
AccessType::Jwt(ac) => match ac.verify {
JwtAccessVerify::Key(key) => config(key.alg, key.key),
JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
#[cfg(feature = "jwks")]
JwtAccessVerify::Jwks(jwks) => {
if let Some(kid) = token_data.header.kid {
@ -452,13 +447,16 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
// Log the decoded authentication claims
trace!("Authenticating to namespace `{}` with user `{}`", ns, id);
// Create a new readonly transaction
let mut tx = kvs.transaction(Read, Optimistic).await?;
let tx = kvs.transaction(Read, Optimistic).await?;
// Get the namespace user
let de = tx.get_ns_user(&ns, &id).await.map_err(|e| {
trace!("Error while authenticating to namespace `{ns}`: {e}");
Error::InvalidAuth
})?;
let cf = config(Algorithm::Hs512, de.code)?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Check the algorithm
let cf = config(Algorithm::Hs512, de.code.as_bytes())?;
// Verify the token
decode::<Claims>(token, &cf.0, &cf.1)?;
// Log the success
@ -482,13 +480,15 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
// Log the decoded authentication claims
trace!("Authenticating to root with access method `{}`", ac);
// Create a new readonly transaction
let mut tx = kvs.transaction(Read, Optimistic).await?;
let tx = kvs.transaction(Read, Optimistic).await?;
// Get the namespace access method
let de = tx.get_root_access(&ac).await?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Obtain the configuration to verify the token based on the access method
let cf = match de.kind {
let cf = match de.kind.clone() {
AccessType::Jwt(ac) => match ac.verify {
JwtAccessVerify::Key(key) => config(key.alg, key.key),
JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
#[cfg(feature = "jwks")]
JwtAccessVerify::Jwks(jwks) => {
if let Some(kid) = token_data.header.kid {
@ -533,13 +533,16 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
// Log the decoded authentication claims
trace!("Authenticating to root level with user `{}`", id);
// Create a new readonly transaction
let mut tx = kvs.transaction(Read, Optimistic).await?;
let tx = kvs.transaction(Read, Optimistic).await?;
// Get the namespace user
let de = tx.get_root_user(&id).await.map_err(|e| {
trace!("Error while authenticating to root: {e}");
Error::InvalidAuth
})?;
let cf = config(Algorithm::Hs512, de.code)?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Check the algorithm
let cf = config(Algorithm::Hs512, de.code.as_bytes())?;
// Verify the token
decode::<Claims>(token, &cf.0, &cf.1)?;
// Log the success
@ -565,14 +568,18 @@ pub async fn verify_root_creds(
pass: &str,
) -> Result<DefineUserStatement, Error> {
// Create a new readonly transaction
let mut tx = ds.transaction(Read, Optimistic).await?;
let tx = ds.transaction(Read, Optimistic).await?;
// Fetch the specified user from storage
let user = tx.get_root_user(user).await.map_err(|e| {
trace!("Error while authenticating to root: {e}");
Error::InvalidAuth
})?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Verify the specified password for the user
verify_pass(pass, user.hash.as_ref())?;
// Clone the cached user object
let user = (*user).clone();
// Return the verified user object
Ok(user)
}
@ -584,14 +591,18 @@ pub async fn verify_ns_creds(
pass: &str,
) -> Result<DefineUserStatement, Error> {
// Create a new readonly transaction
let mut tx = ds.transaction(Read, Optimistic).await?;
let tx = ds.transaction(Read, Optimistic).await?;
// Fetch the specified user from storage
let user = tx.get_ns_user(ns, user).await.map_err(|e| {
trace!("Error while authenticating to namespace `{ns}`: {e}");
Error::InvalidAuth
})?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Verify the specified password for the user
verify_pass(pass, user.hash.as_ref())?;
// Clone the cached user object
let user = (*user).clone();
// Return the verified user object
Ok(user)
}
@ -604,14 +615,18 @@ pub async fn verify_db_creds(
pass: &str,
) -> Result<DefineUserStatement, Error> {
// Create a new readonly transaction
let mut tx = ds.transaction(Read, Optimistic).await?;
let tx = ds.transaction(Read, Optimistic).await?;
// Fetch the specified user from storage
let user = tx.get_db_user(ns, db, user).await.map_err(|e| {
trace!("Error while authenticating to database `{ns}/{db}`: {e}");
Error::InvalidAuth
})?;
// Ensure that the transaction is cancelled
tx.cancel().await?;
// Verify the specified password for the user
verify_pass(pass, user.hash.as_ref())?;
// Clone the cached user object
let user = (*user).clone();
// Return the verified user object
Ok(user)
}
@ -1685,7 +1700,7 @@ mod tests {
algorithm: jsonwebtoken::jwk::AlgorithmParameters::OctetKey(
jsonwebtoken::jwk::OctetKeyParameters {
key_type: jsonwebtoken::jwk::OctetKeyType::Octet,
value: STANDARD_NO_PAD.encode(&secret),
value: STANDARD_NO_PAD.encode(secret),
},
),
}],

View file

@ -115,18 +115,18 @@ mod tests {
use crate::kvs::{Datastore, LockType::*, Transaction, TransactionType::*};
async fn get_ids(ds: &Datastore) -> (Transaction, U32) {
let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
let txn = ds.transaction(Write, Optimistic).await.unwrap();
let key = "foo";
let v = tx.get(key).await.unwrap();
let v = txn.get(key).await.unwrap();
let d = U32::new(key.into(), v).await.unwrap();
(tx, d)
(txn, d)
}
async fn finish(mut tx: Transaction, mut d: U32) -> Result<(), Error> {
async fn finish(txn: Transaction, mut d: U32) -> Result<(), Error> {
if let Some((key, val)) = d.finish() {
tx.set(key, val).await?;
txn.set(key, val).await?;
}
tx.commit().await
txn.commit().await
}
#[tokio::test]

View file

@ -23,7 +23,7 @@ pub struct DocIds {
impl DocIds {
pub async fn new(
ixs: &IndexStores,
tx: &mut Transaction,
tx: &Transaction,
tt: TransactionType,
ikb: IndexKeyBase,
default_btree_order: u32,
@ -73,7 +73,7 @@ impl DocIds {
pub(crate) async fn get_doc_id(
&self,
tx: &mut Transaction,
tx: &Transaction,
doc_key: Key,
) -> Result<Option<DocId>, Error> {
self.btree.search(tx, &self.store, &doc_key).await
@ -83,7 +83,7 @@ impl DocIds {
/// If the doc_id does not exist, a new one is created and associated with the given key.
pub(in crate::idx) async fn resolve_doc_id(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
doc_key: Key,
) -> Result<Resolved, Error> {
{
@ -99,7 +99,7 @@ impl DocIds {
pub(in crate::idx) async fn remove_doc(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
doc_key: Key,
) -> Result<Option<DocId>, Error> {
if let Some(doc_id) = self.btree.delete(tx, &mut self.store, doc_key).await? {
@ -119,7 +119,7 @@ impl DocIds {
pub(in crate::idx) async fn get_doc_key(
&self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
) -> Result<Option<Key>, Error> {
let doc_id_key = self.index_key_base.new_bi_key(doc_id);
@ -130,14 +130,11 @@ impl DocIds {
}
}
pub(in crate::idx) async fn statistics(
&self,
tx: &mut Transaction,
) -> Result<BStatistics, Error> {
pub(in crate::idx) async fn statistics(&self, tx: &Transaction) -> Result<BStatistics, Error> {
self.btree.statistics(tx, &self.store).await
}
pub(in crate::idx) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
pub(in crate::idx) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
if let Some(new_cache) = self.store.finish(tx).await? {
let btree = self.btree.inc_generation().clone();
let state = State {
@ -260,16 +257,15 @@ mod tests {
const BTREE_ORDER: u32 = 7;
async fn new_operation(ds: &Datastore, tt: TransactionType) -> (Transaction, DocIds) {
let mut tx = ds.transaction(tt, Optimistic).await.unwrap();
let d =
DocIds::new(ds.index_store(), &mut tx, tt, IndexKeyBase::default(), BTREE_ORDER, 100)
let tx = ds.transaction(tt, Optimistic).await.unwrap();
let d = DocIds::new(ds.index_store(), &tx, tt, IndexKeyBase::default(), BTREE_ORDER, 100)
.await
.unwrap();
(tx, d)
}
async fn finish(mut tx: Transaction, mut d: DocIds) {
d.finish(&mut tx).await.unwrap();
async fn finish(tx: Transaction, mut d: DocIds) {
d.finish(&tx).await.unwrap();
tx.commit().await.unwrap();
}
@ -279,83 +275,65 @@ mod tests {
// Resolve a first doc key
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
let doc_id = d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap();
let (tx, mut d) = new_operation(&ds, Write).await;
let doc_id = d.resolve_doc_id(&tx, "Foo".into()).await.unwrap();
finish(tx, d).await;
let (mut tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 1);
assert_eq!(d.get_doc_key(&mut tx, 0).await.unwrap(), Some("Foo".into()));
let (tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 1);
assert_eq!(d.get_doc_key(&tx, 0).await.unwrap(), Some("Foo".into()));
assert_eq!(doc_id, Resolved::New(0));
}
// Resolve the same doc key
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
let doc_id = d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap();
let (tx, mut d) = new_operation(&ds, Write).await;
let doc_id = d.resolve_doc_id(&tx, "Foo".into()).await.unwrap();
finish(tx, d).await;
let (mut tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 1);
assert_eq!(d.get_doc_key(&mut tx, 0).await.unwrap(), Some("Foo".into()));
let (tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 1);
assert_eq!(d.get_doc_key(&tx, 0).await.unwrap(), Some("Foo".into()));
assert_eq!(doc_id, Resolved::Existing(0));
}
// Resolve another single doc key
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
let doc_id = d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap();
let (tx, mut d) = new_operation(&ds, Write).await;
let doc_id = d.resolve_doc_id(&tx, "Bar".into()).await.unwrap();
finish(tx, d).await;
let (mut tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 2);
assert_eq!(d.get_doc_key(&mut tx, 1).await.unwrap(), Some("Bar".into()));
let (tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 2);
assert_eq!(d.get_doc_key(&tx, 1).await.unwrap(), Some("Bar".into()));
assert_eq!(doc_id, Resolved::New(1));
}
// Resolve another two existing doc keys and two new doc keys (interlaced)
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(
d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(),
Resolved::Existing(0)
);
assert_eq!(d.resolve_doc_id(&mut tx, "Hello".into()).await.unwrap(), Resolved::New(2));
assert_eq!(
d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(),
Resolved::Existing(1)
);
assert_eq!(d.resolve_doc_id(&mut tx, "World".into()).await.unwrap(), Resolved::New(3));
let (tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(), Resolved::Existing(0));
assert_eq!(d.resolve_doc_id(&tx, "Hello".into()).await.unwrap(), Resolved::New(2));
assert_eq!(d.resolve_doc_id(&tx, "Bar".into()).await.unwrap(), Resolved::Existing(1));
assert_eq!(d.resolve_doc_id(&tx, "World".into()).await.unwrap(), Resolved::New(3));
finish(tx, d).await;
let (mut tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 4);
let (tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 4);
}
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(
d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(),
Resolved::Existing(0)
);
assert_eq!(
d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(),
Resolved::Existing(1)
);
assert_eq!(
d.resolve_doc_id(&mut tx, "Hello".into()).await.unwrap(),
Resolved::Existing(2)
);
assert_eq!(
d.resolve_doc_id(&mut tx, "World".into()).await.unwrap(),
Resolved::Existing(3)
);
let (tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(), Resolved::Existing(0));
assert_eq!(d.resolve_doc_id(&tx, "Bar".into()).await.unwrap(), Resolved::Existing(1));
assert_eq!(d.resolve_doc_id(&tx, "Hello".into()).await.unwrap(), Resolved::Existing(2));
assert_eq!(d.resolve_doc_id(&tx, "World".into()).await.unwrap(), Resolved::Existing(3));
finish(tx, d).await;
let (mut tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.get_doc_key(&mut tx, 0).await.unwrap(), Some("Foo".into()));
assert_eq!(d.get_doc_key(&mut tx, 1).await.unwrap(), Some("Bar".into()));
assert_eq!(d.get_doc_key(&mut tx, 2).await.unwrap(), Some("Hello".into()));
assert_eq!(d.get_doc_key(&mut tx, 3).await.unwrap(), Some("World".into()));
assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 4);
let (tx, d) = new_operation(&ds, Read).await;
assert_eq!(d.get_doc_key(&tx, 0).await.unwrap(), Some("Foo".into()));
assert_eq!(d.get_doc_key(&tx, 1).await.unwrap(), Some("Bar".into()));
assert_eq!(d.get_doc_key(&tx, 2).await.unwrap(), Some("Hello".into()));
assert_eq!(d.get_doc_key(&tx, 3).await.unwrap(), Some("World".into()));
assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 4);
}
}
@ -365,53 +343,53 @@ mod tests {
// Create two docs
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(), Resolved::New(0));
assert_eq!(d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(), Resolved::New(1));
let (tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(), Resolved::New(0));
assert_eq!(d.resolve_doc_id(&tx, "Bar".into()).await.unwrap(), Resolved::New(1));
finish(tx, d).await;
}
// Remove doc 1
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.remove_doc(&mut tx, "Dummy".into()).await.unwrap(), None);
assert_eq!(d.remove_doc(&mut tx, "Foo".into()).await.unwrap(), Some(0));
let (tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.remove_doc(&tx, "Dummy".into()).await.unwrap(), None);
assert_eq!(d.remove_doc(&tx, "Foo".into()).await.unwrap(), Some(0));
finish(tx, d).await;
}
// Check 'Foo' has been removed
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.remove_doc(&mut tx, "Foo".into()).await.unwrap(), None);
let (tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.remove_doc(&tx, "Foo".into()).await.unwrap(), None);
finish(tx, d).await;
}
// Insert a new doc - should take the available id 1
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.resolve_doc_id(&mut tx, "Hello".into()).await.unwrap(), Resolved::New(0));
let (tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.resolve_doc_id(&tx, "Hello".into()).await.unwrap(), Resolved::New(0));
finish(tx, d).await;
}
// Remove doc 2
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.remove_doc(&mut tx, "Dummy".into()).await.unwrap(), None);
assert_eq!(d.remove_doc(&mut tx, "Bar".into()).await.unwrap(), Some(1));
let (tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.remove_doc(&tx, "Dummy".into()).await.unwrap(), None);
assert_eq!(d.remove_doc(&tx, "Bar".into()).await.unwrap(), Some(1));
finish(tx, d).await;
}
// Check 'Bar' has been removed
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.remove_doc(&mut tx, "Foo".into()).await.unwrap(), None);
let (tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.remove_doc(&tx, "Foo".into()).await.unwrap(), None);
finish(tx, d).await;
}
// Insert a new doc - should take the available id 2
{
let (mut tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.resolve_doc_id(&mut tx, "World".into()).await.unwrap(), Resolved::New(1));
let (tx, mut d) = new_operation(&ds, Write).await;
assert_eq!(d.resolve_doc_id(&tx, "World".into()).await.unwrap(), Resolved::New(1));
finish(tx, d).await;
}
}
View file
@ -15,7 +15,7 @@ use filter::Filter;
use reblessive::tree::Stk;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
mod filter;
mod tokenizer;
@ -35,6 +35,17 @@ impl From<DefineAnalyzerStatement> for Analyzer {
}
}
// TODO: @emmanuel-keller we probably don't need to clone the value here
impl From<Arc<DefineAnalyzerStatement>> for Analyzer {
fn from(az: Arc<DefineAnalyzerStatement>) -> Self {
Self {
function: az.function.clone().map(|i| i.0),
tokenizers: az.tokenizers.clone(),
filters: Filter::from(az.filters.clone()),
}
}
}
pub(in crate::idx) type TermsList = Vec<Option<(TermId, TermLen)>>;
pub(in crate::idx) struct TermsSet {
@ -72,13 +83,13 @@ impl Analyzer {
let mut list = Vec::with_capacity(tokens.list().len());
let mut unique_tokens = HashSet::new();
let mut set = HashSet::new();
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
let mut has_unknown_terms = false;
for token in tokens.list() {
// Tokens can contain duplicates, no need to evaluate them again
if unique_tokens.insert(token) {
// Is the term known in the index?
let opt_term_id = t.get_term_id(&mut tx, tokens.get_token_string(token)?).await?;
let opt_term_id = t.get_term_id(&tx, tokens.get_token_string(token)?).await?;
list.push(opt_term_id.map(|tid| (tid, token.get_char_len())));
if let Some(term_id) = opt_term_id {
set.insert(term_id);
@ -109,12 +120,10 @@ impl Analyzer {
self.analyze_value(stk, ctx, opt, content, FilteringStage::Indexing, &mut tv).await?;
let mut set = HashSet::new();
let mut has_unknown_terms = false;
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
for tokens in tv {
for token in tokens.list() {
if let Some(term_id) =
t.get_term_id(&mut tx, tokens.get_token_string(token)?).await?
{
if let Some(term_id) = t.get_term_id(&tx, tokens.get_token_string(token)?).await? {
set.insert(term_id);
} else {
has_unknown_terms = true;
@ -162,9 +171,9 @@ impl Analyzer {
}
// Now we can resolve the term ids
let mut tfid = Vec::with_capacity(tf.len());
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
for (t, f) in tf {
tfid.push((terms.resolve_term_id(&mut tx, t).await?, f));
tfid.push((terms.resolve_term_id(&tx, t).await?, f));
}
drop(tx);
Ok((dl, tfid))
@ -204,9 +213,9 @@ impl Analyzer {
// Now we can resolve the term ids
let mut tfid = Vec::with_capacity(tfos.len());
let mut osid = Vec::with_capacity(tfos.len());
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
for (t, o) in tfos {
let id = terms.resolve_term_id(&mut tx, t).await?;
let id = terms.resolve_term_id(&tx, t).await?;
tfid.push((id, o.len() as TermFrequency));
osid.push((id, OffsetRecords(o)));
}
@ -308,7 +317,7 @@ impl Analyzer {
mod tests {
use super::Analyzer;
use crate::ctx::Context;
use crate::dbs::{Options, Transaction};
use crate::dbs::Options;
use crate::idx::ft::analyzer::filter::FilteringStage;
use crate::idx::ft::analyzer::tokenizer::{Token, Tokens};
use crate::kvs::{Datastore, LockType, TransactionType};
@ -316,14 +325,12 @@ mod tests {
sql::{statements::DefineStatement, Statement},
syn,
};
use futures::lock::Mutex;
use std::sync::Arc;
async fn get_analyzer_tokens(def: &str, input: &str) -> Tokens {
let ds = Datastore::new("memory").await.unwrap();
let tx = ds.transaction(TransactionType::Read, LockType::Optimistic).await.unwrap();
let txn: Transaction = Arc::new(Mutex::new(tx));
let ctx = Context::default().set_transaction(txn);
let txn = ds.transaction(TransactionType::Read, LockType::Optimistic).await.unwrap();
let ctx = Context::default().with_transaction(Arc::new(txn));
let mut stmt = syn::parse(&format!("DEFINE {def}")).unwrap();
let Some(Statement::Define(DefineStatement::Analyzer(az))) = stmt.0 .0.pop() else {
View file
@ -18,7 +18,7 @@ pub(super) struct DocLengths {
impl DocLengths {
pub(super) async fn new(
ixs: &IndexStores,
tx: &mut Transaction,
tx: &Transaction,
ikb: IndexKeyBase,
default_btree_order: u32,
tt: TransactionType,
@ -48,7 +48,7 @@ impl DocLengths {
pub(super) async fn get_doc_length(
&self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
) -> Result<Option<DocLength>, Error> {
self.btree.search(tx, &self.store, &doc_id.to_be_bytes().to_vec()).await
@ -56,7 +56,7 @@ impl DocLengths {
pub(super) async fn get_doc_length_mut(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
) -> Result<Option<DocLength>, Error> {
self.btree.search_mut(tx, &mut self.store, &doc_id.to_be_bytes().to_vec()).await
@ -64,7 +64,7 @@ impl DocLengths {
pub(super) async fn set_doc_length(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
doc_length: DocLength,
) -> Result<(), Error> {
@ -74,17 +74,17 @@ impl DocLengths {
pub(super) async fn remove_doc_length(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
) -> Result<Option<Payload>, Error> {
self.btree.delete(tx, &mut self.store, doc_id.to_be_bytes().to_vec()).await
}
pub(super) async fn statistics(&self, tx: &mut Transaction) -> Result<BStatistics, Error> {
pub(super) async fn statistics(&self, tx: &Transaction) -> Result<BStatistics, Error> {
self.btree.statistics(tx, &self.store).await
}
pub(super) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
if let Some(new_cache) = self.store.finish(tx).await? {
let state = self.btree.inc_generation();
tx.set(self.state_key.clone(), state.try_to_val()?).await?;
@ -105,16 +105,15 @@ mod tests {
order: u32,
tt: TransactionType,
) -> (Transaction, DocLengths) {
let mut tx = ds.transaction(TransactionType::Write, Optimistic).await.unwrap();
let dl =
DocLengths::new(ds.index_store(), &mut tx, IndexKeyBase::default(), order, tt, 100)
let tx = ds.transaction(TransactionType::Write, Optimistic).await.unwrap();
let dl = DocLengths::new(ds.index_store(), &tx, IndexKeyBase::default(), order, tt, 100)
.await
.unwrap();
(tx, dl)
}
async fn finish(mut l: DocLengths, mut tx: Transaction) {
l.finish(&mut tx).await.unwrap();
async fn finish(mut l: DocLengths, tx: Transaction) {
l.finish(&tx).await.unwrap();
tx.commit().await.unwrap()
}
@ -126,54 +125,54 @@ mod tests {
{
// Check empty state
let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
assert_eq!(l.statistics(&mut tx).await.unwrap().keys_count, 0);
let dl = l.get_doc_length(&mut tx, 99).await.unwrap();
let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
assert_eq!(l.statistics(&tx).await.unwrap().keys_count, 0);
let dl = l.get_doc_length(&tx, 99).await.unwrap();
assert_eq!(dl, None);
tx.cancel().await.unwrap();
}
{
// Set a doc length
let (mut tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
l.set_doc_length(&mut tx, 99, 199).await.unwrap();
let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
l.set_doc_length(&tx, 99, 199).await.unwrap();
finish(l, tx).await;
}
{
let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
assert_eq!(l.statistics(&mut tx).await.unwrap().keys_count, 1);
let dl = l.get_doc_length(&mut tx, 99).await.unwrap();
let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
assert_eq!(l.statistics(&tx).await.unwrap().keys_count, 1);
let dl = l.get_doc_length(&tx, 99).await.unwrap();
assert_eq!(dl, Some(199));
tx.cancel().await.unwrap();
}
{
// Update doc length
let (mut tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
l.set_doc_length(&mut tx, 99, 299).await.unwrap();
let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
l.set_doc_length(&tx, 99, 299).await.unwrap();
finish(l, tx).await;
}
{
let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
assert_eq!(l.statistics(&mut tx).await.unwrap().keys_count, 1);
let dl = l.get_doc_length(&mut tx, 99).await.unwrap();
let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
assert_eq!(l.statistics(&tx).await.unwrap().keys_count, 1);
let dl = l.get_doc_length(&tx, 99).await.unwrap();
assert_eq!(dl, Some(299));
tx.cancel().await.unwrap();
}
{
// Remove doc lengths
let (mut tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
assert_eq!(l.remove_doc_length(&mut tx, 99).await.unwrap(), Some(299));
assert_eq!(l.remove_doc_length(&mut tx, 99).await.unwrap(), None);
let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
assert_eq!(l.remove_doc_length(&tx, 99).await.unwrap(), Some(299));
assert_eq!(l.remove_doc_length(&tx, 99).await.unwrap(), None);
finish(l, tx).await;
}
{
let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
let dl = l.get_doc_length(&mut tx, 99).await.unwrap();
let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
let dl = l.get_doc_length(&tx, 99).await.unwrap();
assert_eq!(dl, None);
tx.cancel().await.unwrap();
}
View file
@ -22,7 +22,7 @@ use crate::idx::ft::terms::{TermId, TermLen, Terms};
use crate::idx::trees::btree::BStatistics;
use crate::idx::trees::store::IndexStores;
use crate::idx::{IndexKeyBase, VersionedSerdeState};
use crate::kvs;
use crate::kvs::Transaction;
use crate::kvs::{Key, TransactionType};
use crate::sql::index::SearchParams;
use crate::sql::scoring::Scoring;
@ -105,35 +105,33 @@ impl FtIndex {
p: &SearchParams,
tt: TransactionType,
) -> Result<Self, Error> {
let mut tx = ctx.tx_lock().await;
let az = tx.get_db_analyzer(opt.ns()?, opt.db()?, az).await?;
let res =
Self::with_analyzer(ctx.get_index_stores(), &mut tx, az, index_key_base, p, tt).await;
drop(tx);
res
let tx = ctx.tx();
// TODO: @emmanuel-keller we probably don't need to clone the value here
let az = tx.get_db_analyzer(opt.ns()?, opt.db()?, az).await?.as_ref().to_owned();
Self::with_analyzer(ctx.get_index_stores(), &tx, az, index_key_base, p, tt).await
}
async fn with_analyzer(
ixs: &IndexStores,
run: &mut kvs::Transaction,
txn: &Transaction,
az: DefineAnalyzerStatement,
index_key_base: IndexKeyBase,
p: &SearchParams,
tt: TransactionType,
) -> Result<Self, Error> {
let state_key: Key = index_key_base.new_bs_key();
let state: State = if let Some(val) = run.get(state_key.clone()).await? {
let state: State = if let Some(val) = txn.get(state_key.clone()).await? {
State::try_from_val(val)?
} else {
State::default()
};
let doc_ids = Arc::new(RwLock::new(
DocIds::new(ixs, run, tt, index_key_base.clone(), p.doc_ids_order, p.doc_ids_cache)
DocIds::new(ixs, txn, tt, index_key_base.clone(), p.doc_ids_order, p.doc_ids_cache)
.await?,
));
let doc_lengths = Arc::new(RwLock::new(
DocLengths::new(
ixs,
run,
txn,
index_key_base.clone(),
p.doc_lengths_order,
tt,
@ -142,11 +140,11 @@ impl FtIndex {
.await?,
));
let postings = Arc::new(RwLock::new(
Postings::new(ixs, run, index_key_base.clone(), p.postings_order, tt, p.postings_cache)
Postings::new(ixs, txn, index_key_base.clone(), p.postings_order, tt, p.postings_cache)
.await?,
));
let terms = Arc::new(RwLock::new(
Terms::new(ixs, run, index_key_base.clone(), p.terms_order, tt, p.terms_cache).await?,
Terms::new(ixs, txn, index_key_base.clone(), p.terms_order, tt, p.terms_cache).await?,
));
let termdocs = TermDocs::new(index_key_base.clone());
let offsets = Offsets::new(index_key_base.clone());
@ -194,17 +192,17 @@ impl FtIndex {
ctx: &Context<'_>,
rid: &Thing,
) -> Result<(), Error> {
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
// Extract and remove the doc_id (if any)
let mut doc_ids = self.doc_ids.write().await;
let doc_id = doc_ids.remove_doc(&mut tx, rid.into()).await?;
let doc_id = doc_ids.remove_doc(&tx, rid.into()).await?;
drop(doc_ids);
if let Some(doc_id) = doc_id {
self.state.doc_count -= 1;
// Remove the doc length
let mut doc_lengths = self.doc_lengths.write().await;
let dl = doc_lengths.remove_doc_length(&mut tx, doc_id).await?;
let dl = doc_lengths.remove_doc_length(&tx, doc_id).await?;
drop(doc_lengths);
if let Some(doc_lengths) = dl {
self.state.total_docs_lengths -= doc_lengths as u128;
@ -217,11 +215,11 @@ impl FtIndex {
let mut p = self.postings.write().await;
let mut t = self.terms.write().await;
for term_id in &term_list {
p.remove_posting(&mut tx, term_id, doc_id).await?;
p.remove_posting(&tx, term_id, doc_id).await?;
// if the term is not present in any document in the index, we can remove it
let doc_count = self.term_docs.remove_doc(&mut tx, term_id, doc_id).await?;
let doc_count = self.term_docs.remove_doc(&tx, term_id, doc_id).await?;
if doc_count == 0 {
t.remove_term_id(&mut tx, term_id).await?;
t.remove_term_id(&tx, term_id).await?;
}
}
drop(p);
@ -230,7 +228,7 @@ impl FtIndex {
if self.highlighting {
for term_id in term_list {
// TODO?: Removal can be done with a prefix on doc_id
self.offsets.remove_offsets(&mut tx, doc_id, term_id).await?;
self.offsets.remove_offsets(&tx, doc_id, term_id).await?;
}
}
}
@ -248,11 +246,10 @@ impl FtIndex {
content: Vec<Value>,
) -> Result<(), Error> {
// Resolve the doc_id
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
let mut doc_ids = self.doc_ids.write().await;
let resolved = doc_ids.resolve_doc_id(&mut tx, rid.into()).await?;
let resolved = doc_ids.resolve_doc_id(&tx, rid.into()).await?;
drop(doc_ids);
drop(tx);
let doc_id = *resolved.doc_id();
// Extract the doc_lengths, terms and frequencies (and offsets)
@ -272,14 +269,14 @@ impl FtIndex {
};
// Set the doc length
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
let mut dl = self.doc_lengths.write().await;
if resolved.was_existing() {
if let Some(old_doc_length) = dl.get_doc_length_mut(&mut tx, doc_id).await? {
if let Some(old_doc_length) = dl.get_doc_length_mut(&tx, doc_id).await? {
self.state.total_docs_lengths -= old_doc_length as u128;
}
}
dl.set_doc_length(&mut tx, doc_id, doc_length).await?;
dl.set_doc_length(&tx, doc_id, doc_length).await?;
drop(dl);
// Retrieve the existing terms for this document (if any)
@ -294,22 +291,22 @@ impl FtIndex {
let mut terms_ids = RoaringTreemap::default();
let mut p = self.postings.write().await;
for (term_id, term_freq) in terms_and_frequencies {
p.update_posting(&mut tx, term_id, doc_id, term_freq).await?;
p.update_posting(&tx, term_id, doc_id, term_freq).await?;
if let Some(old_term_ids) = &mut old_term_ids {
old_term_ids.remove(term_id);
}
self.term_docs.set_doc(&mut tx, term_id, doc_id).await?;
self.term_docs.set_doc(&tx, term_id, doc_id).await?;
terms_ids.insert(term_id);
}
// Remove any remaining postings
if let Some(old_term_ids) = &old_term_ids {
for old_term_id in old_term_ids {
p.remove_posting(&mut tx, old_term_id, doc_id).await?;
let doc_count = self.term_docs.remove_doc(&mut tx, old_term_id, doc_id).await?;
p.remove_posting(&tx, old_term_id, doc_id).await?;
let doc_count = self.term_docs.remove_doc(&tx, old_term_id, doc_id).await?;
// if the term does not have any more postings, we can remove the term
if doc_count == 0 {
t.remove_term_id(&mut tx, old_term_id).await?;
t.remove_term_id(&tx, old_term_id).await?;
}
}
}
@ -321,14 +318,14 @@ impl FtIndex {
if let Some(ofs) = offsets {
if !ofs.is_empty() {
for (tid, or) in ofs {
self.offsets.set_offsets(&mut tx, doc_id, tid, or).await?;
self.offsets.set_offsets(&tx, doc_id, tid, or).await?;
}
}
}
// In case of an update, we remove the offsets for the terms that no longer exist
if let Some(old_term_ids) = old_term_ids {
for old_term_id in old_term_ids {
self.offsets.remove_offsets(&mut tx, doc_id, old_term_id).await?;
self.offsets.remove_offsets(&tx, doc_id, old_term_id).await?;
}
}
}
@ -365,7 +362,7 @@ impl FtIndex {
pub(super) async fn get_terms_docs(
&self,
tx: &mut kvs::Transaction,
tx: &Transaction,
terms: &TermsList,
) -> Result<Vec<Option<(TermId, RoaringTreemap)>>, Error> {
let mut terms_docs = Vec::with_capacity(terms.len());
@ -424,7 +421,7 @@ impl FtIndex {
pub(super) async fn highlight(
&self,
tx: &mut kvs::Transaction,
tx: &Transaction,
thg: &Thing,
terms: &[Option<(TermId, TermLen)>],
hlp: HighlightParams,
@ -450,7 +447,7 @@ impl FtIndex {
pub(super) async fn extract_offsets(
&self,
tx: &mut kvs::Transaction,
tx: &Transaction,
thg: &Thing,
terms: &[Option<(TermId, u32)>],
partial: bool,
@ -473,25 +470,22 @@ impl FtIndex {
}
pub(crate) async fn statistics(&self, ctx: &Context<'_>) -> Result<FtStatistics, Error> {
// TODO do parallel execution
let mut run = ctx.tx_lock().await;
let txn = ctx.tx();
let res = FtStatistics {
doc_ids: self.doc_ids.read().await.statistics(&mut run).await?,
terms: self.terms.read().await.statistics(&mut run).await?,
doc_lengths: self.doc_lengths.read().await.statistics(&mut run).await?,
postings: self.postings.read().await.statistics(&mut run).await?,
doc_ids: self.doc_ids.read().await.statistics(&txn).await?,
terms: self.terms.read().await.statistics(&txn).await?,
doc_lengths: self.doc_lengths.read().await.statistics(&txn).await?,
postings: self.postings.read().await.statistics(&txn).await?,
};
drop(run);
Ok(res)
}
pub(crate) async fn finish(&self, ctx: &Context<'_>) -> Result<(), Error> {
let mut run = ctx.tx_lock().await;
self.doc_ids.write().await.finish(&mut run).await?;
self.doc_lengths.write().await.finish(&mut run).await?;
self.postings.write().await.finish(&mut run).await?;
self.terms.write().await.finish(&mut run).await?;
drop(run);
let txn = ctx.tx();
self.doc_ids.write().await.finish(&txn).await?;
self.doc_lengths.write().await.finish(&txn).await?;
self.postings.write().await.finish(&txn).await?;
self.terms.write().await.finish(&txn).await?;
Ok(())
}
}
@ -518,10 +512,7 @@ impl HitsIterator {
self.iter.size_hint().0
}
pub(crate) async fn next(
&mut self,
tx: &mut kvs::Transaction,
) -> Result<Option<(Thing, DocId)>, Error> {
pub(crate) async fn next(&mut self, tx: &Transaction) -> Result<Option<(Thing, DocId)>, Error> {
let di = self.doc_ids.read().await;
for doc_id in self.iter.by_ref() {
if let Some(doc_key) = di.get_doc_key(tx, doc_id).await? {
@ -546,7 +537,6 @@ mod tests {
use crate::sql::statements::{DefineAnalyzerStatement, DefineStatement};
use crate::sql::{Array, Statement, Thing, Value};
use crate::syn;
use futures::lock::Mutex;
use reblessive::tree::Stk;
use std::collections::HashMap;
use std::sync::Arc;
@ -558,11 +548,11 @@ mod tests {
scr: BM25Scorer,
e: Vec<(&Thing, Option<Score>)>,
) {
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
if let Some(mut hits) = hits {
let mut map = HashMap::new();
while let Some((k, d)) = hits.next(&mut tx).await.unwrap() {
let s = scr.score(&mut tx, d).await.unwrap();
while let Some((k, d)) = hits.next(&tx).await.unwrap() {
let s = scr.score(&tx, d).await.unwrap();
map.insert(k, s);
}
assert_eq!(map.len(), e.len());
@ -572,7 +562,6 @@ mod tests {
} else {
panic!("hits is none");
}
drop(tx);
}
async fn search(
@ -584,9 +573,8 @@ mod tests {
) -> (Option<HitsIterator>, BM25Scorer) {
let (term_list, _) =
fti.extract_querying_terms(stk, ctx, opt, qs.to_string()).await.unwrap();
let mut tx = ctx.tx_lock().await;
let td = Arc::new(fti.get_terms_docs(&mut tx, &term_list).await.unwrap());
drop(tx);
let tx = ctx.tx();
let td = Arc::new(fti.get_terms_docs(&tx, &term_list).await.unwrap());
let scr = fti.new_scorer(td.clone()).unwrap().unwrap();
let hits = fti.new_hits_iterator(td).unwrap();
(hits, scr)
@ -600,10 +588,10 @@ mod tests {
hl: bool,
) -> (Context<'a>, Options, FtIndex) {
let mut ctx = Context::default();
let mut tx = ds.transaction(tt, Optimistic).await.unwrap();
let tx = ds.transaction(tt, Optimistic).await.unwrap();
let fti = FtIndex::with_analyzer(
ctx.get_index_stores(),
&mut tx,
&tx,
az.clone(),
IndexKeyBase::default(),
&SearchParams {
@ -623,14 +611,14 @@ mod tests {
)
.await
.unwrap();
let txn = Arc::new(Mutex::new(tx));
ctx.set_transaction_mut(txn);
let txn = Arc::new(tx);
ctx.set_transaction(txn);
(ctx, Options::default(), fti)
}
pub(super) async fn finish(ctx: &Context<'_>, fti: FtIndex) {
fti.finish(ctx).await.unwrap();
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
tx.commit().await.unwrap();
}
View file
@ -19,7 +19,7 @@ impl Offsets {
pub(super) async fn set_offsets(
&self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
term_id: TermId,
offsets: OffsetRecords,
@ -32,7 +32,7 @@ impl Offsets {
pub(super) async fn get_offsets(
&self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
term_id: TermId,
) -> Result<Option<OffsetRecords>, Error> {
@ -47,7 +47,7 @@ impl Offsets {
pub(super) async fn remove_offsets(
&self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
term_id: TermId,
) -> Result<(), Error> {
View file
@ -20,7 +20,7 @@ pub(super) struct Postings {
impl Postings {
pub(super) async fn new(
ixs: &IndexStores,
tx: &mut Transaction,
tx: &Transaction,
index_key_base: IndexKeyBase,
order: u32,
tt: TransactionType,
@ -51,7 +51,7 @@ impl Postings {
pub(super) async fn update_posting(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
term_id: TermId,
doc_id: DocId,
term_freq: TermFrequency,
@ -62,7 +62,7 @@ impl Postings {
pub(super) async fn get_term_frequency(
&self,
tx: &mut Transaction,
tx: &Transaction,
term_id: TermId,
doc_id: DocId,
) -> Result<Option<TermFrequency>, Error> {
@ -72,7 +72,7 @@ impl Postings {
pub(super) async fn remove_posting(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
term_id: TermId,
doc_id: DocId,
) -> Result<Option<TermFrequency>, Error> {
@ -80,11 +80,11 @@ impl Postings {
self.btree.delete(tx, &mut self.store, key).await
}
pub(super) async fn statistics(&self, tx: &mut Transaction) -> Result<BStatistics, Error> {
pub(super) async fn statistics(&self, tx: &Transaction) -> Result<BStatistics, Error> {
self.btree.statistics(tx, &self.store).await
}
pub(super) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
if let Some(new_cache) = self.store.finish(tx).await? {
let state = self.btree.inc_generation();
tx.set(self.state_key.clone(), state.try_to_val()?).await?;
@ -106,15 +106,15 @@ mod tests {
order: u32,
tt: TransactionType,
) -> (Transaction, Postings) {
let mut tx = ds.transaction(tt, Optimistic).await.unwrap();
let p = Postings::new(ds.index_store(), &mut tx, IndexKeyBase::default(), order, tt, 100)
let tx = ds.transaction(tt, Optimistic).await.unwrap();
let p = Postings::new(ds.index_store(), &tx, IndexKeyBase::default(), order, tt, 100)
.await
.unwrap();
(tx, p)
}
async fn finish(mut tx: Transaction, mut p: Postings) {
p.finish(&mut tx).await.unwrap();
async fn finish(tx: Transaction, mut p: Postings) {
p.finish(&tx).await.unwrap();
tx.commit().await.unwrap();
}
@ -129,33 +129,33 @@ mod tests {
let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
finish(tx, p).await;
let (mut tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
assert_eq!(p.statistics(&mut tx).await.unwrap().keys_count, 0);
let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
assert_eq!(p.statistics(&tx).await.unwrap().keys_count, 0);
// Add postings
let (mut tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
p.update_posting(&mut tx, 1, 2, 3).await.unwrap();
p.update_posting(&mut tx, 1, 4, 5).await.unwrap();
let (tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
p.update_posting(&tx, 1, 2, 3).await.unwrap();
p.update_posting(&tx, 1, 4, 5).await.unwrap();
finish(tx, p).await;
let (mut tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
assert_eq!(p.statistics(&mut tx).await.unwrap().keys_count, 2);
let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
assert_eq!(p.statistics(&tx).await.unwrap().keys_count, 2);
assert_eq!(p.get_term_frequency(&mut tx, 1, 2).await.unwrap(), Some(3));
assert_eq!(p.get_term_frequency(&mut tx, 1, 4).await.unwrap(), Some(5));
assert_eq!(p.get_term_frequency(&tx, 1, 2).await.unwrap(), Some(3));
assert_eq!(p.get_term_frequency(&tx, 1, 4).await.unwrap(), Some(5));
let (mut tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
let (tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
// Check removal of doc 2
assert_eq!(p.remove_posting(&mut tx, 1, 2).await.unwrap(), Some(3));
assert_eq!(p.remove_posting(&tx, 1, 2).await.unwrap(), Some(3));
// Again the same
assert_eq!(p.remove_posting(&mut tx, 1, 2).await.unwrap(), None);
assert_eq!(p.remove_posting(&tx, 1, 2).await.unwrap(), None);
// Remove doc 4
assert_eq!(p.remove_posting(&mut tx, 1, 4).await.unwrap(), Some(5));
assert_eq!(p.remove_posting(&tx, 1, 4).await.unwrap(), Some(5));
finish(tx, p).await;
// The underlying b-tree should be empty now
let (mut tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
assert_eq!(p.statistics(&mut tx).await.unwrap().keys_count, 0);
let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
assert_eq!(p.statistics(&tx).await.unwrap().keys_count, 0);
}
}
}
View file
@ -40,7 +40,7 @@ impl BM25Scorer {
async fn term_score(
&self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
term_doc_count: DocLength,
term_frequency: TermFrequency,
@ -53,7 +53,7 @@ impl BM25Scorer {
pub(crate) async fn score(
&self,
tx: &mut Transaction,
tx: &Transaction,
doc_id: DocId,
) -> Result<Option<Score>, Error> {
let mut sc = 0.0;
View file
@ -22,7 +22,7 @@ impl TermDocs {
pub(super) async fn set_doc(
&self,
tx: &mut Transaction,
tx: &Transaction,
term_id: TermId,
doc_id: DocId,
) -> Result<(), Error> {
@ -38,7 +38,7 @@ impl TermDocs {
pub(super) async fn get_docs(
&self,
tx: &mut Transaction,
tx: &Transaction,
term_id: TermId,
) -> Result<Option<RoaringTreemap>, Error> {
let key = self.index_key_base.new_bc_key(term_id);
@ -52,7 +52,7 @@ impl TermDocs {
pub(super) async fn remove_doc(
&self,
tx: &mut Transaction,
tx: &Transaction,
term_id: TermId,
doc_id: DocId,
) -> Result<DocLength, Error> {
View file
@ -24,7 +24,7 @@ pub(in crate::idx) struct Terms {
impl Terms {
pub(super) async fn new(
ixs: &IndexStores,
tx: &mut Transaction,
tx: &Transaction,
index_key_base: IndexKeyBase,
default_btree_order: u32,
tt: TransactionType,
@ -74,7 +74,7 @@ impl Terms {
pub(super) async fn resolve_term_id(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
term: &str,
) -> Result<TermId, Error> {
let term_key = term.into();
@ -91,7 +91,7 @@ impl Terms {
pub(super) async fn get_term_id(
&self,
tx: &mut Transaction,
tx: &Transaction,
term: &str,
) -> Result<Option<TermId>, Error> {
self.btree.search(tx, &self.store, &term.into()).await
@ -99,7 +99,7 @@ impl Terms {
pub(super) async fn remove_term_id(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
term_id: TermId,
) -> Result<(), Error> {
let term_id_key = self.index_key_base.new_bu_key(term_id);
@ -117,11 +117,11 @@ impl Terms {
Ok(())
}
pub(super) async fn statistics(&self, tx: &mut Transaction) -> Result<BStatistics, Error> {
pub(super) async fn statistics(&self, tx: &Transaction) -> Result<BStatistics, Error> {
self.btree.statistics(tx, &self.store).await
}
pub(super) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
if let Some(new_cache) = self.store.finish(tx).await? {
let btree = self.btree.inc_generation().clone();
let state = State {
@ -253,15 +253,15 @@ mod tests {
order: u32,
tt: TransactionType,
) -> (Transaction, Terms) {
let mut tx = ds.transaction(tt, Optimistic).await.unwrap();
let t = Terms::new(ds.index_store(), &mut tx, IndexKeyBase::default(), order, tt, 100)
let tx = ds.transaction(tt, Optimistic).await.unwrap();
let t = Terms::new(ds.index_store(), &tx, IndexKeyBase::default(), order, tt, 100)
.await
.unwrap();
(tx, t)
}
async fn finish(mut tx: Transaction, mut t: Terms) {
t.finish(&mut tx).await.unwrap();
async fn finish(tx: Transaction, mut t: Terms) {
t.finish(&tx).await.unwrap();
tx.commit().await.unwrap();
}
@ -279,43 +279,43 @@ mod tests {
// Resolve a first term
{
let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&mut tx, "C").await.unwrap(), 0);
let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&tx, "C").await.unwrap(), 0);
finish(tx, t).await;
let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 1);
let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 1);
}
// Resolve a second term
{
let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&mut tx, "D").await.unwrap(), 1);
let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&tx, "D").await.unwrap(), 1);
finish(tx, t).await;
let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 2);
let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 2);
}
// Resolve two existing terms with new frequencies
{
let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&mut tx, "C").await.unwrap(), 0);
assert_eq!(t.resolve_term_id(&mut tx, "D").await.unwrap(), 1);
let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&tx, "C").await.unwrap(), 0);
assert_eq!(t.resolve_term_id(&tx, "D").await.unwrap(), 1);
finish(tx, t).await;
let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 2);
let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 2);
}
// Resolve one existing term and two new terms
{
let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&mut tx, "A").await.unwrap(), 2);
assert_eq!(t.resolve_term_id(&mut tx, "C").await.unwrap(), 0);
assert_eq!(t.resolve_term_id(&mut tx, "E").await.unwrap(), 3);
let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&tx, "A").await.unwrap(), 2);
assert_eq!(t.resolve_term_id(&tx, "C").await.unwrap(), 0);
assert_eq!(t.resolve_term_id(&tx, "E").await.unwrap(), 3);
finish(tx, t).await;
let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 4);
let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 4);
}
}
@ -326,38 +326,38 @@ mod tests {
let ds = Datastore::new("memory").await.unwrap();
{
let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
// Check removing a non-existing term id returns None
assert!(t.remove_term_id(&mut tx, 0).await.is_ok());
assert!(t.remove_term_id(&tx, 0).await.is_ok());
// Create a few terms
t.resolve_term_id(&mut tx, "A").await.unwrap();
t.resolve_term_id(&mut tx, "C").await.unwrap();
t.resolve_term_id(&mut tx, "E").await.unwrap();
t.resolve_term_id(&tx, "A").await.unwrap();
t.resolve_term_id(&tx, "C").await.unwrap();
t.resolve_term_id(&tx, "E").await.unwrap();
finish(tx, t).await;
}
for term in ["A", "C", "E"] {
let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
let term_id = t.get_term_id(&mut tx, term).await.unwrap();
let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
let term_id = t.get_term_id(&tx, term).await.unwrap();
if let Some(term_id) = term_id {
let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
t.remove_term_id(&mut tx, term_id).await.unwrap();
let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
t.remove_term_id(&tx, term_id).await.unwrap();
finish(tx, t).await;
let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.get_term_id(&mut tx, term).await.unwrap(), None);
let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
assert_eq!(t.get_term_id(&tx, term).await.unwrap(), None);
} else {
panic!("Term ID not found: {}", term);
}
}
// Check id recycling
let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&mut tx, "B").await.unwrap(), 0);
assert_eq!(t.resolve_term_id(&mut tx, "D").await.unwrap(), 1);
let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
assert_eq!(t.resolve_term_id(&tx, "B").await.unwrap(), 0);
assert_eq!(t.resolve_term_id(&tx, "D").await.unwrap(), 1);
finish(tx, t).await;
}
@ -375,10 +375,10 @@ mod tests {
async fn test_resolve_100_docs_with_50_words_one_by_one() {
let ds = Datastore::new("memory").await.unwrap();
for _ in 0..100 {
let (mut tx, mut t) = new_operation(&ds, 100, Write).await;
let (tx, mut t) = new_operation(&ds, 100, Write).await;
let terms_string = random_term_freq_vec(50);
for (term, _) in terms_string {
t.resolve_term_id(&mut tx, &term).await.unwrap();
t.resolve_term_id(&tx, &term).await.unwrap();
}
finish(tx, t).await;
}
@ -388,11 +388,11 @@ mod tests {
async fn test_resolve_100_docs_with_50_words_batch_of_10() {
let ds = Datastore::new("memory").await.unwrap();
for _ in 0..10 {
let (mut tx, mut t) = new_operation(&ds, 100, Write).await;
let (tx, mut t) = new_operation(&ds, 100, Write).await;
for _ in 0..10 {
let terms_string = random_term_freq_vec(50);
for (term, _) in terms_string {
t.resolve_term_id(&mut tx, &term).await.unwrap();
t.resolve_term_id(&tx, &term).await.unwrap();
}
}
finish(tx, t).await;

View file

@ -144,13 +144,12 @@ impl<'a> MTreeChecker<'a> {
return Ok(VecDeque::from([]));
}
let mut result = VecDeque::with_capacity(res.len());
let mut tx = self.ctx.tx_lock().await;
let txn = self.ctx.tx();
for (doc_id, dist) in res {
if let Some(key) = doc_ids.get_doc_key(&mut tx, doc_id).await? {
if let Some(key) = doc_ids.get_doc_key(&txn, doc_id).await? {
result.push_back((key.into(), dist, None));
}
}
drop(tx);
Ok(result)
}
}
@ -186,9 +185,8 @@ impl CheckerCacheEntry {
cond: &Cond,
) -> Result<Self, Error> {
if let Some(rid) = rid {
let mut tx = ctx.tx_lock().await;
let val = Iterable::fetch_thing(&mut tx, opt, &rid).await?;
drop(tx);
let txn = ctx.tx();
let val = Iterable::fetch_thing(&txn, opt, &rid).await?;
if !val.is_none_or_null() {
let (value, truthy) = {
let cursor_doc = CursorDoc {
@ -229,9 +227,8 @@ impl<'a> MTreeCondChecker<'a> {
match self.cache.entry(doc_id) {
Entry::Occupied(e) => Ok(e.get().truthy),
Entry::Vacant(e) => {
let mut tx = self.ctx.tx_lock().await;
let rid = doc_ids.get_doc_key(&mut tx, doc_id).await?.map(|k| k.into());
drop(tx);
let txn = self.ctx.tx();
let rid = doc_ids.get_doc_key(&txn, doc_id).await?.map(|k| k.into());
let ent =
CheckerCacheEntry::build(stk, self.ctx, self.opt, rid, self.cond.as_ref())
.await?;
View file
@ -175,10 +175,10 @@ impl InnerQueryExecutor {
}
Entry::Vacant(e) => {
let ikb = IndexKeyBase::new(opt.ns()?, opt.db()?, idx_def)?;
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
let mt = MTreeIndex::new(
ctx.get_index_stores(),
&mut tx,
&tx,
ikb,
p,
TransactionType::Read,
@ -563,11 +563,10 @@ impl QueryExecutor {
ft: &FtEntry,
) -> Result<bool, Error> {
let doc_key: Key = thg.into();
let mut run = ctx.tx_lock().await;
let tx = ctx.tx();
let di = ft.0.doc_ids.read().await;
let doc_id = di.get_doc_id(&mut run, doc_key).await?;
let doc_id = di.get_doc_id(&tx, doc_key).await?;
drop(di);
drop(run);
if let Some(doc_id) = doc_id {
let term_goals = ft.0.terms_docs.len();
// If there is no terms, it can't be a match
@ -640,18 +639,10 @@ impl QueryExecutor {
doc: &Value,
) -> Result<Value, Error> {
if let Some((e, ft)) = self.get_ft_entry_and_index(hlp.match_ref()) {
let mut run = ctx.tx_lock().await;
let tx = ctx.tx();
let res = ft
.highlight(
&mut run,
thg,
&e.0.query_terms_list,
hlp,
e.0.index_option.id_ref(),
doc,
)
.highlight(&tx, thg, &e.0.query_terms_list, hlp, e.0.index_option.id_ref(), doc)
.await;
drop(run);
return res;
}
Ok(Value::None)
@ -665,9 +656,8 @@ impl QueryExecutor {
partial: bool,
) -> Result<Value, Error> {
if let Some((e, ft)) = self.get_ft_entry_and_index(&match_ref) {
let mut run = ctx.tx_lock().await;
let res = ft.extract_offsets(&mut run, thg, &e.0.query_terms_list, partial).await;
drop(run);
let tx = ctx.tx();
let res = ft.extract_offsets(&tx, thg, &e.0.query_terms_list, partial).await;
return res;
}
Ok(Value::None)
@ -682,7 +672,7 @@ impl QueryExecutor {
) -> Result<Value, Error> {
if let Some(e) = self.get_ft_entry(match_ref) {
if let Some(scorer) = &e.0.scorer {
let mut run = ctx.tx_lock().await;
let tx = ctx.tx();
let mut doc_id = if let Some(ir) = ir {
ir.doc_id()
} else {
@ -691,17 +681,15 @@ impl QueryExecutor {
if doc_id.is_none() {
let key: Key = rid.into();
let di = e.0.doc_ids.read().await;
doc_id = di.get_doc_id(&mut run, key).await?;
doc_id = di.get_doc_id(&tx, key).await?;
drop(di);
}
if let Some(doc_id) = doc_id {
let score = scorer.score(&mut run, doc_id).await?;
let score = scorer.score(&tx, doc_id).await?;
if let Some(score) = score {
drop(run);
return Ok(Value::from(score));
}
}
drop(run);
}
}
Ok(Value::None)
@ -733,8 +721,8 @@ impl FtEntry {
if let Matches(qs, _) = io.op() {
let (terms_list, terms_set) =
ft.extract_querying_terms(stk, ctx, opt, qs.to_owned()).await?;
let mut tx = ctx.tx_lock().await;
let terms_docs = Arc::new(ft.get_terms_docs(&mut tx, &terms_list).await?);
let tx = ctx.tx();
let terms_docs = Arc::new(ft.get_terms_docs(&tx, &terms_list).await?);
drop(tx);
Ok(Some(Self(Arc::new(Inner {
index_option: io,
View file
@ -6,8 +6,8 @@ use crate::idx::ft::termdocs::TermsDocs;
use crate::idx::ft::{FtIndex, HitsIterator};
use crate::idx::planner::plan::RangeValue;
use crate::key::index::Index;
use crate::kvs;
use crate::kvs::{Key, Limit, ScanPage};
use crate::kvs::Key;
use crate::kvs::Transaction;
use crate::sql::statements::DefineIndexStatement;
use crate::sql::{Array, Ident, Thing, Value};
use radix_trie::Trie;
@ -118,20 +118,20 @@ impl ThingIterator {
pub(crate) async fn next_batch<B: IteratorBatch>(
&mut self,
ctx: &Context<'_>,
tx: &mut kvs::Transaction,
txn: &Transaction,
size: u32,
) -> Result<B, Error> {
match self {
Self::IndexEqual(i) => i.next_batch(tx, size).await,
Self::UniqueEqual(i) => i.next_batch(tx).await,
Self::IndexRange(i) => i.next_batch(tx, size).await,
Self::UniqueRange(i) => i.next_batch(tx, size).await,
Self::IndexUnion(i) => i.next_batch(ctx, tx, size).await,
Self::UniqueUnion(i) => i.next_batch(ctx, tx, size).await,
Self::Matches(i) => i.next_batch(ctx, tx, size).await,
Self::IndexEqual(i) => i.next_batch(txn, size).await,
Self::UniqueEqual(i) => i.next_batch(txn).await,
Self::IndexRange(i) => i.next_batch(txn, size).await,
Self::UniqueRange(i) => i.next_batch(txn, size).await,
Self::IndexUnion(i) => i.next_batch(ctx, txn, size).await,
Self::UniqueUnion(i) => i.next_batch(ctx, txn, size).await,
Self::Matches(i) => i.next_batch(ctx, txn, size).await,
Self::Knn(i) => i.next_batch(ctx, size).await,
Self::IndexJoin(i) => Box::pin(i.next_batch(ctx, tx, size)).await,
Self::UniqueJoin(i) => Box::pin(i.next_batch(ctx, tx, size)).await,
Self::IndexJoin(i) => Box::pin(i.next_batch(ctx, txn, size)).await,
Self::UniqueJoin(i) => Box::pin(i.next_batch(ctx, txn, size)).await,
}
}
}
@ -164,7 +164,7 @@ impl IndexEqualThingIterator {
}
async fn next_scan<B: IteratorBatch>(
tx: &mut kvs::Transaction,
tx: &Transaction,
irf: IteratorRef,
beg: &mut Vec<u8>,
end: &[u8],
@ -172,16 +172,7 @@ impl IndexEqualThingIterator {
) -> Result<B, Error> {
let min = beg.clone();
let max = end.to_owned();
let res = tx
.scan_paged(
ScanPage {
range: min..max,
limit: Limit::Limited(limit),
},
limit,
)
.await?;
let res = res.values;
let res = tx.scan(min..max, limit).await?;
if let Some((key, _)) = res.last() {
let mut key = key.clone();
key.push(0x00);
@ -194,7 +185,7 @@ impl IndexEqualThingIterator {
async fn next_batch<B: IteratorBatch>(
&mut self,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
) -> Result<B, Error> {
Self::next_scan(tx, self.irf, &mut self.beg, &self.end, limit).await
@ -306,21 +297,12 @@ impl IndexRangeThingIterator {
async fn next_batch<B: IteratorBatch>(
&mut self,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
) -> Result<B, Error> {
let min = self.r.beg.clone();
let max = self.r.end.clone();
let res = tx
.scan_paged(
ScanPage {
range: min..max,
limit: Limit::Limited(limit),
},
limit,
)
.await?;
let res = res.values;
let res = tx.scan(min..max, limit).await?;
if let Some((key, _)) = res.last() {
self.r.beg.clone_from(key);
self.r.beg.push(0x00);
@ -369,7 +351,7 @@ impl IndexUnionThingIterator {
async fn next_batch<B: IteratorBatch>(
&mut self,
ctx: &Context<'_>,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
) -> Result<B, Error> {
while let Some(r) = &mut self.current {
@ -423,7 +405,7 @@ impl JoinThingIterator {
async fn next_current_remote_batch(
&mut self,
ctx: &Context<'_>,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
) -> Result<bool, Error> {
while !ctx.is_done() {
@ -444,7 +426,7 @@ impl JoinThingIterator {
async fn next_current_local<F>(
&mut self,
ctx: &Context<'_>,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
new_iter: F,
) -> Result<bool, Error>
@ -471,7 +453,7 @@ impl JoinThingIterator {
async fn next_batch<F, B: IteratorBatch>(
&mut self,
ctx: &Context<'_>,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
new_iter: F,
) -> Result<B, Error>
@ -508,7 +490,7 @@ impl IndexJoinThingIterator {
async fn next_batch<B: IteratorBatch>(
&mut self,
ctx: &Context<'_>,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
) -> Result<B, Error> {
let new_iter = |ns: &str, db: &str, ix_what: &Ident, ix_name: &Ident, value: Value| {
@ -541,10 +523,7 @@ impl UniqueEqualThingIterator {
}
}
async fn next_batch<B: IteratorBatch>(
&mut self,
tx: &mut kvs::Transaction,
) -> Result<B, Error> {
async fn next_batch<B: IteratorBatch>(&mut self, tx: &Transaction) -> Result<B, Error> {
if let Some(key) = self.key.take() {
if let Some(val) = tx.get(key).await? {
let record = (val.into(), self.irf.into(), None);
@ -612,7 +591,7 @@ impl UniqueRangeThingIterator {
async fn next_batch<B: IteratorBatch>(
&mut self,
tx: &mut kvs::Transaction,
tx: &Transaction,
mut limit: u32,
) -> Result<B, Error> {
if self.done {
@ -621,17 +600,9 @@ impl UniqueRangeThingIterator {
let min = self.r.beg.clone();
let max = self.r.end.clone();
limit += 1;
let res = tx
.scan_paged(
ScanPage {
range: min..max,
limit: Limit::Limited(limit),
},
limit,
)
.await?;
let mut records = B::with_capacity(res.values.len());
for (k, v) in res.values {
let res = tx.scan(min..max, limit).await?;
let mut records = B::with_capacity(res.len());
for (k, v) in res {
limit -= 1;
if limit == 0 {
self.r.beg = k;
@ -682,7 +653,7 @@ impl UniqueUnionThingIterator {
async fn next_batch<B: IteratorBatch>(
&mut self,
ctx: &Context<'_>,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
) -> Result<B, Error> {
let limit = limit as usize;
@ -717,7 +688,7 @@ impl UniqueJoinThingIterator {
async fn next_batch<B: IteratorBatch>(
&mut self,
ctx: &Context<'_>,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
) -> Result<B, Error> {
let new_iter = |ns: &str, db: &str, ix_what: &Ident, ix_name: &Ident, value: Value| {
@ -756,7 +727,7 @@ impl MatchesThingIterator {
async fn next_batch<B: IteratorBatch>(
&mut self,
ctx: &Context<'_>,
tx: &mut kvs::Transaction,
tx: &Transaction,
limit: u32,
) -> Result<B, Error> {
if let Some(hits) = &mut self.hits {
View file
@ -6,7 +6,7 @@ use crate::idx::planner::executor::{
};
use crate::idx::planner::plan::{IndexOperator, IndexOption};
use crate::idx::planner::rewriter::KnnConditionRewriter;
use crate::kvs;
use crate::kvs::Transaction;
use crate::sql::index::Index;
use crate::sql::statements::{DefineFieldStatement, DefineIndexStatement};
use crate::sql::{
@ -115,7 +115,7 @@ impl<'a> TreeBuilder<'a> {
async fn lazy_load_schema_resolver(
&mut self,
tx: &mut kvs::Transaction,
tx: &Transaction,
table: &Table,
) -> Result<(), Error> {
if self.schemas.contains_key(table) {
@ -198,8 +198,8 @@ impl<'a> TreeBuilder<'a> {
}
async fn resolve_idiom(&mut self, i: &Idiom) -> Result<Node, Error> {
let mut tx = self.ctx.tx_lock().await;
self.lazy_load_schema_resolver(&mut tx, self.table).await?;
let tx = self.ctx.tx();
self.lazy_load_schema_resolver(&tx, self.table).await?;
// Try to detect if it matches an index
if let Some(schema) = self.schemas.get(self.table).cloned() {
@ -208,12 +208,10 @@ impl<'a> TreeBuilder<'a> {
return Ok(Node::IndexedField(i.clone(), irs));
}
// Try to detect an indexed record field
if let Some(ro) = self.resolve_record_field(&mut tx, schema.fields.as_ref(), i).await? {
drop(tx);
if let Some(ro) = self.resolve_record_field(&tx, schema.fields.as_ref(), i).await? {
return Ok(Node::RecordField(i.clone(), ro));
}
}
drop(tx);
Ok(Node::NonIndexedField(i.clone()))
}
@ -246,7 +244,7 @@ impl<'a> TreeBuilder<'a> {
async fn resolve_record_field(
&mut self,
tx: &mut kvs::Transaction,
tx: &Transaction,
fields: &[DefineFieldStatement],
idiom: &Idiom,
) -> Result<Option<RecordOptions>, Error> {
@ -544,7 +542,7 @@ struct SchemaCache {
}
impl SchemaCache {
async fn new(opt: &Options, table: &Table, tx: &mut kvs::Transaction) -> Result<Self, Error> {
async fn new(opt: &Options, table: &Table, tx: &Transaction) -> Result<Self, Error> {
let indexes = tx.all_tb_indexes(opt.ns()?, opt.db()?, table).await?;
let fields = tx.all_tb_fields(opt.ns()?, opt.db()?, table).await?;
Ok(Self {
View file
@ -287,7 +287,7 @@ where
pub async fn search(
&self,
tx: &mut Transaction,
tx: &Transaction,
store: &BTreeStore<BK>,
searched_key: &Key,
) -> Result<Option<Payload>, Error> {
@ -307,7 +307,7 @@ where
pub async fn search_mut(
&self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut BTreeStore<BK>,
searched_key: &Key,
) -> Result<Option<Payload>, Error> {
@ -329,7 +329,7 @@ where
pub async fn insert(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut BTreeStore<BK>,
key: Key,
payload: Payload,
@ -366,7 +366,7 @@ where
async fn insert_non_full(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut BTreeStore<BK>,
node_id: NodeId,
key: Key,
@ -481,7 +481,7 @@ where
pub(in crate::idx) async fn delete(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut BTreeStore<BK>,
key_to_delete: Key,
) -> Result<Option<Payload>, Error> {
@ -592,7 +592,7 @@ where
async fn deleted_from_internal(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut BTreeStore<BK>,
keys: &mut BK,
children: &mut Vec<NodeId>,
@ -669,7 +669,7 @@ where
async fn find_highest(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut BTreeStore<BK>,
node: StoredNode<BTreeNode<BK>>,
) -> Result<(Key, Payload), Error> {
@ -697,7 +697,7 @@ where
async fn find_lowest(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut BTreeStore<BK>,
node: StoredNode<BTreeNode<BK>>,
) -> Result<(Key, Payload), Error> {
@ -725,7 +725,7 @@ where
async fn deleted_traversal(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut BTreeStore<BK>,
keys: &mut BK,
children: &mut Vec<NodeId>,
@ -949,7 +949,7 @@ where
pub(in crate::idx) async fn statistics(
&self,
tx: &mut Transaction,
tx: &Transaction,
store: &BTreeStore<BK>,
) -> Result<BStatistics, Error> {
let mut stats = BStatistics::default();
@ -998,7 +998,7 @@ mod tests {
};
use crate::idx::trees::store::{NodeId, TreeNode, TreeNodeProvider};
use crate::idx::VersionedSerdeState;
use crate::kvs::{Datastore, Key, LockType::*, ScanPage, Transaction, TransactionType};
use crate::kvs::{Datastore, Key, LockType::*, Transaction, TransactionType};
use rand::prelude::SliceRandom;
use rand::thread_rng;
use std::cmp::Ordering;
@ -1034,7 +1034,7 @@ mod tests {
}
async fn insertions_test<F, BK>(
mut tx: Transaction,
tx: Transaction,
mut st: BTreeStore<BK>,
t: &mut BTree<BK>,
samples_size: usize,
@ -1046,14 +1046,14 @@ mod tests {
for i in 0..samples_size {
let (key, payload) = sample_provider(i);
// Insert the sample
t.insert(&mut tx, &mut st, key, payload).await.unwrap();
t.insert(&tx, &mut st, key, payload).await.unwrap();
}
st.finish(&mut tx).await.unwrap();
st.finish(&tx).await.unwrap();
tx.commit().await.unwrap();
}
async fn check_insertions<F, BK>(
mut tx: Transaction,
tx: Transaction,
st: BTreeStore<BK>,
t: &mut BTree<BK>,
samples_size: usize,
@ -1064,7 +1064,7 @@ mod tests {
{
for i in 0..samples_size {
let (key, payload) = sample_provider(i);
assert_eq!(t.search(&mut tx, &st, &key).await.unwrap(), Some(payload));
assert_eq!(t.search(&tx, &st, &key).await.unwrap(), Some(payload));
}
tx.cancel().await.unwrap();
}
@ -1124,9 +1124,9 @@ mod tests {
}
{
let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
assert_eq!(
t.statistics(&mut tx, &st).await.unwrap(),
t.statistics(&tx, &st).await.unwrap(),
BStatistics {
keys_count: 100,
max_depth: 3,
@ -1154,9 +1154,9 @@ mod tests {
}
{
let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
assert_eq!(
t.statistics(&mut tx, &st).await.unwrap(),
t.statistics(&tx, &st).await.unwrap(),
BStatistics {
keys_count: 100,
max_depth: 3,
@ -1188,8 +1188,8 @@ mod tests {
}
{
let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&mut tx, &st).await.unwrap();
let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&tx, &st).await.unwrap();
assert_eq!(s.keys_count, 100);
tx.cancel().await.unwrap();
}
@ -1215,8 +1215,8 @@ mod tests {
}
{
let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&mut tx, &st).await.unwrap();
let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&tx, &st).await.unwrap();
assert_eq!(s.keys_count, 100);
tx.cancel().await.unwrap();
}
@ -1238,9 +1238,9 @@ mod tests {
}
{
let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
assert_eq!(
t.statistics(&mut tx, &st).await.unwrap(),
t.statistics(&tx, &st).await.unwrap(),
BStatistics {
keys_count: 10000,
max_depth: 3,
@ -1267,9 +1267,9 @@ mod tests {
}
{
let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, cache_size).await;
let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, cache_size).await;
assert_eq!(
t.statistics(&mut tx, &st).await.unwrap(),
t.statistics(&tx, &st).await.unwrap(),
BStatistics {
keys_count: 10000,
max_depth: 3,
@ -1309,8 +1309,8 @@ mod tests {
.await;
}
let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
let statistics = t.statistics(&mut tx, &st).await.unwrap();
let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
let statistics = t.statistics(&tx, &st).await.unwrap();
tx.cancel().await.unwrap();
statistics
}
@ -1327,8 +1327,8 @@ mod tests {
.await;
}
let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let statistics = t.statistics(&mut tx, &st).await.unwrap();
let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let statistics = t.statistics(&tx, &st).await.unwrap();
tx.cancel().await.unwrap();
statistics
@ -1417,28 +1417,25 @@ mod tests {
let mut t = BTree::<TrieKeys>::new(BState::new(3));
{
let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
for (key, payload) in CLRS_EXAMPLE {
t.insert(&mut tx, &mut st, key.into(), payload).await.unwrap();
t.insert(&tx, &mut st, key.into(), payload).await.unwrap();
}
st.finish(&mut tx).await.unwrap();
st.finish(&tx).await.unwrap();
tx.commit().await.unwrap();
}
let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&mut tx, &st).await.unwrap();
let s = t.statistics(&tx, &st).await.unwrap();
assert_eq!(s.keys_count, 23);
assert_eq!(s.max_depth, 3);
assert_eq!(s.nodes_count, 10);
// There should be one record per node
assert_eq!(
10,
tx.scan_paged(ScanPage::from(vec![]..vec![0xf]), 100).await.unwrap().values.len()
);
assert_eq!(10, tx.scan(vec![]..vec![0xf], 100).await.unwrap().len());
let nodes_count = t
.inspect_nodes(&mut tx, &mut st, |count, depth, node_id, node| match count {
.inspect_nodes(&tx, &mut st, |count, depth, node_id, node| match count {
0 => {
assert_eq!(depth, 1);
assert_eq!(node_id, 7);
@ -1504,14 +1501,14 @@ mod tests {
async fn check_finish_commit<BK>(
t: &mut BTree<BK>,
mut st: BTreeStore<BK>,
mut tx: Transaction,
tx: Transaction,
mut gen: u64,
info: String,
) -> Result<u64, Error>
where
BK: BKeys + Clone + Debug,
{
if st.finish(&mut tx).await?.is_some() {
if st.finish(&tx).await?.is_some() {
t.state.generation += 1;
}
gen += 1;
@ -1527,9 +1524,9 @@ mod tests {
let mut t = BTree::<TrieKeys>::new(BState::new(3));
let mut check_generation = 0;
{
let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
for (key, payload) in CLRS_EXAMPLE {
t.insert(&mut tx, &mut st, key.into(), payload).await?;
t.insert(&tx, &mut st, key.into(), payload).await?;
}
check_generation = check_finish_commit(
&mut t,
@ -1545,10 +1542,10 @@ mod tests {
let mut key_count = CLRS_EXAMPLE.len() as u64;
for (key, payload) in [("f", 6), ("m", 13), ("g", 7), ("d", 4), ("b", 2)] {
{
let (mut tx, mut st) =
let (tx, mut st) =
new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
debug!("Delete {}", key);
assert_eq!(t.delete(&mut tx, &mut st, key.into()).await?, Some(payload));
assert_eq!(t.delete(&tx, &mut st, key.into()).await?, Some(payload));
check_generation = check_finish_commit(
&mut t,
st,
@ -1560,27 +1557,24 @@ mod tests {
}
key_count -= 1;
{
let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&mut tx, &st).await?;
let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&tx, &st).await?;
assert_eq!(s.keys_count, key_count);
}
}
}
let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&mut tx, &st).await.unwrap();
let s = t.statistics(&tx, &st).await.unwrap();
assert_eq!(s.keys_count, 18);
assert_eq!(s.max_depth, 2);
assert_eq!(s.nodes_count, 7);
// There should be one record per node
assert_eq!(
7,
tx.scan_paged(ScanPage::from(vec![]..vec![0xf]), 100).await.unwrap().values.len()
);
assert_eq!(7, tx.scan(vec![]..vec![0xf], 100).await.unwrap().len());
let nodes_count = t
.inspect_nodes(&mut tx, &mut st, |count, depth, node_id, node| match count {
.inspect_nodes(&tx, &mut st, |count, depth, node_id, node| match count {
0 => {
assert_eq!(depth, 1);
assert_eq!(node_id, 1);
@ -1639,11 +1633,11 @@ mod tests {
let mut check_generation = 0;
{
let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
for (key, payload) in CLRS_EXAMPLE {
expected_keys.insert(key.to_string(), payload);
t.insert(&mut tx, &mut st, key.into(), payload).await?;
let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?;
t.insert(&tx, &mut st, key.into(), payload).await?;
let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?;
assert_eq!(expected_keys, tree_keys);
}
check_generation = check_finish_commit(
@ -1657,8 +1651,8 @@ mod tests {
}
{
let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
print_tree(&mut tx, &mut st, &t).await;
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
print_tree(&tx, &mut st, &t).await;
tx.cancel().await?;
}
@ -1666,11 +1660,10 @@ mod tests {
debug!("------------------------");
debug!("Delete {}", key);
{
let (mut tx, mut st) =
new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
assert!(t.delete(&mut tx, &mut st, key.into()).await?.is_some());
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
assert!(t.delete(&tx, &mut st, key.into()).await?.is_some());
expected_keys.remove(key);
let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?;
let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?;
assert_eq!(expected_keys, tree_keys);
check_generation = check_finish_commit(
&mut t,
@ -1684,10 +1677,10 @@ mod tests {
// Check that every expected keys are still found in the tree
{
let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
for (key, payload) in &expected_keys {
assert_eq!(
t.search(&mut tx, &st, &key.as_str().into()).await?,
t.search(&tx, &st, &key.as_str().into()).await?,
Some(*payload),
"Can't find: {key}",
)
@ -1696,13 +1689,13 @@ mod tests {
}
}
let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&mut tx, &st).await?;
let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
let s = t.statistics(&tx, &st).await?;
assert_eq!(s.keys_count, 0);
assert_eq!(s.max_depth, 0);
assert_eq!(s.nodes_count, 0);
// There should not be any record in the database
assert_eq!(0, tx.scan_paged(ScanPage::from(vec![]..vec![0xf]), 100).await?.values.len());
assert_eq!(0, tx.scan(vec![]..vec![0xf], 100).await.unwrap().len());
tx.cancel().await?;
Ok(())
}
@ -1829,37 +1822,37 @@ mod tests {
];
let mut keys = BTreeMap::new();
{
let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
for term in terms {
t.insert(&mut tx, &mut st, term.into(), 0).await?;
t.insert(&tx, &mut st, term.into(), 0).await?;
keys.insert(term.to_string(), 0);
let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?;
let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?;
assert_eq!(keys, tree_keys);
}
st.finish(&mut tx).await?;
st.finish(&tx).await?;
tx.commit().await?;
}
{
let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Read, 100).await;
print_tree(&mut tx, &mut st, &t).await;
let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Read, 100).await;
print_tree(&tx, &mut st, &t).await;
}
{
let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
for term in terms {
debug!("Delete {term}");
t.delete(&mut tx, &mut st, term.into()).await?;
print_tree_mut(&mut tx, &mut st, &t).await;
t.delete(&tx, &mut st, term.into()).await?;
print_tree_mut(&tx, &mut st, &t).await;
keys.remove(term);
let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?;
let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?;
assert_eq!(keys, tree_keys);
}
st.finish(&mut tx).await?;
st.finish(&tx).await?;
tx.commit().await?;
}
{
let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
assert_eq!(check_btree_properties(&t, &mut tx, &mut st).await?.0, 0);
st.finish(&mut tx).await?;
let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
assert_eq!(check_btree_properties(&t, &tx, &mut st).await?.0, 0);
st.finish(&tx).await?;
tx.cancel().await?;
}
Ok(())
@ -1867,7 +1860,7 @@ mod tests {
async fn check_btree_properties<BK>(
t: &BTree<BK>,
tx: &mut Transaction,
tx: &Transaction,
st: &mut BTreeStore<BK>,
) -> Result<(usize, BTreeMap<String, Payload>), Error>
where
@ -1919,7 +1912,7 @@ mod tests {
}
}
async fn print_tree<BK>(tx: &mut Transaction, st: &mut BTreeStore<BK>, t: &BTree<BK>)
async fn print_tree<BK>(tx: &Transaction, st: &mut BTreeStore<BK>, t: &BTree<BK>)
where
BK: BKeys + Debug + Clone,
{
@ -1932,7 +1925,7 @@ mod tests {
debug!("----------------------------------");
}
async fn print_tree_mut<BK>(tx: &mut Transaction, st: &mut BTreeStore<BK>, t: &BTree<BK>)
async fn print_tree_mut<BK>(tx: &Transaction, st: &mut BTreeStore<BK>, t: &BTree<BK>)
where
BK: BKeys + Debug + Clone,
{
@ -1967,7 +1960,7 @@ mod tests {
/// This is for debugging
async fn inspect_nodes<F>(
&self,
tx: &mut Transaction,
tx: &Transaction,
st: &mut BTreeStore<BK>,
inspect_func: F,
) -> Result<usize, Error>
@ -1996,7 +1989,7 @@ mod tests {
/// This is for debugging
async fn inspect_nodes_mut<F>(
&self,
tx: &mut Transaction,
tx: &Transaction,
st: &mut BTreeStore<BK>,
mut inspect_func: F,
) -> Result<usize, Error>
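
The hunks above capture the two API shifts running through these B-tree tests: tree operations and statistics now borrow the transaction as a shared &Transaction rather than &mut Transaction, and raw key-range reads go through Transaction::scan with a range and a limit instead of scan_paged with a ScanPage. A minimal sketch of the new read path, assuming an existing Datastore and import paths matching the test modules in this diff; the helper name and key range are illustrative only:

    use crate::err::Error;
    use crate::kvs::{Datastore, LockType::Optimistic, TransactionType};

    // Illustrative helper: count the raw KV entries in a key range with the
    // refactored transaction API.
    async fn count_entries(ds: &Datastore) -> Result<usize, Error> {
        // Read transactions no longer need to be mutable to be queried.
        let tx = ds.transaction(TransactionType::Read, Optimistic).await?;
        // `scan` takes the key range and a limit and returns the matching entries.
        let entries = tx.scan(vec![]..vec![0xf], 100).await?;
        // Read-only transactions are cancelled rather than committed.
        tx.cancel().await?;
        Ok(entries.len())
    }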

View file

@ -48,16 +48,16 @@ struct MTreeSearchContext<'a> {
impl MTreeIndex {
pub async fn new(
ixs: &IndexStores,
tx: &mut Transaction,
txn: &Transaction,
ikb: IndexKeyBase,
p: &MTreeParams,
tt: TransactionType,
) -> Result<Self, Error> {
let doc_ids = Arc::new(RwLock::new(
DocIds::new(ixs, tx, tt, ikb.clone(), p.doc_ids_order, p.doc_ids_cache).await?,
DocIds::new(ixs, txn, tt, ikb.clone(), p.doc_ids_order, p.doc_ids_cache).await?,
));
let state_key = ikb.new_vm_key(None);
let state: MState = if let Some(val) = tx.get(state_key.clone()).await? {
let state: MState = if let Some(val) = txn.get(state_key.clone()).await? {
MState::try_from_val(val)?
} else {
MState::new(p.capacity)
@ -81,16 +81,17 @@ impl MTreeIndex {
store,
})
}
pub async fn index_document(
&mut self,
stk: &mut Stk,
tx: &mut Transaction,
txn: &Transaction,
rid: &Thing,
content: &Vec<Value>,
) -> Result<(), Error> {
// Resolve the doc_id
let mut doc_ids = self.doc_ids.write().await;
let resolved = doc_ids.resolve_doc_id(tx, rid.into()).await?;
let resolved = doc_ids.resolve_doc_id(txn, rid.into()).await?;
let doc_id = *resolved.doc_id();
drop(doc_ids);
// Index the values
@ -100,12 +101,37 @@ impl MTreeIndex {
let vector = Vector::try_from_value(self.vector_type, self.dim, v)?;
vector.check_dimension(self.dim)?;
// Insert the vector in the index
mtree.insert(stk, tx, &mut self.store, vector.into(), doc_id).await?;
mtree.insert(stk, txn, &mut self.store, vector.into(), doc_id).await?;
}
drop(mtree);
Ok(())
}
pub async fn remove_document(
&mut self,
stk: &mut Stk,
txn: &Transaction,
rid: &Thing,
content: &Vec<Value>,
) -> Result<(), Error> {
let mut doc_ids = self.doc_ids.write().await;
let doc_id = doc_ids.remove_doc(txn, rid.into()).await?;
drop(doc_ids);
if let Some(doc_id) = doc_id {
// Lock the index
let mut mtree = self.mtree.write().await;
for v in content {
// Extract the vector
let vector = Vector::try_from_value(self.vector_type, self.dim, v)?;
vector.check_dimension(self.dim)?;
// Remove the vector
mtree.delete(stk, txn, &mut self.store, vector.into(), doc_id).await?;
}
drop(mtree);
}
Ok(())
}
pub async fn knn_search(
&self,
stk: &mut Stk,
@ -136,38 +162,13 @@ impl MTreeIndex {
res
}
pub async fn remove_document(
&mut self,
stk: &mut Stk,
tx: &mut Transaction,
rid: &Thing,
content: &Vec<Value>,
) -> Result<(), Error> {
let mut doc_ids = self.doc_ids.write().await;
let doc_id = doc_ids.remove_doc(tx, rid.into()).await?;
drop(doc_ids);
if let Some(doc_id) = doc_id {
// Lock the index
let mut mtree = self.mtree.write().await;
for v in content {
// Extract the vector
let vector = Vector::try_from_value(self.vector_type, self.dim, v)?;
vector.check_dimension(self.dim)?;
// Remove the vector
mtree.delete(stk, tx, &mut self.store, vector.into(), doc_id).await?;
}
drop(mtree);
}
Ok(())
}
pub(crate) async fn statistics(&self, tx: &mut Transaction) -> Result<MtStatistics, Error> {
pub(crate) async fn statistics(&self, tx: &Transaction) -> Result<MtStatistics, Error> {
Ok(MtStatistics {
doc_ids: self.doc_ids.read().await.statistics(tx).await?,
})
}
pub async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
pub async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
let mut doc_ids = self.doc_ids.write().await;
doc_ids.finish(tx).await?;
drop(doc_ids);
@ -296,7 +297,7 @@ impl MTree {
async fn insert(
&mut self,
stk: &mut Stk,
tx: &mut Transaction,
tx: &Transaction,
store: &mut MTreeStore,
obj: SharedVector,
id: DocId,
@ -368,7 +369,7 @@ impl MTree {
async fn append(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut MTreeStore,
object: &SharedVector,
id: DocId,
@ -406,7 +407,7 @@ impl MTree {
async fn insert_at_node(
&mut self,
stk: &mut Stk,
tx: &mut Transaction,
tx: &Transaction,
store: &mut MTreeStore,
node: MStoredNode,
parent_center: &Option<SharedVector>,
@ -442,7 +443,7 @@ impl MTree {
async fn insert_node_internal(
&mut self,
stk: &mut Stk,
tx: &mut Transaction,
tx: &Transaction,
store: &mut MTreeStore,
node_id: NodeId,
node_key: Key,
@ -749,7 +750,7 @@ impl MTree {
async fn delete(
&mut self,
stk: &mut Stk,
tx: &mut Transaction,
tx: &Transaction,
store: &mut MTreeStore,
object: SharedVector,
doc_id: DocId,
@ -795,7 +796,7 @@ impl MTree {
async fn delete_at_node(
&mut self,
stk: &mut Stk,
tx: &mut Transaction,
tx: &Transaction,
store: &mut MTreeStore,
node: MStoredNode,
parent_center: &Option<SharedVector>,
@ -844,7 +845,7 @@ impl MTree {
async fn delete_node_internal(
&mut self,
stk: &mut Stk,
tx: &mut Transaction,
tx: &Transaction,
store: &mut MTreeStore,
node_id: NodeId,
node_key: Key,
@ -975,7 +976,7 @@ impl MTree {
#[allow(clippy::too_many_arguments)]
async fn deletion_underflown(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
store: &mut MTreeStore,
parent_center: &Option<SharedVector>,
n_node: &mut InternalNode,
@ -1471,16 +1472,9 @@ impl VersionedSerdeState for MState {}
#[cfg(test)]
mod tests {
use futures::lock::Mutex;
use hashbrown::{HashMap, HashSet};
use reblessive::tree::Stk;
use std::collections::VecDeque;
use std::sync::Arc;
use crate::ctx::Context;
use crate::err::Error;
use test_log::test;
use crate::idx::docids::{DocId, DocIds};
use crate::idx::planner::checker::MTreeConditionChecker;
use crate::idx::trees::knn::tests::TestCollection;
@ -1492,6 +1486,10 @@ mod tests {
use crate::kvs::Transaction;
use crate::kvs::{Datastore, TransactionType};
use crate::sql::index::{Distance, VectorType};
use hashbrown::{HashMap, HashSet};
use reblessive::tree::Stk;
use std::collections::VecDeque;
use test_log::test;
async fn new_operation<'a>(
ds: &Datastore,
@ -1503,15 +1501,15 @@ mod tests {
.index_store()
.get_store_mtree(TreeNodeProvider::Debug, t.state.generation, tt, cache_size)
.await;
let tx = Arc::new(Mutex::new(ds.transaction(tt, Optimistic).await.unwrap()));
let ctx = Context::default().set_transaction(tx);
let tx = ds.transaction(tt, Optimistic).await.unwrap().enclose();
let ctx = Context::default().with_transaction(tx);
(ctx, st)
}
async fn finish_operation(
ds: &Datastore,
t: &mut MTree,
tx: &mut Transaction,
tx: &Transaction,
mut st: TreeStore<MTreeNode>,
commit: bool,
) -> Result<(), Error> {
@ -1540,18 +1538,16 @@ mod tests {
for (doc_id, obj) in collection.to_vec_ref() {
{
let (ctx, mut st) = new_operation(ds, t, TransactionType::Write, cache_size).await;
let mut tx = ctx.tx_lock().await;
t.insert(stk, &mut tx, &mut st, obj.clone(), *doc_id).await?;
finish_operation(ds, t, &mut tx, st, true).await?;
drop(tx);
let tx = ctx.tx();
t.insert(stk, &tx, &mut st, obj.clone(), *doc_id).await?;
finish_operation(ds, t, &tx, st, true).await?;
map.insert(*doc_id, obj.clone());
}
c += 1;
{
let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await;
let mut tx = ctx.tx_lock().await;
let p = check_tree_properties(&mut tx, &mut st, t).await?;
drop(tx);
let tx = ctx.tx();
let p = check_tree_properties(&tx, &mut st, t).await?;
assert_eq!(p.doc_count, c);
}
}
@ -1568,19 +1564,17 @@ mod tests {
let mut map = HashMap::with_capacity(collection.len());
{
let (ctx, mut st) = new_operation(ds, t, TransactionType::Write, cache_size).await;
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
for (doc_id, obj) in collection.to_vec_ref() {
t.insert(stk, &mut tx, &mut st, obj.clone(), *doc_id).await?;
t.insert(stk, &tx, &mut st, obj.clone(), *doc_id).await?;
map.insert(*doc_id, obj.clone());
}
finish_operation(ds, t, &mut tx, st, true).await?;
drop(tx);
finish_operation(ds, t, &tx, st, true).await?;
}
{
let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await;
let mut tx = ctx.tx_lock().await;
check_tree_properties(&mut tx, &mut st, t).await?;
drop(tx);
let tx = ctx.tx();
check_tree_properties(&tx, &mut st, t).await?;
}
Ok(map)
}
@ -1598,9 +1592,9 @@ mod tests {
let deleted = {
debug!("### Remove {} {:?}", doc_id, obj);
let (ctx, mut st) = new_operation(ds, t, TransactionType::Write, cache_size).await;
let mut tx = ctx.tx_lock().await;
let deleted = t.delete(stk, &mut tx, &mut st, obj.clone(), *doc_id).await?;
finish_operation(ds, t, &mut tx, st, true).await?;
let tx = ctx.tx();
let deleted = t.delete(stk, &tx, &mut st, obj.clone(), *doc_id).await?;
finish_operation(ds, t, &tx, st, true).await?;
drop(tx);
deleted
};
@ -1627,16 +1621,16 @@ mod tests {
}
{
let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await;
let mut tx = ctx.tx_lock().await;
check_tree_properties(&mut tx, &mut st, t).await?;
let tx = ctx.tx();
check_tree_properties(&tx, &mut st, t).await?;
drop(tx);
}
}
if all_deleted {
let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await;
let mut tx = ctx.tx_lock().await;
check_tree_properties(&mut tx, &mut st, t).await?.check(0, 0, None, None, 0, 0);
let tx = ctx.tx();
check_tree_properties(&tx, &mut st, t).await?.check(0, 0, None, None, 0, 0);
drop(tx);
}
Ok(())
@ -1677,9 +1671,8 @@ mod tests {
if expected_len != res.docs.len() {
#[cfg(debug_assertions)]
debug!("{:?}", res.visited_nodes);
let mut tx = ctx.tx_lock().await;
check_tree_properties(&mut tx, &mut st, t).await?;
drop(tx);
let tx = ctx.tx();
check_tree_properties(&tx, &mut st, t).await?;
}
assert_eq!(
expected_len,
@ -1761,10 +1754,10 @@ mod tests {
let mut t = MTree::new(MState::new(*capacity), distance.clone());
let (ctx, _st) = new_operation(&ds, &t, TransactionType::Read, cache_size).await;
let mut tx = ctx.tx_lock().await;
let tx = ctx.tx();
let doc_ids = DocIds::new(
ds.index_store(),
&mut tx,
&tx,
TransactionType::Read,
IndexKeyBase::default(),
7,
@ -1772,7 +1765,6 @@ mod tests {
)
.await
.unwrap();
drop(tx);
let map = if collection.len() < 1000 {
insert_collection_one_by_one(stk, &ds, &mut t, &collection, cache_size).await?
@ -2078,7 +2070,7 @@ mod tests {
}
async fn check_tree_properties(
tx: &mut Transaction,
tx: &Transaction,
st: &mut MTreeStore,
t: &MTree,
) -> Result<CheckedProperties, Error> {
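
These MTree tests also show how the refactored context carries the transaction: it is wrapped with enclose(), attached with Context::default().with_transaction(..), and later borrowed with ctx.tx() instead of being locked through ctx.tx_lock().await. A condensed sketch of that wiring, assuming an existing Datastore and the same imports as the tests above; the function itself is illustrative only:

    // Illustrative only: open a write transaction, publish it on a Context,
    // and commit through the shared handle.
    async fn with_shared_transaction(ds: &Datastore) -> Result<(), Error> {
        // `enclose()` wraps the transaction so it can be shared without `&mut`.
        let txn = ds.transaction(TransactionType::Write, Optimistic).await?.enclose();
        let ctx = Context::default().with_transaction(txn);
        // Anything holding the context can now borrow the same transaction.
        let tx = ctx.tx();
        // ... perform index work with `&tx` ...
        tx.commit().await?;
        Ok(())
    }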

View file

@ -131,7 +131,7 @@ where
pub(super) async fn get_node(
&self,
tx: &mut Transaction,
tx: &Transaction,
node_id: NodeId,
) -> Result<Arc<StoredNode<N>>, Error> {
match self {
@ -208,7 +208,7 @@ where
async fn get_node(
&self,
tx: &mut Transaction,
tx: &Transaction,
node_id: NodeId,
) -> Result<Arc<StoredNode<N>>, Error> {
if let Some(n) = self.lru.get(node_id).await {
@ -260,7 +260,7 @@ where
pub(super) async fn get_node(
&self,
tx: &mut Transaction,
tx: &Transaction,
node_id: NodeId,
) -> Result<Arc<StoredNode<N>>, Error> {
match self.cache.entry(node_id) {

View file

@ -47,7 +47,7 @@ where
pub(in crate::idx) async fn get_node_mut(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
node_id: NodeId,
) -> Result<StoredNode<N>, Error> {
match self {
@ -58,7 +58,7 @@ where
pub(in crate::idx) async fn get_node(
&self,
tx: &mut Transaction,
tx: &Transaction,
node_id: NodeId,
) -> Result<Arc<StoredNode<N>>, Error> {
match self {
@ -74,10 +74,8 @@ where
) -> Result<Arc<StoredNode<N>>, Error> {
match self {
Self::Read(r) => {
let mut tx = ctx.tx_lock().await;
let n = r.get_node(&mut tx, node_id).await;
drop(tx);
n
let tx = ctx.tx();
r.get_node(&tx, node_id).await
}
_ => Err(Error::Unreachable("TreeStore::get_node_txn")),
}
@ -112,7 +110,7 @@ where
}
}
pub async fn finish(&mut self, tx: &mut Transaction) -> Result<Option<TreeCache<N>>, Error> {
pub async fn finish(&mut self, tx: &Transaction) -> Result<Option<TreeCache<N>>, Error> {
match self {
Self::Write(w) => w.finish(tx).await,
_ => Ok(None),
@ -143,7 +141,7 @@ impl TreeNodeProvider {
}
}
async fn load<N>(&self, tx: &mut Transaction, id: NodeId) -> Result<StoredNode<N>, Error>
async fn load<N>(&self, tx: &Transaction, id: NodeId) -> Result<StoredNode<N>, Error>
where
N: TreeNode + Clone,
{
@ -157,7 +155,7 @@ impl TreeNodeProvider {
}
}
async fn save<N>(&self, tx: &mut Transaction, node: &mut StoredNode<N>) -> Result<(), Error>
async fn save<N>(&self, tx: &Transaction, node: &mut StoredNode<N>) -> Result<(), Error>
where
N: TreeNode + Clone + Display,
{
@ -290,20 +288,16 @@ impl IndexStores {
pub(crate) async fn index_removed(
&self,
tx: &mut Transaction,
tx: &Transaction,
ns: &str,
db: &str,
tb: &str,
ix: &str,
) -> Result<(), Error> {
self.remove_index(ns, db, tx.get_and_cache_tb_index(ns, db, tb, ix).await?.as_ref()).await
self.remove_index(ns, db, tx.get_tb_index(ns, db, tb, ix).await?.as_ref()).await
}
pub(crate) async fn namespace_removed(
&self,
tx: &mut Transaction,
ns: &str,
) -> Result<(), Error> {
pub(crate) async fn namespace_removed(&self, tx: &Transaction, ns: &str) -> Result<(), Error> {
for db in tx.all_db(ns).await?.iter() {
self.database_removed(tx, ns, &db.name).await?;
}
@ -312,7 +306,7 @@ impl IndexStores {
pub(crate) async fn database_removed(
&self,
tx: &mut Transaction,
tx: &Transaction,
ns: &str,
db: &str,
) -> Result<(), Error> {
@ -324,7 +318,7 @@ impl IndexStores {
pub(crate) async fn table_removed(
&self,
tx: &mut Transaction,
tx: &Transaction,
ns: &str,
db: &str,
tb: &str,

View file

@ -41,7 +41,7 @@ where
pub(super) async fn get_node_mut(
&mut self,
tx: &mut Transaction,
tx: &Transaction,
node_id: NodeId,
) -> Result<StoredNode<N>, Error> {
#[cfg(debug_assertions)]
@ -95,10 +95,7 @@ where
Ok(())
}
pub(super) async fn finish(
&mut self,
tx: &mut Transaction,
) -> Result<Option<TreeCache<N>>, Error> {
pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<Option<TreeCache<N>>, Error> {
#[cfg(debug_assertions)]
{
if !self.out.is_empty() {
@ -167,7 +164,7 @@ where
pub(super) async fn get_node(
&self,
tx: &mut Transaction,
tx: &Transaction,
node_id: NodeId,
) -> Result<Arc<StoredNode<N>>, Error> {
let r = self.cache.get_node(tx, node_id).await?;

View file

@ -545,12 +545,11 @@ mod tests {
assert_eq!(dist.compute(&v1, &v2).unwrap(), res.into());
// Check the "Vector" optimised implementations
for t in [VectorType::F64] {
let t = VectorType::F64;
let v1: SharedVector = Vector::try_from_vector(t, &v1).unwrap().into();
let v2: SharedVector = Vector::try_from_vector(t, &v2).unwrap().into();
assert_eq!(dist.calculate(&v1, &v2), res);
}
}
fn test_distance_collection(dist: Distance, size: usize, dim: usize) {
let mut rng = get_seed_rnd();

View file

@ -1,17 +1,19 @@
use std::fmt::{Display, Formatter};
#[allow(unused)]
pub(crate) trait Categorise {
/// Returns the category of the key for error reporting
fn categorise(&self) -> Category;
}
#[derive(Debug, Copy, Clone)]
#[non_exhaustive]
pub enum KeyCategory {
/// This category is reserved for cases when we do not know the category
/// It should be caught and re-populated with the correct category where appropriate
Unknown,
#[allow(unused)]
pub enum Category {
/// crate::key::root::all /
Root,
/// crate::key::root::ac /!ac{ac}
Access,
/// crate::key::root::hb /!hb{ts}/{nd}
Heartbeat,
/// crate::key::root::nd /!nd{nd}
Node,
/// crate::key::root::ni /!ni
@ -21,24 +23,33 @@ pub enum KeyCategory {
/// crate::key::root::us /!us{us}
User,
///
/// ------------------------------
///
/// crate::key::node::all /${nd}
NodeRoot,
/// crate::key::node::lq /${nd}!lq{lq}{ns}{db}
NodeLiveQuery,
///
/// ------------------------------
///
/// crate::key::namespace::di /+{ni}!di
DatabaseIdentifier,
/// crate::key::database::ti /+{ni}*{di}!ti
DatabaseTableIdentifier,
///
/// ------------------------------
///
/// crate::key::namespace::all /*{ns}
NamespaceRoot,
/// crate::key::namespace::db /*{ns}!db{db}
DatabaseAlias,
/// crate::key::namespace::di /+{ns id}!di
DatabaseIdentifier,
/// crate::key::namespace::lg /*{ns}!lg{lg}
DatabaseLogAlias,
/// crate::key::namespace::ac /*{ns}!ac{ac}
NamespaceAccess,
/// crate::key::namespace::us /*{ns}!us{us}
NamespaceUser,
///
/// ------------------------------
///
/// crate::key::database::all /*{ns}*{db}
DatabaseRoot,
/// crate::key::database::ac /*{ns}*{db}!ac{ac}
@ -47,16 +58,12 @@ pub enum KeyCategory {
DatabaseAnalyzer,
/// crate::key::database::fc /*{ns}*{db}!fn{fc}
DatabaseFunction,
/// crate::key::database::lg /*{ns}*{db}!lg{lg}
DatabaseLog,
/// crate::key::database::ml /*{ns}*{db}!ml{ml}{vn}
DatabaseModel,
/// crate::key::database::pa /*{ns}*{db}!pa{pa}
DatabaseParameter,
/// crate::key::database::tb /*{ns}*{db}!tb{tb}
DatabaseTable,
/// crate::key::database::ti /+{ns id}*{db id}!ti
DatabaseTableIdentifier,
/// crate::key::database::ts /*{ns}*{db}!ts{ts}
DatabaseTimestamp,
/// crate::key::database::us /*{ns}*{db}!us{us}
@ -64,6 +71,8 @@ pub enum KeyCategory {
/// crate::key::database::vs /*{ns}*{db}!vs
DatabaseVersionstamp,
///
/// ------------------------------
///
/// crate::key::table::all /*{ns}*{db}*{tb}
TableRoot,
/// crate::key::table::ev /*{ns}*{db}*{tb}!ev{ev}
@ -77,6 +86,8 @@ pub enum KeyCategory {
/// crate::key::table::lq /*{ns}*{db}*{tb}!lq{lq}
TableLiveQuery,
///
/// ------------------------------
///
/// crate::key::index::all /*{ns}*{db}*{tb}+{ix}
IndexRoot,
/// crate::key::index::bc /*{ns}*{db}*{tb}+{ix}!bc{id}
@ -104,69 +115,71 @@ pub enum KeyCategory {
/// crate::key::index /*{ns}*{db}*{tb}+{ix}*{fd}{id}
Index,
///
/// ------------------------------
///
/// crate::key::change /*{ns}*{db}#{ts}
ChangeFeed,
///
/// ------------------------------
///
/// crate::key::thing /*{ns}*{db}*{tb}*{id}
Thing,
///
/// ------------------------------
///
/// crate::key::graph /*{ns}*{db}*{tb}~{id}{eg}{fk}
Graph,
}
impl Display for KeyCategory {
impl Display for Category {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let name = match self {
KeyCategory::Unknown => "Unknown",
KeyCategory::Root => "Root",
KeyCategory::Access => "Access",
KeyCategory::Heartbeat => "Heartbeat",
KeyCategory::Node => "Node",
KeyCategory::NamespaceIdentifier => "NamespaceIdentifier",
KeyCategory::Namespace => "Namespace",
KeyCategory::User => "User",
KeyCategory::NodeRoot => "NodeRoot",
KeyCategory::NodeLiveQuery => "NodeLiveQuery",
KeyCategory::NamespaceRoot => "NamespaceRoot",
KeyCategory::DatabaseAlias => "DatabaseAlias",
KeyCategory::DatabaseIdentifier => "DatabaseIdentifier",
KeyCategory::DatabaseLogAlias => "DatabaseLogAlias",
KeyCategory::NamespaceAccess => "NamespaceAccess",
KeyCategory::NamespaceUser => "NamespaceUser",
KeyCategory::DatabaseRoot => "DatabaseRoot",
KeyCategory::DatabaseAccess => "DatabaseAccess",
KeyCategory::DatabaseAnalyzer => "DatabaseAnalyzer",
KeyCategory::DatabaseFunction => "DatabaseFunction",
KeyCategory::DatabaseLog => "DatabaseLog",
KeyCategory::DatabaseModel => "DatabaseModel",
KeyCategory::DatabaseParameter => "DatabaseParameter",
KeyCategory::DatabaseTable => "DatabaseTable",
KeyCategory::DatabaseTableIdentifier => "DatabaseTableIdentifier",
KeyCategory::DatabaseTimestamp => "DatabaseTimestamp",
KeyCategory::DatabaseUser => "DatabaseUser",
KeyCategory::DatabaseVersionstamp => "DatabaseVersionstamp",
KeyCategory::TableRoot => "TableRoot",
KeyCategory::TableEvent => "TableEvent",
KeyCategory::TableField => "TableField",
KeyCategory::TableView => "TableView",
KeyCategory::IndexDefinition => "IndexDefinition",
KeyCategory::TableLiveQuery => "TableLiveQuery",
KeyCategory::IndexRoot => "IndexRoot",
KeyCategory::IndexTermDocList => "IndexTermDocList",
KeyCategory::IndexBTreeNode => "IndexBTreeNode",
KeyCategory::IndexTermDocFrequency => "IndexTermDocFrequency",
KeyCategory::IndexDocKeys => "IndexDocKeys",
KeyCategory::IndexTermList => "IndexTermList",
KeyCategory::IndexBTreeNodeDocLengths => "IndexBTreeNodeDocLengths",
KeyCategory::IndexOffset => "IndexOffset",
KeyCategory::IndexBTreeNodePostings => "IndexBTreeNodePostings",
KeyCategory::IndexFullTextState => "IndexFullTextState",
KeyCategory::IndexBTreeNodeTerms => "IndexBTreeNodeTerms",
KeyCategory::IndexTerms => "IndexTerms",
KeyCategory::Index => "Index",
KeyCategory::ChangeFeed => "ChangeFeed",
KeyCategory::Thing => "Thing",
KeyCategory::Graph => "Graph",
Self::Root => "Root",
Self::Access => "Access",
Self::Node => "Node",
Self::NamespaceIdentifier => "NamespaceIdentifier",
Self::Namespace => "Namespace",
Self::User => "User",
Self::NodeRoot => "NodeRoot",
Self::NodeLiveQuery => "NodeLiveQuery",
Self::NamespaceRoot => "NamespaceRoot",
Self::DatabaseAlias => "DatabaseAlias",
Self::DatabaseIdentifier => "DatabaseIdentifier",
Self::NamespaceAccess => "NamespaceAccess",
Self::NamespaceUser => "NamespaceUser",
Self::DatabaseRoot => "DatabaseRoot",
Self::DatabaseAccess => "DatabaseAccess",
Self::DatabaseAnalyzer => "DatabaseAnalyzer",
Self::DatabaseFunction => "DatabaseFunction",
Self::DatabaseModel => "DatabaseModel",
Self::DatabaseParameter => "DatabaseParameter",
Self::DatabaseTable => "DatabaseTable",
Self::DatabaseTableIdentifier => "DatabaseTableIdentifier",
Self::DatabaseTimestamp => "DatabaseTimestamp",
Self::DatabaseUser => "DatabaseUser",
Self::DatabaseVersionstamp => "DatabaseVersionstamp",
Self::TableRoot => "TableRoot",
Self::TableEvent => "TableEvent",
Self::TableField => "TableField",
Self::TableView => "TableView",
Self::IndexDefinition => "IndexDefinition",
Self::TableLiveQuery => "TableLiveQuery",
Self::IndexRoot => "IndexRoot",
Self::IndexTermDocList => "IndexTermDocList",
Self::IndexBTreeNode => "IndexBTreeNode",
Self::IndexTermDocFrequency => "IndexTermDocFrequency",
Self::IndexDocKeys => "IndexDocKeys",
Self::IndexTermList => "IndexTermList",
Self::IndexBTreeNodeDocLengths => "IndexBTreeNodeDocLengths",
Self::IndexOffset => "IndexOffset",
Self::IndexBTreeNodePostings => "IndexBTreeNodePostings",
Self::IndexFullTextState => "IndexFullTextState",
Self::IndexBTreeNodeTerms => "IndexBTreeNodeTerms",
Self::IndexTerms => "IndexTerms",
Self::Index => "Index",
Self::ChangeFeed => "ChangeFeed",
Self::Thing => "Thing",
Self::Graph => "Graph",
};
write!(f, "{}", name)
}
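
The renamed trait keeps the shape of the old KeyRequirements: a key type reports its Category, and the Display implementation above turns the variant into a readable name for error reporting. A minimal sketch for a hypothetical key type (not one of the keys touched by this commit):

    use crate::key::category::{Categorise, Category};

    // Hypothetical key type, for illustration only.
    struct ExampleKey;

    impl Categorise for ExampleKey {
        fn categorise(&self) -> Category {
            // Pick the category matching the key's prefix layout.
            Category::Thing
        }
    }

    // `Category` implements `Display`, so the name can be used directly in messages.
    fn category_name(key: &ExampleKey) -> String {
        key.categorise().to_string()
    }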

View file

@ -1,11 +1,9 @@
/// Stores change feeds
//! Stores change feeds
use crate::key::category::Categorise;
use crate::key::category::Category;
use crate::vs;
use derive::Key;
use serde::{Deserialize, Serialize};
use crate::vs;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use std::str;
// Cf stands for change feeds
@ -73,9 +71,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Cf<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::ChangeFeed
impl Categorise for Cf<'_> {
fn categorise(&self) -> Category {
Category::ChangeFeed
}
}

View file

@ -1,6 +1,6 @@
/// Stores a DEFINE ACCESS ON DATABASE config definition
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
//! Stores a DEFINE ACCESS ON DATABASE config definition
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Ac<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseAccess
impl Categorise for Ac<'_> {
fn categorise(&self) -> Category {
Category::DatabaseAccess
}
}

View file

@ -1,6 +1,6 @@
//! Stores the key prefix for all keys under a database
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -18,9 +18,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str) -> All<'a> {
All::new(ns, db)
}
impl KeyRequirements for All<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseRoot
impl Categorise for All<'_> {
fn categorise(&self) -> Category {
Category::DatabaseRoot
}
}

View file

@ -1,6 +1,6 @@
//! Stores a DEFINE ANALYZER config definition
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Az<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseAnalyzer
impl Categorise for Az<'_> {
fn categorise(&self) -> Category {
Category::DatabaseAnalyzer
}
}

View file

@ -1,6 +1,6 @@
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
/// Stores a DEFINE FUNCTION config definition
//! Stores a DEFINE FUNCTION config definition
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Fc<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseFunction
impl Categorise for Fc<'_> {
fn categorise(&self) -> Category {
Category::DatabaseFunction
}
}

View file

@ -1,6 +1,6 @@
/// Stores a DEFINE MODEL config definition
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
//! Stores a DEFINE MODEL config definition
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -35,9 +35,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Ml<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseModel
impl Categorise for Ml<'_> {
fn categorise(&self) -> Category {
Category::DatabaseModel
}
}

View file

@ -1,6 +1,6 @@
//! Stores a DEFINE PARAM config definition
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Pa<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseParameter
impl Categorise for Pa<'_> {
fn categorise(&self) -> Category {
Category::DatabaseParameter
}
}

View file

@ -1,6 +1,6 @@
//! Stores a DEFINE TABLE config definition
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Tb<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseTable
impl Categorise for Tb<'_> {
fn categorise(&self) -> Category {
Category::DatabaseTable
}
}

View file

@ -1,6 +1,6 @@
//! Stores the next and available freed IDs for documents
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -22,9 +22,9 @@ pub fn new(ns: u32, db: u32) -> Ti {
Ti::new(ns, db)
}
impl KeyRequirements for Ti {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseTableIdentifier
impl Categorise for Ti {
fn categorise(&self) -> Category {
Category::DatabaseTableIdentifier
}
}

View file

@ -1,6 +1,6 @@
//! Stores database timestamps
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -39,9 +39,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Ts<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseTimestamp
impl Categorise for Ts<'_> {
fn categorise(&self) -> Category {
Category::DatabaseTimestamp
}
}

View file

@ -1,5 +1,6 @@
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
//! Stores a DEFINE USER ON DATABASE config definition
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -33,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Us<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseUser
impl Categorise for Us<'_> {
fn categorise(&self) -> Category {
Category::DatabaseUser
}
}

View file

@ -1,6 +1,6 @@
//! Stores database versionstamps
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -23,9 +23,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str) -> Vs<'a> {
Vs::new(ns, db)
}
impl KeyRequirements for Vs<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseVersionstamp
impl Categorise for Vs<'_> {
fn categorise(&self) -> Category {
Category::DatabaseVersionstamp
}
}

View file

@ -1,11 +1,6 @@
/// Debug purposes only. It may be used in logs. Not for key handling in implementation code.
/// Helpers for debugging keys
/// sprint_key converts a key to an escaped string.
/// This is used for logging and debugging tests and should not be used in implementation code.
#[doc(hidden)]
pub fn sprint_key<T>(key: &T) -> String
/// Displays a key in a human-readable format.
#[cfg(debug_assertions)]
pub fn sprint<T>(key: &T) -> String
where
T: AsRef<[u8]>,
{

View file

@ -1,6 +1,6 @@
//! Stores a graph edge pointer
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use crate::sql::dir::Dir;
use crate::sql::id::Id;
use crate::sql::thing::Thing;
@ -164,9 +164,9 @@ pub fn ftsuffix(ns: &str, db: &str, tb: &str, id: &Id, eg: &Dir, ft: &str) -> Ve
k
}
impl KeyRequirements for Graph<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::Graph
impl Categorise for Graph<'_> {
fn categorise(&self) -> Category {
Category::Graph
}
}

View file

@ -1,6 +1,6 @@
//! Stores the key prefix for all keys under an index
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -22,9 +22,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str, tb: &'a str, ix: &'a str) -> All<'a> {
All::new(ns, db, tb, ix)
}
impl KeyRequirements for All<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexRoot
impl Categorise for All<'_> {
fn categorise(&self) -> Category {
Category::IndexRoot
}
}

View file

@ -1,7 +1,7 @@
//! Stores Doc list for each term
use crate::idx::ft::terms::TermId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -23,9 +23,9 @@ pub struct Bc<'a> {
pub term_id: TermId,
}
impl KeyRequirements for Bc<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexTermDocList
impl Categorise for Bc<'_> {
fn categorise(&self) -> Category {
Category::IndexTermDocList
}
}

View file

@ -1,7 +1,7 @@
//! Stores BTree nodes for doc ids
use crate::idx::trees::store::NodeId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -23,9 +23,9 @@ pub struct Bd<'a> {
pub node_id: Option<NodeId>,
}
impl KeyRequirements for Bd<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexBTreeNode
impl Categorise for Bd<'_> {
fn categorise(&self) -> Category {
Category::IndexBTreeNode
}
}

View file

@ -1,8 +1,8 @@
//! Stores Term/Doc frequency
use crate::idx::docids::DocId;
use crate::idx::ft::terms::TermId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -25,9 +25,9 @@ pub struct Bf<'a> {
pub doc_id: DocId,
}
impl KeyRequirements for Bf<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexTermDocFrequency
impl Categorise for Bf<'_> {
fn categorise(&self) -> Category {
Category::IndexTermDocFrequency
}
}

View file

@ -1,7 +1,7 @@
//! Stores doc keys for doc_ids
use crate::idx::trees::store::NodeId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -23,9 +23,9 @@ pub struct Bi<'a> {
pub node_id: NodeId,
}
impl KeyRequirements for Bi<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexDocKeys
impl Categorise for Bi<'_> {
fn categorise(&self) -> Category {
Category::IndexDocKeys
}
}

View file

@ -1,7 +1,7 @@
//! Stores the term list for doc_ids
use crate::idx::docids::DocId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -23,9 +23,9 @@ pub struct Bk<'a> {
pub doc_id: DocId,
}
impl KeyRequirements for Bk<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexTermList
impl Categorise for Bk<'_> {
fn categorise(&self) -> Category {
Category::IndexTermList
}
}

View file

@ -1,7 +1,7 @@
//! Stores BTree nodes for doc lengths
use crate::idx::trees::store::NodeId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -23,9 +23,9 @@ pub struct Bl<'a> {
pub node_id: Option<NodeId>,
}
impl KeyRequirements for Bl<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexBTreeNodeDocLengths
impl Categorise for Bl<'_> {
fn categorise(&self) -> Category {
Category::IndexBTreeNodeDocLengths
}
}

View file

@ -1,8 +1,8 @@
//! Stores the offsets
use crate::idx::docids::DocId;
use crate::idx::ft::terms::TermId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -25,9 +25,9 @@ pub struct Bo<'a> {
pub term_id: TermId,
}
impl KeyRequirements for Bo<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexOffset
impl Categorise for Bo<'_> {
fn categorise(&self) -> Category {
Category::IndexOffset
}
}

View file

@ -1,7 +1,7 @@
//! Stores BTree nodes for postings
use crate::idx::trees::store::NodeId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -23,9 +23,9 @@ pub struct Bp<'a> {
pub node_id: Option<NodeId>,
}
impl KeyRequirements for Bp<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexBTreeNodePostings
impl Categorise for Bp<'_> {
fn categorise(&self) -> Category {
Category::IndexBTreeNodePostings
}
}

View file

@ -1,6 +1,6 @@
//! Stores FullText index states
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -20,9 +20,9 @@ pub struct Bs<'a> {
pub ix: &'a str,
}
impl KeyRequirements for Bs<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexFullTextState
impl Categorise for Bs<'_> {
fn categorise(&self) -> Category {
Category::IndexFullTextState
}
}

View file

@ -1,7 +1,7 @@
//! Stores BTree nodes for terms
use crate::idx::trees::store::NodeId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -23,9 +23,9 @@ pub struct Bt<'a> {
pub node_id: Option<NodeId>,
}
impl KeyRequirements for Bt<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexBTreeNodeTerms
impl Categorise for Bt<'_> {
fn categorise(&self) -> Category {
Category::IndexBTreeNodeTerms
}
}

View file

@ -1,7 +1,7 @@
//! Stores terms for term_ids
use crate::idx::ft::terms::TermId;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -23,9 +23,9 @@ pub struct Bu<'a> {
pub term_id: TermId,
}
impl KeyRequirements for Bu<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::IndexTerms
impl Categorise for Bu<'_> {
fn categorise(&self) -> Category {
Category::IndexTerms
}
}

View file

@ -13,8 +13,8 @@ pub mod bt;
pub mod bu;
pub mod vm;
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use crate::sql::array::Array;
use crate::sql::id::Id;
use derive::Key;
@ -103,9 +103,9 @@ pub struct Index<'a> {
pub id: Option<Cow<'a, Id>>,
}
impl KeyRequirements for Index<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::Index
impl Categorise for Index<'_> {
fn categorise(&self) -> Category {
Category::Index
}
}

View file

@ -1,7 +0,0 @@
use crate::key::error::KeyCategory;
/// Key requirements are functions that we expect all keys to have
pub(crate) trait KeyRequirements {
/// Returns the category of the key for error reporting
fn key_category(&self) -> KeyCategory;
}

View file

@ -22,7 +22,7 @@
/// crate::key::database::ac /*{ns}*{db}!ac{ac}
/// crate::key::database::az /*{ns}*{db}!az{az}
/// crate::key::database::fc /*{ns}*{db}!fn{fc}
/// crate::key::database::lg /*{ns}*{db}!lg{lg}
/// crate::key::database::ml /*{ns}*{db}!ml{ml}{vn}
/// crate::key::database::pa /*{ns}*{db}!pa{pa}
/// crate::key::database::tb /*{ns}*{db}!tb{tb}
/// crate::key::database::ti /+{ns id}*{db id}!ti
@ -57,15 +57,14 @@
///
/// crate::key::graph /*{ns}*{db}*{tb}~{id}{eg}{fk}
///
pub mod change;
pub mod database;
pub mod debug;
pub(crate) mod error;
pub mod graph;
pub mod index;
pub(crate) mod key_req;
pub mod namespace;
pub mod node;
pub mod root;
pub mod table;
pub mod thing;
pub(crate) mod category;
pub(crate) mod change;
pub(crate) mod database;
pub(crate) mod debug;
pub(crate) mod graph;
pub(crate) mod index;
pub(crate) mod namespace;
pub(crate) mod node;
pub(crate) mod root;
pub(crate) mod table;
pub(crate) mod thing;

View file

@ -1,6 +1,6 @@
//! Stores a DEFINE ACCESS ON NAMESPACE config definition
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -32,9 +32,9 @@ pub fn suffix(ns: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Ac<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::NamespaceAccess
impl Categorise for Ac<'_> {
fn categorise(&self) -> Category {
Category::NamespaceAccess
}
}

View file

@ -1,6 +1,6 @@
//! Stores the key prefix for all keys under a namespace
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -16,9 +16,9 @@ pub fn new(ns: &str) -> All<'_> {
All::new(ns)
}
impl KeyRequirements for All<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::NamespaceRoot
impl Categorise for All<'_> {
fn categorise(&self) -> Category {
Category::NamespaceRoot
}
}

View file

@ -1,6 +1,6 @@
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
/// Stores a DEFINE DATABASE config definition
//! Stores a DEFINE DATABASE config definition
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -32,9 +32,9 @@ pub fn suffix(ns: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Db<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseAlias
impl Categorise for Db<'_> {
fn categorise(&self) -> Category {
Category::DatabaseAlias
}
}

View file

@ -1,6 +1,6 @@
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
/// Stores a database ID generator state
//! Stores a database ID generator state
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -19,9 +19,9 @@ pub fn new(ns: u32) -> Di {
Di::new(ns)
}
impl KeyRequirements for Di {
fn key_category(&self) -> KeyCategory {
KeyCategory::DatabaseIdentifier
impl Categorise for Di {
fn categorise(&self) -> Category {
Category::DatabaseIdentifier
}
}
impl Di {

View file

@ -1,5 +1,6 @@
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
//! Stores a DEFINE USER ON NAMESPACE config definition
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -31,9 +32,9 @@ pub fn suffix(ns: &str) -> Vec<u8> {
k
}
impl KeyRequirements for Us<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::NamespaceUser
impl Categorise for Us<'_> {
fn categorise(&self) -> Category {
Category::NamespaceUser
}
}

View file

@ -1,6 +1,6 @@
//! Stores the key prefix for all nodes
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
@ -18,9 +18,9 @@ pub fn new(nd: Uuid) -> All {
All::new(nd)
}
impl KeyRequirements for All {
fn key_category(&self) -> KeyCategory {
KeyCategory::NodeRoot
impl Categorise for All {
fn categorise(&self) -> Category {
Category::NodeRoot
}
}

View file

@ -1,6 +1,6 @@
//! Stores a LIVE SELECT query definition on the cluster
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
@ -12,7 +12,7 @@ use uuid::Uuid;
/// The value is just the table of the live query as a Strand, which is the missing information from the key path
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Key)]
#[non_exhaustive]
pub struct Lq<'a> {
pub struct Lq {
__: u8,
_a: u8,
#[serde(with = "uuid::serde::compact")]
@ -22,38 +22,32 @@ pub struct Lq<'a> {
_d: u8,
#[serde(with = "uuid::serde::compact")]
pub lq: Uuid,
_e: u8,
pub ns: &'a str,
_f: u8,
pub db: &'a str,
}
pub fn new<'a>(nd: Uuid, lq: Uuid, ns: &'a str, db: &'a str) -> Lq<'a> {
Lq::new(nd, lq, ns, db)
pub fn new(nd: Uuid, lq: Uuid) -> Lq {
Lq::new(nd, lq)
}
pub fn prefix_nd(nd: &Uuid) -> Vec<u8> {
let mut k = [b'/', b'$'].to_vec();
k.extend_from_slice(nd.as_bytes());
k.extend_from_slice(&[0x00]);
pub fn prefix(nd: Uuid) -> Vec<u8> {
let mut k = super::all::new(nd).encode().unwrap();
k.extend_from_slice(&[b'!', b'l', b'q', 0x00]);
k
}
pub fn suffix_nd(nd: &Uuid) -> Vec<u8> {
let mut k = [b'/', b'$'].to_vec();
k.extend_from_slice(nd.as_bytes());
k.extend_from_slice(&[0xff]);
pub fn suffix(nd: Uuid) -> Vec<u8> {
let mut k = super::all::new(nd).encode().unwrap();
k.extend_from_slice(&[b'!', b'l', b'q', 0xff]);
k
}
impl KeyRequirements for Lq<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::NodeLiveQuery
impl Categorise for Lq {
fn categorise(&self) -> Category {
Category::NodeLiveQuery
}
}
impl<'a> Lq<'a> {
pub fn new(nd: Uuid, lq: Uuid, ns: &'a str, db: &'a str) -> Self {
impl Lq {
pub fn new(nd: Uuid, lq: Uuid) -> Self {
Self {
__: b'/',
_a: b'$',
@ -62,10 +56,6 @@ impl<'a> Lq<'a> {
_c: b'l',
_d: b'q',
lq,
_e: b'*',
ns,
_f: b'*',
db,
}
}
}
@ -80,35 +70,40 @@ mod tests {
let nd = Uuid::from_bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10]);
#[rustfmt::skip]
let lq = Uuid::from_bytes([0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20]);
let val = Lq::new(nd, lq, "testns", "testdb");
let val = Lq::new(nd, lq);
let enc = Lq::encode(&val).unwrap();
assert_eq!(
enc,
b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\
!lq\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\
*testns\0*testdb\0"
!lq\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
);
let dec = Lq::decode(&enc).unwrap();
assert_eq!(val, dec);
}
#[test]
fn prefix_nd() {
fn test_prefix() {
use super::*;
let nd = Uuid::from_bytes([
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10,
]);
let val = prefix_nd(&nd);
assert_eq!(val, b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x00");
#[rustfmt::skip]
let nd = Uuid::from_bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10]);
let val = super::prefix(nd);
assert_eq!(
val,
b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\
!lq\x00"
);
}
#[test]
fn suffix_nd() {
fn test_suffix() {
use super::*;
let nd = Uuid::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let val = suffix_nd(&nd);
assert_eq!(val, b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\xff");
#[rustfmt::skip]
let nd = Uuid::from_bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10]);
let val = super::suffix(nd);
assert_eq!(
val,
b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\
!lq\xff"
);
}
}
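
With ns and db dropped from the key, every live query registered by a node now sits in one contiguous range under /${nd}!lq, so it can be enumerated with a single range scan. A sketch using the module-level prefix/suffix helpers added above and the scan call shown earlier in this diff; the limit is illustrative:

    use uuid::Uuid;
    use crate::err::Error;
    use crate::key::node::lq;
    use crate::kvs::Transaction;

    // Illustrative: count the live-query entries registered by one node.
    async fn node_live_queries(tx: &Transaction, nd: Uuid) -> Result<usize, Error> {
        // The new helpers bound the whole `/${nd}!lq` keyspace for this node.
        let rng = lq::prefix(nd)..lq::suffix(nd);
        let entries = tx.scan(rng, 1000).await?;
        Ok(entries.len())
    }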

View file

@ -1,5 +1,6 @@
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
//! Stores a DEFINE ACCESS ON ROOT config definition
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -29,9 +30,9 @@ pub fn suffix() -> Vec<u8> {
k
}
impl KeyRequirements for Ac<'_> {
fn key_category(&self) -> KeyCategory {
KeyCategory::Access
impl Categorise for Ac<'_> {
fn categorise(&self) -> Category {
Category::Access
}
}

View file

@ -1,6 +1,6 @@
//! Stores the key prefix for all keys
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
@ -20,9 +20,9 @@ impl Default for Kv {
}
}
impl KeyRequirements for Kv {
fn key_category(&self) -> KeyCategory {
KeyCategory::Root
impl Categorise for Kv {
fn categorise(&self) -> Category {
Category::Root
}
}

View file

@ -1,101 +0,0 @@
//! Stores a heartbeat per registered cluster node
use crate::dbs::node::{KeyTimestamp, Timestamp};
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use derive::Key;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Key)]
#[non_exhaustive]
pub struct Hb {
__: u8,
_a: u8,
_b: u8,
_c: u8,
pub hb: Timestamp,
_d: u8,
#[serde(with = "uuid::serde::compact")]
pub nd: Uuid,
}
impl KeyRequirements for Hb {
fn key_category(&self) -> KeyCategory {
KeyCategory::Heartbeat
}
}
impl Hb {
pub fn new(hb: Timestamp, nd: Uuid) -> Self {
Self {
__: b'/',
_a: b'!',
_b: b'h',
_c: b'b',
hb,
_d: b'/',
nd,
}
}
pub fn prefix() -> Vec<u8> {
let mut k = crate::key::root::all::new().encode().unwrap();
k.extend_from_slice(&[b'!', b'h', b'b', 0x00]);
k
}
pub fn suffix(ts: &Timestamp) -> Vec<u8> {
// Add one to timestamp so we get a complete range inclusive of provided timestamp
// Also convert type
let tskey: KeyTimestamp = KeyTimestamp {
value: ts.value + 1,
};
let mut k = crate::key::root::all::new().encode().unwrap();
k.extend_from_slice(&[b'!', b'h', b'b']);
k.extend_from_slice(tskey.encode().unwrap().as_ref());
k
}
}
impl From<Timestamp> for Hb {
fn from(ts: Timestamp) -> Self {
let empty_uuid = uuid::Uuid::nil();
Self::new(ts, empty_uuid)
}
}
#[cfg(test)]
mod tests {
#[test]
fn key() {
use super::*;
#[rustfmt::skip]
let val = Hb::new(
Timestamp { value: 123 },
Uuid::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
);
let enc = Hb::encode(&val).unwrap();
assert_eq!(
enc,
b"/!hb\x00\x00\x00\x00\x00\x00\x00\x7b/\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10");
let dec = Hb::decode(&enc).unwrap();
assert_eq!(val, dec);
}
#[test]
fn prefix() {
use super::*;
let actual = Hb::prefix();
assert_eq!(actual, b"/!hb\x00")
}
#[test]
fn suffix() {
use super::*;
let ts: Timestamp = Timestamp {
value: 456,
};
let actual = Hb::suffix(&ts);
assert_eq!(actual, b"/!hb\x00\x00\x00\x00\x00\x00\x01\xc9") // 457, because we add 1 to the timestamp
}
}

View file

@ -1,6 +1,5 @@
pub mod ac;
pub mod all;
pub mod hb;
pub mod nd;
pub mod ni;
pub mod ns;

View file

@ -1,6 +1,6 @@
//! Stores cluster membership information
use crate::key::error::KeyCategory;
use crate::key::key_req::KeyRequirements;
use crate::key::category::Categorise;
use crate::key::category::Category;
use derive::Key;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
@ -18,9 +18,25 @@ pub struct Nd {
pub nd: Uuid,
}
impl KeyRequirements for Nd {
fn key_category(&self) -> KeyCategory {
KeyCategory::Node
pub fn new(nd: Uuid) -> Nd {
Nd::new(nd)
}
pub fn prefix() -> Vec<u8> {
let mut k = crate::key::root::all::new().encode().unwrap();
k.extend_from_slice(&[b'!', b'n', b'd', 0x00]);
k
}
pub fn suffix() -> Vec<u8> {
let mut k = crate::key::root::all::new().encode().unwrap();
k.extend_from_slice(&[b'!', b'n', b'd', 0xff]);
k
}
impl Categorise for Nd {
fn categorise(&self) -> Category {
Category::Node
}
}
@ -34,18 +50,6 @@ impl Nd {
nd,
}
}
pub fn prefix() -> Vec<u8> {
let mut k = crate::key::root::all::new().encode().unwrap();
k.extend_from_slice(&[b'!', b'n', b'd', 0x00]);
k
}
pub fn suffix() -> Vec<u8> {
let mut k = crate::key::root::all::new().encode().unwrap();
k.extend_from_slice(&[b'!', b'n', b'd', 0xff]);
k
}
}
#[cfg(test)]
@ -61,13 +65,13 @@ mod tests {
#[test]
fn test_prefix() {
let val = super::Nd::prefix();
let val = super::prefix();
assert_eq!(val, b"/!nd\0")
}
#[test]
fn test_suffix() {
let val = super::Nd::suffix();
let val = super::suffix();
assert_eq!(val, b"/!nd\xff")
}
}
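
The cluster membership keys follow the same pattern: prefix() and suffix() are now free functions in the nd module rather than associated functions on the key struct, mirroring the lq change above. A sketch of scanning the registered nodes between those bounds; as before, the limit is illustrative:

    use crate::key::root::nd;

    // Illustrative: list the raw cluster membership entries.
    async fn cluster_nodes(tx: &Transaction) -> Result<usize, Error> {
        let rng = nd::prefix()..nd::suffix();
        let entries = tx.scan(rng, 1000).await?;
        Ok(entries.len())
    }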

Some files were not shown because too many files have changed in this diff