Refactor transaction, caching, and key-value store interfaces (#4257)
Co-authored-by: Gerard Guillemas Martos <gerard.guillemas@surrealdb.com>
parent 29b0df6060
commit bfc474e4d8
229 changed files with 7232 additions and 10884 deletions
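The diffs below document the shape of the refactored interface: helper functions now borrow transactions immutably (`&Transaction` instead of `&mut Transaction`), low-level key-value calls go through an interior lock (`tx.lock().await`), scans return plain key-value pairs (`tx.scan(beg..end, limit)`), and ranged deletes no longer take a batch limit (`tx.delr(beg..end)`). A minimal sketch of the new calling pattern, assembled only from calls that appear in the hunks below (a sketch, not a complete compilable program):

    // Create a transaction as before
    let tx = ds.transaction(Write, Optimistic).await?;
    // Helpers now take a shared reference
    let changes = crate::cf::read(&tx, ns, db, Some(tb), ShowSince::Versionstamp(0), Some(10)).await?;
    // Inner transaction methods are reached through the lock
    let vs = tx.lock().await.get_versionstamp_from_timestamp(ts, ns, db, true).await?;
    tx.commit().await?;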
.github/workflows/bench.yml (vendored): 10 lines changed

@@ -102,14 +102,14 @@ jobs:
           features: "kv-mem"
         - target: "lib-rocksdb"
           features: "kv-rocksdb"
-        - target: "lib-fdb"
-          features: "kv-fdb-7_1"
+        - target: "lib-surrealkv"
+          features: "kv-surrealkv"
         - target: "sdk-mem"
           features: "kv-mem"
         - target: "sdk-rocksdb"
           features: "kv-rocksdb"
-        - target: "sdk-fdb"
-          features: "kv-fdb-7_1"
+        - target: "sdk-surrealkv"
+          features: "kv-surrealkv"
         # This one fails because the server consumes too much memory and the kernel kills it. I tried with instances up to 16GB of RAM.
         # - target: "sdk-ws"
         #   features: "protocol-ws"
@@ -143,7 +143,7 @@ jobs:
         uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
         if: ${{ matrix.target == 'lib-fdb' || matrix.target == 'sdk-fdb' }}
         with:
-          version: "7.1.30"
+          version: "7.1.61"

       # Run SurrealDB in the background if needed
       - name: Build and start SurrealDB
.github/workflows/ci.yml (vendored): 20 lines changed

@@ -571,16 +571,24 @@ jobs:
         with:
           save-if: ${{ github.ref == 'refs/heads/main' }}

-      - name: Setup FoundationDB
-        uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
-        with:
-          version: "7.1.30"
-
       - name: Install cargo-make
         run: cargo install --debug --locked cargo-make

+      - name: Setup FoundationDB
+        uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
+        with:
+          version: "7.1.61"
+
       - name: Test fdb engine
-        run: cargo make ci-api-integration-fdb
+        run: cargo make ci-api-integration-fdb-7_1
+
+      - name: Setup FoundationDB
+        uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
+        with:
+          version: "7.3.47"
+
+      - name: Test fdb engine
+        run: cargo make ci-api-integration-fdb-7_3

       - name: Debug info
         if: always()
Cargo.lock (generated): 74 lines changed

@@ -872,7 +872,6 @@ dependencies = [
  "clang-sys",
  "lazy_static",
  "lazycell",
- "log",
  "peeking_take_while",
  "prettyplease",
  "proc-macro2",
@@ -881,7 +880,6 @@ dependencies = [
  "rustc-hash",
  "shlex",
  "syn 2.0.58",
- "which",
 ]

 [[package]]
@@ -1782,9 +1780,9 @@ dependencies = [

 [[package]]
 name = "echodb"
-version = "0.6.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ac31e38aeac770dd01b9d6c9ab2a6d7f025815f71105911cf6de073a5db8ee1"
+checksum = "1d1eccc44ff21b80ca7e883ff57423a12610965a33637d5d0bef4adebcd81749"
 dependencies = [
  "arc-swap",
  "imbl",
@@ -2082,9 +2080,9 @@ dependencies = [

 [[package]]
 name = "foundationdb"
-version = "0.8.0"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8696fd1be198f101eb58aeecf0f504fc02b28c7afcc008b4e4a998a91b305108"
+checksum = "020bf4ae7238dbdb1ff01e9f981db028515cf66883c461e29faedfea130b2728"
 dependencies = [
  "async-recursion 1.1.0",
  "async-trait",
@@ -2102,18 +2100,18 @@ dependencies = [

 [[package]]
 name = "foundationdb-gen"
-version = "0.8.0"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62239700f01b041b6372aaeb847c52f960e1a69fd2b1025dc995ea3dd90e3308"
+checksum = "36878d54a76a48e794d0fe89be2096ab5968b071e7ec25f7becfe7846f55fa77"
 dependencies = [
  "xml-rs",
 ]

 [[package]]
 name = "foundationdb-macros"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83c8d52fe8b46ab822b4decdcc0d6d85aeedfc98f0d52ba2bd4aec4a97807516"
+checksum = "f8db6653cbc621a3810d95d55bd342be3e71181d6df21a4eb29ef986202d3f9c"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -2123,11 +2121,12 @@ dependencies = [

 [[package]]
 name = "foundationdb-sys"
-version = "0.8.0"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98e49545f5393d276b7b888c77e3f9519fd33727435f8244344be72c3284256f"
+checksum = "ace2f49db8614b7d7e3b656a12e0059b5fbd0a4da3410b1797374bec3db269fa"
 dependencies = [
- "bindgen 0.65.1",
+ "bindgen 0.69.4",
+ "libc",
 ]

 [[package]]
@@ -2912,9 +2911,9 @@ dependencies = [

 [[package]]
 name = "indxdb"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1de97697bf90e30042ea4ae3260a976253e0bb1703fa339541bcc047cc994180"
+checksum = "817e28ebe3466175be7e66f4eadfb9e6a221537db2f78b6be04e14b7051a56af"
 dependencies = [
  "js-sys",
  "rexie",
@@ -5432,9 +5431,9 @@ dependencies = [

 [[package]]
 name = "serde_bytes"
-version = "0.11.14"
+version = "0.11.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734"
+checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a"
 dependencies = [
  "serde",
 ]
@@ -6116,9 +6115,9 @@ dependencies = [

 [[package]]
 name = "surrealdb-tikv-client"
-version = "0.2.0-surreal.2"
+version = "0.3.0-surreal.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b79f921871d6ed67c970e8499b4aca3724115c189f99ab30f51b46c77bd19819"
+checksum = "f9e204e84239374e8ba2dfabb88f5ac20f69baa09599eee225958445fb7e0a14"
 dependencies = [
  "async-recursion 0.3.2",
  "async-trait",
@@ -6130,15 +6129,17 @@ dependencies = [
  "log",
  "pin-project",
  "prometheus",
- "prost 0.11.9",
+ "prost 0.12.3",
  "rand 0.8.5",
  "regex",
  "semver",
  "serde",
  "serde_derive",
+ "serde_json",
+ "take_mut",
  "thiserror",
  "tokio",
- "tonic 0.9.2",
+ "tonic 0.10.2",
 ]

 [[package]]
@@ -6271,6 +6272,12 @@ dependencies = [
  "libc",
 ]

+[[package]]
+name = "take_mut"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
+
 [[package]]
 name = "tap"
 version = "1.0.1"
@@ -6387,18 +6394,18 @@ dependencies = [

 [[package]]
 name = "thiserror"
-version = "1.0.58"
+version = "1.0.61"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
+checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
 dependencies = [
  "thiserror-impl",
 ]

 [[package]]
 name = "thiserror-impl"
-version = "1.0.58"
+version = "1.0.61"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
+checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -6482,9 +6489,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"

 [[package]]
 name = "tokio"
-version = "1.37.0"
+version = "1.38.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
+checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a"
 dependencies = [
  "backtrace",
  "bytes",
@@ -6511,9 +6518,9 @@ dependencies = [

 [[package]]
 name = "tokio-macros"
-version = "2.2.0"
+version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
+checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -6673,17 +6680,15 @@ dependencies = [

 [[package]]
 name = "tonic"
-version = "0.9.2"
+version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a"
+checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e"
 dependencies = [
  "async-stream",
  "async-trait",
  "axum 0.6.20",
  "base64 0.21.7",
  "bytes",
- "futures-core",
- "futures-util",
  "h2",
  "http 0.2.12",
  "http-body 0.4.6",
@@ -6691,7 +6696,8 @@ dependencies = [
  "hyper-timeout",
  "percent-encoding",
  "pin-project",
- "prost 0.11.9",
+ "prost 0.12.3",
+ "rustls 0.21.11",
  "rustls-pemfile",
  "tokio",
  "tokio-rustls",
Cargo.toml: 10 lines changed

@@ -8,11 +8,11 @@ authors = ["Tobie Morgan Hitchcock <tobie@surrealdb.com>"]

 [features]
 # Public features
-default = ["storage-mem", "storage-rocksdb", "scripting", "http"]
+default = ["storage-mem", "storage-surrealkv", "storage-rocksdb", "scripting", "http"]
 storage-mem = ["surrealdb/kv-mem"]
 storage-rocksdb = ["surrealdb/kv-rocksdb"]
 storage-tikv = ["surrealdb/kv-tikv"]
-storage-fdb = ["surrealdb/kv-fdb-7_1"]
+storage-fdb = ["surrealdb/kv-fdb"]
 storage-surrealkv = ["surrealdb/kv-surrealkv"]
 scripting = ["surrealdb/scripting"]
 http = ["surrealdb/http"]
@@ -20,6 +20,9 @@ http-compression = []
 ml = ["surrealdb/ml"]
 jwks = ["surrealdb/jwks"]
 performance-profiler = ["dep:pprof"]
+# Special features
+storage-fdb-7_1 = ["surrealdb/kv-fdb-7_1"]
+storage-fdb-7_3 = ["surrealdb/kv-fdb-7_3"]

 [workspace]
 members = [
@@ -30,6 +33,9 @@ members = [
  "lib/examples/rocket",
 ]

+[profile.make]
+inherits = "dev"
+
 [profile.release]
 lto = true
 strip = true
Makefile: 8 lines changed

@@ -45,10 +45,10 @@ serve: check-deps
 sql: check-deps
 	cargo make sql

-.PHONY: quick
-quick: check-deps
-	cargo make quick
-
 .PHONY: build
 build: check-deps
 	cargo make build
+
+.PHONY: release
+release: check-deps
+	cargo make release
(cargo-make configuration files; file names not captured in this extract)

@@ -1,6 +1,6 @@
 [tasks.ci-format]
 category = "CI - CHECK"
-dependencies = ["cargo-fmt", "cargo-fmt-unlinked"]
+dependencies = ["cargo-fmt"]

 [tasks.ci-check]
 category = "CI - CHECK"
@@ -15,7 +15,7 @@ args = ["check", "--locked", "--package", "surrealdb", "--features", "protocol-w
 [tasks.ci-clippy]
 category = "CI - CHECK"
 command = "cargo"
-args = ["clippy", "--all-targets", "--features", "storage-mem,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks", "--tests", "--benches", "--examples", "--bins", "--", "-D", "warnings"]
+args = ["clippy", "--all-targets", "--features", "storage-mem,storage-surrealkv,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks,ml,storage-fdb-7_1", "--tests", "--benches", "--examples", "--bins", "--", "-D", "warnings"]

 #
 # Integration Tests
@@ -151,11 +151,6 @@ category = "CI - INTEGRATION TESTS"
 env = { _TEST_API_ENGINE = "rocksdb", _TEST_FEATURES = "kv-rocksdb" }
 run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = true }

-[tasks.ci-api-integration-fdb]
-category = "CI - INTEGRATION TESTS"
-env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb-7_1" }
-run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = false }
-
 [tasks.ci-api-integration-surrealkv]
 category = "CI - INTEGRATION TESTS"
 env = { _TEST_API_ENGINE = "surrealkv", _TEST_FEATURES = "kv-surrealkv" }
@@ -166,6 +161,16 @@ category = "CI - INTEGRATION TESTS"
 env = { _TEST_API_ENGINE = "tikv", _TEST_FEATURES = "kv-tikv" }
 run_task = { name = ["start-tikv", "test-kvs", "test-api-integration", "stop-tikv"], fork = true, parallel = false }

+[tasks.ci-api-integration-fdb-7_1]
+category = "CI - INTEGRATION TESTS"
+env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb,kv-fdb-7_1" }
+run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = false }
+
+[tasks.ci-api-integration-fdb-7_3]
+category = "CI - INTEGRATION TESTS"
+env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb,kv-fdb-7_3" }
+run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = false }
+
 #
 # Services
@@ -283,7 +288,7 @@ BENCH_WORKER_THREADS = { value = "1", condition = { env_not_set = ["BENCH_WORKER
 BENCH_NUM_OPS = { value = "1000", condition = { env_not_set = ["BENCH_NUM_OPS"] } }
 BENCH_DURATION = { value = "30", condition = { env_not_set = ["BENCH_DURATION"] } }
 BENCH_SAMPLE_SIZE = { value = "10", condition = { env_not_set = ["BENCH_SAMPLE_SIZE"] } }
-BENCH_FEATURES = { value = "protocol-ws,kv-mem,kv-rocksdb,kv-fdb-7_1,kv-surrealkv", condition = { env_not_set = ["BENCH_FEATURES"] } }
+BENCH_FEATURES = { value = "protocol-ws,kv-mem,kv-rocksdb,kv-surrealkv", condition = { env_not_set = ["BENCH_FEATURES"] } }

 [tasks.bench-target]
 private = true
@@ -301,11 +306,6 @@ category = "CI - BENCHMARK - SurrealDB Target"
 env = { BENCH_DATASTORE_TARGET = "lib-rocksdb" }
 run_task = { name = ["bench-target"] }

-[tasks.bench-lib-fdb]
-category = "CI - BENCHMARK - SurrealDB Target"
-env = { BENCH_DATASTORE_TARGET = "lib-fdb" }
-run_task = { name = ["bench-target"] }
-
 [tasks.bench-sdk-mem]
 category = "CI - BENCHMARK - SurrealDB Target"
 env = { BENCH_DATASTORE_TARGET = "sdk-mem" }
@@ -316,17 +316,12 @@ category = "CI - BENCHMARK - SurrealDB Target"
 env = { BENCH_DATASTORE_TARGET = "sdk-rocksdb" }
 run_task = { name = ["bench-target"] }

-[tasks.bench-sdk-fdb]
+[tasks.bench-lib-surrealkv]
 category = "CI - BENCHMARK - SurrealDB Target"
-env = { BENCH_DATASTORE_TARGET = "sdk-fdb" }
+env = { BENCH_DATASTORE_TARGET = "lib-surrealkv" }
 run_task = { name = ["bench-target"] }

 [tasks.bench-sdk-ws]
 category = "CI - BENCHMARK - SurrealDB Target"
 env = { BENCH_DATASTORE_TARGET = "sdk-ws" }
 run_task = { name = ["bench-target"] }
-
-[tasks.bench-lib-surrealkv]
-category = "CI - BENCHMARK - SurrealDB Target"
-env = { BENCH_DATASTORE_TARGET = "lib-surrealkv" }
-run_task = { name = ["bench-target"] }

(next file in this extract; name not captured)

@@ -24,35 +24,29 @@ args = ["doc", "--open", "--no-deps", "--package", "surrealdb", "--features", "r
 category = "LOCAL USAGE"
 command = "cargo"
 env = { RUST_MIN_STACK={ value = "4194304", condition = { env_not_set = ["RUST_MIN_STACK"] } } }
-args = ["test", "--workspace", "--no-fail-fast"]
+args = ["test", "--profile", "make", "--workspace", "--no-fail-fast"]

-# Check
-[tasks.cargo-check]
-category = "LOCAL USAGE"
-command = "cargo"
-args = ["check", "--workspace", "--features", "${DEV_FEATURES}"]
-
+# Format
 [tasks.cargo-fmt]
 category = "LOCAL USAGE"
 command = "cargo"
 args = ["fmt", "--all", "--check"]

-[tasks.cargo-fmt-unlinked]
+# Check
+[tasks.cargo-check]
 category = "LOCAL USAGE"
-script = """
-set -e
-cd ${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}/
-cargo fmt --all --check -- ./lib/tests/**/*.rs ./core/src/kvs/tests/*.rs
-"""
+command = "cargo"
+args = ["check", "--profile", "make", "--workspace", "--all-targets", "--features", "${ALL_FEATURES}"]

+# Clippy
 [tasks.cargo-clippy]
 category = "LOCAL USAGE"
 command = "cargo"
-args = ["clippy", "--all-targets", "--all-features", "--", "-D", "warnings"]
+args = ["clippy", "--profile", "make", "--workspace", "--all-targets", "--features", "${ALL_FEATURES}", "--", "-D", "warnings"]

 [tasks.check]
 category = "LOCAL USAGE"
-dependencies = ["cargo-check", "cargo-fmt", "cargo-fmt-unlinked", "cargo-clippy"]
+dependencies = ["cargo-fmt", "cargo-check", "cargo-clippy"]

 [tasks.check-wasm]
 category = "LOCAL USAGE"
@@ -74,30 +68,30 @@ args = ["bench", "--package", "surrealdb", "--no-default-features", "--features"
 [tasks.run]
 category = "LOCAL USAGE"
 command = "cargo"
-args = ["run", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "${@}"]
+args = ["run", "--profile", "make", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "${@}"]

 # Serve
 [tasks.serve]
 category = "LOCAL USAGE"
 command = "cargo"
-args = ["run", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "start", "--allow-all", "${@}"]
+args = ["run", "--profile", "make", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "start", "--allow-all", "${@}"]

 # SQL
 [tasks.sql]
 category = "LOCAL USAGE"
 command = "cargo"
-args = ["run", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "sql", "--pretty", "${@}"]
+args = ["run", "--profile", "make", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "sql", "--pretty", "${@}"]

-# Quick
-[tasks.quick]
-category = "LOCAL USAGE"
-command = "cargo"
-args = ["build", "${@}"]
-
 # Build
 [tasks.build]
 category = "LOCAL USAGE"
 command = "cargo"
+args = ["build", "--profile", "make", "${@}"]
+
+# Release
+[tasks.release]
+category = "LOCAL USAGE"
+command = "cargo"
 args = ["build", "--release", "${@}"]

 # Default

(next file in this extract; name not captured)

@@ -10,8 +10,9 @@ reduce_output = true
 default_to_workspace = false

 [env]
-DEV_FEATURES={ value = "storage-mem,scripting,http,ml,jwks", condition = { env_not_set = ["DEV_FEATURES"] } }
-SURREAL_LOG={ value = "trace", condition = { env_not_set = ["SURREAL_LOG"] } }
+ALL_FEATURES={ value = "storage-mem,storage-surrealkv,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks,ml,storage-fdb-7_1", condition = { env_not_set = ["ALL_FEATURES"] } }
+DEV_FEATURES={ value = "storage-mem,storage-surrealkv,scripting,http,jwks,ml", condition = { env_not_set = ["DEV_FEATURES"] } }
+SURREAL_LOG={ value = "full", condition = { env_not_set = ["SURREAL_LOG"] } }
 SURREAL_USER={ value = "root", condition = { env_not_set = ["SURREAL_USER"] } }
 SURREAL_PASS={ value = "root", condition = { env_not_set = ["SURREAL_PASS"] } }
 SURREAL_PATH={ value = "memory", condition = { env_not_set = ["SURREAL_PATH"] } }
cackle.toml: 40 lines changed

@@ -20,6 +20,10 @@ include = [
     "rustix::fs",
     "tokio::fs",
 ]
+exclude = [
+    "std::path::Path",
+    "std::path::PathBuf",
+]

 [api.net]
 include = [
@@ -30,10 +34,14 @@ include = [
     "surreal::net",
     "surrealdb",
     "surrealdb_core",
+    "surrealkv",
     "tokio::net",
     "tracing",
     "tracing_core",
 ]
+exclude = [
+    "hashbrown::map",
+]

 #
 # Crates Linking to Libraries
@@ -308,10 +316,12 @@ build.allow_apis = [
     "process",
 ]
 allow_unsafe = true
+allow_apis = [
+    "fs",
+]

 [pkg.proc-macro2]
 build.allow_apis = [
-    "fs",
     "process",
 ]
 allow_unsafe = true
@@ -435,6 +445,9 @@ allow_unsafe = true
 build.allow_apis = [
     "process",
 ]
+build.allow_build_instructions = [
+    "cargo:rustc-check-cfg=*",
+]
 allow_apis = [
     "fs",
 ]
@@ -617,9 +630,6 @@ build.allow_build_instructions = [

 [pkg.dirs-sys-next]
 allow_unsafe = true
-from.build.allow_apis = [
-    "fs",
-]

 [pkg.crunchy]
 build.allow_apis = [
@@ -637,7 +647,6 @@ allow_unsafe = true

 [pkg.anyhow]
 build.allow_apis = [
-    "fs",
     "process",
 ]
 allow_unsafe = true
@@ -742,9 +751,6 @@ allow_unsafe = true

 [pkg.dashmap]
 allow_unsafe = true
-allow_apis = [
-    "net",
-]

 [pkg.tokio-stream]
 allow_unsafe = true
@@ -762,9 +768,6 @@ allow_apis = [
     "fs",
 ]

-[pkg.atomic-waker]
-allow_unsafe = true
-
 [pkg.doc-comment]
 build.allow_apis = [
     "process",
@@ -986,6 +989,9 @@ allow_unsafe = true
 [pkg.crossbeam-deque]
 allow_unsafe = true

+[pkg.crossbeam-queue]
+allow_unsafe = true
+
 [pkg.anstream]
 allow_unsafe = true
@@ -1056,9 +1062,6 @@ allow_unsafe = true
 [pkg.argon2]
 allow_unsafe = true

-[pkg.futures-concurrency]
-allow_unsafe = true
-
 [pkg.quick_cache]
 allow_unsafe = true
 allow_apis = [
@@ -1211,6 +1214,7 @@ allow_apis = [

 [pkg.axum-server]
 allow_apis = [
+    "fs",
     "net",
 ]
@@ -1353,3 +1357,11 @@ allow_unsafe = true

 [pkg.tendril]
 allow_unsafe = true
+
+[pkg.lru]
+allow_unsafe = true
+
+[pkg.surrealkv]
+allow_apis = [
+    "fs",
+]
(crate manifest; file name not captured in this extract)

@@ -27,15 +27,8 @@ default = ["kv-mem"]
 kv-mem = ["dep:echodb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
 kv-indxdb = ["dep:indxdb"]
 kv-rocksdb = ["dep:rocksdb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
-kv-tikv = ["dep:tikv", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-5_1 = ["foundationdb/fdb-5_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-5_2 = ["foundationdb/fdb-5_2", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-6_0 = ["foundationdb/fdb-6_0", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-6_1 = ["foundationdb/fdb-6_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-6_2 = ["foundationdb/fdb-6_2", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-6_3 = ["foundationdb/fdb-6_3", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-7_0 = ["foundationdb/fdb-7_0", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-7_1 = ["foundationdb/fdb-7_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
+kv-tikv = ["dep:tikv", "tokio/time", "dep:tempfile", "dep:ext-sort"]
+kv-fdb = ["dep:foundationdb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
 kv-surrealkv = ["dep:surrealkv", "tokio/time", "dep:tempfile", "dep:ext-sort"]
 scripting = ["dep:js"]
 http = ["dep:reqwest"]
@@ -48,8 +41,9 @@ arbitrary = [
 "geo-types/arbitrary",
 "uuid/arbitrary",
 ]
-# Private features
-kv-fdb = ["tokio/time"]
+# Special features
+kv-fdb-7_1 = ["foundationdb/fdb-7_1"]
+kv-fdb-7_3 = ["foundationdb/fdb-7_3"]

 [package.metadata.docs.rs]
 rustdoc-args = ["--cfg", "docsrs"]
@@ -76,10 +70,10 @@ dashmap = "5.5.3"
 derive = { version = "0.12.0", package = "surrealdb-derive" }
 deunicode = "1.4.1"
 dmp = "0.2.0"
-echodb = { version = "0.6.0", optional = true }
+echodb = { version = "0.7.0", optional = true }
 executor = { version = "1.8.0", package = "async-executor" }
 ext-sort = { version = "^0.1.4", optional = true }
-foundationdb = { version = "0.8.0", default-features = false, features = [
+foundationdb = { version = "0.9.0", default-features = false, features = [
 "embedded-fdb-include",
 ], optional = true }
 fst = "0.4.7"
@@ -89,7 +83,7 @@ geo = { version = "0.27.0", features = ["use-serde"] }
 geo-types = { version = "0.7.12", features = ["arbitrary"] }
 hashbrown = { version = "0.14.5", features = ["serde"] }
 hex = { version = "0.4.3" }
-indxdb = { version = "0.4.0", optional = true }
+indxdb = { version = "0.5.0", optional = true }
 ipnet = "2.9.0"
 js = { version = "0.6.2", package = "rquickjs", features = [
 "array-buffer",
@@ -146,7 +140,7 @@ surrealkv = { version = "0.3.0", optional = true }
 surrealml = { version = "0.1.1", optional = true, package = "surrealml-core" }
 tempfile = { version = "3.10.1", optional = true }
 thiserror = "1.0.50"
-tikv = { version = "0.2.0-surreal.2", default-features = false, package = "surrealdb-tikv-client", optional = true }
+tikv = { version = "0.3.0-surreal.1", default-features = false, package = "surrealdb-tikv-client", optional = true }
 tracing = "0.1.40"
 trice = "0.4.0"
 ulid = { version = "1.1.0", features = ["serde"] }
|
@ -1,7 +1,7 @@
|
||||||
use crate::err::Error;
|
use crate::err::Error;
|
||||||
use crate::key::change;
|
use crate::key::change;
|
||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
use crate::key::debug::sprint_key;
|
use crate::key::debug::sprint;
|
||||||
use crate::kvs::Transaction;
|
use crate::kvs::Transaction;
|
||||||
use crate::vs;
|
use crate::vs;
|
||||||
use crate::vs::Versionstamp;
|
use crate::vs::Versionstamp;
|
||||||
|
@ -9,42 +9,36 @@ use std::str;
|
||||||
|
|
||||||
// gc_all_at deletes all change feed entries that become stale at the given timestamp.
|
// gc_all_at deletes all change feed entries that become stale at the given timestamp.
|
||||||
#[allow(unused)]
|
#[allow(unused)]
|
||||||
pub async fn gc_all_at(tx: &mut Transaction, ts: u64, limit: Option<u32>) -> Result<(), Error> {
|
pub async fn gc_all_at(tx: &Transaction, ts: u64) -> Result<(), Error> {
|
||||||
let nses = tx.all_ns().await?;
|
// Fetch all namespaces
|
||||||
let nses = nses.as_ref();
|
let nss = tx.all_ns().await?;
|
||||||
for ns in nses {
|
// Loop over each namespace
|
||||||
gc_ns(tx, ns.name.as_str(), limit, ts).await?;
|
for ns in nss.as_ref() {
|
||||||
|
// Trace for debugging
|
||||||
|
#[cfg(debug_assertions)]
|
||||||
|
trace!("Performing garbage collection on {ns} for timestamp {ts}");
|
||||||
|
// Process the namespace
|
||||||
|
gc_ns(tx, ts, ns.name.as_str()).await?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
// gc_ns deletes all change feed entries in the given namespace that are older than the given watermark.
|
// gc_ns deletes all change feed entries in the given namespace that are older than the given watermark.
|
||||||
#[allow(unused)]
|
#[allow(unused)]
|
||||||
pub async fn gc_ns(
|
pub async fn gc_ns(tx: &Transaction, ts: u64, ns: &str) -> Result<(), Error> {
|
||||||
tx: &mut Transaction,
|
// Fetch all databases
|
||||||
ns: &str,
|
|
||||||
limit: Option<u32>,
|
|
||||||
ts: u64,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let dbs = tx.all_db(ns).await?;
|
let dbs = tx.all_db(ns).await?;
|
||||||
let dbs = dbs.as_ref();
|
// Loop over each database
|
||||||
for db in dbs {
|
for db in dbs.as_ref() {
|
||||||
// We get the expiration of the change feed defined on the database
|
// Trace for debugging
|
||||||
let db_cf_expiry = match &db.changefeed {
|
|
||||||
None => 0,
|
|
||||||
Some(cf) => cf.expiry.as_secs(),
|
|
||||||
};
|
|
||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
trace!(
|
trace!("Performing garbage collection on {ns}:{db} for timestamp {ts}");
|
||||||
"Performing garbage collection on ns {} db {} for ts {}. The cf expiration is {}",
|
// Fetch all tables
|
||||||
ns,
|
let tbs = tx.all_tb(ns, &db.name).await?;
|
||||||
db.name,
|
// Get the database changefeed expiration
|
||||||
ts,
|
let db_cf_expiry = db.changefeed.map(|v| v.expiry.as_secs()).unwrap_or_default();
|
||||||
db_cf_expiry
|
// Get the maximum table changefeed expiration
|
||||||
);
|
let tb_cf_expiry = tbs.as_ref().iter().fold(0, |acc, tb| match &tb.changefeed {
|
||||||
let tbs = tx.all_tb(ns, db.name.as_str()).await?;
|
|
||||||
let tbs = tbs.as_ref();
|
|
||||||
let max_tb_cf_expiry = tbs.iter().fold(0, |acc, tb| match &tb.changefeed {
|
|
||||||
None => acc,
|
None => acc,
|
||||||
Some(cf) => {
|
Some(cf) => {
|
||||||
if cf.expiry.is_zero() {
|
if cf.expiry.is_zero() {
|
||||||
|
@ -54,46 +48,47 @@ pub async fn gc_ns(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
let cf_expiry = db_cf_expiry.max(max_tb_cf_expiry);
|
// Calculate the maximum changefeed expiration
|
||||||
|
let cf_expiry = db_cf_expiry.max(tb_cf_expiry);
|
||||||
|
// Ignore this database if the expiry is greater
|
||||||
if ts < cf_expiry {
|
if ts < cf_expiry {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
// We only want to retain the expiry window, so we are going to delete everything before
|
// Calculate the watermark expiry window
|
||||||
let watermark_ts = ts - cf_expiry;
|
let watermark_ts = ts - cf_expiry;
|
||||||
#[cfg(debug_assertions)]
|
// Calculate the watermark versionstamp
|
||||||
trace!("The watermark is {} after removing {cf_expiry} from {ts}", watermark_ts);
|
let watermark_vs = tx
|
||||||
let watermark_vs =
|
.lock()
|
||||||
tx.get_versionstamp_from_timestamp(watermark_ts, ns, db.name.as_str(), true).await?;
|
.await
|
||||||
|
.get_versionstamp_from_timestamp(watermark_ts, ns, &db.name, true)
|
||||||
|
.await?;
|
||||||
|
// If a versionstamp exists, then garbage collect
|
||||||
if let Some(watermark_vs) = watermark_vs {
|
if let Some(watermark_vs) = watermark_vs {
|
||||||
gc_db(tx, ns, db.name.as_str(), watermark_vs, limit).await?;
|
gc_range(tx, ns, &db.name, watermark_vs).await?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
// gc_db deletes all change feed entries in the given database that are older than the given watermark.
|
// gc_db deletes all change feed entries in the given database that are older than the given watermark.
|
||||||
pub async fn gc_db(
|
pub async fn gc_range(
|
||||||
tx: &mut Transaction,
|
tx: &Transaction,
|
||||||
ns: &str,
|
ns: &str,
|
||||||
db: &str,
|
db: &str,
|
||||||
watermark: Versionstamp,
|
watermark: Versionstamp,
|
||||||
limit: Option<u32>,
|
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let beg: Vec<u8> = change::prefix_ts(ns, db, vs::u64_to_versionstamp(0));
|
// Calculate the range
|
||||||
|
let beg = change::prefix_ts(ns, db, vs::u64_to_versionstamp(0));
|
||||||
let end = change::prefix_ts(ns, db, watermark);
|
let end = change::prefix_ts(ns, db, watermark);
|
||||||
|
// Trace for debugging
|
||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
trace!(
|
trace!(
|
||||||
"DB GC: ns: {}, db: {}, watermark: {:?}, prefix: {}, end: {}",
|
"Performing garbage collection on {ns}:{db} for watermark {watermark:?}, between {} and {}",
|
||||||
ns,
|
sprint(&beg),
|
||||||
db,
|
sprint(&end)
|
||||||
watermark,
|
|
||||||
sprint_key(&beg),
|
|
||||||
sprint_key(&end)
|
|
||||||
);
|
);
|
||||||
|
// Delete the entire range in grouped batches
|
||||||
let limit = limit.unwrap_or(100);
|
tx.delr(beg..end).await?;
|
||||||
|
// Ok all good
|
||||||
tx.delr(beg..end, limit).await?;
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
|
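Note how the `limit` plumbing disappears from the garbage-collection path: `gc_db` becomes `gc_range`, and the ranged delete `tx.delr(beg..end)` is now responsible for batching internally. A hedged usage sketch, using only names from this diff; the namespace and database strings are placeholders:

    // Sketch: collect garbage for one database up to a watermark versionstamp.
    // `tx` is a shared `&Transaction`; the old `limit` argument is gone.
    let watermark = vs::u64_to_versionstamp(4);
    crate::cf::gc_range(&tx, "example_ns", "example_db", watermark).await?;
    tx.commit().await?;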
(changefeed read module; file name not captured in this extract)

@@ -2,8 +2,8 @@ use crate::cf::{ChangeSet, DatabaseMutation, TableMutations};
 use crate::err::Error;
 use crate::key::change;
 #[cfg(debug_assertions)]
-use crate::key::debug::sprint_key;
-use crate::kvs::{Limit, ScanPage, Transaction};
+use crate::key::debug::sprint;
+use crate::kvs::Transaction;
 use crate::sql::statements::show::ShowSince;
 use crate::vs;
@@ -16,18 +16,19 @@ use crate::vs;
 // You can use this to read the change feed in chunks.
 // The second call would start from the last versionstamp + 1 of the first call.
 pub async fn read(
-	tx: &mut Transaction,
+	tx: &Transaction,
 	ns: &str,
 	db: &str,
 	tb: Option<&str>,
 	start: ShowSince,
 	limit: Option<u32>,
 ) -> Result<Vec<ChangeSet>, Error> {
+	// Calculate the start of the changefeed range
 	let beg = match start {
 		ShowSince::Versionstamp(x) => change::prefix_ts(ns, db, vs::u64_to_versionstamp(x)),
 		ShowSince::Timestamp(x) => {
 			let ts = x.0.timestamp() as u64;
-			let vs = tx.get_versionstamp_from_timestamp(ts, ns, db, true).await?;
+			let vs = tx.lock().await.get_versionstamp_from_timestamp(ts, ns, db, true).await?;
 			match vs {
 				Some(vs) => change::prefix_ts(ns, db, vs),
 				None => {
@@ -38,63 +39,49 @@ pub async fn read(
 		}
 	};
+	// Calculate the end of the changefeed range
 	let end = change::suffix(ns, db);
-	let limit = limit.unwrap_or(100);
-	let scan = tx
-		.scan_paged(
-			ScanPage {
-				range: beg..end,
-				limit: Limit::Limited(limit),
-			},
-			limit,
-		)
-		.await?;
+	// Limit the changefeed results with a default
+	let limit = limit.unwrap_or(100).min(1000);
+	// Create an empty buffer for the versionstamp
 	let mut vs: Option<[u8; 10]> = None;
+	// Create an empty buffer for the table mutations
 	let mut buf: Vec<TableMutations> = Vec::new();
-	let mut r = Vec::<ChangeSet>::new();
+	// Create an empty buffer for the final changesets
+	let mut res = Vec::<ChangeSet>::new();
 	// iterate over _x and put decoded elements to r
-	for (k, v) in scan.values {
+	for (k, v) in tx.scan(beg..end, limit).await? {
 		#[cfg(debug_assertions)]
-		trace!("read change feed; {}", sprint_key(&k));
+		trace!("Reading change feed entry: {}", sprint(&k));
+		// Decode the changefeed entry key
 		let dec = crate::key::change::Cf::decode(&k).unwrap();
-		if let Some(tb) = tb {
-			if dec.tb != tb {
-				continue;
-			}
+		// Check the change is for the desired table
+		if tb.is_some_and(|tb| tb != dec.tb) {
+			continue;
 		}
-		let _tb = dec.tb;
-		let ts = dec.vs;
 		// Decode the byte array into a vector of operations
 		let tb_muts: TableMutations = v.into();
+		// Get the timestamp of the changefeed entry
 		match vs {
 			Some(x) => {
-				if ts != x {
+				if dec.vs != x {
 					let db_mut = DatabaseMutation(buf);
-					r.push(ChangeSet(x, db_mut));
+					res.push(ChangeSet(x, db_mut));
 					buf = Vec::new();
-					vs = Some(ts)
+					vs = Some(dec.vs)
 				}
 			}
 			None => {
-				vs = Some(ts);
+				vs = Some(dec.vs);
 			}
 		}
 		buf.push(tb_muts);
 	}
+	// Collect all mutations together
 	if !buf.is_empty() {
 		let db_mut = DatabaseMutation(buf);
-		r.push(ChangeSet(vs.unwrap(), db_mut));
+		res.push(ChangeSet(vs.unwrap(), db_mut));
 	}
-	Ok(r)
+	// Return the results
+	Ok(res)
 }
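The comment above `read` explains chunked consumption: a second call resumes from the last versionstamp + 1 of the first. A sketch of that pattern using the new signature; the resumption value below is illustrative only, since this hunk does not show a versionstamp-to-u64 helper:

    // First chunk of up to 10 changesets
    let first = crate::cf::read(&tx, ns, db, Some(tb), ShowSince::Versionstamp(0), Some(10)).await?;
    // Resume from the last versionstamp + 1 (derivation of `next` not shown in this diff)
    let next: u64 = 11;
    let second = crate::cf::read(&tx, ns, db, Some(tb), ShowSince::Versionstamp(next), Some(10)).await?;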
(changefeed tests; file name not captured in this extract)

@@ -153,7 +153,6 @@ mod tests {
 	use crate::cf::{ChangeSet, DatabaseMutation, TableMutation, TableMutations};
 	use crate::dbs::Session;
 	use crate::fflags::FFLAGS;
-	use crate::key::key_req::KeyRequirements;
 	use crate::kvs::{Datastore, LockType::*, Transaction, TransactionType::*};
 	use crate::sql::changefeed::ChangeFeed;
 	use crate::sql::id::Id;
@@ -186,7 +185,7 @@ mod tests {
 		// Write things to the table.
 		//

-		let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap();
+		let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap().inner();
 		let thing_a = Thing {
 			tb: TB.to_owned(),
 			id: Id::String("A".to_string()),
@@ -205,7 +204,7 @@ mod tests {
 		tx1.complete_changes(true).await.unwrap();
 		tx1.commit().await.unwrap();

-		let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap();
+		let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap().inner();
 		let thing_c = Thing {
 			tb: TB.to_owned(),
 			id: Id::String("C".to_string()),
@@ -223,8 +222,7 @@ mod tests {
 		tx2.complete_changes(true).await.unwrap();
 		tx2.commit().await.unwrap();

-		let x = ds.transaction(Write, Optimistic).await;
-		let mut tx3 = x.unwrap();
+		let mut tx3 = ds.transaction(Write, Optimistic).await.unwrap().inner();
 		let thing_b = Thing {
 			tb: TB.to_owned(),
 			id: Id::String("B".to_string()),
@@ -262,9 +260,8 @@ mod tests {

 		let start: u64 = 0;

-		let mut tx4 = ds.transaction(Write, Optimistic).await.unwrap();
-		let r =
-			crate::cf::read(&mut tx4, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
+		let tx4 = ds.transaction(Write, Optimistic).await.unwrap();
+		let r = crate::cf::read(&tx4, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
 			.await
 			.unwrap();
 		tx4.commit().await.unwrap();
@@ -338,16 +335,15 @@ mod tests {

 		assert_eq!(r, want);

-		let mut tx5 = ds.transaction(Write, Optimistic).await.unwrap();
+		let tx5 = ds.transaction(Write, Optimistic).await.unwrap();
 		// gc_all needs to be committed before we can read the changes
-		crate::cf::gc_db(&mut tx5, NS, DB, vs::u64_to_versionstamp(4), Some(10)).await.unwrap();
+		crate::cf::gc_range(&tx5, NS, DB, vs::u64_to_versionstamp(4)).await.unwrap();
 		// We now commit tx5, which should persist the gc_all resullts
 		tx5.commit().await.unwrap();

 		// Now we should see the gc_all results
-		let mut tx6 = ds.transaction(Write, Optimistic).await.unwrap();
-		let r =
-			crate::cf::read(&mut tx6, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
+		let tx6 = ds.transaction(Write, Optimistic).await.unwrap();
+		let r = crate::cf::read(&tx6, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
 			.await
 			.unwrap();
 		tx6.commit().await.unwrap();
@@ -387,8 +383,8 @@ mod tests {
 		// Now we should see the gc_all results
 		ds.tick_at((ts.0.timestamp() + 5).try_into().unwrap()).await.unwrap();

-		let mut tx7 = ds.transaction(Write, Optimistic).await.unwrap();
-		let r = crate::cf::read(&mut tx7, NS, DB, Some(TB), ShowSince::Timestamp(ts), Some(10))
+		let tx7 = ds.transaction(Write, Optimistic).await.unwrap();
+		let r = crate::cf::read(&tx7, NS, DB, Some(TB), ShowSince::Timestamp(ts), Some(10))
 			.await
 			.unwrap();
 		tx7.commit().await.unwrap();
@@ -406,7 +402,7 @@ mod tests {
 		)
 		.await;
 		ds.tick_at(10).await.unwrap();
-		let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+		let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
 		let vs1 = tx.get_versionstamp_from_timestamp(5, NS, DB, false).await.unwrap().unwrap();
 		let vs2 = tx.get_versionstamp_from_timestamp(10, NS, DB, false).await.unwrap().unwrap();
 		tx.cancel().await.unwrap();
@@ -511,18 +507,17 @@ mod tests {
 		assert_eq!(r, expected);
 	}

-	async fn change_feed_ts(mut tx: Transaction, ts: &Datetime) -> Vec<ChangeSet> {
-		let r =
-			crate::cf::read(&mut tx, NS, DB, Some(TB), ShowSince::Timestamp(ts.clone()), Some(10))
+	async fn change_feed_ts(tx: Transaction, ts: &Datetime) -> Vec<ChangeSet> {
+		let r = crate::cf::read(&tx, NS, DB, Some(TB), ShowSince::Timestamp(ts.clone()), Some(10))
 			.await
 			.unwrap();
 		tx.cancel().await.unwrap();
 		r
 	}

-	async fn change_feed_vs(mut tx: Transaction, vs: &Versionstamp) -> Vec<ChangeSet> {
+	async fn change_feed_vs(tx: Transaction, vs: &Versionstamp) -> Vec<ChangeSet> {
 		let r = crate::cf::read(
-			&mut tx,
+			&tx,
 			NS,
 			DB,
 			Some(TB),
@@ -535,14 +530,14 @@ mod tests {
 		r
 	}

-	async fn record_change_feed_entry(mut tx: Transaction, id: String) -> Thing {
+	async fn record_change_feed_entry(tx: Transaction, id: String) -> Thing {
 		let thing = Thing {
 			tb: TB.to_owned(),
 			id: Id::String(id),
 		};
 		let value_a: Value = "a".into();
 		let previous = Cow::from(Value::None);
-		tx.record_change(
+		tx.lock().await.record_change(
 			NS,
 			DB,
 			TB,
@@ -551,7 +546,7 @@ mod tests {
 			Cow::Borrowed(&value_a),
 			DONT_STORE_PREVIOUS,
 		);
-		tx.complete_changes(true).await.unwrap();
+		tx.lock().await.complete_changes(true).await.unwrap();
 		tx.commit().await.unwrap();
 		thing
 	}
@@ -585,14 +580,14 @@ mod tests {
 		// work.
 		//

-		let mut tx0 = ds.transaction(Write, Optimistic).await.unwrap();
+		let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
 		let ns_root = crate::key::root::ns::new(NS);
-		tx0.put(ns_root.key_category(), &ns_root, dns).await.unwrap();
+		tx.put(&ns_root, dns).await.unwrap();
 		let db_root = crate::key::namespace::db::new(NS, DB);
-		tx0.put(db_root.key_category(), &db_root, ddb).await.unwrap();
+		tx.put(&db_root, ddb).await.unwrap();
 		let tb_root = crate::key::database::tb::new(NS, DB, TB);
-		tx0.put(tb_root.key_category(), &tb_root, dtb.clone()).await.unwrap();
+		tx.put(&tb_root, dtb.clone()).await.unwrap();
-		tx0.commit().await.unwrap();
+		tx.commit().await.unwrap();
 		ds
 	}
 }
@@ -1,28 +1,5 @@
use once_cell::sync::Lazy;

-#[cfg(not(target_arch = "wasm32"))]
-#[allow(dead_code)]
-/// Specifies how many concurrent jobs can be buffered in the worker channel.
-pub const MAX_CONCURRENT_TASKS: usize = 64;
-
-/// Specifies how deep various forms of computation will go before the query fails
-/// with [`crate::err::Error::ComputationDepthExceeded`].
-///
-/// For reference, use ~15 per MiB of stack in release mode.
-///
-/// During query parsing, the total depth of calls to parse values (including arrays, expressions,
-/// functions, objects, sub-queries), Javascript values, and geometry collections count against
-/// this limit.
-///
-/// During query execution, all potentially-recursive code paths count against this limit. Whereas
-/// parsing assigns equal weight to each recursion, certain expensive code paths are allowed to
-/// count for more than one unit of depth during execution.
-pub static MAX_COMPUTATION_DEPTH: Lazy<u32> =
-   lazy_env_parse!("SURREAL_MAX_COMPUTATION_DEPTH", u32, 120);
-
-/// Specifies the names of parameters which can not be specified in a query.
-pub const PROTECTED_PARAM_NAMES: &[&str] = &["access", "auth", "token", "session"];
-
/// The characters which are supported in server record IDs.
pub const ID_CHARS: [char; 36] = [
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
@@ -32,8 +9,31 @@ pub const ID_CHARS: [char; 36] = [
/// The publicly visible name of the server
pub const SERVER_NAME: &str = "SurrealDB";

-/// Datastore processor batch size for scan operations
-pub const PROCESSOR_BATCH_SIZE: u32 = 50;
+/// Specifies the names of parameters which can not be specified in a query.
+pub const PROTECTED_PARAM_NAMES: &[&str] = &["access", "auth", "token", "session"];
+
+/// Specifies how many concurrent jobs can be buffered in the worker channel.
+#[cfg(not(target_arch = "wasm32"))]
+pub static MAX_CONCURRENT_TASKS: Lazy<usize> =
+   lazy_env_parse!("SURREAL_MAX_CONCURRENT_TASKS", usize, 64);
+
+/// Specifies how deep recursive computation will go before an error is returned.
+pub static MAX_COMPUTATION_DEPTH: Lazy<u32> =
+   lazy_env_parse!("SURREAL_MAX_COMPUTATION_DEPTH", u32, 120);
+
+/// Specifies the number of items which can be cached within a single transaction.
+pub static TRANSACTION_CACHE_SIZE: Lazy<usize> =
+   lazy_env_parse!("SURREAL_TRANSACTION_CACHE_SIZE", usize, 10_000);
+
+/// The maximum number of keys that should be scanned at once in general queries.
+pub static NORMAL_FETCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_NORMAL_FETCH_SIZE", u32, 50);
+
+/// The maximum number of keys that should be scanned at once for export queries.
+pub static EXPORT_BATCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_EXPORT_BATCH_SIZE", u32, 1000);
+
+/// The maximum number of keys that should be fetched when streaming range scans in a Scanner.
+pub static MAX_STREAM_BATCH_SIZE: Lazy<u32> =
+   lazy_env_parse!("SURREAL_MAX_STREAM_BATCH_SIZE", u32, 1000);
+
/// Forward all signup/signin query errors to a client performing record access. Do not use in production.
pub static INSECURE_FORWARD_RECORD_ACCESS_ERRORS: Lazy<bool> =
@@ -50,6 +50,3 @@ pub static INSECURE_FORWARD_RECORD_ACCESS_ERRORS: Lazy<bool> =
/// If the environment variable is not present or cannot be parsed, a default value of 50,000 is used.
pub static EXTERNAL_SORTING_BUFFER_LIMIT: Lazy<usize> =
    lazy_env_parse!("SURREAL_EXTERNAL_SORTING_BUFFER_LIMIT", usize, 50_000);
-
-/// The number of records that should be fetched and grouped together in an INSERT statement when exporting.
-pub static EXPORT_BATCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_EXPORT_BATCH_SIZE", u32, 1000);
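Note: every tunable in this file is now read through the codebase's `lazy_env_parse!` macro rather than being a hard-coded `const`, so operators can override each value via a `SURREAL_*` environment variable. A minimal sketch of how such a macro can work, assuming a once_cell-based expansion (the real macro lives elsewhere in the SurrealDB codebase; this standalone version is an illustrative assumption):

use once_cell::sync::Lazy;

// Hypothetical stand-in for `lazy_env_parse!`: read the environment variable
// on first use, fall back to the default if it is unset or fails to parse.
macro_rules! lazy_env_parse {
    ($key:expr, $t:ty, $default:expr) => {
        Lazy::new(|| {
            std::env::var($key)
                .ok()
                .and_then(|s| s.parse::<$t>().ok())
                .unwrap_or($default)
        })
    };
}

pub static NORMAL_FETCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_NORMAL_FETCH_SIZE", u32, 50);

fn main() {
    // The value is parsed once on first dereference and cached afterwards.
    println!("batch size: {}", *NORMAL_FETCH_SIZE);
}

Because the statics are now `Lazy<T>` rather than plain consts, call sites dereference them (for example `*MAX_CONCURRENT_TASKS`), as the iterator changes further down show.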
@@ -2,15 +2,14 @@ use crate::ctx::canceller::Canceller;
use crate::ctx::reason::Reason;
#[cfg(feature = "http")]
use crate::dbs::capabilities::NetTarget;
-use crate::dbs::{Capabilities, Notification, Transaction};
+use crate::dbs::{Capabilities, Notification};
use crate::err::Error;
use crate::idx::planner::executor::QueryExecutor;
use crate::idx::planner::{IterationStage, QueryPlanner};
use crate::idx::trees::store::IndexStores;
-use crate::kvs;
+use crate::kvs::Transaction;
use crate::sql::value::Value;
use channel::Sender;
-use futures::lock::MutexLockFuture;
use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt::{self, Debug};
@@ -72,7 +71,7 @@ pub struct Context<'a> {
    // The temporary directory
    temporary_directory: Option<Arc<PathBuf>>,
    // An optional transaction
-   transaction: Option<Transaction>,
+   transaction: Option<Arc<Transaction>>,
}

impl<'a> Default for Context<'a> {
@@ -81,6 +80,12 @@ impl<'a> Default for Context<'a> {
    }
}

+impl<'a> From<Transaction> for Context<'a> {
+   fn from(txn: Transaction) -> Self {
+       Context::background().with_transaction(Arc::new(txn))
+   }
+}
+
impl<'a> Debug for Context<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Context")
@@ -239,23 +244,19 @@ impl<'a> Context<'a> {
        self.iteration_stage = Some(is);
    }

-   pub(crate) fn set_transaction_mut(&mut self, txn: Transaction) {
+   pub(crate) fn set_transaction(&mut self, txn: Arc<Transaction>) {
        self.transaction = Some(txn);
    }

-   pub fn set_transaction(mut self, txn: Transaction) -> Self {
+   pub(crate) fn with_transaction(mut self, txn: Arc<Transaction>) -> Self {
        self.transaction = Some(txn);
        self
    }

-   pub fn get_transaction(&self) -> Option<&Transaction> {
-       self.transaction.as_ref()
-   }
-
-   pub(crate) fn tx_lock(&self) -> MutexLockFuture<'_, kvs::Transaction> {
+   pub(crate) fn tx(&self) -> Arc<Transaction> {
        self.transaction
            .as_ref()
-           .map(|txn| txn.lock())
+           .map(Arc::clone)
            .unwrap_or_else(|| unreachable!("The context was not associated with a transaction"))
    }
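With the transaction now stored as an `Arc<Transaction>`, handing a transaction to a context and retrieving it becomes a cheap reference-count bump instead of a mutex acquisition. A minimal sketch of the ownership model (the types here are simplified stand-ins, not the real `kvs::Transaction`):

use std::sync::Arc;

struct Transaction; // stand-in for the real type
struct Context {
    transaction: Option<Arc<Transaction>>,
}

impl Context {
    fn background() -> Self {
        Context { transaction: None }
    }
    fn with_transaction(mut self, txn: Arc<Transaction>) -> Self {
        self.transaction = Some(txn);
        self
    }
    // Cloning an Arc only bumps a reference count; the transaction
    // itself is shared, not copied.
    fn tx(&self) -> Arc<Transaction> {
        self.transaction
            .as_ref()
            .map(Arc::clone)
            .expect("the context was not associated with a transaction")
    }
}

impl From<Transaction> for Context {
    fn from(txn: Transaction) -> Self {
        Context::background().with_transaction(Arc::new(txn))
    }
}

fn main() {
    let ctx = Context::from(Transaction);
    let _t1 = ctx.tx();
    let _t2 = ctx.tx(); // both handles point at the same transaction
}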
@@ -1,27 +1,13 @@
-use std::sync::Arc;
-
-use channel::Receiver;
-use futures::lock::Mutex;
-use futures::StreamExt;
-use reblessive::TreeStack;
-#[cfg(not(target_arch = "wasm32"))]
-use tokio::spawn;
-use tracing::instrument;
-use trice::Instant;
-#[cfg(target_arch = "wasm32")]
-use wasm_bindgen_futures::spawn_local as spawn;
-
use crate::ctx::Context;
use crate::dbs::response::Response;
use crate::dbs::Force;
use crate::dbs::Notification;
use crate::dbs::Options;
use crate::dbs::QueryType;
-use crate::dbs::Transaction;
use crate::err::Error;
use crate::iam::Action;
use crate::iam::ResourceKind;
-use crate::kvs::lq_structs::TrackedResult;
+use crate::kvs::Transaction;
use crate::kvs::TransactionType;
use crate::kvs::{Datastore, LockType::*, TransactionType::*};
use crate::sql::paths::DB;
@@ -30,11 +16,21 @@ use crate::sql::query::Query;
use crate::sql::statement::Statement;
use crate::sql::value::Value;
use crate::sql::Base;
+use channel::Receiver;
+use futures::StreamExt;
+use reblessive::TreeStack;
+use std::sync::Arc;
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::spawn;
+use tracing::instrument;
+use trice::Instant;
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_futures::spawn_local as spawn;

pub(crate) struct Executor<'a> {
    err: bool,
    kvs: &'a Datastore,
-   txn: Option<Transaction>,
+   txn: Option<Arc<Transaction>>,
}

impl<'a> Executor<'a> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn txn(&self) -> Transaction {
|
fn txn(&self) -> Arc<Transaction> {
|
||||||
self.txn.clone().expect("unreachable: txn was None after successful begin")
|
self.txn.clone().expect("unreachable: txn was None after successful begin")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -60,7 +56,7 @@ impl<'a> Executor<'a> {
|
||||||
Some(_) => false,
|
Some(_) => false,
|
||||||
None => match self.kvs.transaction(write, Optimistic).await {
|
None => match self.kvs.transaction(write, Optimistic).await {
|
||||||
Ok(v) => {
|
Ok(v) => {
|
||||||
self.txn = Some(Arc::new(Mutex::new(v)));
|
self.txn = Some(Arc::new(v));
|
||||||
true
|
true
|
||||||
}
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
|
@@ -81,37 +77,27 @@ impl<'a> Executor<'a> {
        if local {
            // Extract the transaction
            if let Some(txn) = self.txn.take() {
+               // Lock the transaction
                let mut txn = txn.lock().await;
+               // Check for any errors
                if self.err {
-                   // Cancel and ignore any error because the error flag was
-                   // already set
                    let _ = txn.cancel().await;
                } else {
-                   let r = match txn.complete_changes(false).await {
-                       Ok(_) => {
-                           match txn.commit().await {
-                               Ok(()) => {
-                                   // Commit succeeded, do post commit operations that do not matter to the tx
-                                   let lqs: Vec<TrackedResult> =
-                                       txn.consume_pending_live_queries();
-                                   // Track the live queries in the data store
-                                   self.kvs.handle_postprocessing_of_statements(&lqs).await?;
-                                   Ok(())
-                               }
-                               Err(e) => Err(e),
-                           }
-                       }
-                       r => r,
-                   };
-                   if let Err(e) = r {
-                       // Transaction failed to commit
-                       //
-                       // TODO: Not all commit errors definitively mean
-                       // the transaction didn't commit. Detect that and tell
-                       // the user.
-                       self.err = true;
-                       return Err(e);
-                   }
+                   if let Err(e) = txn.complete_changes(false).await {
+                       // Rollback the transaction
+                       let _ = txn.cancel().await;
+                       // Return the error message
+                       self.err = true;
+                       return Err(e);
+                   }
+                   if let Err(e) = txn.commit().await {
+                       // Rollback the transaction
+                       let _ = txn.cancel().await;
+                       // Return the error message
+                       self.err = true;
+                       return Err(e);
+                   };
                }
            }
        }
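The flattened control flow above follows one rule: any failure once the transaction has work in flight cancels the transaction before surfacing the error. A freestanding sketch of that shape (the `complete_changes`/`commit`/`cancel` names mirror the diff; the types here are dummies):

#[derive(Debug)]
struct TxError(&'static str);

struct Tx;

impl Tx {
    async fn complete_changes(&mut self) -> Result<(), TxError> { Ok(()) }
    async fn commit(&mut self) -> Result<(), TxError> { Ok(()) }
    async fn cancel(&mut self) -> Result<(), TxError> { Ok(()) }
}

// On any failure, roll the transaction back and propagate the error;
// a cancel failure at that point is deliberately ignored.
async fn finish(mut txn: Tx) -> Result<(), TxError> {
    if let Err(e) = txn.complete_changes().await {
        let _ = txn.cancel().await;
        return Err(e);
    }
    if let Err(e) = txn.commit().await {
        let _ = txn.cancel().await;
        return Err(e);
    }
    Ok(())
}

#[tokio::main]
async fn main() {
    finish(Tx).await.unwrap();
}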
@@ -122,7 +108,6 @@ impl<'a> Executor<'a> {
        if local {
            // Extract the transaction
            if let Some(txn) = self.txn.take() {
-               let mut txn = txn.lock().await;
                if txn.cancel().await.is_err() {
                    self.err = true;
                }
@@ -168,7 +153,6 @@ impl<'a> Executor<'a> {

    /// Flush notifications from a buffer channel (live queries) to the committed notification channel.
    /// This is because we don't want to broadcast notifications to the user for failed transactions.
-   /// TODO we can delete this once we migrate to lq v2
    async fn flush(&self, ctx: &Context<'_>, mut rcv: Receiver<Notification>) {
        let sender = ctx.notifications();
        spawn(async move {
@@ -182,17 +166,6 @@ impl<'a> Executor<'a> {
        });
    }

-   /// A transaction collects created live queries which can then be consumed when a transaction is committed
-   /// We use this function to get these transactions and send them to the invoker without channels
-   async fn consume_committed_live_query_registrations(&self) -> Option<Vec<TrackedResult>> {
-       if let Some(txn) = self.txn.as_ref() {
-           let txn = txn.lock().await;
-           Some(txn.consume_pending_live_queries())
-       } else {
-           None
-       }
-   }
-
    async fn set_ns(&self, ctx: &mut Context<'_>, opt: &mut Options, ns: &str) {
        let mut session = ctx.value("session").unwrap_or(&Value::None).clone();
        session.put(NS.as_ref(), ns.to_owned().into());
@@ -213,10 +186,9 @@ impl<'a> Executor<'a> {
        mut ctx: Context<'_>,
        opt: Options,
        qry: Query,
-   ) -> Result<(Vec<Response>, Vec<TrackedResult>), Error> {
+   ) -> Result<Vec<Response>, Error> {
        // The stack to run the executor in.
        let mut stack = TreeStack::new();

        // Create a notification channel
        let (send, recv) = channel::unbounded();
        // Set the notification channel
@@ -225,7 +197,6 @@ impl<'a> Executor<'a> {
        let mut buf: Vec<Response> = vec![];
        // Initialise array of responses
        let mut out: Vec<Response> = vec![];
-       let mut live_queries: Vec<TrackedResult> = vec![];
        // Do we fast-forward a transaction?
        // Set to true when we encounter a return statement in a transaction
        let mut ff_txn = false;
@@ -293,9 +264,6 @@ impl<'a> Executor<'a> {
                    let commit_error = self.commit(true).await.err();
                    buf = buf.into_iter().map(|v| self.buf_commit(v, &commit_error)).collect();
                    self.flush(&ctx, recv.clone()).await;
-                   if let Some(lqs) = self.consume_committed_live_query_registrations().await {
-                       live_queries.extend(lqs);
-                   }
                    out.append(&mut buf);
                    debug_assert!(self.txn.is_none(), "commit(true) should have unset txn");
                    self.txn = None;
@@ -322,7 +290,8 @@ impl<'a> Executor<'a> {
                    true => Err(Error::TxFailure),
                    // The transaction began successfully
                    false => {
-                       ctx.set_transaction_mut(self.txn());
+                       // ctx.set_transaction(txn)
+                       ctx.set_transaction(self.txn());
                        // Check the statement
                        match stack
                            .enter(|stk| stm.compute(stk, &ctx, &opt, None))
@@ -347,12 +316,6 @@ impl<'a> Executor<'a> {
                            Ok(_) => {
                                // Flush live query notifications
                                self.flush(&ctx, recv.clone()).await;
-                               if let Some(lqs) = self
-                                   .consume_committed_live_query_registrations()
-                                   .await
-                               {
-                                   live_queries.extend(lqs);
-                               }
                                Ok(Value::None)
                            }
                        }
@@ -395,7 +358,7 @@ impl<'a> Executor<'a> {
                        if let Err(err) = ctx.add_timeout(timeout) {
                            Err(err)
                        } else {
-                           ctx.set_transaction_mut(self.txn());
+                           ctx.set_transaction(self.txn());
                            // Process the statement
                            let res = stack
                                .enter(|stk| stm.compute(stk, &ctx, &opt, None))
@@ -410,7 +373,7 @@ impl<'a> Executor<'a> {
                    }
                    // There is no timeout clause
                    None => {
-                       ctx.set_transaction_mut(self.txn());
+                       ctx.set_transaction(self.txn());
                        stack
                            .enter(|stk| stm.compute(stk, &ctx, &opt, None))
                            .finish()
@@ -445,11 +408,6 @@ impl<'a> Executor<'a> {
                    } else {
                        // Flush the live query change notifications
                        self.flush(&ctx, recv.clone()).await;
-                       if let Some(lqs) =
-                           self.consume_committed_live_query_registrations().await
-                       {
-                           live_queries.extend(lqs);
-                       }
                        res
                    }
                } else {
@@ -475,18 +433,8 @@ impl<'a> Executor<'a> {
                    e
                }),
                query_type: match (is_stm_live, is_stm_kill) {
-                   (true, _) => {
-                       if let Some(lqs) = self.consume_committed_live_query_registrations().await {
-                           live_queries.extend(lqs);
-                       }
-                       QueryType::Live
-                   }
-                   (_, true) => {
-                       if let Some(lqs) = self.consume_committed_live_query_registrations().await {
-                           live_queries.extend(lqs);
-                       }
-                       QueryType::Kill
-                   }
+                   (true, _) => QueryType::Live,
+                   (_, true) => QueryType::Kill,
                    _ => QueryType::Other,
                },
            };
@@ -502,7 +450,7 @@ impl<'a> Executor<'a> {
            }
        }
        // Return responses
-       Ok((out, live_queries))
+       Ok(out)
    }
}
@@ -529,7 +529,7 @@ impl Iterator {
        // Create a channel to shutdown
        let (end, exit) = channel::bounded::<()>(1);
        // Create a bounded channel
-       let (chn, docs) = channel::bounded(crate::cnf::MAX_CONCURRENT_TASKS);
+       let (chn, docs) = channel::bounded(*crate::cnf::MAX_CONCURRENT_TASKS);
        // Create an async closure for prepared values
        let adocs = async {
            // Process all prepared values
@@ -553,7 +553,7 @@ impl Iterator {
            drop(chn);
        };
        // Create a bounded channel
-       let (chn, vals) = channel::bounded(crate::cnf::MAX_CONCURRENT_TASKS);
+       let (chn, vals) = channel::bounded(*crate::cnf::MAX_CONCURRENT_TASKS);
        // Create an async closure for received values
        let avals = async {
            // Process all received values
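Since `MAX_CONCURRENT_TASKS` is now a `Lazy<usize>`, call sites dereference it, and the channels stay bounded, which is what gives the iterator backpressure: producers block once the buffer is full instead of growing memory without limit. The same behaviour, demonstrated with std's `sync_channel` as a rough analogue of `channel::bounded`:

use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // A bounded channel holding at most 64 in-flight items, like
    // channel::bounded(*MAX_CONCURRENT_TASKS) in the diff.
    let (tx, rx) = sync_channel::<u32>(64);

    let producer = thread::spawn(move || {
        for i in 0..1000 {
            // Blocks when 64 items are already buffered, so a slow
            // consumer throttles the producer.
            tx.send(i).unwrap();
        }
    });

    let mut sum = 0u64;
    for v in rx {
        sum += v as u64;
    }
    producer.join().unwrap();
    assert_eq!(sum, 499_500);
}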
@@ -15,7 +15,6 @@ mod result;
mod session;
mod statement;
mod store;
-mod transaction;
mod variables;

pub mod capabilities;
@@ -32,7 +31,6 @@ pub use self::session::*;
pub(crate) use self::executor::*;
pub(crate) use self::iterator::*;
pub(crate) use self::statement::*;
-pub(crate) use self::transaction::*;
pub(crate) use self::variables::*;

#[doc(hidden)]
@@ -1,28 +1,117 @@
-use crate::err::Error;
-use crate::err::Error::TimestampOverflow;
-use crate::sql::Duration;
-use derive::{Key, Store};
+use crate::sql::statements::info::InfoStructure;
+use crate::sql::Value;
+use derive::Store;
use revision::revisioned;
+use revision::Error;
use serde::{Deserialize, Serialize};
+use std::fmt::{self, Display};
use std::ops::{Add, Sub};
+use std::time::Duration;
+use uuid::Uuid;

-// NOTE: This is not a statement, but as per layering, keeping it here till we
-// have a better structure.
-#[revisioned(revision = 1)]
-#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, PartialOrd, Hash, Store)]
+#[revisioned(revision = 2)]
+#[derive(Clone, Debug, Default, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash, Store)]
#[non_exhaustive]
-pub struct ClusterMembership {
+pub struct Node {
+   #[revision(start = 2, default_fn = "default_id")]
+   pub id: Uuid,
+   #[revision(start = 2, default_fn = "default_hb")]
+   pub hb: Timestamp,
+   #[revision(start = 2, default_fn = "default_gc")]
+   pub gc: bool,
+   #[revision(end = 2, convert_fn = "convert_name")]
    pub name: String,
-   // TiKV = TiKV TSO Timestamp as u64
-   // not TiKV = local nanos as u64
+   #[revision(end = 2, convert_fn = "convert_heartbeat")]
    pub heartbeat: Timestamp,
}

+impl Node {
+   /// Create a new Node entry
+   pub fn new(id: Uuid, hb: Timestamp, gc: bool) -> Self {
+       Self {
+           id,
+           hb,
+           gc,
+           ..Default::default()
+       }
+   }
+   /// Mark this node as archived
+   pub fn archive(&self) -> Self {
+       Node {
+           gc: true,
+           ..self.to_owned()
+       }
+   }
+   /// Return the id of this node
+   pub fn id(&self) -> Uuid {
+       self.id
+   }
+   /// Check if this node is active
+   pub fn is_active(&self) -> bool {
+       !self.gc
+   }
+   /// Check if this node is archived
+   pub fn is_archived(&self) -> bool {
+       self.gc
+   }
+   // Return the node id if archived
+   pub fn archived(&self) -> Option<Uuid> {
+       match self.is_archived() {
+           true => Some(self.id),
+           false => None,
+       }
+   }
+   // Sets the default id value for old nodes
+   fn default_id(_revision: u16) -> Uuid {
+       Uuid::default()
+   }
+   // Sets the default hb value for old nodes
+   fn default_hb(_revision: u16) -> Timestamp {
+       Timestamp::default()
+   }
+   // Sets the default gc value for old nodes
+   fn default_gc(_revision: u16) -> bool {
+       true
+   }
+   // Converts the name field of old nodes into the node id
+   fn convert_name(&mut self, _revision: u16, value: String) -> Result<(), Error> {
+       self.id = Uuid::parse_str(&value).unwrap();
+       Ok(())
+   }
+   // Converts the heartbeat field of old nodes
+   fn convert_heartbeat(&mut self, _revision: u16, value: Timestamp) -> Result<(), Error> {
+       self.hb = value;
+       Ok(())
+   }
+}
+
+impl Display for Node {
+   fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+       write!(f, "NODE {} SEEN {}", self.id, self.hb)?;
+       match self.gc {
+           true => write!(f, " ARCHIVED")?,
+           false => write!(f, " ACTIVE")?,
+       };
+       Ok(())
+   }
+}
+
+impl InfoStructure for Node {
+   fn structure(self) -> Value {
+       Value::from(map! {
+           "id".to_string() => Value::from(self.id),
+           "seen".to_string() => self.hb.structure(),
+           "active".to_string() => Value::from(!self.gc),
+       })
+   }
+}
+
// This struct is meant to represent a timestamp that can be used to partially order
// events in a cluster. It should be derived from a timestamp oracle, such as the
// one available in TiKV via the client `TimestampExt` implementation.
#[revisioned(revision = 1)]
#[derive(
-   Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize, Ord, PartialOrd, Hash, Store, Default,
+   Clone, Copy, Default, Debug, Eq, PartialEq, PartialOrd, Deserialize, Serialize, Hash, Store,
)]
#[non_exhaustive]
pub struct Timestamp {
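The `#[revision(...)]` attributes drive the migration from the old `ClusterMembership` shape (revision 1) to the new `Node` (revision 2): fields that exist only from revision 2 receive `default_fn` values when decoding old data, and the retired `name`/`heartbeat` fields are fed through `convert_fn` hooks. Stripped of the derive machinery, the upgrade amounts to roughly this (a hand-rolled illustration, not the real `revision` crate output):

use uuid::Uuid;

#[derive(Default, Clone, Copy, Debug, PartialEq)]
struct Timestamp { value: u64 }

// Revision 1 on-disk shape.
struct ClusterMembershipV1 {
    name: String, // held a stringified node id
    heartbeat: Timestamp,
}

// Revision 2 shape.
#[derive(Debug)]
struct Node {
    id: Uuid,
    hb: Timestamp,
    gc: bool,
}

impl From<ClusterMembershipV1> for Node {
    fn from(old: ClusterMembershipV1) -> Self {
        Node {
            // convert_name: the old `name` field carried the node id.
            id: Uuid::parse_str(&old.name).unwrap_or_default(),
            // convert_heartbeat: carried over directly.
            hb: old.heartbeat,
            // default_gc: nodes decoded from old data are treated as archived.
            gc: true,
        }
    }
}

fn main() {
    let old = ClusterMembershipV1 {
        name: "67e55044-10b1-426f-9247-bb680e5fe0c8".into(),
        heartbeat: Timestamp { value: 42 },
    };
    let node = Node::from(old);
    assert!(node.gc);
    println!("{node:?}");
}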
@@ -30,62 +119,49 @@ pub struct Timestamp {
}

impl From<u64> for Timestamp {
-   fn from(ts: u64) -> Self {
+   fn from(value: u64) -> Self {
        Timestamp {
-           value: ts,
+           value,
        }
    }
}

-// This struct is to be used only when storing keys as the macro currently
-// conflicts when you have Store and Key derive macros.
-#[revisioned(revision = 1)]
-#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, PartialOrd, Hash, Key)]
-#[non_exhaustive]
-pub struct KeyTimestamp {
-   pub value: u64,
-}
-
-impl From<&Timestamp> for KeyTimestamp {
-   fn from(ts: &Timestamp) -> Self {
-       KeyTimestamp {
-           value: ts.value,
-       }
-   }
-}
-
-impl Add<&Duration> for &Timestamp {
+impl Add<Duration> for Timestamp {
    type Output = Timestamp;
-   fn add(self, rhs: &Duration) -> Timestamp {
+   fn add(self, rhs: Duration) -> Self::Output {
        Timestamp {
-           value: self.value + rhs.as_millis() as u64,
+           value: self.value.wrapping_add(rhs.as_millis() as u64),
        }
    }
}

-impl Sub<&Duration> for &Timestamp {
-   type Output = Result<Timestamp, Error>;
-   fn sub(self, rhs: &Duration) -> Self::Output {
-       let millis = rhs.as_millis() as u64;
-       if self.value <= millis {
-           // Removing the duration from this timestamp will cause it to overflow
-           return Err(TimestampOverflow(format!(
-               "Failed to subtract {} from {}",
-               &millis, &self.value
-           )));
-       }
-       Ok(Timestamp {
-           value: self.value - millis,
-       })
+impl Sub<Duration> for Timestamp {
+   type Output = Timestamp;
+   fn sub(self, rhs: Duration) -> Self::Output {
+       Timestamp {
+           value: self.value.wrapping_sub(rhs.as_millis() as u64),
+       }
+   }
+}
+
+impl Display for Timestamp {
+   fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+       write!(f, "{}", self.value)
+   }
+}
+
+impl InfoStructure for Timestamp {
+   fn structure(self) -> Value {
+       self.value.into()
    }
}
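Swapping the checked subtraction (and its `TimestampOverflow` error) for `wrapping_sub`/`wrapping_add` makes `Timestamp` arithmetic total: underflow no longer returns a `Result`, it wraps modulo 2^64. A self-contained illustration of the new operator impls:

use std::ops::{Add, Sub};
use std::time::Duration;

#[derive(Clone, Copy, Debug, PartialEq)]
struct Timestamp { value: u64 }

impl Add<Duration> for Timestamp {
    type Output = Timestamp;
    fn add(self, rhs: Duration) -> Self::Output {
        Timestamp { value: self.value.wrapping_add(rhs.as_millis() as u64) }
    }
}

impl Sub<Duration> for Timestamp {
    type Output = Timestamp;
    fn sub(self, rhs: Duration) -> Self::Output {
        Timestamp { value: self.value.wrapping_sub(rhs.as_millis() as u64) }
    }
}

fn main() {
    let ts = Timestamp { value: 1_000 };
    assert_eq!((ts + Duration::from_secs(1)).value, 2_000);
    // Underflow now wraps instead of erroring: 1_000 - 2_000 wraps to u64::MAX - 999.
    assert_eq!((ts - Duration::from_secs(2)).value, u64::MAX - 999);
}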
#[cfg(test)]
mod test {
    use crate::dbs::node::Timestamp;
-   use crate::sql::Duration;
    use chrono::prelude::Utc;
    use chrono::TimeZone;
+   use std::time::Duration;

    #[test]
    fn timestamps_can_be_added_duration() {
@@ -94,10 +170,10 @@ mod test {
            value: t.timestamp_millis() as u64,
        };

-       let hour = Duration(core::time::Duration::from_secs(60 * 60));
-       let ts = &ts + &hour;
-       let ts = &ts + &hour;
-       let ts = &ts + &hour;
+       let hour = Duration::from_secs(60 * 60);
+       let ts = ts + hour;
+       let ts = ts + hour;
+       let ts = ts + hour;

        let end_time = Utc.timestamp_millis_opt(ts.value as i64).unwrap();
        let expected_end_time = Utc.with_ymd_and_hms(2000, 1, 1, 15, 30, 0).unwrap();
@@ -111,10 +187,10 @@ mod test {
            value: t.timestamp_millis() as u64,
        };

-       let hour = Duration(core::time::Duration::from_secs(60 * 60));
-       let ts = (&ts - &hour).unwrap();
-       let ts = (&ts - &hour).unwrap();
-       let ts = (&ts - &hour).unwrap();
+       let hour = Duration::from_secs(60 * 60);
+       let ts = ts - hour;
+       let ts = ts - hour;
+       let ts = ts - hour;

        let end_time = Utc.timestamp_millis_opt(ts.value as i64).unwrap();
        let expected_end_time = Utc.with_ymd_and_hms(2000, 1, 1, 9, 30, 0).unwrap();
@@ -57,16 +57,6 @@ pub enum Force {
    Index(Arc<[DefineIndexStatement]>),
}

-impl Force {
-   pub fn is_none(&self) -> bool {
-       matches!(self, Force::None)
-   }
-
-   pub fn is_forced(&self) -> bool {
-       !matches!(self, Force::None)
-   }
-}
-
impl Default for Options {
    fn default() -> Self {
        Options::new()
@@ -111,8 +101,9 @@ impl Options {
    // --------------------------------------------------

    /// Set all the required options from a single point.
-   /// The system expects these values to always be set, so this should be called for all
-   /// instances when there is doubt.
+   /// The system expects these values to always be set,
+   /// so this should be called for all instances when
+   /// there is doubt.
    pub fn with_required(
        mut self,
        node_id: Uuid,
@@ -334,21 +325,25 @@ impl Options {
    // --------------------------------------------------

    /// Get current Node ID
+   #[inline(always)]
    pub fn id(&self) -> Result<Uuid, Error> {
-       self.id.ok_or(Error::Unreachable("Options::id"))
+       self.id.ok_or(Error::Unreachable("No Node ID is specified"))
    }

    /// Get currently selected NS
+   #[inline(always)]
    pub fn ns(&self) -> Result<&str, Error> {
        self.ns.as_ref().map(AsRef::as_ref).ok_or(Error::NsEmpty)
    }

    /// Get currently selected DB
+   #[inline(always)]
    pub fn db(&self) -> Result<&str, Error> {
        self.db.as_ref().map(AsRef::as_ref).ok_or(Error::DbEmpty)
    }

    /// Check whether this request supports realtime queries
+   #[inline(always)]
    pub fn realtime(&self) -> Result<(), Error> {
        if !self.live {
            return Err(Error::RealtimeDisabled);
@@ -357,6 +352,7 @@ impl Options {
    }

    // Validate Options for Namespace
+   #[inline(always)]
    pub fn valid_for_ns(&self) -> Result<(), Error> {
        if self.ns.is_none() {
            return Err(Error::NsEmpty);
@@ -365,9 +361,11 @@ impl Options {
    }

    // Validate Options for Database
+   #[inline(always)]
    pub fn valid_for_db(&self) -> Result<(), Error> {
-       self.valid_for_ns()?;
+       if self.ns.is_none() {
+           return Err(Error::NsEmpty);
+       }
        if self.db.is_none() {
            return Err(Error::DbEmpty);
        }
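The `valid_for_db` change spells out the namespace check inline instead of delegating to `valid_for_ns`, and every small accessor gains `#[inline(always)]`. The guard pattern itself is simple; a condensed, self-contained sketch of the two validators:

#[derive(Debug, PartialEq)]
enum Error { NsEmpty, DbEmpty }

struct Options { ns: Option<String>, db: Option<String> }

impl Options {
    #[inline(always)]
    fn valid_for_ns(&self) -> Result<(), Error> {
        if self.ns.is_none() {
            return Err(Error::NsEmpty);
        }
        Ok(())
    }
    #[inline(always)]
    fn valid_for_db(&self) -> Result<(), Error> {
        // Both guards are now written out here rather than calling valid_for_ns().
        if self.ns.is_none() {
            return Err(Error::NsEmpty);
        }
        if self.db.is_none() {
            return Err(Error::DbEmpty);
        }
        Ok(())
    }
}

fn main() {
    let o = Options { ns: Some("test".into()), db: None };
    assert_eq!(o.valid_for_ns(), Ok(()));
    assert_eq!(o.valid_for_db(), Err(Error::DbEmpty));
}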
@@ -1,4 +1,4 @@
-use crate::cnf::PROCESSOR_BATCH_SIZE;
+use crate::cnf::NORMAL_FETCH_SIZE;
use crate::ctx::Context;
#[cfg(not(target_arch = "wasm32"))]
use crate::dbs::distinct::AsyncDistinct;
@@ -8,12 +8,12 @@ use crate::err::Error;
use crate::idx::planner::iterators::{CollectorRecord, IteratorRef, ThingIterator};
use crate::idx::planner::IterationStage;
use crate::key::{graph, thing};
-use crate::kvs;
-use crate::kvs::ScanPage;
+use crate::kvs::Transaction;
use crate::sql::dir::Dir;
use crate::sql::{Edges, Range, Table, Thing, Value};
#[cfg(not(target_arch = "wasm32"))]
use channel::Sender;
+use futures::StreamExt;
use reblessive::tree::Stk;
use std::ops::Bound;
use std::vec;
@@ -150,10 +150,10 @@ impl<'a> Processor<'a> {
                    self.process_index(stk, ctx, opt, stm, &t, irf).await?
                }
                Iterable::Mergeable(v, o) => {
-                   self.process_mergeable(stk, ctx, opt, stm, v, o).await?
+                   self.process_mergeable(stk, ctx, opt, stm, (v, o)).await?
                }
                Iterable::Relatable(f, v, w, o) => {
-                   self.process_relatable(stk, ctx, opt, stm, f, v, w, o).await?
+                   self.process_relatable(stk, ctx, opt, stm, (f, v, w, o)).await?
                }
            }
        }
@@ -178,6 +178,27 @@ impl<'a> Processor<'a> {
        self.process(stk, ctx, opt, stm, pro).await
    }

+   async fn process_defer(
+       &mut self,
+       stk: &mut Stk,
+       ctx: &Context<'_>,
+       opt: &Options,
+       stm: &Statement<'_>,
+       v: Thing,
+   ) -> Result<(), Error> {
+       // Check that the table exists
+       ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
+       // Process the document record
+       let pro = Processed {
+           rid: Some(v),
+           ir: None,
+           val: Operable::Value(Value::None),
+       };
+       self.process(stk, ctx, opt, stm, pro).await?;
+       // Everything ok
+       Ok(())
+   }
+
    async fn process_thing(
        &mut self,
        stk: &mut Stk,
@@ -187,10 +208,10 @@ impl<'a> Processor<'a> {
        v: Thing,
    ) -> Result<(), Error> {
        // Check that the table exists
-       ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
+       ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
        // Fetch the data from the store
        let key = thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
-       let val = ctx.tx_lock().await.get(key).await?;
+       let val = ctx.tx().get(key).await?;
        // Parse the data from the store
        let val = Operable::Value(match val {
            Some(v) => Value::from(v),
@@ -207,41 +228,19 @@ impl<'a> Processor<'a> {
        Ok(())
    }

-   async fn process_defer(
-       &mut self,
-       stk: &mut Stk,
-       ctx: &Context<'_>,
-       opt: &Options,
-       stm: &Statement<'_>,
-       v: Thing,
-   ) -> Result<(), Error> {
-       // Check that the table exists
-       ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
-       // Process the document record
-       let pro = Processed {
-           rid: Some(v),
-           ir: None,
-           val: Operable::Value(Value::None),
-       };
-       self.process(stk, ctx, opt, stm, pro).await?;
-       // Everything ok
-       Ok(())
-   }
-
    async fn process_mergeable(
        &mut self,
        stk: &mut Stk,
        ctx: &Context<'_>,
        opt: &Options,
        stm: &Statement<'_>,
-       v: Thing,
-       o: Value,
+       (v, o): (Thing, Value),
    ) -> Result<(), Error> {
        // Check that the table exists
-       ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
+       ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
        // Fetch the data from the store
        let key = thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
-       let val = ctx.tx_lock().await.get(key).await?;
+       let val = ctx.tx().get(key).await?;
        // Parse the data from the store
        let x = match val {
            Some(v) => Value::from(v),
@@ -260,23 +259,19 @@ impl<'a> Processor<'a> {
        Ok(())
    }

-   #[allow(clippy::too_many_arguments)]
    async fn process_relatable(
        &mut self,
        stk: &mut Stk,
        ctx: &Context<'_>,
        opt: &Options,
        stm: &Statement<'_>,
-       f: Thing,
-       v: Thing,
-       w: Thing,
-       o: Option<Value>,
+       (f, v, w, o): (Thing, Thing, Thing, Option<Value>),
    ) -> Result<(), Error> {
        // Check that the table exists
-       ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
+       ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
        // Fetch the data from the store
        let key = thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
-       let val = ctx.tx_lock().await.get(key).await?;
+       let val = ctx.tx().get(key).await?;
        // Parse the data from the store
        let x = match val {
            Some(v) => Value::from(v),
@@ -303,33 +298,23 @@ impl<'a> Processor<'a> {
        stm: &Statement<'_>,
        v: &Table,
    ) -> Result<(), Error> {
+       // Get the transaction
+       let txn = ctx.tx();
        // Check that the table exists
-       ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, v, opt.strict).await?;
+       txn.check_ns_db_tb(opt.ns()?, opt.db()?, v, opt.strict).await?;
        // Prepare the start and end keys
        let beg = thing::prefix(opt.ns()?, opt.db()?, v);
        let end = thing::suffix(opt.ns()?, opt.db()?, v);
-       // Loop until no more keys
-       let mut next_page = Some(ScanPage::from(beg..end));
-       while let Some(page) = next_page {
+       // Create a new iterable range
+       let mut stream = txn.stream(beg..end);
+       // Loop until no more entries
+       while let Some(res) = stream.next().await {
            // Check if the context is finished
            if ctx.is_done() {
                break;
            }
-           // Get the next batch of key-value entries
-           let res = ctx.tx_lock().await.scan_paged(page, PROCESSOR_BATCH_SIZE).await?;
-           next_page = res.next_page;
-           let res = res.values;
-           // If no results then break
-           if res.is_empty() {
-               break;
-           }
-           // Loop over results
-           for (k, v) in res.into_iter() {
-               // Check the context
-               if ctx.is_done() {
-                   break;
-               }
            // Parse the data from the store
+           let (k, v) = res?;
            let key: thing::Thing = (&k).into();
            let val: Value = (&v).into();
            let rid = Thing::from((key.tb, key.id));
@@ -343,8 +328,6 @@ impl<'a> Processor<'a> {
            };
            self.process(stk, ctx, opt, stm, pro).await?;
        }
-           continue;
-       }
        // Everything ok
        Ok(())
    }
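All three scan loops in this file now share one shape: ask the transaction for a stream over a key range and pull items with `StreamExt::next`, instead of manually paging through `ScanPage` results. Batching still happens inside the stream (driven by the new `MAX_STREAM_BATCH_SIZE` tunable), but callers see one key-value pair at a time. Schematically, with a plain `futures` stream standing in for `txn.stream(..)` (the stand-in and the tokio runtime are assumptions for the sake of a runnable snippet):

use futures::{stream, StreamExt};

type Key = Vec<u8>;
type Val = Vec<u8>;

// Stand-in for Transaction::stream(range): the real stream fetches keys
// from the datastore in batches behind the scenes.
fn stream_range() -> impl futures::Stream<Item = Result<(Key, Val), String>> {
    stream::iter((0u8..5).map(|i| Ok((vec![i], vec![i * 2]))))
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let mut stream = Box::pin(stream_range());
    // Loop until no more entries.
    while let Some(res) = stream.next().await {
        let (k, v) = res?;
        println!("key={k:?} val={v:?}");
    }
    Ok(())
}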
@@ -357,8 +340,10 @@ impl<'a> Processor<'a> {
        stm: &Statement<'_>,
        v: Range,
    ) -> Result<(), Error> {
+       // Get the transaction
+       let txn = ctx.tx();
        // Check that the table exists
-       ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
+       txn.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?;
        // Prepare the range start key
        let beg = match &v.beg {
            Bound::Unbounded => thing::prefix(opt.ns()?, opt.db()?, &v.tb),
@@ -379,28 +364,16 @@ impl<'a> Processor<'a> {
                key
            }
        };
-       // Loop until no more keys
-       let mut next_page = Some(ScanPage::from(beg..end));
-       while let Some(page) = next_page {
+       // Create a new iterable range
+       let mut stream = txn.stream(beg..end);
+       // Loop until no more entries
+       while let Some(res) = stream.next().await {
            // Check if the context is finished
            if ctx.is_done() {
                break;
            }
-           // Get the next batch of key-value entries
-           let res = ctx.tx_lock().await.scan_paged(page, PROCESSOR_BATCH_SIZE).await?;
-           next_page = res.next_page;
-           let res = res.values;
-           // If there are key-value entries then fetch them
-           if res.is_empty() {
-               break;
-           }
-           // Loop over results
-           for (k, v) in res.into_iter() {
-               // Check the context
-               if ctx.is_done() {
-                   break;
-               }
            // Parse the data from the store
+           let (k, v) = res?;
            let key: thing::Thing = (&k).into();
            let val: Value = (&v).into();
            let rid = Thing::from((key.tb, key.id));
@@ -414,8 +387,6 @@ impl<'a> Processor<'a> {
            };
            self.process(stk, ctx, opt, stm, pro).await?;
        }
-           continue;
-       }
        // Everything ok
        Ok(())
    }
@@ -496,34 +467,27 @@ impl<'a> Processor<'a> {
                .collect::<Vec<_>>(),
            },
        };
-       //
-       for (beg, end) in keys.iter() {
-           // Loop until no more keys
-           let mut next_page = Some(ScanPage::from(beg.clone()..end.clone()));
-           while let Some(page) = next_page {
+       // Get the transaction
+       let txn = ctx.tx();
+       // Check that the table exists
+       txn.check_ns_db_tb(opt.ns()?, opt.db()?, tb, opt.strict).await?;
+       // Loop over the chosen edge types
+       for (beg, end) in keys.into_iter() {
+           // Create a new iterable range
+           let mut stream = txn.stream(beg..end);
+           // Loop until no more entries
+           while let Some(res) = stream.next().await {
                // Check if the context is finished
                if ctx.is_done() {
                    break;
                }
-               // Get the next batch key-value entries
-               let res = ctx.tx_lock().await.scan_paged(page, PROCESSOR_BATCH_SIZE).await?;
-               next_page = res.next_page;
-               let res = res.values;
-               // If there are key-value entries then fetch them
-               if res.is_empty() {
-                   break;
-               }
-               // Loop over results
-               for (k, _) in res.into_iter() {
-                   // Check the context
-                   if ctx.is_done() {
-                       break;
-                   }
+               // Parse the key from the result
+               let key = res?.0;
                // Parse the data from the store
-               let gra: graph::Graph = graph::Graph::decode(&k)?;
+               let gra: graph::Graph = graph::Graph::decode(&key)?;
                // Fetch the data from the store
                let key = thing::new(opt.ns()?, opt.db()?, gra.ft, &gra.fk);
-               let val = ctx.tx_lock().await.get(key).await?;
+               let val = txn.get(key).await?;
                let rid = Thing::from((gra.ft, gra.fk));
                // Parse the data from the store
                let val = Operable::Value(match val {
@@ -538,8 +502,6 @@ impl<'a> Processor<'a> {
                };
                self.process(stk, ctx, opt, stm, pro).await?;
            }
-               continue;
-           }
        }
        // Everything ok
        Ok(())
@@ -555,7 +517,7 @@ impl<'a> Processor<'a> {
        irf: IteratorRef,
    ) -> Result<(), Error> {
        // Check that the table exists
-       ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &table.0, opt.strict).await?;
+       ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &table.0, opt.strict).await?;
        if let Some(exe) = ctx.get_query_executor() {
            if let Some(mut iterator) = exe.new_iterator(opt, irf).await? {
                // Get the first batch
@@ -592,9 +554,9 @@ impl<'a> Processor<'a> {
        opt: &Options,
        iterator: &mut ThingIterator,
    ) -> Result<Vec<Processed>, Error> {
-       let mut tx = ctx.tx_lock().await;
+       let txn = ctx.tx();
        let records: Vec<CollectorRecord> =
-           iterator.next_batch(ctx, &mut tx, PROCESSOR_BATCH_SIZE).await?;
+           iterator.next_batch(ctx, &txn, *NORMAL_FETCH_SIZE).await?;
        let mut to_process = Vec::with_capacity(records.len());
        for r in records {
            let v = if let Some(v) = r.2 {
@@ -602,7 +564,7 @@ impl<'a> Processor<'a> {
                v
            } else {
                // Otherwise we have to fetch the record
-               Iterable::fetch_thing(&mut tx, opt, &r.0).await?
+               Iterable::fetch_thing(&txn, opt, &r.0).await?
            };
            let p = Processed {
                rid: Some(r.0),
@@ -618,14 +580,14 @@ impl<'a> Processor<'a> {
impl Iterable {
    /// Returns the value from the store, or Value::None if the value does not exist.
    pub(crate) async fn fetch_thing(
-       tx: &mut kvs::Transaction,
+       txn: &Transaction,
        opt: &Options,
        thg: &Thing,
    ) -> Result<Value, Error> {
        // Fetch the data from the store
        let key = thing::new(opt.ns()?, opt.db()?, &thg.tb, &thg.id);
        // Fetch and parse the data from the store
-       let val = tx.get(key).await?.map(Value::from).unwrap_or(Value::None);
+       let val = txn.get(key).await?.map(Value::from).unwrap_or(Value::None);
        // Return the result
        Ok(val)
    }
@@ -8,7 +8,8 @@ use std::sync::Arc;
pub async fn mock<'a>() -> (Context<'a>, Options) {
    let opt = Options::default().with_auth(Arc::new(Auth::for_root(Role::Owner)));
    let kvs = Datastore::new("memory").await.unwrap();
-   let txn = kvs.transaction(Write, Optimistic).await.unwrap().rollback_and_ignore().enclose();
-   let ctx = Context::default().set_transaction(txn);
+   let txn = kvs.transaction(Write, Optimistic).await.unwrap();
+   let txn = txn.rollback_and_ignore().await.enclose();
+   let ctx = Context::default().with_transaction(txn);
    (ctx, opt)
}
@@ -1,5 +0,0 @@
-use crate::kvs;
-use futures::lock::Mutex;
-use std::sync::Arc;
-
-pub(crate) type Transaction = Arc<Mutex<kvs::Transaction>>;
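This deleted file is the heart of the refactor: `dbs::Transaction` used to be a whole-transaction `Arc<Mutex<kvs::Transaction>>`, so every read locked out every other reader. The refactored `kvs::Transaction` is shared as a plain `Arc` and exposes reads on `&self`, taking an internal lock only where mutation requires it (hence the `txn.lock().await.record_change(..)` call sites elsewhere in this diff). In miniature, under those assumed semantics and with dummy types:

use std::sync::Arc;

struct Transaction; // stand-in for the refactored kvs::Transaction

impl Transaction {
    // Reads take &self, so concurrent readers never contend on a
    // transaction-wide mutex.
    async fn get(&self, _key: &[u8]) -> Option<Vec<u8>> {
        None
    }
}

#[tokio::main]
async fn main() {
    let txn = Arc::new(Transaction);
    // Cheap handle clones replace Arc<Mutex<..>> plus .lock().await
    // at every call site.
    let a = Arc::clone(&txn);
    let b = Arc::clone(&txn);
    let (r1, r2) = tokio::join!(a.get(b"one"), b.get(b"two"));
    assert!(r1.is_none() && r2.is_none());
}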
@@ -15,23 +15,20 @@ impl<'a> Document<'a> {
 		if !self.changed() {
 			return Ok(());
 		}
-		//
+		// Get the table
 		let tb = self.tb(ctx, opt).await?;
-		// Claim transaction
-		let mut run = ctx.tx_lock().await;
+		// Get the transaction
+		let txn = ctx.tx();
 		// Get the database and the table for the record
-		let db = run.add_and_cache_db(opt.ns()?, opt.db()?, opt.strict).await?;
+		let db = txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?;
 		// Check if changefeeds are enabled
 		if let Some(cf) = db.as_ref().changefeed.as_ref().or(tb.as_ref().changefeed.as_ref()) {
-			// Get the arguments
-			let tb = tb.name.as_str();
-			let id = self.id.as_ref().unwrap();
 			// Create the changefeed entry
-			run.record_change(
+			txn.lock().await.record_change(
 				opt.ns()?,
 				opt.db()?,
-				tb,
-				id,
+				tb.name.as_str(),
+				self.id.unwrap(),
 				self.initial.doc.clone(),
 				self.current.doc.clone(),
 				cf.store_diff,
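Note the shape of the mutating call above: reads go through the shared handle directly, and the mutex is taken inline, only around `record_change`. A runnable miniature of that locking style, with stand-in types:

```rust
// Stand-in types; only the lock-per-call pattern mirrors the diff above.
use futures::executor::block_on;
use futures::lock::Mutex;
use std::sync::Arc;

#[derive(Default)]
struct Inner {
    changes: Vec<String>,
}

impl Inner {
    fn record_change(&mut self, table: &str, id: &str) {
        self.changes.push(format!("{table}:{id}"));
    }
}

fn main() {
    block_on(async {
        let txn = Arc::new(Mutex::new(Inner::default()));
        // The guard lives only for this one statement, so other tasks
        // holding a clone of the handle are blocked as briefly as possible.
        txn.lock().await.record_change("person", "one");
        assert_eq!(txn.lock().await.changes.len(), 1);
    });
}
```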
@@ -52,7 +52,7 @@ impl<'a> Document<'a> {
 			Err(Error::RetryWithId(v)) => {
 				// Fetch the data from the store
 				let key = crate::key::thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
-				let val = ctx.tx_lock().await.get(key).await?;
+				let val = ctx.tx().get(key).await?;
 				// Parse the data from the store
 				let val = match val {
 					Some(v) => Value::from(v),
@@ -93,25 +93,6 @@ impl<'a> Document<'a> {
 		}
 	}

-	/// Create a new document that is not going through the standard lifecycle of documents
-	///
-	/// This allows for it to be crafted without needing statements to operate on it
-	#[doc(hidden)]
-	pub fn new_artificial(
-		id: Option<&'a Thing>,
-		ir: Option<&'a IteratorRecord>,
-		val: Cow<'a, Value>,
-		initial: Cow<'a, Value>,
-		extras: Workable,
-	) -> Self {
-		Document {
-			id,
-			extras,
-			current: CursorDoc::new(id, ir, val),
-			initial: CursorDoc::new(id, ir, initial),
-		}
-	}
-
 	/// Get the current document, as it is being modified
 	#[allow(unused)]
 	pub(crate) fn current_doc(&self) -> &Value {
@@ -136,23 +117,18 @@ impl<'a> Document<'a> {
 		self.initial.doc.is_none() && self.current.doc.is_some()
 	}

-	/// Check if document is being deleted
-	pub fn is_delete(&self) -> bool {
-		self.current.doc.is_none()
-	}
-
 	/// Get the table for this document
 	pub async fn tb(
 		&self,
 		ctx: &Context<'a>,
 		opt: &Options,
 	) -> Result<Arc<DefineTableStatement>, Error> {
-		// Claim transaction
-		let mut run = ctx.tx_lock().await;
+		// Get transaction
+		let txn = ctx.tx();
 		// Get the record id
 		let rid = self.id.as_ref().unwrap();
 		// Get the table definition
-		let tb = run.get_and_cache_tb(opt.ns()?, opt.db()?, &rid.tb).await;
+		let tb = txn.get_tb(opt.ns()?, opt.db()?, &rid.tb).await;
 		// Return the table or attempt to define it
 		match tb {
 			// The table doesn't exist
@@ -162,9 +138,7 @@ impl<'a> Document<'a> {
 				// Allowed to run?
 				opt.is_allowed(Action::Edit, ResourceKind::Table, &Base::Db)?;
 				// We can create the table automatically
-				run.add_and_cache_ns(opt.ns()?, opt.strict).await?;
-				run.add_and_cache_db(opt.ns()?, opt.db()?, opt.strict).await?;
-				run.add_and_cache_tb(opt.ns()?, opt.db()?, &rid.tb, opt.strict).await
+				txn.ensure_ns_db_tb(opt.ns()?, opt.db()?, &rid.tb, opt.strict).await
 			}
 			// There was an error
 			Err(err) => Err(err),
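The three `add_and_cache_*` calls collapse into a single `ensure_ns_db_tb`. A hedged sketch of the assumed get-or-create semantics, including the strict-mode refusal (`opt.strict` in the hunk above):

```rust
// Assumed semantics: ensure_ns_db_tb gets-or-creates all three levels at
// once, and strict mode refuses to create anything implicitly.
use std::collections::BTreeSet;

#[derive(Default)]
struct Catalog {
    ns: BTreeSet<String>,
    db: BTreeSet<(String, String)>,
    tb: BTreeSet<(String, String, String)>,
}

impl Catalog {
    fn ensure_ns_db_tb(&mut self, ns: &str, db: &str, tb: &str, strict: bool) -> Result<(), String> {
        let key = (ns.to_string(), db.to_string(), tb.to_string());
        // In strict mode nothing is created implicitly.
        if strict && !self.tb.contains(&key) {
            return Err(format!("The table '{tb}' does not exist"));
        }
        // Otherwise get-or-create namespace, database, and table in one pass.
        self.ns.insert(ns.to_string());
        self.db.insert((ns.to_string(), db.to_string()));
        self.tb.insert(key);
        Ok(())
    }
}

fn main() {
    let mut cat = Catalog::default();
    assert!(cat.ensure_ns_db_tb("test", "test", "person", true).is_err());
    assert!(cat.ensure_ns_db_tb("test", "test", "person", false).is_ok());
}
```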
@@ -181,7 +155,7 @@ impl<'a> Document<'a> {
 		// Get the record id
 		let id = self.id.as_ref().unwrap();
 		// Get the table definitions
-		ctx.tx_lock().await.all_tb_views(opt.ns()?, opt.db()?, &id.tb).await
+		ctx.tx().all_tb_views(opt.ns()?, opt.db()?, &id.tb).await
 	}
 	/// Get the events for this document
 	pub async fn ev(
@@ -192,7 +166,7 @@ impl<'a> Document<'a> {
 		// Get the record id
 		let id = self.id.as_ref().unwrap();
 		// Get the event definitions
-		ctx.tx_lock().await.all_tb_events(opt.ns()?, opt.db()?, &id.tb).await
+		ctx.tx().all_tb_events(opt.ns()?, opt.db()?, &id.tb).await
 	}
 	/// Get the fields for this document
 	pub async fn fd(
@@ -203,7 +177,7 @@ impl<'a> Document<'a> {
 		// Get the record id
 		let id = self.id.as_ref().unwrap();
 		// Get the field definitions
-		ctx.tx_lock().await.all_tb_fields(opt.ns()?, opt.db()?, &id.tb).await
+		ctx.tx().all_tb_fields(opt.ns()?, opt.db()?, &id.tb).await
 	}
 	/// Get the indexes for this document
 	pub async fn ix(
@@ -214,7 +188,7 @@ impl<'a> Document<'a> {
 		// Get the record id
 		let id = self.id.as_ref().unwrap();
 		// Get the index definitions
-		ctx.tx_lock().await.all_tb_indexes(opt.ns()?, opt.db()?, &id.tb).await
+		ctx.tx().all_tb_indexes(opt.ns()?, opt.db()?, &id.tb).await
 	}
 	// Get the lives for this document
 	pub async fn lv(
@@ -225,6 +199,6 @@ impl<'a> Document<'a> {
 		// Get the record id
 		let id = self.id.as_ref().unwrap();
 		// Get the table definition
-		ctx.tx_lock().await.all_tb_lives(opt.ns()?, opt.db()?, &id.tb).await
+		ctx.tx().all_tb_lives(opt.ns()?, opt.db()?, &id.tb).await
 	}
 }
@@ -21,8 +21,10 @@ impl<'a> Document<'a> {
 		if self.tb(ctx, opt).await?.drop {
 			return Ok(());
 		}
-		// Claim transaction
-		let mut run = ctx.tx_lock().await;
+		// Get the transaction
+		let txn = ctx.tx();
+		// Lock the transaction
+		let mut txn = txn.lock().await;
 		// Get the record id
 		let rid = self.id.as_ref().unwrap();
 		// Store the record edges
@@ -31,16 +33,16 @@ impl<'a> Document<'a> {
 			let (ref o, ref i) = (Dir::Out, Dir::In);
 			// Store the left pointer edge
 			let key = crate::key::graph::new(opt.ns()?, opt.db()?, &l.tb, &l.id, o, rid);
-			run.set(key, vec![]).await?;
+			txn.set(key, vec![]).await?;
 			// Store the left inner edge
 			let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, i, l);
-			run.set(key, vec![]).await?;
+			txn.set(key, vec![]).await?;
 			// Store the right inner edge
 			let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, o, r);
-			run.set(key, vec![]).await?;
+			txn.set(key, vec![]).await?;
 			// Store the right pointer edge
 			let key = crate::key::graph::new(opt.ns()?, opt.db()?, &r.tb, &r.id, i, rid);
-			run.set(key, vec![]).await?;
+			txn.set(key, vec![]).await?;
 			// Store the edges on the record
 			self.current.doc.to_mut().put(&*EDGE, Value::Bool(true));
 			self.current.doc.to_mut().put(&*IN, l.clone().into());
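Unlike the single-call sites, the edge writes above take the lock once and then reuse the guard for all four `set` calls. In miniature:

```rust
// Keys are plain byte strings here; the real code builds typed graph keys.
use futures::executor::block_on;
use futures::lock::Mutex;
use std::collections::BTreeMap;
use std::sync::Arc;

fn main() {
    block_on(async {
        let txn = Arc::new(Mutex::new(BTreeMap::<Vec<u8>, Vec<u8>>::new()));
        // Lock once, then issue all four edge writes against the guard.
        let mut guard = txn.lock().await;
        for key in ["l->out", "rid<-in:l", "rid->out:r", "r<-in"] {
            guard.insert(key.as_bytes().to_vec(), vec![]);
        }
        assert_eq!(guard.len(), 4);
    });
}
```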
@@ -280,13 +280,16 @@ impl<'a> IndexOperation<'a> {
 	}

 	async fn index_unique(&mut self, ctx: &Context<'_>) -> Result<(), Error> {
-		let mut run = ctx.tx_lock().await;
+		// Get the transaction
+		let txn = ctx.tx();
+		// Lock the transaction
+		let mut txn = txn.lock().await;
 		// Delete the old index data
 		if let Some(o) = self.o.take() {
 			let i = Indexable::new(o, self.ix);
 			for o in i {
 				let key = self.get_unique_index_key(&o)?;
-				match run.delc(key, Some(self.rid)).await {
+				match txn.delc(key, Some(self.rid)).await {
 					Err(Error::TxConditionNotMet) => Ok(()),
 					Err(e) => Err(e),
 					Ok(v) => Ok(v),
@@ -299,9 +302,9 @@ impl<'a> IndexOperation<'a> {
 			for n in i {
 				if !n.is_all_none_or_null() {
 					let key = self.get_unique_index_key(&n)?;
-					if run.putc(key, self.rid, None).await.is_err() {
+					if txn.putc(key, self.rid, None).await.is_err() {
 						let key = self.get_unique_index_key(&n)?;
-						let val = run.get(key).await?.unwrap();
+						let val = txn.get(key).await?.unwrap();
 						let rid: Thing = val.into();
 						return self.err_index_exists(rid, n);
 					}
@@ -312,13 +315,16 @@ impl<'a> IndexOperation<'a> {
 	}

 	async fn index_non_unique(&mut self, ctx: &Context<'_>) -> Result<(), Error> {
-		let mut run = ctx.tx_lock().await;
+		// Get the transaction
+		let txn = ctx.tx();
+		// Lock the transaction
+		let mut txn = txn.lock().await;
 		// Delete the old index data
 		if let Some(o) = self.o.take() {
 			let i = Indexable::new(o, self.ix);
 			for o in i {
 				let key = self.get_non_unique_index_key(&o)?;
-				match run.delc(key, Some(self.rid)).await {
+				match txn.delc(key, Some(self.rid)).await {
 					Err(Error::TxConditionNotMet) => Ok(()),
 					Err(e) => Err(e),
 					Ok(v) => Ok(v),
@@ -330,9 +336,9 @@ impl<'a> IndexOperation<'a> {
 			let i = Indexable::new(n, self.ix);
 			for n in i {
 				let key = self.get_non_unique_index_key(&n)?;
-				if run.putc(key, self.rid, None).await.is_err() {
+				if txn.putc(key, self.rid, None).await.is_err() {
 					let key = self.get_non_unique_index_key(&n)?;
-					let val = run.get(key).await?.unwrap();
+					let val = txn.get(key).await?.unwrap();
 					let rid: Thing = val.into();
 					return self.err_index_exists(rid, n);
 				}
@@ -376,20 +382,19 @@ impl<'a> IndexOperation<'a> {
 		ctx: &Context<'_>,
 		p: &MTreeParams,
 	) -> Result<(), Error> {
-		let mut tx = ctx.tx_lock().await;
+		let txn = ctx.tx();
 		let ikb = IndexKeyBase::new(self.opt.ns()?, self.opt.db()?, self.ix)?;
 		let mut mt =
-			MTreeIndex::new(ctx.get_index_stores(), &mut tx, ikb, p, TransactionType::Write)
-				.await?;
+			MTreeIndex::new(ctx.get_index_stores(), &txn, ikb, p, TransactionType::Write).await?;
 		// Delete the old index data
 		if let Some(o) = self.o.take() {
-			mt.remove_document(stk, &mut tx, self.rid, &o).await?;
+			mt.remove_document(stk, &txn, self.rid, &o).await?;
 		}
 		// Create the new index data
 		if let Some(n) = self.n.take() {
-			mt.index_document(stk, &mut tx, self.rid, &n).await?;
+			mt.index_document(stk, &txn, self.rid, &n).await?;
 		}
-		mt.finish(&mut tx).await
+		mt.finish(&txn).await
 	}

 	async fn index_hnsw(&mut self, ctx: &Context<'_>, p: &HnswParams) -> Result<(), Error> {
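Both index paths rely on the conditional operations `putc` and `delc`. The following model captures their assumed semantics (`putc(key, val, None)` succeeds only if the key is absent; `delc(key, Some(v))` deletes only if the stored value still matches), which is what makes the unique-index check above safe under concurrent writers:

```rust
// Assumed semantics of the conditional KV operations, modelled standalone.
use std::collections::BTreeMap;

#[derive(Default)]
struct Txn {
    kv: BTreeMap<String, String>,
}

impl Txn {
    fn putc(&mut self, key: &str, val: &str, chk: Option<&str>) -> Result<(), &'static str> {
        let cur = self.kv.get(key).cloned();
        match (cur.as_deref(), chk) {
            (None, None) => {}
            (Some(v), Some(c)) if v == c => {}
            _ => return Err("TxKeyAlreadyExists"),
        }
        self.kv.insert(key.into(), val.into());
        Ok(())
    }
    fn delc(&mut self, key: &str, chk: Option<&str>) -> Result<(), &'static str> {
        let cur = self.kv.get(key).cloned();
        if let (Some(v), Some(c)) = (cur.as_deref(), chk) {
            if v != c {
                return Err("TxConditionNotMet");
            }
        }
        self.kv.remove(key);
        Ok(())
    }
}

fn main() {
    let mut txn = Txn::default();
    // The first writer claims the unique index entry...
    assert!(txn.putc("idx:email=x@y.z", "person:1", None).is_ok());
    // ...and a second writer indexing the same value is rejected.
    assert!(txn.putc("idx:email=x@y.z", "person:2", None).is_err());
    // Deletion is guarded by the expected record id.
    assert!(txn.delc("idx:email=x@y.z", Some("person:1")).is_ok());
}
```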
@@ -6,19 +6,15 @@ use crate::dbs::Statement;
 use crate::doc::CursorDoc;
 use crate::doc::Document;
 use crate::err::Error;
-use crate::fflags::FFLAGS;
 use crate::sql::paths::AC;
 use crate::sql::paths::META;
 use crate::sql::paths::RD;
 use crate::sql::paths::TK;
 use crate::sql::permission::Permission;
-use crate::sql::statements::LiveStatement;
 use crate::sql::Value;
-use channel::Sender;
 use reblessive::tree::Stk;
 use std::ops::Deref;
 use std::sync::Arc;
-use uuid::Uuid;

 impl<'a> Document<'a> {
 	pub async fn lives(
@@ -28,27 +24,145 @@ impl<'a> Document<'a> {
 		opt: &Options,
 		stm: &Statement<'_>,
 	) -> Result<(), Error> {
+		// Check import
+		if opt.import {
+			return Ok(());
+		}
 		// Check if changed
 		if !self.changed() {
 			return Ok(());
 		}
-		// Under the new mechanism, live query notifications only come from polling the change feed
-		// This check can be moved up the call stack, as this entire method will become unnecessary
-		if FFLAGS.change_feed_live_queries.enabled() {
-			return Ok(());
-		}
 		// Check if we can send notifications
 		if let Some(chn) = &opt.sender {
+			// Get all live queries for this table
+			let lvs = self.lv(ctx, opt).await?;
 			// Loop through all index statements
-			let lq_stms = self.lv(ctx, opt).await?;
-			let borrows = lq_stms.iter().collect::<Vec<_>>();
-			self.check_lqs_and_send_notifications(stk, ctx, opt, stm, borrows.as_slice(), chn)
-				.await?;
+			for lv in lvs.iter() {
+				// Create a new statement
+				let lq = Statement::from(lv);
+				// Get the event action
+				let met = if stm.is_delete() {
+					Value::from("DELETE")
+				} else if self.is_new() {
+					Value::from("CREATE")
+				} else {
+					Value::from("UPDATE")
+				};
+				// Check if this is a delete statement
+				let doc = match stm.is_delete() {
+					true => &self.initial,
+					false => &self.current,
+				};
+				// Ensure that a session exists on the LIVE query
+				let sess = match lv.session.as_ref() {
+					Some(v) => v,
+					None => continue,
+				};
+				// Ensure that auth info exists on the LIVE query
+				let auth = match lv.auth.clone() {
+					Some(v) => v,
+					None => continue,
+				};
+				// We need to create a new context which we will
+				// use for processing this LIVE query statement.
+				// This ensures that we are using the session
+				// of the user who created the LIVE query.
+				let mut lqctx = Context::background();
+				// Set the current transaction on the new LIVE
+				// query context to prevent unreachable behaviour
+				// and ensure that queries can be executed.
+				lqctx.set_transaction(ctx.tx());
+				// Add the session params to this LIVE query, so
+				// that queries can use these within field
+				// projections and WHERE clauses.
+				lqctx.add_value("access", sess.pick(AC.as_ref()));
+				lqctx.add_value("auth", sess.pick(RD.as_ref()));
+				lqctx.add_value("token", sess.pick(TK.as_ref()));
+				lqctx.add_value("session", sess);
+				// Add $before, $after, $value, and $event params
+				// to this LIVE query so the user can use these
+				// within field projections and WHERE clauses.
+				lqctx.add_value("event", met);
+				lqctx.add_value("value", self.current.doc.deref());
+				lqctx.add_value("after", self.current.doc.deref());
+				lqctx.add_value("before", self.initial.doc.deref());
+				// We need to create a new options which we will
+				// use for processing this LIVE query statement.
+				// This ensures that we are using the auth data
+				// of the user who created the LIVE query.
+				let lqopt = opt.new_with_perms(true).with_auth(Arc::from(auth));
+				// First of all, let's check to see if the WHERE
+				// clause of the LIVE query is matched by this
+				// document. If it is then we can continue.
+				match self.lq_check(stk, &lqctx, &lqopt, &lq, doc).await {
+					Err(Error::Ignore) => continue,
+					Err(e) => return Err(e),
+					Ok(_) => (),
+				}
+				// Secondly, let's check to see if any PERMISSIONS
+				// clause for this table allows this document to
+				// be viewed by the user who created this LIVE
+				// query. If it does, then we can continue.
+				match self.lq_allow(stk, &lqctx, &lqopt, &lq, doc).await {
+					Err(Error::Ignore) => continue,
+					Err(e) => return Err(e),
+					Ok(_) => (),
+				}
+				// Finally, let's check what type of statement
+				// caused this LIVE query to run, and send the
+				// relevant notification based on the statement.
+				if stm.is_delete() {
+					// Send a DELETE notification
+					if opt.id()? == lv.node.0 {
+						chn.send(Notification {
+							id: lv.id,
+							action: Action::Delete,
+							result: {
+								// Ensure futures are run
+								let lqopt: &Options = &lqopt.new_with_futures(true);
+								// Output the full document before any changes were applied
+								let mut value =
+									doc.doc.compute(stk, &lqctx, lqopt, Some(doc)).await?;
+								// Remove metadata fields on output
+								value.del(stk, &lqctx, lqopt, &*META).await?;
+								// Output result
+								value
+							},
+						})
 						.await?;
+					} else {
+						// TODO: Send to message broker
+					}
+				} else if self.is_new() {
+					// Send a CREATE notification
+					if opt.id()? == lv.node.0 {
+						chn.send(Notification {
+							id: lv.id,
+							action: Action::Create,
+							result: self.pluck(stk, &lqctx, &lqopt, &lq).await?,
+						})
+						.await?;
+					} else {
+						// TODO: Send to message broker
+					}
+				} else {
+					// Send a UPDATE notification
+					if opt.id()? == lv.node.0 {
+						chn.send(Notification {
+							id: lv.id,
+							action: Action::Update,
+							result: self.pluck(stk, &lqctx, &lqopt, &lq).await?,
+						})
+						.await?;
+					} else {
+						// TODO: Send to message broker
+					}
+				};
+			}
 		}
 		// Carry on
 		Ok(())
 	}

 	/// Check the WHERE clause for a LIVE query
 	async fn lq_check(
 		&self,
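The notification action in the new inline loop is derived from the statement type and the document state. This runnable condensation mirrors the branch logic of the hunk above exactly:

```rust
// DELETE wins, then CREATE if the document had no initial version,
// otherwise UPDATE; this is the `met` computation from the diff above.
fn action(is_delete: bool, is_new: bool) -> &'static str {
    if is_delete {
        "DELETE"
    } else if is_new {
        "CREATE"
    } else {
        "UPDATE"
    }
}

fn main() {
    assert_eq!(action(true, false), "DELETE");
    assert_eq!(action(false, true), "CREATE");
    assert_eq!(action(false, false), "UPDATE");
}
```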
@@ -69,7 +183,6 @@ impl<'a> Document<'a> {
 		// Carry on
 		Ok(())
 	}
-
 	/// Check any PERRMISSIONS for a LIVE query
 	async fn lq_allow(
 		&self,
@@ -100,176 +213,4 @@ impl<'a> Document<'a> {
 		// Carry on
 		Ok(())
 	}
-
-	/// Process live query for notifications
-	pub(crate) async fn check_lqs_and_send_notifications(
-		&self,
-		stk: &mut Stk,
-		ctx: &Context<'_>,
-		opt: &Options,
-		stm: &Statement<'_>,
-		live_statements: &[&LiveStatement],
-		sender: &Sender<Notification>,
-	) -> Result<(), Error> {
-		trace!(
-			"Called check_lqs_and_send_notifications with {} live statements",
-			live_statements.len()
-		);
-		// Technically this isnt the condition - the `lives` function is passing in the currently evaluated statement
-		// but the ds.rs invocation of this function is reconstructing this statement
-		let is_delete = match FFLAGS.change_feed_live_queries.enabled() {
-			true => self.is_delete(),
-			false => stm.is_delete(),
-		};
-		for lv in live_statements {
-			// Create a new statement
-			let lq = Statement::from(*lv);
-			// Get the event action
-			let evt = if stm.is_delete() {
-				Value::from("DELETE")
-			} else if self.is_new() {
-				Value::from("CREATE")
-			} else {
-				Value::from("UPDATE")
-			};
-			// Check if this is a delete statement
-			let doc = match is_delete {
-				true => &self.initial,
-				false => &self.current,
-			};
-			// Ensure that a session exists on the LIVE query
-			let sess = match lv.session.as_ref() {
-				Some(v) => v,
-				None => {
-					trace!("live query did not have a session, skipping");
-					continue;
-				}
-			};
-			// Ensure that auth info exists on the LIVE query
-			let auth = match lv.auth.clone() {
-				Some(v) => v,
-				None => {
-					trace!("live query did not have auth info, skipping");
-					continue;
-				}
-			};
-			// We need to create a new context which we will
-			// use for processing this LIVE query statement.
-			// This ensures that we are using the session
-			// of the user who created the LIVE query.
-			let lqctx = Context::background();
-			let mut lqctx =
-				lqctx.set_transaction(ctx.get_transaction().cloned().unwrap_or_else(|| {
-					unreachable!("Expected transaction to be available in parent context")
-				}));
-			lqctx.add_value("access", sess.pick(AC.as_ref()));
-			lqctx.add_value("auth", sess.pick(RD.as_ref()));
-			lqctx.add_value("token", sess.pick(TK.as_ref()));
-			lqctx.add_value("session", sess);
-			// We need to create a new options which we will
-			// use for processing this LIVE query statement.
-			// This ensures that we are using the auth data
-			// of the user who created the LIVE query.
-			let lqopt = opt.new_with_perms(true).with_auth(Arc::from(auth));
-			// Add $before, $after, $value, and $event params
-			// to this LIVE query so that user can use these
-			// within field projections and WHERE clauses.
-			lqctx.add_value("event", evt);
-			lqctx.add_value("value", self.current.doc.deref());
-			lqctx.add_value("after", self.current.doc.deref());
-			lqctx.add_value("before", self.initial.doc.deref());
-			// First of all, let's check to see if the WHERE
-			// clause of the LIVE query is matched by this
-			// document. If it is then we can continue.
-			match self.lq_check(stk, &lqctx, &lqopt, &lq, doc).await {
-				Err(Error::Ignore) => {
-					trace!("live query did not match the where clause, skipping");
-					continue;
-				}
-				Err(e) => return Err(e),
-				Ok(_) => (),
-			}
-			// Secondly, let's check to see if any PERMISSIONS
-			// clause for this table allows this document to
-			// be viewed by the user who created this LIVE
-			// query. If it does, then we can continue.
-			match self.lq_allow(stk, &lqctx, &lqopt, &lq, doc).await {
-				Err(Error::Ignore) => {
-					trace!("live query did not have permission to view this document, skipping");
-					continue;
-				}
-				Err(e) => return Err(e),
-				Ok(_) => (),
-			}
-			// Finally, let's check what type of statement
-			// caused this LIVE query to run, and send the
-			// relevant notification based on the statement.
-			let default_node_id = Uuid::default();
-			let node_id = opt.id().unwrap_or(default_node_id);
-			// This bool is deprecated since lq v2 on cf
-			// We check against defaults because clients register live queries with their local node id
-			// But the cf scanner uses the server node id, which is different from the client
-			let node_matches_live_query =
-				node_id == default_node_id || lv.node.0 == default_node_id || node_id == lv.node.0;
-			trace!(
-				"Notification node matches live query: {} ({} != {})",
-				node_matches_live_query,
-				node_id,
-				lv.node.0
-			);
-			if is_delete {
-				// Send a DELETE notification
-				if node_matches_live_query {
-					sender
-						.send(Notification {
-							id: lv.id,
-							action: Action::Delete,
-							result: {
-								// Ensure futures are run
-								let lqopt: &Options = &lqopt.new_with_futures(true);
-								// Output the full document before any changes were applied
-								let mut value =
-									doc.doc.compute(stk, &lqctx, lqopt, Some(doc)).await?;
-
-								// TODO(SUR-349): We need an empty object instead of Value::None for serialisation
-								if value.is_none() {
-									value = Value::Object(Default::default());
-								}
-								// Remove metadata fields on output
-								value.del(stk, &lqctx, lqopt, &*META).await?;
-								// Output result
-								value
-							},
-						})
-						.await?;
-				}
-			} else if self.is_new() {
-				// Send a CREATE notification
-				if node_matches_live_query {
-					trace!("Sending lq create notification");
-					sender
-						.send(Notification {
-							id: lv.id,
-							action: Action::Create,
-							result: self.pluck(stk, &lqctx, &lqopt, &lq).await?,
-						})
-						.await?;
-				}
-			} else {
-				// Send a UPDATE notification
-				if node_matches_live_query {
-					trace!("Sending lq update notification");
-					sender
-						.send(Notification {
-							id: lv.id,
-							action: Action::Update,
-							result: self.pluck(stk, &lqctx, &lqopt, &lq).await?,
-						})
-						.await?;
-				}
-			};
-		}
-		trace!("Ended check_lqs_and_send_notifications");
-		Ok(())
-	}
 }
@@ -46,7 +46,7 @@ impl<'a> Document<'a> {
 			Err(Error::RetryWithId(v)) => {
 				// Fetch the data from the store
 				let key = crate::key::thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id);
-				let val = ctx.tx_lock().await.get(key).await?;
+				let val = ctx.tx().get(key).await?;
 				// Parse the data from the store
 				let val = match val {
 					Some(v) => Value::from(v),
@@ -25,13 +25,15 @@ impl<'a> Document<'a> {
 		if !self.changed() {
 			return Ok(());
 		}
-		// Claim transaction
-		let mut run = ctx.tx_lock().await;
+		// Get the transaction
+		let txn = ctx.tx();
+		// Lock the transaction
+		let mut txn = txn.lock().await;
 		// Get the record id
 		if let Some(rid) = self.id {
 			// Purge the record data
 			let key = crate::key::thing::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id);
-			run.del(key).await?;
+			txn.del(key).await?;
 			// Purge the record edges
 			match (
 				self.initial.doc.pick(&*EDGE),
@@ -43,20 +45,20 @@ impl<'a> Document<'a> {
 					let (ref o, ref i) = (Dir::Out, Dir::In);
 					// Purge the left pointer edge
 					let key = crate::key::graph::new(opt.ns()?, opt.db()?, &l.tb, &l.id, o, rid);
-					run.del(key).await?;
+					txn.del(key).await?;
 					// Purge the left inner edge
 					let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, i, l);
-					run.del(key).await?;
+					txn.del(key).await?;
 					// Purge the right inner edge
 					let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, o, r);
-					run.del(key).await?;
+					txn.del(key).await?;
 					// Purge the right pointer edge
 					let key = crate::key::graph::new(opt.ns()?, opt.db()?, &r.tb, &r.id, i, rid);
-					run.del(key).await?;
+					txn.del(key).await?;
 				}
 				_ => {
 					// Release the transaction
-					drop(run);
+					drop(txn);
 					// Setup the delete statement
 					let stm = DeleteStatement {
 						what: Values(vec![Value::from(Edges {
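The `drop(txn)` in the fallback arm above matters: the nested DELETE statement needs the transaction again, and an async mutex is not reentrant, so holding the guard across that call would never complete. A small demonstration of the release-then-relock pattern:

```rust
// Stand-in types; the point is releasing the guard before re-locking.
use futures::executor::block_on;
use futures::lock::Mutex;
use std::sync::Arc;

async fn run_nested(txn: &Arc<Mutex<Vec<String>>>) {
    // The nested statement takes the lock itself.
    txn.lock().await.push("DELETE person->likes".into());
}

fn main() {
    block_on(async {
        let txn = Arc::new(Mutex::new(Vec::new()));
        let guard = txn.lock().await;
        drop(guard); // release before the nested statement re-locks
        run_nested(&txn).await;
        assert_eq!(txn.lock().await.len(), 1);
    });
}
```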
@@ -3,7 +3,6 @@ use crate::dbs::Options;
 use crate::dbs::Statement;
 use crate::doc::Document;
 use crate::err::Error;
-use crate::key::key_req::KeyRequirements;

 impl<'a> Document<'a> {
 	pub async fn store(
@@ -20,18 +19,18 @@ impl<'a> Document<'a> {
 		if self.tb(ctx, opt).await?.drop {
 			return Ok(());
 		}
-		// Claim transaction
-		let mut run = ctx.tx_lock().await;
+		// Get the transaction
+		let txn = ctx.tx();
 		// Get the record id
 		let rid = self.id.as_ref().unwrap();
 		// Store the record data
 		let key = crate::key::thing::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id);
-		//
+		// Match the statement type
 		match stm {
 			// This is a CREATE statement so try to insert the key
-			Statement::Create(_) => match run.put(key.key_category(), key, self).await {
+			Statement::Create(_) => match txn.put(key, self).await {
 				// The key already exists, so return an error
-				Err(Error::TxKeyAlreadyExistsCategory(_)) => Err(Error::RecordExists {
+				Err(Error::TxKeyAlreadyExists) => Err(Error::RecordExists {
 					thing: rid.to_string(),
 				}),
 				// Return any other received error
@@ -40,7 +39,7 @@ impl<'a> Document<'a> {
 				Ok(v) => Ok(v),
 			},
 			// This is not a CREATE statement, so update the key
-			_ => run.set(key, self).await,
+			_ => txn.set(key, self).await,
 		}?;
 		// Carry on
 		Ok(())
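The CREATE arm above depends on put-versus-set semantics: `put` is assumed to fail with `TxKeyAlreadyExists` when the record key is present (which the document layer surfaces as `RecordExists`), while `set` overwrites unconditionally. In miniature:

```rust
// Standalone model of the insert-only `put` versus upsert `set` split.
use std::collections::BTreeMap;

#[derive(Default)]
struct Txn {
    kv: BTreeMap<String, String>,
}

impl Txn {
    fn put(&mut self, key: &str, val: &str) -> Result<(), &'static str> {
        if self.kv.contains_key(key) {
            return Err("TxKeyAlreadyExists");
        }
        self.kv.insert(key.into(), val.into());
        Ok(())
    }
    fn set(&mut self, key: &str, val: &str) {
        self.kv.insert(key.into(), val.into());
    }
}

fn main() {
    let mut txn = Txn::default();
    assert!(txn.put("person:1", "{}").is_ok());
    // A second CREATE of the same record id is rejected...
    assert!(txn.put("person:1", "{}").is_err());
    // ...whereas UPDATE-style writes go through `set` and overwrite.
    txn.set("person:1", "{ age: 1 }");
}
```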
@@ -1,7 +1,6 @@
 use crate::iam::Error as IamError;
 use crate::idx::ft::MatchRef;
 use crate::idx::trees::vector::SharedVector;
-use crate::key::error::KeyCategory;
 use crate::sql::idiom::Idiom;
 use crate::sql::index::Distance;
 use crate::sql::thing::Thing;
@@ -92,7 +91,6 @@ pub enum Error {

 	/// The key being inserted in the transaction already exists
 	#[error("The key being inserted already exists")]
-	#[deprecated(note = "Use TxKeyAlreadyExistsCategory")]
 	TxKeyAlreadyExists,

 	/// The key exceeds a limit set by the KV store
@@ -388,6 +386,12 @@ pub enum Error {
 		value: String,
 	},

+	/// The requested record does not exist
+	#[error("The record '{value}' does not exist")]
+	IdNotFound {
+		value: String,
+	},
+
 	#[error("Unsupported distance: {0}")]
 	UnsupportedDistance(Distance),

@@ -810,10 +814,6 @@ pub enum Error {
 	#[error("Auth token is missing the '{0}' claim")]
 	MissingTokenClaim(String),

-	/// The key being inserted in the transaction already exists
-	#[error("The key being inserted already exists: {0}")]
-	TxKeyAlreadyExistsCategory(KeyCategory),
-
 	/// The db is running without an available storage engine
 	#[error("The db is running without an available storage engine")]
 	MissingStorageEngine,
@@ -921,10 +921,6 @@ pub enum Error {
 	#[error("A node task has failed: {0}")]
 	NodeAgent(&'static str),

-	/// An error related to live query occurred
-	#[error("Failed to process Live Query: {0}")]
-	LiveQueryError(LiveQueryCause),
-
 	/// The supplied type could not be serialiazed into `sql::Value`
 	#[error("Serialization error: {0}")]
 	Serialization(String),
@@ -1041,9 +1037,7 @@ impl From<regex::Error> for Error {
 impl From<echodb::err::Error> for Error {
 	fn from(e: echodb::err::Error) -> Error {
 		match e {
-			echodb::err::Error::KeyAlreadyExists => {
-				Error::TxKeyAlreadyExistsCategory(crate::key::error::KeyCategory::Unknown)
-			}
+			echodb::err::Error::KeyAlreadyExists => Error::TxKeyAlreadyExists,
 			echodb::err::Error::ValNotExpectedValue => Error::TxConditionNotMet,
 			_ => Error::Tx(e.to_string()),
 		}
@@ -1054,9 +1048,7 @@ impl From<echodb::err::Error> for Error {
 impl From<indxdb::err::Error> for Error {
 	fn from(e: indxdb::err::Error) -> Error {
 		match e {
-			indxdb::err::Error::KeyAlreadyExists => {
-				Error::TxKeyAlreadyExistsCategory(crate::key::error::KeyCategory::Unknown)
-			}
+			indxdb::err::Error::KeyAlreadyExists => Error::TxKeyAlreadyExists,
 			indxdb::err::Error::ValNotExpectedValue => Error::TxConditionNotMet,
 			_ => Error::Tx(e.to_string()),
 		}
@@ -1067,9 +1059,7 @@ impl From<indxdb::err::Error> for Error {
 impl From<tikv::Error> for Error {
 	fn from(e: tikv::Error) -> Error {
 		match e {
-			tikv::Error::DuplicateKeyInsertion => {
-				Error::TxKeyAlreadyExistsCategory(crate::key::error::KeyCategory::Unknown)
-			}
+			tikv::Error::DuplicateKeyInsertion => Error::TxKeyAlreadyExists,
 			tikv::Error::KeyError(ke) if ke.abort.contains("KeyTooLarge") => Error::TxKeyTooLarge,
 			tikv::Error::RegionError(re) if re.raft_entry_too_large.is_some() => Error::TxTooLarge,
 			_ => Error::Tx(e.to_string()),
@@ -1091,6 +1081,20 @@ impl From<surrealkv::Error> for Error {
 	}
 }

+#[cfg(feature = "kv-fdb")]
+impl From<foundationdb::FdbError> for Error {
+	fn from(e: foundationdb::FdbError) -> Error {
+		Error::Ds(e.to_string())
+	}
+}
+
+#[cfg(feature = "kv-fdb")]
+impl From<foundationdb::TransactionCommitError> for Error {
+	fn from(e: foundationdb::TransactionCommitError) -> Error {
+		Error::Tx(e.to_string())
+	}
+}
+
 impl From<channel::RecvError> for Error {
 	fn from(e: channel::RecvError) -> Error {
 		Error::Channel(e.to_string())
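The duplicate-key conversions above all collapse to the single `TxKeyAlreadyExists` variant, replacing the removed `TxKeyAlreadyExistsCategory(KeyCategory)` form. A sketch of the unified mapping, with a stand-in engine error enum in place of the echodb/indxdb/tikv types:

```rust
// Stand-in engine error; the real conversions are per storage backend.
#[derive(Debug, PartialEq)]
enum Error {
    TxKeyAlreadyExists,
    Tx(String),
}

enum EngineError {
    KeyAlreadyExists,
    Other(String),
}

impl From<EngineError> for Error {
    fn from(e: EngineError) -> Error {
        match e {
            // Every backend's duplicate-key case maps to the one variant.
            EngineError::KeyAlreadyExists => Error::TxKeyAlreadyExists,
            EngineError::Other(msg) => Error::Tx(msg),
        }
    }
}

fn main() {
    assert_eq!(Error::from(EngineError::KeyAlreadyExists), Error::TxKeyAlreadyExists);
}
```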
@@ -1136,14 +1140,3 @@ impl Serialize for Error {
 		serializer.serialize_str(self.to_string().as_str())
 	}
 }
-
-#[derive(Error, Debug)]
-#[non_exhaustive]
-pub enum LiveQueryCause {
-	#[doc(hidden)]
-	#[error("The Live Query must have a change feed for it it work")]
-	MissingChangeFeed,
-	#[doc(hidden)]
-	#[error("The Live Query must have a change feed that includes relative changes")]
-	ChangeFeedNoOriginal,
-}
@@ -32,17 +32,17 @@ where
 	I::Item: TryFuture,
 {
 	#[cfg(target_arch = "wasm32")]
-	const LIMIT: usize = 1;
+	let limit: usize = 1;

 	#[cfg(not(target_arch = "wasm32"))]
-	const LIMIT: usize = crate::cnf::MAX_CONCURRENT_TASKS;
+	let limit: usize = *crate::cnf::MAX_CONCURRENT_TASKS;

 	let mut input = iter.into_iter();
 	let (lo, hi) = input.size_hint();
 	let initial_capacity = hi.unwrap_or(lo);
 	let mut active = FuturesOrdered::new();

-	while active.len() < LIMIT {
+	while active.len() < limit {
 		if let Some(next) = input.next() {
 			active.push_back(TryFutureExt::into_future(next));
 		} else {
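`const LIMIT` becomes `let limit` because the configuration value now sits behind a lazily initialised static, which a `const` cannot dereference at compile time; the `*` in the hunk is a runtime deref. A sketch assuming a once_cell-style `Lazy` (the initialisation source shown here is an assumption):

```rust
use once_cell::sync::Lazy;

// Assumed: the real crate initialises this from an env var or a default.
static MAX_CONCURRENT_TASKS: Lazy<usize> = Lazy::new(|| {
    std::env::var("SURREAL_MAX_CONCURRENT_TASKS")
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(64)
});

fn main() {
    // Runtime deref of the lazy static, exactly as in the diff above.
    let limit: usize = *MAX_CONCURRENT_TASKS;
    assert!(limit >= 1);
}
```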
@@ -13,8 +13,8 @@ pub async fn analyze(
 	(az, val): (Value, Value),
 ) -> Result<Value, Error> {
 	if let (Some(opt), Value::Strand(az), Value::Strand(val)) = (opt, az, val) {
-		let az: Analyzer =
-			ctx.tx_lock().await.get_db_analyzer(opt.ns()?, opt.db()?, az.as_str()).await?.into();
+		// TODO: @emmanuel-keller this `into()` is expansive and clones the value
+		let az: Analyzer = ctx.tx().get_db_analyzer(opt.ns()?, opt.db()?, &az).await?.into();
 		az.analyze(stk, ctx, opt, val.0).await
 	} else {
 		Ok(Value::None)
@@ -20,7 +20,6 @@ pub async fn signin(kvs: &Datastore, session: &mut Session, vars: Object) -> Res
 	let ns = vars.get("NS").or_else(|| vars.get("ns"));
 	let db = vars.get("DB").or_else(|| vars.get("db"));
 	let ac = vars.get("AC").or_else(|| vars.get("ac"));
-
 	// Check if the parameters exist
 	match (ns, db, ac) {
 		// DB signin with access method
@@ -102,7 +101,7 @@ pub async fn db_access(
 	vars: Object,
 ) -> Result<String, Error> {
 	// Create a new readonly transaction
-	let mut tx = kvs.transaction(Read, Optimistic).await?;
+	let tx = kvs.transaction(Read, Optimistic).await?;
 	// Fetch the specified access method from storage
 	let access = tx.get_db_access(&ns, &db, &ac).await;
 	// Ensure that the transaction is cancelled
@@ -114,7 +113,7 @@ pub async fn db_access(
 			// All access method types are supported except for JWT
 			// The JWT access method is the one that is internal to SurrealDB
 			// The equivalent of signing in with JWT is to authenticate it
-			match av.kind {
+			match av.kind.clone() {
 				AccessType::Record(at) => {
 					// Check if the record access method supports issuing tokens
 					let iss = match at.jwt.issue {
@@ -47,7 +47,7 @@ pub async fn db_access(
 	vars: Object,
 ) -> Result<Option<String>, Error> {
 	// Create a new readonly transaction
-	let mut tx = kvs.transaction(Read, Optimistic).await?;
+	let tx = kvs.transaction(Read, Optimistic).await?;
 	// Fetch the specified access method from storage
 	let access = tx.get_db_access(&ns, &db, &ac).await;
 	// Ensure that the transaction is cancelled
@@ -57,7 +57,7 @@ pub async fn db_access(
 		Ok(av) => {
 			// Check the access method type
 			// Currently, only the record access method supports signup
-			match av.kind {
+			match av.kind.clone() {
 				AccessType::Record(at) => {
 					// Check if the record access method supports issuing tokens
 					let iss = match at.jwt.issue {
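The `mut` on the transaction binding disappears in these signin/signup lookups because, on the refactored transaction, read methods are assumed to take `&self`, with any caching handled through interior mutability. Sketch:

```rust
// Assumed design: reads through &self, cache behind an async mutex.
use futures::executor::block_on;
use futures::lock::Mutex;
use std::collections::BTreeMap;

struct Tx {
    cache: Mutex<BTreeMap<String, String>>, // interior mutability for the cache
}

impl Tx {
    async fn get_db_access(&self, ac: &str) -> Option<String> {
        let mut cache = self.cache.lock().await;
        Some(cache.entry(ac.to_string()).or_insert_with(|| format!("access:{ac}")).clone())
    }
}

fn main() {
    block_on(async {
        // No `mut` needed on the binding, exactly as in the diff above.
        let tx = Tx { cache: Mutex::new(BTreeMap::new()) };
        assert!(tx.get_db_access("users").await.is_some());
    });
}
```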
@@ -15,60 +15,47 @@ use once_cell::sync::Lazy;
 use std::str::{self, FromStr};
 use std::sync::Arc;

-fn config(alg: Algorithm, key: String) -> Result<(DecodingKey, Validation), Error> {
+fn config(alg: Algorithm, key: &[u8]) -> Result<(DecodingKey, Validation), Error> {
 	match alg {
-		Algorithm::Hs256 => Ok((
-			DecodingKey::from_secret(key.as_ref()),
-			Validation::new(jsonwebtoken::Algorithm::HS256),
-		)),
-		Algorithm::Hs384 => Ok((
-			DecodingKey::from_secret(key.as_ref()),
-			Validation::new(jsonwebtoken::Algorithm::HS384),
-		)),
-		Algorithm::Hs512 => Ok((
-			DecodingKey::from_secret(key.as_ref()),
-			Validation::new(jsonwebtoken::Algorithm::HS512),
-		)),
-		Algorithm::EdDSA => Ok((
-			DecodingKey::from_ed_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::EdDSA),
-		)),
-		Algorithm::Es256 => Ok((
-			DecodingKey::from_ec_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::ES256),
-		)),
-		Algorithm::Es384 => Ok((
-			DecodingKey::from_ec_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::ES384),
-		)),
-		Algorithm::Es512 => Ok((
-			DecodingKey::from_ec_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::ES384),
-		)),
-		Algorithm::Ps256 => Ok((
-			DecodingKey::from_rsa_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::PS256),
-		)),
-		Algorithm::Ps384 => Ok((
-			DecodingKey::from_rsa_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::PS384),
-		)),
-		Algorithm::Ps512 => Ok((
-			DecodingKey::from_rsa_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::PS512),
-		)),
-		Algorithm::Rs256 => Ok((
-			DecodingKey::from_rsa_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::RS256),
-		)),
-		Algorithm::Rs384 => Ok((
-			DecodingKey::from_rsa_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::RS384),
-		)),
-		Algorithm::Rs512 => Ok((
-			DecodingKey::from_rsa_pem(key.as_ref())?,
-			Validation::new(jsonwebtoken::Algorithm::RS512),
-		)),
+		Algorithm::Hs256 => {
+			Ok((DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS256)))
+		}
+		Algorithm::Hs384 => {
+			Ok((DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS384)))
+		}
+		Algorithm::Hs512 => {
+			Ok((DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS512)))
+		}
+		Algorithm::EdDSA => {
+			Ok((DecodingKey::from_ed_pem(key)?, Validation::new(jsonwebtoken::Algorithm::EdDSA)))
+		}
+		Algorithm::Es256 => {
+			Ok((DecodingKey::from_ec_pem(key)?, Validation::new(jsonwebtoken::Algorithm::ES256)))
+		}
+		Algorithm::Es384 => {
+			Ok((DecodingKey::from_ec_pem(key)?, Validation::new(jsonwebtoken::Algorithm::ES384)))
+		}
+		Algorithm::Es512 => {
+			Ok((DecodingKey::from_ec_pem(key)?, Validation::new(jsonwebtoken::Algorithm::ES384)))
+		}
+		Algorithm::Ps256 => {
+			Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::PS256)))
+		}
+		Algorithm::Ps384 => {
+			Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::PS384)))
+		}
+		Algorithm::Ps512 => {
+			Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::PS512)))
+		}
+		Algorithm::Rs256 => {
+			Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::RS256)))
+		}
+		Algorithm::Rs384 => {
+			Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::RS384)))
+		}
+		Algorithm::Rs512 => {
+			Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::RS512)))
+		}
 	}
 }
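With `config` now accepting `&[u8]`, both HMAC secrets and PEM blobs are passed as byte slices (`key.key.as_bytes()` at the call-sites in the hunks that follow). A usage sketch against the real jsonwebtoken crate, reduced to the HS512 branch:

```rust
// Reduced to one branch; the real function matches on the Algorithm enum.
use jsonwebtoken::{DecodingKey, Validation};

fn config(key: &[u8]) -> (DecodingKey, Validation) {
    (DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS512))
}

fn main() {
    let secret = String::from("surrealdb");
    // No String is consumed; the owned secret is borrowed as bytes.
    let (_key, validation) = config(secret.as_bytes());
    assert_eq!(validation.algorithms, vec![jsonwebtoken::Algorithm::HS512]);
}
```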
@@ -92,7 +79,6 @@ pub async fn basic(
 ) -> Result<(), Error> {
 	// Log the authentication type
 	trace!("Attempting basic authentication");
-
 	// Check if the parameters exist
 	match (ns, db) {
 		// DB signin
@@ -163,16 +149,18 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
 			// Log the decoded authentication claims
 			trace!("Authenticating with record access method `{}`", ac);
 			// Create a new readonly transaction
-			let mut tx = kvs.transaction(Read, Optimistic).await?;
+			let tx = kvs.transaction(Read, Optimistic).await?;
 			// Parse the record id
 			let mut rid = syn::thing(&id)?;
 			// Get the database access method
 			let de = tx.get_db_access(&ns, &db, &ac).await?;
+			// Ensure that the transaction is cancelled
+			tx.cancel().await?;
 			// Obtain the configuration to verify the token based on the access method
-			let (au, cf) = match de.kind {
+			let (au, cf) = match de.kind.clone() {
 				AccessType::Record(at) => {
 					let cf = match at.jwt.verify.clone() {
-						JwtAccessVerify::Key(key) => config(key.alg, key.key),
+						JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
 						#[cfg(feature = "jwks")]
 						JwtAccessVerify::Jwks(jwks) => {
 							if let Some(kid) = token_data.header.kid {
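The switch to `match de.kind.clone()` here and in the hunks below is likely a borrow-checker consequence of the caching refactor: if access definitions now come back from the transaction cache behind an `Arc`, their fields cannot be moved out, so the match needs an owned clone to destructure. This is an inferred rationale, sketched with stand-in types:

```rust
// Stand-in types; only the move-out-of-Arc constraint is the point.
use std::sync::Arc;

#[derive(Clone)]
enum AccessType {
    Jwt(String),
    Record(String),
}

struct DefineAccess {
    kind: AccessType,
}

fn main() {
    let de: Arc<DefineAccess> = Arc::new(DefineAccess { kind: AccessType::Jwt("key".into()) });
    // `de.kind` cannot be moved out of the shared Arc; a clone can be.
    match de.kind.clone() {
        AccessType::Jwt(at) => assert_eq!(at, "key"),
        AccessType::Record(_) => unreachable!(),
    }
}
```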
@@ -244,15 +232,17 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
 			// Log the decoded authentication claims
 			trace!("Authenticating to database `{}` with access method `{}`", db, ac);
 			// Create a new readonly transaction
-			let mut tx = kvs.transaction(Read, Optimistic).await?;
+			let tx = kvs.transaction(Read, Optimistic).await?;
 			// Get the database access method
 			let de = tx.get_db_access(&ns, &db, &ac).await?;
+			// Ensure that the transaction is cancelled
+			tx.cancel().await?;
 			// Obtain the configuration to verify the token based on the access method
-			match de.kind {
+			match de.kind.clone() {
 				// If the access type is Jwt, this is database access
 				AccessType::Jwt(at) => {
 					let cf = match at.verify {
-						JwtAccessVerify::Key(key) => config(key.alg, key.key),
+						JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
 						#[cfg(feature = "jwks")]
 						JwtAccessVerify::Jwks(jwks) => {
 							if let Some(kid) = token_data.header.kid {
@@ -300,7 +290,7 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
 						Some(au) => {
 							trace!("Access method `{}` is record access with authenticate clause", ac);
 							let cf = match at.jwt.verify {
-								JwtAccessVerify::Key(key) => config(key.alg, key.key),
+								JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
 								#[cfg(feature = "jwks")]
 								JwtAccessVerify::Jwks(jwks) => {
 									if let Some(kid) = token_data.header.kid {
@@ -366,13 +356,16 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
 			// Log the decoded authentication claims
 			trace!("Authenticating to database `{}` with user `{}`", db, id);
 			// Create a new readonly transaction
-			let mut tx = kvs.transaction(Read, Optimistic).await?;
+			let tx = kvs.transaction(Read, Optimistic).await?;
 			// Get the database user
 			let de = tx.get_db_user(&ns, &db, &id).await.map_err(|e| {
 				trace!("Error while authenticating to database `{db}`: {e}");
 				Error::InvalidAuth
 			})?;
-			let cf = config(Algorithm::Hs512, de.code)?;
+			// Ensure that the transaction is cancelled
+			tx.cancel().await?;
+			// Check the algorithm
+			let cf = config(Algorithm::Hs512, de.code.as_bytes())?;
 			// Verify the token
 			decode::<Claims>(token, &cf.0, &cf.1)?;
 			// Log the success
@@ -398,13 +391,15 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
 			// Log the decoded authentication claims
 			trace!("Authenticating to namespace `{}` with access method `{}`", ns, ac);
 			// Create a new readonly transaction
-			let mut tx = kvs.transaction(Read, Optimistic).await?;
+			let tx = kvs.transaction(Read, Optimistic).await?;
 			// Get the namespace access method
 			let de = tx.get_ns_access(&ns, &ac).await?;
+			// Ensure that the transaction is cancelled
+			tx.cancel().await?;
 			// Obtain the configuration to verify the token based on the access method
-			let cf = match de.kind {
+			let cf = match de.kind.clone() {
 				AccessType::Jwt(ac) => match ac.verify {
-					JwtAccessVerify::Key(key) => config(key.alg, key.key),
+					JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
 					#[cfg(feature = "jwks")]
 					JwtAccessVerify::Jwks(jwks) => {
 						if let Some(kid) = token_data.header.kid {
@@ -452,13 +447,16 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
 			// Log the decoded authentication claims
 			trace!("Authenticating to namespace `{}` with user `{}`", ns, id);
 			// Create a new readonly transaction
-			let mut tx = kvs.transaction(Read, Optimistic).await?;
+			let tx = kvs.transaction(Read, Optimistic).await?;
 			// Get the namespace user
 			let de = tx.get_ns_user(&ns, &id).await.map_err(|e| {
 				trace!("Error while authenticating to namespace `{ns}`: {e}");
 				Error::InvalidAuth
 			})?;
-			let cf = config(Algorithm::Hs512, de.code)?;
+			// Ensure that the transaction is cancelled
+			tx.cancel().await?;
+			// Check the algorithm
+			let cf = config(Algorithm::Hs512, de.code.as_bytes())?;
 			// Verify the token
 			decode::<Claims>(token, &cf.0, &cf.1)?;
 			// Log the success
@@ -482,13 +480,15 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
 			// Log the decoded authentication claims
 			trace!("Authenticating to root with access method `{}`", ac);
|
||||||
// Create a new readonly transaction
|
// Create a new readonly transaction
|
||||||
let mut tx = kvs.transaction(Read, Optimistic).await?;
|
let tx = kvs.transaction(Read, Optimistic).await?;
|
||||||
// Get the namespace access method
|
// Get the namespace access method
|
||||||
let de = tx.get_root_access(&ac).await?;
|
let de = tx.get_root_access(&ac).await?;
|
||||||
|
// Ensure that the transaction is cancelled
|
||||||
|
tx.cancel().await?;
|
||||||
// Obtain the configuration to verify the token based on the access method
|
// Obtain the configuration to verify the token based on the access method
|
||||||
let cf = match de.kind {
|
let cf = match de.kind.clone() {
|
||||||
AccessType::Jwt(ac) => match ac.verify {
|
AccessType::Jwt(ac) => match ac.verify {
|
||||||
JwtAccessVerify::Key(key) => config(key.alg, key.key),
|
JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()),
|
||||||
#[cfg(feature = "jwks")]
|
#[cfg(feature = "jwks")]
|
||||||
JwtAccessVerify::Jwks(jwks) => {
|
JwtAccessVerify::Jwks(jwks) => {
|
||||||
if let Some(kid) = token_data.header.kid {
|
if let Some(kid) = token_data.header.kid {
|
||||||
|
@ -533,13 +533,16 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul
|
||||||
// Log the decoded authentication claims
|
// Log the decoded authentication claims
|
||||||
trace!("Authenticating to root level with user `{}`", id);
|
trace!("Authenticating to root level with user `{}`", id);
|
||||||
// Create a new readonly transaction
|
// Create a new readonly transaction
|
||||||
let mut tx = kvs.transaction(Read, Optimistic).await?;
|
let tx = kvs.transaction(Read, Optimistic).await?;
|
||||||
// Get the namespace user
|
// Get the namespace user
|
||||||
let de = tx.get_root_user(&id).await.map_err(|e| {
|
let de = tx.get_root_user(&id).await.map_err(|e| {
|
||||||
trace!("Error while authenticating to root: {e}");
|
trace!("Error while authenticating to root: {e}");
|
||||||
Error::InvalidAuth
|
Error::InvalidAuth
|
||||||
})?;
|
})?;
|
||||||
let cf = config(Algorithm::Hs512, de.code)?;
|
// Ensure that the transaction is cancelled
|
||||||
|
tx.cancel().await?;
|
||||||
|
// Check the algorithm
|
||||||
|
let cf = config(Algorithm::Hs512, de.code.as_bytes())?;
|
||||||
// Verify the token
|
// Verify the token
|
||||||
decode::<Claims>(token, &cf.0, &cf.1)?;
|
decode::<Claims>(token, &cf.0, &cf.1)?;
|
||||||
// Log the success
|
// Log the success
|
||||||
|
@ -565,14 +568,18 @@ pub async fn verify_root_creds(
|
||||||
pass: &str,
|
pass: &str,
|
||||||
) -> Result<DefineUserStatement, Error> {
|
) -> Result<DefineUserStatement, Error> {
|
||||||
// Create a new readonly transaction
|
// Create a new readonly transaction
|
||||||
let mut tx = ds.transaction(Read, Optimistic).await?;
|
let tx = ds.transaction(Read, Optimistic).await?;
|
||||||
// Fetch the specified user from storage
|
// Fetch the specified user from storage
|
||||||
let user = tx.get_root_user(user).await.map_err(|e| {
|
let user = tx.get_root_user(user).await.map_err(|e| {
|
||||||
trace!("Error while authenticating to root: {e}");
|
trace!("Error while authenticating to root: {e}");
|
||||||
Error::InvalidAuth
|
Error::InvalidAuth
|
||||||
})?;
|
})?;
|
||||||
|
// Ensure that the transaction is cancelled
|
||||||
|
tx.cancel().await?;
|
||||||
// Verify the specified password for the user
|
// Verify the specified password for the user
|
||||||
verify_pass(pass, user.hash.as_ref())?;
|
verify_pass(pass, user.hash.as_ref())?;
|
||||||
|
// Clone the cached user object
|
||||||
|
let user = (*user).clone();
|
||||||
// Return the verified user object
|
// Return the verified user object
|
||||||
Ok(user)
|
Ok(user)
|
||||||
}
|
}
|
||||||
|
@ -584,14 +591,18 @@ pub async fn verify_ns_creds(
|
||||||
pass: &str,
|
pass: &str,
|
||||||
) -> Result<DefineUserStatement, Error> {
|
) -> Result<DefineUserStatement, Error> {
|
||||||
// Create a new readonly transaction
|
// Create a new readonly transaction
|
||||||
let mut tx = ds.transaction(Read, Optimistic).await?;
|
let tx = ds.transaction(Read, Optimistic).await?;
|
||||||
// Fetch the specified user from storage
|
// Fetch the specified user from storage
|
||||||
let user = tx.get_ns_user(ns, user).await.map_err(|e| {
|
let user = tx.get_ns_user(ns, user).await.map_err(|e| {
|
||||||
trace!("Error while authenticating to namespace `{ns}`: {e}");
|
trace!("Error while authenticating to namespace `{ns}`: {e}");
|
||||||
Error::InvalidAuth
|
Error::InvalidAuth
|
||||||
})?;
|
})?;
|
||||||
|
// Ensure that the transaction is cancelled
|
||||||
|
tx.cancel().await?;
|
||||||
// Verify the specified password for the user
|
// Verify the specified password for the user
|
||||||
verify_pass(pass, user.hash.as_ref())?;
|
verify_pass(pass, user.hash.as_ref())?;
|
||||||
|
// Clone the cached user object
|
||||||
|
let user = (*user).clone();
|
||||||
// Return the verified user object
|
// Return the verified user object
|
||||||
Ok(user)
|
Ok(user)
|
||||||
}
|
}
|
||||||
|
@ -604,14 +615,18 @@ pub async fn verify_db_creds(
|
||||||
pass: &str,
|
pass: &str,
|
||||||
) -> Result<DefineUserStatement, Error> {
|
) -> Result<DefineUserStatement, Error> {
|
||||||
// Create a new readonly transaction
|
// Create a new readonly transaction
|
||||||
let mut tx = ds.transaction(Read, Optimistic).await?;
|
let tx = ds.transaction(Read, Optimistic).await?;
|
||||||
// Fetch the specified user from storage
|
// Fetch the specified user from storage
|
||||||
let user = tx.get_db_user(ns, db, user).await.map_err(|e| {
|
let user = tx.get_db_user(ns, db, user).await.map_err(|e| {
|
||||||
trace!("Error while authenticating to database `{ns}/{db}`: {e}");
|
trace!("Error while authenticating to database `{ns}/{db}`: {e}");
|
||||||
Error::InvalidAuth
|
Error::InvalidAuth
|
||||||
})?;
|
})?;
|
||||||
|
// Ensure that the transaction is cancelled
|
||||||
|
tx.cancel().await?;
|
||||||
// Verify the specified password for the user
|
// Verify the specified password for the user
|
||||||
verify_pass(pass, user.hash.as_ref())?;
|
verify_pass(pass, user.hash.as_ref())?;
|
||||||
|
// Clone the cached user object
|
||||||
|
let user = (*user).clone();
|
||||||
// Return the verified user object
|
// Return the verified user object
|
||||||
Ok(user)
|
Ok(user)
|
||||||
}
|
}
|
||||||
|
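Every hunk above applies the same refactored pattern: the transaction handle is no longer bound mutably, and read-only transactions are cancelled explicitly once the catalog read is done. A minimal sketch of that pattern, assuming the crate-internal paths used by the test modules elsewhere in this diff (`crate::kvs`, `crate::err::Error`):

```rust
use crate::err::Error;
use crate::kvs::{Datastore, LockType::*, TransactionType::*};

// Sketch only: the body stands in for any catalog read such as
// tx.get_root_user(...) or tx.get_db_access(...) from the hunks above.
async fn read_then_cancel(ds: &Datastore) -> Result<(), Error> {
    // No `mut` binding is needed with the refactored transaction interface
    let tx = ds.transaction(Read, Optimistic).await?;
    // ... perform catalog reads here ...
    // Read-only transactions are now released explicitly
    tx.cancel().await?;
    Ok(())
}
```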
@@ -1685,7 +1700,7 @@ mod tests {
 algorithm: jsonwebtoken::jwk::AlgorithmParameters::OctetKey(
 jsonwebtoken::jwk::OctetKeyParameters {
 key_type: jsonwebtoken::jwk::OctetKeyType::Octet,
-value: STANDARD_NO_PAD.encode(&secret),
+value: STANDARD_NO_PAD.encode(secret),
 },
 ),
 }],
@@ -115,18 +115,18 @@ mod tests {
 use crate::kvs::{Datastore, LockType::*, Transaction, TransactionType::*};

 async fn get_ids(ds: &Datastore) -> (Transaction, U32) {
-let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+let txn = ds.transaction(Write, Optimistic).await.unwrap();
 let key = "foo";
-let v = tx.get(key).await.unwrap();
+let v = txn.get(key).await.unwrap();
 let d = U32::new(key.into(), v).await.unwrap();
-(tx, d)
+(txn, d)
 }

-async fn finish(mut tx: Transaction, mut d: U32) -> Result<(), Error> {
+async fn finish(txn: Transaction, mut d: U32) -> Result<(), Error> {
 if let Some((key, val)) = d.finish() {
-tx.set(key, val).await?;
+txn.set(key, val).await?;
 }
-tx.commit().await
+txn.commit().await
 }

 #[tokio::test]
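The `tx` to `txn` rename above also drops every `mut` binding; presumably the refactored `Transaction` now handles its own synchronisation internally, so `get`, `set`, and `commit` all work through a shared reference. A hedged sketch of the resulting call shape (the `Key` and `Val` aliases from `crate::kvs` are assumptions):

```rust
use crate::err::Error;
use crate::kvs::{Key, Transaction, Val};

// Illustrative helper, not part of the diff: writes one pair and commits.
async fn write_and_commit(txn: Transaction, key: Key, val: Val) -> Result<(), Error> {
    // No `mut` binding: the transaction mutates behind a shared reference
    txn.set(key, val).await?;
    // Committing flushes the pending writes
    txn.commit().await
}
```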
@@ -23,7 +23,7 @@ pub struct DocIds {
 impl DocIds {
 pub async fn new(
 ixs: &IndexStores,
-tx: &mut Transaction,
+tx: &Transaction,
 tt: TransactionType,
 ikb: IndexKeyBase,
 default_btree_order: u32,
@@ -73,7 +73,7 @@ impl DocIds {

 pub(crate) async fn get_doc_id(
 &self,
-tx: &mut Transaction,
+tx: &Transaction,
 doc_key: Key,
 ) -> Result<Option<DocId>, Error> {
 self.btree.search(tx, &self.store, &doc_key).await
@@ -83,7 +83,7 @@ impl DocIds {
 /// If the doc_id does not exists, a new one is created, and associated to the given key.
 pub(in crate::idx) async fn resolve_doc_id(
 &mut self,
-tx: &mut Transaction,
+tx: &Transaction,
 doc_key: Key,
 ) -> Result<Resolved, Error> {
 {
@@ -99,7 +99,7 @@ impl DocIds {

 pub(in crate::idx) async fn remove_doc(
 &mut self,
-tx: &mut Transaction,
+tx: &Transaction,
 doc_key: Key,
 ) -> Result<Option<DocId>, Error> {
 if let Some(doc_id) = self.btree.delete(tx, &mut self.store, doc_key).await? {
@@ -119,7 +119,7 @@ impl DocIds {

 pub(in crate::idx) async fn get_doc_key(
 &self,
-tx: &mut Transaction,
+tx: &Transaction,
 doc_id: DocId,
 ) -> Result<Option<Key>, Error> {
 let doc_id_key = self.index_key_base.new_bi_key(doc_id);
@@ -130,14 +130,11 @@ impl DocIds {
 }
 }

-pub(in crate::idx) async fn statistics(
-&self,
-tx: &mut Transaction,
-) -> Result<BStatistics, Error> {
+pub(in crate::idx) async fn statistics(&self, tx: &Transaction) -> Result<BStatistics, Error> {
 self.btree.statistics(tx, &self.store).await
 }

-pub(in crate::idx) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
+pub(in crate::idx) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
 if let Some(new_cache) = self.store.finish(tx).await? {
 let btree = self.btree.inc_generation().clone();
 let state = State {
@@ -260,16 +257,15 @@ mod tests {
 const BTREE_ORDER: u32 = 7;

 async fn new_operation(ds: &Datastore, tt: TransactionType) -> (Transaction, DocIds) {
-let mut tx = ds.transaction(tt, Optimistic).await.unwrap();
-let d =
-DocIds::new(ds.index_store(), &mut tx, tt, IndexKeyBase::default(), BTREE_ORDER, 100)
+let tx = ds.transaction(tt, Optimistic).await.unwrap();
+let d = DocIds::new(ds.index_store(), &tx, tt, IndexKeyBase::default(), BTREE_ORDER, 100)
 .await
 .unwrap();
 (tx, d)
 }

-async fn finish(mut tx: Transaction, mut d: DocIds) {
-d.finish(&mut tx).await.unwrap();
+async fn finish(tx: Transaction, mut d: DocIds) {
+d.finish(&tx).await.unwrap();
 tx.commit().await.unwrap();
 }

@@ -279,83 +275,65 @@ mod tests {

 // Resolve a first doc key
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-let doc_id = d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap();
+let (tx, mut d) = new_operation(&ds, Write).await;
+let doc_id = d.resolve_doc_id(&tx, "Foo".into()).await.unwrap();
 finish(tx, d).await;

-let (mut tx, d) = new_operation(&ds, Read).await;
-assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 1);
-assert_eq!(d.get_doc_key(&mut tx, 0).await.unwrap(), Some("Foo".into()));
+let (tx, d) = new_operation(&ds, Read).await;
+assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 1);
+assert_eq!(d.get_doc_key(&tx, 0).await.unwrap(), Some("Foo".into()));
 assert_eq!(doc_id, Resolved::New(0));
 }

 // Resolve the same doc key
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-let doc_id = d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap();
+let (tx, mut d) = new_operation(&ds, Write).await;
+let doc_id = d.resolve_doc_id(&tx, "Foo".into()).await.unwrap();
 finish(tx, d).await;

-let (mut tx, d) = new_operation(&ds, Read).await;
-assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 1);
-assert_eq!(d.get_doc_key(&mut tx, 0).await.unwrap(), Some("Foo".into()));
+let (tx, d) = new_operation(&ds, Read).await;
+assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 1);
+assert_eq!(d.get_doc_key(&tx, 0).await.unwrap(), Some("Foo".into()));
 assert_eq!(doc_id, Resolved::Existing(0));
 }

 // Resolve another single doc key
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-let doc_id = d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap();
+let (tx, mut d) = new_operation(&ds, Write).await;
+let doc_id = d.resolve_doc_id(&tx, "Bar".into()).await.unwrap();
 finish(tx, d).await;

-let (mut tx, d) = new_operation(&ds, Read).await;
-assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 2);
-assert_eq!(d.get_doc_key(&mut tx, 1).await.unwrap(), Some("Bar".into()));
+let (tx, d) = new_operation(&ds, Read).await;
+assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 2);
+assert_eq!(d.get_doc_key(&tx, 1).await.unwrap(), Some("Bar".into()));
 assert_eq!(doc_id, Resolved::New(1));
 }

 // Resolve another two existing doc keys and two new doc keys (interlaced)
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-assert_eq!(
-d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(),
-Resolved::Existing(0)
-);
-assert_eq!(d.resolve_doc_id(&mut tx, "Hello".into()).await.unwrap(), Resolved::New(2));
-assert_eq!(
-d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(),
-Resolved::Existing(1)
-);
-assert_eq!(d.resolve_doc_id(&mut tx, "World".into()).await.unwrap(), Resolved::New(3));
+let (tx, mut d) = new_operation(&ds, Write).await;
+assert_eq!(d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(), Resolved::Existing(0));
+assert_eq!(d.resolve_doc_id(&tx, "Hello".into()).await.unwrap(), Resolved::New(2));
+assert_eq!(d.resolve_doc_id(&tx, "Bar".into()).await.unwrap(), Resolved::Existing(1));
+assert_eq!(d.resolve_doc_id(&tx, "World".into()).await.unwrap(), Resolved::New(3));
 finish(tx, d).await;
-let (mut tx, d) = new_operation(&ds, Read).await;
-assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 4);
+let (tx, d) = new_operation(&ds, Read).await;
+assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 4);
 }

 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-assert_eq!(
-d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(),
-Resolved::Existing(0)
-);
-assert_eq!(
-d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(),
-Resolved::Existing(1)
-);
-assert_eq!(
-d.resolve_doc_id(&mut tx, "Hello".into()).await.unwrap(),
-Resolved::Existing(2)
-);
-assert_eq!(
-d.resolve_doc_id(&mut tx, "World".into()).await.unwrap(),
-Resolved::Existing(3)
-);
+let (tx, mut d) = new_operation(&ds, Write).await;
+assert_eq!(d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(), Resolved::Existing(0));
+assert_eq!(d.resolve_doc_id(&tx, "Bar".into()).await.unwrap(), Resolved::Existing(1));
+assert_eq!(d.resolve_doc_id(&tx, "Hello".into()).await.unwrap(), Resolved::Existing(2));
+assert_eq!(d.resolve_doc_id(&tx, "World".into()).await.unwrap(), Resolved::Existing(3));
 finish(tx, d).await;
-let (mut tx, d) = new_operation(&ds, Read).await;
-assert_eq!(d.get_doc_key(&mut tx, 0).await.unwrap(), Some("Foo".into()));
-assert_eq!(d.get_doc_key(&mut tx, 1).await.unwrap(), Some("Bar".into()));
-assert_eq!(d.get_doc_key(&mut tx, 2).await.unwrap(), Some("Hello".into()));
-assert_eq!(d.get_doc_key(&mut tx, 3).await.unwrap(), Some("World".into()));
-assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 4);
+let (tx, d) = new_operation(&ds, Read).await;
+assert_eq!(d.get_doc_key(&tx, 0).await.unwrap(), Some("Foo".into()));
+assert_eq!(d.get_doc_key(&tx, 1).await.unwrap(), Some("Bar".into()));
+assert_eq!(d.get_doc_key(&tx, 2).await.unwrap(), Some("Hello".into()));
+assert_eq!(d.get_doc_key(&tx, 3).await.unwrap(), Some("World".into()));
+assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 4);
 }
 }
@@ -365,53 +343,53 @@ mod tests {

 // Create two docs
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-assert_eq!(d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(), Resolved::New(0));
-assert_eq!(d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(), Resolved::New(1));
+let (tx, mut d) = new_operation(&ds, Write).await;
+assert_eq!(d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(), Resolved::New(0));
+assert_eq!(d.resolve_doc_id(&tx, "Bar".into()).await.unwrap(), Resolved::New(1));
 finish(tx, d).await;
 }

 // Remove doc 1
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-assert_eq!(d.remove_doc(&mut tx, "Dummy".into()).await.unwrap(), None);
-assert_eq!(d.remove_doc(&mut tx, "Foo".into()).await.unwrap(), Some(0));
+let (tx, mut d) = new_operation(&ds, Write).await;
+assert_eq!(d.remove_doc(&tx, "Dummy".into()).await.unwrap(), None);
+assert_eq!(d.remove_doc(&tx, "Foo".into()).await.unwrap(), Some(0));
 finish(tx, d).await;
 }

 // Check 'Foo' has been removed
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-assert_eq!(d.remove_doc(&mut tx, "Foo".into()).await.unwrap(), None);
+let (tx, mut d) = new_operation(&ds, Write).await;
+assert_eq!(d.remove_doc(&tx, "Foo".into()).await.unwrap(), None);
 finish(tx, d).await;
 }

 // Insert a new doc - should take the available id 1
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-assert_eq!(d.resolve_doc_id(&mut tx, "Hello".into()).await.unwrap(), Resolved::New(0));
+let (tx, mut d) = new_operation(&ds, Write).await;
+assert_eq!(d.resolve_doc_id(&tx, "Hello".into()).await.unwrap(), Resolved::New(0));
 finish(tx, d).await;
 }

 // Remove doc 2
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-assert_eq!(d.remove_doc(&mut tx, "Dummy".into()).await.unwrap(), None);
-assert_eq!(d.remove_doc(&mut tx, "Bar".into()).await.unwrap(), Some(1));
+let (tx, mut d) = new_operation(&ds, Write).await;
+assert_eq!(d.remove_doc(&tx, "Dummy".into()).await.unwrap(), None);
+assert_eq!(d.remove_doc(&tx, "Bar".into()).await.unwrap(), Some(1));
 finish(tx, d).await;
 }

 // Check 'Bar' has been removed
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-assert_eq!(d.remove_doc(&mut tx, "Foo".into()).await.unwrap(), None);
+let (tx, mut d) = new_operation(&ds, Write).await;
+assert_eq!(d.remove_doc(&tx, "Foo".into()).await.unwrap(), None);
 finish(tx, d).await;
 }

 // Insert a new doc - should take the available id 2
 {
-let (mut tx, mut d) = new_operation(&ds, Write).await;
-assert_eq!(d.resolve_doc_id(&mut tx, "World".into()).await.unwrap(), Resolved::New(1));
+let (tx, mut d) = new_operation(&ds, Write).await;
+assert_eq!(d.resolve_doc_id(&tx, "World".into()).await.unwrap(), Resolved::New(1));
 finish(tx, d).await;
 }
 }
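With `DocIds` now borrowing the transaction immutably everywhere, a single `&Transaction` can be threaded through resolve, finish, and commit without the old `&mut` juggling. A sketch in the style of the test helpers above (it assumes a location where the `pub(in crate::idx)` items are visible, and the btree order and cache size are arbitrary):

```rust
use crate::err::Error;
use crate::idx::docids::{DocIds, Resolved};
use crate::idx::IndexKeyBase;
use crate::kvs::{Datastore, LockType::*, TransactionType::*};

// Hypothetical test-style flow mirroring new_operation/finish above.
async fn resolve_and_commit(ds: &Datastore) -> Result<(), Error> {
    let tx = ds.transaction(Write, Optimistic).await?;
    let mut d =
        DocIds::new(ds.index_store(), &tx, Write, IndexKeyBase::default(), 7, 100).await?;
    // The same shared reference serves every DocIds call
    let id = d.resolve_doc_id(&tx, "Foo".into()).await?;
    assert!(matches!(id, Resolved::New(_) | Resolved::Existing(_)));
    // Flush the btree state, then commit the shared transaction
    d.finish(&tx).await?;
    tx.commit().await?;
    Ok(())
}
```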
@@ -15,7 +15,7 @@ use filter::Filter;
 use reblessive::tree::Stk;
 use std::collections::hash_map::Entry;
 use std::collections::{HashMap, HashSet};
+use std::sync::Arc;
 mod filter;
 mod tokenizer;

@@ -35,6 +35,17 @@ impl From<DefineAnalyzerStatement> for Analyzer {
 }
 }

+// TODO: @emmanuel-keller we probably don't need to clone the value here
+impl From<Arc<DefineAnalyzerStatement>> for Analyzer {
+fn from(az: Arc<DefineAnalyzerStatement>) -> Self {
+Self {
+function: az.function.clone().map(|i| i.0),
+tokenizers: az.tokenizers.clone(),
+filters: Filter::from(az.filters.clone()),
+}
+}
+}
+
 pub(in crate::idx) type TermsList = Vec<Option<(TermId, TermLen)>>;

 pub(in crate::idx) struct TermsSet {
@@ -72,13 +83,13 @@ impl Analyzer {
 let mut list = Vec::with_capacity(tokens.list().len());
 let mut unique_tokens = HashSet::new();
 let mut set = HashSet::new();
-let mut tx = ctx.tx_lock().await;
+let tx = ctx.tx();
 let mut has_unknown_terms = false;
 for token in tokens.list() {
 // Tokens can contains duplicated, not need to evaluate them again
 if unique_tokens.insert(token) {
 // Is the term known in the index?
-let opt_term_id = t.get_term_id(&mut tx, tokens.get_token_string(token)?).await?;
+let opt_term_id = t.get_term_id(&tx, tokens.get_token_string(token)?).await?;
 list.push(opt_term_id.map(|tid| (tid, token.get_char_len())));
 if let Some(term_id) = opt_term_id {
 set.insert(term_id);
@@ -109,12 +120,10 @@ impl Analyzer {
 self.analyze_value(stk, ctx, opt, content, FilteringStage::Indexing, &mut tv).await?;
 let mut set = HashSet::new();
 let mut has_unknown_terms = false;
-let mut tx = ctx.tx_lock().await;
+let tx = ctx.tx();
 for tokens in tv {
 for token in tokens.list() {
-if let Some(term_id) =
-t.get_term_id(&mut tx, tokens.get_token_string(token)?).await?
-{
+if let Some(term_id) = t.get_term_id(&tx, tokens.get_token_string(token)?).await? {
 set.insert(term_id);
 } else {
 has_unknown_terms = true;
@@ -162,9 +171,9 @@ impl Analyzer {
 }
 // Now we can resolve the term ids
 let mut tfid = Vec::with_capacity(tf.len());
-let mut tx = ctx.tx_lock().await;
+let tx = ctx.tx();
 for (t, f) in tf {
-tfid.push((terms.resolve_term_id(&mut tx, t).await?, f));
+tfid.push((terms.resolve_term_id(&tx, t).await?, f));
 }
 drop(tx);
 Ok((dl, tfid))
@@ -204,9 +213,9 @@ impl Analyzer {
 // Now we can resolve the term ids
 let mut tfid = Vec::with_capacity(tfos.len());
 let mut osid = Vec::with_capacity(tfos.len());
-let mut tx = ctx.tx_lock().await;
+let tx = ctx.tx();
 for (t, o) in tfos {
-let id = terms.resolve_term_id(&mut tx, t).await?;
+let id = terms.resolve_term_id(&tx, t).await?;
 tfid.push((id, o.len() as TermFrequency));
 osid.push((id, OffsetRecords(o)));
 }
@@ -308,7 +317,7 @@ impl Analyzer {
 mod tests {
 use super::Analyzer;
 use crate::ctx::Context;
-use crate::dbs::{Options, Transaction};
+use crate::dbs::Options;
 use crate::idx::ft::analyzer::filter::FilteringStage;
 use crate::idx::ft::analyzer::tokenizer::{Token, Tokens};
 use crate::kvs::{Datastore, LockType, TransactionType};
@@ -316,14 +325,12 @@ mod tests {
 sql::{statements::DefineStatement, Statement},
 syn,
 };
-use futures::lock::Mutex;
 use std::sync::Arc;

 async fn get_analyzer_tokens(def: &str, input: &str) -> Tokens {
 let ds = Datastore::new("memory").await.unwrap();
-let tx = ds.transaction(TransactionType::Read, LockType::Optimistic).await.unwrap();
-let txn: Transaction = Arc::new(Mutex::new(tx));
-let ctx = Context::default().set_transaction(txn);
+let txn = ds.transaction(TransactionType::Read, LockType::Optimistic).await.unwrap();
+let ctx = Context::default().with_transaction(Arc::new(txn));

 let mut stmt = syn::parse(&format!("DEFINE {def}")).unwrap();
 let Some(Statement::Define(DefineStatement::Analyzer(az))) = stmt.0 .0.pop() else {
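The analyzer hunks above all replace `ctx.tx_lock().await`, an async mutex acquisition, with `ctx.tx()`, which presumably hands back a shared transaction handle directly. A hedged sketch of the new access pattern (the helper function itself is hypothetical, and it assumes a location where the `pub(in crate::idx)` items are visible):

```rust
use crate::ctx::Context;
use crate::err::Error;
use crate::idx::ft::analyzer::tokenizer::Tokens;
use crate::idx::ft::terms::Terms;

// Illustrative only: counts tokens already known to the terms index.
async fn count_known_terms(
    ctx: &Context<'_>,
    t: &Terms,
    tokens: &Tokens,
) -> Result<usize, Error> {
    // No await, and no mutex guard to hold across the loop
    let tx = ctx.tx();
    let mut count = 0;
    for token in tokens.list() {
        if t.get_term_id(&tx, tokens.get_token_string(token)?).await?.is_some() {
            count += 1;
        }
    }
    Ok(count)
}
```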
@@ -18,7 +18,7 @@ pub(super) struct DocLengths {
 impl DocLengths {
 pub(super) async fn new(
 ixs: &IndexStores,
-tx: &mut Transaction,
+tx: &Transaction,
 ikb: IndexKeyBase,
 default_btree_order: u32,
 tt: TransactionType,
@@ -48,7 +48,7 @@ impl DocLengths {

 pub(super) async fn get_doc_length(
 &self,
-tx: &mut Transaction,
+tx: &Transaction,
 doc_id: DocId,
 ) -> Result<Option<DocLength>, Error> {
 self.btree.search(tx, &self.store, &doc_id.to_be_bytes().to_vec()).await
@@ -56,7 +56,7 @@ impl DocLengths {

 pub(super) async fn get_doc_length_mut(
 &mut self,
-tx: &mut Transaction,
+tx: &Transaction,
 doc_id: DocId,
 ) -> Result<Option<DocLength>, Error> {
 self.btree.search_mut(tx, &mut self.store, &doc_id.to_be_bytes().to_vec()).await
@@ -64,7 +64,7 @@ impl DocLengths {

 pub(super) async fn set_doc_length(
 &mut self,
-tx: &mut Transaction,
+tx: &Transaction,
 doc_id: DocId,
 doc_length: DocLength,
 ) -> Result<(), Error> {
@@ -74,17 +74,17 @@ impl DocLengths {

 pub(super) async fn remove_doc_length(
 &mut self,
-tx: &mut Transaction,
+tx: &Transaction,
 doc_id: DocId,
 ) -> Result<Option<Payload>, Error> {
 self.btree.delete(tx, &mut self.store, doc_id.to_be_bytes().to_vec()).await
 }

-pub(super) async fn statistics(&self, tx: &mut Transaction) -> Result<BStatistics, Error> {
+pub(super) async fn statistics(&self, tx: &Transaction) -> Result<BStatistics, Error> {
 self.btree.statistics(tx, &self.store).await
 }

-pub(super) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
+pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
 if let Some(new_cache) = self.store.finish(tx).await? {
 let state = self.btree.inc_generation();
 tx.set(self.state_key.clone(), state.try_to_val()?).await?;
@@ -105,16 +105,15 @@ mod tests {
 order: u32,
 tt: TransactionType,
 ) -> (Transaction, DocLengths) {
-let mut tx = ds.transaction(TransactionType::Write, Optimistic).await.unwrap();
-let dl =
-DocLengths::new(ds.index_store(), &mut tx, IndexKeyBase::default(), order, tt, 100)
+let tx = ds.transaction(TransactionType::Write, Optimistic).await.unwrap();
+let dl = DocLengths::new(ds.index_store(), &tx, IndexKeyBase::default(), order, tt, 100)
 .await
 .unwrap();
 (tx, dl)
 }

-async fn finish(mut l: DocLengths, mut tx: Transaction) {
-l.finish(&mut tx).await.unwrap();
+async fn finish(mut l: DocLengths, tx: Transaction) {
+l.finish(&tx).await.unwrap();
 tx.commit().await.unwrap()
 }

@@ -126,54 +125,54 @@ mod tests {

 {
 // Check empty state
-let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
-assert_eq!(l.statistics(&mut tx).await.unwrap().keys_count, 0);
-let dl = l.get_doc_length(&mut tx, 99).await.unwrap();
+let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
+assert_eq!(l.statistics(&tx).await.unwrap().keys_count, 0);
+let dl = l.get_doc_length(&tx, 99).await.unwrap();
 assert_eq!(dl, None);
 tx.cancel().await.unwrap();
 }

 {
 // Set a doc length
-let (mut tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
-l.set_doc_length(&mut tx, 99, 199).await.unwrap();
+let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
+l.set_doc_length(&tx, 99, 199).await.unwrap();
 finish(l, tx).await;
 }

 {
-let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
-assert_eq!(l.statistics(&mut tx).await.unwrap().keys_count, 1);
-let dl = l.get_doc_length(&mut tx, 99).await.unwrap();
+let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
+assert_eq!(l.statistics(&tx).await.unwrap().keys_count, 1);
+let dl = l.get_doc_length(&tx, 99).await.unwrap();
 assert_eq!(dl, Some(199));
 tx.cancel().await.unwrap();
 }

 {
 // Update doc length
-let (mut tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
-l.set_doc_length(&mut tx, 99, 299).await.unwrap();
+let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
+l.set_doc_length(&tx, 99, 299).await.unwrap();
 finish(l, tx).await;
 }

 {
-let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
-assert_eq!(l.statistics(&mut tx).await.unwrap().keys_count, 1);
-let dl = l.get_doc_length(&mut tx, 99).await.unwrap();
+let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
+assert_eq!(l.statistics(&tx).await.unwrap().keys_count, 1);
+let dl = l.get_doc_length(&tx, 99).await.unwrap();
 assert_eq!(dl, Some(299));
 tx.cancel().await.unwrap();
 }

 {
 // Remove doc lengths
-let (mut tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
-assert_eq!(l.remove_doc_length(&mut tx, 99).await.unwrap(), Some(299));
-assert_eq!(l.remove_doc_length(&mut tx, 99).await.unwrap(), None);
+let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
+assert_eq!(l.remove_doc_length(&tx, 99).await.unwrap(), Some(299));
+assert_eq!(l.remove_doc_length(&tx, 99).await.unwrap(), None);
 finish(l, tx).await;
 }

 {
-let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
-let dl = l.get_doc_length(&mut tx, 99).await.unwrap();
+let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
+let dl = l.get_doc_length(&tx, 99).await.unwrap();
 assert_eq!(dl, None);
 tx.cancel().await.unwrap();
 }
@@ -22,7 +22,7 @@ use crate::idx::ft::terms::{TermId, TermLen, Terms};
 use crate::idx::trees::btree::BStatistics;
 use crate::idx::trees::store::IndexStores;
 use crate::idx::{IndexKeyBase, VersionedSerdeState};
-use crate::kvs;
+use crate::kvs::Transaction;
 use crate::kvs::{Key, TransactionType};
 use crate::sql::index::SearchParams;
 use crate::sql::scoring::Scoring;
@@ -105,35 +105,33 @@ impl FtIndex {
 p: &SearchParams,
 tt: TransactionType,
 ) -> Result<Self, Error> {
-let mut tx = ctx.tx_lock().await;
-let az = tx.get_db_analyzer(opt.ns()?, opt.db()?, az).await?;
-let res =
-Self::with_analyzer(ctx.get_index_stores(), &mut tx, az, index_key_base, p, tt).await;
-drop(tx);
-res
+let tx = ctx.tx();
+// TODO: @emmanuel-keller we probably don't need to clone the value here
+let az = tx.get_db_analyzer(opt.ns()?, opt.db()?, az).await?.as_ref().to_owned();
+Self::with_analyzer(ctx.get_index_stores(), &tx, az, index_key_base, p, tt).await
 }
 async fn with_analyzer(
 ixs: &IndexStores,
-run: &mut kvs::Transaction,
+txn: &Transaction,
 az: DefineAnalyzerStatement,
 index_key_base: IndexKeyBase,
 p: &SearchParams,
 tt: TransactionType,
 ) -> Result<Self, Error> {
 let state_key: Key = index_key_base.new_bs_key();
-let state: State = if let Some(val) = run.get(state_key.clone()).await? {
+let state: State = if let Some(val) = txn.get(state_key.clone()).await? {
 State::try_from_val(val)?
 } else {
 State::default()
 };
 let doc_ids = Arc::new(RwLock::new(
-DocIds::new(ixs, run, tt, index_key_base.clone(), p.doc_ids_order, p.doc_ids_cache)
+DocIds::new(ixs, txn, tt, index_key_base.clone(), p.doc_ids_order, p.doc_ids_cache)
 .await?,
 ));
 let doc_lengths = Arc::new(RwLock::new(
 DocLengths::new(
 ixs,
-run,
+txn,
 index_key_base.clone(),
 p.doc_lengths_order,
 tt,
@@ -142,11 +140,11 @@ impl FtIndex {
 .await?,
 ));
 let postings = Arc::new(RwLock::new(
-Postings::new(ixs, run, index_key_base.clone(), p.postings_order, tt, p.postings_cache)
+Postings::new(ixs, txn, index_key_base.clone(), p.postings_order, tt, p.postings_cache)
 .await?,
 ));
 let terms = Arc::new(RwLock::new(
-Terms::new(ixs, run, index_key_base.clone(), p.terms_order, tt, p.terms_cache).await?,
+Terms::new(ixs, txn, index_key_base.clone(), p.terms_order, tt, p.terms_cache).await?,
 ));
 let termdocs = TermDocs::new(index_key_base.clone());
 let offsets = Offsets::new(index_key_base.clone());
@@ -194,17 +192,17 @@ impl FtIndex {
 ctx: &Context<'_>,
 rid: &Thing,
 ) -> Result<(), Error> {
-let mut tx = ctx.tx_lock().await;
+let tx = ctx.tx();
 // Extract and remove the doc_id (if any)
 let mut doc_ids = self.doc_ids.write().await;
-let doc_id = doc_ids.remove_doc(&mut tx, rid.into()).await?;
+let doc_id = doc_ids.remove_doc(&tx, rid.into()).await?;
 drop(doc_ids);
 if let Some(doc_id) = doc_id {
 self.state.doc_count -= 1;

 // Remove the doc length
 let mut doc_lengths = self.doc_lengths.write().await;
-let dl = doc_lengths.remove_doc_length(&mut tx, doc_id).await?;
+let dl = doc_lengths.remove_doc_length(&tx, doc_id).await?;
 drop(doc_lengths);
 if let Some(doc_lengths) = dl {
 self.state.total_docs_lengths -= doc_lengths as u128;
@@ -217,11 +215,11 @@ impl FtIndex {
 let mut p = self.postings.write().await;
 let mut t = self.terms.write().await;
 for term_id in &term_list {
-p.remove_posting(&mut tx, term_id, doc_id).await?;
+p.remove_posting(&tx, term_id, doc_id).await?;
 // if the term is not present in any document in the index, we can remove it
-let doc_count = self.term_docs.remove_doc(&mut tx, term_id, doc_id).await?;
+let doc_count = self.term_docs.remove_doc(&tx, term_id, doc_id).await?;
 if doc_count == 0 {
-t.remove_term_id(&mut tx, term_id).await?;
+t.remove_term_id(&tx, term_id).await?;
 }
 }
 drop(p);
@@ -230,7 +228,7 @@ impl FtIndex {
 if self.highlighting {
 for term_id in term_list {
 // TODO?: Removal can be done with a prefix on doc_id
-self.offsets.remove_offsets(&mut tx, doc_id, term_id).await?;
+self.offsets.remove_offsets(&tx, doc_id, term_id).await?;
 }
 }
 }
@@ -248,11 +246,10 @@ impl FtIndex {
 content: Vec<Value>,
 ) -> Result<(), Error> {
 // Resolve the doc_id
-let mut tx = ctx.tx_lock().await;
+let tx = ctx.tx();
 let mut doc_ids = self.doc_ids.write().await;
-let resolved = doc_ids.resolve_doc_id(&mut tx, rid.into()).await?;
+let resolved = doc_ids.resolve_doc_id(&tx, rid.into()).await?;
 drop(doc_ids);
-drop(tx);
 let doc_id = *resolved.doc_id();

 // Extract the doc_lengths, terms en frequencies (and offset)
@@ -272,14 +269,14 @@ impl FtIndex {
 };

 // Set the doc length
-let mut tx = ctx.tx_lock().await;
+let tx = ctx.tx();
 let mut dl = self.doc_lengths.write().await;
 if resolved.was_existing() {
-if let Some(old_doc_length) = dl.get_doc_length_mut(&mut tx, doc_id).await? {
+if let Some(old_doc_length) = dl.get_doc_length_mut(&tx, doc_id).await? {
 self.state.total_docs_lengths -= old_doc_length as u128;
 }
 }
-dl.set_doc_length(&mut tx, doc_id, doc_length).await?;
+dl.set_doc_length(&tx, doc_id, doc_length).await?;
 drop(dl);

 // Retrieve the existing terms for this document (if any)
@@ -294,22 +291,22 @@ impl FtIndex {
 let mut terms_ids = RoaringTreemap::default();
 let mut p = self.postings.write().await;
 for (term_id, term_freq) in terms_and_frequencies {
-p.update_posting(&mut tx, term_id, doc_id, term_freq).await?;
+p.update_posting(&tx, term_id, doc_id, term_freq).await?;
 if let Some(old_term_ids) = &mut old_term_ids {
 old_term_ids.remove(term_id);
 }
-self.term_docs.set_doc(&mut tx, term_id, doc_id).await?;
+self.term_docs.set_doc(&tx, term_id, doc_id).await?;
 terms_ids.insert(term_id);
 }

 // Remove any remaining postings
 if let Some(old_term_ids) = &old_term_ids {
 for old_term_id in old_term_ids {
-p.remove_posting(&mut tx, old_term_id, doc_id).await?;
-let doc_count = self.term_docs.remove_doc(&mut tx, old_term_id, doc_id).await?;
+p.remove_posting(&tx, old_term_id, doc_id).await?;
+let doc_count = self.term_docs.remove_doc(&tx, old_term_id, doc_id).await?;
 // if the term does not have anymore postings, we can remove the term
 if doc_count == 0 {
-t.remove_term_id(&mut tx, old_term_id).await?;
+t.remove_term_id(&tx, old_term_id).await?;
 }
 }
 }
@@ -321,14 +318,14 @@ impl FtIndex {
 if let Some(ofs) = offsets {
 if !ofs.is_empty() {
 for (tid, or) in ofs {
-self.offsets.set_offsets(&mut tx, doc_id, tid, or).await?;
+self.offsets.set_offsets(&tx, doc_id, tid, or).await?;
 }
 }
 }
 // In case of an update, w remove the offset for the terms that does not exist anymore
 if let Some(old_term_ids) = old_term_ids {
 for old_term_id in old_term_ids {
-self.offsets.remove_offsets(&mut tx, doc_id, old_term_id).await?;
+self.offsets.remove_offsets(&tx, doc_id, old_term_id).await?;
 }
 }
 }
@@ -365,7 +362,7 @@ impl FtIndex {

 pub(super) async fn get_terms_docs(
 &self,
-tx: &mut kvs::Transaction,
+tx: &Transaction,
 terms: &TermsList,
 ) -> Result<Vec<Option<(TermId, RoaringTreemap)>>, Error> {
 let mut terms_docs = Vec::with_capacity(terms.len());
@@ -424,7 +421,7 @@ impl FtIndex {

 pub(super) async fn highlight(
 &self,
-tx: &mut kvs::Transaction,
+tx: &Transaction,
 thg: &Thing,
 terms: &[Option<(TermId, TermLen)>],
 hlp: HighlightParams,
@@ -450,7 +447,7 @@ impl FtIndex {

 pub(super) async fn extract_offsets(
 &self,
-tx: &mut kvs::Transaction,
+tx: &Transaction,
 thg: &Thing,
 terms: &[Option<(TermId, u32)>],
 partial: bool,
@@ -473,25 +470,22 @@ impl FtIndex {
 }

 pub(crate) async fn statistics(&self, ctx: &Context<'_>) -> Result<FtStatistics, Error> {
-// TODO do parallel execution
-let mut run = ctx.tx_lock().await;
+let txn = ctx.tx();
 let res = FtStatistics {
-doc_ids: self.doc_ids.read().await.statistics(&mut run).await?,
-terms: self.terms.read().await.statistics(&mut run).await?,
-doc_lengths: self.doc_lengths.read().await.statistics(&mut run).await?,
-postings: self.postings.read().await.statistics(&mut run).await?,
+doc_ids: self.doc_ids.read().await.statistics(&txn).await?,
+terms: self.terms.read().await.statistics(&txn).await?,
+doc_lengths: self.doc_lengths.read().await.statistics(&txn).await?,
+postings: self.postings.read().await.statistics(&txn).await?,
 };
-drop(run);
 Ok(res)
 }

 pub(crate) async fn finish(&self, ctx: &Context<'_>) -> Result<(), Error> {
-let mut run = ctx.tx_lock().await;
-self.doc_ids.write().await.finish(&mut run).await?;
-self.doc_lengths.write().await.finish(&mut run).await?;
-self.postings.write().await.finish(&mut run).await?;
-self.terms.write().await.finish(&mut run).await?;
-drop(run);
+let txn = ctx.tx();
+self.doc_ids.write().await.finish(&txn).await?;
+self.doc_lengths.write().await.finish(&txn).await?;
+self.postings.write().await.finish(&txn).await?;
+self.terms.write().await.finish(&txn).await?;
 Ok(())
 }
 }
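With the shared handle, the full-text index can flush all of its structures and commit in two steps, with none of the old `drop(run)` bookkeeping. A sketch in the style of the test helper that follows below, with error propagation instead of `unwrap` as the only liberty taken:

```rust
use crate::ctx::Context;
use crate::err::Error;
use crate::idx::ft::FtIndex;

// Mirrors the pub(super) finish helper in the tests of this file.
async fn finish_and_commit(ctx: &Context<'_>, fti: FtIndex) -> Result<(), Error> {
    // Flush doc ids, doc lengths, postings and terms through ctx's transaction
    fti.finish(ctx).await?;
    // Then commit that transaction exactly once
    ctx.tx().commit().await?;
    Ok(())
}
```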
@ -518,10 +512,7 @@ impl HitsIterator {
|
||||||
self.iter.size_hint().0
|
self.iter.size_hint().0
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn next(
|
pub(crate) async fn next(&mut self, tx: &Transaction) -> Result<Option<(Thing, DocId)>, Error> {
|
||||||
&mut self,
|
|
||||||
tx: &mut kvs::Transaction,
|
|
||||||
) -> Result<Option<(Thing, DocId)>, Error> {
|
|
||||||
let di = self.doc_ids.read().await;
|
let di = self.doc_ids.read().await;
|
||||||
for doc_id in self.iter.by_ref() {
|
for doc_id in self.iter.by_ref() {
|
||||||
if let Some(doc_key) = di.get_doc_key(tx, doc_id).await? {
|
if let Some(doc_key) = di.get_doc_key(tx, doc_id).await? {
|
||||||
|
@ -546,7 +537,6 @@ mod tests {
|
||||||
use crate::sql::statements::{DefineAnalyzerStatement, DefineStatement};
|
use crate::sql::statements::{DefineAnalyzerStatement, DefineStatement};
|
||||||
use crate::sql::{Array, Statement, Thing, Value};
|
use crate::sql::{Array, Statement, Thing, Value};
|
||||||
use crate::syn;
|
use crate::syn;
|
||||||
use futures::lock::Mutex;
|
|
||||||
use reblessive::tree::Stk;
|
use reblessive::tree::Stk;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
@ -558,11 +548,11 @@ mod tests {
|
||||||
scr: BM25Scorer,
|
scr: BM25Scorer,
|
||||||
e: Vec<(&Thing, Option<Score>)>,
|
e: Vec<(&Thing, Option<Score>)>,
|
||||||
) {
|
) {
|
||||||
let mut tx = ctx.tx_lock().await;
|
let tx = ctx.tx();
|
||||||
if let Some(mut hits) = hits {
|
if let Some(mut hits) = hits {
|
||||||
let mut map = HashMap::new();
|
let mut map = HashMap::new();
|
||||||
while let Some((k, d)) = hits.next(&mut tx).await.unwrap() {
|
while let Some((k, d)) = hits.next(&tx).await.unwrap() {
|
||||||
let s = scr.score(&mut tx, d).await.unwrap();
|
let s = scr.score(&tx, d).await.unwrap();
|
||||||
map.insert(k, s);
|
map.insert(k, s);
|
||||||
}
|
}
|
||||||
assert_eq!(map.len(), e.len());
|
assert_eq!(map.len(), e.len());
|
||||||
|
@@ -572,7 +562,6 @@ mod tests {
 		} else {
 			panic!("hits is none");
 		}
-		drop(tx);
 	}

 	async fn search(
@@ -584,9 +573,8 @@ mod tests {
 	) -> (Option<HitsIterator>, BM25Scorer) {
 		let (term_list, _) =
 			fti.extract_querying_terms(stk, ctx, opt, qs.to_string()).await.unwrap();
-		let mut tx = ctx.tx_lock().await;
-		let td = Arc::new(fti.get_terms_docs(&mut tx, &term_list).await.unwrap());
-		drop(tx);
+		let tx = ctx.tx();
+		let td = Arc::new(fti.get_terms_docs(&tx, &term_list).await.unwrap());
 		let scr = fti.new_scorer(td.clone()).unwrap().unwrap();
 		let hits = fti.new_hits_iterator(td).unwrap();
 		(hits, scr)
@@ -600,10 +588,10 @@ mod tests {
 		hl: bool,
 	) -> (Context<'a>, Options, FtIndex) {
 		let mut ctx = Context::default();
-		let mut tx = ds.transaction(tt, Optimistic).await.unwrap();
+		let tx = ds.transaction(tt, Optimistic).await.unwrap();
 		let fti = FtIndex::with_analyzer(
 			ctx.get_index_stores(),
-			&mut tx,
+			&tx,
 			az.clone(),
 			IndexKeyBase::default(),
 			&SearchParams {
@@ -623,14 +611,14 @@ mod tests {
 		)
 		.await
 		.unwrap();
-		let txn = Arc::new(Mutex::new(tx));
-		ctx.set_transaction_mut(txn);
+		let txn = Arc::new(tx);
+		ctx.set_transaction(txn);
 		(ctx, Options::default(), fti)
 	}

 	pub(super) async fn finish(ctx: &Context<'_>, fti: FtIndex) {
 		fti.finish(ctx).await.unwrap();
-		let mut tx = ctx.tx_lock().await;
+		let tx = ctx.tx();
 		tx.commit().await.unwrap();
 	}

@@ -19,7 +19,7 @@ impl Offsets {

 	pub(super) async fn set_offsets(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		doc_id: DocId,
 		term_id: TermId,
 		offsets: OffsetRecords,
@@ -32,7 +32,7 @@ impl Offsets {

 	pub(super) async fn get_offsets(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		doc_id: DocId,
 		term_id: TermId,
 	) -> Result<Option<OffsetRecords>, Error> {
@@ -47,7 +47,7 @@ impl Offsets {

 	pub(super) async fn remove_offsets(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		doc_id: DocId,
 		term_id: TermId,
 	) -> Result<(), Error> {
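Every signature in the hunks above and below drops `&mut Transaction` in favour of `&Transaction`, which implies the refactored transaction synchronises its own mutable state internally rather than relying on Rust's exclusive borrow. A hedged sketch of that idea using a standard-library mutex (the real type presumably organises its internals differently):

```rust
use std::collections::BTreeMap;
use std::sync::Mutex;

// A transaction that can be mutated through a shared reference: the
// pending writes live behind an internal lock (interior mutability).
struct Transaction {
    buffer: Mutex<BTreeMap<Vec<u8>, Vec<u8>>>,
}

impl Transaction {
    fn new() -> Self {
        Self {
            buffer: Mutex::new(BTreeMap::new()),
        }
    }

    // Note `&self`, not `&mut self`: two index structures holding the same
    // `&Transaction` can both stage writes without a mutable borrow.
    fn set(&self, key: Vec<u8>, val: Vec<u8>) {
        self.buffer.lock().unwrap().insert(key, val);
    }

    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.buffer.lock().unwrap().get(key).cloned()
    }
}

fn main() {
    let tx = Transaction::new();
    let (a, b) = (&tx, &tx); // both borrows are shared, so this compiles
    a.set(b"bd".to_vec(), b"offsets".to_vec());
    b.set(b"bp".to_vec(), b"postings".to_vec());
    assert!(tx.get(b"bd").is_some());
}
```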
@@ -20,7 +20,7 @@ pub(super) struct Postings {
 impl Postings {
 	pub(super) async fn new(
 		ixs: &IndexStores,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		index_key_base: IndexKeyBase,
 		order: u32,
 		tt: TransactionType,
@@ -51,7 +51,7 @@ impl Postings {

 	pub(super) async fn update_posting(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		term_id: TermId,
 		doc_id: DocId,
 		term_freq: TermFrequency,
@@ -62,7 +62,7 @@ impl Postings {

 	pub(super) async fn get_term_frequency(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		term_id: TermId,
 		doc_id: DocId,
 	) -> Result<Option<TermFrequency>, Error> {
@@ -72,7 +72,7 @@ impl Postings {

 	pub(super) async fn remove_posting(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		term_id: TermId,
 		doc_id: DocId,
 	) -> Result<Option<TermFrequency>, Error> {
@@ -80,11 +80,11 @@ impl Postings {
 		self.btree.delete(tx, &mut self.store, key).await
 	}

-	pub(super) async fn statistics(&self, tx: &mut Transaction) -> Result<BStatistics, Error> {
+	pub(super) async fn statistics(&self, tx: &Transaction) -> Result<BStatistics, Error> {
 		self.btree.statistics(tx, &self.store).await
 	}

-	pub(super) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
+	pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
 		if let Some(new_cache) = self.store.finish(tx).await? {
 			let state = self.btree.inc_generation();
 			tx.set(self.state_key.clone(), state.try_to_val()?).await?;
@@ -106,15 +106,15 @@ mod tests {
 		order: u32,
 		tt: TransactionType,
 	) -> (Transaction, Postings) {
-		let mut tx = ds.transaction(tt, Optimistic).await.unwrap();
-		let p = Postings::new(ds.index_store(), &mut tx, IndexKeyBase::default(), order, tt, 100)
+		let tx = ds.transaction(tt, Optimistic).await.unwrap();
+		let p = Postings::new(ds.index_store(), &tx, IndexKeyBase::default(), order, tt, 100)
 			.await
 			.unwrap();
 		(tx, p)
 	}

-	async fn finish(mut tx: Transaction, mut p: Postings) {
-		p.finish(&mut tx).await.unwrap();
+	async fn finish(tx: Transaction, mut p: Postings) {
+		p.finish(&tx).await.unwrap();
 		tx.commit().await.unwrap();
 	}

@@ -129,33 +129,33 @@ mod tests {
 		let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
 		finish(tx, p).await;

-		let (mut tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
-		assert_eq!(p.statistics(&mut tx).await.unwrap().keys_count, 0);
+		let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
+		assert_eq!(p.statistics(&tx).await.unwrap().keys_count, 0);

 		// Add postings
-		let (mut tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
-		p.update_posting(&mut tx, 1, 2, 3).await.unwrap();
-		p.update_posting(&mut tx, 1, 4, 5).await.unwrap();
+		let (tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
+		p.update_posting(&tx, 1, 2, 3).await.unwrap();
+		p.update_posting(&tx, 1, 4, 5).await.unwrap();
 		finish(tx, p).await;

-		let (mut tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
-		assert_eq!(p.statistics(&mut tx).await.unwrap().keys_count, 2);
+		let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
+		assert_eq!(p.statistics(&tx).await.unwrap().keys_count, 2);

-		assert_eq!(p.get_term_frequency(&mut tx, 1, 2).await.unwrap(), Some(3));
-		assert_eq!(p.get_term_frequency(&mut tx, 1, 4).await.unwrap(), Some(5));
+		assert_eq!(p.get_term_frequency(&tx, 1, 2).await.unwrap(), Some(3));
+		assert_eq!(p.get_term_frequency(&tx, 1, 4).await.unwrap(), Some(5));

-		let (mut tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
+		let (tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await;
 		// Check removal of doc 2
-		assert_eq!(p.remove_posting(&mut tx, 1, 2).await.unwrap(), Some(3));
+		assert_eq!(p.remove_posting(&tx, 1, 2).await.unwrap(), Some(3));
 		// Again the same
-		assert_eq!(p.remove_posting(&mut tx, 1, 2).await.unwrap(), None);
+		assert_eq!(p.remove_posting(&tx, 1, 2).await.unwrap(), None);
 		// Remove doc 4
-		assert_eq!(p.remove_posting(&mut tx, 1, 4).await.unwrap(), Some(5));
+		assert_eq!(p.remove_posting(&tx, 1, 4).await.unwrap(), Some(5));
 		finish(tx, p).await;

 		// The underlying b-tree should be empty now
-		let (mut tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
-		assert_eq!(p.statistics(&mut tx).await.unwrap().keys_count, 0);
+		let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await;
+		assert_eq!(p.statistics(&tx).await.unwrap().keys_count, 0);
 	}
 }
 }
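The test helpers above now take the `Transaction` by value (`async fn finish(tx: Transaction, mut p: Postings)`): since callers only ever hold `&Transaction` while writing, the owned value stays free to be consumed by `commit()` at the end. A compressed, synchronous sketch of that create / mutate / finish / commit lifecycle, with toy types standing in for the real `Transaction` and `Postings`:

```rust
// Toy stand-ins illustrating the flow used by the tests above;
// none of these are the real SurrealDB types.
struct Transaction;

impl Transaction {
    // Taking `self` by value means a committed transaction cannot be reused.
    fn commit(self) {}
}

struct Postings;

impl Postings {
    fn update_posting(&mut self, _tx: &Transaction, _term: u64, _doc: u64) {}
    fn finish(&mut self, _tx: &Transaction) {}
}

fn finish(tx: Transaction, mut p: Postings) {
    p.finish(&tx); // flush caches through a shared borrow...
    tx.commit(); // ...then consume the owned transaction
}

fn main() {
    let tx = Transaction;
    let mut p = Postings;
    p.update_posting(&tx, 1, 2);
    finish(tx, p);
}
```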
@@ -40,7 +40,7 @@ impl BM25Scorer {

 	async fn term_score(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		doc_id: DocId,
 		term_doc_count: DocLength,
 		term_frequency: TermFrequency,
@@ -53,7 +53,7 @@ impl BM25Scorer {

 	pub(crate) async fn score(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		doc_id: DocId,
 	) -> Result<Option<Score>, Error> {
 		let mut sc = 0.0;
@@ -22,7 +22,7 @@ impl TermDocs {

 	pub(super) async fn set_doc(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		term_id: TermId,
 		doc_id: DocId,
 	) -> Result<(), Error> {
@@ -38,7 +38,7 @@ impl TermDocs {

 	pub(super) async fn get_docs(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		term_id: TermId,
 	) -> Result<Option<RoaringTreemap>, Error> {
 		let key = self.index_key_base.new_bc_key(term_id);
@@ -52,7 +52,7 @@ impl TermDocs {

 	pub(super) async fn remove_doc(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		term_id: TermId,
 		doc_id: DocId,
 	) -> Result<DocLength, Error> {
@@ -24,7 +24,7 @@ pub(in crate::idx) struct Terms {
 impl Terms {
 	pub(super) async fn new(
 		ixs: &IndexStores,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		index_key_base: IndexKeyBase,
 		default_btree_order: u32,
 		tt: TransactionType,
@@ -74,7 +74,7 @@ impl Terms {

 	pub(super) async fn resolve_term_id(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		term: &str,
 	) -> Result<TermId, Error> {
 		let term_key = term.into();
@@ -91,7 +91,7 @@ impl Terms {

 	pub(super) async fn get_term_id(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		term: &str,
 	) -> Result<Option<TermId>, Error> {
 		self.btree.search(tx, &self.store, &term.into()).await
@@ -99,7 +99,7 @@ impl Terms {

 	pub(super) async fn remove_term_id(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		term_id: TermId,
 	) -> Result<(), Error> {
 		let term_id_key = self.index_key_base.new_bu_key(term_id);
@@ -117,11 +117,11 @@ impl Terms {
 		Ok(())
 	}

-	pub(super) async fn statistics(&self, tx: &mut Transaction) -> Result<BStatistics, Error> {
+	pub(super) async fn statistics(&self, tx: &Transaction) -> Result<BStatistics, Error> {
 		self.btree.statistics(tx, &self.store).await
 	}

-	pub(super) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
+	pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
 		if let Some(new_cache) = self.store.finish(tx).await? {
 			let btree = self.btree.inc_generation().clone();
 			let state = State {
@@ -253,15 +253,15 @@ mod tests {
 		order: u32,
 		tt: TransactionType,
 	) -> (Transaction, Terms) {
-		let mut tx = ds.transaction(tt, Optimistic).await.unwrap();
-		let t = Terms::new(ds.index_store(), &mut tx, IndexKeyBase::default(), order, tt, 100)
+		let tx = ds.transaction(tt, Optimistic).await.unwrap();
+		let t = Terms::new(ds.index_store(), &tx, IndexKeyBase::default(), order, tt, 100)
 			.await
 			.unwrap();
 		(tx, t)
 	}

-	async fn finish(mut tx: Transaction, mut t: Terms) {
-		t.finish(&mut tx).await.unwrap();
+	async fn finish(tx: Transaction, mut t: Terms) {
+		t.finish(&tx).await.unwrap();
 		tx.commit().await.unwrap();
 	}

@@ -279,43 +279,43 @@ mod tests {

 		// Resolve a first term
 		{
-			let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
-			assert_eq!(t.resolve_term_id(&mut tx, "C").await.unwrap(), 0);
+			let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
+			assert_eq!(t.resolve_term_id(&tx, "C").await.unwrap(), 0);
 			finish(tx, t).await;
-			let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
-			assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 1);
+			let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
+			assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 1);
 		}

 		// Resolve a second term
 		{
-			let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
-			assert_eq!(t.resolve_term_id(&mut tx, "D").await.unwrap(), 1);
+			let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
+			assert_eq!(t.resolve_term_id(&tx, "D").await.unwrap(), 1);
 			finish(tx, t).await;
-			let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
-			assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 2);
+			let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
+			assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 2);
 		}

 		// Resolve two existing terms with new frequencies
 		{
-			let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
-			assert_eq!(t.resolve_term_id(&mut tx, "C").await.unwrap(), 0);
-			assert_eq!(t.resolve_term_id(&mut tx, "D").await.unwrap(), 1);
+			let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
+			assert_eq!(t.resolve_term_id(&tx, "C").await.unwrap(), 0);
+			assert_eq!(t.resolve_term_id(&tx, "D").await.unwrap(), 1);
 			finish(tx, t).await;

-			let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
-			assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 2);
+			let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
+			assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 2);
 		}

 		// Resolve one existing terms and two new terms
 		{
-			let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
-			assert_eq!(t.resolve_term_id(&mut tx, "A").await.unwrap(), 2);
-			assert_eq!(t.resolve_term_id(&mut tx, "C").await.unwrap(), 0);
-			assert_eq!(t.resolve_term_id(&mut tx, "E").await.unwrap(), 3);
+			let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
+			assert_eq!(t.resolve_term_id(&tx, "A").await.unwrap(), 2);
+			assert_eq!(t.resolve_term_id(&tx, "C").await.unwrap(), 0);
+			assert_eq!(t.resolve_term_id(&tx, "E").await.unwrap(), 3);
 			finish(tx, t).await;

-			let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
-			assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 4);
+			let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
+			assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 4);
 		}
 	}

@@ -326,38 +326,38 @@ mod tests {
 		let ds = Datastore::new("memory").await.unwrap();

 		{
-			let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
+			let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;

 			// Check removing an non-existing term id returns None
-			assert!(t.remove_term_id(&mut tx, 0).await.is_ok());
+			assert!(t.remove_term_id(&tx, 0).await.is_ok());

 			// Create few terms
-			t.resolve_term_id(&mut tx, "A").await.unwrap();
-			t.resolve_term_id(&mut tx, "C").await.unwrap();
-			t.resolve_term_id(&mut tx, "E").await.unwrap();
+			t.resolve_term_id(&tx, "A").await.unwrap();
+			t.resolve_term_id(&tx, "C").await.unwrap();
+			t.resolve_term_id(&tx, "E").await.unwrap();
 			finish(tx, t).await;
 		}

 		for term in ["A", "C", "E"] {
-			let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
-			let term_id = t.get_term_id(&mut tx, term).await.unwrap();
+			let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
+			let term_id = t.get_term_id(&tx, term).await.unwrap();

 			if let Some(term_id) = term_id {
-				let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
-				t.remove_term_id(&mut tx, term_id).await.unwrap();
+				let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
+				t.remove_term_id(&tx, term_id).await.unwrap();
 				finish(tx, t).await;

-				let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
-				assert_eq!(t.get_term_id(&mut tx, term).await.unwrap(), None);
+				let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await;
+				assert_eq!(t.get_term_id(&tx, term).await.unwrap(), None);
 			} else {
 				panic!("Term ID not found: {}", term);
 			}
 		}

 		// Check id recycling
-		let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
-		assert_eq!(t.resolve_term_id(&mut tx, "B").await.unwrap(), 0);
-		assert_eq!(t.resolve_term_id(&mut tx, "D").await.unwrap(), 1);
+		let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await;
+		assert_eq!(t.resolve_term_id(&tx, "B").await.unwrap(), 0);
+		assert_eq!(t.resolve_term_id(&tx, "D").await.unwrap(), 1);
 		finish(tx, t).await;
 	}

@@ -375,10 +375,10 @@ mod tests {
 	async fn test_resolve_100_docs_with_50_words_one_by_one() {
 		let ds = Datastore::new("memory").await.unwrap();
 		for _ in 0..100 {
-			let (mut tx, mut t) = new_operation(&ds, 100, Write).await;
+			let (tx, mut t) = new_operation(&ds, 100, Write).await;
 			let terms_string = random_term_freq_vec(50);
 			for (term, _) in terms_string {
-				t.resolve_term_id(&mut tx, &term).await.unwrap();
+				t.resolve_term_id(&tx, &term).await.unwrap();
 			}
 			finish(tx, t).await;
 		}
@@ -388,11 +388,11 @@ mod tests {
 	async fn test_resolve_100_docs_with_50_words_batch_of_10() {
 		let ds = Datastore::new("memory").await.unwrap();
 		for _ in 0..10 {
-			let (mut tx, mut t) = new_operation(&ds, 100, Write).await;
+			let (tx, mut t) = new_operation(&ds, 100, Write).await;
 			for _ in 0..10 {
 				let terms_string = random_term_freq_vec(50);
 				for (term, _) in terms_string {
-					t.resolve_term_id(&mut tx, &term).await.unwrap();
+					t.resolve_term_id(&tx, &term).await.unwrap();
 				}
 			}
 			finish(tx, t).await;
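The `@@ -326,38` test above also documents a behaviour worth calling out: after `A`, `C` and `E` are removed, resolving `B` and `D` hands back ids `0` and `1`, i.e. freed term ids are recycled rather than the counter growing forever. One common way to get that behaviour — a free set consulted before a monotonic counter, always handing out the lowest freed id first — is sketched below; this is an illustration of the observed behaviour, not the actual `Terms` implementation:

```rust
use std::collections::BTreeSet;

// A minimal id allocator that reuses freed ids before minting new ones.
struct IdAllocator {
    next: u64,
    free: BTreeSet<u64>, // ids returned by `remove`, handed out again first
}

impl IdAllocator {
    fn resolve(&mut self) -> u64 {
        if let Some(&id) = self.free.iter().next() {
            self.free.remove(&id);
            id // recycle the lowest freed id
        } else {
            let id = self.next;
            self.next += 1;
            id
        }
    }

    fn remove(&mut self, id: u64) {
        self.free.insert(id);
    }
}

fn main() {
    let mut ids = IdAllocator { next: 0, free: BTreeSet::new() };
    assert_eq!((ids.resolve(), ids.resolve(), ids.resolve()), (0, 1, 2)); // A, C, E
    for id in [0, 1, 2] {
        ids.remove(id); // terms removed one by one
    }
    assert_eq!(ids.resolve(), 0); // "B" gets a recycled id
    assert_eq!(ids.resolve(), 1); // "D" likewise
}
```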
@@ -144,13 +144,12 @@ impl<'a> MTreeChecker<'a> {
 			return Ok(VecDeque::from([]));
 		}
 		let mut result = VecDeque::with_capacity(res.len());
-		let mut tx = self.ctx.tx_lock().await;
+		let txn = self.ctx.tx();
 		for (doc_id, dist) in res {
-			if let Some(key) = doc_ids.get_doc_key(&mut tx, doc_id).await? {
+			if let Some(key) = doc_ids.get_doc_key(&txn, doc_id).await? {
 				result.push_back((key.into(), dist, None));
 			}
 		}
-		drop(tx);
 		Ok(result)
 	}
 }
@@ -186,9 +185,8 @@ impl CheckerCacheEntry {
 		cond: &Cond,
 	) -> Result<Self, Error> {
 		if let Some(rid) = rid {
-			let mut tx = ctx.tx_lock().await;
-			let val = Iterable::fetch_thing(&mut tx, opt, &rid).await?;
-			drop(tx);
+			let txn = ctx.tx();
+			let val = Iterable::fetch_thing(&txn, opt, &rid).await?;
 			if !val.is_none_or_null() {
 				let (value, truthy) = {
 					let cursor_doc = CursorDoc {
@@ -229,9 +227,8 @@ impl<'a> MTreeCondChecker<'a> {
 		match self.cache.entry(doc_id) {
 			Entry::Occupied(e) => Ok(e.get().truthy),
 			Entry::Vacant(e) => {
-				let mut tx = self.ctx.tx_lock().await;
-				let rid = doc_ids.get_doc_key(&mut tx, doc_id).await?.map(|k| k.into());
-				drop(tx);
+				let txn = self.ctx.tx();
+				let rid = doc_ids.get_doc_key(&txn, doc_id).await?.map(|k| k.into());
 				let ent =
 					CheckerCacheEntry::build(stk, self.ctx, self.opt, rid, self.cond.as_ref())
 						.await?;
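In `MTreeCondChecker` above, the truthiness of the condition for each `doc_id` is computed once and memoised through the `HashMap` entry API, so repeated KNN candidates don't re-fetch the same record. The shape of that caching, reduced to a standalone sketch (toy predicate, not the real record fetch and condition evaluation):

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;

struct CondChecker {
    cache: HashMap<u64, bool>, // doc_id -> condition truthiness
}

impl CondChecker {
    // An expensive check (record fetch + condition evaluation in the real
    // code) that we only want to run once per document id.
    fn evaluate(doc_id: u64) -> bool {
        doc_id % 2 == 0 // placeholder predicate
    }

    fn check_truthy(&mut self, doc_id: u64) -> bool {
        match self.cache.entry(doc_id) {
            Entry::Occupied(e) => *e.get(),
            Entry::Vacant(e) => *e.insert(Self::evaluate(doc_id)),
        }
    }
}

fn main() {
    let mut checker = CondChecker { cache: HashMap::new() };
    assert!(checker.check_truthy(4));
    assert!(checker.check_truthy(4)); // second call is a cache hit
    assert!(!checker.check_truthy(3));
}
```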
@@ -175,10 +175,10 @@ impl InnerQueryExecutor {
 					}
 					Entry::Vacant(e) => {
 						let ikb = IndexKeyBase::new(opt.ns()?, opt.db()?, idx_def)?;
-						let mut tx = ctx.tx_lock().await;
+						let tx = ctx.tx();
 						let mt = MTreeIndex::new(
 							ctx.get_index_stores(),
-							&mut tx,
+							&tx,
 							ikb,
 							p,
 							TransactionType::Read,
@@ -563,11 +563,10 @@ impl QueryExecutor {
 		ft: &FtEntry,
 	) -> Result<bool, Error> {
 		let doc_key: Key = thg.into();
-		let mut run = ctx.tx_lock().await;
+		let tx = ctx.tx();
 		let di = ft.0.doc_ids.read().await;
-		let doc_id = di.get_doc_id(&mut run, doc_key).await?;
+		let doc_id = di.get_doc_id(&tx, doc_key).await?;
 		drop(di);
-		drop(run);
 		if let Some(doc_id) = doc_id {
 			let term_goals = ft.0.terms_docs.len();
 			// If there is no terms, it can't be a match
@@ -640,18 +639,10 @@ impl QueryExecutor {
 		doc: &Value,
 	) -> Result<Value, Error> {
 		if let Some((e, ft)) = self.get_ft_entry_and_index(hlp.match_ref()) {
-			let mut run = ctx.tx_lock().await;
+			let tx = ctx.tx();
 			let res = ft
-				.highlight(
-					&mut run,
-					thg,
-					&e.0.query_terms_list,
-					hlp,
-					e.0.index_option.id_ref(),
-					doc,
-				)
+				.highlight(&tx, thg, &e.0.query_terms_list, hlp, e.0.index_option.id_ref(), doc)
 				.await;
-			drop(run);
 			return res;
 		}
 		Ok(Value::None)
@@ -665,9 +656,8 @@ impl QueryExecutor {
 		partial: bool,
 	) -> Result<Value, Error> {
 		if let Some((e, ft)) = self.get_ft_entry_and_index(&match_ref) {
-			let mut run = ctx.tx_lock().await;
-			let res = ft.extract_offsets(&mut run, thg, &e.0.query_terms_list, partial).await;
-			drop(run);
+			let tx = ctx.tx();
+			let res = ft.extract_offsets(&tx, thg, &e.0.query_terms_list, partial).await;
 			return res;
 		}
 		Ok(Value::None)
@@ -682,7 +672,7 @@ impl QueryExecutor {
 	) -> Result<Value, Error> {
 		if let Some(e) = self.get_ft_entry(match_ref) {
 			if let Some(scorer) = &e.0.scorer {
-				let mut run = ctx.tx_lock().await;
+				let tx = ctx.tx();
 				let mut doc_id = if let Some(ir) = ir {
 					ir.doc_id()
 				} else {
@@ -691,17 +681,15 @@ impl QueryExecutor {
 				if doc_id.is_none() {
 					let key: Key = rid.into();
 					let di = e.0.doc_ids.read().await;
-					doc_id = di.get_doc_id(&mut run, key).await?;
+					doc_id = di.get_doc_id(&tx, key).await?;
 					drop(di);
 				}
 				if let Some(doc_id) = doc_id {
-					let score = scorer.score(&mut run, doc_id).await?;
+					let score = scorer.score(&tx, doc_id).await?;
 					if let Some(score) = score {
-						drop(run);
 						return Ok(Value::from(score));
 					}
 				}
-				drop(run);
 			}
 		}
 		Ok(Value::None)
@@ -733,8 +721,8 @@ impl FtEntry {
 		if let Matches(qs, _) = io.op() {
 			let (terms_list, terms_set) =
 				ft.extract_querying_terms(stk, ctx, opt, qs.to_owned()).await?;
-			let mut tx = ctx.tx_lock().await;
-			let terms_docs = Arc::new(ft.get_terms_docs(&mut tx, &terms_list).await?);
+			let tx = ctx.tx();
+			let terms_docs = Arc::new(ft.get_terms_docs(&tx, &terms_list).await?);
 			drop(tx);
 			Ok(Some(Self(Arc::new(Inner {
 				index_option: io,
@@ -6,8 +6,8 @@ use crate::idx::ft::termdocs::TermsDocs;
 use crate::idx::ft::{FtIndex, HitsIterator};
 use crate::idx::planner::plan::RangeValue;
 use crate::key::index::Index;
-use crate::kvs;
-use crate::kvs::{Key, Limit, ScanPage};
+use crate::kvs::Key;
+use crate::kvs::Transaction;
 use crate::sql::statements::DefineIndexStatement;
 use crate::sql::{Array, Ident, Thing, Value};
 use radix_trie::Trie;
@@ -118,20 +118,20 @@ impl ThingIterator {
 	pub(crate) async fn next_batch<B: IteratorBatch>(
 		&mut self,
 		ctx: &Context<'_>,
-		tx: &mut kvs::Transaction,
+		txn: &Transaction,
 		size: u32,
 	) -> Result<B, Error> {
 		match self {
-			Self::IndexEqual(i) => i.next_batch(tx, size).await,
-			Self::UniqueEqual(i) => i.next_batch(tx).await,
-			Self::IndexRange(i) => i.next_batch(tx, size).await,
-			Self::UniqueRange(i) => i.next_batch(tx, size).await,
-			Self::IndexUnion(i) => i.next_batch(ctx, tx, size).await,
-			Self::UniqueUnion(i) => i.next_batch(ctx, tx, size).await,
-			Self::Matches(i) => i.next_batch(ctx, tx, size).await,
+			Self::IndexEqual(i) => i.next_batch(txn, size).await,
+			Self::UniqueEqual(i) => i.next_batch(txn).await,
+			Self::IndexRange(i) => i.next_batch(txn, size).await,
+			Self::UniqueRange(i) => i.next_batch(txn, size).await,
+			Self::IndexUnion(i) => i.next_batch(ctx, txn, size).await,
+			Self::UniqueUnion(i) => i.next_batch(ctx, txn, size).await,
+			Self::Matches(i) => i.next_batch(ctx, txn, size).await,
 			Self::Knn(i) => i.next_batch(ctx, size).await,
-			Self::IndexJoin(i) => Box::pin(i.next_batch(ctx, tx, size)).await,
-			Self::UniqueJoin(i) => Box::pin(i.next_batch(ctx, tx, size)).await,
+			Self::IndexJoin(i) => Box::pin(i.next_batch(ctx, txn, size)).await,
+			Self::UniqueJoin(i) => Box::pin(i.next_batch(ctx, txn, size)).await,
 		}
 	}
 }
@@ -164,7 +164,7 @@ impl IndexEqualThingIterator {
 	}

 	async fn next_scan<B: IteratorBatch>(
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		irf: IteratorRef,
 		beg: &mut Vec<u8>,
 		end: &[u8],
@@ -172,16 +172,7 @@ impl IndexEqualThingIterator {
 	) -> Result<B, Error> {
 		let min = beg.clone();
 		let max = end.to_owned();
-		let res = tx
-			.scan_paged(
-				ScanPage {
-					range: min..max,
-					limit: Limit::Limited(limit),
-				},
-				limit,
-			)
-			.await?;
-		let res = res.values;
+		let res = tx.scan(min..max, limit).await?;
 		if let Some((key, _)) = res.last() {
 			let mut key = key.clone();
 			key.push(0x00);
@@ -194,7 +185,7 @@ impl IndexEqualThingIterator {

 	async fn next_batch<B: IteratorBatch>(
 		&mut self,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 	) -> Result<B, Error> {
 		Self::next_scan(tx, self.irf, &mut self.beg, &self.end, limit).await
@@ -306,21 +297,12 @@ impl IndexRangeThingIterator {

 	async fn next_batch<B: IteratorBatch>(
 		&mut self,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 	) -> Result<B, Error> {
 		let min = self.r.beg.clone();
 		let max = self.r.end.clone();
-		let res = tx
-			.scan_paged(
-				ScanPage {
-					range: min..max,
-					limit: Limit::Limited(limit),
-				},
-				limit,
-			)
-			.await?;
-		let res = res.values;
+		let res = tx.scan(min..max, limit).await?;
 		if let Some((key, _)) = res.last() {
 			self.r.beg.clone_from(key);
 			self.r.beg.push(0x00);
@@ -369,7 +351,7 @@ impl IndexUnionThingIterator {
 	async fn next_batch<B: IteratorBatch>(
 		&mut self,
 		ctx: &Context<'_>,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 	) -> Result<B, Error> {
 		while let Some(r) = &mut self.current {
@@ -423,7 +405,7 @@ impl JoinThingIterator {
 	async fn next_current_remote_batch(
 		&mut self,
 		ctx: &Context<'_>,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 	) -> Result<bool, Error> {
 		while !ctx.is_done() {
@@ -444,7 +426,7 @@ impl JoinThingIterator {
 	async fn next_current_local<F>(
 		&mut self,
 		ctx: &Context<'_>,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 		new_iter: F,
 	) -> Result<bool, Error>
@@ -471,7 +453,7 @@ impl JoinThingIterator {
 	async fn next_batch<F, B: IteratorBatch>(
 		&mut self,
 		ctx: &Context<'_>,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 		new_iter: F,
 	) -> Result<B, Error>
@@ -508,7 +490,7 @@ impl IndexJoinThingIterator {
 	async fn next_batch<B: IteratorBatch>(
 		&mut self,
 		ctx: &Context<'_>,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 	) -> Result<B, Error> {
 		let new_iter = |ns: &str, db: &str, ix_what: &Ident, ix_name: &Ident, value: Value| {
@@ -541,10 +523,7 @@ impl UniqueEqualThingIterator {
 		}
 	}

-	async fn next_batch<B: IteratorBatch>(
-		&mut self,
-		tx: &mut kvs::Transaction,
-	) -> Result<B, Error> {
+	async fn next_batch<B: IteratorBatch>(&mut self, tx: &Transaction) -> Result<B, Error> {
 		if let Some(key) = self.key.take() {
 			if let Some(val) = tx.get(key).await? {
 				let record = (val.into(), self.irf.into(), None);
@@ -612,7 +591,7 @@ impl UniqueRangeThingIterator {

 	async fn next_batch<B: IteratorBatch>(
 		&mut self,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		mut limit: u32,
 	) -> Result<B, Error> {
 		if self.done {
@@ -621,17 +600,9 @@ impl UniqueRangeThingIterator {
 		let min = self.r.beg.clone();
 		let max = self.r.end.clone();
 		limit += 1;
-		let res = tx
-			.scan_paged(
-				ScanPage {
-					range: min..max,
-					limit: Limit::Limited(limit),
-				},
-				limit,
-			)
-			.await?;
-		let mut records = B::with_capacity(res.values.len());
-		for (k, v) in res.values {
+		let res = tx.scan(min..max, limit).await?;
+		let mut records = B::with_capacity(res.len());
+		for (k, v) in res {
 			limit -= 1;
 			if limit == 0 {
 				self.r.beg = k;
@@ -682,7 +653,7 @@ impl UniqueUnionThingIterator {
 	async fn next_batch<B: IteratorBatch>(
 		&mut self,
 		ctx: &Context<'_>,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 	) -> Result<B, Error> {
 		let limit = limit as usize;
@@ -717,7 +688,7 @@ impl UniqueJoinThingIterator {
 	async fn next_batch<B: IteratorBatch>(
 		&mut self,
 		ctx: &Context<'_>,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 	) -> Result<B, Error> {
 		let new_iter = |ns: &str, db: &str, ix_what: &Ident, ix_name: &Ident, value: Value| {
@@ -756,7 +727,7 @@ impl MatchesThingIterator {
 	async fn next_batch<B: IteratorBatch>(
 		&mut self,
 		ctx: &Context<'_>,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		limit: u32,
 	) -> Result<B, Error> {
 		if let Some(hits) = &mut self.hits {
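The iterator hunks above replace `scan_paged(ScanPage { .. })` with a plain `scan(range, limit)` and keep the paging state by hand: after each batch, the range start becomes the last returned key with a `0x00` byte appended — the smallest possible extension — so the next scan resumes strictly after it. A standalone sketch of that keyset-pagination loop over an ordered map; the `scan` function here only mirrors the new transaction API in spirit:

```rust
use std::collections::BTreeMap;
use std::ops::Range;

// Stand-in for `Transaction::scan(range, limit)`.
fn scan(kv: &BTreeMap<Vec<u8>, u64>, r: Range<Vec<u8>>, limit: u32) -> Vec<(Vec<u8>, u64)> {
    kv.range(r)
        .take(limit as usize)
        .map(|(k, v)| (k.clone(), *v))
        .collect()
}

fn main() {
    let kv: BTreeMap<Vec<u8>, u64> =
        (0u8..10).map(|i| (vec![b'k', i], u64::from(i))).collect();

    let mut beg = vec![b'k', 0x00];
    let end = vec![b'k', 0xff];
    let mut total = 0;
    loop {
        let batch = scan(&kv, beg.clone()..end.clone(), 4);
        if batch.is_empty() {
            break;
        }
        total += batch.len();
        // Resume after the last key seen: append 0x00, the smallest suffix.
        beg = batch.last().unwrap().0.clone();
        beg.push(0x00);
    }
    assert_eq!(total, 10);
}
```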
@@ -6,7 +6,7 @@ use crate::idx::planner::executor::{
 };
 use crate::idx::planner::plan::{IndexOperator, IndexOption};
 use crate::idx::planner::rewriter::KnnConditionRewriter;
-use crate::kvs;
+use crate::kvs::Transaction;
 use crate::sql::index::Index;
 use crate::sql::statements::{DefineFieldStatement, DefineIndexStatement};
 use crate::sql::{
@@ -115,7 +115,7 @@ impl<'a> TreeBuilder<'a> {

 	async fn lazy_load_schema_resolver(
 		&mut self,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		table: &Table,
 	) -> Result<(), Error> {
 		if self.schemas.contains_key(table) {
@@ -198,8 +198,8 @@ impl<'a> TreeBuilder<'a> {
 	}

 	async fn resolve_idiom(&mut self, i: &Idiom) -> Result<Node, Error> {
-		let mut tx = self.ctx.tx_lock().await;
-		self.lazy_load_schema_resolver(&mut tx, self.table).await?;
+		let tx = self.ctx.tx();
+		self.lazy_load_schema_resolver(&tx, self.table).await?;

 		// Try to detect if it matches an index
 		if let Some(schema) = self.schemas.get(self.table).cloned() {
@@ -208,12 +208,10 @@ impl<'a> TreeBuilder<'a> {
 				return Ok(Node::IndexedField(i.clone(), irs));
 			}
 			// Try to detect an indexed record field
-			if let Some(ro) = self.resolve_record_field(&mut tx, schema.fields.as_ref(), i).await? {
-				drop(tx);
+			if let Some(ro) = self.resolve_record_field(&tx, schema.fields.as_ref(), i).await? {
 				return Ok(Node::RecordField(i.clone(), ro));
 			}
 		}
-		drop(tx);
 		Ok(Node::NonIndexedField(i.clone()))
 	}

@@ -246,7 +244,7 @@ impl<'a> TreeBuilder<'a> {

 	async fn resolve_record_field(
 		&mut self,
-		tx: &mut kvs::Transaction,
+		tx: &Transaction,
 		fields: &[DefineFieldStatement],
 		idiom: &Idiom,
 	) -> Result<Option<RecordOptions>, Error> {
@@ -544,7 +542,7 @@ struct SchemaCache {
 }

 impl SchemaCache {
-	async fn new(opt: &Options, table: &Table, tx: &mut kvs::Transaction) -> Result<Self, Error> {
+	async fn new(opt: &Options, table: &Table, tx: &Transaction) -> Result<Self, Error> {
 		let indexes = tx.all_tb_indexes(opt.ns()?, opt.db()?, table).await?;
 		let fields = tx.all_tb_fields(opt.ns()?, opt.db()?, table).await?;
 		Ok(Self {
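`SchemaCache::new` above pulls all index and field definitions for a table in one shot, so the tree builder can consult them repeatedly without going back to the transaction. A reduced sketch of that construct-once cache — the definition types here are toys, not the real `DefineIndexStatement`/`DefineFieldStatement`:

```rust
use std::sync::Arc;

// Toy stand-ins for table index and field definitions.
#[derive(Clone)]
struct IndexDef {
    name: String,
}
#[derive(Clone)]
struct FieldDef {
    name: String,
}

struct SchemaCache {
    indexes: Arc<[IndexDef]>,
    fields: Arc<[FieldDef]>,
}

impl SchemaCache {
    // Fetch everything for the table once; later lookups are in-memory.
    fn new(all_indexes: Vec<IndexDef>, all_fields: Vec<FieldDef>) -> Self {
        Self {
            indexes: all_indexes.into(),
            fields: all_fields.into(),
        }
    }

    fn find_index(&self, name: &str) -> Option<&IndexDef> {
        self.indexes.iter().find(|i| i.name == name)
    }
}

fn main() {
    let cache = SchemaCache::new(
        vec![IndexDef { name: "ft_title".into() }],
        vec![FieldDef { name: "title".into() }],
    );
    assert!(cache.find_index("ft_title").is_some());
    assert_eq!(cache.fields.len(), 1);
}
```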
@@ -287,7 +287,7 @@ where

 	pub async fn search(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &BTreeStore<BK>,
 		searched_key: &Key,
 	) -> Result<Option<Payload>, Error> {
@@ -307,7 +307,7 @@ where

 	pub async fn search_mut(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut BTreeStore<BK>,
 		searched_key: &Key,
 	) -> Result<Option<Payload>, Error> {
@@ -329,7 +329,7 @@ where

 	pub async fn insert(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut BTreeStore<BK>,
 		key: Key,
 		payload: Payload,
@@ -366,7 +366,7 @@ where

 	async fn insert_non_full(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut BTreeStore<BK>,
 		node_id: NodeId,
 		key: Key,
@@ -481,7 +481,7 @@ where

 	pub(in crate::idx) async fn delete(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut BTreeStore<BK>,
 		key_to_delete: Key,
 	) -> Result<Option<Payload>, Error> {
@@ -592,7 +592,7 @@ where

 	async fn deleted_from_internal(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut BTreeStore<BK>,
 		keys: &mut BK,
 		children: &mut Vec<NodeId>,
@@ -669,7 +669,7 @@ where

 	async fn find_highest(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut BTreeStore<BK>,
 		node: StoredNode<BTreeNode<BK>>,
 	) -> Result<(Key, Payload), Error> {
@@ -697,7 +697,7 @@ where

 	async fn find_lowest(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut BTreeStore<BK>,
 		node: StoredNode<BTreeNode<BK>>,
 	) -> Result<(Key, Payload), Error> {
@@ -725,7 +725,7 @@ where

 	async fn deleted_traversal(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut BTreeStore<BK>,
 		keys: &mut BK,
 		children: &mut Vec<NodeId>,
@@ -949,7 +949,7 @@ where

 	pub(in crate::idx) async fn statistics(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &BTreeStore<BK>,
 	) -> Result<BStatistics, Error> {
 		let mut stats = BStatistics::default();
@@ -998,7 +998,7 @@ mod tests {
 	};
 	use crate::idx::trees::store::{NodeId, TreeNode, TreeNodeProvider};
 	use crate::idx::VersionedSerdeState;
-	use crate::kvs::{Datastore, Key, LockType::*, ScanPage, Transaction, TransactionType};
+	use crate::kvs::{Datastore, Key, LockType::*, Transaction, TransactionType};
 	use rand::prelude::SliceRandom;
 	use rand::thread_rng;
 	use std::cmp::Ordering;
@@ -1034,7 +1034,7 @@ mod tests {
 	}

 	async fn insertions_test<F, BK>(
-		mut tx: Transaction,
+		tx: Transaction,
 		mut st: BTreeStore<BK>,
 		t: &mut BTree<BK>,
 		samples_size: usize,
@@ -1046,14 +1046,14 @@ mod tests {
 		for i in 0..samples_size {
 			let (key, payload) = sample_provider(i);
 			// Insert the sample
-			t.insert(&mut tx, &mut st, key, payload).await.unwrap();
+			t.insert(&tx, &mut st, key, payload).await.unwrap();
 		}
-		st.finish(&mut tx).await.unwrap();
+		st.finish(&tx).await.unwrap();
 		tx.commit().await.unwrap();
 	}

 	async fn check_insertions<F, BK>(
-		mut tx: Transaction,
+		tx: Transaction,
 		st: BTreeStore<BK>,
 		t: &mut BTree<BK>,
 		samples_size: usize,
@@ -1064,7 +1064,7 @@ mod tests {
 	{
 		for i in 0..samples_size {
 			let (key, payload) = sample_provider(i);
-			assert_eq!(t.search(&mut tx, &st, &key).await.unwrap(), Some(payload));
+			assert_eq!(t.search(&tx, &st, &key).await.unwrap(), Some(payload));
 		}
 		tx.cancel().await.unwrap();
 	}
@@ -1124,9 +1124,9 @@ mod tests {
 		}

 		{
-			let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
+			let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
 			assert_eq!(
-				t.statistics(&mut tx, &st).await.unwrap(),
+				t.statistics(&tx, &st).await.unwrap(),
 				BStatistics {
 					keys_count: 100,
 					max_depth: 3,
@@ -1154,9 +1154,9 @@ mod tests {
 		}

 		{
-			let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
+			let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
 			assert_eq!(
-				t.statistics(&mut tx, &st).await.unwrap(),
+				t.statistics(&tx, &st).await.unwrap(),
 				BStatistics {
 					keys_count: 100,
 					max_depth: 3,
@@ -1188,8 +1188,8 @@ mod tests {
 		}

 		{
-			let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
-			let s = t.statistics(&mut tx, &st).await.unwrap();
+			let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
+			let s = t.statistics(&tx, &st).await.unwrap();
 			assert_eq!(s.keys_count, 100);
 			tx.cancel().await.unwrap();
 		}
@@ -1215,8 +1215,8 @@ mod tests {
 		}

 		{
-			let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
-			let s = t.statistics(&mut tx, &st).await.unwrap();
+			let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
+			let s = t.statistics(&tx, &st).await.unwrap();
 			assert_eq!(s.keys_count, 100);
 			tx.cancel().await.unwrap();
 		}
@@ -1238,9 +1238,9 @@ mod tests {
 		}

 		{
-			let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
+			let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
 			assert_eq!(
-				t.statistics(&mut tx, &st).await.unwrap(),
+				t.statistics(&tx, &st).await.unwrap(),
 				BStatistics {
 					keys_count: 10000,
 					max_depth: 3,
@@ -1267,9 +1267,9 @@ mod tests {
 		}

 		{
-			let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, cache_size).await;
+			let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, cache_size).await;
 			assert_eq!(
-				t.statistics(&mut tx, &st).await.unwrap(),
+				t.statistics(&tx, &st).await.unwrap(),
 				BStatistics {
 					keys_count: 10000,
 					max_depth: 3,
@@ -1309,8 +1309,8 @@ mod tests {
 			.await;
 		}

-		let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
-		let statistics = t.statistics(&mut tx, &st).await.unwrap();
+		let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await;
+		let statistics = t.statistics(&tx, &st).await.unwrap();
 		tx.cancel().await.unwrap();
 		statistics
 	}
@@ -1327,8 +1327,8 @@ mod tests {
 			.await;
 		}

-		let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
-		let statistics = t.statistics(&mut tx, &st).await.unwrap();
+		let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
+		let statistics = t.statistics(&tx, &st).await.unwrap();
 		tx.cancel().await.unwrap();

 		statistics
@@ -1417,28 +1417,25 @@ mod tests {
 		let mut t = BTree::<TrieKeys>::new(BState::new(3));

 		{
-			let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
+			let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
 			for (key, payload) in CLRS_EXAMPLE {
-				t.insert(&mut tx, &mut st, key.into(), payload).await.unwrap();
+				t.insert(&tx, &mut st, key.into(), payload).await.unwrap();
 			}
-			st.finish(&mut tx).await.unwrap();
+			st.finish(&tx).await.unwrap();
 			tx.commit().await.unwrap();
 		}

-		let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
+		let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;

-		let s = t.statistics(&mut tx, &st).await.unwrap();
+		let s = t.statistics(&tx, &st).await.unwrap();
 		assert_eq!(s.keys_count, 23);
 		assert_eq!(s.max_depth, 3);
 		assert_eq!(s.nodes_count, 10);
 		// There should be one record per node
-		assert_eq!(
-			10,
-			tx.scan_paged(ScanPage::from(vec![]..vec![0xf]), 100).await.unwrap().values.len()
-		);
+		assert_eq!(10, tx.scan(vec![]..vec![0xf], 100).await.unwrap().len());

 		let nodes_count = t
-			.inspect_nodes(&mut tx, &mut st, |count, depth, node_id, node| match count {
+			.inspect_nodes(&tx, &mut st, |count, depth, node_id, node| match count {
 				0 => {
 					assert_eq!(depth, 1);
 					assert_eq!(node_id, 7);
@@ -1504,14 +1501,14 @@ mod tests {
 	async fn check_finish_commit<BK>(
 		t: &mut BTree<BK>,
 		mut st: BTreeStore<BK>,
-		mut tx: Transaction,
+		tx: Transaction,
 		mut gen: u64,
 		info: String,
 	) -> Result<u64, Error>
 	where
 		BK: BKeys + Clone + Debug,
 	{
-		if st.finish(&mut tx).await?.is_some() {
+		if st.finish(&tx).await?.is_some() {
 			t.state.generation += 1;
 		}
 		gen += 1;
@@ -1527,9 +1524,9 @@ mod tests {
 		let mut t = BTree::<TrieKeys>::new(BState::new(3));
 		let mut check_generation = 0;
 		{
-			let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
+			let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
 			for (key, payload) in CLRS_EXAMPLE {
-				t.insert(&mut tx, &mut st, key.into(), payload).await?;
+				t.insert(&tx, &mut st, key.into(), payload).await?;
 			}
 			check_generation = check_finish_commit(
 				&mut t,
@@ -1545,10 +1542,10 @@ mod tests {
 		let mut key_count = CLRS_EXAMPLE.len() as u64;
 		for (key, payload) in [("f", 6), ("m", 13), ("g", 7), ("d", 4), ("b", 2)] {
 			{
-				let (mut tx, mut st) =
+				let (tx, mut st) =
 					new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
 				debug!("Delete {}", key);
-				assert_eq!(t.delete(&mut tx, &mut st, key.into()).await?, Some(payload));
+				assert_eq!(t.delete(&tx, &mut st, key.into()).await?, Some(payload));
 				check_generation = check_finish_commit(
 					&mut t,
 					st,
@@ -1560,27 +1557,24 @@ mod tests {
 			}
 			key_count -= 1;
 			{
-				let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
-				let s = t.statistics(&mut tx, &st).await?;
+				let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
+				let s = t.statistics(&tx, &st).await?;
 				assert_eq!(s.keys_count, key_count);
 			}
 		}
 	}

-		let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
+		let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;

-		let s = t.statistics(&mut tx, &st).await.unwrap();
+		let s = t.statistics(&tx, &st).await.unwrap();
 		assert_eq!(s.keys_count, 18);
 		assert_eq!(s.max_depth, 2);
 		assert_eq!(s.nodes_count, 7);
 		// There should be one record per node
-		assert_eq!(
-			7,
-			tx.scan_paged(ScanPage::from(vec![]..vec![0xf]), 100).await.unwrap().values.len()
-		);
+		assert_eq!(7, tx.scan(vec![]..vec![0xf], 100).await.unwrap().len());

 		let nodes_count = t
-			.inspect_nodes(&mut tx, &mut st, |count, depth, node_id, node| match count {
+			.inspect_nodes(&tx, &mut st, |count, depth, node_id, node| match count {
|
.inspect_nodes(&tx, &mut st, |count, depth, node_id, node| match count {
|
||||||
0 => {
|
0 => {
|
||||||
assert_eq!(depth, 1);
|
assert_eq!(depth, 1);
|
||||||
assert_eq!(node_id, 1);
|
assert_eq!(node_id, 1);
|
||||||
|
@ -1639,11 +1633,11 @@ mod tests {
|
||||||
|
|
||||||
let mut check_generation = 0;
|
let mut check_generation = 0;
|
||||||
{
|
{
|
||||||
let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
|
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
|
||||||
for (key, payload) in CLRS_EXAMPLE {
|
for (key, payload) in CLRS_EXAMPLE {
|
||||||
expected_keys.insert(key.to_string(), payload);
|
expected_keys.insert(key.to_string(), payload);
|
||||||
t.insert(&mut tx, &mut st, key.into(), payload).await?;
|
t.insert(&tx, &mut st, key.into(), payload).await?;
|
||||||
let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?;
|
let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?;
|
||||||
assert_eq!(expected_keys, tree_keys);
|
assert_eq!(expected_keys, tree_keys);
|
||||||
}
|
}
|
||||||
check_generation = check_finish_commit(
|
check_generation = check_finish_commit(
|
||||||
|
@ -1657,8 +1651,8 @@ mod tests {
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
|
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
|
||||||
print_tree(&mut tx, &mut st, &t).await;
|
print_tree(&tx, &mut st, &t).await;
|
||||||
tx.cancel().await?;
|
tx.cancel().await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1666,11 +1660,10 @@ mod tests {
|
||||||
debug!("------------------------");
|
debug!("------------------------");
|
||||||
debug!("Delete {}", key);
|
debug!("Delete {}", key);
|
||||||
{
|
{
|
||||||
let (mut tx, mut st) =
|
let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
|
||||||
new_operation_trie(&ds, &t, TransactionType::Write, 20).await;
|
assert!(t.delete(&tx, &mut st, key.into()).await?.is_some());
|
||||||
assert!(t.delete(&mut tx, &mut st, key.into()).await?.is_some());
|
|
||||||
expected_keys.remove(key);
|
expected_keys.remove(key);
|
||||||
let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?;
|
let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?;
|
||||||
assert_eq!(expected_keys, tree_keys);
|
assert_eq!(expected_keys, tree_keys);
|
||||||
check_generation = check_finish_commit(
|
check_generation = check_finish_commit(
|
||||||
&mut t,
|
&mut t,
|
||||||
|
@ -1684,10 +1677,10 @@ mod tests {
|
||||||
|
|
||||||
// Check that every expected keys are still found in the tree
|
// Check that every expected keys are still found in the tree
|
||||||
{
|
{
|
||||||
let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
|
let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
|
||||||
for (key, payload) in &expected_keys {
|
for (key, payload) in &expected_keys {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
t.search(&mut tx, &st, &key.as_str().into()).await?,
|
t.search(&tx, &st, &key.as_str().into()).await?,
|
||||||
Some(*payload),
|
Some(*payload),
|
||||||
"Can't find: {key}",
|
"Can't find: {key}",
|
||||||
)
|
)
|
||||||
|
@ -1696,13 +1689,13 @@ mod tests {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
|
let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await;
|
||||||
let s = t.statistics(&mut tx, &st).await?;
|
let s = t.statistics(&tx, &st).await?;
|
||||||
assert_eq!(s.keys_count, 0);
|
assert_eq!(s.keys_count, 0);
|
||||||
assert_eq!(s.max_depth, 0);
|
assert_eq!(s.max_depth, 0);
|
||||||
assert_eq!(s.nodes_count, 0);
|
assert_eq!(s.nodes_count, 0);
|
||||||
// There should not be any record in the database
|
// There should not be any record in the database
|
||||||
assert_eq!(0, tx.scan_paged(ScanPage::from(vec![]..vec![0xf]), 100).await?.values.len());
|
assert_eq!(0, tx.scan(vec![]..vec![0xf], 100).await.unwrap().len());
|
||||||
tx.cancel().await?;
|
tx.cancel().await?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -1829,37 +1822,37 @@ mod tests {
|
||||||
];
|
];
|
||||||
let mut keys = BTreeMap::new();
|
let mut keys = BTreeMap::new();
|
||||||
{
|
{
|
||||||
let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
|
let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
|
||||||
for term in terms {
|
for term in terms {
|
||||||
t.insert(&mut tx, &mut st, term.into(), 0).await?;
|
t.insert(&tx, &mut st, term.into(), 0).await?;
|
||||||
keys.insert(term.to_string(), 0);
|
keys.insert(term.to_string(), 0);
|
||||||
let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?;
|
let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?;
|
||||||
assert_eq!(keys, tree_keys);
|
assert_eq!(keys, tree_keys);
|
||||||
}
|
}
|
||||||
st.finish(&mut tx).await?;
|
st.finish(&tx).await?;
|
||||||
tx.commit().await?;
|
tx.commit().await?;
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Read, 100).await;
|
let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Read, 100).await;
|
||||||
print_tree(&mut tx, &mut st, &t).await;
|
print_tree(&tx, &mut st, &t).await;
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
|
let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
|
||||||
for term in terms {
|
for term in terms {
|
||||||
debug!("Delete {term}");
|
debug!("Delete {term}");
|
||||||
t.delete(&mut tx, &mut st, term.into()).await?;
|
t.delete(&tx, &mut st, term.into()).await?;
|
||||||
print_tree_mut(&mut tx, &mut st, &t).await;
|
print_tree_mut(&tx, &mut st, &t).await;
|
||||||
keys.remove(term);
|
keys.remove(term);
|
||||||
let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?;
|
let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?;
|
||||||
assert_eq!(keys, tree_keys);
|
assert_eq!(keys, tree_keys);
|
||||||
}
|
}
|
||||||
st.finish(&mut tx).await?;
|
st.finish(&tx).await?;
|
||||||
tx.commit().await?;
|
tx.commit().await?;
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
|
let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await;
|
||||||
assert_eq!(check_btree_properties(&t, &mut tx, &mut st).await?.0, 0);
|
assert_eq!(check_btree_properties(&t, &tx, &mut st).await?.0, 0);
|
||||||
st.finish(&mut tx).await?;
|
st.finish(&tx).await?;
|
||||||
tx.cancel().await?;
|
tx.cancel().await?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -1867,7 +1860,7 @@ mod tests {
|
||||||
|
|
||||||
async fn check_btree_properties<BK>(
|
async fn check_btree_properties<BK>(
|
||||||
t: &BTree<BK>,
|
t: &BTree<BK>,
|
||||||
tx: &mut Transaction,
|
tx: &Transaction,
|
||||||
st: &mut BTreeStore<BK>,
|
st: &mut BTreeStore<BK>,
|
||||||
) -> Result<(usize, BTreeMap<String, Payload>), Error>
|
) -> Result<(usize, BTreeMap<String, Payload>), Error>
|
||||||
where
|
where
|
||||||
|
@ -1919,7 +1912,7 @@ mod tests {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn print_tree<BK>(tx: &mut Transaction, st: &mut BTreeStore<BK>, t: &BTree<BK>)
|
async fn print_tree<BK>(tx: &Transaction, st: &mut BTreeStore<BK>, t: &BTree<BK>)
|
||||||
where
|
where
|
||||||
BK: BKeys + Debug + Clone,
|
BK: BKeys + Debug + Clone,
|
||||||
{
|
{
|
||||||
|
@ -1932,7 +1925,7 @@ mod tests {
|
||||||
debug!("----------------------------------");
|
debug!("----------------------------------");
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn print_tree_mut<BK>(tx: &mut Transaction, st: &mut BTreeStore<BK>, t: &BTree<BK>)
|
async fn print_tree_mut<BK>(tx: &Transaction, st: &mut BTreeStore<BK>, t: &BTree<BK>)
|
||||||
where
|
where
|
||||||
BK: BKeys + Debug + Clone,
|
BK: BKeys + Debug + Clone,
|
||||||
{
|
{
|
||||||
|
@ -1967,7 +1960,7 @@ mod tests {
|
||||||
/// This is for debugging
|
/// This is for debugging
|
||||||
async fn inspect_nodes<F>(
|
async fn inspect_nodes<F>(
|
||||||
&self,
|
&self,
|
||||||
tx: &mut Transaction,
|
tx: &Transaction,
|
||||||
st: &mut BTreeStore<BK>,
|
st: &mut BTreeStore<BK>,
|
||||||
inspect_func: F,
|
inspect_func: F,
|
||||||
) -> Result<usize, Error>
|
) -> Result<usize, Error>
|
||||||
|
@ -1996,7 +1989,7 @@ mod tests {
|
||||||
/// This is for debugging
|
/// This is for debugging
|
||||||
async fn inspect_nodes_mut<F>(
|
async fn inspect_nodes_mut<F>(
|
||||||
&self,
|
&self,
|
||||||
tx: &mut Transaction,
|
tx: &Transaction,
|
||||||
st: &mut BTreeStore<BK>,
|
st: &mut BTreeStore<BK>,
|
||||||
mut inspect_func: F,
|
mut inspect_func: F,
|
||||||
) -> Result<usize, Error>
|
) -> Result<usize, Error>
|
||||||
|
|
|
@@ -48,16 +48,16 @@ struct MTreeSearchContext<'a> {
 impl MTreeIndex {
 	pub async fn new(
 		ixs: &IndexStores,
-		tx: &mut Transaction,
+		txn: &Transaction,
 		ikb: IndexKeyBase,
 		p: &MTreeParams,
 		tt: TransactionType,
 	) -> Result<Self, Error> {
 		let doc_ids = Arc::new(RwLock::new(
-			DocIds::new(ixs, tx, tt, ikb.clone(), p.doc_ids_order, p.doc_ids_cache).await?,
+			DocIds::new(ixs, txn, tt, ikb.clone(), p.doc_ids_order, p.doc_ids_cache).await?,
 		));
 		let state_key = ikb.new_vm_key(None);
-		let state: MState = if let Some(val) = tx.get(state_key.clone()).await? {
+		let state: MState = if let Some(val) = txn.get(state_key.clone()).await? {
 			MState::try_from_val(val)?
 		} else {
 			MState::new(p.capacity)
@@ -81,16 +81,17 @@ impl MTreeIndex {
 			store,
 		})
 	}
 
 	pub async fn index_document(
 		&mut self,
 		stk: &mut Stk,
-		tx: &mut Transaction,
+		txn: &Transaction,
 		rid: &Thing,
 		content: &Vec<Value>,
 	) -> Result<(), Error> {
 		// Resolve the doc_id
 		let mut doc_ids = self.doc_ids.write().await;
-		let resolved = doc_ids.resolve_doc_id(tx, rid.into()).await?;
+		let resolved = doc_ids.resolve_doc_id(txn, rid.into()).await?;
 		let doc_id = *resolved.doc_id();
 		drop(doc_ids);
 		// Index the values
@@ -100,12 +101,37 @@ impl MTreeIndex {
 			let vector = Vector::try_from_value(self.vector_type, self.dim, v)?;
 			vector.check_dimension(self.dim)?;
 			// Insert the vector in the index
-			mtree.insert(stk, tx, &mut self.store, vector.into(), doc_id).await?;
+			mtree.insert(stk, txn, &mut self.store, vector.into(), doc_id).await?;
 		}
 		drop(mtree);
 		Ok(())
 	}
 
+	pub async fn remove_document(
+		&mut self,
+		stk: &mut Stk,
+		txn: &Transaction,
+		rid: &Thing,
+		content: &Vec<Value>,
+	) -> Result<(), Error> {
+		let mut doc_ids = self.doc_ids.write().await;
+		let doc_id = doc_ids.remove_doc(txn, rid.into()).await?;
+		drop(doc_ids);
+		if let Some(doc_id) = doc_id {
+			// Lock the index
+			let mut mtree = self.mtree.write().await;
+			for v in content {
+				// Extract the vector
+				let vector = Vector::try_from_value(self.vector_type, self.dim, v)?;
+				vector.check_dimension(self.dim)?;
+				// Remove the vector
+				mtree.delete(stk, txn, &mut self.store, vector.into(), doc_id).await?;
+			}
+			drop(mtree);
+		}
+		Ok(())
+	}
+
 	pub async fn knn_search(
 		&self,
 		stk: &mut Stk,
@@ -136,38 +162,13 @@ impl MTreeIndex {
 		res
 	}
 
-	pub async fn remove_document(
-		&mut self,
-		stk: &mut Stk,
-		tx: &mut Transaction,
-		rid: &Thing,
-		content: &Vec<Value>,
-	) -> Result<(), Error> {
-		let mut doc_ids = self.doc_ids.write().await;
-		let doc_id = doc_ids.remove_doc(tx, rid.into()).await?;
-		drop(doc_ids);
-		if let Some(doc_id) = doc_id {
-			// Lock the index
-			let mut mtree = self.mtree.write().await;
-			for v in content {
-				// Extract the vector
-				let vector = Vector::try_from_value(self.vector_type, self.dim, v)?;
-				vector.check_dimension(self.dim)?;
-				// Remove the vector
-				mtree.delete(stk, tx, &mut self.store, vector.into(), doc_id).await?;
-			}
-			drop(mtree);
-		}
-		Ok(())
-	}
-
-	pub(crate) async fn statistics(&self, tx: &mut Transaction) -> Result<MtStatistics, Error> {
+	pub(crate) async fn statistics(&self, tx: &Transaction) -> Result<MtStatistics, Error> {
 		Ok(MtStatistics {
 			doc_ids: self.doc_ids.read().await.statistics(tx).await?,
 		})
 	}
 
-	pub async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> {
+	pub async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> {
 		let mut doc_ids = self.doc_ids.write().await;
 		doc_ids.finish(tx).await?;
 		drop(doc_ids);
@@ -296,7 +297,7 @@ impl MTree {
 	async fn insert(
 		&mut self,
 		stk: &mut Stk,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut MTreeStore,
 		obj: SharedVector,
 		id: DocId,
@@ -368,7 +369,7 @@ impl MTree {
 
 	async fn append(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut MTreeStore,
 		object: &SharedVector,
 		id: DocId,
@@ -406,7 +407,7 @@ impl MTree {
 	async fn insert_at_node(
 		&mut self,
 		stk: &mut Stk,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut MTreeStore,
 		node: MStoredNode,
 		parent_center: &Option<SharedVector>,
@@ -442,7 +443,7 @@ impl MTree {
 	async fn insert_node_internal(
 		&mut self,
 		stk: &mut Stk,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut MTreeStore,
 		node_id: NodeId,
 		node_key: Key,
@@ -749,7 +750,7 @@ impl MTree {
 	async fn delete(
 		&mut self,
 		stk: &mut Stk,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut MTreeStore,
 		object: SharedVector,
 		doc_id: DocId,
@@ -795,7 +796,7 @@ impl MTree {
 	async fn delete_at_node(
 		&mut self,
 		stk: &mut Stk,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut MTreeStore,
 		node: MStoredNode,
 		parent_center: &Option<SharedVector>,
@@ -844,7 +845,7 @@ impl MTree {
 	async fn delete_node_internal(
 		&mut self,
 		stk: &mut Stk,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut MTreeStore,
 		node_id: NodeId,
 		node_key: Key,
@@ -975,7 +976,7 @@ impl MTree {
 	#[allow(clippy::too_many_arguments)]
 	async fn deletion_underflown(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		store: &mut MTreeStore,
 		parent_center: &Option<SharedVector>,
 		n_node: &mut InternalNode,
@@ -1471,16 +1472,9 @@ impl VersionedSerdeState for MState {}
 
 #[cfg(test)]
 mod tests {
-	use futures::lock::Mutex;
-	use hashbrown::{HashMap, HashSet};
-	use reblessive::tree::Stk;
-	use std::collections::VecDeque;
-	use std::sync::Arc;
-
 	use crate::ctx::Context;
 	use crate::err::Error;
-	use test_log::test;
 
 	use crate::idx::docids::{DocId, DocIds};
 	use crate::idx::planner::checker::MTreeConditionChecker;
 	use crate::idx::trees::knn::tests::TestCollection;
@@ -1492,6 +1486,10 @@ mod tests {
 	use crate::kvs::Transaction;
 	use crate::kvs::{Datastore, TransactionType};
 	use crate::sql::index::{Distance, VectorType};
+	use hashbrown::{HashMap, HashSet};
+	use reblessive::tree::Stk;
+	use std::collections::VecDeque;
+	use test_log::test;
 
 	async fn new_operation<'a>(
 		ds: &Datastore,
@@ -1503,15 +1501,15 @@ mod tests {
 			.index_store()
 			.get_store_mtree(TreeNodeProvider::Debug, t.state.generation, tt, cache_size)
 			.await;
-		let tx = Arc::new(Mutex::new(ds.transaction(tt, Optimistic).await.unwrap()));
-		let ctx = Context::default().set_transaction(tx);
+		let tx = ds.transaction(tt, Optimistic).await.unwrap().enclose();
+		let ctx = Context::default().with_transaction(tx);
 		(ctx, st)
 	}
 
 	async fn finish_operation(
 		ds: &Datastore,
 		t: &mut MTree,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		mut st: TreeStore<MTreeNode>,
 		commit: bool,
 	) -> Result<(), Error> {
@@ -1540,18 +1538,16 @@ mod tests {
 		for (doc_id, obj) in collection.to_vec_ref() {
 			{
 				let (ctx, mut st) = new_operation(ds, t, TransactionType::Write, cache_size).await;
-				let mut tx = ctx.tx_lock().await;
-				t.insert(stk, &mut tx, &mut st, obj.clone(), *doc_id).await?;
-				finish_operation(ds, t, &mut tx, st, true).await?;
-				drop(tx);
+				let tx = ctx.tx();
+				t.insert(stk, &tx, &mut st, obj.clone(), *doc_id).await?;
+				finish_operation(ds, t, &tx, st, true).await?;
 				map.insert(*doc_id, obj.clone());
 			}
 			c += 1;
 			{
 				let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await;
-				let mut tx = ctx.tx_lock().await;
-				let p = check_tree_properties(&mut tx, &mut st, t).await?;
-				drop(tx);
+				let tx = ctx.tx();
+				let p = check_tree_properties(&tx, &mut st, t).await?;
 				assert_eq!(p.doc_count, c);
 			}
 		}
@@ -1568,19 +1564,17 @@ mod tests {
 		let mut map = HashMap::with_capacity(collection.len());
 		{
 			let (ctx, mut st) = new_operation(ds, t, TransactionType::Write, cache_size).await;
-			let mut tx = ctx.tx_lock().await;
+			let tx = ctx.tx();
 			for (doc_id, obj) in collection.to_vec_ref() {
-				t.insert(stk, &mut tx, &mut st, obj.clone(), *doc_id).await?;
+				t.insert(stk, &tx, &mut st, obj.clone(), *doc_id).await?;
 				map.insert(*doc_id, obj.clone());
 			}
-			finish_operation(ds, t, &mut tx, st, true).await?;
-			drop(tx);
+			finish_operation(ds, t, &tx, st, true).await?;
 		}
 		{
 			let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await;
-			let mut tx = ctx.tx_lock().await;
-			check_tree_properties(&mut tx, &mut st, t).await?;
-			drop(tx);
+			let tx = ctx.tx();
+			check_tree_properties(&tx, &mut st, t).await?;
 		}
 		Ok(map)
 	}
@@ -1598,9 +1592,9 @@ mod tests {
 		let deleted = {
 			debug!("### Remove {} {:?}", doc_id, obj);
 			let (ctx, mut st) = new_operation(ds, t, TransactionType::Write, cache_size).await;
-			let mut tx = ctx.tx_lock().await;
-			let deleted = t.delete(stk, &mut tx, &mut st, obj.clone(), *doc_id).await?;
-			finish_operation(ds, t, &mut tx, st, true).await?;
+			let tx = ctx.tx();
+			let deleted = t.delete(stk, &tx, &mut st, obj.clone(), *doc_id).await?;
+			finish_operation(ds, t, &tx, st, true).await?;
 			drop(tx);
 			deleted
 		};
@@ -1627,16 +1621,16 @@ mod tests {
 		}
 		{
 			let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await;
-			let mut tx = ctx.tx_lock().await;
-			check_tree_properties(&mut tx, &mut st, t).await?;
+			let tx = ctx.tx();
+			check_tree_properties(&tx, &mut st, t).await?;
 			drop(tx);
 		}
 	}
 
 	if all_deleted {
 		let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await;
-		let mut tx = ctx.tx_lock().await;
-		check_tree_properties(&mut tx, &mut st, t).await?.check(0, 0, None, None, 0, 0);
+		let tx = ctx.tx();
+		check_tree_properties(&tx, &mut st, t).await?.check(0, 0, None, None, 0, 0);
 		drop(tx);
 	}
 	Ok(())
@@ -1677,9 +1671,8 @@ mod tests {
 		if expected_len != res.docs.len() {
 			#[cfg(debug_assertions)]
 			debug!("{:?}", res.visited_nodes);
-			let mut tx = ctx.tx_lock().await;
-			check_tree_properties(&mut tx, &mut st, t).await?;
-			drop(tx);
+			let tx = ctx.tx();
+			check_tree_properties(&tx, &mut st, t).await?;
 		}
 		assert_eq!(
 			expected_len,
@@ -1761,10 +1754,10 @@ mod tests {
 		let mut t = MTree::new(MState::new(*capacity), distance.clone());
 
 		let (ctx, _st) = new_operation(&ds, &t, TransactionType::Read, cache_size).await;
-		let mut tx = ctx.tx_lock().await;
+		let tx = ctx.tx();
 		let doc_ids = DocIds::new(
 			ds.index_store(),
-			&mut tx,
+			&tx,
 			TransactionType::Read,
 			IndexKeyBase::default(),
 			7,
@@ -1772,7 +1765,6 @@ mod tests {
 		)
 		.await
 		.unwrap();
-		drop(tx);
 
 		let map = if collection.len() < 1000 {
 			insert_collection_one_by_one(stk, &ds, &mut t, &collection, cache_size).await?
@@ -2078,7 +2070,7 @@ mod tests {
 	}
 
 	async fn check_tree_properties(
-		tx: &mut Transaction,
+		tx: &Transaction,
 		st: &mut MTreeStore,
 		t: &MTree,
 	) -> Result<CheckedProperties, Error> {

@@ -131,7 +131,7 @@ where
 
 	pub(super) async fn get_node(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		node_id: NodeId,
 	) -> Result<Arc<StoredNode<N>>, Error> {
 		match self {
@@ -208,7 +208,7 @@ where
 
 	async fn get_node(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		node_id: NodeId,
 	) -> Result<Arc<StoredNode<N>>, Error> {
 		if let Some(n) = self.lru.get(node_id).await {
@@ -260,7 +260,7 @@ where
 
 	pub(super) async fn get_node(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		node_id: NodeId,
 	) -> Result<Arc<StoredNode<N>>, Error> {
 		match self.cache.entry(node_id) {

@@ -47,7 +47,7 @@ where
 
 	pub(in crate::idx) async fn get_node_mut(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		node_id: NodeId,
 	) -> Result<StoredNode<N>, Error> {
 		match self {
@@ -58,7 +58,7 @@ where
 
 	pub(in crate::idx) async fn get_node(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		node_id: NodeId,
 	) -> Result<Arc<StoredNode<N>>, Error> {
 		match self {
@@ -74,10 +74,8 @@ where
 	) -> Result<Arc<StoredNode<N>>, Error> {
 		match self {
 			Self::Read(r) => {
-				let mut tx = ctx.tx_lock().await;
-				let n = r.get_node(&mut tx, node_id).await;
-				drop(tx);
-				n
+				let tx = ctx.tx();
+				r.get_node(&tx, node_id).await
 			}
 			_ => Err(Error::Unreachable("TreeStore::get_node_txn")),
 		}
@@ -112,7 +110,7 @@ where
 		}
 	}
 
-	pub async fn finish(&mut self, tx: &mut Transaction) -> Result<Option<TreeCache<N>>, Error> {
+	pub async fn finish(&mut self, tx: &Transaction) -> Result<Option<TreeCache<N>>, Error> {
 		match self {
 			Self::Write(w) => w.finish(tx).await,
 			_ => Ok(None),
@@ -143,7 +141,7 @@ impl TreeNodeProvider {
 		}
 	}
 
-	async fn load<N>(&self, tx: &mut Transaction, id: NodeId) -> Result<StoredNode<N>, Error>
+	async fn load<N>(&self, tx: &Transaction, id: NodeId) -> Result<StoredNode<N>, Error>
 	where
 		N: TreeNode + Clone,
 	{
@@ -157,7 +155,7 @@ impl TreeNodeProvider {
 		}
 	}
 
-	async fn save<N>(&self, tx: &mut Transaction, node: &mut StoredNode<N>) -> Result<(), Error>
+	async fn save<N>(&self, tx: &Transaction, node: &mut StoredNode<N>) -> Result<(), Error>
 	where
 		N: TreeNode + Clone + Display,
 	{
@@ -290,20 +288,16 @@ impl IndexStores {
 
 	pub(crate) async fn index_removed(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		ns: &str,
 		db: &str,
 		tb: &str,
 		ix: &str,
 	) -> Result<(), Error> {
-		self.remove_index(ns, db, tx.get_and_cache_tb_index(ns, db, tb, ix).await?.as_ref()).await
+		self.remove_index(ns, db, tx.get_tb_index(ns, db, tb, ix).await?.as_ref()).await
 	}
 
-	pub(crate) async fn namespace_removed(
-		&self,
-		tx: &mut Transaction,
-		ns: &str,
-	) -> Result<(), Error> {
+	pub(crate) async fn namespace_removed(&self, tx: &Transaction, ns: &str) -> Result<(), Error> {
 		for db in tx.all_db(ns).await?.iter() {
 			self.database_removed(tx, ns, &db.name).await?;
 		}
@@ -312,7 +306,7 @@ impl IndexStores {
 
 	pub(crate) async fn database_removed(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		ns: &str,
 		db: &str,
 	) -> Result<(), Error> {
@@ -324,7 +318,7 @@ impl IndexStores {
 
 	pub(crate) async fn table_removed(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		ns: &str,
 		db: &str,
 		tb: &str,

@@ -41,7 +41,7 @@ where
 
 	pub(super) async fn get_node_mut(
 		&mut self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		node_id: NodeId,
 	) -> Result<StoredNode<N>, Error> {
 		#[cfg(debug_assertions)]
@@ -95,10 +95,7 @@ where
 		Ok(())
 	}
 
-	pub(super) async fn finish(
-		&mut self,
-		tx: &mut Transaction,
-	) -> Result<Option<TreeCache<N>>, Error> {
+	pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<Option<TreeCache<N>>, Error> {
 		#[cfg(debug_assertions)]
 		{
 			if !self.out.is_empty() {
@@ -167,7 +164,7 @@ where
 
 	pub(super) async fn get_node(
 		&self,
-		tx: &mut Transaction,
+		tx: &Transaction,
 		node_id: NodeId,
 	) -> Result<Arc<StoredNode<N>>, Error> {
 		let r = self.cache.get_node(tx, node_id).await?;

@@ -545,12 +545,11 @@ mod tests {
 		assert_eq!(dist.compute(&v1, &v2).unwrap(), res.into());
 
 		// Check the "Vector" optimised implementations
-		for t in [VectorType::F64] {
+		let t = VectorType::F64;
 		let v1: SharedVector = Vector::try_from_vector(t, &v1).unwrap().into();
 		let v2: SharedVector = Vector::try_from_vector(t, &v2).unwrap().into();
 		assert_eq!(dist.calculate(&v1, &v2), res);
 	}
-	}
 
 	fn test_distance_collection(dist: Distance, size: usize, dim: usize) {
 		let mut rng = get_seed_rnd();

@@ -1,17 +1,19 @@
 use std::fmt::{Display, Formatter};
 
+#[allow(unused)]
+pub(crate) trait Categorise {
+	/// Returns the category of the key for error reporting
+	fn categorise(&self) -> Category;
+}
+
 #[derive(Debug, Copy, Clone)]
 #[non_exhaustive]
-pub enum KeyCategory {
-	/// This category is reserved for cases when we do not know the category
-	/// It should be caught and re-populated with the correct category where appropriate
-	Unknown,
+#[allow(unused)]
+pub enum Category {
 	/// crate::key::root::all /
 	Root,
 	/// crate::key::root::ac /!ac{ac}
 	Access,
-	/// crate::key::root::hb /!hb{ts}/{nd}
-	Heartbeat,
 	/// crate::key::root::nd /!nd{nd}
 	Node,
 	/// crate::key::root::ni /!ni
@@ -21,24 +23,33 @@ pub enum KeyCategory {
 	/// crate::key::root::us /!us{us}
 	User,
 	///
+	/// ------------------------------
+	///
 	/// crate::key::node::all /${nd}
 	NodeRoot,
 	/// crate::key::node::lq /${nd}!lq{lq}{ns}{db}
 	NodeLiveQuery,
 	///
+	/// ------------------------------
+	///
+	/// crate::key::namespace::di /+{ni}!di
+	DatabaseIdentifier,
+	/// crate::key::database::ti /+{ni}*{di}!ti
+	DatabaseTableIdentifier,
+	///
+	/// ------------------------------
+	///
 	/// crate::key::namespace::all /*{ns}
 	NamespaceRoot,
 	/// crate::key::namespace::db /*{ns}!db{db}
 	DatabaseAlias,
-	/// crate::key::namespace::di /+{ns id}!di
-	DatabaseIdentifier,
-	/// crate::key::namespace::lg /*{ns}!lg{lg}
-	DatabaseLogAlias,
 	/// crate::key::namespace::ac /*{ns}!ac{ac}
 	NamespaceAccess,
 	/// crate::key::namespace::us /*{ns}!us{us}
 	NamespaceUser,
 	///
+	/// ------------------------------
+	///
 	/// crate::key::database::all /*{ns}*{db}
 	DatabaseRoot,
 	/// crate::key::database::ac /*{ns}*{db}!ac{ac}
@@ -47,16 +58,12 @@ pub enum KeyCategory {
 	DatabaseAnalyzer,
 	/// crate::key::database::fc /*{ns}*{db}!fn{fc}
 	DatabaseFunction,
-	/// crate::key::database::lg /*{ns}*{db}!lg{lg}
-	DatabaseLog,
 	/// crate::key::database::ml /*{ns}*{db}!ml{ml}{vn}
 	DatabaseModel,
 	/// crate::key::database::pa /*{ns}*{db}!pa{pa}
 	DatabaseParameter,
 	/// crate::key::database::tb /*{ns}*{db}!tb{tb}
 	DatabaseTable,
-	/// crate::key::database::ti /+{ns id}*{db id}!ti
-	DatabaseTableIdentifier,
 	/// crate::key::database::ts /*{ns}*{db}!ts{ts}
 	DatabaseTimestamp,
 	/// crate::key::database::us /*{ns}*{db}!us{us}
@@ -64,6 +71,8 @@ pub enum KeyCategory {
 	/// crate::key::database::vs /*{ns}*{db}!vs
 	DatabaseVersionstamp,
 	///
+	/// ------------------------------
+	///
 	/// crate::key::table::all /*{ns}*{db}*{tb}
 	TableRoot,
 	/// crate::key::table::ev /*{ns}*{db}*{tb}!ev{ev}
@@ -77,6 +86,8 @@ pub enum KeyCategory {
 	/// crate::key::table::lq /*{ns}*{db}*{tb}!lq{lq}
 	TableLiveQuery,
 	///
+	/// ------------------------------
+	///
 	/// crate::key::index::all /*{ns}*{db}*{tb}+{ix}
 	IndexRoot,
 	/// crate::key::index::bc /*{ns}*{db}*{tb}+{ix}!bc{id}
@@ -104,69 +115,71 @@ pub enum KeyCategory {
 	/// crate::key::index /*{ns}*{db}*{tb}+{ix}*{fd}{id}
 	Index,
 	///
+	/// ------------------------------
+	///
 	/// crate::key::change /*{ns}*{db}#{ts}
 	ChangeFeed,
 	///
+	/// ------------------------------
+	///
 	/// crate::key::thing /*{ns}*{db}*{tb}*{id}
 	Thing,
 	///
+	/// ------------------------------
+	///
 	/// crate::key::graph /*{ns}*{db}*{tb}~{id}{eg}{fk}
 	Graph,
 }
 
-impl Display for KeyCategory {
+impl Display for Category {
 	fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
 		let name = match self {
-			KeyCategory::Unknown => "Unknown",
-			KeyCategory::Root => "Root",
-			KeyCategory::Access => "Access",
-			KeyCategory::Heartbeat => "Heartbeat",
-			KeyCategory::Node => "Node",
-			KeyCategory::NamespaceIdentifier => "NamespaceIdentifier",
-			KeyCategory::Namespace => "Namespace",
-			KeyCategory::User => "User",
-			KeyCategory::NodeRoot => "NodeRoot",
-			KeyCategory::NodeLiveQuery => "NodeLiveQuery",
-			KeyCategory::NamespaceRoot => "NamespaceRoot",
-			KeyCategory::DatabaseAlias => "DatabaseAlias",
-			KeyCategory::DatabaseIdentifier => "DatabaseIdentifier",
-			KeyCategory::DatabaseLogAlias => "DatabaseLogAlias",
-			KeyCategory::NamespaceAccess => "NamespaceAccess",
-			KeyCategory::NamespaceUser => "NamespaceUser",
-			KeyCategory::DatabaseRoot => "DatabaseRoot",
-			KeyCategory::DatabaseAccess => "DatabaseAccess",
-			KeyCategory::DatabaseAnalyzer => "DatabaseAnalyzer",
-			KeyCategory::DatabaseFunction => "DatabaseFunction",
-			KeyCategory::DatabaseLog => "DatabaseLog",
-			KeyCategory::DatabaseModel => "DatabaseModel",
-			KeyCategory::DatabaseParameter => "DatabaseParameter",
-			KeyCategory::DatabaseTable => "DatabaseTable",
-			KeyCategory::DatabaseTableIdentifier => "DatabaseTableIdentifier",
-			KeyCategory::DatabaseTimestamp => "DatabaseTimestamp",
-			KeyCategory::DatabaseUser => "DatabaseUser",
-			KeyCategory::DatabaseVersionstamp => "DatabaseVersionstamp",
-			KeyCategory::TableRoot => "TableRoot",
-			KeyCategory::TableEvent => "TableEvent",
-			KeyCategory::TableField => "TableField",
-			KeyCategory::TableView => "TableView",
-			KeyCategory::IndexDefinition => "IndexDefinition",
-			KeyCategory::TableLiveQuery => "TableLiveQuery",
-			KeyCategory::IndexRoot => "IndexRoot",
-			KeyCategory::IndexTermDocList => "IndexTermDocList",
-			KeyCategory::IndexBTreeNode => "IndexBTreeNode",
-			KeyCategory::IndexTermDocFrequency => "IndexTermDocFrequency",
-			KeyCategory::IndexDocKeys => "IndexDocKeys",
-			KeyCategory::IndexTermList => "IndexTermList",
-			KeyCategory::IndexBTreeNodeDocLengths => "IndexBTreeNodeDocLengths",
-			KeyCategory::IndexOffset => "IndexOffset",
-			KeyCategory::IndexBTreeNodePostings => "IndexBTreeNodePostings",
-			KeyCategory::IndexFullTextState => "IndexFullTextState",
-			KeyCategory::IndexBTreeNodeTerms => "IndexBTreeNodeTerms",
-			KeyCategory::IndexTerms => "IndexTerms",
-			KeyCategory::Index => "Index",
-			KeyCategory::ChangeFeed => "ChangeFeed",
-			KeyCategory::Thing => "Thing",
-			KeyCategory::Graph => "Graph",
+			Self::Root => "Root",
+			Self::Access => "Access",
+			Self::Node => "Node",
+			Self::NamespaceIdentifier => "NamespaceIdentifier",
+			Self::Namespace => "Namespace",
+			Self::User => "User",
+			Self::NodeRoot => "NodeRoot",
+			Self::NodeLiveQuery => "NodeLiveQuery",
+			Self::NamespaceRoot => "NamespaceRoot",
+			Self::DatabaseAlias => "DatabaseAlias",
+			Self::DatabaseIdentifier => "DatabaseIdentifier",
+			Self::NamespaceAccess => "NamespaceAccess",
+			Self::NamespaceUser => "NamespaceUser",
+			Self::DatabaseRoot => "DatabaseRoot",
+			Self::DatabaseAccess => "DatabaseAccess",
+			Self::DatabaseAnalyzer => "DatabaseAnalyzer",
+			Self::DatabaseFunction => "DatabaseFunction",
+			Self::DatabaseModel => "DatabaseModel",
+			Self::DatabaseParameter => "DatabaseParameter",
+			Self::DatabaseTable => "DatabaseTable",
+			Self::DatabaseTableIdentifier => "DatabaseTableIdentifier",
+			Self::DatabaseTimestamp => "DatabaseTimestamp",
+			Self::DatabaseUser => "DatabaseUser",
+			Self::DatabaseVersionstamp => "DatabaseVersionstamp",
+			Self::TableRoot => "TableRoot",
+			Self::TableEvent => "TableEvent",
+			Self::TableField => "TableField",
+			Self::TableView => "TableView",
+			Self::IndexDefinition => "IndexDefinition",
+			Self::TableLiveQuery => "TableLiveQuery",
+			Self::IndexRoot => "IndexRoot",
+			Self::IndexTermDocList => "IndexTermDocList",
+			Self::IndexBTreeNode => "IndexBTreeNode",
+			Self::IndexTermDocFrequency => "IndexTermDocFrequency",
+			Self::IndexDocKeys => "IndexDocKeys",
+			Self::IndexTermList => "IndexTermList",
+			Self::IndexBTreeNodeDocLengths => "IndexBTreeNodeDocLengths",
+			Self::IndexOffset => "IndexOffset",
+			Self::IndexBTreeNodePostings => "IndexBTreeNodePostings",
+			Self::IndexFullTextState => "IndexFullTextState",
+			Self::IndexBTreeNodeTerms => "IndexBTreeNodeTerms",
+			Self::IndexTerms => "IndexTerms",
+			Self::Index => "Index",
+			Self::ChangeFeed => "ChangeFeed",
+			Self::Thing => "Thing",
+			Self::Graph => "Graph",
 		};
 		write!(f, "{}", name)
 	}

@@ -1,11 +1,9 @@
-/// Stores change feeds
+//! Stores change feeds
+use crate::key::category::Categorise;
+use crate::key::category::Category;
+use crate::vs;
 use derive::Key;
 use serde::{Deserialize, Serialize};
-
-use crate::vs;
-
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
 use std::str;
 
 // Cf stands for change feeds
@@ -73,9 +71,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Cf<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::ChangeFeed
+impl Categorise for Cf<'_> {
+	fn categorise(&self) -> Category {
+		Category::ChangeFeed
 	}
 }

@@ -1,6 +1,6 @@
-/// Stores a DEFINE ACCESS ON DATABASE config definition
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+//! Stores a DEFINE ACCESS ON DATABASE config definition
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 
@@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Ac<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseAccess
+impl Categorise for Ac<'_> {
+	fn categorise(&self) -> Category {
+		Category::DatabaseAccess
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores the key prefix for all keys under a database
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 
@@ -18,9 +18,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str) -> All<'a> {
 	All::new(ns, db)
 }
 
-impl KeyRequirements for All<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseRoot
+impl Categorise for All<'_> {
+	fn categorise(&self) -> Category {
+		Category::DatabaseRoot
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores a DEFINE ANALYZER config definition
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 
@@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Az<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseAnalyzer
+impl Categorise for Az<'_> {
+	fn categorise(&self) -> Category {
+		Category::DatabaseAnalyzer
 	}
 }

@@ -1,6 +1,6 @@
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
-/// Stores a DEFINE FUNCTION config definition
+//! Stores a DEFINE FUNCTION config definition
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 
@@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Fc<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseFunction
+impl Categorise for Fc<'_> {
+	fn categorise(&self) -> Category {
+		Category::DatabaseFunction
 	}
 }

@@ -1,6 +1,6 @@
-/// Stores a DEFINE MODEL config definition
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+//! Stores a DEFINE MODEL config definition
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 
@@ -35,9 +35,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Ml<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseModel
+impl Categorise for Ml<'_> {
+	fn categorise(&self) -> Category {
+		Category::DatabaseModel
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores a DEFINE PARAM config definition
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 
@@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Pa<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseParameter
+impl Categorise for Pa<'_> {
+	fn categorise(&self) -> Category {
+		Category::DatabaseParameter
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores a DEFINE TABLE config definition
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 
@@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Tb<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseTable
+impl Categorise for Tb<'_> {
+	fn categorise(&self) -> Category {
+		Category::DatabaseTable
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores the next and available freed IDs for documents
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 
@@ -22,9 +22,9 @@ pub fn new(ns: u32, db: u32) -> Ti {
 	Ti::new(ns, db)
 }
 
-impl KeyRequirements for Ti {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseTableIdentifier
+impl Categorise for Ti {
+	fn categorise(&self) -> Category {
+		Category::DatabaseTableIdentifier
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores database timestamps
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 
@@ -39,9 +39,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Ts<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseTimestamp
+impl Categorise for Ts<'_> {
+	fn categorise(&self) -> Category {
+		Category::DatabaseTimestamp
 	}
}

@ -1,5 +1,6 @@
|
||||||
use crate::key::error::KeyCategory;
|
//! Stores a DEFINE USER ON DATABASE config definition
|
||||||
use crate::key::key_req::KeyRequirements;
|
use crate::key::category::Categorise;
|
||||||
|
use crate::key::category::Category;
|
||||||
use derive::Key;
|
use derive::Key;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
@ -33,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec<u8> {
|
||||||
k
|
k
|
||||||
}
|
}
|
||||||
|
|
||||||
impl KeyRequirements for Us<'_> {
|
impl Categorise for Us<'_> {
|
||||||
fn key_category(&self) -> KeyCategory {
|
fn categorise(&self) -> Category {
|
||||||
KeyCategory::DatabaseUser
|
Category::DatabaseUser
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
//! Stores database versionstamps
|
//! Stores database versionstamps
|
||||||
use crate::key::error::KeyCategory;
|
use crate::key::category::Categorise;
|
||||||
use crate::key::key_req::KeyRequirements;
|
use crate::key::category::Category;
|
||||||
use derive::Key;
|
use derive::Key;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
@ -23,9 +23,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str) -> Vs<'a> {
|
||||||
Vs::new(ns, db)
|
Vs::new(ns, db)
|
||||||
}
|
}
|
||||||
|
|
||||||
impl KeyRequirements for Vs<'_> {
|
impl Categorise for Vs<'_> {
|
||||||
fn key_category(&self) -> KeyCategory {
|
fn categorise(&self) -> Category {
|
||||||
KeyCategory::DatabaseVersionstamp
|
Category::DatabaseVersionstamp
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -1,11 +1,6 @@
-/// Debug purposes only. It may be used in logs. Not for key handling in implementation code.
-/// Helpers for debugging keys
-
-/// sprint_key converts a key to an escaped string.
-/// This is used for logging and debugging tests and should not be used in implementation code.
-#[doc(hidden)]
-pub fn sprint_key<T>(key: &T) -> String
+/// Displays a key in a human-readable format.
+#[cfg(debug_assertions)]
+pub fn sprint<T>(key: &T) -> String
 where
 	T: AsRef<[u8]>,
 {
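The old `sprint_key` helper is thus renamed to `sprint` and gated behind debug builds. A minimal, hypothetical call site (not part of this commit) showing how the renamed helper would be used for logging:

#[cfg(debug_assertions)]
fn trace_key(key: &[u8]) {
	// `sprint` escapes the raw key bytes into a human-readable string;
	// it accepts any type viewable as a byte slice.
	println!("operating on key {}", crate::key::debug::sprint(&key));
}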
@@ -1,6 +1,6 @@
 //! Stores a graph edge pointer
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use crate::sql::dir::Dir;
 use crate::sql::id::Id;
 use crate::sql::thing::Thing;

@@ -164,9 +164,9 @@ pub fn ftsuffix(ns: &str, db: &str, tb: &str, id: &Id, eg: &Dir, ft: &str) -> Ve
 	k
 }

-impl KeyRequirements for Graph<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::Graph
+impl Categorise for Graph<'_> {
+	fn categorise(&self) -> Category {
+		Category::Graph
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores the key prefix for all keys under an index
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -22,9 +22,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str, tb: &'a str, ix: &'a str) -> All<'a> {
 	All::new(ns, db, tb, ix)
 }

-impl KeyRequirements for All<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexRoot
+impl Categorise for All<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexRoot
 	}
 }

@@ -1,7 +1,7 @@
 //! Stores Doc list for each term
 use crate::idx::ft::terms::TermId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -23,9 +23,9 @@ pub struct Bc<'a> {
 	pub term_id: TermId,
 }

-impl KeyRequirements for Bc<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexTermDocList
+impl Categorise for Bc<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexTermDocList
 	}
 }

@@ -1,7 +1,7 @@
 //! Stores BTree nodes for doc ids
 use crate::idx::trees::store::NodeId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -23,9 +23,9 @@ pub struct Bd<'a> {
 	pub node_id: Option<NodeId>,
 }

-impl KeyRequirements for Bd<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexBTreeNode
+impl Categorise for Bd<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexBTreeNode
 	}
 }

@@ -1,8 +1,8 @@
 //! Stores Term/Doc frequency
 use crate::idx::docids::DocId;
 use crate::idx::ft::terms::TermId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -25,9 +25,9 @@ pub struct Bf<'a> {
 	pub doc_id: DocId,
 }

-impl KeyRequirements for Bf<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexTermDocFrequency
+impl Categorise for Bf<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexTermDocFrequency
 	}
 }

@@ -1,7 +1,7 @@
 //! Stores doc keys for doc_ids
 use crate::idx::trees::store::NodeId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -23,9 +23,9 @@ pub struct Bi<'a> {
 	pub node_id: NodeId,
 }

-impl KeyRequirements for Bi<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexDocKeys
+impl Categorise for Bi<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexDocKeys
 	}
 }

@@ -1,7 +1,7 @@
 //! Stores the term list for doc_ids
 use crate::idx::docids::DocId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -23,9 +23,9 @@ pub struct Bk<'a> {
 	pub doc_id: DocId,
 }

-impl KeyRequirements for Bk<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexTermList
+impl Categorise for Bk<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexTermList
 	}
 }

@@ -1,7 +1,7 @@
 //! Stores BTree nodes for doc lengths
 use crate::idx::trees::store::NodeId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -23,9 +23,9 @@ pub struct Bl<'a> {
 	pub node_id: Option<NodeId>,
 }

-impl KeyRequirements for Bl<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexBTreeNodeDocLengths
+impl Categorise for Bl<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexBTreeNodeDocLengths
 	}
 }

@@ -1,8 +1,8 @@
 //! Stores the offsets
 use crate::idx::docids::DocId;
 use crate::idx::ft::terms::TermId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -25,9 +25,9 @@ pub struct Bo<'a> {
 	pub term_id: TermId,
 }

-impl KeyRequirements for Bo<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexOffset
+impl Categorise for Bo<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexOffset
 	}
 }

@@ -1,7 +1,7 @@
 //! Stores BTree nodes for postings
 use crate::idx::trees::store::NodeId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -23,9 +23,9 @@ pub struct Bp<'a> {
 	pub node_id: Option<NodeId>,
 }

-impl KeyRequirements for Bp<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexBTreeNodePostings
+impl Categorise for Bp<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexBTreeNodePostings
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores FullText index states
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -20,9 +20,9 @@ pub struct Bs<'a> {
 	pub ix: &'a str,
 }

-impl KeyRequirements for Bs<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexFullTextState
+impl Categorise for Bs<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexFullTextState
 	}
 }

@@ -1,7 +1,7 @@
 //! Stores BTree nodes for terms
 use crate::idx::trees::store::NodeId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -23,9 +23,9 @@ pub struct Bt<'a> {
 	pub node_id: Option<NodeId>,
 }

-impl KeyRequirements for Bt<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexBTreeNodeTerms
+impl Categorise for Bt<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexBTreeNodeTerms
 	}
 }

@@ -1,7 +1,7 @@
 //! Stores terms for term_ids
 use crate::idx::ft::terms::TermId;
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -23,9 +23,9 @@ pub struct Bu<'a> {
 	pub term_id: TermId,
 }

-impl KeyRequirements for Bu<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexTerms
+impl Categorise for Bu<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexTerms
 	}
 }

@@ -13,8 +13,8 @@ pub mod bt;
 pub mod bu;
 pub mod vm;

-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use crate::sql::array::Array;
 use crate::sql::id::Id;
 use derive::Key;

@@ -103,9 +103,9 @@ pub struct Index<'a> {
 	pub id: Option<Cow<'a, Id>>,
 }

-impl KeyRequirements for Index<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::Index
+impl Categorise for Index<'_> {
+	fn categorise(&self) -> Category {
+		Category::Index
 	}
 }
@@ -1,7 +0,0 @@
-use crate::key::error::KeyCategory;
-
-/// Key requirements are functions that we expect all keys to have
-pub(crate) trait KeyRequirements {
-	/// Returns the category of the key for error reporting
-	fn key_category(&self) -> KeyCategory;
-}
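The `KeyRequirements` trait deleted above is superseded by the `Categorise` trait in `crate::key::category`, as the renamed impl blocks throughout this diff show. A self-contained sketch of the new shape (the `Category` enum below lists only a few of the real variants, and `Kv` stands in for any key type):

// Only a handful of the real Category variants are reproduced here.
#[derive(Debug, Clone, Copy)]
pub enum Category {
	Root,
	Node,
	NodeLiveQuery,
	Index,
	Graph,
}

pub trait Categorise {
	/// Returns the category of the key for error reporting
	fn categorise(&self) -> Category;
}

// Stand-in key type, mirroring the pattern repeated across this diff.
struct Kv;

impl Categorise for Kv {
	fn categorise(&self) -> Category {
		Category::Root
	}
}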
@@ -22,7 +22,7 @@
 /// crate::key::database::ac /*{ns}*{db}!ac{ac}
 /// crate::key::database::az /*{ns}*{db}!az{az}
 /// crate::key::database::fc /*{ns}*{db}!fn{fc}
-/// crate::key::database::lg /*{ns}*{db}!lg{lg}
+/// crate::key::database::ml /*{ns}*{db}!ml{ml}{vn}
 /// crate::key::database::pa /*{ns}*{db}!pa{pa}
 /// crate::key::database::tb /*{ns}*{db}!tb{tb}
 /// crate::key::database::ti /+{ns id}*{db id}!ti

@@ -57,15 +57,14 @@
 ///
 /// crate::key::graph /*{ns}*{db}*{tb}~{id}{eg}{fk}
 ///
-pub mod change;
-pub mod database;
-pub mod debug;
-pub(crate) mod error;
-pub mod graph;
-pub mod index;
-pub(crate) mod key_req;
-pub mod namespace;
-pub mod node;
-pub mod root;
-pub mod table;
-pub mod thing;
+pub(crate) mod category;
+pub(crate) mod change;
+pub(crate) mod database;
+pub(crate) mod debug;
+pub(crate) mod graph;
+pub(crate) mod index;
+pub(crate) mod namespace;
+pub(crate) mod node;
+pub(crate) mod root;
+pub(crate) mod table;
+pub(crate) mod thing;

@@ -1,6 +1,6 @@
 //! Stores a DEFINE ACCESS ON NAMESPACE config definition
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -32,9 +32,9 @@ pub fn suffix(ns: &str) -> Vec<u8> {
 	k
 }

-impl KeyRequirements for Ac<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::NamespaceAccess
+impl Categorise for Ac<'_> {
+	fn categorise(&self) -> Category {
+		Category::NamespaceAccess
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores the key prefix for all keys under a namespace
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -16,9 +16,9 @@ pub fn new(ns: &str) -> All<'_> {
 	All::new(ns)
 }

-impl KeyRequirements for All<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::NamespaceRoot
+impl Categorise for All<'_> {
+	fn categorise(&self) -> Category {
+		Category::NamespaceRoot
 	}
 }

@@ -1,6 +1,6 @@
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
-/// Stores a DEFINE DATABASE config definition
+//! Stores a DEFINE DATABASE config definition
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -32,9 +32,9 @@ pub fn suffix(ns: &str) -> Vec<u8> {
 	k
 }

-impl KeyRequirements for Db<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseAlias
+impl Categorise for Db<'_> {
+	fn categorise(&self) -> Category {
+		Category::DatabaseAlias
 	}
 }

@@ -1,6 +1,6 @@
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
-/// Stores a database ID generator state
+//! Stores a database ID generator state
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -19,9 +19,9 @@ pub fn new(ns: u32) -> Di {
 	Di::new(ns)
 }

-impl KeyRequirements for Di {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::DatabaseIdentifier
+impl Categorise for Di {
+	fn categorise(&self) -> Category {
+		Category::DatabaseIdentifier
 	}
 }
 impl Di {

@@ -1,5 +1,6 @@
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+//! Stores a DEFINE USER ON NAMESPACE config definition
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -31,9 +32,9 @@ pub fn suffix(ns: &str) -> Vec<u8> {
 	k
 }

-impl KeyRequirements for Us<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::NamespaceUser
+impl Categorise for Us<'_> {
+	fn categorise(&self) -> Category {
+		Category::NamespaceUser
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores the key prefix for all nodes
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 use uuid::Uuid;

@@ -18,9 +18,9 @@ pub fn new(nd: Uuid) -> All {
 	All::new(nd)
 }

-impl KeyRequirements for All {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::NodeRoot
+impl Categorise for All {
+	fn categorise(&self) -> Category {
+		Category::NodeRoot
 	}
 }
@@ -1,6 +1,6 @@
 //! Stores a LIVE SELECT query definition on the cluster
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 use uuid::Uuid;

@@ -12,7 +12,7 @@ use uuid::Uuid;
 /// The value is just the table of the live query as a Strand, which is the missing information from the key path
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Key)]
 #[non_exhaustive]
-pub struct Lq<'a> {
+pub struct Lq {
 	__: u8,
 	_a: u8,
 	#[serde(with = "uuid::serde::compact")]

@@ -22,38 +22,32 @@ pub struct Lq<'a> {
 	_d: u8,
 	#[serde(with = "uuid::serde::compact")]
 	pub lq: Uuid,
-	_e: u8,
-	pub ns: &'a str,
-	_f: u8,
-	pub db: &'a str,
 }

-pub fn new<'a>(nd: Uuid, lq: Uuid, ns: &'a str, db: &'a str) -> Lq<'a> {
-	Lq::new(nd, lq, ns, db)
+pub fn new(nd: Uuid, lq: Uuid) -> Lq {
+	Lq::new(nd, lq)
 }

-pub fn prefix_nd(nd: &Uuid) -> Vec<u8> {
-	let mut k = [b'/', b'$'].to_vec();
-	k.extend_from_slice(nd.as_bytes());
-	k.extend_from_slice(&[0x00]);
+pub fn prefix(nd: Uuid) -> Vec<u8> {
+	let mut k = super::all::new(nd).encode().unwrap();
+	k.extend_from_slice(&[b'!', b'l', b'q', 0x00]);
 	k
 }

-pub fn suffix_nd(nd: &Uuid) -> Vec<u8> {
-	let mut k = [b'/', b'$'].to_vec();
-	k.extend_from_slice(nd.as_bytes());
-	k.extend_from_slice(&[0xff]);
+pub fn suffix(nd: Uuid) -> Vec<u8> {
+	let mut k = super::all::new(nd).encode().unwrap();
+	k.extend_from_slice(&[b'!', b'l', b'q', 0xff]);
 	k
 }

-impl KeyRequirements for Lq<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::NodeLiveQuery
+impl Categorise for Lq {
+	fn categorise(&self) -> Category {
+		Category::NodeLiveQuery
 	}
 }

-impl<'a> Lq<'a> {
-	pub fn new(nd: Uuid, lq: Uuid, ns: &'a str, db: &'a str) -> Self {
+impl Lq {
+	pub fn new(nd: Uuid, lq: Uuid) -> Self {
 		Self {
 			__: b'/',
 			_a: b'$',

@@ -62,10 +56,6 @@ impl<'a> Lq<'a> {
 			_c: b'l',
 			_d: b'q',
 			lq,
-			_e: b'*',
-			ns,
-			_f: b'*',
-			db,
 		}
 	}
 }

@@ -80,35 +70,40 @@ mod tests {
 		let nd = Uuid::from_bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10]);
 		#[rustfmt::skip]
 		let lq = Uuid::from_bytes([0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20]);
-		let val = Lq::new(nd, lq, "testns", "testdb");
+		let val = Lq::new(nd, lq);
 		let enc = Lq::encode(&val).unwrap();
 		assert_eq!(
 			enc,
 			b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\
-			!lq\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\
-			*testns\0*testdb\0"
+			!lq\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
 		);

 		let dec = Lq::decode(&enc).unwrap();
 		assert_eq!(val, dec);
 	}

 	#[test]
-	fn prefix_nd() {
+	fn test_prefix() {
 		use super::*;
-		let nd = Uuid::from_bytes([
-			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
-			0x0f, 0x10,
-		]);
-		let val = prefix_nd(&nd);
-		assert_eq!(val, b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x00");
+		#[rustfmt::skip]
+		let nd = Uuid::from_bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10]);
+		let val = super::prefix(nd);
+		assert_eq!(
+			val,
+			b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\
+			!lq\x00"
+		);
 	}

 	#[test]
-	fn suffix_nd() {
+	fn test_suffix() {
 		use super::*;
-		let nd = Uuid::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-		let val = suffix_nd(&nd);
-		assert_eq!(val, b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\xff");
+		#[rustfmt::skip]
+		let nd = Uuid::from_bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10]);
+		let val = super::suffix(nd);
+		assert_eq!(
+			val,
+			b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\
+			!lq\xff"
+		);
 	}
 }
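The node live-query key thus loses its namespace and database components. A hypothetical stand-alone sketch (`lq_key` is not in this commit; the real encoding comes from the `derive::Key` macro) that reproduces the bytes asserted in the `key` test above:

use uuid::Uuid;

// Builds /$<nd>!lq<lq> by hand, matching the test assertion above.
fn lq_key(nd: Uuid, lq: Uuid) -> Vec<u8> {
	let mut k = vec![b'/', b'$'];
	k.extend_from_slice(nd.as_bytes());
	k.extend_from_slice(b"!lq");
	k.extend_from_slice(lq.as_bytes());
	k
}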
@@ -1,5 +1,6 @@
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+//! Stores a DEFINE ACCESS ON ROOT config definition
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -29,9 +30,9 @@ pub fn suffix() -> Vec<u8> {
 	k
 }

-impl KeyRequirements for Ac<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::Access
+impl Categorise for Ac<'_> {
+	fn categorise(&self) -> Category {
+		Category::Access
 	}
 }

@@ -1,6 +1,6 @@
 //! Stores the key prefix for all keys
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};

@@ -20,9 +20,9 @@ impl Default for Kv {
 	}
 }

-impl KeyRequirements for Kv {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::Root
+impl Categorise for Kv {
+	fn categorise(&self) -> Category {
+		Category::Root
 	}
 }
@@ -1,101 +0,0 @@
-//! Stores a heartbeat per registered cluster node
-use crate::dbs::node::{KeyTimestamp, Timestamp};
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
-use derive::Key;
-use serde::{Deserialize, Serialize};
-use uuid::Uuid;
-
-#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Key)]
-#[non_exhaustive]
-pub struct Hb {
-	__: u8,
-	_a: u8,
-	_b: u8,
-	_c: u8,
-	pub hb: Timestamp,
-	_d: u8,
-	#[serde(with = "uuid::serde::compact")]
-	pub nd: Uuid,
-}
-
-impl KeyRequirements for Hb {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::Heartbeat
-	}
-}
-
-impl Hb {
-	pub fn new(hb: Timestamp, nd: Uuid) -> Self {
-		Self {
-			__: b'/',
-			_a: b'!',
-			_b: b'h',
-			_c: b'b',
-			hb,
-			_d: b'/',
-			nd,
-		}
-	}
-
-	pub fn prefix() -> Vec<u8> {
-		let mut k = crate::key::root::all::new().encode().unwrap();
-		k.extend_from_slice(&[b'!', b'h', b'b', 0x00]);
-		k
-	}
-
-	pub fn suffix(ts: &Timestamp) -> Vec<u8> {
-		// Add one to timestamp so we get a complete range inclusive of provided timestamp
-		// Also convert type
-		let tskey: KeyTimestamp = KeyTimestamp {
-			value: ts.value + 1,
-		};
-		let mut k = crate::key::root::all::new().encode().unwrap();
-		k.extend_from_slice(&[b'!', b'h', b'b']);
-		k.extend_from_slice(tskey.encode().unwrap().as_ref());
-		k
-	}
-}
-
-impl From<Timestamp> for Hb {
-	fn from(ts: Timestamp) -> Self {
-		let empty_uuid = uuid::Uuid::nil();
-		Self::new(ts, empty_uuid)
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	#[test]
-	fn key() {
-		use super::*;
-		#[rustfmt::skip]
-		let val = Hb::new(
-			Timestamp { value: 123 },
-			Uuid::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
-		);
-		let enc = Hb::encode(&val).unwrap();
-		assert_eq!(
-			enc,
-			b"/!hb\x00\x00\x00\x00\x00\x00\x00\x7b/\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10");
-		let dec = Hb::decode(&enc).unwrap();
-		assert_eq!(val, dec);
-	}
-
-	#[test]
-	fn prefix() {
-		use super::*;
-		let actual = Hb::prefix();
-		assert_eq!(actual, b"/!hb\x00")
-	}
-
-	#[test]
-	fn suffix() {
-		use super::*;
-		let ts: Timestamp = Timestamp {
-			value: 456,
-		};
-		let actual = Hb::suffix(&ts);
-		assert_eq!(actual, b"/!hb\x00\x00\x00\x00\x00\x00\x01\xc9") // 457, because we add 1 to the timestamp
-	}
-}
@@ -1,6 +1,5 @@
 pub mod ac;
 pub mod all;
-pub mod hb;
 pub mod nd;
 pub mod ni;
 pub mod ns;
@@ -1,6 +1,6 @@
 //! Stores cluster membership information
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 use uuid::Uuid;

@@ -18,9 +18,25 @@ pub struct Nd {
 	pub nd: Uuid,
 }

-impl KeyRequirements for Nd {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::Node
+pub fn new(nd: Uuid) -> Nd {
+	Nd::new(nd)
+}
+
+pub fn prefix() -> Vec<u8> {
+	let mut k = crate::key::root::all::new().encode().unwrap();
+	k.extend_from_slice(&[b'!', b'n', b'd', 0x00]);
+	k
+}
+
+pub fn suffix() -> Vec<u8> {
+	let mut k = crate::key::root::all::new().encode().unwrap();
+	k.extend_from_slice(&[b'!', b'n', b'd', 0xff]);
+	k
+}
+
+impl Categorise for Nd {
+	fn categorise(&self) -> Category {
+		Category::Node
 	}
 }

@@ -34,18 +50,6 @@ impl Nd {
 			nd,
 		}
 	}
-
-	pub fn prefix() -> Vec<u8> {
-		let mut k = crate::key::root::all::new().encode().unwrap();
-		k.extend_from_slice(&[b'!', b'n', b'd', 0x00]);
-		k
-	}
-
-	pub fn suffix() -> Vec<u8> {
-		let mut k = crate::key::root::all::new().encode().unwrap();
-		k.extend_from_slice(&[b'!', b'n', b'd', 0xff]);
-		k
-	}
 }

 #[cfg(test)]

@@ -61,13 +65,13 @@ mod tests {

 	#[test]
 	fn test_prefix() {
-		let val = super::Nd::prefix();
+		let val = super::prefix();
 		assert_eq!(val, b"/!nd\0")
 	}

 	#[test]
 	fn test_suffix() {
-		let val = super::Nd::suffix();
+		let val = super::suffix();
 		assert_eq!(val, b"/!nd\xff")
 	}
 }
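The `prefix()`/`suffix()` pairs introduced above bracket a key range for scans: `0x00` sorts before any node id and `0xff` serves as the upper sentinel. A hypothetical stand-alone sketch (`nd_scan_range` is illustrative, not the crate's API) of the range the tests assert:

// Reproduces b"/!nd\x00" and b"/!nd\xff" from the tests above; a scan
// over this range visits every registered cluster node key.
fn nd_scan_range() -> (Vec<u8>, Vec<u8>) {
	let mut beg = b"/".to_vec();
	beg.extend_from_slice(&[b'!', b'n', b'd', 0x00]);
	let mut end = b"/".to_vec();
	end.extend_from_slice(&[b'!', b'n', b'd', 0xff]);
	(beg, end)
}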
Some files were not shown because too many files have changed in this diff.