From bfc474e4d8fd0fdf50e6f8bbdf5b9855ebd0ed43 Mon Sep 17 00:00:00 2001
From: Tobie Morgan Hitchcock
Date: Wed, 17 Jul 2024 23:44:05 +0100
Subject: [PATCH] Refactor transaction, caching, and key-value store interfaces
 (#4257)

Co-authored-by: Gerard Guillemas Martos
---
 .github/workflows/bench.yml                          |   10 +-
 .github/workflows/ci.yml                             |   20 +-
 Cargo.lock                                           |   74 +-
 Cargo.toml                                           |   10 +-
 Makefile                                             |    8 +-
 Makefile.ci.toml                                     |   35 +-
 Makefile.local.toml                                  |   42 +-
 Makefile.toml                                        |    5 +-
 cackle.toml                                          |   40 +-
 core/Cargo.toml                                      |   24 +-
 core/src/cf/gc.rs                                    |   97 +-
 core/src/cf/reader.rs                                |   67 +-
 core/src/cf/writer.rs                                |   65 +-
 core/src/cnf/mod.rs                                  |   53 +-
 core/src/ctx/context.rs                              |   25 +-
 core/src/dbs/executor.rs                             |  124 +-
 core/src/dbs/iterator.rs                             |    4 +-
 core/src/dbs/mod.rs                                  |    2 -
 core/src/dbs/node.rs                                 |  188 +-
 core/src/dbs/options.rs                              |   28 +-
 core/src/dbs/processor.rs                            |  262 +-
 core/src/dbs/test.rs                                 |    5 +-
 core/src/dbs/transaction.rs                          |    5 -
 core/src/doc/changefeeds.rs                          |   17 +-
 core/src/doc/compute.rs                              |    2 +-
 core/src/doc/document.rs                             |   44 +-
 core/src/doc/edges.rs                                |   14 +-
 core/src/doc/index.rs                                |   33 +-
 core/src/doc/lives.rs                                |  315 +-
 core/src/doc/process.rs                              |    2 +-
 core/src/doc/purge.rs                                |   18 +-
 core/src/doc/store.rs                                |   13 +-
 core/src/err/mod.rs                                  |   53 +-
 core/src/exe/try_join_all_buffered.rs                |    6 +-
 core/src/fnc/search.rs                               |    4 +-
 core/src/iam/signin.rs                               |    5 +-
 core/src/iam/signup.rs                               |    4 +-
 core/src/iam/verify.rs                               |  169 +-
 core/src/idg/u32.rs                                  |   12 +-
 core/src/idx/docids.rs                               |  148 +-
 core/src/idx/ft/analyzer/mod.rs                      |   39 +-
 core/src/idx/ft/doclength.rs                         |   63 +-
 core/src/idx/ft/mod.rs                               |  120 +-
 core/src/idx/ft/offsets.rs                           |    6 +-
 core/src/idx/ft/postings.rs                          |   50 +-
 core/src/idx/ft/scorer.rs                            |    4 +-
 core/src/idx/ft/termdocs.rs                          |    6 +-
 core/src/idx/ft/terms.rs                             |   94 +-
 core/src/idx/planner/checker.rs                      |   15 +-
 core/src/idx/planner/executor.rs                     |   38 +-
 core/src/idx/planner/iterators.rs                    |   89 +-
 core/src/idx/planner/tree.rs                         |   16 +-
 core/src/idx/trees/btree.rs                          |  173 +-
 core/src/idx/trees/mtree.rs                          |  150 +-
 core/src/idx/trees/store/cache.rs                    |    6 +-
 core/src/idx/trees/store/mod.rs                      |   30 +-
 core/src/idx/trees/store/tree.rs                     |    9 +-
 core/src/idx/trees/vector.rs                         |    9 +-
 core/src/key/{error.rs => category.rs}               |  143 +-
 core/src/key/change/mod.rs                           |   16 +-
 core/src/key/database/ac.rs                          |   12 +-
 core/src/key/database/all.rs                         |   10 +-
 core/src/key/database/az.rs                          |   10 +-
 core/src/key/database/fc.rs                          |   12 +-
 core/src/key/database/ml.rs                          |   12 +-
 core/src/key/database/pa.rs                          |   10 +-
 core/src/key/database/tb.rs                          |   10 +-
 core/src/key/database/ti.rs                          |   10 +-
 core/src/key/database/ts.rs                          |   10 +-
 core/src/key/database/us.rs                          |   11 +-
 core/src/key/database/vs.rs                          |   10 +-
 core/src/key/debug.rs                                |   11 +-
 core/src/key/graph/mod.rs                            |   10 +-
 core/src/key/index/all.rs                            |   10 +-
 core/src/key/index/bc.rs                             |   10 +-
 core/src/key/index/bd.rs                             |   10 +-
 core/src/key/index/bf.rs                             |   10 +-
 core/src/key/index/bi.rs                             |   10 +-
 core/src/key/index/bk.rs                             |   10 +-
 core/src/key/index/bl.rs                             |   10 +-
 core/src/key/index/bo.rs                             |   10 +-
 core/src/key/index/bp.rs                             |   10 +-
 core/src/key/index/bs.rs                             |   10 +-
 core/src/key/index/bt.rs                             |   10 +-
 core/src/key/index/bu.rs                             |   10 +-
 core/src/key/index/mod.rs                            |   10 +-
 core/src/key/key_req.rs                              |    7 -
 core/src/key/mod.rs                                  |   25 +-
 core/src/key/namespace/ac.rs                         |   10 +-
 core/src/key/namespace/all.rs                        |   10 +-
 core/src/key/namespace/db.rs                         |   12 +-
 core/src/key/namespace/di.rs                         |   12 +-
 core/src/key/namespace/us.rs                         |   11 +-
 core/src/key/node/all.rs                             |   10 +-
 core/src/key/node/lq.rs                              |   77 +-
 core/src/key/root/ac.rs                              |   11 +-
 core/src/key/root/all.rs                             |   10 +-
 core/src/key/root/hb.rs                              |  101 -
 core/src/key/root/mod.rs                             |    1 -
 core/src/key/root/nd.rs                              |   42 +-
 core/src/key/root/ni.rs                              |   10 +-
 core/src/key/root/ns.rs                              |   10 +-
 core/src/key/root/us.rs                              |   11 +-
 core/src/key/table/all.rs                            |   10 +-
 core/src/key/table/ev.rs                             |   12 +-
 core/src/key/table/fd.rs                             |   12 +-
 core/src/key/table/ft.rs                             |   12 +-
 core/src/key/table/ix.rs                             |   12 +-
 core/src/key/table/lq.rs                             |   22 +-
 core/src/key/thing/mod.rs                            |   10 +-
 core/src/kvs/api.rs                                  |  328 ++
 core/src/kvs/batch.rs                                |   10 +
 core/src/kvs/cache.rs                                |  276 +-
 core/src/kvs/clock.rs                                |    3 +
 core/src/kvs/ds.rs                                   |  775 +---
 core/src/kvs/export.rs                               |  208 +
 core/src/kvs/fdb/mod.rs                              |  618 ++-
 core/src/kvs/indxdb/mod.rs                           |  135 +-
 core/src/kvs/kv.rs                                   |    9 -
 core/src/kvs/live.rs                                 |   19 +
 core/src/kvs/lq_cf.rs                                |  388 --
 core/src/kvs/lq_structs.rs                           |  138 -
 core/src/kvs/lq_v2_doc.rs                            |  420 --
 core/src/kvs/lq_v2_fut.rs                            |  205 -
 core/src/kvs/mem/mod.rs                              |  178 +-
 core/src/kvs/mod.rs                                  |   19 +-
 core/src/kvs/node.rs                                 |  301 ++
 core/src/kvs/rocksdb/mod.rs                          |  228 +-
 core/src/kvs/scanner.rs                              |  110 +
 core/src/kvs/stash.rs                                |   17 +
 core/src/kvs/surrealkv/mod.rs                        |  364 +-
 core/src/kvs/tests/cluster_init.rs                   |  397 --
 core/src/kvs/tests/hb.rs                             |   35 -
 core/src/kvs/tests/helper.rs                         |   53 +-
 core/src/kvs/tests/lq.rs                             |   83 -
 core/src/kvs/tests/mod.rs                            |  226 +-
 core/src/kvs/tests/multireader.rs                    |    8 +-
 .../kvs/tests/multiwriter_different_keys.rs          |   10 +-
 .../kvs/tests/multiwriter_same_keys_allow.rs         |   14 +-
 .../tests/multiwriter_same_keys_conflict.rs          |   14 +-
 core/src/kvs/tests/nd.rs                             |   28 -
 core/src/kvs/tests/ndlq.rs                           |   34 -
 core/src/kvs/tests/nq.rs                             |   61 -
 core/src/kvs/tests/raw.rs                            |  271 +-
 core/src/kvs/tests/sequences.rs                      |   47 +
 core/src/kvs/tests/snapshot.rs                       |   12 +-
 core/src/kvs/tests/tb.rs                             |   89 -
 core/src/kvs/tests/tblq.rs                           |   53 -
 .../kvs/tests/timestamp_to_versionstamp.rs           |   18 +-
 core/src/kvs/tests/tx_test.rs                        |   71 +-
 core/src/kvs/tikv/mod.rs                             |  265 +-
 core/src/kvs/tr.rs                                   |  645 +++
 core/src/kvs/tx.rs                                   | 4094 +++++------------
 core/src/mac/mod.rs                                  |   56 +-
 core/src/rpc/rpc_context.rs                          |    7 -
 core/src/sql/function.rs                             |    9 +-
 core/src/sql/kind.rs                                 |    8 +-
 core/src/sql/model.rs                                |   10 +-
 core/src/sql/param.rs                                |   16 +-
 core/src/sql/permission.rs                           |    9 +-
 core/src/sql/statements/analyze.rs                   |   13 +-
 core/src/sql/statements/define/access.rs             |   52 +-
 core/src/sql/statements/define/analyzer.rs           |   25 +-
 core/src/sql/statements/define/database.rs           |   44 +-
 core/src/sql/statements/define/event.rs              |   22 +-
 core/src/sql/statements/define/field.rs              |  133 +-
 core/src/sql/statements/define/function.rs           |   20 +-
 core/src/sql/statements/define/index.rs              |   41 +-
 core/src/sql/statements/define/model.rs              |   86 +-
 core/src/sql/statements/define/namespace.rs          |   44 +-
 core/src/sql/statements/define/param.rs              |   20 +-
 core/src/sql/statements/define/table.rs              |  126 +-
 core/src/sql/statements/define/user.rs               |   56 +-
 core/src/sql/statements/info.rs                      |  488 +-
 core/src/sql/statements/kill.rs                      |  168 +-
 core/src/sql/statements/live.rs                      |  145 +-
 core/src/sql/statements/rebuild.rs                   |   24 +-
 core/src/sql/statements/remove/access.rs             |   38 +-
 core/src/sql/statements/remove/analyzer.rs           |   12 +-
 core/src/sql/statements/remove/database.rs           |   18 +-
 core/src/sql/statements/remove/event.rs              |   13 +-
 core/src/sql/statements/remove/field.rs              |   19 +-
 core/src/sql/statements/remove/function.rs           |   12 +-
 core/src/sql/statements/remove/index.rs              |   15 +-
 core/src/sql/statements/remove/model.rs              |   18 +-
 core/src/sql/statements/remove/namespace.rs          |   18 +-
 core/src/sql/statements/remove/param.rs              |   12 +-
 core/src/sql/statements/remove/table.rs              |   20 +-
 core/src/sql/statements/remove/user.rs               |   38 +-
 core/src/sql/statements/show.rs                      |   22 +-
 core/src/sql/uuid.rs                                 |    4 +-
 .../src/sql/value/serde/ser/statement/live.rs        |    5 -
 core/src/vs/mod.rs                                   |    2 -
 lib/Cargo.toml                                       |   17 +-
 lib/benches/README.md                                |   11 -
 lib/benches/hash_trie_btree.rs                       |    9 +-
 lib/benches/index_btree.rs                           |    8 +-
 lib/benches/index_mtree.rs                           |   19 +-
 lib/benches/sdb_benches/lib/mod.rs                   |    9 -
 lib/benches/sdb_benches/sdk/mod.rs                   |   16 +-
 lib/src/api/engine/local/mod.rs                      |    4 +-
 lib/src/api/engine/local/native.rs                   |    5 +-
 lib/src/api/engine/tasks.rs                          |  102 +-
 lib/src/api/method/insert.rs                         |    5 +-
 lib/src/lib.rs                                       |    3 +
 lib/src/mac/mod.rs                                   |    9 +
 lib/tests/bootstrap.rs                               |  250 -
 lib/tests/changefeeds.rs                             |    3 +-
 lib/tests/define.rs                                  |   15 +-
 lib/tests/info.rs                                    |   11 +-
 lib/tests/live.rs                                    |  173 -
 lib/tests/remove.rs                                  |   19 +-
 lib/tests/strict.rs                                  |    1 +
 lib/tests/util.rs                                    |    4 +-
 src/cli/mod.rs                                       |    4 +-
 src/cli/start.rs                                     |   20 +-
 src/cli/validator/parser/env_filter.rs               |    3 +-
 src/cnf/mod.rs                                       |    1 +
 src/dbs/mod.rs                                       |   43 +-
 src/mac/mod.rs                                       |    7 +-
 src/net/health.rs                                    |   20 +-
 src/net/ml.rs                                        |    2 +-
 src/rpc/connection.rs                                |   18 +-
 src/telemetry/logs/mod.rs                            |   36 +-
 src/telemetry/mod.rs                                 |   39 +-
 supply-chain/audits.toml                             |    6 +
 supply-chain/config.toml                             |   40 +-
 supply-chain/imports.lock                            |  146 +-
 tests/cli_integration.rs                             |   17 +-
 229 files changed, 7232 insertions(+), 10884 deletions(-)
 delete mode 100644 core/src/dbs/transaction.rs
 rename core/src/key/{error.rs => category.rs} (55%)
 delete mode 100644 core/src/key/key_req.rs
 delete mode 100644 core/src/key/root/hb.rs
 create mode 100644 core/src/kvs/api.rs
 create mode 100644 core/src/kvs/batch.rs
 create mode 100644 core/src/kvs/export.rs
 create mode 100644 core/src/kvs/live.rs
 delete mode 100644 core/src/kvs/lq_cf.rs
 delete mode 100644 core/src/kvs/lq_structs.rs
 delete mode 100644 core/src/kvs/lq_v2_doc.rs
 delete mode 100644 core/src/kvs/lq_v2_fut.rs
 create mode 100644 core/src/kvs/node.rs
 create mode 100644 core/src/kvs/scanner.rs
 create mode 100644 core/src/kvs/stash.rs
 delete mode 100644 core/src/kvs/tests/cluster_init.rs
 delete mode 100644 core/src/kvs/tests/hb.rs
 delete mode 100644 core/src/kvs/tests/lq.rs
 delete mode 100644 core/src/kvs/tests/nd.rs
 delete mode 100644 core/src/kvs/tests/ndlq.rs
 delete mode 100644 core/src/kvs/tests/nq.rs
 create mode 100644 core/src/kvs/tests/sequences.rs
 delete mode 100644 core/src/kvs/tests/tb.rs
 delete mode 100644 core/src/kvs/tests/tblq.rs
 create mode 100644 core/src/kvs/tr.rs
 create mode 100644 lib/src/mac/mod.rs
 delete mode 100644 lib/tests/bootstrap.rs
 delete mode 100644 lib/tests/live.rs

diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml
index 8fdd434c..0558df1c 100644
--- a/.github/workflows/bench.yml
+++ b/.github/workflows/bench.yml
@@ -102,14 +102,14 @@ jobs:
             features: "kv-mem"
           - target: "lib-rocksdb"
             features: "kv-rocksdb"
-          - target: "lib-fdb"
-            features: "kv-fdb-7_1"
+          - target: "lib-surrealkv"
+            features: "kv-surrealkv"
           - target: "sdk-mem"
             features: "kv-mem"
          - target: "sdk-rocksdb"
             features: "kv-rocksdb"
-          - target: "sdk-fdb"
-            features: "kv-fdb-7_1"
+          - target: "sdk-surrealkv"
+            features: "kv-surrealkv"
           # This one fails because the server consumes too much memory and the kernel kills it. I tried with instances up to 16GB of RAM.
           # - target: "sdk-ws"
           #   features: "protocol-ws"
@@ -143,7 +143,7 @@
         uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
         if: ${{ matrix.target == 'lib-fdb' || matrix.target == 'sdk-fdb' }}
         with:
-          version: "7.1.30"
+          version: "7.1.61"

       # Run SurrealDB in the background if needed
       - name: Build and start SurrealDB
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 595d211b..09b1ce4c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -571,16 +571,24 @@ jobs:
         with:
           save-if: ${{ github.ref == 'refs/heads/main' }}

-      - name: Setup FoundationDB
-        uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
-        with:
-          version: "7.1.30"
-
       - name: Install cargo-make
         run: cargo install --debug --locked cargo-make

+      - name: Setup FoundationDB
+        uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
+        with:
+          version: "7.1.61"
+
       - name: Test fdb engine
-        run: cargo make ci-api-integration-fdb
+        run: cargo make ci-api-integration-fdb-7_1
+
+      - name: Setup FoundationDB
+        uses: foundationdb-rs/foundationdb-actions-install@v.2.2.0
+        with:
+          version: "7.3.47"
+
+      - name: Test fdb engine
+        run: cargo make ci-api-integration-fdb-7_3

       - name: Debug info
         if: always()
diff --git a/Cargo.lock b/Cargo.lock
index 71949b60..7d04b9a2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -872,7 +872,6 @@ dependencies = [
 "clang-sys",
 "lazy_static",
 "lazycell",
- "log",
 "peeking_take_while",
 "prettyplease",
 "proc-macro2",
@@ -881,7 +880,6 @@ dependencies = [
 "rustc-hash",
 "shlex",
 "syn 2.0.58",
- "which",
 ]

 [[package]]
@@ -1782,9 +1780,9 @@ dependencies = [

 [[package]]
 name = "echodb"
-version = "0.6.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ac31e38aeac770dd01b9d6c9ab2a6d7f025815f71105911cf6de073a5db8ee1"
+checksum = "1d1eccc44ff21b80ca7e883ff57423a12610965a33637d5d0bef4adebcd81749"
 dependencies = [
 "arc-swap",
 "imbl",
@@ -2082,9 +2080,9 @@ dependencies = [

 [[package]]
 name = "foundationdb"
-version = "0.8.0"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8696fd1be198f101eb58aeecf0f504fc02b28c7afcc008b4e4a998a91b305108"
+checksum = "020bf4ae7238dbdb1ff01e9f981db028515cf66883c461e29faedfea130b2728"
 dependencies = [
 "async-recursion 1.1.0",
 "async-trait",
@@ -2102,18 +2100,18 @@ dependencies = [

 [[package]]
 name = "foundationdb-gen"
-version = "0.8.0"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62239700f01b041b6372aaeb847c52f960e1a69fd2b1025dc995ea3dd90e3308"
+checksum = "36878d54a76a48e794d0fe89be2096ab5968b071e7ec25f7becfe7846f55fa77"
 dependencies = [
 "xml-rs",
 ]

 [[package]]
 name = "foundationdb-macros"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83c8d52fe8b46ab822b4decdcc0d6d85aeedfc98f0d52ba2bd4aec4a97807516"
+checksum = "f8db6653cbc621a3810d95d55bd342be3e71181d6df21a4eb29ef986202d3f9c"
 dependencies = [
 "proc-macro2",
 "quote",
@@ -2123,11 +2121,12 @@ dependencies = [

 [[package]]
 name = "foundationdb-sys"
-version = "0.8.0"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98e49545f5393d276b7b888c77e3f9519fd33727435f8244344be72c3284256f"
+checksum = "ace2f49db8614b7d7e3b656a12e0059b5fbd0a4da3410b1797374bec3db269fa"
 dependencies = [
- "bindgen 0.65.1",
+ "bindgen 0.69.4",
+ "libc",
 ]

 [[package]]
@@ -2912,9 +2911,9 @@ dependencies = [

 [[package]]
 name = "indxdb"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1de97697bf90e30042ea4ae3260a976253e0bb1703fa339541bcc047cc994180"
+checksum = "817e28ebe3466175be7e66f4eadfb9e6a221537db2f78b6be04e14b7051a56af"
 dependencies = [
 "js-sys",
 "rexie",
@@ -5432,9 +5431,9 @@ dependencies = [

 [[package]]
 name = "serde_bytes"
-version = "0.11.14"
+version = "0.11.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734"
+checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a"
 dependencies = [
 "serde",
 ]
@@ -6116,9 +6115,9 @@ dependencies = [

 [[package]]
 name = "surrealdb-tikv-client"
-version = "0.2.0-surreal.2"
+version = "0.3.0-surreal.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b79f921871d6ed67c970e8499b4aca3724115c189f99ab30f51b46c77bd19819"
+checksum = "f9e204e84239374e8ba2dfabb88f5ac20f69baa09599eee225958445fb7e0a14"
 dependencies = [
 "async-recursion 0.3.2",
 "async-trait",
@@ -6130,15 +6129,17 @@ dependencies = [
 "log",
 "pin-project",
 "prometheus",
- "prost 0.11.9",
+ "prost 0.12.3",
 "rand 0.8.5",
 "regex",
 "semver",
 "serde",
 "serde_derive",
+ "serde_json",
+ "take_mut",
 "thiserror",
 "tokio",
- "tonic 0.9.2",
+ "tonic 0.10.2",
 ]

 [[package]]
@@ -6271,6 +6272,12 @@ dependencies = [
 "libc",
 ]

+[[package]]
+name = "take_mut"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
+
 [[package]]
 name = "tap"
 version = "1.0.1"
@@ -6387,18 +6394,18 @@ dependencies = [

 [[package]]
 name = "thiserror"
-version = "1.0.58"
+version = "1.0.61"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
+checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
 dependencies = [
 "thiserror-impl",
 ]

 [[package]]
 name = "thiserror-impl"
-version = "1.0.58"
+version = "1.0.61"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
+checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
 dependencies = [
 "proc-macro2",
 "quote",
@@ -6482,9 +6489,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"

 [[package]]
 name = "tokio"
-version = "1.37.0"
+version = "1.38.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
+checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a"
 dependencies = [
 "backtrace",
 "bytes",
@@ -6511,9 +6518,9 @@ dependencies = [

 [[package]]
 name = "tokio-macros"
-version = "2.2.0"
+version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
+checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
 dependencies = [
 "proc-macro2",
 "quote",
@@ -6673,17 +6680,15 @@ dependencies = [

 [[package]]
 name = "tonic"
-version = "0.9.2"
+version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a"
+checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e"
 dependencies = [
 "async-stream",
 "async-trait",
 "axum 0.6.20",
 "base64 0.21.7",
 "bytes",
- "futures-core",
- "futures-util",
 "h2",
 "http 0.2.12",
 "http-body 0.4.6",
@@ -6691,7 +6696,8 @@ dependencies = [
 "hyper-timeout",
 "percent-encoding",
 "pin-project",
- "prost 0.11.9",
+ "prost 0.12.3",
+ "rustls 0.21.11",
 "rustls-pemfile",
 "tokio",
 "tokio-rustls",
diff --git a/Cargo.toml b/Cargo.toml
index 45f172b5..e19cd103 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -8,11 +8,11 @@ authors = ["Tobie Morgan Hitchcock "]

 [features]
 # Public features
-default = ["storage-mem", "storage-rocksdb", "scripting", "http"]
+default = ["storage-mem", "storage-surrealkv", "storage-rocksdb", "scripting", "http"]
 storage-mem = ["surrealdb/kv-mem"]
 storage-rocksdb = ["surrealdb/kv-rocksdb"]
 storage-tikv = ["surrealdb/kv-tikv"]
-storage-fdb = ["surrealdb/kv-fdb-7_1"]
+storage-fdb = ["surrealdb/kv-fdb"]
 storage-surrealkv = ["surrealdb/kv-surrealkv"]
 scripting = ["surrealdb/scripting"]
 http = ["surrealdb/http"]
@@ -20,6 +20,9 @@ http-compression = []
 ml = ["surrealdb/ml"]
 jwks = ["surrealdb/jwks"]
 performance-profiler = ["dep:pprof"]
+# Special features
+storage-fdb-7_1 = ["surrealdb/kv-fdb-7_1"]
+storage-fdb-7_3 = ["surrealdb/kv-fdb-7_3"]

 [workspace]
 members = [
@@ -30,6 +33,9 @@ members = [
   "lib/examples/rocket",
 ]

+[profile.make]
+inherits = "dev"
+
 [profile.release]
 lto = true
 strip = true
diff --git a/Makefile b/Makefile
index a5d0cfb8..8a1eddbe 100644
--- a/Makefile
+++ b/Makefile
@@ -45,10 +45,10 @@ serve: check-deps
 sql: check-deps
	cargo make sql

-.PHONY: quick
-quick: check-deps
-	cargo make quick
-
 .PHONY: build
 build: check-deps
	cargo make build
+
+.PHONY: release
+release: check-deps
+	cargo make release
diff --git a/Makefile.ci.toml b/Makefile.ci.toml
index 1c823d44..5b91560b 100644
--- a/Makefile.ci.toml
+++ b/Makefile.ci.toml
@@ -1,6 +1,6 @@
 [tasks.ci-format]
 category = "CI - CHECK"
-dependencies = ["cargo-fmt", "cargo-fmt-unlinked"]
+dependencies = ["cargo-fmt"]

 [tasks.ci-check]
 category = "CI - CHECK"
@@ -15,7 +15,7 @@ args = ["check", "--locked", "--package", "surrealdb", "--features", "protocol-w
 [tasks.ci-clippy]
 category = "CI - CHECK"
 command = "cargo"
-args = ["clippy", "--all-targets", "--features", "storage-mem,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks", "--tests", "--benches", "--examples", "--bins", "--", "-D", "warnings"]
+args = ["clippy", "--all-targets", "--features", "storage-mem,storage-surrealkv,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks,ml,storage-fdb-7_1", "--tests", "--benches", "--examples", "--bins", "--", "-D", "warnings"]

 #
 # Integration Tests
 #
@@ -151,11 +151,6 @@ category = "CI - INTEGRATION TESTS"
 env = { _TEST_API_ENGINE = "rocksdb", _TEST_FEATURES = "kv-rocksdb" }
 run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = true }

-[tasks.ci-api-integration-fdb]
-category = "CI - INTEGRATION TESTS"
-env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb-7_1" }
-run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = false }
-
 [tasks.ci-api-integration-surrealkv]
 category = "CI - INTEGRATION TESTS"
 env = { _TEST_API_ENGINE = "surrealkv", _TEST_FEATURES = "kv-surrealkv" }
@@ -166,6 +161,16 @@ category = "CI - INTEGRATION TESTS"
 env = { _TEST_API_ENGINE = "tikv", _TEST_FEATURES = "kv-tikv" }
 run_task = { name = ["start-tikv", "test-kvs", "test-api-integration", "stop-tikv"], fork = true, parallel = false }

+[tasks.ci-api-integration-fdb-7_1]
+category = "CI - INTEGRATION TESTS"
+env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb,kv-fdb-7_1" }
+run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = false }
+
+[tasks.ci-api-integration-fdb-7_3]
+category = "CI - INTEGRATION TESTS"
+env = { _TEST_API_ENGINE = "fdb", _TEST_FEATURES = "kv-fdb,kv-fdb-7_3" }
+run_task = { name = ["test-kvs", "test-api-integration"], fork = true, parallel = false }
+

 #
 # Services
 #
@@ -283,7 +288,7 @@ BENCH_WORKER_THREADS = { value = "1", condition = { env_not_set = ["BENCH_WORKER
 BENCH_NUM_OPS = { value = "1000", condition = { env_not_set = ["BENCH_NUM_OPS"] } }
 BENCH_DURATION = { value = "30", condition = { env_not_set = ["BENCH_DURATION"] } }
 BENCH_SAMPLE_SIZE = { value = "10", condition = { env_not_set = ["BENCH_SAMPLE_SIZE"] } }
-BENCH_FEATURES = { value = "protocol-ws,kv-mem,kv-rocksdb,kv-fdb-7_1,kv-surrealkv", condition = { env_not_set = ["BENCH_FEATURES"] } }
+BENCH_FEATURES = { value = "protocol-ws,kv-mem,kv-rocksdb,kv-surrealkv", condition = { env_not_set = ["BENCH_FEATURES"] } }

 [tasks.bench-target]
 private = true
@@ -301,11 +306,6 @@ category = "CI - BENCHMARK - SurrealDB Target"
 env = { BENCH_DATASTORE_TARGET = "lib-rocksdb" }
 run_task = { name = ["bench-target"] }

-[tasks.bench-lib-fdb]
-category = "CI - BENCHMARK - SurrealDB Target"
-env = { BENCH_DATASTORE_TARGET = "lib-fdb" }
-run_task = { name = ["bench-target"] }
-
 [tasks.bench-sdk-mem]
 category = "CI - BENCHMARK - SurrealDB Target"
 env = { BENCH_DATASTORE_TARGET = "sdk-mem" }
@@ -316,17 +316,12 @@ category = "CI - BENCHMARK - SurrealDB Target"
 env = { BENCH_DATASTORE_TARGET = "sdk-rocksdb" }
 run_task = { name = ["bench-target"] }

-[tasks.bench-sdk-fdb]
+[tasks.bench-lib-surrealkv]
 category = "CI - BENCHMARK - SurrealDB Target"
-env = { BENCH_DATASTORE_TARGET = "sdk-fdb" }
+env = { BENCH_DATASTORE_TARGET = "lib-surrealkv" }
 run_task = { name = ["bench-target"] }

 [tasks.bench-sdk-ws]
 category = "CI - BENCHMARK - SurrealDB Target"
 env = { BENCH_DATASTORE_TARGET = "sdk-ws" }
 run_task = { name = ["bench-target"] }
-
-[tasks.bench-lib-surrealkv]
-category = "CI - BENCHMARK - SurrealDB Target"
-env = { BENCH_DATASTORE_TARGET = "lib-surrealkv" }
-run_task = { name = ["bench-target"] }
diff --git a/Makefile.local.toml b/Makefile.local.toml
index 235f63de..91da299b 100644
--- a/Makefile.local.toml
+++ b/Makefile.local.toml
@@ -24,35 +24,29 @@ args = ["doc", "--open", "--no-deps", "--package", "surrealdb", "--features", "r
 category = "LOCAL USAGE"
 command = "cargo"
 env = { RUST_MIN_STACK={ value = "4194304", condition = { env_not_set = ["RUST_MIN_STACK"] } } }
-args = ["test", "--workspace", "--no-fail-fast"]
-
-# Check
-[tasks.cargo-check]
-category = "LOCAL USAGE"
-command = "cargo"
-args = ["check", "--workspace", "--features", "${DEV_FEATURES}"]
+args = ["test", "--profile", "make", "--workspace", "--no-fail-fast"]

+# Format
 [tasks.cargo-fmt]
 category = "LOCAL USAGE"
 command = "cargo"
 args = ["fmt", "--all", "--check"]

-[tasks.cargo-fmt-unlinked]
+# Check
+[tasks.cargo-check]
 category = "LOCAL USAGE"
-script = """
-	set -e
-	cd ${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}/
-	cargo fmt --all --check -- ./lib/tests/**/*.rs ./core/src/kvs/tests/*.rs
-"""
+command = "cargo"
+args = ["check", "--profile", "make", "--workspace", "--all-targets", "--features", "${ALL_FEATURES}"]

+# Clippy
 [tasks.cargo-clippy]
 category = "LOCAL USAGE"
 command = "cargo"
-args = ["clippy", "--all-targets", "--all-features", "--", "-D", "warnings"]
+args = ["clippy", "--profile", "make", "--workspace", "--all-targets", "--features", "${ALL_FEATURES}", "--", "-D", "warnings"]

 [tasks.check]
 category = "LOCAL USAGE"
-dependencies = ["cargo-check", "cargo-fmt", "cargo-fmt-unlinked", "cargo-clippy"]
+dependencies = ["cargo-fmt", "cargo-check", "cargo-clippy"]

 [tasks.check-wasm]
 category = "LOCAL USAGE"
@@ -74,30 +68,30 @@ args = ["bench", "--package", "surrealdb", "--no-default-features", "--features"

 [tasks.run]
 category = "LOCAL USAGE"
 command = "cargo"
-args = ["run", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "${@}"]
+args = ["run", "--profile", "make", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "${@}"]

 # Serve
 [tasks.serve]
 category = "LOCAL USAGE"
 command = "cargo"
-args = ["run", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "start", "--allow-all", "${@}"]
+args = ["run", "--profile", "make", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "start", "--allow-all", "${@}"]

 # SQL
 [tasks.sql]
 category = "LOCAL USAGE"
 command = "cargo"
-args = ["run", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "sql", "--pretty", "${@}"]
-
-# Quick
-[tasks.quick]
-category = "LOCAL USAGE"
-command = "cargo"
-args = ["build", "${@}"]
+args = ["run", "--profile", "make", "--no-default-features", "--features", "${DEV_FEATURES}", "--", "sql", "--pretty", "${@}"]

 # Build
 [tasks.build]
 category = "LOCAL USAGE"
 command = "cargo"
+args = ["build", "--profile", "make", "${@}"]
+
+# Release
+[tasks.release]
+category = "LOCAL USAGE"
+command = "cargo"
 args = ["build", "--release", "${@}"]

 # Default
diff --git a/Makefile.toml b/Makefile.toml
index 152dcf61..76be2f30 100644
--- a/Makefile.toml
+++ b/Makefile.toml
@@ -10,8 +10,9 @@ reduce_output = true
 default_to_workspace = false

 [env]
-DEV_FEATURES={ value = "storage-mem,scripting,http,ml,jwks", condition = { env_not_set = ["DEV_FEATURES"] } }
-SURREAL_LOG={ value = "trace", condition = { env_not_set = ["SURREAL_LOG"] } }
+ALL_FEATURES={ value = "storage-mem,storage-surrealkv,storage-rocksdb,storage-tikv,storage-fdb,scripting,http,jwks,ml,storage-fdb-7_1", condition = { env_not_set = ["ALL_FEATURES"] } }
+DEV_FEATURES={ value = "storage-mem,storage-surrealkv,scripting,http,jwks,ml", condition = { env_not_set = ["DEV_FEATURES"] } }
+SURREAL_LOG={ value = "full", condition = { env_not_set = ["SURREAL_LOG"] } }
 SURREAL_USER={ value = "root", condition = { env_not_set = ["SURREAL_USER"] } }
 SURREAL_PASS={ value = "root", condition = { env_not_set = ["SURREAL_PASS"] } }
 SURREAL_PATH={ value = "memory", condition = { env_not_set = ["SURREAL_PATH"] } }
diff --git a/cackle.toml b/cackle.toml
index 51c03756..7bc92b7f 100644
--- a/cackle.toml
+++ b/cackle.toml
@@ -20,6 +20,10 @@ include = [
   "rustix::fs",
   "tokio::fs",
 ]
+exclude = [
+  "std::path::Path",
+  "std::path::PathBuf",
+]

 [api.net]
 include = [
@@ -30,10 +34,14 @@ include = [
   "surreal::net",
   "surrealdb",
   "surrealdb_core",
+  "surrealkv",
   "tokio::net",
   "tracing",
   "tracing_core",
 ]
+exclude = [
+  "hashbrown::map",
+]

 #
 # Crates Linking to Libraries
 #
@@ -308,10 +316,12 @@ build.allow_apis = [
   "process",
 ]
 allow_unsafe = true
+allow_apis = [
+  "fs",
+]

 [pkg.proc-macro2]
 build.allow_apis = [
-  "fs",
   "process",
 ]
 allow_unsafe = true
@@ -435,6 +445,9 @@ allow_unsafe = true
 build.allow_apis = [
   "process",
 ]
+build.allow_build_instructions = [
+  "cargo:rustc-check-cfg=*",
+]
 allow_apis = [
   "fs",
 ]
@@ -617,9 +630,6 @@ build.allow_build_instructions = [

 [pkg.dirs-sys-next]
 allow_unsafe = true
-from.build.allow_apis = [
-  "fs",
-]

 [pkg.crunchy]
 build.allow_apis = [
@@ -637,7 +647,6 @@ allow_unsafe = true

 [pkg.anyhow]
 build.allow_apis = [
-  "fs",
   "process",
 ]
 allow_unsafe = true
@@ -742,9 +751,6 @@ allow_unsafe = true

 [pkg.dashmap]
 allow_unsafe = true
-allow_apis = [
-  "net",
-]

 [pkg.tokio-stream]
 allow_unsafe = true
@@ -762,9 +768,6 @@ allow_apis = [
   "fs",
 ]

-[pkg.atomic-waker]
-allow_unsafe = true
-
 [pkg.doc-comment]
 build.allow_apis = [
   "process",
 ]
@@ -986,6 +989,9 @@ allow_unsafe = true
 [pkg.crossbeam-deque]
 allow_unsafe = true

+[pkg.crossbeam-queue]
+allow_unsafe = true
+
 [pkg.anstream]
 allow_unsafe = true

@@ -1056,9 +1062,6 @@ allow_unsafe = true
 [pkg.argon2]
 allow_unsafe = true

-[pkg.futures-concurrency]
-allow_unsafe = true
-
 [pkg.quick_cache]
 allow_unsafe = true
 allow_apis = [
@@ -1211,6 +1214,7 @@ allow_apis = [

 [pkg.axum-server]
 allow_apis = [
+  "fs",
   "net",
 ]

@@ -1353,3 +1357,11 @@ allow_unsafe = true

 [pkg.tendril]
 allow_unsafe = true
+
+[pkg.lru]
+allow_unsafe = true
+
+[pkg.surrealkv]
+allow_apis = [
+  "fs",
+]
diff --git a/core/Cargo.toml b/core/Cargo.toml
index 24d37d66..a53d5133 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -27,15 +27,8 @@ default = ["kv-mem"]
 kv-mem = ["dep:echodb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
 kv-indxdb = ["dep:indxdb"]
 kv-rocksdb = ["dep:rocksdb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
-kv-tikv = ["dep:tikv", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-5_1 = ["foundationdb/fdb-5_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-5_2 = ["foundationdb/fdb-5_2", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-6_0 = ["foundationdb/fdb-6_0", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-6_1 = ["foundationdb/fdb-6_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-6_2 = ["foundationdb/fdb-6_2", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-6_3 = ["foundationdb/fdb-6_3", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-7_0 = ["foundationdb/fdb-7_0", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
-kv-fdb-7_1 = ["foundationdb/fdb-7_1", "kv-fdb", "dep:tempfile", "dep:ext-sort"]
+kv-tikv = ["dep:tikv", "tokio/time", "dep:tempfile", "dep:ext-sort"]
+kv-fdb = ["dep:foundationdb", "tokio/time", "dep:tempfile", "dep:ext-sort"]
 kv-surrealkv = ["dep:surrealkv", "tokio/time", "dep:tempfile", "dep:ext-sort"]
 scripting = ["dep:js"]
 http = ["dep:reqwest"]
@@ -48,8 +41,9 @@ arbitrary = [
   "geo-types/arbitrary",
   "uuid/arbitrary",
 ]
-# Private features
-kv-fdb = ["tokio/time"]
+# Special features
+kv-fdb-7_1 = ["foundationdb/fdb-7_1"]
+kv-fdb-7_3 = ["foundationdb/fdb-7_3"]

 [package.metadata.docs.rs]
 rustdoc-args = ["--cfg", "docsrs"]
@@ -76,10 +70,10 @@ dashmap = "5.5.3"
 derive = { version = "0.12.0", package = "surrealdb-derive" }
 deunicode = "1.4.1"
 dmp = "0.2.0"
-echodb = { version = "0.6.0", optional = true }
+echodb = { version = "0.7.0", optional = true }
 executor = { version = "1.8.0", package = "async-executor" }
 ext-sort = { version = "^0.1.4", optional = true }
-foundationdb = { version = "0.8.0", default-features = false, features = [
+foundationdb = { version = "0.9.0", default-features = false, features = [
   "embedded-fdb-include",
 ], optional = true }
 fst = "0.4.7"
@@ -89,7 +83,7 @@ geo = { version = "0.27.0", features = ["use-serde"] }
 geo-types = { version = "0.7.12", features = ["arbitrary"] }
 hashbrown = { version = "0.14.5", features = ["serde"] }
 hex = { version = "0.4.3" }
-indxdb = { version = "0.4.0", optional = true }
+indxdb = { version = "0.5.0", optional = true }
 ipnet = "2.9.0"
 js = { version = "0.6.2", package = "rquickjs", features = [
   "array-buffer",
@@ -146,7 +140,7 @@ surrealkv = { version = "0.3.0", optional = true }
 surrealml = { version = "0.1.1", optional = true, package = "surrealml-core" }
 tempfile = { version = "3.10.1", optional = true }
 thiserror = "1.0.50"
-tikv = { version = "0.2.0-surreal.2", default-features = false, package = "surrealdb-tikv-client", optional = true }
+tikv = { version = "0.3.0-surreal.1", default-features = false, package = "surrealdb-tikv-client", optional = true }
 tracing = "0.1.40"
 trice = "0.4.0"
 ulid = { version = "1.1.0", features = ["serde"] }
diff --git a/core/src/cf/gc.rs b/core/src/cf/gc.rs
index b36fe9f7..c26fc59e 100644
--- a/core/src/cf/gc.rs
+++ b/core/src/cf/gc.rs
@@ -1,7 +1,7 @@
 use crate::err::Error;
 use crate::key::change;
 #[cfg(debug_assertions)]
-use crate::key::debug::sprint_key;
+use crate::key::debug::sprint;
 use crate::kvs::Transaction;
 use crate::vs;
 use crate::vs::Versionstamp;
@@ -9,42 +9,36 @@ use std::str;

 // gc_all_at deletes all change feed entries that become stale at the given timestamp.
 #[allow(unused)]
-pub async fn gc_all_at(tx: &mut Transaction, ts: u64, limit: Option<u32>) -> Result<(), Error> {
-    let nses = tx.all_ns().await?;
-    let nses = nses.as_ref();
-    for ns in nses {
-        gc_ns(tx, ns.name.as_str(), limit, ts).await?;
+pub async fn gc_all_at(tx: &Transaction, ts: u64) -> Result<(), Error> {
+    // Fetch all namespaces
+    let nss = tx.all_ns().await?;
+    // Loop over each namespace
+    for ns in nss.as_ref() {
+        // Trace for debugging
+        #[cfg(debug_assertions)]
+        trace!("Performing garbage collection on {ns} for timestamp {ts}");
+        // Process the namespace
+        gc_ns(tx, ts, ns.name.as_str()).await?;
     }
     Ok(())
 }

 // gc_ns deletes all change feed entries in the given namespace that are older than the given watermark.
 #[allow(unused)]
-pub async fn gc_ns(
-    tx: &mut Transaction,
-    ns: &str,
-    limit: Option<u32>,
-    ts: u64,
-) -> Result<(), Error> {
+pub async fn gc_ns(tx: &Transaction, ts: u64, ns: &str) -> Result<(), Error> {
+    // Fetch all databases
     let dbs = tx.all_db(ns).await?;
-    let dbs = dbs.as_ref();
-    for db in dbs {
-        // We get the expiration of the change feed defined on the database
-        let db_cf_expiry = match &db.changefeed {
-            None => 0,
-            Some(cf) => cf.expiry.as_secs(),
-        };
+    // Loop over each database
+    for db in dbs.as_ref() {
+        // Trace for debugging
         #[cfg(debug_assertions)]
-        trace!(
-            "Performing garbage collection on ns {} db {} for ts {}. The cf expiration is {}",
-            ns,
-            db.name,
-            ts,
-            db_cf_expiry
-        );
-        let tbs = tx.all_tb(ns, db.name.as_str()).await?;
-        let tbs = tbs.as_ref();
-        let max_tb_cf_expiry = tbs.iter().fold(0, |acc, tb| match &tb.changefeed {
+        trace!("Performing garbage collection on {ns}:{db} for timestamp {ts}");
+        // Fetch all tables
+        let tbs = tx.all_tb(ns, &db.name).await?;
+        // Get the database changefeed expiration
+        let db_cf_expiry = db.changefeed.map(|v| v.expiry.as_secs()).unwrap_or_default();
+        // Get the maximum table changefeed expiration
+        let tb_cf_expiry = tbs.as_ref().iter().fold(0, |acc, tb| match &tb.changefeed {
             None => acc,
             Some(cf) => {
                 if cf.expiry.is_zero() {
@@ -54,46 +48,47 @@ pub async fn gc_ns(
                 }
             }
         });
-        let cf_expiry = db_cf_expiry.max(max_tb_cf_expiry);
+        // Calculate the maximum changefeed expiration
+        let cf_expiry = db_cf_expiry.max(tb_cf_expiry);
+        // Ignore this database if the expiry is greater
         if ts < cf_expiry {
             continue;
         }
-        // We only want to retain the expiry window, so we are going to delete everything before
+        // Calculate the watermark expiry window
         let watermark_ts = ts - cf_expiry;
-        #[cfg(debug_assertions)]
-        trace!("The watermark is {} after removing {cf_expiry} from {ts}", watermark_ts);
-        let watermark_vs =
-            tx.get_versionstamp_from_timestamp(watermark_ts, ns, db.name.as_str(), true).await?;
+        // Calculate the watermark versionstamp
+        let watermark_vs = tx
+            .lock()
+            .await
+            .get_versionstamp_from_timestamp(watermark_ts, ns, &db.name, true)
+            .await?;
+        // If a versionstamp exists, then garbage collect
         if let Some(watermark_vs) = watermark_vs {
-            gc_db(tx, ns, db.name.as_str(), watermark_vs, limit).await?;
+            gc_range(tx, ns, &db.name, watermark_vs).await?;
         }
     }
     Ok(())
 }

 // gc_range deletes all change feed entries in the given database that are older than the given watermark.
-pub async fn gc_db(
-    tx: &mut Transaction,
+pub async fn gc_range(
+    tx: &Transaction,
     ns: &str,
     db: &str,
     watermark: Versionstamp,
-    limit: Option<u32>,
 ) -> Result<(), Error> {
-    let beg: Vec<u8> = change::prefix_ts(ns, db, vs::u64_to_versionstamp(0));
+    // Calculate the range
+    let beg = change::prefix_ts(ns, db, vs::u64_to_versionstamp(0));
     let end = change::prefix_ts(ns, db, watermark);
+    // Trace for debugging
     #[cfg(debug_assertions)]
     trace!(
-        "DB GC: ns: {}, db: {}, watermark: {:?}, prefix: {}, end: {}",
-        ns,
-        db,
-        watermark,
-        sprint_key(&beg),
-        sprint_key(&end)
+        "Performing garbage collection on {ns}:{db} for watermark {watermark:?}, between {} and {}",
+        sprint(&beg),
+        sprint(&end)
     );
-
-    let limit = limit.unwrap_or(100);
-
-    tx.delr(beg..end, limit).await?;
-
+    // Delete the entire range in grouped batches
+    tx.delr(beg..end).await?;
+    // Ok all good
     Ok(())
 }
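The garbage collection pass above derives its watermark from the widest changefeed retention window. A minimal, self-contained sketch of that arithmetic, with invented numbers (only the fold and the max/subtract logic mirror the patched gc_ns):

fn main() {
    // Invented inputs: the current timestamp and the changefeed expiry
    // windows (in seconds) defined on the database and its tables.
    let ts: u64 = 1_000;
    let db_cf_expiry: u64 = 300; // e.g. DEFINE DATABASE ... CHANGEFEED 5m
    let table_expiries: [u64; 3] = [60, 900, 0];
    // A zero table expiry contributes nothing, as in the patched fold
    let tb_cf_expiry = table_expiries
        .iter()
        .fold(0, |acc, &e| if e == 0 { acc } else { acc.max(e) });
    // The widest window wins, so nothing still under retention is deleted
    let cf_expiry = db_cf_expiry.max(tb_cf_expiry);
    assert_eq!(cf_expiry, 900);
    // The database is skipped entirely while ts is inside the window
    if ts >= cf_expiry {
        let watermark_ts = ts - cf_expiry;
        assert_eq!(watermark_ts, 100);
        // gc_range then deletes every change entry recorded before the
        // versionstamp observed at watermark_ts
    }
}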
diff --git a/core/src/cf/reader.rs b/core/src/cf/reader.rs
index c96f698e..125c628c 100644
--- a/core/src/cf/reader.rs
+++ b/core/src/cf/reader.rs
@@ -2,8 +2,8 @@ use crate::cf::{ChangeSet, DatabaseMutation, TableMutations};
 use crate::err::Error;
 use crate::key::change;
 #[cfg(debug_assertions)]
-use crate::key::debug::sprint_key;
-use crate::kvs::{Limit, ScanPage, Transaction};
+use crate::key::debug::sprint;
+use crate::kvs::Transaction;
 use crate::sql::statements::show::ShowSince;
 use crate::vs;

@@ -16,18 +16,19 @@ use crate::vs;
 // You can use this to read the change feed in chunks.
 // The second call would start from the last versionstamp + 1 of the first call.
 pub async fn read(
-    tx: &mut Transaction,
+    tx: &Transaction,
     ns: &str,
     db: &str,
     tb: Option<&str>,
     start: ShowSince,
     limit: Option<u32>,
 ) -> Result<Vec<ChangeSet>, Error> {
+    // Calculate the start of the changefeed range
     let beg = match start {
         ShowSince::Versionstamp(x) => change::prefix_ts(ns, db, vs::u64_to_versionstamp(x)),
         ShowSince::Timestamp(x) => {
             let ts = x.0.timestamp() as u64;
-            let vs = tx.get_versionstamp_from_timestamp(ts, ns, db, true).await?;
+            let vs = tx.lock().await.get_versionstamp_from_timestamp(ts, ns, db, true).await?;
             match vs {
                 Some(vs) => change::prefix_ts(ns, db, vs),
                 None => {
@@ -38,63 +39,49 @@ pub async fn read(
             }
         }
     };
+    // Calculate the end of the changefeed range
     let end = change::suffix(ns, db);
-
-    let limit = limit.unwrap_or(100);
-
-    let scan = tx
-        .scan_paged(
-            ScanPage {
-                range: beg..end,
-                limit: Limit::Limited(limit),
-            },
-            limit,
-        )
-        .await?;
-
+    // Limit the changefeed results with a default
+    let limit = limit.unwrap_or(100).min(1000);
+    // Create an empty buffer for the versionstamp
     let mut vs: Option<[u8; 10]> = None;
+    // Create an empty buffer for the table mutations
     let mut buf: Vec<TableMutations> = Vec::new();
-
-    let mut r = Vec::<ChangeSet>::new();
+    // Create an empty buffer for the final changesets
+    let mut res = Vec::<ChangeSet>::new();
     // Iterate over the scan results and put the decoded elements into the results
-    for (k, v) in scan.values {
+    for (k, v) in tx.scan(beg..end, limit).await? {
         #[cfg(debug_assertions)]
-        trace!("read change feed; {}", sprint_key(&k));
-
+        trace!("Reading change feed entry: {}", sprint(&k));
+        // Decode the changefeed entry key
         let dec = crate::key::change::Cf::decode(&k).unwrap();
-
-        if let Some(tb) = tb {
-            if dec.tb != tb {
-                continue;
-            }
+        // Check the change is for the desired table
+        if tb.is_some_and(|tb| tb != dec.tb) {
+            continue;
         }
-
-        let _tb = dec.tb;
-        let ts = dec.vs;
-
         // Decode the byte array into a vector of operations
         let tb_muts: TableMutations = v.into();
-
+        // Get the timestamp of the changefeed entry
         match vs {
             Some(x) => {
-                if ts != x {
+                if dec.vs != x {
                     let db_mut = DatabaseMutation(buf);
-                    r.push(ChangeSet(x, db_mut));
+                    res.push(ChangeSet(x, db_mut));
                     buf = Vec::new();
-                    vs = Some(ts)
+                    vs = Some(dec.vs)
                 }
             }
             None => {
-                vs = Some(ts);
+                vs = Some(dec.vs);
             }
         }
         buf.push(tb_muts);
     }
-
+    // Collect all mutations together
     if !buf.is_empty() {
         let db_mut = DatabaseMutation(buf);
-        r.push(ChangeSet(vs.unwrap(), db_mut));
+        res.push(ChangeSet(vs.unwrap(), db_mut));
     }
-
-    Ok(r)
+    // Return the results
+    Ok(res)
 }
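The doc comment on read says a follow-up call should start from the last versionstamp + 1. A hedged sketch of that paging pattern, assuming a Datastore handle is available; the to_u64 helper below is an illustration based on u64_to_versionstamp placing the value in the first eight big-endian bytes, not a confirmed API:

// Assumed helper: invert `vs::u64_to_versionstamp` for the common case
fn to_u64(vs: [u8; 10]) -> u64 {
    u64::from_be_bytes(vs[..8].try_into().unwrap())
}

async fn follow(ds: &Datastore) -> Result<(), Error> {
    let tx = ds.transaction(Read, Optimistic).await?;
    let mut since = 0u64;
    loop {
        // Read the next chunk of changesets for one table
        let batch =
            cf::read(&tx, "ns", "db", Some("tb"), ShowSince::Versionstamp(since), Some(100))
                .await?;
        let Some(ChangeSet(vs, _)) = batch.last() else {
            break;
        };
        // Resume from the last versionstamp + 1 on the next call
        since = to_u64(*vs) + 1;
        // ... apply each DatabaseMutation in `batch` downstream ...
    }
    tx.cancel().await?;
    Ok(())
}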
diff --git a/core/src/cf/writer.rs b/core/src/cf/writer.rs
index 2d5fc657..697c1491 100644
--- a/core/src/cf/writer.rs
+++ b/core/src/cf/writer.rs
@@ -153,7 +153,6 @@ mod tests {
     use crate::cf::{ChangeSet, DatabaseMutation, TableMutation, TableMutations};
     use crate::dbs::Session;
     use crate::fflags::FFLAGS;
-    use crate::key::key_req::KeyRequirements;
     use crate::kvs::{Datastore, LockType::*, Transaction, TransactionType::*};
     use crate::sql::changefeed::ChangeFeed;
     use crate::sql::id::Id;
@@ -186,7 +185,7 @@ mod tests {
         //
         // Write things to the table.
         //
-        let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap();
+        let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap().inner();
         let thing_a = Thing {
             tb: TB.to_owned(),
             id: Id::String("A".to_string()),
         };
         let value_a: Value = "a".into();
         let previous = Cow::from(Value::None);
         tx1.record_change(
             NS,
             DB,
             TB,
             &thing_a,
             previous.clone(),
             Cow::Borrowed(&value_a),
             DONT_STORE_PREVIOUS,
         );
         tx1.complete_changes(true).await.unwrap();
         tx1.commit().await.unwrap();

-        let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap();
+        let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap().inner();
         let thing_c = Thing {
             tb: TB.to_owned(),
             id: Id::String("C".to_string()),
         };
         let value_c: Value = "c".into();
         tx2.record_change(
             NS,
             DB,
             TB,
             &thing_c,
             previous.clone(),
             Cow::Borrowed(&value_c),
             DONT_STORE_PREVIOUS,
         );
         tx2.complete_changes(true).await.unwrap();
         tx2.commit().await.unwrap();

-        let x = ds.transaction(Write, Optimistic).await;
-        let mut tx3 = x.unwrap();
+        let mut tx3 = ds.transaction(Write, Optimistic).await.unwrap().inner();
         let thing_b = Thing {
             tb: TB.to_owned(),
             id: Id::String("B".to_string()),
         };
         let value_b: Value = "b".into();
         tx3.record_change(
             NS,
             DB,
             TB,
             &thing_b,
             previous.clone(),
             Cow::Borrowed(&value_b),
             DONT_STORE_PREVIOUS,
         );
         let thing_c2 = Thing {
             tb: TB.to_owned(),
             id: Id::String("C".to_string()),
         };
         let value_c2: Value = "c2".into();
         tx3.record_change(
             NS,
             DB,
             TB,
             &thing_c2,
             previous.clone(),
             Cow::Borrowed(&value_c2),
             DONT_STORE_PREVIOUS,
         );
         tx3.complete_changes(true).await.unwrap();
         tx3.commit().await.unwrap();
@@ -262,11 +260,10 @@
         let start: u64 = 0;

-        let mut tx4 = ds.transaction(Write, Optimistic).await.unwrap();
-        let r =
-            crate::cf::read(&mut tx4, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
-                .await
-                .unwrap();
+        let tx4 = ds.transaction(Write, Optimistic).await.unwrap();
+        let r = crate::cf::read(&tx4, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
+            .await
+            .unwrap();
         tx4.commit().await.unwrap();

         let want: Vec<ChangeSet> = vec![
@@ -338,18 +335,17 @@
         assert_eq!(r, want);

-        let mut tx5 = ds.transaction(Write, Optimistic).await.unwrap();
+        let tx5 = ds.transaction(Write, Optimistic).await.unwrap();
         // gc_all needs to be committed before we can read the changes
-        crate::cf::gc_db(&mut tx5, NS, DB, vs::u64_to_versionstamp(4), Some(10)).await.unwrap();
+        crate::cf::gc_range(&tx5, NS, DB, vs::u64_to_versionstamp(4)).await.unwrap();
         // We now commit tx5, which should persist the gc_all results
         tx5.commit().await.unwrap();

         // Now we should see the gc_all results
-        let mut tx6 = ds.transaction(Write, Optimistic).await.unwrap();
-        let r =
-            crate::cf::read(&mut tx6, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
-                .await
-                .unwrap();
+        let tx6 = ds.transaction(Write, Optimistic).await.unwrap();
+        let r = crate::cf::read(&tx6, NS, DB, Some(TB), ShowSince::Versionstamp(start), Some(10))
+            .await
+            .unwrap();
         tx6.commit().await.unwrap();

         let want: Vec<ChangeSet> = vec![ChangeSet(
@@ -387,8 +383,8 @@
         // Now we should see the gc_all results
         ds.tick_at((ts.0.timestamp() + 5).try_into().unwrap()).await.unwrap();

-        let mut tx7 = ds.transaction(Write, Optimistic).await.unwrap();
-        let r = crate::cf::read(&mut tx7, NS, DB, Some(TB), ShowSince::Timestamp(ts), Some(10))
+        let tx7 = ds.transaction(Write, Optimistic).await.unwrap();
+        let r = crate::cf::read(&tx7, NS, DB, Some(TB), ShowSince::Timestamp(ts), Some(10))
             .await
             .unwrap();
         tx7.commit().await.unwrap();
@@ -406,7 +402,7 @@
         )
         .await;
         ds.tick_at(10).await.unwrap();

-        let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+        let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
         let vs1 = tx.get_versionstamp_from_timestamp(5, NS, DB, false).await.unwrap().unwrap();
         let vs2 = tx.get_versionstamp_from_timestamp(10, NS, DB, false).await.unwrap().unwrap();
         tx.cancel().await.unwrap();
@@ -511,18 +507,17 @@
         assert_eq!(r, expected);
     }

-    async fn change_feed_ts(mut tx: Transaction, ts: &Datetime) -> Vec<ChangeSet> {
-        let r =
-            crate::cf::read(&mut tx, NS, DB, Some(TB), ShowSince::Timestamp(ts.clone()), Some(10))
-                .await
-                .unwrap();
+    async fn change_feed_ts(tx: Transaction, ts: &Datetime) -> Vec<ChangeSet> {
+        let r =
+            crate::cf::read(&tx, NS, DB, Some(TB), ShowSince::Timestamp(ts.clone()), Some(10))
+                .await
+                .unwrap();
         tx.cancel().await.unwrap();
         r
     }

-    async fn change_feed_vs(mut tx: Transaction, vs: &Versionstamp) -> Vec<ChangeSet> {
+    async fn change_feed_vs(tx: Transaction, vs: &Versionstamp) -> Vec<ChangeSet> {
         let r = crate::cf::read(
-            &mut tx,
+            &tx,
             NS,
             DB,
             Some(TB),
@@ -535,14 +530,14 @@
         r
     }

-    async fn record_change_feed_entry(mut tx: Transaction, id: String) -> Thing {
+    async fn record_change_feed_entry(tx: Transaction, id: String) -> Thing {
         let thing = Thing {
             tb: TB.to_owned(),
             id: Id::String(id),
         };
         let value_a: Value = "a".into();
         let previous = Cow::from(Value::None);
-        tx.record_change(
+        tx.lock().await.record_change(
             NS,
             DB,
             TB,
             &thing,
             previous.clone(),
             Cow::Borrowed(&value_a),
             DONT_STORE_PREVIOUS,
         );
-        tx.complete_changes(true).await.unwrap();
+        tx.lock().await.complete_changes(true).await.unwrap();
         tx.commit().await.unwrap();
         thing
     }
@@ -585,14 +580,14 @@
         // work.
         //
-        let mut tx0 = ds.transaction(Write, Optimistic).await.unwrap();
+        let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
         let ns_root = crate::key::root::ns::new(NS);
-        tx0.put(ns_root.key_category(), &ns_root, dns).await.unwrap();
+        tx.put(&ns_root, dns).await.unwrap();
         let db_root = crate::key::namespace::db::new(NS, DB);
-        tx0.put(db_root.key_category(), &db_root, ddb).await.unwrap();
+        tx.put(&db_root, ddb).await.unwrap();
         let tb_root = crate::key::database::tb::new(NS, DB, TB);
-        tx0.put(tb_root.key_category(), &tb_root, dtb.clone()).await.unwrap();
-        tx0.commit().await.unwrap();
+        tx.put(&tb_root, dtb.clone()).await.unwrap();
+        tx.commit().await.unwrap();
         ds
     }
 }
diff --git a/core/src/cnf/mod.rs b/core/src/cnf/mod.rs
index 8e5c3f9e..6200dc04 100644
--- a/core/src/cnf/mod.rs
+++ b/core/src/cnf/mod.rs
@@ -1,28 +1,5 @@
 use once_cell::sync::Lazy;

-#[cfg(not(target_arch = "wasm32"))]
-#[allow(dead_code)]
-/// Specifies how many concurrent jobs can be buffered in the worker channel.
-pub const MAX_CONCURRENT_TASKS: usize = 64;
-
-/// Specifies how deep various forms of computation will go before the query fails
-/// with [`crate::err::Error::ComputationDepthExceeded`].
-///
-/// For reference, use ~15 per MiB of stack in release mode.
-///
-/// During query parsing, the total depth of calls to parse values (including arrays, expressions,
-/// functions, objects, sub-queries), Javascript values, and geometry collections count against
-/// this limit.
-///
-/// During query execution, all potentially-recursive code paths count against this limit. Whereas
-/// parsing assigns equal weight to each recursion, certain expensive code paths are allowed to
-/// count for more than one unit of depth during execution.
-pub static MAX_COMPUTATION_DEPTH: Lazy<u32> =
-    lazy_env_parse!("SURREAL_MAX_COMPUTATION_DEPTH", u32, 120);
-
-/// Specifies the names of parameters which can not be specified in a query.
-pub const PROTECTED_PARAM_NAMES: &[&str] = &["access", "auth", "token", "session"];
-
 /// The characters which are supported in server record IDs.
 pub const ID_CHARS: [char; 36] = [
     '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
@@ -32,8 +9,31 @@ pub const ID_CHARS: [char; 36] = [

 /// The publicly visible name of the server
 pub const SERVER_NAME: &str = "SurrealDB";

-/// Datastore processor batch size for scan operations
-pub const PROCESSOR_BATCH_SIZE: u32 = 50;
+/// Specifies the names of parameters which can not be specified in a query.
+pub const PROTECTED_PARAM_NAMES: &[&str] = &["access", "auth", "token", "session"];
+
+/// Specifies how many concurrent jobs can be buffered in the worker channel.
+#[cfg(not(target_arch = "wasm32"))]
+pub static MAX_CONCURRENT_TASKS: Lazy<usize> =
+    lazy_env_parse!("SURREAL_MAX_CONCURRENT_TASKS", usize, 64);
+
+/// Specifies how deep a recursive computation call will go before an error is returned.
+pub static MAX_COMPUTATION_DEPTH: Lazy<u32> =
+    lazy_env_parse!("SURREAL_MAX_COMPUTATION_DEPTH", u32, 120);
+
+/// Specifies the number of items which can be cached within a single transaction.
+pub static TRANSACTION_CACHE_SIZE: Lazy<usize> =
+    lazy_env_parse!("SURREAL_TRANSACTION_CACHE_SIZE", usize, 10_000);
+
+/// The maximum number of keys that should be scanned at once in general queries.
+pub static NORMAL_FETCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_NORMAL_FETCH_SIZE", u32, 50);
+
+/// The maximum number of keys that should be scanned at once for export queries.
+pub static EXPORT_BATCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_EXPORT_BATCH_SIZE", u32, 1000);
+
+/// The maximum number of keys that should be fetched when streaming range scans in a Scanner.
+pub static MAX_STREAM_BATCH_SIZE: Lazy<u32> =
+    lazy_env_parse!("SURREAL_MAX_STREAM_BATCH_SIZE", u32, 1000);

 /// Forward all signup/signin query errors to a client performing record access. Do not use in production.
 pub static INSECURE_FORWARD_RECORD_ACCESS_ERRORS: Lazy<bool> =
@@ -50,6 +50,3 @@ pub static INSECURE_FORWARD_RECORD_ACCESS_ERRORS: Lazy<bool> =
 /// If the environment variable is not present or cannot be parsed, a default value of 50,000 is used.
 pub static EXTERNAL_SORTING_BUFFER_LIMIT: Lazy<usize> =
     lazy_env_parse!("SURREAL_EXTERNAL_SORTING_BUFFER_LIMIT", usize, 50_000);
-
-/// The number of records that should be fetched and grouped together in an INSERT statement when exporting.
-pub static EXPORT_BATCH_SIZE: Lazy<u32> = lazy_env_parse!("SURREAL_EXPORT_BATCH_SIZE", u32, 1000);
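Every setting above follows one pattern: a Lazy static parsed from an environment variable, falling back to a compiled-in default. lazy_env_parse! is SurrealDB's own macro; purely as an illustration, a hand-written equivalent for one of these settings might look like:

use once_cell::sync::Lazy;

// Illustrative stand-in for `lazy_env_parse!("SURREAL_NORMAL_FETCH_SIZE", u32, 50)`,
// not the actual macro expansion.
pub static NORMAL_FETCH_SIZE: Lazy<u32> = Lazy::new(|| {
    std::env::var("SURREAL_NORMAL_FETCH_SIZE")
        .ok()
        .and_then(|v| v.parse::<u32>().ok())
        .unwrap_or(50)
});

Overriding a setting is then just a matter of exporting the variable, for example SURREAL_NORMAL_FETCH_SIZE=500, before starting the server.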
diff --git a/core/src/ctx/context.rs b/core/src/ctx/context.rs
index 48b8e2c9..d0dabf76 100644
--- a/core/src/ctx/context.rs
+++ b/core/src/ctx/context.rs
@@ -2,15 +2,14 @@ use crate::ctx::canceller::Canceller;
 use crate::ctx::reason::Reason;
 #[cfg(feature = "http")]
 use crate::dbs::capabilities::NetTarget;
-use crate::dbs::{Capabilities, Notification, Transaction};
+use crate::dbs::{Capabilities, Notification};
 use crate::err::Error;
 use crate::idx::planner::executor::QueryExecutor;
 use crate::idx::planner::{IterationStage, QueryPlanner};
 use crate::idx::trees::store::IndexStores;
-use crate::kvs;
+use crate::kvs::Transaction;
 use crate::sql::value::Value;
 use channel::Sender;
-use futures::lock::MutexLockFuture;
 use std::borrow::Cow;
 use std::collections::HashMap;
 use std::fmt::{self, Debug};
@@ -72,7 +71,7 @@ pub struct Context<'a> {
     // The temporary directory
     temporary_directory: Option<Arc<PathBuf>>,
     // An optional transaction
-    transaction: Option<Transaction>,
+    transaction: Option<Arc<Transaction>>,
 }

 impl<'a> Default for Context<'a> {
@@ -81,6 +80,12 @@ impl<'a> Default for Context<'a> {
     }
 }

+impl<'a> From<Transaction> for Context<'a> {
+    fn from(txn: Transaction) -> Self {
+        Context::background().with_transaction(Arc::new(txn))
+    }
+}
+
 impl<'a> Debug for Context<'a> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         f.debug_struct("Context")
@@ -239,23 +244,19 @@ impl<'a> Context<'a> {
         self.iteration_stage = Some(is);
     }

-    pub(crate) fn set_transaction_mut(&mut self, txn: Transaction) {
+    pub(crate) fn set_transaction(&mut self, txn: Arc<Transaction>) {
         self.transaction = Some(txn);
     }

-    pub fn set_transaction(mut self, txn: Transaction) -> Self {
+    pub(crate) fn with_transaction(mut self, txn: Arc<Transaction>) -> Self {
         self.transaction = Some(txn);
         self
     }

-    pub fn get_transaction(&self) -> Option<&Transaction> {
-        self.transaction.as_ref()
-    }
-
-    pub(crate) fn tx_lock(&self) -> MutexLockFuture<'_, kvs::Transaction> {
+    pub(crate) fn tx(&self) -> Arc<Transaction> {
         self.transaction
             .as_ref()
-            .map(|txn| txn.lock())
+            .map(Arc::clone)
             .unwrap_or_else(|| unreachable!("The context was not associated with a transaction"))
     }
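The context change above swaps the old mutex-guarded handle for a shared Arc<Transaction>. A hedged, crate-internal sketch using only items visible in this diff (and assuming, as the updated tests suggest, that commit now takes &self):

async fn sketch(ds: &Datastore) -> Result<(), Error> {
    let txn = ds.transaction(Write, Optimistic).await?;
    // `From<Transaction> for Context` wraps the transaction in an Arc
    let ctx: Context = txn.into();
    // Statements executing under `ctx` obtain a cheap shared handle
    let txn: Arc<Transaction> = ctx.tx();
    // ... reads and writes go through `txn` ...
    txn.commit().await?;
    Ok(())
}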
diff --git a/core/src/dbs/executor.rs b/core/src/dbs/executor.rs
index 3147c93b..f1a9fcbb 100644
--- a/core/src/dbs/executor.rs
+++ b/core/src/dbs/executor.rs
@@ -1,27 +1,13 @@
-use std::sync::Arc;
-
-use channel::Receiver;
-use futures::lock::Mutex;
-use futures::StreamExt;
-use reblessive::TreeStack;
-#[cfg(not(target_arch = "wasm32"))]
-use tokio::spawn;
-use tracing::instrument;
-use trice::Instant;
-#[cfg(target_arch = "wasm32")]
-use wasm_bindgen_futures::spawn_local as spawn;
-
 use crate::ctx::Context;
 use crate::dbs::response::Response;
 use crate::dbs::Force;
 use crate::dbs::Notification;
 use crate::dbs::Options;
 use crate::dbs::QueryType;
-use crate::dbs::Transaction;
 use crate::err::Error;
 use crate::iam::Action;
 use crate::iam::ResourceKind;
-use crate::kvs::lq_structs::TrackedResult;
+use crate::kvs::Transaction;
 use crate::kvs::TransactionType;
 use crate::kvs::{Datastore, LockType::*, TransactionType::*};
 use crate::sql::paths::DB;
@@ -30,11 +16,21 @@ use crate::sql::query::Query;
 use crate::sql::statement::Statement;
 use crate::sql::value::Value;
 use crate::sql::Base;
+use channel::Receiver;
+use futures::StreamExt;
+use reblessive::TreeStack;
+use std::sync::Arc;
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::spawn;
+use tracing::instrument;
+use trice::Instant;
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_futures::spawn_local as spawn;

 pub(crate) struct Executor<'a> {
     err: bool,
     kvs: &'a Datastore,
-    txn: Option<Transaction>,
+    txn: Option<Arc<Transaction>>,
 }

 impl<'a> Executor<'a> {
@@ -46,7 +42,7 @@ impl<'a> Executor<'a> {
         }
     }

-    fn txn(&self) -> Transaction {
+    fn txn(&self) -> Arc<Transaction> {
         self.txn.clone().expect("unreachable: txn was None after successful begin")
     }

@@ -60,7 +56,7 @@ impl<'a> Executor<'a> {
             Some(_) => false,
             None => match self.kvs.transaction(write, Optimistic).await {
                 Ok(v) => {
-                    self.txn = Some(Arc::new(Mutex::new(v)));
+                    self.txn = Some(Arc::new(v));
                     true
                 }
                 Err(_) => {
@@ -81,37 +77,27 @@ impl<'a> Executor<'a> {
         if local {
             // Extract the transaction
             if let Some(txn) = self.txn.take() {
+                // Lock the transaction
                 let mut txn = txn.lock().await;
+                // Check for any errors
                 if self.err {
-                    // Cancel and ignore any error because the error flag was
-                    // already set
                     let _ = txn.cancel().await;
                 } else {
-                    let r = match txn.complete_changes(false).await {
-                        Ok(_) => {
-                            match txn.commit().await {
-                                Ok(()) => {
-                                    // Commit succeeded, do post commit operations that do not matter to the tx
-                                    let lqs: Vec<TrackedResult> =
-                                        txn.consume_pending_live_queries();
-                                    // Track the live queries in the data store
-                                    self.kvs.handle_postprocessing_of_statements(&lqs).await?;
-                                    Ok(())
-                                }
-                                Err(e) => Err(e),
-                            }
-                        }
-                        r => r,
-                    };
-                    if let Err(e) = r {
-                        // Transaction failed to commit
-                        //
-                        // TODO: Not all commit errors definitively mean
-                        // the transaction didn't commit. Detect that and tell
-                        // the user.
+                    //
+                    if let Err(e) = txn.complete_changes(false).await {
+                        // Rollback the transaction
+                        let _ = txn.cancel().await;
+                        // Return the error message
                         self.err = true;
                         return Err(e);
                     }
+                    if let Err(e) = txn.commit().await {
+                        // Rollback the transaction
+                        let _ = txn.cancel().await;
+                        // Return the error message
+                        self.err = true;
+                        return Err(e);
+                    };
                 }
             }
         }
@@ -122,7 +108,6 @@ impl<'a> Executor<'a> {
         if local {
             // Extract the transaction
             if let Some(txn) = self.txn.take() {
-                let mut txn = txn.lock().await;
                 if txn.cancel().await.is_err() {
                     self.err = true;
                 }
@@ -168,7 +153,6 @@ impl<'a> Executor<'a> {
     /// Flush notifications from a buffer channel (live queries) to the committed notification channel.
     /// This is because we don't want to broadcast notifications to the user for failed transactions.
-    /// TODO we can delete this once we migrate to lq v2
     async fn flush(&self, ctx: &Context<'_>, mut rcv: Receiver<Notification>) {
         let sender = ctx.notifications();
         spawn(async move {
@@ -182,17 +166,6 @@ impl<'a> Executor<'a> {
         });
     }

-    /// A transaction collects created live queries which can then be consumed when a transaction is committed
-    /// We use this function to get these transactions and send them to the invoker without channels
-    async fn consume_committed_live_query_registrations(&self) -> Option<Vec<TrackedResult>> {
-        if let Some(txn) = self.txn.as_ref() {
-            let txn = txn.lock().await;
-            Some(txn.consume_pending_live_queries())
-        } else {
-            None
-        }
-    }
-
     async fn set_ns(&self, ctx: &mut Context<'_>, opt: &mut Options, ns: &str) {
         let mut session = ctx.value("session").unwrap_or(&Value::None).clone();
         session.put(NS.as_ref(), ns.to_owned().into());
@@ -213,10 +186,9 @@ impl<'a> Executor<'a> {
         mut ctx: Context<'_>,
         opt: Options,
         qry: Query,
-    ) -> Result<(Vec<Response>, Vec<TrackedResult>), Error> {
+    ) -> Result<Vec<Response>, Error> {
         // The stack to run the executor in.
         let mut stack = TreeStack::new();
-
         // Create a notification channel
         let (send, recv) = channel::unbounded();
         // Set the notification channel
@@ -225,7 +197,6 @@
         let mut buf: Vec<Response> = vec![];
         // Initialise array of responses
         let mut out: Vec<Response> = vec![];
-        let mut live_queries: Vec<TrackedResult> = vec![];
         // Do we fast-forward a transaction?
         // Set to true when we encounter a return statement in a transaction
         let mut ff_txn = false;
@@ -293,9 +264,6 @@
                     let commit_error = self.commit(true).await.err();
                     buf = buf.into_iter().map(|v| self.buf_commit(v, &commit_error)).collect();
                     self.flush(&ctx, recv.clone()).await;
-                    if let Some(lqs) = self.consume_committed_live_query_registrations().await {
-                        live_queries.extend(lqs);
-                    }
                     out.append(&mut buf);
                     debug_assert!(self.txn.is_none(), "commit(true) should have unset txn");
                     self.txn = None;
@@ -322,7 +290,8 @@
                         true => Err(Error::TxFailure),
                         // The transaction began successfully
                         false => {
-                            ctx.set_transaction_mut(self.txn());
+                            // ctx.set_transaction(txn)
+                            ctx.set_transaction(self.txn());
                             // Check the statement
                             match stack
                                 .enter(|stk| stm.compute(stk, &ctx, &opt, None))
                                 .await
@@ -347,12 +316,6 @@
                                     Ok(_) => {
                                         // Flush live query notifications
                                         self.flush(&ctx, recv.clone()).await;
-                                        if let Some(lqs) = self
-                                            .consume_committed_live_query_registrations()
-                                            .await
-                                        {
-                                            live_queries.extend(lqs);
-                                        }
                                         Ok(Value::None)
                                     }
                                 }
@@ -395,7 +358,7 @@
                             if let Err(err) = ctx.add_timeout(timeout) {
                                 Err(err)
                             } else {
-                                ctx.set_transaction_mut(self.txn());
+                                ctx.set_transaction(self.txn());
                                 // Process the statement
                                 let res = stack
                                     .enter(|stk| stm.compute(stk, &ctx, &opt, None))
                                     .finish()
@@ -410,7 +373,7 @@
                         }
                         // There is no timeout clause
                         None => {
-                            ctx.set_transaction_mut(self.txn());
+                            ctx.set_transaction(self.txn());
                            stack
                                 .enter(|stk| stm.compute(stk, &ctx, &opt, None))
                                 .finish()
                                 .await
@@ -445,11 +408,6 @@
                         } else {
                             // Flush the live query change notifications
                             self.flush(&ctx, recv.clone()).await;
-                            if let Some(lqs) =
-                                self.consume_committed_live_query_registrations().await
-                            {
-                                live_queries.extend(lqs);
-                            }
                             res
                         }
                     } else {
@@ -475,18 +433,8 @@
                     e
                 }),
                 query_type: match (is_stm_live, is_stm_kill) {
-                    (true, _) => {
-                        if let Some(lqs) = self.consume_committed_live_query_registrations().await {
-                            live_queries.extend(lqs);
-                        }
-                        QueryType::Live
-                    }
-                    (_, true) => {
-                        if let Some(lqs) = self.consume_committed_live_query_registrations().await {
-                            live_queries.extend(lqs);
-                        }
-                        QueryType::Kill
-                    }
+                    (true, _) => QueryType::Live,
+                    (_, true) => QueryType::Kill,
                     _ => QueryType::Other,
                 },
             };
@@ -502,7 +450,7 @@
             }
         }
         // Return responses
-        Ok((out, live_queries))
+        Ok(out)
     }
 }
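From the caller's side, the visible effect of the executor change is the return type: execute now yields only the responses, with live-query bookkeeping handled during transaction commit rather than returned alongside. A hedged usage sketch against the public Datastore API:

async fn run() -> Result<(), Error> {
    let ds = Datastore::new("memory").await?;
    let ses = Session::owner().with_ns("test").with_db("test");
    // Previously this path also surfaced tracked live-query registrations;
    // callers now receive just the query responses.
    let res: Vec<Response> = ds.execute("CREATE person SET name = 'Tobie'", &ses, None).await?;
    assert_eq!(res.len(), 1);
    Ok(())
}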
channel::bounded(*crate::cnf::MAX_CONCURRENT_TASKS); // Create an async closure for received values let avals = async { // Process all received values diff --git a/core/src/dbs/mod.rs b/core/src/dbs/mod.rs index 223a223d..25d74ac5 100644 --- a/core/src/dbs/mod.rs +++ b/core/src/dbs/mod.rs @@ -15,7 +15,6 @@ mod result; mod session; mod statement; mod store; -mod transaction; mod variables; pub mod capabilities; @@ -32,7 +31,6 @@ pub use self::session::*; pub(crate) use self::executor::*; pub(crate) use self::iterator::*; pub(crate) use self::statement::*; -pub(crate) use self::transaction::*; pub(crate) use self::variables::*; #[doc(hidden)] diff --git a/core/src/dbs/node.rs b/core/src/dbs/node.rs index f17b44cb..b05fd269 100644 --- a/core/src/dbs/node.rs +++ b/core/src/dbs/node.rs @@ -1,28 +1,117 @@ -use crate::err::Error; -use crate::err::Error::TimestampOverflow; -use crate::sql::Duration; -use derive::{Key, Store}; +use crate::sql::statements::info::InfoStructure; +use crate::sql::Value; +use derive::Store; use revision::revisioned; +use revision::Error; use serde::{Deserialize, Serialize}; +use std::fmt::{self, Display}; use std::ops::{Add, Sub}; +use std::time::Duration; +use uuid::Uuid; -// NOTE: This is not a statement, but as per layering, keeping it here till we -// have a better structure. -#[revisioned(revision = 1)] -#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, PartialOrd, Hash, Store)] +#[revisioned(revision = 2)] +#[derive(Clone, Debug, Default, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash, Store)] #[non_exhaustive] -pub struct ClusterMembership { +pub struct Node { + #[revision(start = 2, default_fn = "default_id")] + pub id: Uuid, + #[revision(start = 2, default_fn = "default_hb")] + pub hb: Timestamp, + #[revision(start = 2, default_fn = "default_gc")] + pub gc: bool, + #[revision(end = 2, convert_fn = "convert_name")] pub name: String, - // TiKV = TiKV TSO Timestamp as u64 - // not TiKV = local nanos as u64 + #[revision(end = 2, convert_fn = "convert_heartbeat")] pub heartbeat: Timestamp, } + +impl Node { + /// Create a new Node entry + pub fn new(id: Uuid, hb: Timestamp, gc: bool) -> Self { + Self { + id, + hb, + gc, + ..Default::default() + } + } + /// Mark this node as archived + pub fn archive(&self) -> Self { + Node { + gc: true, + ..self.to_owned() + } + } + /// Return the ID of this node + pub fn id(&self) -> Uuid { + self.id + } + /// Check if this node is active + pub fn is_active(&self) -> bool { + !self.gc + } + /// Check if this node is archived + pub fn is_archived(&self) -> bool { + self.gc + } + /// Return the node id if archived + pub fn archived(&self) -> Option<Uuid> { + match self.is_archived() { + true => Some(self.id), + false => None, + } + } + // Sets the default id value for old nodes + fn default_id(_revision: u16) -> Uuid { + Uuid::default() + } + // Sets the default hb value for old nodes + fn default_hb(_revision: u16) -> Timestamp { + Timestamp::default() + } + // Sets the default gc value for old nodes + fn default_gc(_revision: u16) -> bool { + true + } + // Converts the name field into the id field for old nodes + fn convert_name(&mut self, _revision: u16, value: String) -> Result<(), Error> { + self.id = Uuid::parse_str(&value).unwrap(); + Ok(()) + } + // Converts the heartbeat field into the hb field for old nodes + fn convert_heartbeat(&mut self, _revision: u16, value: Timestamp) -> Result<(), Error> { + self.hb = value; + Ok(()) + } +} + +impl Display for Node { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "NODE {} SEEN {}", 
self.id, self.hb)?; + match self.gc { + true => write!(f, " ARCHIVED")?, + false => write!(f, " ACTIVE")?, + }; + Ok(()) + } +} + +impl InfoStructure for Node { + fn structure(self) -> Value { + Value::from(map! { + "id".to_string() => Value::from(self.id), + "seen".to_string() => self.hb.structure(), + "active".to_string() => Value::from(!self.gc), + }) + } +} + // This struct is meant to represent a timestamp that can be used to partially order // events in a cluster. It should be derived from a timestamp oracle, such as the // one available in TiKV via the client `TimestampExt` implementation. #[revisioned(revision = 1)] #[derive( - Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize, Ord, PartialOrd, Hash, Store, Default, + Clone, Copy, Default, Debug, Eq, PartialEq, PartialOrd, Deserialize, Serialize, Hash, Store, )] #[non_exhaustive] pub struct Timestamp { @@ -30,62 +119,49 @@ pub struct Timestamp { } impl From for Timestamp { - fn from(ts: u64) -> Self { + fn from(value: u64) -> Self { Timestamp { - value: ts, + value, } } } -// This struct is to be used only when storing keys as the macro currently -// conflicts when you have Store and Key derive macros. -#[revisioned(revision = 1)] -#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, PartialOrd, Hash, Key)] -#[non_exhaustive] -pub struct KeyTimestamp { - pub value: u64, -} - -impl From<&Timestamp> for KeyTimestamp { - fn from(ts: &Timestamp) -> Self { - KeyTimestamp { - value: ts.value, - } - } -} - -impl Add<&Duration> for &Timestamp { +impl Add for Timestamp { type Output = Timestamp; - fn add(self, rhs: &Duration) -> Timestamp { + fn add(self, rhs: Duration) -> Self::Output { Timestamp { - value: self.value + rhs.as_millis() as u64, + value: self.value.wrapping_add(rhs.as_millis() as u64), } } } -impl Sub<&Duration> for &Timestamp { - type Output = Result; - fn sub(self, rhs: &Duration) -> Self::Output { - let millis = rhs.as_millis() as u64; - if self.value <= millis { - // Removing the duration from this timestamp will cause it to overflow - return Err(TimestampOverflow(format!( - "Failed to subtract {} from {}", - &millis, &self.value - ))); +impl Sub for Timestamp { + type Output = Timestamp; + fn sub(self, rhs: Duration) -> Self::Output { + Timestamp { + value: self.value.wrapping_sub(rhs.as_millis() as u64), } - Ok(Timestamp { - value: self.value - millis, - }) + } +} + +impl Display for Timestamp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.value) + } +} + +impl InfoStructure for Timestamp { + fn structure(self) -> Value { + self.value.into() } } #[cfg(test)] mod test { use crate::dbs::node::Timestamp; - use crate::sql::Duration; use chrono::prelude::Utc; use chrono::TimeZone; + use std::time::Duration; #[test] fn timestamps_can_be_added_duration() { @@ -94,10 +170,10 @@ mod test { value: t.timestamp_millis() as u64, }; - let hour = Duration(core::time::Duration::from_secs(60 * 60)); - let ts = &ts + &hour; - let ts = &ts + &hour; - let ts = &ts + &hour; + let hour = Duration::from_secs(60 * 60); + let ts = ts + hour; + let ts = ts + hour; + let ts = ts + hour; let end_time = Utc.timestamp_millis_opt(ts.value as i64).unwrap(); let expected_end_time = Utc.with_ymd_and_hms(2000, 1, 1, 15, 30, 0).unwrap(); @@ -111,10 +187,10 @@ mod test { value: t.timestamp_millis() as u64, }; - let hour = Duration(core::time::Duration::from_secs(60 * 60)); - let ts = (&ts - &hour).unwrap(); - let ts = (&ts - &hour).unwrap(); - let ts = (&ts - &hour).unwrap(); + let hour = 
Duration::from_secs(60 * 60); + let ts = ts - hour; + let ts = ts - hour; + let ts = ts - hour; let end_time = Utc.timestamp_millis_opt(ts.value as i64).unwrap(); let expected_end_time = Utc.with_ymd_and_hms(2000, 1, 1, 9, 30, 0).unwrap(); diff --git a/core/src/dbs/options.rs b/core/src/dbs/options.rs index 26960933..6014241b 100644 --- a/core/src/dbs/options.rs +++ b/core/src/dbs/options.rs @@ -57,16 +57,6 @@ pub enum Force { Index(Arc<[DefineIndexStatement]>), } -impl Force { - pub fn is_none(&self) -> bool { - matches!(self, Force::None) - } - - pub fn is_forced(&self) -> bool { - !matches!(self, Force::None) - } -} - impl Default for Options { fn default() -> Self { Options::new() @@ -111,8 +101,9 @@ impl Options { // -------------------------------------------------- /// Set all the required options from a single point. - /// The system expects these values to always be set, so this should be called for all - /// instances when there is doubt. + /// The system expects these values to always be set, + /// so this should be called for all instances when + /// there is doubt. pub fn with_required( mut self, node_id: Uuid, @@ -334,21 +325,25 @@ impl Options { // -------------------------------------------------- /// Get current Node ID + #[inline(always)] pub fn id(&self) -> Result { - self.id.ok_or(Error::Unreachable("Options::id")) + self.id.ok_or(Error::Unreachable("No Node ID is specified")) } /// Get currently selected NS + #[inline(always)] pub fn ns(&self) -> Result<&str, Error> { self.ns.as_ref().map(AsRef::as_ref).ok_or(Error::NsEmpty) } /// Get currently selected DB + #[inline(always)] pub fn db(&self) -> Result<&str, Error> { self.db.as_ref().map(AsRef::as_ref).ok_or(Error::DbEmpty) } /// Check whether this request supports realtime queries + #[inline(always)] pub fn realtime(&self) -> Result<(), Error> { if !self.live { return Err(Error::RealtimeDisabled); @@ -357,6 +352,7 @@ impl Options { } // Validate Options for Namespace + #[inline(always)] pub fn valid_for_ns(&self) -> Result<(), Error> { if self.ns.is_none() { return Err(Error::NsEmpty); @@ -365,9 +361,11 @@ impl Options { } // Validate Options for Database + #[inline(always)] pub fn valid_for_db(&self) -> Result<(), Error> { - self.valid_for_ns()?; - + if self.ns.is_none() { + return Err(Error::NsEmpty); + } if self.db.is_none() { return Err(Error::DbEmpty); } diff --git a/core/src/dbs/processor.rs b/core/src/dbs/processor.rs index 246e5e08..003c4df2 100644 --- a/core/src/dbs/processor.rs +++ b/core/src/dbs/processor.rs @@ -1,4 +1,4 @@ -use crate::cnf::PROCESSOR_BATCH_SIZE; +use crate::cnf::NORMAL_FETCH_SIZE; use crate::ctx::Context; #[cfg(not(target_arch = "wasm32"))] use crate::dbs::distinct::AsyncDistinct; @@ -8,12 +8,12 @@ use crate::err::Error; use crate::idx::planner::iterators::{CollectorRecord, IteratorRef, ThingIterator}; use crate::idx::planner::IterationStage; use crate::key::{graph, thing}; -use crate::kvs; -use crate::kvs::ScanPage; +use crate::kvs::Transaction; use crate::sql::dir::Dir; use crate::sql::{Edges, Range, Table, Thing, Value}; #[cfg(not(target_arch = "wasm32"))] use channel::Sender; +use futures::StreamExt; use reblessive::tree::Stk; use std::ops::Bound; use std::vec; @@ -150,10 +150,10 @@ impl<'a> Processor<'a> { self.process_index(stk, ctx, opt, stm, &t, irf).await? } Iterable::Mergeable(v, o) => { - self.process_mergeable(stk, ctx, opt, stm, v, o).await? + self.process_mergeable(stk, ctx, opt, stm, (v, o)).await? 
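A small usage sketch of the reworked `Timestamp` arithmetic from the node.rs diff above: `Add` and `Sub` now take `std::time::Duration` by value and use wrapping millisecond arithmetic, so subtracting past zero wraps instead of returning the old `TimestampOverflow` error (the values and test name here are illustrative):

    use std::time::Duration;

    #[test]
    fn timestamp_wrapping_arithmetic() {
        // One hour past the epoch, in milliseconds
        let ts = Timestamp { value: 3_600_000 };
        let hour = Duration::from_secs(60 * 60);
        // Addition uses wrapping_add on the underlying u64 milliseconds
        assert_eq!((ts + hour).value, 7_200_000);
        // Subtraction below zero now wraps rather than erroring
        assert_eq!((ts - hour - hour).value, u64::MAX - 3_599_999);
    }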
} Iterable::Relatable(f, v, w, o) => { - self.process_relatable(stk, ctx, opt, stm, f, v, w, o).await? + self.process_relatable(stk, ctx, opt, stm, (f, v, w, o)).await? } } } @@ -178,6 +178,27 @@ impl<'a> Processor<'a> { self.process(stk, ctx, opt, stm, pro).await } + async fn process_defer( + &mut self, + stk: &mut Stk, + ctx: &Context<'_>, + opt: &Options, + stm: &Statement<'_>, + v: Thing, + ) -> Result<(), Error> { + // Check that the table exists + ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; + // Process the document record + let pro = Processed { + rid: Some(v), + ir: None, + val: Operable::Value(Value::None), + }; + self.process(stk, ctx, opt, stm, pro).await?; + // Everything ok + Ok(()) + } + async fn process_thing( &mut self, stk: &mut Stk, @@ -187,10 +208,10 @@ impl<'a> Processor<'a> { v: Thing, ) -> Result<(), Error> { // Check that the table exists - ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; + ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; // Fetch the data from the store let key = thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id); - let val = ctx.tx_lock().await.get(key).await?; + let val = ctx.tx().get(key).await?; // Parse the data from the store let val = Operable::Value(match val { Some(v) => Value::from(v), @@ -207,41 +228,19 @@ impl<'a> Processor<'a> { Ok(()) } - async fn process_defer( - &mut self, - stk: &mut Stk, - ctx: &Context<'_>, - opt: &Options, - stm: &Statement<'_>, - v: Thing, - ) -> Result<(), Error> { - // Check that the table exists - ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; - // Process the document record - let pro = Processed { - rid: Some(v), - ir: None, - val: Operable::Value(Value::None), - }; - self.process(stk, ctx, opt, stm, pro).await?; - // Everything ok - Ok(()) - } - async fn process_mergeable( &mut self, stk: &mut Stk, ctx: &Context<'_>, opt: &Options, stm: &Statement<'_>, - v: Thing, - o: Value, + (v, o): (Thing, Value), ) -> Result<(), Error> { // Check that the table exists - ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; + ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; // Fetch the data from the store let key = thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id); - let val = ctx.tx_lock().await.get(key).await?; + let val = ctx.tx().get(key).await?; // Parse the data from the store let x = match val { Some(v) => Value::from(v), @@ -260,23 +259,19 @@ impl<'a> Processor<'a> { Ok(()) } - #[allow(clippy::too_many_arguments)] async fn process_relatable( &mut self, stk: &mut Stk, ctx: &Context<'_>, opt: &Options, stm: &Statement<'_>, - f: Thing, - v: Thing, - w: Thing, - o: Option, + (f, v, w, o): (Thing, Thing, Thing, Option), ) -> Result<(), Error> { // Check that the table exists - ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; + ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; // Fetch the data from the store let key = thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id); - let val = ctx.tx_lock().await.get(key).await?; + let val = ctx.tx().get(key).await?; // Parse the data from the store let x = match val { Some(v) => Value::from(v), @@ -303,47 +298,35 @@ impl<'a> Processor<'a> { stm: &Statement<'_>, v: &Table, ) -> Result<(), Error> { + // Get the transaction + let txn = ctx.tx(); // Check that the table exists - ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, v, opt.strict).await?; + 
txn.check_ns_db_tb(opt.ns()?, opt.db()?, v, opt.strict).await?; // Prepare the start and end keys let beg = thing::prefix(opt.ns()?, opt.db()?, v); let end = thing::suffix(opt.ns()?, opt.db()?, v); - // Loop until no more keys - let mut next_page = Some(ScanPage::from(beg..end)); - while let Some(page) = next_page { + // Create a new iterable range + let mut stream = txn.stream(beg..end); + // Loop until no more entries + while let Some(res) = stream.next().await { // Check if the context is finished if ctx.is_done() { break; } - // Get the next batch of key-value entries - let res = ctx.tx_lock().await.scan_paged(page, PROCESSOR_BATCH_SIZE).await?; - next_page = res.next_page; - let res = res.values; - // If no results then break - if res.is_empty() { - break; - } - // Loop over results - for (k, v) in res.into_iter() { - // Check the context - if ctx.is_done() { - break; - } - // Parse the data from the store - let key: thing::Thing = (&k).into(); - let val: Value = (&v).into(); - let rid = Thing::from((key.tb, key.id)); - // Create a new operable value - let val = Operable::Value(val); - // Process the record - let pro = Processed { - rid: Some(rid), - ir: None, - val, - }; - self.process(stk, ctx, opt, stm, pro).await?; - } - continue; + // Parse the data from the store + let (k, v) = res?; + let key: thing::Thing = (&k).into(); + let val: Value = (&v).into(); + let rid = Thing::from((key.tb, key.id)); + // Create a new operable value + let val = Operable::Value(val); + // Process the record + let pro = Processed { + rid: Some(rid), + ir: None, + val, + }; + self.process(stk, ctx, opt, stm, pro).await?; } // Everything ok Ok(()) @@ -357,8 +340,10 @@ impl<'a> Processor<'a> { stm: &Statement<'_>, v: Range, ) -> Result<(), Error> { + // Get the transaction + let txn = ctx.tx(); // Check that the table exists - ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; + txn.check_ns_db_tb(opt.ns()?, opt.db()?, &v.tb, opt.strict).await?; // Prepare the range start key let beg = match &v.beg { Bound::Unbounded => thing::prefix(opt.ns()?, opt.db()?, &v.tb), @@ -379,42 +364,28 @@ impl<'a> Processor<'a> { key } }; - // Loop until no more keys - let mut next_page = Some(ScanPage::from(beg..end)); - while let Some(page) = next_page { + // Create a new iterable range + let mut stream = txn.stream(beg..end); + // Loop until no more entries + while let Some(res) = stream.next().await { // Check if the context is finished if ctx.is_done() { break; } - let res = ctx.tx_lock().await.scan_paged(page, PROCESSOR_BATCH_SIZE).await?; - next_page = res.next_page; - // Get the next batch of key-value entries - let res = res.values; - // If there are key-value entries then fetch them - if res.is_empty() { - break; - } - // Loop over results - for (k, v) in res.into_iter() { - // Check the context - if ctx.is_done() { - break; - } - // Parse the data from the store - let key: thing::Thing = (&k).into(); - let val: Value = (&v).into(); - let rid = Thing::from((key.tb, key.id)); - // Create a new operable value - let val = Operable::Value(val); - // Process the record - let pro = Processed { - rid: Some(rid), - ir: None, - val, - }; - self.process(stk, ctx, opt, stm, pro).await?; - } - continue; + // Parse the data from the store + let (k, v) = res?; + let key: thing::Thing = (&k).into(); + let val: Value = (&v).into(); + let rid = Thing::from((key.tb, key.id)); + // Create a new operable value + let val = Operable::Value(val); + // Process the record + let pro = Processed { + rid: 
Some(rid), + ir: None, + val, + }; + self.process(stk, ctx, opt, stm, pro).await?; } // Everything ok Ok(()) @@ -496,49 +467,40 @@ impl<'a> Processor<'a> { .collect::>(), }, }; - // - for (beg, end) in keys.iter() { - // Loop until no more keys - let mut next_page = Some(ScanPage::from(beg.clone()..end.clone())); - while let Some(page) = next_page { + // Get the transaction + let txn = ctx.tx(); + // Check that the table exists + txn.check_ns_db_tb(opt.ns()?, opt.db()?, tb, opt.strict).await?; + // Loop over the chosen edge types + for (beg, end) in keys.into_iter() { + // Create a new iterable range + let mut stream = txn.stream(beg..end); + // Loop until no more entries + while let Some(res) = stream.next().await { // Check if the context is finished if ctx.is_done() { break; } - // Get the next batch key-value entries - let res = ctx.tx_lock().await.scan_paged(page, PROCESSOR_BATCH_SIZE).await?; - next_page = res.next_page; - let res = res.values; - // If there are key-value entries then fetch them - if res.is_empty() { - break; - } - // Loop over results - for (k, _) in res.into_iter() { - // Check the context - if ctx.is_done() { - break; - } - // Parse the data from the store - let gra: graph::Graph = graph::Graph::decode(&k)?; - // Fetch the data from the store - let key = thing::new(opt.ns()?, opt.db()?, gra.ft, &gra.fk); - let val = ctx.tx_lock().await.get(key).await?; - let rid = Thing::from((gra.ft, gra.fk)); - // Parse the data from the store - let val = Operable::Value(match val { - Some(v) => Value::from(v), - None => Value::None, - }); - // Process the record - let pro = Processed { - rid: Some(rid), - ir: None, - val, - }; - self.process(stk, ctx, opt, stm, pro).await?; - } - continue; + // Parse the key from the result + let key = res?.0; + // Parse the data from the store + let gra: graph::Graph = graph::Graph::decode(&key)?; + // Fetch the data from the store + let key = thing::new(opt.ns()?, opt.db()?, gra.ft, &gra.fk); + let val = txn.get(key).await?; + let rid = Thing::from((gra.ft, gra.fk)); + // Parse the data from the store + let val = Operable::Value(match val { + Some(v) => Value::from(v), + None => Value::None, + }); + // Process the record + let pro = Processed { + rid: Some(rid), + ir: None, + val, + }; + self.process(stk, ctx, opt, stm, pro).await?; } } // Everything ok @@ -555,7 +517,7 @@ impl<'a> Processor<'a> { irf: IteratorRef, ) -> Result<(), Error> { // Check that the table exists - ctx.tx_lock().await.check_ns_db_tb(opt.ns()?, opt.db()?, &table.0, opt.strict).await?; + ctx.tx().check_ns_db_tb(opt.ns()?, opt.db()?, &table.0, opt.strict).await?; if let Some(exe) = ctx.get_query_executor() { if let Some(mut iterator) = exe.new_iterator(opt, irf).await? { // Get the first batch @@ -592,9 +554,9 @@ impl<'a> Processor<'a> { opt: &Options, iterator: &mut ThingIterator, ) -> Result, Error> { - let mut tx = ctx.tx_lock().await; + let txn = ctx.tx(); let records: Vec = - iterator.next_batch(ctx, &mut tx, PROCESSOR_BATCH_SIZE).await?; + iterator.next_batch(ctx, &txn, *NORMAL_FETCH_SIZE).await?; let mut to_process = Vec::with_capacity(records.len()); for r in records { let v = if let Some(v) = r.2 { @@ -602,7 +564,7 @@ impl<'a> Processor<'a> { v } else { // Otherwise we have to fetch the record - Iterable::fetch_thing(&mut tx, opt, &r.0).await? + Iterable::fetch_thing(&txn, opt, &r.0).await? 
}; let p = Processed { rid: Some(r.0), @@ -618,14 +580,14 @@ impl Iterable { /// Returns the value from the store, or Value::None if the value does not exist. pub(crate) async fn fetch_thing( - tx: &mut kvs::Transaction, + txn: &Transaction, opt: &Options, thg: &Thing, ) -> Result<Value, Error> { // Fetch the data from the store let key = thing::new(opt.ns()?, opt.db()?, &thg.tb, &thg.id); // Fetch and parse the data from the store - let val = tx.get(key).await?.map(Value::from).unwrap_or(Value::None); + let val = txn.get(key).await?.map(Value::from).unwrap_or(Value::None); // Return the result Ok(val) } diff --git a/core/src/dbs/test.rs b/core/src/dbs/test.rs index 40203e0a..ab860a01 100644 --- a/core/src/dbs/test.rs +++ b/core/src/dbs/test.rs @@ -8,7 +8,8 @@ use std::sync::Arc; pub async fn mock<'a>() -> (Context<'a>, Options) { let opt = Options::default().with_auth(Arc::new(Auth::for_root(Role::Owner))); let kvs = Datastore::new("memory").await.unwrap(); - let txn = kvs.transaction(Write, Optimistic).await.unwrap().rollback_and_ignore().enclose(); - let ctx = Context::default().set_transaction(txn); + let txn = kvs.transaction(Write, Optimistic).await.unwrap(); + let txn = txn.rollback_and_ignore().await.enclose(); + let ctx = Context::default().with_transaction(txn); (ctx, opt) } diff --git a/core/src/dbs/transaction.rs b/core/src/dbs/transaction.rs deleted file mode 100644 index 0839359b..00000000 --- a/core/src/dbs/transaction.rs +++ /dev/null @@ -1,5 +0,0 @@ -use crate::kvs; -use futures::lock::Mutex; -use std::sync::Arc; - -pub(crate) type Transaction = Arc<Mutex<kvs::Transaction>>; diff --git a/core/src/doc/changefeeds.rs b/core/src/doc/changefeeds.rs index 94400669..80404664 100644 --- a/core/src/doc/changefeeds.rs +++ b/core/src/doc/changefeeds.rs @@ -15,23 +15,20 @@ impl<'a> Document<'a> { if !self.changed() { return Ok(()); } - // + // Get the table let tb = self.tb(ctx, opt).await?; - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); // Get the database and the table for the record - let db = run.add_and_cache_db(opt.ns()?, opt.db()?, opt.strict).await?; + let db = txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; // Check if changefeeds are enabled if let Some(cf) = db.as_ref().changefeed.as_ref().or(tb.as_ref().changefeed.as_ref()) { - // Get the arguments - let tb = tb.name.as_str(); - let id = self.id.as_ref().unwrap(); // Create the changefeed entry - run.record_change( + txn.lock().await.record_change( opt.ns()?, opt.db()?, - tb, - id, + tb.name.as_str(), + self.id.unwrap(), self.initial.doc.clone(), self.current.doc.clone(), cf.store_diff, diff --git a/core/src/doc/compute.rs b/core/src/doc/compute.rs index 3bada501..a4ae05aa 100644 --- a/core/src/doc/compute.rs +++ b/core/src/doc/compute.rs @@ -52,7 +52,7 @@ impl<'a> Document<'a> { Err(Error::RetryWithId(v)) => { // Fetch the data from the store let key = crate::key::thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id); - let val = ctx.tx_lock().await.get(key).await?; + let val = ctx.tx().get(key).await?; // Parse the data from the store let val = match val { Some(v) => Value::from(v), diff --git a/core/src/doc/document.rs b/core/src/doc/document.rs index 46c496e8..a9f009b7 100644 --- a/core/src/doc/document.rs +++ b/core/src/doc/document.rs @@ -93,25 +93,6 @@ impl<'a> Document<'a> { } } - /// Create a new document that is not going through the standard lifecycle of documents - /// - /// This allows for it to be crafted without needing statements to operate on it 
#[doc(hidden)] - pub fn new_artificial( - id: Option<&'a Thing>, - ir: Option<&'a IteratorRecord>, - val: Cow<'a, Value>, - initial: Cow<'a, Value>, - extras: Workable, - ) -> Self { - Document { - id, - extras, - current: CursorDoc::new(id, ir, val), - initial: CursorDoc::new(id, ir, initial), - } - } - /// Get the current document, as it is being modified #[allow(unused)] pub(crate) fn current_doc(&self) -> &Value { @@ -136,23 +117,18 @@ impl<'a> Document<'a> { self.initial.doc.is_none() && self.current.doc.is_some() } - /// Check if document is being deleted - pub fn is_delete(&self) -> bool { - self.current.doc.is_none() - } - /// Get the table for this document pub async fn tb( &self, ctx: &Context<'a>, opt: &Options, ) -> Result, Error> { - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get transaction + let txn = ctx.tx(); // Get the record id let rid = self.id.as_ref().unwrap(); // Get the table definition - let tb = run.get_and_cache_tb(opt.ns()?, opt.db()?, &rid.tb).await; + let tb = txn.get_tb(opt.ns()?, opt.db()?, &rid.tb).await; // Return the table or attempt to define it match tb { // The table doesn't exist @@ -162,9 +138,7 @@ impl<'a> Document<'a> { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Table, &Base::Db)?; // We can create the table automatically - run.add_and_cache_ns(opt.ns()?, opt.strict).await?; - run.add_and_cache_db(opt.ns()?, opt.db()?, opt.strict).await?; - run.add_and_cache_tb(opt.ns()?, opt.db()?, &rid.tb, opt.strict).await + txn.ensure_ns_db_tb(opt.ns()?, opt.db()?, &rid.tb, opt.strict).await } // There was an error Err(err) => Err(err), @@ -181,7 +155,7 @@ impl<'a> Document<'a> { // Get the record id let id = self.id.as_ref().unwrap(); // Get the table definitions - ctx.tx_lock().await.all_tb_views(opt.ns()?, opt.db()?, &id.tb).await + ctx.tx().all_tb_views(opt.ns()?, opt.db()?, &id.tb).await } /// Get the events for this document pub async fn ev( @@ -192,7 +166,7 @@ impl<'a> Document<'a> { // Get the record id let id = self.id.as_ref().unwrap(); // Get the event definitions - ctx.tx_lock().await.all_tb_events(opt.ns()?, opt.db()?, &id.tb).await + ctx.tx().all_tb_events(opt.ns()?, opt.db()?, &id.tb).await } /// Get the fields for this document pub async fn fd( @@ -203,7 +177,7 @@ impl<'a> Document<'a> { // Get the record id let id = self.id.as_ref().unwrap(); // Get the field definitions - ctx.tx_lock().await.all_tb_fields(opt.ns()?, opt.db()?, &id.tb).await + ctx.tx().all_tb_fields(opt.ns()?, opt.db()?, &id.tb).await } /// Get the indexes for this document pub async fn ix( @@ -214,7 +188,7 @@ impl<'a> Document<'a> { // Get the record id let id = self.id.as_ref().unwrap(); // Get the index definitions - ctx.tx_lock().await.all_tb_indexes(opt.ns()?, opt.db()?, &id.tb).await + ctx.tx().all_tb_indexes(opt.ns()?, opt.db()?, &id.tb).await } // Get the lives for this document pub async fn lv( @@ -225,6 +199,6 @@ impl<'a> Document<'a> { // Get the record id let id = self.id.as_ref().unwrap(); // Get the table definition - ctx.tx_lock().await.all_tb_lives(opt.ns()?, opt.db()?, &id.tb).await + ctx.tx().all_tb_lives(opt.ns()?, opt.db()?, &id.tb).await } } diff --git a/core/src/doc/edges.rs b/core/src/doc/edges.rs index 48ce9b2a..ce234188 100644 --- a/core/src/doc/edges.rs +++ b/core/src/doc/edges.rs @@ -21,8 +21,10 @@ impl<'a> Document<'a> { if self.tb(ctx, opt).await?.drop { return Ok(()); } - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); + // Lock the transaction + let mut 
txn = txn.lock().await; // Get the record id let rid = self.id.as_ref().unwrap(); // Store the record edges @@ -31,16 +33,16 @@ impl<'a> Document<'a> { let (ref o, ref i) = (Dir::Out, Dir::In); // Store the left pointer edge let key = crate::key::graph::new(opt.ns()?, opt.db()?, &l.tb, &l.id, o, rid); - run.set(key, vec![]).await?; + txn.set(key, vec![]).await?; // Store the left inner edge let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, i, l); - run.set(key, vec![]).await?; + txn.set(key, vec![]).await?; // Store the right inner edge let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, o, r); - run.set(key, vec![]).await?; + txn.set(key, vec![]).await?; // Store the right pointer edge let key = crate::key::graph::new(opt.ns()?, opt.db()?, &r.tb, &r.id, i, rid); - run.set(key, vec![]).await?; + txn.set(key, vec![]).await?; // Store the edges on the record self.current.doc.to_mut().put(&*EDGE, Value::Bool(true)); self.current.doc.to_mut().put(&*IN, l.clone().into()); diff --git a/core/src/doc/index.rs b/core/src/doc/index.rs index 8724fc95..157b7d30 100644 --- a/core/src/doc/index.rs +++ b/core/src/doc/index.rs @@ -280,13 +280,16 @@ impl<'a> IndexOperation<'a> { } async fn index_unique(&mut self, ctx: &Context<'_>) -> Result<(), Error> { - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); + // Lock the transaction + let mut txn = txn.lock().await; // Delete the old index data if let Some(o) = self.o.take() { let i = Indexable::new(o, self.ix); for o in i { let key = self.get_unique_index_key(&o)?; - match run.delc(key, Some(self.rid)).await { + match txn.delc(key, Some(self.rid)).await { Err(Error::TxConditionNotMet) => Ok(()), Err(e) => Err(e), Ok(v) => Ok(v), @@ -299,9 +302,9 @@ impl<'a> IndexOperation<'a> { for n in i { if !n.is_all_none_or_null() { let key = self.get_unique_index_key(&n)?; - if run.putc(key, self.rid, None).await.is_err() { + if txn.putc(key, self.rid, None).await.is_err() { let key = self.get_unique_index_key(&n)?; - let val = run.get(key).await?.unwrap(); + let val = txn.get(key).await?.unwrap(); let rid: Thing = val.into(); return self.err_index_exists(rid, n); } @@ -312,13 +315,16 @@ impl<'a> IndexOperation<'a> { } async fn index_non_unique(&mut self, ctx: &Context<'_>) -> Result<(), Error> { - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); + // Lock the transaction + let mut txn = txn.lock().await; // Delete the old index data if let Some(o) = self.o.take() { let i = Indexable::new(o, self.ix); for o in i { let key = self.get_non_unique_index_key(&o)?; - match run.delc(key, Some(self.rid)).await { + match txn.delc(key, Some(self.rid)).await { Err(Error::TxConditionNotMet) => Ok(()), Err(e) => Err(e), Ok(v) => Ok(v), @@ -330,9 +336,9 @@ impl<'a> IndexOperation<'a> { let i = Indexable::new(n, self.ix); for n in i { let key = self.get_non_unique_index_key(&n)?; - if run.putc(key, self.rid, None).await.is_err() { + if txn.putc(key, self.rid, None).await.is_err() { let key = self.get_non_unique_index_key(&n)?; - let val = run.get(key).await?.unwrap(); + let val = txn.get(key).await?.unwrap(); let rid: Thing = val.into(); return self.err_index_exists(rid, n); } @@ -376,20 +382,19 @@ impl<'a> IndexOperation<'a> { ctx: &Context<'_>, p: &MTreeParams, ) -> Result<(), Error> { - let mut tx = ctx.tx_lock().await; + let txn = ctx.tx(); let ikb = IndexKeyBase::new(self.opt.ns()?, self.opt.db()?, self.ix)?; let mut mt = - MTreeIndex::new(ctx.get_index_stores(), &mut tx, 
ikb, p, TransactionType::Write) - .await?; + MTreeIndex::new(ctx.get_index_stores(), &txn, ikb, p, TransactionType::Write).await?; // Delete the old index data if let Some(o) = self.o.take() { - mt.remove_document(stk, &mut tx, self.rid, &o).await?; + mt.remove_document(stk, &txn, self.rid, &o).await?; } // Create the new index data if let Some(n) = self.n.take() { - mt.index_document(stk, &mut tx, self.rid, &n).await?; + mt.index_document(stk, &txn, self.rid, &n).await?; } - mt.finish(&mut tx).await + mt.finish(&txn).await } async fn index_hnsw(&mut self, ctx: &Context<'_>, p: &HnswParams) -> Result<(), Error> { diff --git a/core/src/doc/lives.rs b/core/src/doc/lives.rs index 435a17ad..a5f6ad4f 100644 --- a/core/src/doc/lives.rs +++ b/core/src/doc/lives.rs @@ -6,19 +6,15 @@ use crate::dbs::Statement; use crate::doc::CursorDoc; use crate::doc::Document; use crate::err::Error; -use crate::fflags::FFLAGS; use crate::sql::paths::AC; use crate::sql::paths::META; use crate::sql::paths::RD; use crate::sql::paths::TK; use crate::sql::permission::Permission; -use crate::sql::statements::LiveStatement; use crate::sql::Value; -use channel::Sender; use reblessive::tree::Stk; use std::ops::Deref; use std::sync::Arc; -use uuid::Uuid; impl<'a> Document<'a> { pub async fn lives( @@ -28,27 +24,145 @@ impl<'a> Document<'a> { opt: &Options, stm: &Statement<'_>, ) -> Result<(), Error> { + // Check import + if opt.import { + return Ok(()); + } // Check if changed if !self.changed() { return Ok(()); } - // Under the new mechanism, live query notifications only come from polling the change feed - // This check can be moved up the call stack, as this entire method will become unnecessary - if FFLAGS.change_feed_live_queries.enabled() { - return Ok(()); - } // Check if we can send notifications if let Some(chn) = &opt.sender { + // Get all live queries for this table + let lvs = self.lv(ctx, opt).await?; // Loop through all index statements - let lq_stms = self.lv(ctx, opt).await?; - let borrows = lq_stms.iter().collect::>(); - self.check_lqs_and_send_notifications(stk, ctx, opt, stm, borrows.as_slice(), chn) - .await?; + for lv in lvs.iter() { + // Create a new statement + let lq = Statement::from(lv); + // Get the event action + let met = if stm.is_delete() { + Value::from("DELETE") + } else if self.is_new() { + Value::from("CREATE") + } else { + Value::from("UPDATE") + }; + // Check if this is a delete statement + let doc = match stm.is_delete() { + true => &self.initial, + false => &self.current, + }; + // Ensure that a session exists on the LIVE query + let sess = match lv.session.as_ref() { + Some(v) => v, + None => continue, + }; + // Ensure that auth info exists on the LIVE query + let auth = match lv.auth.clone() { + Some(v) => v, + None => continue, + }; + // We need to create a new context which we will + // use for processing this LIVE query statement. + // This ensures that we are using the session + // of the user who created the LIVE query. + let mut lqctx = Context::background(); + // Set the current transaction on the new LIVE + // query context to prevent unreachable behaviour + // and ensure that queries can be executed. + lqctx.set_transaction(ctx.tx()); + // Add the session params to this LIVE query, so + // that queries can use these within field + // projections and WHERE clauses. 
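The index.rs hunks above lean on the transaction's conditional operations: `putc(key, val, None)` only writes when the key does not yet exist, and `delc(key, Some(expected))` only deletes while the stored value still matches. A minimal sketch of that contract, assuming the locked guard API used in this patch (`try_claim_unique` and the plain `Vec<u8>` key are illustrative):

    // Hypothetical helper: claim a unique index entry for `rid`, returning
    // the currently-owning record when the entry is already taken.
    async fn try_claim_unique(
        txn: &Transaction,
        key: Vec<u8>,
        rid: &Thing,
    ) -> Result<Option<Thing>, Error> {
        // Lock the transaction for exclusive access
        let mut txn = txn.lock().await;
        // A `None` condition means: only put if the key is absent
        if txn.putc(key.clone(), rid, None).await.is_err() {
            // Fetch the record that currently owns this index entry
            let val = txn.get(key).await?.unwrap();
            return Ok(Some(val.into()));
        }
        Ok(None)
    }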
+ lqctx.add_value("access", sess.pick(AC.as_ref())); + lqctx.add_value("auth", sess.pick(RD.as_ref())); + lqctx.add_value("token", sess.pick(TK.as_ref())); + lqctx.add_value("session", sess); + // Add $before, $after, $value, and $event params + // to this LIVE query so the user can use these + // within field projections and WHERE clauses. + lqctx.add_value("event", met); + lqctx.add_value("value", self.current.doc.deref()); + lqctx.add_value("after", self.current.doc.deref()); + lqctx.add_value("before", self.initial.doc.deref()); + // We need to create a new options which we will + // use for processing this LIVE query statement. + // This ensures that we are using the auth data + // of the user who created the LIVE query. + let lqopt = opt.new_with_perms(true).with_auth(Arc::from(auth)); + // First of all, let's check to see if the WHERE + // clause of the LIVE query is matched by this + // document. If it is then we can continue. + match self.lq_check(stk, &lqctx, &lqopt, &lq, doc).await { + Err(Error::Ignore) => continue, + Err(e) => return Err(e), + Ok(_) => (), + } + // Secondly, let's check to see if any PERMISSIONS + // clause for this table allows this document to + // be viewed by the user who created this LIVE + // query. If it does, then we can continue. + match self.lq_allow(stk, &lqctx, &lqopt, &lq, doc).await { + Err(Error::Ignore) => continue, + Err(e) => return Err(e), + Ok(_) => (), + } + // Finally, let's check what type of statement + // caused this LIVE query to run, and send the + // relevant notification based on the statement. + if stm.is_delete() { + // Send a DELETE notification + if opt.id()? == lv.node.0 { + chn.send(Notification { + id: lv.id, + action: Action::Delete, + result: { + // Ensure futures are run + let lqopt: &Options = &lqopt.new_with_futures(true); + // Output the full document before any changes were applied + let mut value = + doc.doc.compute(stk, &lqctx, lqopt, Some(doc)).await?; + // Remove metadata fields on output + value.del(stk, &lqctx, lqopt, &*META).await?; + // Output result + value + }, + }) + .await?; + } else { + // TODO: Send to message broker + } + } else if self.is_new() { + // Send a CREATE notification + if opt.id()? == lv.node.0 { + chn.send(Notification { + id: lv.id, + action: Action::Create, + result: self.pluck(stk, &lqctx, &lqopt, &lq).await?, + }) + .await?; + } else { + // TODO: Send to message broker + } + } else { + // Send an UPDATE notification + if opt.id()? 
== lv.node.0 { + chn.send(Notification { + id: lv.id, + action: Action::Update, + result: self.pluck(stk, &lqctx, &lqopt, &lq).await?, + }) + .await?; + } else { + // TODO: Send to message broker + } + }; + } } // Carry on Ok(()) } - /// Check the WHERE clause for a LIVE query async fn lq_check( &self, @@ -69,7 +183,6 @@ impl<'a> Document<'a> { // Carry on Ok(()) } - /// Check any PERRMISSIONS for a LIVE query async fn lq_allow( &self, @@ -100,176 +213,4 @@ impl<'a> Document<'a> { // Carry on Ok(()) } - - /// Process live query for notifications - pub(crate) async fn check_lqs_and_send_notifications( - &self, - stk: &mut Stk, - ctx: &Context<'_>, - opt: &Options, - stm: &Statement<'_>, - live_statements: &[&LiveStatement], - sender: &Sender, - ) -> Result<(), Error> { - trace!( - "Called check_lqs_and_send_notifications with {} live statements", - live_statements.len() - ); - // Technically this isnt the condition - the `lives` function is passing in the currently evaluated statement - // but the ds.rs invocation of this function is reconstructing this statement - let is_delete = match FFLAGS.change_feed_live_queries.enabled() { - true => self.is_delete(), - false => stm.is_delete(), - }; - for lv in live_statements { - // Create a new statement - let lq = Statement::from(*lv); - // Get the event action - let evt = if stm.is_delete() { - Value::from("DELETE") - } else if self.is_new() { - Value::from("CREATE") - } else { - Value::from("UPDATE") - }; - // Check if this is a delete statement - let doc = match is_delete { - true => &self.initial, - false => &self.current, - }; - // Ensure that a session exists on the LIVE query - let sess = match lv.session.as_ref() { - Some(v) => v, - None => { - trace!("live query did not have a session, skipping"); - continue; - } - }; - // Ensure that auth info exists on the LIVE query - let auth = match lv.auth.clone() { - Some(v) => v, - None => { - trace!("live query did not have auth info, skipping"); - continue; - } - }; - // We need to create a new context which we will - // use for processing this LIVE query statement. - // This ensures that we are using the session - // of the user who created the LIVE query. - let lqctx = Context::background(); - let mut lqctx = - lqctx.set_transaction(ctx.get_transaction().cloned().unwrap_or_else(|| { - unreachable!("Expected transaction to be available in parent context") - })); - lqctx.add_value("access", sess.pick(AC.as_ref())); - lqctx.add_value("auth", sess.pick(RD.as_ref())); - lqctx.add_value("token", sess.pick(TK.as_ref())); - lqctx.add_value("session", sess); - // We need to create a new options which we will - // use for processing this LIVE query statement. - // This ensures that we are using the auth data - // of the user who created the LIVE query. - let lqopt = opt.new_with_perms(true).with_auth(Arc::from(auth)); - // Add $before, $after, $value, and $event params - // to this LIVE query so that user can use these - // within field projections and WHERE clauses. - lqctx.add_value("event", evt); - lqctx.add_value("value", self.current.doc.deref()); - lqctx.add_value("after", self.current.doc.deref()); - lqctx.add_value("before", self.initial.doc.deref()); - // First of all, let's check to see if the WHERE - // clause of the LIVE query is matched by this - // document. If it is then we can continue. 
- match self.lq_check(stk, &lqctx, &lqopt, &lq, doc).await { - Err(Error::Ignore) => { - trace!("live query did not match the where clause, skipping"); - continue; - } - Err(e) => return Err(e), - Ok(_) => (), - } - // Secondly, let's check to see if any PERMISSIONS - // clause for this table allows this document to - // be viewed by the user who created this LIVE - // query. If it does, then we can continue. - match self.lq_allow(stk, &lqctx, &lqopt, &lq, doc).await { - Err(Error::Ignore) => { - trace!("live query did not have permission to view this document, skipping"); - continue; - } - Err(e) => return Err(e), - Ok(_) => (), - } - // Finally, let's check what type of statement - // caused this LIVE query to run, and send the - // relevant notification based on the statement. - let default_node_id = Uuid::default(); - let node_id = opt.id().unwrap_or(default_node_id); - // This bool is deprecated since lq v2 on cf - // We check against defaults because clients register live queries with their local node id - // But the cf scanner uses the server node id, which is different from the client - let node_matches_live_query = - node_id == default_node_id || lv.node.0 == default_node_id || node_id == lv.node.0; - trace!( - "Notification node matches live query: {} ({} != {})", - node_matches_live_query, - node_id, - lv.node.0 - ); - if is_delete { - // Send a DELETE notification - if node_matches_live_query { - sender - .send(Notification { - id: lv.id, - action: Action::Delete, - result: { - // Ensure futures are run - let lqopt: &Options = &lqopt.new_with_futures(true); - // Output the full document before any changes were applied - let mut value = - doc.doc.compute(stk, &lqctx, lqopt, Some(doc)).await?; - - // TODO(SUR-349): We need an empty object instead of Value::None for serialisation - if value.is_none() { - value = Value::Object(Default::default()); - } - // Remove metadata fields on output - value.del(stk, &lqctx, lqopt, &*META).await?; - // Output result - value - }, - }) - .await?; - } - } else if self.is_new() { - // Send a CREATE notification - if node_matches_live_query { - trace!("Sending lq create notification"); - sender - .send(Notification { - id: lv.id, - action: Action::Create, - result: self.pluck(stk, &lqctx, &lqopt, &lq).await?, - }) - .await?; - } - } else { - // Send a UPDATE notification - if node_matches_live_query { - trace!("Sending lq update notification"); - sender - .send(Notification { - id: lv.id, - action: Action::Update, - result: self.pluck(stk, &lqctx, &lqopt, &lq).await?, - }) - .await?; - } - }; - } - trace!("Ended check_lqs_and_send_notifications"); - Ok(()) - } } diff --git a/core/src/doc/process.rs b/core/src/doc/process.rs index 51662282..05e600f7 100644 --- a/core/src/doc/process.rs +++ b/core/src/doc/process.rs @@ -46,7 +46,7 @@ impl<'a> Document<'a> { Err(Error::RetryWithId(v)) => { // Fetch the data from the store let key = crate::key::thing::new(opt.ns()?, opt.db()?, &v.tb, &v.id); - let val = ctx.tx_lock().await.get(key).await?; + let val = ctx.tx().get(key).await?; // Parse the data from the store let val = match val { Some(v) => Value::from(v), diff --git a/core/src/doc/purge.rs b/core/src/doc/purge.rs index bce57050..6e1a6fd2 100644 --- a/core/src/doc/purge.rs +++ b/core/src/doc/purge.rs @@ -25,13 +25,15 @@ impl<'a> Document<'a> { if !self.changed() { return Ok(()); } - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); + // Lock the transaction + let mut txn = txn.lock().await; // Get 
the record id if let Some(rid) = self.id { // Purge the record data let key = crate::key::thing::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id); - run.del(key).await?; + txn.del(key).await?; // Purge the record edges match ( self.initial.doc.pick(&*EDGE), @@ -43,20 +45,20 @@ impl<'a> Document<'a> { let (ref o, ref i) = (Dir::Out, Dir::In); // Purge the left pointer edge let key = crate::key::graph::new(opt.ns()?, opt.db()?, &l.tb, &l.id, o, rid); - run.del(key).await?; + txn.del(key).await?; // Purge the left inner edge let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, i, l); - run.del(key).await?; + txn.del(key).await?; // Purge the right inner edge let key = crate::key::graph::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id, o, r); - run.del(key).await?; + txn.del(key).await?; // Purge the right pointer edge let key = crate::key::graph::new(opt.ns()?, opt.db()?, &r.tb, &r.id, i, rid); - run.del(key).await?; + txn.del(key).await?; } _ => { // Release the transaction - drop(run); + drop(txn); // Setup the delete statement let stm = DeleteStatement { what: Values(vec![Value::from(Edges { diff --git a/core/src/doc/store.rs b/core/src/doc/store.rs index 49a5645f..30f752b5 100644 --- a/core/src/doc/store.rs +++ b/core/src/doc/store.rs @@ -3,7 +3,6 @@ use crate::dbs::Options; use crate::dbs::Statement; use crate::doc::Document; use crate::err::Error; -use crate::key::key_req::KeyRequirements; impl<'a> Document<'a> { pub async fn store( @@ -20,18 +19,18 @@ impl<'a> Document<'a> { if self.tb(ctx, opt).await?.drop { return Ok(()); } - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); // Get the record id let rid = self.id.as_ref().unwrap(); // Store the record data let key = crate::key::thing::new(opt.ns()?, opt.db()?, &rid.tb, &rid.id); - // + // Match the statement type match stm { // This is a CREATE statement so try to insert the key - Statement::Create(_) => match run.put(key.key_category(), key, self).await { + Statement::Create(_) => match txn.put(key, self).await { // The key already exists, so return an error - Err(Error::TxKeyAlreadyExistsCategory(_)) => Err(Error::RecordExists { + Err(Error::TxKeyAlreadyExists) => Err(Error::RecordExists { thing: rid.to_string(), }), // Return any other received error @@ -40,7 +39,7 @@ impl<'a> Document<'a> { Ok(v) => Ok(v), }, // This is not a CREATE statement, so update the key - _ => run.set(key, self).await, + _ => txn.set(key, self).await, }?; // Carry on Ok(()) diff --git a/core/src/err/mod.rs b/core/src/err/mod.rs index c236543b..d629b272 100644 --- a/core/src/err/mod.rs +++ b/core/src/err/mod.rs @@ -1,7 +1,6 @@ use crate::iam::Error as IamError; use crate::idx::ft::MatchRef; use crate::idx::trees::vector::SharedVector; -use crate::key::error::KeyCategory; use crate::sql::idiom::Idiom; use crate::sql::index::Distance; use crate::sql::thing::Thing; @@ -92,7 +91,6 @@ pub enum Error { /// The key being inserted in the transaction already exists #[error("The key being inserted already exists")] - #[deprecated(note = "Use TxKeyAlreadyExistsCategory")] TxKeyAlreadyExists, /// The key exceeds a limit set by the KV store @@ -388,6 +386,12 @@ pub enum Error { value: String, }, + /// The requested record does not exist + #[error("The record '{value}' does not exist")] + IdNotFound { + value: String, + }, + #[error("Unsupported distance: {0}")] UnsupportedDistance(Distance), @@ -810,10 +814,6 @@ pub enum Error { #[error("Auth token is missing the '{0}' claim")] MissingTokenClaim(String), 
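With `TxKeyAlreadyExistsCategory` removed below, each KV engine adapter now funnels its duplicate-key error into the single `Error::TxKeyAlreadyExists` variant, which the store.rs hunk above then upgrades to `Error::RecordExists` for CREATE statements. A sketch of that mapping pattern, with `MyEngineError` standing in for a real engine error type:

    // Illustrative only: mirrors the echodb/indxdb/tikv From impls below.
    impl From<MyEngineError> for Error {
        fn from(e: MyEngineError) -> Error {
            match e {
                // Duplicate keys collapse onto the one canonical variant
                MyEngineError::KeyAlreadyExists => Error::TxKeyAlreadyExists,
                // Everything else becomes a generic transaction error
                _ => Error::Tx(e.to_string()),
            }
        }
    }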
- /// The key being inserted in the transaction already exists - #[error("The key being inserted already exists: {0}")] - TxKeyAlreadyExistsCategory(KeyCategory), - /// The db is running without an available storage engine #[error("The db is running without an available storage engine")] MissingStorageEngine, @@ -921,10 +921,6 @@ pub enum Error { #[error("A node task has failed: {0}")] NodeAgent(&'static str), - /// An error related to live query occurred - #[error("Failed to process Live Query: {0}")] - LiveQueryError(LiveQueryCause), - /// The supplied type could not be serialized into `sql::Value` #[error("Serialization error: {0}")] Serialization(String), @@ -1041,9 +1037,7 @@ impl From for Error { impl From<echodb::err::Error> for Error { fn from(e: echodb::err::Error) -> Error { match e { - echodb::err::Error::KeyAlreadyExists => { - Error::TxKeyAlreadyExistsCategory(crate::key::error::KeyCategory::Unknown) - } + echodb::err::Error::KeyAlreadyExists => Error::TxKeyAlreadyExists, echodb::err::Error::ValNotExpectedValue => Error::TxConditionNotMet, _ => Error::Tx(e.to_string()), } } } @@ -1054,9 +1048,7 @@ impl From for Error { impl From<indxdb::err::Error> for Error { fn from(e: indxdb::err::Error) -> Error { match e { - indxdb::err::Error::KeyAlreadyExists => { - Error::TxKeyAlreadyExistsCategory(crate::key::error::KeyCategory::Unknown) - } + indxdb::err::Error::KeyAlreadyExists => Error::TxKeyAlreadyExists, indxdb::err::Error::ValNotExpectedValue => Error::TxConditionNotMet, _ => Error::Tx(e.to_string()), } } } @@ -1067,9 +1059,7 @@ impl From for Error { impl From<tikv::Error> for Error { fn from(e: tikv::Error) -> Error { match e { - tikv::Error::DuplicateKeyInsertion => { - Error::TxKeyAlreadyExistsCategory(crate::key::error::KeyCategory::Unknown) - } + tikv::Error::DuplicateKeyInsertion => Error::TxKeyAlreadyExists, tikv::Error::KeyError(ke) if ke.abort.contains("KeyTooLarge") => Error::TxKeyTooLarge, tikv::Error::RegionError(re) if re.raft_entry_too_large.is_some() => Error::TxTooLarge, _ => Error::Tx(e.to_string()), @@ -1091,6 +1081,20 @@ impl From for Error { } } +#[cfg(feature = "kv-fdb")] +impl From<foundationdb::FdbError> for Error { + fn from(e: foundationdb::FdbError) -> Error { + Error::Ds(e.to_string()) + } +} + +#[cfg(feature = "kv-fdb")] +impl From<foundationdb::TransactionCommitError> for Error { + fn from(e: foundationdb::TransactionCommitError) -> Error { + Error::Tx(e.to_string()) + } } + impl From<channel::RecvError> for Error { fn from(e: channel::RecvError) -> Error { Error::Channel(e.to_string()) @@ -1136,14 +1140,3 @@ impl Serialize for Error { serializer.serialize_str(self.to_string().as_str()) } } - -#[derive(Error, Debug)] -#[non_exhaustive] -pub enum LiveQueryCause { - #[doc(hidden)] - #[error("The Live Query must have a change feed for it it work")] - MissingChangeFeed, - #[doc(hidden)] - #[error("The Live Query must have a change feed that includes relative changes")] - ChangeFeedNoOriginal, -} diff --git a/core/src/exe/try_join_all_buffered.rs b/core/src/exe/try_join_all_buffered.rs index a8cbde7b..5f26fa87 100644 --- a/core/src/exe/try_join_all_buffered.rs +++ b/core/src/exe/try_join_all_buffered.rs @@ -32,17 +32,17 @@ where I::Item: TryFuture, { #[cfg(target_arch = "wasm32")] - const LIMIT: usize = 1; + let limit: usize = 1; #[cfg(not(target_arch = "wasm32"))] - const LIMIT: usize = crate::cnf::MAX_CONCURRENT_TASKS; + let limit: usize = *crate::cnf::MAX_CONCURRENT_TASKS; let mut input = iter.into_iter(); let (lo, hi) = input.size_hint(); let initial_capacity = hi.unwrap_or(lo); let mut active = FuturesOrdered::new(); - while active.len() < LIMIT { + while active.len() < limit { if let 
Some(next) = input.next() { active.push_back(TryFutureExt::into_future(next)); } else { diff --git a/core/src/fnc/search.rs b/core/src/fnc/search.rs index 2bf7905d..87698921 100644 --- a/core/src/fnc/search.rs +++ b/core/src/fnc/search.rs @@ -13,8 +13,8 @@ pub async fn analyze( (az, val): (Value, Value), ) -> Result<Value, Error> { if let (Some(opt), Value::Strand(az), Value::Strand(val)) = (opt, az, val) { - let az: Analyzer = - ctx.tx_lock().await.get_db_analyzer(opt.ns()?, opt.db()?, az.as_str()).await?.into(); + // TODO: @emmanuel-keller this `into()` is expensive and clones the value + let az: Analyzer = ctx.tx().get_db_analyzer(opt.ns()?, opt.db()?, &az).await?.into(); az.analyze(stk, ctx, opt, val.0).await } else { Ok(Value::None) } } diff --git a/core/src/iam/signin.rs b/core/src/iam/signin.rs index 87b9128b..aaa25458 100644 --- a/core/src/iam/signin.rs +++ b/core/src/iam/signin.rs @@ -20,7 +20,6 @@ pub async fn signin(kvs: &Datastore, session: &mut Session, vars: Object) -> Res let ns = vars.get("NS").or_else(|| vars.get("ns")); let db = vars.get("DB").or_else(|| vars.get("db")); let ac = vars.get("AC").or_else(|| vars.get("ac")); - // Check if the parameters exist match (ns, db, ac) { // DB signin with access method @@ -102,7 +101,7 @@ pub async fn db_access( vars: Object, ) -> Result<Value, Error> { // Create a new readonly transaction - let mut tx = kvs.transaction(Read, Optimistic).await?; + let tx = kvs.transaction(Read, Optimistic).await?; // Fetch the specified access method from storage let access = tx.get_db_access(&ns, &db, &ac).await; // Ensure that the transaction is cancelled @@ -114,7 +113,7 @@ pub async fn db_access( // All access method types are supported except for JWT // The JWT access method is the one that is internal to SurrealDB // The equivalent of signing in with JWT is to authenticate it - match av.kind { + match av.kind.clone() { AccessType::Record(at) => { // Check if the record access method supports issuing tokens let iss = match at.jwt.issue { diff --git a/core/src/iam/signup.rs b/core/src/iam/signup.rs index d83663dd..0b75e513 100644 --- a/core/src/iam/signup.rs +++ b/core/src/iam/signup.rs @@ -47,7 +47,7 @@ pub async fn db_access( vars: Object, ) -> Result<Option<Value>, Error> { // Create a new readonly transaction - let mut tx = kvs.transaction(Read, Optimistic).await?; + let tx = kvs.transaction(Read, Optimistic).await?; // Fetch the specified access method from storage let access = tx.get_db_access(&ns, &db, &ac).await; // Ensure that the transaction is cancelled @@ -57,7 +57,7 @@ pub async fn db_access( Ok(av) => { // Check the access method type // Currently, only the record access method supports signup - match av.kind { + match av.kind.clone() { AccessType::Record(at) => { // Check if the record access method supports issuing tokens let iss = match at.jwt.issue { diff --git a/core/src/iam/verify.rs b/core/src/iam/verify.rs index be6d0538..466a4543 100644 --- a/core/src/iam/verify.rs +++ b/core/src/iam/verify.rs @@ -15,60 +15,47 @@ use once_cell::sync::Lazy; use std::str::{self, FromStr}; use std::sync::Arc; -fn config(alg: Algorithm, key: String) -> Result<(DecodingKey, Validation), Error> { +fn config(alg: Algorithm, key: &[u8]) -> Result<(DecodingKey, Validation), Error> { match alg { - Algorithm::Hs256 => Ok(( - DecodingKey::from_secret(key.as_ref()), - Validation::new(jsonwebtoken::Algorithm::HS256), - )), - Algorithm::Hs384 => Ok(( - DecodingKey::from_secret(key.as_ref()), - Validation::new(jsonwebtoken::Algorithm::HS384), - )), - Algorithm::Hs512 => Ok(( 
DecodingKey::from_secret(key.as_ref()), - Validation::new(jsonwebtoken::Algorithm::HS512), - )), - Algorithm::EdDSA => Ok(( - DecodingKey::from_ed_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::EdDSA), - )), - Algorithm::Es256 => Ok(( - DecodingKey::from_ec_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::ES256), - )), - Algorithm::Es384 => Ok(( - DecodingKey::from_ec_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::ES384), - )), - Algorithm::Es512 => Ok(( - DecodingKey::from_ec_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::ES384), - )), - Algorithm::Ps256 => Ok(( - DecodingKey::from_rsa_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::PS256), - )), - Algorithm::Ps384 => Ok(( - DecodingKey::from_rsa_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::PS384), - )), - Algorithm::Ps512 => Ok(( - DecodingKey::from_rsa_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::PS512), - )), - Algorithm::Rs256 => Ok(( - DecodingKey::from_rsa_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::RS256), - )), - Algorithm::Rs384 => Ok(( - DecodingKey::from_rsa_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::RS384), - )), - Algorithm::Rs512 => Ok(( - DecodingKey::from_rsa_pem(key.as_ref())?, - Validation::new(jsonwebtoken::Algorithm::RS512), - )), + Algorithm::Hs256 => { + Ok((DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS256))) + } + Algorithm::Hs384 => { + Ok((DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS384))) + } + Algorithm::Hs512 => { + Ok((DecodingKey::from_secret(key), Validation::new(jsonwebtoken::Algorithm::HS512))) + } + Algorithm::EdDSA => { + Ok((DecodingKey::from_ed_pem(key)?, Validation::new(jsonwebtoken::Algorithm::EdDSA))) + } + Algorithm::Es256 => { + Ok((DecodingKey::from_ec_pem(key)?, Validation::new(jsonwebtoken::Algorithm::ES256))) + } + Algorithm::Es384 => { + Ok((DecodingKey::from_ec_pem(key)?, Validation::new(jsonwebtoken::Algorithm::ES384))) + } + Algorithm::Es512 => { + Ok((DecodingKey::from_ec_pem(key)?, Validation::new(jsonwebtoken::Algorithm::ES384))) + } + Algorithm::Ps256 => { + Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::PS256))) + } + Algorithm::Ps384 => { + Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::PS384))) + } + Algorithm::Ps512 => { + Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::PS512))) + } + Algorithm::Rs256 => { + Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::RS256))) + } + Algorithm::Rs384 => { + Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::RS384))) + } + Algorithm::Rs512 => { + Ok((DecodingKey::from_rsa_pem(key)?, Validation::new(jsonwebtoken::Algorithm::RS512))) + } } } @@ -92,7 +79,6 @@ pub async fn basic( ) -> Result<(), Error> { // Log the authentication type trace!("Attempting basic authentication"); - // Check if the parameters exist match (ns, db) { // DB signin @@ -163,16 +149,18 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul // Log the decoded authentication claims trace!("Authenticating with record access method `{}`", ac); // Create a new readonly transaction - let mut tx = kvs.transaction(Read, Optimistic).await?; + let tx = kvs.transaction(Read, Optimistic).await?; // Parse the record id let mut rid = syn::thing(&id)?; // Get the database access method let de = 
tx.get_db_access(&ns, &db, &ac).await?; + // Ensure that the transaction is cancelled + tx.cancel().await?; // Obtain the configuration to verify the token based on the access method - let (au, cf) = match de.kind { + let (au, cf) = match de.kind.clone() { AccessType::Record(at) => { let cf = match at.jwt.verify.clone() { - JwtAccessVerify::Key(key) => config(key.alg, key.key), + JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()), #[cfg(feature = "jwks")] JwtAccessVerify::Jwks(jwks) => { if let Some(kid) = token_data.header.kid { @@ -244,15 +232,17 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul // Log the decoded authentication claims trace!("Authenticating to database `{}` with access method `{}`", db, ac); // Create a new readonly transaction - let mut tx = kvs.transaction(Read, Optimistic).await?; + let tx = kvs.transaction(Read, Optimistic).await?; // Get the database access method let de = tx.get_db_access(&ns, &db, &ac).await?; + // Ensure that the transaction is cancelled + tx.cancel().await?; // Obtain the configuration to verify the token based on the access method - match de.kind { + match de.kind.clone() { // If the access type is Jwt, this is database access AccessType::Jwt(at) => { let cf = match at.verify { - JwtAccessVerify::Key(key) => config(key.alg, key.key), + JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()), #[cfg(feature = "jwks")] JwtAccessVerify::Jwks(jwks) => { if let Some(kid) = token_data.header.kid { @@ -300,7 +290,7 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul Some(au) => { trace!("Access method `{}` is record access with authenticate clause", ac); let cf = match at.jwt.verify { - JwtAccessVerify::Key(key) => config(key.alg, key.key), + JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()), #[cfg(feature = "jwks")] JwtAccessVerify::Jwks(jwks) => { if let Some(kid) = token_data.header.kid { @@ -366,13 +356,16 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul // Log the decoded authentication claims trace!("Authenticating to database `{}` with user `{}`", db, id); // Create a new readonly transaction - let mut tx = kvs.transaction(Read, Optimistic).await?; + let tx = kvs.transaction(Read, Optimistic).await?; // Get the database user let de = tx.get_db_user(&ns, &db, &id).await.map_err(|e| { trace!("Error while authenticating to database `{db}`: {e}"); Error::InvalidAuth })?; - let cf = config(Algorithm::Hs512, de.code)?; + // Ensure that the transaction is cancelled + tx.cancel().await?; + // Check the algorithm + let cf = config(Algorithm::Hs512, de.code.as_bytes())?; // Verify the token decode::(token, &cf.0, &cf.1)?; // Log the success @@ -398,13 +391,15 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul // Log the decoded authentication claims trace!("Authenticating to namespace `{}` with access method `{}`", ns, ac); // Create a new readonly transaction - let mut tx = kvs.transaction(Read, Optimistic).await?; + let tx = kvs.transaction(Read, Optimistic).await?; // Get the namespace access method let de = tx.get_ns_access(&ns, &ac).await?; + // Ensure that the transaction is cancelled + tx.cancel().await?; // Obtain the configuration to verify the token based on the access method - let cf = match de.kind { + let cf = match de.kind.clone() { AccessType::Jwt(ac) => match ac.verify { - JwtAccessVerify::Key(key) => config(key.alg, key.key), + JwtAccessVerify::Key(key) => 
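A pattern repeated throughout these hunks is worth calling out: the transaction binding loses its `mut`, and the transaction is now explicitly cancelled as soon as the cached definition has been fetched, rather than lingering for the rest of the verification. A condensed sketch of the lifecycle, assuming `kvs`, `ns`, `db` and `ac` are in scope as in the surrounding code:

```rust
// Read-only lookup against the catalog, released before any
// CPU-bound token verification starts.
let tx = kvs.transaction(Read, Optimistic).await?;
let de = tx.get_db_access(&ns, &db, &ac).await?;
tx.cancel().await?; // nothing to commit; free the read handle early
// `de` is a cached (shared) definition, which is why the code
// now matches on `de.kind.clone()` instead of consuming it.
```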
config(key.alg, key.key.as_bytes()), #[cfg(feature = "jwks")] JwtAccessVerify::Jwks(jwks) => { if let Some(kid) = token_data.header.kid { @@ -452,13 +447,16 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul // Log the decoded authentication claims trace!("Authenticating to namespace `{}` with user `{}`", ns, id); // Create a new readonly transaction - let mut tx = kvs.transaction(Read, Optimistic).await?; + let tx = kvs.transaction(Read, Optimistic).await?; // Get the namespace user let de = tx.get_ns_user(&ns, &id).await.map_err(|e| { trace!("Error while authenticating to namespace `{ns}`: {e}"); Error::InvalidAuth })?; - let cf = config(Algorithm::Hs512, de.code)?; + // Ensure that the transaction is cancelled + tx.cancel().await?; + // Check the algorithm + let cf = config(Algorithm::Hs512, de.code.as_bytes())?; // Verify the token decode::(token, &cf.0, &cf.1)?; // Log the success @@ -482,13 +480,15 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul // Log the decoded authentication claims trace!("Authenticating to root with access method `{}`", ac); // Create a new readonly transaction - let mut tx = kvs.transaction(Read, Optimistic).await?; + let tx = kvs.transaction(Read, Optimistic).await?; // Get the namespace access method let de = tx.get_root_access(&ac).await?; + // Ensure that the transaction is cancelled + tx.cancel().await?; // Obtain the configuration to verify the token based on the access method - let cf = match de.kind { + let cf = match de.kind.clone() { AccessType::Jwt(ac) => match ac.verify { - JwtAccessVerify::Key(key) => config(key.alg, key.key), + JwtAccessVerify::Key(key) => config(key.alg, key.key.as_bytes()), #[cfg(feature = "jwks")] JwtAccessVerify::Jwks(jwks) => { if let Some(kid) = token_data.header.kid { @@ -533,13 +533,16 @@ pub async fn token(kvs: &Datastore, session: &mut Session, token: &str) -> Resul // Log the decoded authentication claims trace!("Authenticating to root level with user `{}`", id); // Create a new readonly transaction - let mut tx = kvs.transaction(Read, Optimistic).await?; + let tx = kvs.transaction(Read, Optimistic).await?; // Get the namespace user let de = tx.get_root_user(&id).await.map_err(|e| { trace!("Error while authenticating to root: {e}"); Error::InvalidAuth })?; - let cf = config(Algorithm::Hs512, de.code)?; + // Ensure that the transaction is cancelled + tx.cancel().await?; + // Check the algorithm + let cf = config(Algorithm::Hs512, de.code.as_bytes())?; // Verify the token decode::(token, &cf.0, &cf.1)?; // Log the success @@ -565,14 +568,18 @@ pub async fn verify_root_creds( pass: &str, ) -> Result { // Create a new readonly transaction - let mut tx = ds.transaction(Read, Optimistic).await?; + let tx = ds.transaction(Read, Optimistic).await?; // Fetch the specified user from storage let user = tx.get_root_user(user).await.map_err(|e| { trace!("Error while authenticating to root: {e}"); Error::InvalidAuth })?; + // Ensure that the transaction is cancelled + tx.cancel().await?; // Verify the specified password for the user verify_pass(pass, user.hash.as_ref())?; + // Clone the cached user object + let user = (*user).clone(); // Return the verified user object Ok(user) } @@ -584,14 +591,18 @@ pub async fn verify_ns_creds( pass: &str, ) -> Result { // Create a new readonly transaction - let mut tx = ds.transaction(Read, Optimistic).await?; + let tx = ds.transaction(Read, Optimistic).await?; // Fetch the specified user from storage let user = 
tx.get_ns_user(ns, user).await.map_err(|e| { trace!("Error while authenticating to namespace `{ns}`: {e}"); Error::InvalidAuth })?; + // Ensure that the transaction is cancelled + tx.cancel().await?; // Verify the specified password for the user verify_pass(pass, user.hash.as_ref())?; + // Clone the cached user object + let user = (*user).clone(); // Return the verified user object Ok(user) } @@ -604,14 +615,18 @@ pub async fn verify_db_creds( pass: &str, ) -> Result<DefineUserStatement, Error> { // Create a new readonly transaction - let mut tx = ds.transaction(Read, Optimistic).await?; + let tx = ds.transaction(Read, Optimistic).await?; // Fetch the specified user from storage let user = tx.get_db_user(ns, db, user).await.map_err(|e| { trace!("Error while authenticating to database `{ns}/{db}`: {e}"); Error::InvalidAuth })?; + // Ensure that the transaction is cancelled + tx.cancel().await?; // Verify the specified password for the user verify_pass(pass, user.hash.as_ref())?; + // Clone the cached user object + let user = (*user).clone(); // Return the verified user object Ok(user) } @@ -1685,7 +1700,7 @@ mod tests { algorithm: jsonwebtoken::jwk::AlgorithmParameters::OctetKey( jsonwebtoken::jwk::OctetKeyParameters { key_type: jsonwebtoken::jwk::OctetKeyType::Octet, - value: STANDARD_NO_PAD.encode(&secret), + value: STANDARD_NO_PAD.encode(secret), }, ), }], diff --git a/core/src/idg/u32.rs b/core/src/idg/u32.rs index 02f16054..8617e7e6 100644 --- a/core/src/idg/u32.rs +++ b/core/src/idg/u32.rs @@ -115,18 +115,18 @@ mod tests { use crate::kvs::{Datastore, LockType::*, Transaction, TransactionType::*}; async fn get_ids(ds: &Datastore) -> (Transaction, U32) { - let mut tx = ds.transaction(Write, Optimistic).await.unwrap(); + let txn = ds.transaction(Write, Optimistic).await.unwrap(); let key = "foo"; - let v = tx.get(key).await.unwrap(); + let v = txn.get(key).await.unwrap(); let d = U32::new(key.into(), v).await.unwrap(); - (tx, d) + (txn, d) } - async fn finish(mut tx: Transaction, mut d: U32) -> Result<(), Error> { + async fn finish(txn: Transaction, mut d: U32) -> Result<(), Error> { if let Some((key, val)) = d.finish() { - tx.set(key, val).await?; + txn.set(key, val).await?; } - tx.commit().await + txn.commit().await } #[tokio::test] diff --git a/core/src/idx/docids.rs b/core/src/idx/docids.rs index dc9ca15b..e9389f7b 100644 --- a/core/src/idx/docids.rs +++ b/core/src/idx/docids.rs @@ -23,7 +23,7 @@ pub struct DocIds { impl DocIds { pub async fn new( ixs: &IndexStores, - tx: &mut Transaction, + tx: &Transaction, tt: TransactionType, ikb: IndexKeyBase, default_btree_order: u32, @@ -73,7 +73,7 @@ impl DocIds { pub(crate) async fn get_doc_id( &self, - tx: &mut Transaction, + tx: &Transaction, doc_key: Key, ) -> Result<Option<DocId>, Error> { self.btree.search(tx, &self.store, &doc_key).await } @@ -83,7 +83,7 @@ /// If the doc_id does not exist, a new one is created and associated to the given key. pub(in crate::idx) async fn resolve_doc_id( &mut self, - tx: &mut Transaction, + tx: &Transaction, doc_key: Key, ) -> Result<Resolved, Error> { { @@ -99,7 +99,7 @@ impl DocIds { pub(in crate::idx) async fn remove_doc( &mut self, - tx: &mut Transaction, + tx: &Transaction, doc_key: Key, ) -> Result<Option<DocId>, Error> { if let Some(doc_id) = self.btree.delete(tx, &mut self.store, doc_key).await? 
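The `DocIds` hunks show the signature change applied across all of the index structures: helpers now take `&Transaction` rather than `&mut Transaction`, presumably relying on interior mutability inside the new transaction type. The practical effect is that one handle can be threaded through several components in the same scope. A hypothetical sketch using the names from the tests below (`ds` a `Datastore`, `doc_ids` a mutable `DocIds`):

```rust
// One transaction handle, shared by plain borrows.
let tx = ds.transaction(Write, Optimistic).await?;
let doc_id = doc_ids.resolve_doc_id(&tx, "Foo".into()).await?;
// Other helpers can borrow the same `&tx` in this scope, because none
// of the index APIs requires `&mut Transaction` any more.
doc_ids.finish(&tx).await?;
tx.commit().await?;
```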
{ @@ -119,7 +119,7 @@ impl DocIds { pub(in crate::idx) async fn get_doc_key( &self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, ) -> Result, Error> { let doc_id_key = self.index_key_base.new_bi_key(doc_id); @@ -130,14 +130,11 @@ impl DocIds { } } - pub(in crate::idx) async fn statistics( - &self, - tx: &mut Transaction, - ) -> Result { + pub(in crate::idx) async fn statistics(&self, tx: &Transaction) -> Result { self.btree.statistics(tx, &self.store).await } - pub(in crate::idx) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> { + pub(in crate::idx) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> { if let Some(new_cache) = self.store.finish(tx).await? { let btree = self.btree.inc_generation().clone(); let state = State { @@ -260,16 +257,15 @@ mod tests { const BTREE_ORDER: u32 = 7; async fn new_operation(ds: &Datastore, tt: TransactionType) -> (Transaction, DocIds) { - let mut tx = ds.transaction(tt, Optimistic).await.unwrap(); - let d = - DocIds::new(ds.index_store(), &mut tx, tt, IndexKeyBase::default(), BTREE_ORDER, 100) - .await - .unwrap(); + let tx = ds.transaction(tt, Optimistic).await.unwrap(); + let d = DocIds::new(ds.index_store(), &tx, tt, IndexKeyBase::default(), BTREE_ORDER, 100) + .await + .unwrap(); (tx, d) } - async fn finish(mut tx: Transaction, mut d: DocIds) { - d.finish(&mut tx).await.unwrap(); + async fn finish(tx: Transaction, mut d: DocIds) { + d.finish(&tx).await.unwrap(); tx.commit().await.unwrap(); } @@ -279,83 +275,65 @@ mod tests { // Resolve a first doc key { - let (mut tx, mut d) = new_operation(&ds, Write).await; - let doc_id = d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(); + let (tx, mut d) = new_operation(&ds, Write).await; + let doc_id = d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(); finish(tx, d).await; - let (mut tx, d) = new_operation(&ds, Read).await; - assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 1); - assert_eq!(d.get_doc_key(&mut tx, 0).await.unwrap(), Some("Foo".into())); + let (tx, d) = new_operation(&ds, Read).await; + assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 1); + assert_eq!(d.get_doc_key(&tx, 0).await.unwrap(), Some("Foo".into())); assert_eq!(doc_id, Resolved::New(0)); } // Resolve the same doc key { - let (mut tx, mut d) = new_operation(&ds, Write).await; - let doc_id = d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(); + let (tx, mut d) = new_operation(&ds, Write).await; + let doc_id = d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(); finish(tx, d).await; - let (mut tx, d) = new_operation(&ds, Read).await; - assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 1); - assert_eq!(d.get_doc_key(&mut tx, 0).await.unwrap(), Some("Foo".into())); + let (tx, d) = new_operation(&ds, Read).await; + assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 1); + assert_eq!(d.get_doc_key(&tx, 0).await.unwrap(), Some("Foo".into())); assert_eq!(doc_id, Resolved::Existing(0)); } // Resolve another single doc key { - let (mut tx, mut d) = new_operation(&ds, Write).await; - let doc_id = d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(); + let (tx, mut d) = new_operation(&ds, Write).await; + let doc_id = d.resolve_doc_id(&tx, "Bar".into()).await.unwrap(); finish(tx, d).await; - let (mut tx, d) = new_operation(&ds, Read).await; - assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 2); - assert_eq!(d.get_doc_key(&mut tx, 1).await.unwrap(), Some("Bar".into())); + let (tx, d) = new_operation(&ds, Read).await; + 
assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 2); + assert_eq!(d.get_doc_key(&tx, 1).await.unwrap(), Some("Bar".into())); assert_eq!(doc_id, Resolved::New(1)); } // Resolve another two existing doc keys and two new doc keys (interlaced) { - let (mut tx, mut d) = new_operation(&ds, Write).await; - assert_eq!( - d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(), - Resolved::Existing(0) - ); - assert_eq!(d.resolve_doc_id(&mut tx, "Hello".into()).await.unwrap(), Resolved::New(2)); - assert_eq!( - d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(), - Resolved::Existing(1) - ); - assert_eq!(d.resolve_doc_id(&mut tx, "World".into()).await.unwrap(), Resolved::New(3)); + let (tx, mut d) = new_operation(&ds, Write).await; + assert_eq!(d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(), Resolved::Existing(0)); + assert_eq!(d.resolve_doc_id(&tx, "Hello".into()).await.unwrap(), Resolved::New(2)); + assert_eq!(d.resolve_doc_id(&tx, "Bar".into()).await.unwrap(), Resolved::Existing(1)); + assert_eq!(d.resolve_doc_id(&tx, "World".into()).await.unwrap(), Resolved::New(3)); finish(tx, d).await; - let (mut tx, d) = new_operation(&ds, Read).await; - assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 4); + let (tx, d) = new_operation(&ds, Read).await; + assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 4); } { - let (mut tx, mut d) = new_operation(&ds, Write).await; - assert_eq!( - d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(), - Resolved::Existing(0) - ); - assert_eq!( - d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(), - Resolved::Existing(1) - ); - assert_eq!( - d.resolve_doc_id(&mut tx, "Hello".into()).await.unwrap(), - Resolved::Existing(2) - ); - assert_eq!( - d.resolve_doc_id(&mut tx, "World".into()).await.unwrap(), - Resolved::Existing(3) - ); + let (tx, mut d) = new_operation(&ds, Write).await; + assert_eq!(d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(), Resolved::Existing(0)); + assert_eq!(d.resolve_doc_id(&tx, "Bar".into()).await.unwrap(), Resolved::Existing(1)); + assert_eq!(d.resolve_doc_id(&tx, "Hello".into()).await.unwrap(), Resolved::Existing(2)); + assert_eq!(d.resolve_doc_id(&tx, "World".into()).await.unwrap(), Resolved::Existing(3)); finish(tx, d).await; - let (mut tx, d) = new_operation(&ds, Read).await; - assert_eq!(d.get_doc_key(&mut tx, 0).await.unwrap(), Some("Foo".into())); - assert_eq!(d.get_doc_key(&mut tx, 1).await.unwrap(), Some("Bar".into())); - assert_eq!(d.get_doc_key(&mut tx, 2).await.unwrap(), Some("Hello".into())); - assert_eq!(d.get_doc_key(&mut tx, 3).await.unwrap(), Some("World".into())); - assert_eq!(d.statistics(&mut tx).await.unwrap().keys_count, 4); + let (tx, d) = new_operation(&ds, Read).await; + assert_eq!(d.get_doc_key(&tx, 0).await.unwrap(), Some("Foo".into())); + assert_eq!(d.get_doc_key(&tx, 1).await.unwrap(), Some("Bar".into())); + assert_eq!(d.get_doc_key(&tx, 2).await.unwrap(), Some("Hello".into())); + assert_eq!(d.get_doc_key(&tx, 3).await.unwrap(), Some("World".into())); + assert_eq!(d.statistics(&tx).await.unwrap().keys_count, 4); } } @@ -365,53 +343,53 @@ mod tests { // Create two docs { - let (mut tx, mut d) = new_operation(&ds, Write).await; - assert_eq!(d.resolve_doc_id(&mut tx, "Foo".into()).await.unwrap(), Resolved::New(0)); - assert_eq!(d.resolve_doc_id(&mut tx, "Bar".into()).await.unwrap(), Resolved::New(1)); + let (tx, mut d) = new_operation(&ds, Write).await; + assert_eq!(d.resolve_doc_id(&tx, "Foo".into()).await.unwrap(), Resolved::New(0)); + assert_eq!(d.resolve_doc_id(&tx, 
"Bar".into()).await.unwrap(), Resolved::New(1)); finish(tx, d).await; } // Remove doc 1 { - let (mut tx, mut d) = new_operation(&ds, Write).await; - assert_eq!(d.remove_doc(&mut tx, "Dummy".into()).await.unwrap(), None); - assert_eq!(d.remove_doc(&mut tx, "Foo".into()).await.unwrap(), Some(0)); + let (tx, mut d) = new_operation(&ds, Write).await; + assert_eq!(d.remove_doc(&tx, "Dummy".into()).await.unwrap(), None); + assert_eq!(d.remove_doc(&tx, "Foo".into()).await.unwrap(), Some(0)); finish(tx, d).await; } // Check 'Foo' has been removed { - let (mut tx, mut d) = new_operation(&ds, Write).await; - assert_eq!(d.remove_doc(&mut tx, "Foo".into()).await.unwrap(), None); + let (tx, mut d) = new_operation(&ds, Write).await; + assert_eq!(d.remove_doc(&tx, "Foo".into()).await.unwrap(), None); finish(tx, d).await; } // Insert a new doc - should take the available id 1 { - let (mut tx, mut d) = new_operation(&ds, Write).await; - assert_eq!(d.resolve_doc_id(&mut tx, "Hello".into()).await.unwrap(), Resolved::New(0)); + let (tx, mut d) = new_operation(&ds, Write).await; + assert_eq!(d.resolve_doc_id(&tx, "Hello".into()).await.unwrap(), Resolved::New(0)); finish(tx, d).await; } // Remove doc 2 { - let (mut tx, mut d) = new_operation(&ds, Write).await; - assert_eq!(d.remove_doc(&mut tx, "Dummy".into()).await.unwrap(), None); - assert_eq!(d.remove_doc(&mut tx, "Bar".into()).await.unwrap(), Some(1)); + let (tx, mut d) = new_operation(&ds, Write).await; + assert_eq!(d.remove_doc(&tx, "Dummy".into()).await.unwrap(), None); + assert_eq!(d.remove_doc(&tx, "Bar".into()).await.unwrap(), Some(1)); finish(tx, d).await; } // Check 'Bar' has been removed { - let (mut tx, mut d) = new_operation(&ds, Write).await; - assert_eq!(d.remove_doc(&mut tx, "Foo".into()).await.unwrap(), None); + let (tx, mut d) = new_operation(&ds, Write).await; + assert_eq!(d.remove_doc(&tx, "Foo".into()).await.unwrap(), None); finish(tx, d).await; } // Insert a new doc - should take the available id 2 { - let (mut tx, mut d) = new_operation(&ds, Write).await; - assert_eq!(d.resolve_doc_id(&mut tx, "World".into()).await.unwrap(), Resolved::New(1)); + let (tx, mut d) = new_operation(&ds, Write).await; + assert_eq!(d.resolve_doc_id(&tx, "World".into()).await.unwrap(), Resolved::New(1)); finish(tx, d).await; } } diff --git a/core/src/idx/ft/analyzer/mod.rs b/core/src/idx/ft/analyzer/mod.rs index dcf1ea10..a241c047 100644 --- a/core/src/idx/ft/analyzer/mod.rs +++ b/core/src/idx/ft/analyzer/mod.rs @@ -15,7 +15,7 @@ use filter::Filter; use reblessive::tree::Stk; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; - +use std::sync::Arc; mod filter; mod tokenizer; @@ -35,6 +35,17 @@ impl From for Analyzer { } } +// TODO: @emmanuel-keller we probably don't need to clone the value here +impl From> for Analyzer { + fn from(az: Arc) -> Self { + Self { + function: az.function.clone().map(|i| i.0), + tokenizers: az.tokenizers.clone(), + filters: Filter::from(az.filters.clone()), + } + } +} + pub(in crate::idx) type TermsList = Vec>; pub(in crate::idx) struct TermsSet { @@ -72,13 +83,13 @@ impl Analyzer { let mut list = Vec::with_capacity(tokens.list().len()); let mut unique_tokens = HashSet::new(); let mut set = HashSet::new(); - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); let mut has_unknown_terms = false; for token in tokens.list() { // Tokens can contains duplicated, not need to evaluate them again if unique_tokens.insert(token) { // Is the term known in the index? 
- let opt_term_id = t.get_term_id(&mut tx, tokens.get_token_string(token)?).await?; + let opt_term_id = t.get_term_id(&tx, tokens.get_token_string(token)?).await?; list.push(opt_term_id.map(|tid| (tid, token.get_char_len()))); if let Some(term_id) = opt_term_id { set.insert(term_id); @@ -109,12 +120,10 @@ impl Analyzer { self.analyze_value(stk, ctx, opt, content, FilteringStage::Indexing, &mut tv).await?; let mut set = HashSet::new(); let mut has_unknown_terms = false; - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); for tokens in tv { for token in tokens.list() { - if let Some(term_id) = - t.get_term_id(&mut tx, tokens.get_token_string(token)?).await? - { + if let Some(term_id) = t.get_term_id(&tx, tokens.get_token_string(token)?).await? { set.insert(term_id); } else { has_unknown_terms = true; @@ -162,9 +171,9 @@ impl Analyzer { } // Now we can resolve the term ids let mut tfid = Vec::with_capacity(tf.len()); - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); for (t, f) in tf { - tfid.push((terms.resolve_term_id(&mut tx, t).await?, f)); + tfid.push((terms.resolve_term_id(&tx, t).await?, f)); } drop(tx); Ok((dl, tfid)) @@ -204,9 +213,9 @@ impl Analyzer { // Now we can resolve the term ids let mut tfid = Vec::with_capacity(tfos.len()); let mut osid = Vec::with_capacity(tfos.len()); - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); for (t, o) in tfos { - let id = terms.resolve_term_id(&mut tx, t).await?; + let id = terms.resolve_term_id(&tx, t).await?; tfid.push((id, o.len() as TermFrequency)); osid.push((id, OffsetRecords(o))); } @@ -308,7 +317,7 @@ impl Analyzer { mod tests { use super::Analyzer; use crate::ctx::Context; - use crate::dbs::{Options, Transaction}; + use crate::dbs::Options; use crate::idx::ft::analyzer::filter::FilteringStage; use crate::idx::ft::analyzer::tokenizer::{Token, Tokens}; use crate::kvs::{Datastore, LockType, TransactionType}; @@ -316,14 +325,12 @@ mod tests { sql::{statements::DefineStatement, Statement}, syn, }; - use futures::lock::Mutex; use std::sync::Arc; async fn get_analyzer_tokens(def: &str, input: &str) -> Tokens { let ds = Datastore::new("memory").await.unwrap(); - let tx = ds.transaction(TransactionType::Read, LockType::Optimistic).await.unwrap(); - let txn: Transaction = Arc::new(Mutex::new(tx)); - let ctx = Context::default().set_transaction(txn); + let txn = ds.transaction(TransactionType::Read, LockType::Optimistic).await.unwrap(); + let ctx = Context::default().with_transaction(Arc::new(txn)); let mut stmt = syn::parse(&format!("DEFINE {def}")).unwrap(); let Some(Statement::Define(DefineStatement::Analyzer(az))) = stmt.0 .0.pop() else { diff --git a/core/src/idx/ft/doclength.rs b/core/src/idx/ft/doclength.rs index 1acddd6b..320e5149 100644 --- a/core/src/idx/ft/doclength.rs +++ b/core/src/idx/ft/doclength.rs @@ -18,7 +18,7 @@ pub(super) struct DocLengths { impl DocLengths { pub(super) async fn new( ixs: &IndexStores, - tx: &mut Transaction, + tx: &Transaction, ikb: IndexKeyBase, default_btree_order: u32, tt: TransactionType, @@ -48,7 +48,7 @@ impl DocLengths { pub(super) async fn get_doc_length( &self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, ) -> Result, Error> { self.btree.search(tx, &self.store, &doc_id.to_be_bytes().to_vec()).await @@ -56,7 +56,7 @@ impl DocLengths { pub(super) async fn get_doc_length_mut( &mut self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, ) -> Result, Error> { self.btree.search_mut(tx, &mut self.store, &doc_id.to_be_bytes().to_vec()).await @@ -64,7 +64,7 @@ impl 
DocLengths { pub(super) async fn set_doc_length( &mut self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, doc_length: DocLength, ) -> Result<(), Error> { @@ -74,17 +74,17 @@ impl DocLengths { pub(super) async fn remove_doc_length( &mut self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, ) -> Result, Error> { self.btree.delete(tx, &mut self.store, doc_id.to_be_bytes().to_vec()).await } - pub(super) async fn statistics(&self, tx: &mut Transaction) -> Result { + pub(super) async fn statistics(&self, tx: &Transaction) -> Result { self.btree.statistics(tx, &self.store).await } - pub(super) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> { + pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> { if let Some(new_cache) = self.store.finish(tx).await? { let state = self.btree.inc_generation(); tx.set(self.state_key.clone(), state.try_to_val()?).await?; @@ -105,16 +105,15 @@ mod tests { order: u32, tt: TransactionType, ) -> (Transaction, DocLengths) { - let mut tx = ds.transaction(TransactionType::Write, Optimistic).await.unwrap(); - let dl = - DocLengths::new(ds.index_store(), &mut tx, IndexKeyBase::default(), order, tt, 100) - .await - .unwrap(); + let tx = ds.transaction(TransactionType::Write, Optimistic).await.unwrap(); + let dl = DocLengths::new(ds.index_store(), &tx, IndexKeyBase::default(), order, tt, 100) + .await + .unwrap(); (tx, dl) } - async fn finish(mut l: DocLengths, mut tx: Transaction) { - l.finish(&mut tx).await.unwrap(); + async fn finish(mut l: DocLengths, tx: Transaction) { + l.finish(&tx).await.unwrap(); tx.commit().await.unwrap() } @@ -126,54 +125,54 @@ mod tests { { // Check empty state - let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await; - assert_eq!(l.statistics(&mut tx).await.unwrap().keys_count, 0); - let dl = l.get_doc_length(&mut tx, 99).await.unwrap(); + let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await; + assert_eq!(l.statistics(&tx).await.unwrap().keys_count, 0); + let dl = l.get_doc_length(&tx, 99).await.unwrap(); assert_eq!(dl, None); tx.cancel().await.unwrap(); } { // Set a doc length - let (mut tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await; - l.set_doc_length(&mut tx, 99, 199).await.unwrap(); + let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await; + l.set_doc_length(&tx, 99, 199).await.unwrap(); finish(l, tx).await; } { - let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await; - assert_eq!(l.statistics(&mut tx).await.unwrap().keys_count, 1); - let dl = l.get_doc_length(&mut tx, 99).await.unwrap(); + let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await; + assert_eq!(l.statistics(&tx).await.unwrap().keys_count, 1); + let dl = l.get_doc_length(&tx, 99).await.unwrap(); assert_eq!(dl, Some(199)); tx.cancel().await.unwrap(); } { // Update doc length - let (mut tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await; - l.set_doc_length(&mut tx, 99, 299).await.unwrap(); + let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await; + l.set_doc_length(&tx, 99, 299).await.unwrap(); finish(l, tx).await; } { - let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await; - assert_eq!(l.statistics(&mut tx).await.unwrap().keys_count, 1); - let dl = l.get_doc_length(&mut tx, 99).await.unwrap(); + let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await; + 
assert_eq!(l.statistics(&tx).await.unwrap().keys_count, 1); + let dl = l.get_doc_length(&tx, 99).await.unwrap(); assert_eq!(dl, Some(299)); tx.cancel().await.unwrap(); } { // Remove doc lengths - let (mut tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await; - assert_eq!(l.remove_doc_length(&mut tx, 99).await.unwrap(), Some(299)); - assert_eq!(l.remove_doc_length(&mut tx, 99).await.unwrap(), None); + let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await; + assert_eq!(l.remove_doc_length(&tx, 99).await.unwrap(), Some(299)); + assert_eq!(l.remove_doc_length(&tx, 99).await.unwrap(), None); finish(l, tx).await; } { - let (mut tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await; - let dl = l.get_doc_length(&mut tx, 99).await.unwrap(); + let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await; + let dl = l.get_doc_length(&tx, 99).await.unwrap(); assert_eq!(dl, None); tx.cancel().await.unwrap(); } diff --git a/core/src/idx/ft/mod.rs b/core/src/idx/ft/mod.rs index aee2cb3f..b4e917ed 100644 --- a/core/src/idx/ft/mod.rs +++ b/core/src/idx/ft/mod.rs @@ -22,7 +22,7 @@ use crate::idx::ft::terms::{TermId, TermLen, Terms}; use crate::idx::trees::btree::BStatistics; use crate::idx::trees::store::IndexStores; use crate::idx::{IndexKeyBase, VersionedSerdeState}; -use crate::kvs; +use crate::kvs::Transaction; use crate::kvs::{Key, TransactionType}; use crate::sql::index::SearchParams; use crate::sql::scoring::Scoring; @@ -105,35 +105,33 @@ impl FtIndex { p: &SearchParams, tt: TransactionType, ) -> Result { - let mut tx = ctx.tx_lock().await; - let az = tx.get_db_analyzer(opt.ns()?, opt.db()?, az).await?; - let res = - Self::with_analyzer(ctx.get_index_stores(), &mut tx, az, index_key_base, p, tt).await; - drop(tx); - res + let tx = ctx.tx(); + // TODO: @emmanuel-keller we probably don't need to clone the value here + let az = tx.get_db_analyzer(opt.ns()?, opt.db()?, az).await?.as_ref().to_owned(); + Self::with_analyzer(ctx.get_index_stores(), &tx, az, index_key_base, p, tt).await } async fn with_analyzer( ixs: &IndexStores, - run: &mut kvs::Transaction, + txn: &Transaction, az: DefineAnalyzerStatement, index_key_base: IndexKeyBase, p: &SearchParams, tt: TransactionType, ) -> Result { let state_key: Key = index_key_base.new_bs_key(); - let state: State = if let Some(val) = run.get(state_key.clone()).await? { + let state: State = if let Some(val) = txn.get(state_key.clone()).await? { State::try_from_val(val)? 
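The doc-length tests condense the whole read/write choreography of these components. A trimmed round trip, reusing the `doc_length` and `finish` helpers defined just above (a sketch, not additional test code):

```rust
// Write: set a document length, then flush the b-tree state and commit.
let (tx, mut l) = doc_length(&ds, BTREE_ORDER, TransactionType::Write).await;
l.set_doc_length(&tx, 99, 199).await.unwrap();
finish(l, tx).await;

// Read: verify the stored value, then cancel the read-only transaction.
let (tx, l) = doc_length(&ds, BTREE_ORDER, TransactionType::Read).await;
assert_eq!(l.get_doc_length(&tx, 99).await.unwrap(), Some(199));
tx.cancel().await.unwrap();
```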
} else { State::default() }; let doc_ids = Arc::new(RwLock::new( - DocIds::new(ixs, run, tt, index_key_base.clone(), p.doc_ids_order, p.doc_ids_cache) + DocIds::new(ixs, txn, tt, index_key_base.clone(), p.doc_ids_order, p.doc_ids_cache) .await?, )); let doc_lengths = Arc::new(RwLock::new( DocLengths::new( ixs, - run, + txn, index_key_base.clone(), p.doc_lengths_order, tt, @@ -142,11 +140,11 @@ impl FtIndex { .await?, )); let postings = Arc::new(RwLock::new( - Postings::new(ixs, run, index_key_base.clone(), p.postings_order, tt, p.postings_cache) + Postings::new(ixs, txn, index_key_base.clone(), p.postings_order, tt, p.postings_cache) .await?, )); let terms = Arc::new(RwLock::new( - Terms::new(ixs, run, index_key_base.clone(), p.terms_order, tt, p.terms_cache).await?, + Terms::new(ixs, txn, index_key_base.clone(), p.terms_order, tt, p.terms_cache).await?, )); let termdocs = TermDocs::new(index_key_base.clone()); let offsets = Offsets::new(index_key_base.clone()); @@ -194,17 +192,17 @@ impl FtIndex { ctx: &Context<'_>, rid: &Thing, ) -> Result<(), Error> { - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); // Extract and remove the doc_id (if any) let mut doc_ids = self.doc_ids.write().await; - let doc_id = doc_ids.remove_doc(&mut tx, rid.into()).await?; + let doc_id = doc_ids.remove_doc(&tx, rid.into()).await?; drop(doc_ids); if let Some(doc_id) = doc_id { self.state.doc_count -= 1; // Remove the doc length let mut doc_lengths = self.doc_lengths.write().await; - let dl = doc_lengths.remove_doc_length(&mut tx, doc_id).await?; + let dl = doc_lengths.remove_doc_length(&tx, doc_id).await?; drop(doc_lengths); if let Some(doc_lengths) = dl { self.state.total_docs_lengths -= doc_lengths as u128; @@ -217,11 +215,11 @@ impl FtIndex { let mut p = self.postings.write().await; let mut t = self.terms.write().await; for term_id in &term_list { - p.remove_posting(&mut tx, term_id, doc_id).await?; + p.remove_posting(&tx, term_id, doc_id).await?; // if the term is not present in any document in the index, we can remove it - let doc_count = self.term_docs.remove_doc(&mut tx, term_id, doc_id).await?; + let doc_count = self.term_docs.remove_doc(&tx, term_id, doc_id).await?; if doc_count == 0 { - t.remove_term_id(&mut tx, term_id).await?; + t.remove_term_id(&tx, term_id).await?; } } drop(p); @@ -230,7 +228,7 @@ impl FtIndex { if self.highlighting { for term_id in term_list { // TODO?: Removal can be done with a prefix on doc_id - self.offsets.remove_offsets(&mut tx, doc_id, term_id).await?; + self.offsets.remove_offsets(&tx, doc_id, term_id).await?; } } } @@ -248,11 +246,10 @@ impl FtIndex { content: Vec<Value>, ) -> Result<(), Error> { // Resolve the doc_id - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); let mut doc_ids = self.doc_ids.write().await; - let resolved = doc_ids.resolve_doc_id(&mut tx, rid.into()).await?; + let resolved = doc_ids.resolve_doc_id(&tx, rid.into()).await?; drop(doc_ids); - drop(tx); let doc_id = *resolved.doc_id(); // Extract the doc_lengths, terms and frequencies (and offsets) @@ -272,14 +269,14 @@ impl FtIndex { }; // Set the doc length - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); let mut dl = self.doc_lengths.write().await; if resolved.was_existing() { - if let Some(old_doc_length) = dl.get_doc_length_mut(&mut tx, doc_id).await? 
{ self.state.total_docs_lengths -= old_doc_length as u128; } } - dl.set_doc_length(&mut tx, doc_id, doc_length).await?; + dl.set_doc_length(&tx, doc_id, doc_length).await?; drop(dl); // Retrieve the existing terms for this document (if any) @@ -294,22 +291,22 @@ impl FtIndex { let mut terms_ids = RoaringTreemap::default(); let mut p = self.postings.write().await; for (term_id, term_freq) in terms_and_frequencies { - p.update_posting(&mut tx, term_id, doc_id, term_freq).await?; + p.update_posting(&tx, term_id, doc_id, term_freq).await?; if let Some(old_term_ids) = &mut old_term_ids { old_term_ids.remove(term_id); } - self.term_docs.set_doc(&mut tx, term_id, doc_id).await?; + self.term_docs.set_doc(&tx, term_id, doc_id).await?; terms_ids.insert(term_id); } // Remove any remaining postings if let Some(old_term_ids) = &old_term_ids { for old_term_id in old_term_ids { - p.remove_posting(&mut tx, old_term_id, doc_id).await?; - let doc_count = self.term_docs.remove_doc(&mut tx, old_term_id, doc_id).await?; + p.remove_posting(&tx, old_term_id, doc_id).await?; + let doc_count = self.term_docs.remove_doc(&tx, old_term_id, doc_id).await?; // if the term does not have anymore postings, we can remove the term if doc_count == 0 { - t.remove_term_id(&mut tx, old_term_id).await?; + t.remove_term_id(&tx, old_term_id).await?; } } } @@ -321,14 +318,14 @@ impl FtIndex { if let Some(ofs) = offsets { if !ofs.is_empty() { for (tid, or) in ofs { - self.offsets.set_offsets(&mut tx, doc_id, tid, or).await?; + self.offsets.set_offsets(&tx, doc_id, tid, or).await?; } } } // In case of an update, we remove the offsets for the terms that no longer exist if let Some(old_term_ids) = old_term_ids { for old_term_id in old_term_ids { - self.offsets.remove_offsets(&mut tx, doc_id, old_term_id).await?; + self.offsets.remove_offsets(&tx, doc_id, old_term_id).await?; } } } @@ -365,7 +362,7 @@ impl FtIndex { pub(super) async fn get_terms_docs( &self, - tx: &mut kvs::Transaction, + tx: &Transaction, terms: &TermsList, ) -> Result<Vec<Option<(TermId, RoaringTreemap)>>, Error> { let mut terms_docs = Vec::with_capacity(terms.len()); @@ -424,7 +421,7 @@ impl FtIndex { pub(super) async fn highlight( &self, - tx: &mut kvs::Transaction, + tx: &Transaction, thg: &Thing, terms: &[Option<(TermId, TermLen)>], hlp: HighlightParams, @@ -450,7 +447,7 @@ impl FtIndex { pub(super) async fn extract_offsets( &self, - tx: &mut kvs::Transaction, + tx: &Transaction, thg: &Thing, terms: &[Option<(TermId, u32)>], partial: bool, @@ -473,25 +470,22 @@ impl FtIndex { } pub(crate) async fn statistics(&self, ctx: &Context<'_>) -> Result<FtStatistics, Error> { - // TODO do parallel execution - let mut run = ctx.tx_lock().await; + let txn = ctx.tx(); let res = FtStatistics { - doc_ids: self.doc_ids.read().await.statistics(&mut run).await?, - terms: self.terms.read().await.statistics(&mut run).await?, - doc_lengths: self.doc_lengths.read().await.statistics(&mut run).await?, - postings: self.postings.read().await.statistics(&mut run).await?, + doc_ids: self.doc_ids.read().await.statistics(&txn).await?, + terms: self.terms.read().await.statistics(&txn).await?, + doc_lengths: self.doc_lengths.read().await.statistics(&txn).await?, + postings: self.postings.read().await.statistics(&txn).await?, }; - drop(run); Ok(res) } pub(crate) async fn finish(&self, ctx: &Context<'_>) -> Result<(), Error> { - let mut run = ctx.tx_lock().await; - self.doc_ids.write().await.finish(&mut run).await?; - self.doc_lengths.write().await.finish(&mut run).await?; - self.postings.write().await.finish(&mut run).await?; - 
self.terms.write().await.finish(&mut run).await?; - drop(run); + let txn = ctx.tx(); + self.doc_ids.write().await.finish(&txn).await?; + self.doc_lengths.write().await.finish(&txn).await?; + self.postings.write().await.finish(&txn).await?; + self.terms.write().await.finish(&txn).await?; Ok(()) } } @@ -518,10 +512,7 @@ impl HitsIterator { self.iter.size_hint().0 } - pub(crate) async fn next( - &mut self, - tx: &mut kvs::Transaction, - ) -> Result, Error> { + pub(crate) async fn next(&mut self, tx: &Transaction) -> Result, Error> { let di = self.doc_ids.read().await; for doc_id in self.iter.by_ref() { if let Some(doc_key) = di.get_doc_key(tx, doc_id).await? { @@ -546,7 +537,6 @@ mod tests { use crate::sql::statements::{DefineAnalyzerStatement, DefineStatement}; use crate::sql::{Array, Statement, Thing, Value}; use crate::syn; - use futures::lock::Mutex; use reblessive::tree::Stk; use std::collections::HashMap; use std::sync::Arc; @@ -558,11 +548,11 @@ mod tests { scr: BM25Scorer, e: Vec<(&Thing, Option)>, ) { - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); if let Some(mut hits) = hits { let mut map = HashMap::new(); - while let Some((k, d)) = hits.next(&mut tx).await.unwrap() { - let s = scr.score(&mut tx, d).await.unwrap(); + while let Some((k, d)) = hits.next(&tx).await.unwrap() { + let s = scr.score(&tx, d).await.unwrap(); map.insert(k, s); } assert_eq!(map.len(), e.len()); @@ -572,7 +562,6 @@ mod tests { } else { panic!("hits is none"); } - drop(tx); } async fn search( @@ -584,9 +573,8 @@ mod tests { ) -> (Option, BM25Scorer) { let (term_list, _) = fti.extract_querying_terms(stk, ctx, opt, qs.to_string()).await.unwrap(); - let mut tx = ctx.tx_lock().await; - let td = Arc::new(fti.get_terms_docs(&mut tx, &term_list).await.unwrap()); - drop(tx); + let tx = ctx.tx(); + let td = Arc::new(fti.get_terms_docs(&tx, &term_list).await.unwrap()); let scr = fti.new_scorer(td.clone()).unwrap().unwrap(); let hits = fti.new_hits_iterator(td).unwrap(); (hits, scr) @@ -600,10 +588,10 @@ mod tests { hl: bool, ) -> (Context<'a>, Options, FtIndex) { let mut ctx = Context::default(); - let mut tx = ds.transaction(tt, Optimistic).await.unwrap(); + let tx = ds.transaction(tt, Optimistic).await.unwrap(); let fti = FtIndex::with_analyzer( ctx.get_index_stores(), - &mut tx, + &tx, az.clone(), IndexKeyBase::default(), &SearchParams { @@ -623,14 +611,14 @@ mod tests { ) .await .unwrap(); - let txn = Arc::new(Mutex::new(tx)); - ctx.set_transaction_mut(txn); + let txn = Arc::new(tx); + ctx.set_transaction(txn); (ctx, Options::default(), fti) } pub(super) async fn finish(ctx: &Context<'_>, fti: FtIndex) { fti.finish(ctx).await.unwrap(); - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); tx.commit().await.unwrap(); } diff --git a/core/src/idx/ft/offsets.rs b/core/src/idx/ft/offsets.rs index b2c07df3..16e42cb2 100644 --- a/core/src/idx/ft/offsets.rs +++ b/core/src/idx/ft/offsets.rs @@ -19,7 +19,7 @@ impl Offsets { pub(super) async fn set_offsets( &self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, term_id: TermId, offsets: OffsetRecords, @@ -32,7 +32,7 @@ impl Offsets { pub(super) async fn get_offsets( &self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, term_id: TermId, ) -> Result, Error> { @@ -47,7 +47,7 @@ impl Offsets { pub(super) async fn remove_offsets( &self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, term_id: TermId, ) -> Result<(), Error> { diff --git a/core/src/idx/ft/postings.rs b/core/src/idx/ft/postings.rs index 16f5e219..6b8e2613 100644 --- 
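The `HitsIterator::next` signature change (from `&mut kvs::Transaction` to `&Transaction`) is what lets the test helper above interleave hit iteration and scoring on the same handle. The drain loop, reduced to its essentials under the assumption that `hits`, `scr` and `tx = ctx.tx()` are set up as in `check_hits`:

```rust
// Walk the matching documents; iterator and scorer borrow the same tx.
while let Some((thing, doc_id)) = hits.next(&tx).await? {
    if let Some(score) = scr.score(&tx, doc_id).await? {
        println!("{thing} -> {score}");
    }
}
```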
a/core/src/idx/ft/postings.rs +++ b/core/src/idx/ft/postings.rs @@ -20,7 +20,7 @@ pub(super) struct Postings { impl Postings { pub(super) async fn new( ixs: &IndexStores, - tx: &mut Transaction, + tx: &Transaction, index_key_base: IndexKeyBase, order: u32, tt: TransactionType, @@ -51,7 +51,7 @@ impl Postings { pub(super) async fn update_posting( &mut self, - tx: &mut Transaction, + tx: &Transaction, term_id: TermId, doc_id: DocId, term_freq: TermFrequency, @@ -62,7 +62,7 @@ impl Postings { pub(super) async fn get_term_frequency( &self, - tx: &mut Transaction, + tx: &Transaction, term_id: TermId, doc_id: DocId, ) -> Result, Error> { @@ -72,7 +72,7 @@ impl Postings { pub(super) async fn remove_posting( &mut self, - tx: &mut Transaction, + tx: &Transaction, term_id: TermId, doc_id: DocId, ) -> Result, Error> { @@ -80,11 +80,11 @@ impl Postings { self.btree.delete(tx, &mut self.store, key).await } - pub(super) async fn statistics(&self, tx: &mut Transaction) -> Result { + pub(super) async fn statistics(&self, tx: &Transaction) -> Result { self.btree.statistics(tx, &self.store).await } - pub(super) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> { + pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> { if let Some(new_cache) = self.store.finish(tx).await? { let state = self.btree.inc_generation(); tx.set(self.state_key.clone(), state.try_to_val()?).await?; @@ -106,15 +106,15 @@ mod tests { order: u32, tt: TransactionType, ) -> (Transaction, Postings) { - let mut tx = ds.transaction(tt, Optimistic).await.unwrap(); - let p = Postings::new(ds.index_store(), &mut tx, IndexKeyBase::default(), order, tt, 100) + let tx = ds.transaction(tt, Optimistic).await.unwrap(); + let p = Postings::new(ds.index_store(), &tx, IndexKeyBase::default(), order, tt, 100) .await .unwrap(); (tx, p) } - async fn finish(mut tx: Transaction, mut p: Postings) { - p.finish(&mut tx).await.unwrap(); + async fn finish(tx: Transaction, mut p: Postings) { + p.finish(&tx).await.unwrap(); tx.commit().await.unwrap(); } @@ -129,33 +129,33 @@ mod tests { let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await; finish(tx, p).await; - let (mut tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await; - assert_eq!(p.statistics(&mut tx).await.unwrap().keys_count, 0); + let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await; + assert_eq!(p.statistics(&tx).await.unwrap().keys_count, 0); // Add postings - let (mut tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await; - p.update_posting(&mut tx, 1, 2, 3).await.unwrap(); - p.update_posting(&mut tx, 1, 4, 5).await.unwrap(); + let (tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await; + p.update_posting(&tx, 1, 2, 3).await.unwrap(); + p.update_posting(&tx, 1, 4, 5).await.unwrap(); finish(tx, p).await; - let (mut tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await; - assert_eq!(p.statistics(&mut tx).await.unwrap().keys_count, 2); + let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await; + assert_eq!(p.statistics(&tx).await.unwrap().keys_count, 2); - assert_eq!(p.get_term_frequency(&mut tx, 1, 2).await.unwrap(), Some(3)); - assert_eq!(p.get_term_frequency(&mut tx, 1, 4).await.unwrap(), Some(5)); + assert_eq!(p.get_term_frequency(&tx, 1, 2).await.unwrap(), Some(3)); + assert_eq!(p.get_term_frequency(&tx, 1, 4).await.unwrap(), Some(5)); - let (mut tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Write).await; + let (tx, mut p) = new_operation(&ds, DEFAULT_BTREE_ORDER, 
Write).await; // Check removal of doc 2 - assert_eq!(p.remove_posting(&mut tx, 1, 2).await.unwrap(), Some(3)); + assert_eq!(p.remove_posting(&tx, 1, 2).await.unwrap(), Some(3)); // Again the same - assert_eq!(p.remove_posting(&mut tx, 1, 2).await.unwrap(), None); + assert_eq!(p.remove_posting(&tx, 1, 2).await.unwrap(), None); // Remove doc 4 - assert_eq!(p.remove_posting(&mut tx, 1, 4).await.unwrap(), Some(5)); + assert_eq!(p.remove_posting(&tx, 1, 4).await.unwrap(), Some(5)); finish(tx, p).await; // The underlying b-tree should be empty now - let (mut tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await; - assert_eq!(p.statistics(&mut tx).await.unwrap().keys_count, 0); + let (tx, p) = new_operation(&ds, DEFAULT_BTREE_ORDER, Read).await; + assert_eq!(p.statistics(&tx).await.unwrap().keys_count, 0); } } } diff --git a/core/src/idx/ft/scorer.rs b/core/src/idx/ft/scorer.rs index 04dd73c9..5053a1f6 100644 --- a/core/src/idx/ft/scorer.rs +++ b/core/src/idx/ft/scorer.rs @@ -40,7 +40,7 @@ impl BM25Scorer { async fn term_score( &self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, term_doc_count: DocLength, term_frequency: TermFrequency, @@ -53,7 +53,7 @@ impl BM25Scorer { pub(crate) async fn score( &self, - tx: &mut Transaction, + tx: &Transaction, doc_id: DocId, ) -> Result, Error> { let mut sc = 0.0; diff --git a/core/src/idx/ft/termdocs.rs b/core/src/idx/ft/termdocs.rs index 6dd02ec2..7c9d93ea 100644 --- a/core/src/idx/ft/termdocs.rs +++ b/core/src/idx/ft/termdocs.rs @@ -22,7 +22,7 @@ impl TermDocs { pub(super) async fn set_doc( &self, - tx: &mut Transaction, + tx: &Transaction, term_id: TermId, doc_id: DocId, ) -> Result<(), Error> { @@ -38,7 +38,7 @@ impl TermDocs { pub(super) async fn get_docs( &self, - tx: &mut Transaction, + tx: &Transaction, term_id: TermId, ) -> Result, Error> { let key = self.index_key_base.new_bc_key(term_id); @@ -52,7 +52,7 @@ impl TermDocs { pub(super) async fn remove_doc( &self, - tx: &mut Transaction, + tx: &Transaction, term_id: TermId, doc_id: DocId, ) -> Result { diff --git a/core/src/idx/ft/terms.rs b/core/src/idx/ft/terms.rs index e36f08ad..f721728a 100644 --- a/core/src/idx/ft/terms.rs +++ b/core/src/idx/ft/terms.rs @@ -24,7 +24,7 @@ pub(in crate::idx) struct Terms { impl Terms { pub(super) async fn new( ixs: &IndexStores, - tx: &mut Transaction, + tx: &Transaction, index_key_base: IndexKeyBase, default_btree_order: u32, tt: TransactionType, @@ -74,7 +74,7 @@ impl Terms { pub(super) async fn resolve_term_id( &mut self, - tx: &mut Transaction, + tx: &Transaction, term: &str, ) -> Result { let term_key = term.into(); @@ -91,7 +91,7 @@ impl Terms { pub(super) async fn get_term_id( &self, - tx: &mut Transaction, + tx: &Transaction, term: &str, ) -> Result, Error> { self.btree.search(tx, &self.store, &term.into()).await @@ -99,7 +99,7 @@ impl Terms { pub(super) async fn remove_term_id( &mut self, - tx: &mut Transaction, + tx: &Transaction, term_id: TermId, ) -> Result<(), Error> { let term_id_key = self.index_key_base.new_bu_key(term_id); @@ -117,11 +117,11 @@ impl Terms { Ok(()) } - pub(super) async fn statistics(&self, tx: &mut Transaction) -> Result { + pub(super) async fn statistics(&self, tx: &Transaction) -> Result { self.btree.statistics(tx, &self.store).await } - pub(super) async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> { + pub(super) async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> { if let Some(new_cache) = self.store.finish(tx).await? 
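Every index component follows the same two-phase shutdown, visible in each test helper named `finish`: persist the b-tree cache and state key under the shared transaction, then commit once. As a standalone sketch (shown for `Postings`; `Terms`, `DocIds` and `DocLengths` are identical in shape):

```rust
// The recurring shutdown idiom: flush index state, then one commit
// that covers everything staged above it.
async fn finish(tx: Transaction, mut p: Postings) -> Result<(), Error> {
    p.finish(&tx).await?; // writes any updated cache entries + state key
    tx.commit().await
}
```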
{ let btree = self.btree.inc_generation().clone(); let state = State { @@ -253,15 +253,15 @@ mod tests { order: u32, tt: TransactionType, ) -> (Transaction, Terms) { - let mut tx = ds.transaction(tt, Optimistic).await.unwrap(); - let t = Terms::new(ds.index_store(), &mut tx, IndexKeyBase::default(), order, tt, 100) + let tx = ds.transaction(tt, Optimistic).await.unwrap(); + let t = Terms::new(ds.index_store(), &tx, IndexKeyBase::default(), order, tt, 100) .await .unwrap(); (tx, t) } - async fn finish(mut tx: Transaction, mut t: Terms) { - t.finish(&mut tx).await.unwrap(); + async fn finish(tx: Transaction, mut t: Terms) { + t.finish(&tx).await.unwrap(); tx.commit().await.unwrap(); } @@ -279,43 +279,43 @@ mod tests { // Resolve a first term { - let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; - assert_eq!(t.resolve_term_id(&mut tx, "C").await.unwrap(), 0); + let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; + assert_eq!(t.resolve_term_id(&tx, "C").await.unwrap(), 0); finish(tx, t).await; - let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; - assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 1); + let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; + assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 1); } // Resolve a second term { - let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; - assert_eq!(t.resolve_term_id(&mut tx, "D").await.unwrap(), 1); + let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; + assert_eq!(t.resolve_term_id(&tx, "D").await.unwrap(), 1); finish(tx, t).await; - let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; - assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 2); + let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; + assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 2); } // Resolve two existing terms with new frequencies { - let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; - assert_eq!(t.resolve_term_id(&mut tx, "C").await.unwrap(), 0); - assert_eq!(t.resolve_term_id(&mut tx, "D").await.unwrap(), 1); + let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; + assert_eq!(t.resolve_term_id(&tx, "C").await.unwrap(), 0); + assert_eq!(t.resolve_term_id(&tx, "D").await.unwrap(), 1); finish(tx, t).await; - let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; - assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 2); + let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; + assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 2); } // Resolve one existing terms and two new terms { - let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; - assert_eq!(t.resolve_term_id(&mut tx, "A").await.unwrap(), 2); - assert_eq!(t.resolve_term_id(&mut tx, "C").await.unwrap(), 0); - assert_eq!(t.resolve_term_id(&mut tx, "E").await.unwrap(), 3); + let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; + assert_eq!(t.resolve_term_id(&tx, "A").await.unwrap(), 2); + assert_eq!(t.resolve_term_id(&tx, "C").await.unwrap(), 0); + assert_eq!(t.resolve_term_id(&tx, "E").await.unwrap(), 3); finish(tx, t).await; - let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; - assert_eq!(t.statistics(&mut tx).await.unwrap().keys_count, 4); + let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; + assert_eq!(t.statistics(&tx).await.unwrap().keys_count, 4); } } @@ -326,38 +326,38 @@ mod tests { let ds = Datastore::new("memory").await.unwrap(); { - let (mut tx, mut t) = new_operation(&ds, 
BTREE_ORDER, Write).await; + let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; // Check removing a non-existing term id does not fail - assert!(t.remove_term_id(&mut tx, 0).await.is_ok()); + assert!(t.remove_term_id(&tx, 0).await.is_ok()); // Create a few terms - t.resolve_term_id(&mut tx, "A").await.unwrap(); - t.resolve_term_id(&mut tx, "C").await.unwrap(); - t.resolve_term_id(&mut tx, "E").await.unwrap(); + t.resolve_term_id(&tx, "A").await.unwrap(); + t.resolve_term_id(&tx, "C").await.unwrap(); + t.resolve_term_id(&tx, "E").await.unwrap(); finish(tx, t).await; } for term in ["A", "C", "E"] { - let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; - let term_id = t.get_term_id(&mut tx, term).await.unwrap(); + let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; + let term_id = t.get_term_id(&tx, term).await.unwrap(); if let Some(term_id) = term_id { - let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; - t.remove_term_id(&mut tx, term_id).await.unwrap(); + let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; + t.remove_term_id(&tx, term_id).await.unwrap(); finish(tx, t).await; - let (mut tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; - assert_eq!(t.get_term_id(&mut tx, term).await.unwrap(), None); + let (tx, t) = new_operation(&ds, BTREE_ORDER, Read).await; + assert_eq!(t.get_term_id(&tx, term).await.unwrap(), None); } else { panic!("Term ID not found: {}", term); } } // Check id recycling - let (mut tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; - assert_eq!(t.resolve_term_id(&mut tx, "B").await.unwrap(), 0); - assert_eq!(t.resolve_term_id(&mut tx, "D").await.unwrap(), 1); + let (tx, mut t) = new_operation(&ds, BTREE_ORDER, Write).await; + assert_eq!(t.resolve_term_id(&tx, "B").await.unwrap(), 0); + assert_eq!(t.resolve_term_id(&tx, "D").await.unwrap(), 1); finish(tx, t).await; } @@ -375,10 +375,10 @@ mod tests { async fn test_resolve_100_docs_with_50_words_one_by_one() { let ds = Datastore::new("memory").await.unwrap(); for _ in 0..100 { - let (mut tx, mut t) = new_operation(&ds, 100, Write).await; + let (tx, mut t) = new_operation(&ds, 100, Write).await; let terms_string = random_term_freq_vec(50); for (term, _) in terms_string { - t.resolve_term_id(&mut tx, &term).await.unwrap(); + t.resolve_term_id(&tx, &term).await.unwrap(); } finish(tx, t).await; } @@ -388,11 +388,11 @@ mod tests { async fn test_resolve_100_docs_with_50_words_batch_of_10() { let ds = Datastore::new("memory").await.unwrap(); for _ in 0..10 { - let (mut tx, mut t) = new_operation(&ds, 100, Write).await; + let (tx, mut t) = new_operation(&ds, 100, Write).await; for _ in 0..10 { let terms_string = random_term_freq_vec(50); for (term, _) in terms_string { - t.resolve_term_id(&mut tx, &term).await.unwrap(); + t.resolve_term_id(&tx, &term).await.unwrap(); } } finish(tx, t).await; diff --git a/core/src/idx/planner/checker.rs b/core/src/idx/planner/checker.rs index 588a1a94..0f8665b9 100644 --- a/core/src/idx/planner/checker.rs +++ b/core/src/idx/planner/checker.rs @@ -144,13 +144,12 @@ impl<'a> MTreeChecker<'a> { return Ok(VecDeque::from([])); } let mut result = VecDeque::with_capacity(res.len()); - let mut tx = self.ctx.tx_lock().await; + let txn = self.ctx.tx(); for (doc_id, dist) in res { - if let Some(key) = doc_ids.get_doc_key(&mut tx, doc_id).await? 
{ result.push_back((key.into(), dist, None)); } } - drop(tx); Ok(result) } } @@ -186,9 +185,8 @@ impl CheckerCacheEntry { cond: &Cond, ) -> Result { if let Some(rid) = rid { - let mut tx = ctx.tx_lock().await; - let val = Iterable::fetch_thing(&mut tx, opt, &rid).await?; - drop(tx); + let txn = ctx.tx(); + let val = Iterable::fetch_thing(&txn, opt, &rid).await?; if !val.is_none_or_null() { let (value, truthy) = { let cursor_doc = CursorDoc { @@ -229,9 +227,8 @@ impl<'a> MTreeCondChecker<'a> { match self.cache.entry(doc_id) { Entry::Occupied(e) => Ok(e.get().truthy), Entry::Vacant(e) => { - let mut tx = self.ctx.tx_lock().await; - let rid = doc_ids.get_doc_key(&mut tx, doc_id).await?.map(|k| k.into()); - drop(tx); + let txn = self.ctx.tx(); + let rid = doc_ids.get_doc_key(&txn, doc_id).await?.map(|k| k.into()); let ent = CheckerCacheEntry::build(stk, self.ctx, self.opt, rid, self.cond.as_ref()) .await?; diff --git a/core/src/idx/planner/executor.rs b/core/src/idx/planner/executor.rs index 24e576ce..6aba6bbb 100644 --- a/core/src/idx/planner/executor.rs +++ b/core/src/idx/planner/executor.rs @@ -175,10 +175,10 @@ impl InnerQueryExecutor { } Entry::Vacant(e) => { let ikb = IndexKeyBase::new(opt.ns()?, opt.db()?, idx_def)?; - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); let mt = MTreeIndex::new( ctx.get_index_stores(), - &mut tx, + &tx, ikb, p, TransactionType::Read, @@ -563,11 +563,10 @@ impl QueryExecutor { ft: &FtEntry, ) -> Result { let doc_key: Key = thg.into(); - let mut run = ctx.tx_lock().await; + let tx = ctx.tx(); let di = ft.0.doc_ids.read().await; - let doc_id = di.get_doc_id(&mut run, doc_key).await?; + let doc_id = di.get_doc_id(&tx, doc_key).await?; drop(di); - drop(run); if let Some(doc_id) = doc_id { let term_goals = ft.0.terms_docs.len(); // If there is no terms, it can't be a match @@ -640,18 +639,10 @@ impl QueryExecutor { doc: &Value, ) -> Result { if let Some((e, ft)) = self.get_ft_entry_and_index(hlp.match_ref()) { - let mut run = ctx.tx_lock().await; + let tx = ctx.tx(); let res = ft - .highlight( - &mut run, - thg, - &e.0.query_terms_list, - hlp, - e.0.index_option.id_ref(), - doc, - ) + .highlight(&tx, thg, &e.0.query_terms_list, hlp, e.0.index_option.id_ref(), doc) .await; - drop(run); return res; } Ok(Value::None) @@ -665,9 +656,8 @@ impl QueryExecutor { partial: bool, ) -> Result { if let Some((e, ft)) = self.get_ft_entry_and_index(&match_ref) { - let mut run = ctx.tx_lock().await; - let res = ft.extract_offsets(&mut run, thg, &e.0.query_terms_list, partial).await; - drop(run); + let tx = ctx.tx(); + let res = ft.extract_offsets(&tx, thg, &e.0.query_terms_list, partial).await; return res; } Ok(Value::None) @@ -682,7 +672,7 @@ impl QueryExecutor { ) -> Result { if let Some(e) = self.get_ft_entry(match_ref) { if let Some(scorer) = &e.0.scorer { - let mut run = ctx.tx_lock().await; + let tx = ctx.tx(); let mut doc_id = if let Some(ir) = ir { ir.doc_id() } else { @@ -691,17 +681,15 @@ impl QueryExecutor { if doc_id.is_none() { let key: Key = rid.into(); let di = e.0.doc_ids.read().await; - doc_id = di.get_doc_id(&mut run, key).await?; + doc_id = di.get_doc_id(&tx, key).await?; drop(di); } if let Some(doc_id) = doc_id { - let score = scorer.score(&mut run, doc_id).await?; + let score = scorer.score(&tx, doc_id).await?; if let Some(score) = score { - drop(run); return Ok(Value::from(score)); } } - drop(run); } } Ok(Value::None) @@ -733,8 +721,8 @@ impl FtEntry { if let Matches(qs, _) = io.op() { let (terms_list, terms_set) = ft.extract_querying_terms(stk, ctx, 
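In the planner checkers the same `ctx.tx()` swap removes all the `drop(tx)` bookkeeping: the handle is no longer a mutex guard, so letting it fall out of scope is enough. Condensed from the `MTreeCondChecker` change above:

```rust
let txn = self.ctx.tx();
let rid = doc_ids.get_doc_key(&txn, doc_id).await?.map(|k| k.into());
// No explicit `drop(txn)` needed; nothing stays locked when `txn`
// goes out of scope at the end of the match arm.
```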
opt, qs.to_owned()).await?; - let mut tx = ctx.tx_lock().await; - let terms_docs = Arc::new(ft.get_terms_docs(&mut tx, &terms_list).await?); + let tx = ctx.tx(); + let terms_docs = Arc::new(ft.get_terms_docs(&tx, &terms_list).await?); drop(tx); Ok(Some(Self(Arc::new(Inner { index_option: io, diff --git a/core/src/idx/planner/iterators.rs b/core/src/idx/planner/iterators.rs index 8a5a34ed..e6869c52 100644 --- a/core/src/idx/planner/iterators.rs +++ b/core/src/idx/planner/iterators.rs @@ -6,8 +6,8 @@ use crate::idx::ft::termdocs::TermsDocs; use crate::idx::ft::{FtIndex, HitsIterator}; use crate::idx::planner::plan::RangeValue; use crate::key::index::Index; -use crate::kvs; -use crate::kvs::{Key, Limit, ScanPage}; +use crate::kvs::Key; +use crate::kvs::Transaction; use crate::sql::statements::DefineIndexStatement; use crate::sql::{Array, Ident, Thing, Value}; use radix_trie::Trie; @@ -118,20 +118,20 @@ impl ThingIterator { pub(crate) async fn next_batch( &mut self, ctx: &Context<'_>, - tx: &mut kvs::Transaction, + txn: &Transaction, size: u32, ) -> Result { match self { - Self::IndexEqual(i) => i.next_batch(tx, size).await, - Self::UniqueEqual(i) => i.next_batch(tx).await, - Self::IndexRange(i) => i.next_batch(tx, size).await, - Self::UniqueRange(i) => i.next_batch(tx, size).await, - Self::IndexUnion(i) => i.next_batch(ctx, tx, size).await, - Self::UniqueUnion(i) => i.next_batch(ctx, tx, size).await, - Self::Matches(i) => i.next_batch(ctx, tx, size).await, + Self::IndexEqual(i) => i.next_batch(txn, size).await, + Self::UniqueEqual(i) => i.next_batch(txn).await, + Self::IndexRange(i) => i.next_batch(txn, size).await, + Self::UniqueRange(i) => i.next_batch(txn, size).await, + Self::IndexUnion(i) => i.next_batch(ctx, txn, size).await, + Self::UniqueUnion(i) => i.next_batch(ctx, txn, size).await, + Self::Matches(i) => i.next_batch(ctx, txn, size).await, Self::Knn(i) => i.next_batch(ctx, size).await, - Self::IndexJoin(i) => Box::pin(i.next_batch(ctx, tx, size)).await, - Self::UniqueJoin(i) => Box::pin(i.next_batch(ctx, tx, size)).await, + Self::IndexJoin(i) => Box::pin(i.next_batch(ctx, txn, size)).await, + Self::UniqueJoin(i) => Box::pin(i.next_batch(ctx, txn, size)).await, } } } @@ -164,7 +164,7 @@ impl IndexEqualThingIterator { } async fn next_scan( - tx: &mut kvs::Transaction, + tx: &Transaction, irf: IteratorRef, beg: &mut Vec, end: &[u8], @@ -172,16 +172,7 @@ impl IndexEqualThingIterator { ) -> Result { let min = beg.clone(); let max = end.to_owned(); - let res = tx - .scan_paged( - ScanPage { - range: min..max, - limit: Limit::Limited(limit), - }, - limit, - ) - .await?; - let res = res.values; + let res = tx.scan(min..max, limit).await?; if let Some((key, _)) = res.last() { let mut key = key.clone(); key.push(0x00); @@ -194,7 +185,7 @@ impl IndexEqualThingIterator { async fn next_batch( &mut self, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, ) -> Result { Self::next_scan(tx, self.irf, &mut self.beg, &self.end, limit).await @@ -306,21 +297,12 @@ impl IndexRangeThingIterator { async fn next_batch( &mut self, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, ) -> Result { let min = self.r.beg.clone(); let max = self.r.end.clone(); - let res = tx - .scan_paged( - ScanPage { - range: min..max, - limit: Limit::Limited(limit), - }, - limit, - ) - .await?; - let res = res.values; + let res = tx.scan(min..max, limit).await?; if let Some((key, _)) = res.last() { self.r.beg.clone_from(key); self.r.beg.push(0x00); @@ -369,7 +351,7 @@ impl IndexUnionThingIterator { async fn 
next_batch( &mut self, ctx: &Context<'_>, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, ) -> Result { while let Some(r) = &mut self.current { @@ -423,7 +405,7 @@ impl JoinThingIterator { async fn next_current_remote_batch( &mut self, ctx: &Context<'_>, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, ) -> Result { while !ctx.is_done() { @@ -444,7 +426,7 @@ impl JoinThingIterator { async fn next_current_local( &mut self, ctx: &Context<'_>, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, new_iter: F, ) -> Result @@ -471,7 +453,7 @@ impl JoinThingIterator { async fn next_batch( &mut self, ctx: &Context<'_>, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, new_iter: F, ) -> Result @@ -508,7 +490,7 @@ impl IndexJoinThingIterator { async fn next_batch( &mut self, ctx: &Context<'_>, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, ) -> Result { let new_iter = |ns: &str, db: &str, ix_what: &Ident, ix_name: &Ident, value: Value| { @@ -541,10 +523,7 @@ impl UniqueEqualThingIterator { } } - async fn next_batch( - &mut self, - tx: &mut kvs::Transaction, - ) -> Result { + async fn next_batch(&mut self, tx: &Transaction) -> Result { if let Some(key) = self.key.take() { if let Some(val) = tx.get(key).await? { let record = (val.into(), self.irf.into(), None); @@ -612,7 +591,7 @@ impl UniqueRangeThingIterator { async fn next_batch( &mut self, - tx: &mut kvs::Transaction, + tx: &Transaction, mut limit: u32, ) -> Result { if self.done { @@ -621,17 +600,9 @@ impl UniqueRangeThingIterator { let min = self.r.beg.clone(); let max = self.r.end.clone(); limit += 1; - let res = tx - .scan_paged( - ScanPage { - range: min..max, - limit: Limit::Limited(limit), - }, - limit, - ) - .await?; - let mut records = B::with_capacity(res.values.len()); - for (k, v) in res.values { + let res = tx.scan(min..max, limit).await?; + let mut records = B::with_capacity(res.len()); + for (k, v) in res { limit -= 1; if limit == 0 { self.r.beg = k; @@ -682,7 +653,7 @@ impl UniqueUnionThingIterator { async fn next_batch( &mut self, ctx: &Context<'_>, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, ) -> Result { let limit = limit as usize; @@ -717,7 +688,7 @@ impl UniqueJoinThingIterator { async fn next_batch( &mut self, ctx: &Context<'_>, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, ) -> Result { let new_iter = |ns: &str, db: &str, ix_what: &Ident, ix_name: &Ident, value: Value| { @@ -756,7 +727,7 @@ impl MatchesThingIterator { async fn next_batch( &mut self, ctx: &Context<'_>, - tx: &mut kvs::Transaction, + tx: &Transaction, limit: u32, ) -> Result { if let Some(hits) = &mut self.hits { diff --git a/core/src/idx/planner/tree.rs b/core/src/idx/planner/tree.rs index 3b1b278c..dd4dff9e 100644 --- a/core/src/idx/planner/tree.rs +++ b/core/src/idx/planner/tree.rs @@ -6,7 +6,7 @@ use crate::idx::planner::executor::{ }; use crate::idx::planner::plan::{IndexOperator, IndexOption}; use crate::idx::planner::rewriter::KnnConditionRewriter; -use crate::kvs; +use crate::kvs::Transaction; use crate::sql::index::Index; use crate::sql::statements::{DefineFieldStatement, DefineIndexStatement}; use crate::sql::{ @@ -115,7 +115,7 @@ impl<'a> TreeBuilder<'a> { async fn lazy_load_schema_resolver( &mut self, - tx: &mut kvs::Transaction, + tx: &Transaction, table: &Table, ) -> Result<(), Error> { if self.schemas.contains_key(table) { @@ -198,8 +198,8 @@ impl<'a> TreeBuilder<'a> { } async fn resolve_idiom(&mut self, i: &Idiom) -> Result { - let mut tx = 
self.ctx.tx_lock().await; - self.lazy_load_schema_resolver(&mut tx, self.table).await?; + let tx = self.ctx.tx(); + self.lazy_load_schema_resolver(&tx, self.table).await?; // Try to detect if it matches an index if let Some(schema) = self.schemas.get(self.table).cloned() { @@ -208,12 +208,10 @@ impl<'a> TreeBuilder<'a> { return Ok(Node::IndexedField(i.clone(), irs)); } // Try to detect an indexed record field - if let Some(ro) = self.resolve_record_field(&mut tx, schema.fields.as_ref(), i).await? { - drop(tx); + if let Some(ro) = self.resolve_record_field(&tx, schema.fields.as_ref(), i).await? { return Ok(Node::RecordField(i.clone(), ro)); } } - drop(tx); Ok(Node::NonIndexedField(i.clone())) } @@ -246,7 +244,7 @@ impl<'a> TreeBuilder<'a> { async fn resolve_record_field( &mut self, - tx: &mut kvs::Transaction, + tx: &Transaction, fields: &[DefineFieldStatement], idiom: &Idiom, ) -> Result, Error> { @@ -544,7 +542,7 @@ struct SchemaCache { } impl SchemaCache { - async fn new(opt: &Options, table: &Table, tx: &mut kvs::Transaction) -> Result { + async fn new(opt: &Options, table: &Table, tx: &Transaction) -> Result { let indexes = tx.all_tb_indexes(opt.ns()?, opt.db()?, table).await?; let fields = tx.all_tb_fields(opt.ns()?, opt.db()?, table).await?; Ok(Self { diff --git a/core/src/idx/trees/btree.rs b/core/src/idx/trees/btree.rs index a98aac0d..fcf7d3a7 100644 --- a/core/src/idx/trees/btree.rs +++ b/core/src/idx/trees/btree.rs @@ -287,7 +287,7 @@ where pub async fn search( &self, - tx: &mut Transaction, + tx: &Transaction, store: &BTreeStore, searched_key: &Key, ) -> Result, Error> { @@ -307,7 +307,7 @@ where pub async fn search_mut( &self, - tx: &mut Transaction, + tx: &Transaction, store: &mut BTreeStore, searched_key: &Key, ) -> Result, Error> { @@ -329,7 +329,7 @@ where pub async fn insert( &mut self, - tx: &mut Transaction, + tx: &Transaction, store: &mut BTreeStore, key: Key, payload: Payload, @@ -366,7 +366,7 @@ where async fn insert_non_full( &mut self, - tx: &mut Transaction, + tx: &Transaction, store: &mut BTreeStore, node_id: NodeId, key: Key, @@ -481,7 +481,7 @@ where pub(in crate::idx) async fn delete( &mut self, - tx: &mut Transaction, + tx: &Transaction, store: &mut BTreeStore, key_to_delete: Key, ) -> Result, Error> { @@ -592,7 +592,7 @@ where async fn deleted_from_internal( &mut self, - tx: &mut Transaction, + tx: &Transaction, store: &mut BTreeStore, keys: &mut BK, children: &mut Vec, @@ -669,7 +669,7 @@ where async fn find_highest( &mut self, - tx: &mut Transaction, + tx: &Transaction, store: &mut BTreeStore, node: StoredNode>, ) -> Result<(Key, Payload), Error> { @@ -697,7 +697,7 @@ where async fn find_lowest( &mut self, - tx: &mut Transaction, + tx: &Transaction, store: &mut BTreeStore, node: StoredNode>, ) -> Result<(Key, Payload), Error> { @@ -725,7 +725,7 @@ where async fn deleted_traversal( &mut self, - tx: &mut Transaction, + tx: &Transaction, store: &mut BTreeStore, keys: &mut BK, children: &mut Vec, @@ -949,7 +949,7 @@ where pub(in crate::idx) async fn statistics( &self, - tx: &mut Transaction, + tx: &Transaction, store: &BTreeStore, ) -> Result { let mut stats = BStatistics::default(); @@ -998,7 +998,7 @@ mod tests { }; use crate::idx::trees::store::{NodeId, TreeNode, TreeNodeProvider}; use crate::idx::VersionedSerdeState; - use crate::kvs::{Datastore, Key, LockType::*, ScanPage, Transaction, TransactionType}; + use crate::kvs::{Datastore, Key, LockType::*, Transaction, TransactionType}; use rand::prelude::SliceRandom; use rand::thread_rng; use 
std::cmp::Ordering; @@ -1034,7 +1034,7 @@ mod tests { } async fn insertions_test( - mut tx: Transaction, + tx: Transaction, mut st: BTreeStore, t: &mut BTree, samples_size: usize, @@ -1046,14 +1046,14 @@ mod tests { for i in 0..samples_size { let (key, payload) = sample_provider(i); // Insert the sample - t.insert(&mut tx, &mut st, key, payload).await.unwrap(); + t.insert(&tx, &mut st, key, payload).await.unwrap(); } - st.finish(&mut tx).await.unwrap(); + st.finish(&tx).await.unwrap(); tx.commit().await.unwrap(); } async fn check_insertions( - mut tx: Transaction, + tx: Transaction, st: BTreeStore, t: &mut BTree, samples_size: usize, @@ -1064,7 +1064,7 @@ mod tests { { for i in 0..samples_size { let (key, payload) = sample_provider(i); - assert_eq!(t.search(&mut tx, &st, &key).await.unwrap(), Some(payload)); + assert_eq!(t.search(&tx, &st, &key).await.unwrap(), Some(payload)); } tx.cancel().await.unwrap(); } @@ -1124,9 +1124,9 @@ mod tests { } { - let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await; + let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await; assert_eq!( - t.statistics(&mut tx, &st).await.unwrap(), + t.statistics(&tx, &st).await.unwrap(), BStatistics { keys_count: 100, max_depth: 3, @@ -1154,9 +1154,9 @@ mod tests { } { - let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; + let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; assert_eq!( - t.statistics(&mut tx, &st).await.unwrap(), + t.statistics(&tx, &st).await.unwrap(), BStatistics { keys_count: 100, max_depth: 3, @@ -1188,8 +1188,8 @@ mod tests { } { - let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await; - let s = t.statistics(&mut tx, &st).await.unwrap(); + let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await; + let s = t.statistics(&tx, &st).await.unwrap(); assert_eq!(s.keys_count, 100); tx.cancel().await.unwrap(); } @@ -1215,8 +1215,8 @@ mod tests { } { - let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; - let s = t.statistics(&mut tx, &st).await.unwrap(); + let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; + let s = t.statistics(&tx, &st).await.unwrap(); assert_eq!(s.keys_count, 100); tx.cancel().await.unwrap(); } @@ -1238,9 +1238,9 @@ mod tests { } { - let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await; + let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await; assert_eq!( - t.statistics(&mut tx, &st).await.unwrap(), + t.statistics(&tx, &st).await.unwrap(), BStatistics { keys_count: 10000, max_depth: 3, @@ -1267,9 +1267,9 @@ mod tests { } { - let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, cache_size).await; + let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, cache_size).await; assert_eq!( - t.statistics(&mut tx, &st).await.unwrap(), + t.statistics(&tx, &st).await.unwrap(), BStatistics { keys_count: 10000, max_depth: 3, @@ -1309,8 +1309,8 @@ mod tests { .await; } - let (mut tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await; - let statistics = t.statistics(&mut tx, &st).await.unwrap(); + let (tx, st) = new_operation_fst(&ds, &t, TransactionType::Read, 20).await; + let statistics = t.statistics(&tx, &st).await.unwrap(); tx.cancel().await.unwrap(); statistics } @@ -1327,8 +1327,8 @@ mod tests { .await; } - let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; - let statistics = 
t.statistics(&mut tx, &st).await.unwrap(); + let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; + let statistics = t.statistics(&tx, &st).await.unwrap(); tx.cancel().await.unwrap(); statistics @@ -1417,28 +1417,25 @@ mod tests { let mut t = BTree::::new(BState::new(3)); { - let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await; + let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await; for (key, payload) in CLRS_EXAMPLE { - t.insert(&mut tx, &mut st, key.into(), payload).await.unwrap(); + t.insert(&tx, &mut st, key.into(), payload).await.unwrap(); } - st.finish(&mut tx).await.unwrap(); + st.finish(&tx).await.unwrap(); tx.commit().await.unwrap(); } - let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; + let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; - let s = t.statistics(&mut tx, &st).await.unwrap(); + let s = t.statistics(&tx, &st).await.unwrap(); assert_eq!(s.keys_count, 23); assert_eq!(s.max_depth, 3); assert_eq!(s.nodes_count, 10); // There should be one record per node - assert_eq!( - 10, - tx.scan_paged(ScanPage::from(vec![]..vec![0xf]), 100).await.unwrap().values.len() - ); + assert_eq!(10, tx.scan(vec![]..vec![0xf], 100).await.unwrap().len()); let nodes_count = t - .inspect_nodes(&mut tx, &mut st, |count, depth, node_id, node| match count { + .inspect_nodes(&tx, &mut st, |count, depth, node_id, node| match count { 0 => { assert_eq!(depth, 1); assert_eq!(node_id, 7); @@ -1504,14 +1501,14 @@ mod tests { async fn check_finish_commit( t: &mut BTree, mut st: BTreeStore, - mut tx: Transaction, + tx: Transaction, mut gen: u64, info: String, ) -> Result where BK: BKeys + Clone + Debug, { - if st.finish(&mut tx).await?.is_some() { + if st.finish(&tx).await?.is_some() { t.state.generation += 1; } gen += 1; @@ -1527,9 +1524,9 @@ mod tests { let mut t = BTree::::new(BState::new(3)); let mut check_generation = 0; { - let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await; + let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await; for (key, payload) in CLRS_EXAMPLE { - t.insert(&mut tx, &mut st, key.into(), payload).await?; + t.insert(&tx, &mut st, key.into(), payload).await?; } check_generation = check_finish_commit( &mut t, @@ -1545,10 +1542,10 @@ mod tests { let mut key_count = CLRS_EXAMPLE.len() as u64; for (key, payload) in [("f", 6), ("m", 13), ("g", 7), ("d", 4), ("b", 2)] { { - let (mut tx, mut st) = + let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await; debug!("Delete {}", key); - assert_eq!(t.delete(&mut tx, &mut st, key.into()).await?, Some(payload)); + assert_eq!(t.delete(&tx, &mut st, key.into()).await?, Some(payload)); check_generation = check_finish_commit( &mut t, st, @@ -1560,27 +1557,24 @@ mod tests { } key_count -= 1; { - let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; - let s = t.statistics(&mut tx, &st).await?; + let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; + let s = t.statistics(&tx, &st).await?; assert_eq!(s.keys_count, key_count); } } } - let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; + let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; - let s = t.statistics(&mut tx, &st).await.unwrap(); + let s = t.statistics(&tx, &st).await.unwrap(); assert_eq!(s.keys_count, 18); assert_eq!(s.max_depth, 2); 
assert_eq!(s.nodes_count, 7); // There should be one record per node - assert_eq!( - 7, - tx.scan_paged(ScanPage::from(vec![]..vec![0xf]), 100).await.unwrap().values.len() - ); + assert_eq!(7, tx.scan(vec![]..vec![0xf], 100).await.unwrap().len()); let nodes_count = t - .inspect_nodes(&mut tx, &mut st, |count, depth, node_id, node| match count { + .inspect_nodes(&tx, &mut st, |count, depth, node_id, node| match count { 0 => { assert_eq!(depth, 1); assert_eq!(node_id, 1); @@ -1639,11 +1633,11 @@ mod tests { let mut check_generation = 0; { - let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await; + let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await; for (key, payload) in CLRS_EXAMPLE { expected_keys.insert(key.to_string(), payload); - t.insert(&mut tx, &mut st, key.into(), payload).await?; - let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?; + t.insert(&tx, &mut st, key.into(), payload).await?; + let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?; assert_eq!(expected_keys, tree_keys); } check_generation = check_finish_commit( @@ -1657,8 +1651,8 @@ mod tests { } { - let (mut tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; - print_tree(&mut tx, &mut st, &t).await; + let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; + print_tree(&tx, &mut st, &t).await; tx.cancel().await?; } @@ -1666,11 +1660,10 @@ mod tests { debug!("------------------------"); debug!("Delete {}", key); { - let (mut tx, mut st) = - new_operation_trie(&ds, &t, TransactionType::Write, 20).await; - assert!(t.delete(&mut tx, &mut st, key.into()).await?.is_some()); + let (tx, mut st) = new_operation_trie(&ds, &t, TransactionType::Write, 20).await; + assert!(t.delete(&tx, &mut st, key.into()).await?.is_some()); expected_keys.remove(key); - let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?; + let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?; assert_eq!(expected_keys, tree_keys); check_generation = check_finish_commit( &mut t, @@ -1684,10 +1677,10 @@ mod tests { // Check that every expected keys are still found in the tree { - let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; + let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; for (key, payload) in &expected_keys { assert_eq!( - t.search(&mut tx, &st, &key.as_str().into()).await?, + t.search(&tx, &st, &key.as_str().into()).await?, Some(*payload), "Can't find: {key}", ) @@ -1696,13 +1689,13 @@ mod tests { } } - let (mut tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; - let s = t.statistics(&mut tx, &st).await?; + let (tx, st) = new_operation_trie(&ds, &t, TransactionType::Read, 20).await; + let s = t.statistics(&tx, &st).await?; assert_eq!(s.keys_count, 0); assert_eq!(s.max_depth, 0); assert_eq!(s.nodes_count, 0); // There should not be any record in the database - assert_eq!(0, tx.scan_paged(ScanPage::from(vec![]..vec![0xf]), 100).await?.values.len()); + assert_eq!(0, tx.scan(vec![]..vec![0xf], 100).await.unwrap().len()); tx.cancel().await?; Ok(()) } @@ -1829,37 +1822,37 @@ mod tests { ]; let mut keys = BTreeMap::new(); { - let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await; + let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await; for term in terms { - t.insert(&mut tx, &mut st, term.into(), 0).await?; + t.insert(&tx, &mut st, term.into(), 
0).await?; keys.insert(term.to_string(), 0); - let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?; + let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?; assert_eq!(keys, tree_keys); } - st.finish(&mut tx).await?; + st.finish(&tx).await?; tx.commit().await?; } { - let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Read, 100).await; - print_tree(&mut tx, &mut st, &t).await; + let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Read, 100).await; + print_tree(&tx, &mut st, &t).await; } { - let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await; + let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await; for term in terms { debug!("Delete {term}"); - t.delete(&mut tx, &mut st, term.into()).await?; - print_tree_mut(&mut tx, &mut st, &t).await; + t.delete(&tx, &mut st, term.into()).await?; + print_tree_mut(&tx, &mut st, &t).await; keys.remove(term); - let (_, tree_keys) = check_btree_properties(&t, &mut tx, &mut st).await?; + let (_, tree_keys) = check_btree_properties(&t, &tx, &mut st).await?; assert_eq!(keys, tree_keys); } - st.finish(&mut tx).await?; + st.finish(&tx).await?; tx.commit().await?; } { - let (mut tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await; - assert_eq!(check_btree_properties(&t, &mut tx, &mut st).await?.0, 0); - st.finish(&mut tx).await?; + let (tx, mut st) = new_operation_fst(&ds, &t, TransactionType::Write, 100).await; + assert_eq!(check_btree_properties(&t, &tx, &mut st).await?.0, 0); + st.finish(&tx).await?; tx.cancel().await?; } Ok(()) @@ -1867,7 +1860,7 @@ mod tests { async fn check_btree_properties( t: &BTree, - tx: &mut Transaction, + tx: &Transaction, st: &mut BTreeStore, ) -> Result<(usize, BTreeMap), Error> where @@ -1919,7 +1912,7 @@ mod tests { } } - async fn print_tree(tx: &mut Transaction, st: &mut BTreeStore, t: &BTree) + async fn print_tree(tx: &Transaction, st: &mut BTreeStore, t: &BTree) where BK: BKeys + Debug + Clone, { @@ -1932,7 +1925,7 @@ mod tests { debug!("----------------------------------"); } - async fn print_tree_mut(tx: &mut Transaction, st: &mut BTreeStore, t: &BTree) + async fn print_tree_mut(tx: &Transaction, st: &mut BTreeStore, t: &BTree) where BK: BKeys + Debug + Clone, { @@ -1967,7 +1960,7 @@ mod tests { /// This is for debugging async fn inspect_nodes( &self, - tx: &mut Transaction, + tx: &Transaction, st: &mut BTreeStore, inspect_func: F, ) -> Result @@ -1996,7 +1989,7 @@ mod tests { /// This is for debugging async fn inspect_nodes_mut( &self, - tx: &mut Transaction, + tx: &Transaction, st: &mut BTreeStore, mut inspect_func: F, ) -> Result diff --git a/core/src/idx/trees/mtree.rs b/core/src/idx/trees/mtree.rs index f1753d90..da94b986 100644 --- a/core/src/idx/trees/mtree.rs +++ b/core/src/idx/trees/mtree.rs @@ -48,16 +48,16 @@ struct MTreeSearchContext<'a> { impl MTreeIndex { pub async fn new( ixs: &IndexStores, - tx: &mut Transaction, + txn: &Transaction, ikb: IndexKeyBase, p: &MTreeParams, tt: TransactionType, ) -> Result { let doc_ids = Arc::new(RwLock::new( - DocIds::new(ixs, tx, tt, ikb.clone(), p.doc_ids_order, p.doc_ids_cache).await?, + DocIds::new(ixs, txn, tt, ikb.clone(), p.doc_ids_order, p.doc_ids_cache).await?, )); let state_key = ikb.new_vm_key(None); - let state: MState = if let Some(val) = tx.get(state_key.clone()).await? { + let state: MState = if let Some(val) = txn.get(state_key.clone()).await? { MState::try_from_val(val)? 
} else { MState::new(p.capacity) @@ -81,16 +81,17 @@ impl MTreeIndex { store, }) } + pub async fn index_document( &mut self, stk: &mut Stk, - tx: &mut Transaction, + txn: &Transaction, rid: &Thing, content: &Vec, ) -> Result<(), Error> { // Resolve the doc_id let mut doc_ids = self.doc_ids.write().await; - let resolved = doc_ids.resolve_doc_id(tx, rid.into()).await?; + let resolved = doc_ids.resolve_doc_id(txn, rid.into()).await?; let doc_id = *resolved.doc_id(); drop(doc_ids); // Index the values @@ -100,12 +101,37 @@ impl MTreeIndex { let vector = Vector::try_from_value(self.vector_type, self.dim, v)?; vector.check_dimension(self.dim)?; // Insert the vector in the index - mtree.insert(stk, tx, &mut self.store, vector.into(), doc_id).await?; + mtree.insert(stk, txn, &mut self.store, vector.into(), doc_id).await?; } drop(mtree); Ok(()) } + pub async fn remove_document( + &mut self, + stk: &mut Stk, + txn: &Transaction, + rid: &Thing, + content: &Vec, + ) -> Result<(), Error> { + let mut doc_ids = self.doc_ids.write().await; + let doc_id = doc_ids.remove_doc(txn, rid.into()).await?; + drop(doc_ids); + if let Some(doc_id) = doc_id { + // Lock the index + let mut mtree = self.mtree.write().await; + for v in content { + // Extract the vector + let vector = Vector::try_from_value(self.vector_type, self.dim, v)?; + vector.check_dimension(self.dim)?; + // Remove the vector + mtree.delete(stk, txn, &mut self.store, vector.into(), doc_id).await?; + } + drop(mtree); + } + Ok(()) + } + pub async fn knn_search( &self, stk: &mut Stk, @@ -136,38 +162,13 @@ impl MTreeIndex { res } - pub async fn remove_document( - &mut self, - stk: &mut Stk, - tx: &mut Transaction, - rid: &Thing, - content: &Vec, - ) -> Result<(), Error> { - let mut doc_ids = self.doc_ids.write().await; - let doc_id = doc_ids.remove_doc(tx, rid.into()).await?; - drop(doc_ids); - if let Some(doc_id) = doc_id { - // Lock the index - let mut mtree = self.mtree.write().await; - for v in content { - // Extract the vector - let vector = Vector::try_from_value(self.vector_type, self.dim, v)?; - vector.check_dimension(self.dim)?; - // Remove the vector - mtree.delete(stk, tx, &mut self.store, vector.into(), doc_id).await?; - } - drop(mtree); - } - Ok(()) - } - - pub(crate) async fn statistics(&self, tx: &mut Transaction) -> Result { + pub(crate) async fn statistics(&self, tx: &Transaction) -> Result { Ok(MtStatistics { doc_ids: self.doc_ids.read().await.statistics(tx).await?, }) } - pub async fn finish(&mut self, tx: &mut Transaction) -> Result<(), Error> { + pub async fn finish(&mut self, tx: &Transaction) -> Result<(), Error> { let mut doc_ids = self.doc_ids.write().await; doc_ids.finish(tx).await?; drop(doc_ids); @@ -296,7 +297,7 @@ impl MTree { async fn insert( &mut self, stk: &mut Stk, - tx: &mut Transaction, + tx: &Transaction, store: &mut MTreeStore, obj: SharedVector, id: DocId, @@ -368,7 +369,7 @@ impl MTree { async fn append( &mut self, - tx: &mut Transaction, + tx: &Transaction, store: &mut MTreeStore, object: &SharedVector, id: DocId, @@ -406,7 +407,7 @@ impl MTree { async fn insert_at_node( &mut self, stk: &mut Stk, - tx: &mut Transaction, + tx: &Transaction, store: &mut MTreeStore, node: MStoredNode, parent_center: &Option, @@ -442,7 +443,7 @@ impl MTree { async fn insert_node_internal( &mut self, stk: &mut Stk, - tx: &mut Transaction, + tx: &Transaction, store: &mut MTreeStore, node_id: NodeId, node_key: Key, @@ -749,7 +750,7 @@ impl MTree { async fn delete( &mut self, stk: &mut Stk, - tx: &mut Transaction, + tx: &Transaction, 
store: &mut MTreeStore, object: SharedVector, doc_id: DocId, @@ -795,7 +796,7 @@ impl MTree { async fn delete_at_node( &mut self, stk: &mut Stk, - tx: &mut Transaction, + tx: &Transaction, store: &mut MTreeStore, node: MStoredNode, parent_center: &Option, @@ -844,7 +845,7 @@ impl MTree { async fn delete_node_internal( &mut self, stk: &mut Stk, - tx: &mut Transaction, + tx: &Transaction, store: &mut MTreeStore, node_id: NodeId, node_key: Key, @@ -975,7 +976,7 @@ impl MTree { #[allow(clippy::too_many_arguments)] async fn deletion_underflown( &mut self, - tx: &mut Transaction, + tx: &Transaction, store: &mut MTreeStore, parent_center: &Option, n_node: &mut InternalNode, @@ -1471,16 +1472,9 @@ impl VersionedSerdeState for MState {} #[cfg(test)] mod tests { - use futures::lock::Mutex; - use hashbrown::{HashMap, HashSet}; - use reblessive::tree::Stk; - use std::collections::VecDeque; - use std::sync::Arc; use crate::ctx::Context; use crate::err::Error; - use test_log::test; - use crate::idx::docids::{DocId, DocIds}; use crate::idx::planner::checker::MTreeConditionChecker; use crate::idx::trees::knn::tests::TestCollection; @@ -1492,6 +1486,10 @@ mod tests { use crate::kvs::Transaction; use crate::kvs::{Datastore, TransactionType}; use crate::sql::index::{Distance, VectorType}; + use hashbrown::{HashMap, HashSet}; + use reblessive::tree::Stk; + use std::collections::VecDeque; + use test_log::test; async fn new_operation<'a>( ds: &Datastore, @@ -1503,15 +1501,15 @@ mod tests { .index_store() .get_store_mtree(TreeNodeProvider::Debug, t.state.generation, tt, cache_size) .await; - let tx = Arc::new(Mutex::new(ds.transaction(tt, Optimistic).await.unwrap())); - let ctx = Context::default().set_transaction(tx); + let tx = ds.transaction(tt, Optimistic).await.unwrap().enclose(); + let ctx = Context::default().with_transaction(tx); (ctx, st) } async fn finish_operation( ds: &Datastore, t: &mut MTree, - tx: &mut Transaction, + tx: &Transaction, mut st: TreeStore, commit: bool, ) -> Result<(), Error> { @@ -1540,18 +1538,16 @@ mod tests { for (doc_id, obj) in collection.to_vec_ref() { { let (ctx, mut st) = new_operation(ds, t, TransactionType::Write, cache_size).await; - let mut tx = ctx.tx_lock().await; - t.insert(stk, &mut tx, &mut st, obj.clone(), *doc_id).await?; - finish_operation(ds, t, &mut tx, st, true).await?; - drop(tx); + let tx = ctx.tx(); + t.insert(stk, &tx, &mut st, obj.clone(), *doc_id).await?; + finish_operation(ds, t, &tx, st, true).await?; map.insert(*doc_id, obj.clone()); } c += 1; { let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await; - let mut tx = ctx.tx_lock().await; - let p = check_tree_properties(&mut tx, &mut st, t).await?; - drop(tx); + let tx = ctx.tx(); + let p = check_tree_properties(&tx, &mut st, t).await?; assert_eq!(p.doc_count, c); } } @@ -1568,19 +1564,17 @@ mod tests { let mut map = HashMap::with_capacity(collection.len()); { let (ctx, mut st) = new_operation(ds, t, TransactionType::Write, cache_size).await; - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); for (doc_id, obj) in collection.to_vec_ref() { - t.insert(stk, &mut tx, &mut st, obj.clone(), *doc_id).await?; + t.insert(stk, &tx, &mut st, obj.clone(), *doc_id).await?; map.insert(*doc_id, obj.clone()); } - finish_operation(ds, t, &mut tx, st, true).await?; - drop(tx); + finish_operation(ds, t, &tx, st, true).await?; } { let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await; - let mut tx = ctx.tx_lock().await; - check_tree_properties(&mut tx, &mut st, 
t).await?; - drop(tx); + let tx = ctx.tx(); + check_tree_properties(&tx, &mut st, t).await?; } Ok(map) } @@ -1598,9 +1592,9 @@ mod tests { let deleted = { debug!("### Remove {} {:?}", doc_id, obj); let (ctx, mut st) = new_operation(ds, t, TransactionType::Write, cache_size).await; - let mut tx = ctx.tx_lock().await; - let deleted = t.delete(stk, &mut tx, &mut st, obj.clone(), *doc_id).await?; - finish_operation(ds, t, &mut tx, st, true).await?; + let tx = ctx.tx(); + let deleted = t.delete(stk, &tx, &mut st, obj.clone(), *doc_id).await?; + finish_operation(ds, t, &tx, st, true).await?; drop(tx); deleted }; @@ -1627,16 +1621,16 @@ mod tests { } { let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await; - let mut tx = ctx.tx_lock().await; - check_tree_properties(&mut tx, &mut st, t).await?; + let tx = ctx.tx(); + check_tree_properties(&tx, &mut st, t).await?; drop(tx); } } if all_deleted { let (ctx, mut st) = new_operation(ds, t, TransactionType::Read, cache_size).await; - let mut tx = ctx.tx_lock().await; - check_tree_properties(&mut tx, &mut st, t).await?.check(0, 0, None, None, 0, 0); + let tx = ctx.tx(); + check_tree_properties(&tx, &mut st, t).await?.check(0, 0, None, None, 0, 0); drop(tx); } Ok(()) @@ -1677,9 +1671,8 @@ mod tests { if expected_len != res.docs.len() { #[cfg(debug_assertions)] debug!("{:?}", res.visited_nodes); - let mut tx = ctx.tx_lock().await; - check_tree_properties(&mut tx, &mut st, t).await?; - drop(tx); + let tx = ctx.tx(); + check_tree_properties(&tx, &mut st, t).await?; } assert_eq!( expected_len, @@ -1761,10 +1754,10 @@ mod tests { let mut t = MTree::new(MState::new(*capacity), distance.clone()); let (ctx, _st) = new_operation(&ds, &t, TransactionType::Read, cache_size).await; - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); let doc_ids = DocIds::new( ds.index_store(), - &mut tx, + &tx, TransactionType::Read, IndexKeyBase::default(), 7, @@ -1772,7 +1765,6 @@ mod tests { ) .await .unwrap(); - drop(tx); let map = if collection.len() < 1000 { insert_collection_one_by_one(stk, &ds, &mut t, &collection, cache_size).await? 
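[Editor's note] The hunks above and below all repeat one calling-convention change, so a condensed illustration may help. What follows is a minimal, self-contained sketch — toy types, not the real SurrealDB API — of the pattern this refactor converges on: `Context::tx()` hands out a cheap transaction handle, the handle synchronises internally, and helpers borrow `&Transaction` instead of awaiting `ctx.tx_lock()` and remembering to `drop(tx)` afterwards. The simplified `scan(range, limit)` shape that replaces `scan_paged(ScanPage { range, limit }, limit)` is sketched too. The names `Key`, `Val`, the in-memory `BTreeMap` store, and the synchronous signatures are illustrative assumptions; the real methods are async and live on the crate's own types.

    use std::collections::BTreeMap;
    use std::ops::Range;
    use std::sync::{Arc, Mutex};

    type Key = Vec<u8>;
    type Val = Vec<u8>;

    #[derive(Clone, Default)]
    pub struct Transaction {
        // Interior mutability: the lock lives inside the transaction,
        // so every public method only needs `&self`.
        inner: Arc<Mutex<BTreeMap<Key, Val>>>,
    }

    impl Transaction {
        pub fn set(&self, key: Key, val: Val) {
            self.inner.lock().unwrap().insert(key, val);
        }
        // The simplified scan: a plain range plus a limit, standing in
        // for the old `scan_paged(ScanPage { .. }, ..)` call shape.
        pub fn scan(&self, rng: Range<Key>, limit: u32) -> Vec<(Key, Val)> {
            self.inner
                .lock()
                .unwrap()
                .range(rng)
                .take(limit as usize)
                .map(|(k, v)| (k.clone(), v.clone()))
                .collect()
        }
    }

    #[derive(Default)]
    pub struct Context {
        txn: Transaction,
    }

    impl Context {
        // No `.await`, no mutex guard to drop at the call site:
        // just a shareable handle.
        pub fn tx(&self) -> Transaction {
            self.txn.clone()
        }
    }

    fn main() {
        let ctx = Context::default();
        let tx = ctx.tx();
        tx.set(b"a1".to_vec(), b"v".to_vec());
        let res = tx.scan(b"a".to_vec()..b"b".to_vec(), 100);
        assert_eq!(res.len(), 1);
    }

Holding the synchronisation inside the transaction is what lets every `&mut Transaction` parameter in these hunks relax to `&Transaction`, and is why the scattered `drop(tx)` / `drop(run)` bookkeeping disappears from the call sites.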
@@ -2078,7 +2070,7 @@ mod tests { } async fn check_tree_properties( - tx: &mut Transaction, + tx: &Transaction, st: &mut MTreeStore, t: &MTree, ) -> Result { diff --git a/core/src/idx/trees/store/cache.rs b/core/src/idx/trees/store/cache.rs index e77d2c50..abf98a7f 100644 --- a/core/src/idx/trees/store/cache.rs +++ b/core/src/idx/trees/store/cache.rs @@ -131,7 +131,7 @@ where pub(super) async fn get_node( &self, - tx: &mut Transaction, + tx: &Transaction, node_id: NodeId, ) -> Result>, Error> { match self { @@ -208,7 +208,7 @@ where async fn get_node( &self, - tx: &mut Transaction, + tx: &Transaction, node_id: NodeId, ) -> Result>, Error> { if let Some(n) = self.lru.get(node_id).await { @@ -260,7 +260,7 @@ where pub(super) async fn get_node( &self, - tx: &mut Transaction, + tx: &Transaction, node_id: NodeId, ) -> Result>, Error> { match self.cache.entry(node_id) { diff --git a/core/src/idx/trees/store/mod.rs b/core/src/idx/trees/store/mod.rs index f3580545..70fc6ccb 100644 --- a/core/src/idx/trees/store/mod.rs +++ b/core/src/idx/trees/store/mod.rs @@ -47,7 +47,7 @@ where pub(in crate::idx) async fn get_node_mut( &mut self, - tx: &mut Transaction, + tx: &Transaction, node_id: NodeId, ) -> Result, Error> { match self { @@ -58,7 +58,7 @@ where pub(in crate::idx) async fn get_node( &self, - tx: &mut Transaction, + tx: &Transaction, node_id: NodeId, ) -> Result>, Error> { match self { @@ -74,10 +74,8 @@ where ) -> Result>, Error> { match self { Self::Read(r) => { - let mut tx = ctx.tx_lock().await; - let n = r.get_node(&mut tx, node_id).await; - drop(tx); - n + let tx = ctx.tx(); + r.get_node(&tx, node_id).await } _ => Err(Error::Unreachable("TreeStore::get_node_txn")), } @@ -112,7 +110,7 @@ where } } - pub async fn finish(&mut self, tx: &mut Transaction) -> Result>, Error> { + pub async fn finish(&mut self, tx: &Transaction) -> Result>, Error> { match self { Self::Write(w) => w.finish(tx).await, _ => Ok(None), @@ -143,7 +141,7 @@ impl TreeNodeProvider { } } - async fn load(&self, tx: &mut Transaction, id: NodeId) -> Result, Error> + async fn load(&self, tx: &Transaction, id: NodeId) -> Result, Error> where N: TreeNode + Clone, { @@ -157,7 +155,7 @@ impl TreeNodeProvider { } } - async fn save(&self, tx: &mut Transaction, node: &mut StoredNode) -> Result<(), Error> + async fn save(&self, tx: &Transaction, node: &mut StoredNode) -> Result<(), Error> where N: TreeNode + Clone + Display, { @@ -290,20 +288,16 @@ impl IndexStores { pub(crate) async fn index_removed( &self, - tx: &mut Transaction, + tx: &Transaction, ns: &str, db: &str, tb: &str, ix: &str, ) -> Result<(), Error> { - self.remove_index(ns, db, tx.get_and_cache_tb_index(ns, db, tb, ix).await?.as_ref()).await + self.remove_index(ns, db, tx.get_tb_index(ns, db, tb, ix).await?.as_ref()).await } - pub(crate) async fn namespace_removed( - &self, - tx: &mut Transaction, - ns: &str, - ) -> Result<(), Error> { + pub(crate) async fn namespace_removed(&self, tx: &Transaction, ns: &str) -> Result<(), Error> { for db in tx.all_db(ns).await?.iter() { self.database_removed(tx, ns, &db.name).await?; } @@ -312,7 +306,7 @@ impl IndexStores { pub(crate) async fn database_removed( &self, - tx: &mut Transaction, + tx: &Transaction, ns: &str, db: &str, ) -> Result<(), Error> { @@ -324,7 +318,7 @@ impl IndexStores { pub(crate) async fn table_removed( &self, - tx: &mut Transaction, + tx: &Transaction, ns: &str, db: &str, tb: &str, diff --git a/core/src/idx/trees/store/tree.rs b/core/src/idx/trees/store/tree.rs index e8d51e00..89090d3b 100644 --- 
a/core/src/idx/trees/store/tree.rs +++ b/core/src/idx/trees/store/tree.rs @@ -41,7 +41,7 @@ where pub(super) async fn get_node_mut( &mut self, - tx: &mut Transaction, + tx: &Transaction, node_id: NodeId, ) -> Result, Error> { #[cfg(debug_assertions)] @@ -95,10 +95,7 @@ where Ok(()) } - pub(super) async fn finish( - &mut self, - tx: &mut Transaction, - ) -> Result>, Error> { + pub(super) async fn finish(&mut self, tx: &Transaction) -> Result>, Error> { #[cfg(debug_assertions)] { if !self.out.is_empty() { @@ -167,7 +164,7 @@ where pub(super) async fn get_node( &self, - tx: &mut Transaction, + tx: &Transaction, node_id: NodeId, ) -> Result>, Error> { let r = self.cache.get_node(tx, node_id).await?; diff --git a/core/src/idx/trees/vector.rs b/core/src/idx/trees/vector.rs index be964991..d06ffa18 100644 --- a/core/src/idx/trees/vector.rs +++ b/core/src/idx/trees/vector.rs @@ -545,11 +545,10 @@ mod tests { assert_eq!(dist.compute(&v1, &v2).unwrap(), res.into()); // Check the "Vector" optimised implementations - for t in [VectorType::F64] { - let v1: SharedVector = Vector::try_from_vector(t, &v1).unwrap().into(); - let v2: SharedVector = Vector::try_from_vector(t, &v2).unwrap().into(); - assert_eq!(dist.calculate(&v1, &v2), res); - } + let t = VectorType::F64; + let v1: SharedVector = Vector::try_from_vector(t, &v1).unwrap().into(); + let v2: SharedVector = Vector::try_from_vector(t, &v2).unwrap().into(); + assert_eq!(dist.calculate(&v1, &v2), res); } fn test_distance_collection(dist: Distance, size: usize, dim: usize) { diff --git a/core/src/key/error.rs b/core/src/key/category.rs similarity index 55% rename from core/src/key/error.rs rename to core/src/key/category.rs index bd70a856..fda1100f 100644 --- a/core/src/key/error.rs +++ b/core/src/key/category.rs @@ -1,17 +1,19 @@ use std::fmt::{Display, Formatter}; +#[allow(unused)] +pub(crate) trait Categorise { + /// Returns the category of the key for error reporting + fn categorise(&self) -> Category; +} + #[derive(Debug, Copy, Clone)] #[non_exhaustive] -pub enum KeyCategory { - /// This category is reserved for cases when we do not know the category - /// It should be caught and re-populated with the correct category where appropriate - Unknown, +#[allow(unused)] +pub enum Category { /// crate::key::root::all / Root, /// crate::key::root::ac /!ac{ac} Access, - /// crate::key::root::hb /!hb{ts}/{nd} - Heartbeat, /// crate::key::root::nd /!nd{nd} Node, /// crate::key::root::ni /!ni @@ -21,24 +23,33 @@ pub enum KeyCategory { /// crate::key::root::us /!us{us} User, /// + /// ------------------------------ + /// /// crate::key::node::all /${nd} NodeRoot, /// crate::key::node::lq /${nd}!lq{lq}{ns}{db} NodeLiveQuery, /// + /// ------------------------------ + /// + /// crate::key::namespace::di /+{ni}!di + DatabaseIdentifier, + /// crate::key::database::ti /+{ni}*{di}!ti + DatabaseTableIdentifier, + /// + /// ------------------------------ + /// /// crate::key::namespace::all /*{ns} NamespaceRoot, /// crate::key::namespace::db /*{ns}!db{db} DatabaseAlias, - /// crate::key::namespace::di /+{ns id}!di - DatabaseIdentifier, - /// crate::key::namespace::lg /*{ns}!lg{lg} - DatabaseLogAlias, /// crate::key::namespace::ac /*{ns}!ac{ac} NamespaceAccess, /// crate::key::namespace::us /*{ns}!us{us} NamespaceUser, /// + /// ------------------------------ + /// /// crate::key::database::all /*{ns}*{db} DatabaseRoot, /// crate::key::database::ac /*{ns}*{db}!ac{ac} @@ -47,16 +58,12 @@ pub enum KeyCategory { DatabaseAnalyzer, /// crate::key::database::fc 
/*{ns}*{db}!fn{fc} DatabaseFunction, - /// crate::key::database::lg /*{ns}*{db}!lg{lg} - DatabaseLog, /// crate::key::database::ml /*{ns}*{db}!ml{ml}{vn} DatabaseModel, /// crate::key::database::pa /*{ns}*{db}!pa{pa} DatabaseParameter, /// crate::key::database::tb /*{ns}*{db}!tb{tb} DatabaseTable, - /// crate::key::database::ti /+{ns id}*{db id}!ti - DatabaseTableIdentifier, /// crate::key::database::ts /*{ns}*{db}!ts{ts} DatabaseTimestamp, /// crate::key::database::us /*{ns}*{db}!us{us} @@ -64,6 +71,8 @@ pub enum KeyCategory { /// crate::key::database::vs /*{ns}*{db}!vs DatabaseVersionstamp, /// + /// ------------------------------ + /// /// crate::key::table::all /*{ns}*{db}*{tb} TableRoot, /// crate::key::table::ev /*{ns}*{db}*{tb}!ev{ev} @@ -77,6 +86,8 @@ pub enum KeyCategory { /// crate::key::table::lq /*{ns}*{db}*{tb}!lq{lq} TableLiveQuery, /// + /// ------------------------------ + /// /// crate::key::index::all /*{ns}*{db}*{tb}+{ix} IndexRoot, /// crate::key::index::bc /*{ns}*{db}*{tb}+{ix}!bc{id} @@ -104,69 +115,71 @@ pub enum KeyCategory { /// crate::key::index /*{ns}*{db}*{tb}+{ix}*{fd}{id} Index, /// + /// ------------------------------ + /// /// crate::key::change /*{ns}*{db}#{ts} ChangeFeed, /// + /// ------------------------------ + /// /// crate::key::thing /*{ns}*{db}*{tb}*{id} Thing, /// + /// ------------------------------ + /// /// crate::key::graph /*{ns}*{db}*{tb}~{id}{eg}{fk} Graph, } -impl Display for KeyCategory { +impl Display for Category { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let name = match self { - KeyCategory::Unknown => "Unknown", - KeyCategory::Root => "Root", - KeyCategory::Access => "Access", - KeyCategory::Heartbeat => "Heartbeat", - KeyCategory::Node => "Node", - KeyCategory::NamespaceIdentifier => "NamespaceIdentifier", - KeyCategory::Namespace => "Namespace", - KeyCategory::User => "User", - KeyCategory::NodeRoot => "NodeRoot", - KeyCategory::NodeLiveQuery => "NodeLiveQuery", - KeyCategory::NamespaceRoot => "NamespaceRoot", - KeyCategory::DatabaseAlias => "DatabaseAlias", - KeyCategory::DatabaseIdentifier => "DatabaseIdentifier", - KeyCategory::DatabaseLogAlias => "DatabaseLogAlias", - KeyCategory::NamespaceAccess => "NamespaceAccess", - KeyCategory::NamespaceUser => "NamespaceUser", - KeyCategory::DatabaseRoot => "DatabaseRoot", - KeyCategory::DatabaseAccess => "DatabaseAccess", - KeyCategory::DatabaseAnalyzer => "DatabaseAnalyzer", - KeyCategory::DatabaseFunction => "DatabaseFunction", - KeyCategory::DatabaseLog => "DatabaseLog", - KeyCategory::DatabaseModel => "DatabaseModel", - KeyCategory::DatabaseParameter => "DatabaseParameter", - KeyCategory::DatabaseTable => "DatabaseTable", - KeyCategory::DatabaseTableIdentifier => "DatabaseTableIdentifier", - KeyCategory::DatabaseTimestamp => "DatabaseTimestamp", - KeyCategory::DatabaseUser => "DatabaseUser", - KeyCategory::DatabaseVersionstamp => "DatabaseVersionstamp", - KeyCategory::TableRoot => "TableRoot", - KeyCategory::TableEvent => "TableEvent", - KeyCategory::TableField => "TableField", - KeyCategory::TableView => "TableView", - KeyCategory::IndexDefinition => "IndexDefinition", - KeyCategory::TableLiveQuery => "TableLiveQuery", - KeyCategory::IndexRoot => "IndexRoot", - KeyCategory::IndexTermDocList => "IndexTermDocList", - KeyCategory::IndexBTreeNode => "IndexBTreeNode", - KeyCategory::IndexTermDocFrequency => "IndexTermDocFrequency", - KeyCategory::IndexDocKeys => "IndexDocKeys", - KeyCategory::IndexTermList => "IndexTermList", - KeyCategory::IndexBTreeNodeDocLengths => 
"IndexBTreeNodeDocLengths", - KeyCategory::IndexOffset => "IndexOffset", - KeyCategory::IndexBTreeNodePostings => "IndexBTreeNodePostings", - KeyCategory::IndexFullTextState => "IndexFullTextState", - KeyCategory::IndexBTreeNodeTerms => "IndexBTreeNodeTerms", - KeyCategory::IndexTerms => "IndexTerms", - KeyCategory::Index => "Index", - KeyCategory::ChangeFeed => "ChangeFeed", - KeyCategory::Thing => "Thing", - KeyCategory::Graph => "Graph", + Self::Root => "Root", + Self::Access => "Access", + Self::Node => "Node", + Self::NamespaceIdentifier => "NamespaceIdentifier", + Self::Namespace => "Namespace", + Self::User => "User", + Self::NodeRoot => "NodeRoot", + Self::NodeLiveQuery => "NodeLiveQuery", + Self::NamespaceRoot => "NamespaceRoot", + Self::DatabaseAlias => "DatabaseAlias", + Self::DatabaseIdentifier => "DatabaseIdentifier", + Self::NamespaceAccess => "NamespaceAccess", + Self::NamespaceUser => "NamespaceUser", + Self::DatabaseRoot => "DatabaseRoot", + Self::DatabaseAccess => "DatabaseAccess", + Self::DatabaseAnalyzer => "DatabaseAnalyzer", + Self::DatabaseFunction => "DatabaseFunction", + Self::DatabaseModel => "DatabaseModel", + Self::DatabaseParameter => "DatabaseParameter", + Self::DatabaseTable => "DatabaseTable", + Self::DatabaseTableIdentifier => "DatabaseTableIdentifier", + Self::DatabaseTimestamp => "DatabaseTimestamp", + Self::DatabaseUser => "DatabaseUser", + Self::DatabaseVersionstamp => "DatabaseVersionstamp", + Self::TableRoot => "TableRoot", + Self::TableEvent => "TableEvent", + Self::TableField => "TableField", + Self::TableView => "TableView", + Self::IndexDefinition => "IndexDefinition", + Self::TableLiveQuery => "TableLiveQuery", + Self::IndexRoot => "IndexRoot", + Self::IndexTermDocList => "IndexTermDocList", + Self::IndexBTreeNode => "IndexBTreeNode", + Self::IndexTermDocFrequency => "IndexTermDocFrequency", + Self::IndexDocKeys => "IndexDocKeys", + Self::IndexTermList => "IndexTermList", + Self::IndexBTreeNodeDocLengths => "IndexBTreeNodeDocLengths", + Self::IndexOffset => "IndexOffset", + Self::IndexBTreeNodePostings => "IndexBTreeNodePostings", + Self::IndexFullTextState => "IndexFullTextState", + Self::IndexBTreeNodeTerms => "IndexBTreeNodeTerms", + Self::IndexTerms => "IndexTerms", + Self::Index => "Index", + Self::ChangeFeed => "ChangeFeed", + Self::Thing => "Thing", + Self::Graph => "Graph", }; write!(f, "{}", name) } diff --git a/core/src/key/change/mod.rs b/core/src/key/change/mod.rs index fde24d77..1830ba29 100644 --- a/core/src/key/change/mod.rs +++ b/core/src/key/change/mod.rs @@ -1,11 +1,9 @@ -/// Stores change feeds +//! Stores change feeds +use crate::key::category::Categorise; +use crate::key::category::Category; +use crate::vs; use derive::Key; use serde::{Deserialize, Serialize}; - -use crate::vs; - -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; use std::str; // Cf stands for change feeds @@ -73,9 +71,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec { k } -impl KeyRequirements for Cf<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::ChangeFeed +impl Categorise for Cf<'_> { + fn categorise(&self) -> Category { + Category::ChangeFeed } } diff --git a/core/src/key/database/ac.rs b/core/src/key/database/ac.rs index 519a6e38..76ec9e35 100644 --- a/core/src/key/database/ac.rs +++ b/core/src/key/database/ac.rs @@ -1,6 +1,6 @@ -/// Stores a DEFINE ACCESS ON DATABASE config definition -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +//! 
Stores a DEFINE ACCESS ON DATABASE config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec { k } -impl KeyRequirements for Ac<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseAccess +impl Categorise for Ac<'_> { + fn categorise(&self) -> Category { + Category::DatabaseAccess } } diff --git a/core/src/key/database/all.rs b/core/src/key/database/all.rs index d5169c78..c0cabdc0 100644 --- a/core/src/key/database/all.rs +++ b/core/src/key/database/all.rs @@ -1,6 +1,6 @@ //! Stores the key prefix for all keys under a database -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -18,9 +18,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str) -> All<'a> { All::new(ns, db) } -impl KeyRequirements for All<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseRoot +impl Categorise for All<'_> { + fn categorise(&self) -> Category { + Category::DatabaseRoot } } diff --git a/core/src/key/database/az.rs b/core/src/key/database/az.rs index 38afc063..131d8717 100644 --- a/core/src/key/database/az.rs +++ b/core/src/key/database/az.rs @@ -1,6 +1,6 @@ //! Stores a DEFINE ANALYZER config definition -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec { k } -impl KeyRequirements for Az<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseAnalyzer +impl Categorise for Az<'_> { + fn categorise(&self) -> Category { + Category::DatabaseAnalyzer } } diff --git a/core/src/key/database/fc.rs b/core/src/key/database/fc.rs index 788e19d8..5aeddd3c 100644 --- a/core/src/key/database/fc.rs +++ b/core/src/key/database/fc.rs @@ -1,6 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; -/// Stores a DEFINE FUNCTION config definition +//! Stores a DEFINE FUNCTION config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec { k } -impl KeyRequirements for Fc<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseFunction +impl Categorise for Fc<'_> { + fn categorise(&self) -> Category { + Category::DatabaseFunction } } diff --git a/core/src/key/database/ml.rs b/core/src/key/database/ml.rs index d08da31b..e662056b 100644 --- a/core/src/key/database/ml.rs +++ b/core/src/key/database/ml.rs @@ -1,6 +1,6 @@ -/// Stores a DEFINE MODEL config definition -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +//! 
Stores a DEFINE MODEL config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -35,9 +35,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec { k } -impl KeyRequirements for Ml<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseModel +impl Categorise for Ml<'_> { + fn categorise(&self) -> Category { + Category::DatabaseModel } } diff --git a/core/src/key/database/pa.rs b/core/src/key/database/pa.rs index 1954c625..6164f99d 100644 --- a/core/src/key/database/pa.rs +++ b/core/src/key/database/pa.rs @@ -1,6 +1,6 @@ //! Stores a DEFINE PARAM config definition -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec { k } -impl KeyRequirements for Pa<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseParameter +impl Categorise for Pa<'_> { + fn categorise(&self) -> Category { + Category::DatabaseParameter } } diff --git a/core/src/key/database/tb.rs b/core/src/key/database/tb.rs index 0ed43ca7..924ca8d9 100644 --- a/core/src/key/database/tb.rs +++ b/core/src/key/database/tb.rs @@ -1,6 +1,6 @@ //! Stores a DEFINE TABLE config definition -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -34,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec { k } -impl KeyRequirements for Tb<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseTable +impl Categorise for Tb<'_> { + fn categorise(&self) -> Category { + Category::DatabaseTable } } diff --git a/core/src/key/database/ti.rs b/core/src/key/database/ti.rs index 5c38f12c..da0d6fea 100644 --- a/core/src/key/database/ti.rs +++ b/core/src/key/database/ti.rs @@ -1,6 +1,6 @@ //! Stores the next and available freed IDs for documents -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -22,9 +22,9 @@ pub fn new(ns: u32, db: u32) -> Ti { Ti::new(ns, db) } -impl KeyRequirements for Ti { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseTableIdentifier +impl Categorise for Ti { + fn categorise(&self) -> Category { + Category::DatabaseTableIdentifier } } diff --git a/core/src/key/database/ts.rs b/core/src/key/database/ts.rs index 3b54aed6..9b6f93af 100644 --- a/core/src/key/database/ts.rs +++ b/core/src/key/database/ts.rs @@ -1,6 +1,6 @@ //! 
Stores database timestamps -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -39,9 +39,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec { k } -impl KeyRequirements for Ts<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseTimestamp +impl Categorise for Ts<'_> { + fn categorise(&self) -> Category { + Category::DatabaseTimestamp } } diff --git a/core/src/key/database/us.rs b/core/src/key/database/us.rs index e4d50c3e..4b48ba5e 100644 --- a/core/src/key/database/us.rs +++ b/core/src/key/database/us.rs @@ -1,5 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +//! Stores a DEFINE USER ON DATABASE config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -33,9 +34,9 @@ pub fn suffix(ns: &str, db: &str) -> Vec { k } -impl KeyRequirements for Us<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseUser +impl Categorise for Us<'_> { + fn categorise(&self) -> Category { + Category::DatabaseUser } } diff --git a/core/src/key/database/vs.rs b/core/src/key/database/vs.rs index 2be9cadf..9f1d2cd4 100644 --- a/core/src/key/database/vs.rs +++ b/core/src/key/database/vs.rs @@ -1,6 +1,6 @@ //! Stores database versionstamps -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -23,9 +23,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str) -> Vs<'a> { Vs::new(ns, db) } -impl KeyRequirements for Vs<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseVersionstamp +impl Categorise for Vs<'_> { + fn categorise(&self) -> Category { + Category::DatabaseVersionstamp } } diff --git a/core/src/key/debug.rs b/core/src/key/debug.rs index 959fd2ea..0a82abe3 100644 --- a/core/src/key/debug.rs +++ b/core/src/key/debug.rs @@ -1,11 +1,6 @@ -/// Debug purposes only. It may be used in logs. Not for key handling in implementation code. - -/// Helpers for debugging keys - -/// sprint_key converts a key to an escaped string. -/// This is used for logging and debugging tests and should not be used in implementation code. -#[doc(hidden)] -pub fn sprint_key(key: &T) -> String +/// Displays a key in a human-readable format. +#[cfg(debug_assertions)] +pub fn sprint(key: &T) -> String where T: AsRef<[u8]>, { diff --git a/core/src/key/graph/mod.rs b/core/src/key/graph/mod.rs index abf7b68f..b22fc784 100644 --- a/core/src/key/graph/mod.rs +++ b/core/src/key/graph/mod.rs @@ -1,6 +1,6 @@ //! Stores a graph edge pointer -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use crate::sql::dir::Dir; use crate::sql::id::Id; use crate::sql::thing::Thing; @@ -164,9 +164,9 @@ pub fn ftsuffix(ns: &str, db: &str, tb: &str, id: &Id, eg: &Dir, ft: &str) -> Ve k } -impl KeyRequirements for Graph<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::Graph +impl Categorise for Graph<'_> { + fn categorise(&self) -> Category { + Category::Graph } } diff --git a/core/src/key/index/all.rs b/core/src/key/index/all.rs index b72dfea0..930c1003 100644 --- a/core/src/key/index/all.rs +++ b/core/src/key/index/all.rs @@ -1,6 +1,6 @@ //! 
Stores the key prefix for all keys under an index -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -22,9 +22,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str, tb: &'a str, ix: &'a str) -> All<'a> { All::new(ns, db, tb, ix) } -impl KeyRequirements for All<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexRoot +impl Categorise for All<'_> { + fn categorise(&self) -> Category { + Category::IndexRoot } } diff --git a/core/src/key/index/bc.rs b/core/src/key/index/bc.rs index ec5657d9..a53cbc97 100644 --- a/core/src/key/index/bc.rs +++ b/core/src/key/index/bc.rs @@ -1,7 +1,7 @@ //! Stores Doc list for each term use crate::idx::ft::terms::TermId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -23,9 +23,9 @@ pub struct Bc<'a> { pub term_id: TermId, } -impl KeyRequirements for Bc<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexTermDocList +impl Categorise for Bc<'_> { + fn categorise(&self) -> Category { + Category::IndexTermDocList } } diff --git a/core/src/key/index/bd.rs b/core/src/key/index/bd.rs index 8049508f..b9299ed4 100644 --- a/core/src/key/index/bd.rs +++ b/core/src/key/index/bd.rs @@ -1,7 +1,7 @@ //! Stores BTree nodes for doc ids use crate::idx::trees::store::NodeId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -23,9 +23,9 @@ pub struct Bd<'a> { pub node_id: Option, } -impl KeyRequirements for Bd<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexBTreeNode +impl Categorise for Bd<'_> { + fn categorise(&self) -> Category { + Category::IndexBTreeNode } } diff --git a/core/src/key/index/bf.rs b/core/src/key/index/bf.rs index c7b37a1d..b5f6b97f 100644 --- a/core/src/key/index/bf.rs +++ b/core/src/key/index/bf.rs @@ -1,8 +1,8 @@ //! Stores Term/Doc frequency use crate::idx::docids::DocId; use crate::idx::ft::terms::TermId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -25,9 +25,9 @@ pub struct Bf<'a> { pub doc_id: DocId, } -impl KeyRequirements for Bf<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexTermDocFrequency +impl Categorise for Bf<'_> { + fn categorise(&self) -> Category { + Category::IndexTermDocFrequency } } diff --git a/core/src/key/index/bi.rs b/core/src/key/index/bi.rs index 6d98f4f6..6a87793a 100644 --- a/core/src/key/index/bi.rs +++ b/core/src/key/index/bi.rs @@ -1,7 +1,7 @@ //! 
Stores doc keys for doc_ids use crate::idx::trees::store::NodeId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -23,9 +23,9 @@ pub struct Bi<'a> { pub node_id: NodeId, } -impl KeyRequirements for Bi<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexDocKeys +impl Categorise for Bi<'_> { + fn categorise(&self) -> Category { + Category::IndexDocKeys } } diff --git a/core/src/key/index/bk.rs b/core/src/key/index/bk.rs index cd5e0918..47ac9191 100644 --- a/core/src/key/index/bk.rs +++ b/core/src/key/index/bk.rs @@ -1,7 +1,7 @@ //! Stores the term list for doc_ids use crate::idx::docids::DocId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -23,9 +23,9 @@ pub struct Bk<'a> { pub doc_id: DocId, } -impl KeyRequirements for Bk<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexTermList +impl Categorise for Bk<'_> { + fn categorise(&self) -> Category { + Category::IndexTermList } } diff --git a/core/src/key/index/bl.rs b/core/src/key/index/bl.rs index dc0994dc..bbbb622b 100644 --- a/core/src/key/index/bl.rs +++ b/core/src/key/index/bl.rs @@ -1,7 +1,7 @@ //! Stores BTree nodes for doc lengths use crate::idx::trees::store::NodeId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -23,9 +23,9 @@ pub struct Bl<'a> { pub node_id: Option, } -impl KeyRequirements for Bl<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexBTreeNodeDocLengths +impl Categorise for Bl<'_> { + fn categorise(&self) -> Category { + Category::IndexBTreeNodeDocLengths } } diff --git a/core/src/key/index/bo.rs b/core/src/key/index/bo.rs index de69cf14..69a457c2 100644 --- a/core/src/key/index/bo.rs +++ b/core/src/key/index/bo.rs @@ -1,8 +1,8 @@ //! Stores the offsets use crate::idx::docids::DocId; use crate::idx::ft::terms::TermId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -25,9 +25,9 @@ pub struct Bo<'a> { pub term_id: TermId, } -impl KeyRequirements for Bo<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexOffset +impl Categorise for Bo<'_> { + fn categorise(&self) -> Category { + Category::IndexOffset } } diff --git a/core/src/key/index/bp.rs b/core/src/key/index/bp.rs index ae6bd4fb..b2101d59 100644 --- a/core/src/key/index/bp.rs +++ b/core/src/key/index/bp.rs @@ -1,7 +1,7 @@ //! 
Stores BTree nodes for postings use crate::idx::trees::store::NodeId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -23,9 +23,9 @@ pub struct Bp<'a> { pub node_id: Option, } -impl KeyRequirements for Bp<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexBTreeNodePostings +impl Categorise for Bp<'_> { + fn categorise(&self) -> Category { + Category::IndexBTreeNodePostings } } diff --git a/core/src/key/index/bs.rs b/core/src/key/index/bs.rs index 12b9eb4f..899eb88e 100644 --- a/core/src/key/index/bs.rs +++ b/core/src/key/index/bs.rs @@ -1,6 +1,6 @@ //! Stores FullText index states -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -20,9 +20,9 @@ pub struct Bs<'a> { pub ix: &'a str, } -impl KeyRequirements for Bs<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexFullTextState +impl Categorise for Bs<'_> { + fn categorise(&self) -> Category { + Category::IndexFullTextState } } diff --git a/core/src/key/index/bt.rs b/core/src/key/index/bt.rs index 1a878b92..ae84c8b4 100644 --- a/core/src/key/index/bt.rs +++ b/core/src/key/index/bt.rs @@ -1,7 +1,7 @@ //! Stores BTree nodes for terms use crate::idx::trees::store::NodeId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -23,9 +23,9 @@ pub struct Bt<'a> { pub node_id: Option, } -impl KeyRequirements for Bt<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexBTreeNodeTerms +impl Categorise for Bt<'_> { + fn categorise(&self) -> Category { + Category::IndexBTreeNodeTerms } } diff --git a/core/src/key/index/bu.rs b/core/src/key/index/bu.rs index 3b0fa505..fa0ee019 100644 --- a/core/src/key/index/bu.rs +++ b/core/src/key/index/bu.rs @@ -1,7 +1,7 @@ //! 
Stores terms for term_ids use crate::idx::ft::terms::TermId; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -23,9 +23,9 @@ pub struct Bu<'a> { pub term_id: TermId, } -impl KeyRequirements for Bu<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::IndexTerms +impl Categorise for Bu<'_> { + fn categorise(&self) -> Category { + Category::IndexTerms } } diff --git a/core/src/key/index/mod.rs b/core/src/key/index/mod.rs index 9b0c0c74..8a3178f2 100644 --- a/core/src/key/index/mod.rs +++ b/core/src/key/index/mod.rs @@ -13,8 +13,8 @@ pub mod bt; pub mod bu; pub mod vm; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use crate::sql::array::Array; use crate::sql::id::Id; use derive::Key; @@ -103,9 +103,9 @@ pub struct Index<'a> { pub id: Option>, } -impl KeyRequirements for Index<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::Index +impl Categorise for Index<'_> { + fn categorise(&self) -> Category { + Category::Index } } diff --git a/core/src/key/key_req.rs b/core/src/key/key_req.rs deleted file mode 100644 index 07b77104..00000000 --- a/core/src/key/key_req.rs +++ /dev/null @@ -1,7 +0,0 @@ -use crate::key::error::KeyCategory; - -/// Key requirements are functions that we expect all keys to have -pub(crate) trait KeyRequirements { - /// Returns the category of the key for error reporting - fn key_category(&self) -> KeyCategory; -} diff --git a/core/src/key/mod.rs b/core/src/key/mod.rs index e5529362..b2e97002 100644 --- a/core/src/key/mod.rs +++ b/core/src/key/mod.rs @@ -22,7 +22,7 @@ /// crate::key::database::ac /*{ns}*{db}!ac{ac} /// crate::key::database::az /*{ns}*{db}!az{az} /// crate::key::database::fc /*{ns}*{db}!fn{fc} -/// crate::key::database::lg /*{ns}*{db}!lg{lg} +/// crate::key::database::ml /*{ns}*{db}!ml{ml}{vn} /// crate::key::database::pa /*{ns}*{db}!pa{pa} /// crate::key::database::tb /*{ns}*{db}!tb{tb} /// crate::key::database::ti /+{ns id}*{db id}!ti @@ -57,15 +57,14 @@ /// /// crate::key::graph /*{ns}*{db}*{tb}~{id}{eg}{fk} /// -pub mod change; -pub mod database; -pub mod debug; -pub(crate) mod error; -pub mod graph; -pub mod index; -pub(crate) mod key_req; -pub mod namespace; -pub mod node; -pub mod root; -pub mod table; -pub mod thing; +pub(crate) mod category; +pub(crate) mod change; +pub(crate) mod database; +pub(crate) mod debug; +pub(crate) mod graph; +pub(crate) mod index; +pub(crate) mod namespace; +pub(crate) mod node; +pub(crate) mod root; +pub(crate) mod table; +pub(crate) mod thing; diff --git a/core/src/key/namespace/ac.rs b/core/src/key/namespace/ac.rs index 0bb8f859..bb5fca13 100644 --- a/core/src/key/namespace/ac.rs +++ b/core/src/key/namespace/ac.rs @@ -1,6 +1,6 @@ //! 
Stores a DEFINE ACCESS ON NAMESPACE config definition -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -32,9 +32,9 @@ pub fn suffix(ns: &str) -> Vec { k } -impl KeyRequirements for Ac<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::NamespaceAccess +impl Categorise for Ac<'_> { + fn categorise(&self) -> Category { + Category::NamespaceAccess } } diff --git a/core/src/key/namespace/all.rs b/core/src/key/namespace/all.rs index 1348ae39..12e45a95 100644 --- a/core/src/key/namespace/all.rs +++ b/core/src/key/namespace/all.rs @@ -1,6 +1,6 @@ //! Stores the key prefix for all keys under a namespace -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -16,9 +16,9 @@ pub fn new(ns: &str) -> All<'_> { All::new(ns) } -impl KeyRequirements for All<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::NamespaceRoot +impl Categorise for All<'_> { + fn categorise(&self) -> Category { + Category::NamespaceRoot } } diff --git a/core/src/key/namespace/db.rs b/core/src/key/namespace/db.rs index 5ce7007a..03632dda 100644 --- a/core/src/key/namespace/db.rs +++ b/core/src/key/namespace/db.rs @@ -1,6 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; -/// Stores a DEFINE DATABASE config definition +//! Stores a DEFINE DATABASE config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -32,9 +32,9 @@ pub fn suffix(ns: &str) -> Vec { k } -impl KeyRequirements for Db<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseAlias +impl Categorise for Db<'_> { + fn categorise(&self) -> Category { + Category::DatabaseAlias } } diff --git a/core/src/key/namespace/di.rs b/core/src/key/namespace/di.rs index c6372de5..fa25e1de 100644 --- a/core/src/key/namespace/di.rs +++ b/core/src/key/namespace/di.rs @@ -1,6 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; -/// Stores a database ID generator state +//! Stores a database ID generator state +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -19,9 +19,9 @@ pub fn new(ns: u32) -> Di { Di::new(ns) } -impl KeyRequirements for Di { - fn key_category(&self) -> KeyCategory { - KeyCategory::DatabaseIdentifier +impl Categorise for Di { + fn categorise(&self) -> Category { + Category::DatabaseIdentifier } } impl Di { diff --git a/core/src/key/namespace/us.rs b/core/src/key/namespace/us.rs index 4c4511aa..1b0b4bd0 100644 --- a/core/src/key/namespace/us.rs +++ b/core/src/key/namespace/us.rs @@ -1,5 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +//! 
Stores a DEFINE USER ON NAMESPACE config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -31,9 +32,9 @@ pub fn suffix(ns: &str) -> Vec { k } -impl KeyRequirements for Us<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::NamespaceUser +impl Categorise for Us<'_> { + fn categorise(&self) -> Category { + Category::NamespaceUser } } diff --git a/core/src/key/node/all.rs b/core/src/key/node/all.rs index 93ea7224..8c2c61be 100644 --- a/core/src/key/node/all.rs +++ b/core/src/key/node/all.rs @@ -1,6 +1,6 @@ //! Stores the key prefix for all nodes -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -18,9 +18,9 @@ pub fn new(nd: Uuid) -> All { All::new(nd) } -impl KeyRequirements for All { - fn key_category(&self) -> KeyCategory { - KeyCategory::NodeRoot +impl Categorise for All { + fn categorise(&self) -> Category { + Category::NodeRoot } } diff --git a/core/src/key/node/lq.rs b/core/src/key/node/lq.rs index ad731481..300fd33a 100644 --- a/core/src/key/node/lq.rs +++ b/core/src/key/node/lq.rs @@ -1,6 +1,6 @@ //! Stores a LIVE SELECT query definition on the cluster -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -12,7 +12,7 @@ use uuid::Uuid; /// The value is just the table of the live query as a Strand, which is the missing information from the key path #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Key)] #[non_exhaustive] -pub struct Lq<'a> { +pub struct Lq { __: u8, _a: u8, #[serde(with = "uuid::serde::compact")] @@ -22,38 +22,32 @@ pub struct Lq<'a> { _d: u8, #[serde(with = "uuid::serde::compact")] pub lq: Uuid, - _e: u8, - pub ns: &'a str, - _f: u8, - pub db: &'a str, } -pub fn new<'a>(nd: Uuid, lq: Uuid, ns: &'a str, db: &'a str) -> Lq<'a> { - Lq::new(nd, lq, ns, db) +pub fn new(nd: Uuid, lq: Uuid) -> Lq { + Lq::new(nd, lq) } -pub fn prefix_nd(nd: &Uuid) -> Vec { - let mut k = [b'/', b'$'].to_vec(); - k.extend_from_slice(nd.as_bytes()); - k.extend_from_slice(&[0x00]); +pub fn prefix(nd: Uuid) -> Vec { + let mut k = super::all::new(nd).encode().unwrap(); + k.extend_from_slice(&[b'!', b'l', b'q', 0x00]); k } -pub fn suffix_nd(nd: &Uuid) -> Vec { - let mut k = [b'/', b'$'].to_vec(); - k.extend_from_slice(nd.as_bytes()); - k.extend_from_slice(&[0xff]); +pub fn suffix(nd: Uuid) -> Vec { + let mut k = super::all::new(nd).encode().unwrap(); + k.extend_from_slice(&[b'!', b'l', b'q', 0xff]); k } -impl KeyRequirements for Lq<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::NodeLiveQuery +impl Categorise for Lq { + fn categorise(&self) -> Category { + Category::NodeLiveQuery } } -impl<'a> Lq<'a> { - pub fn new(nd: Uuid, lq: Uuid, ns: &'a str, db: &'a str) -> Self { +impl Lq { + pub fn new(nd: Uuid, lq: Uuid) -> Self { Self { __: b'/', _a: b'$', @@ -62,10 +56,6 @@ impl<'a> Lq<'a> { _c: b'l', _d: b'q', lq, - _e: b'*', - ns, - _f: b'*', - db, } } } @@ -80,35 +70,40 @@ mod tests { let nd = Uuid::from_bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10]); #[rustfmt::skip] let lq = Uuid::from_bytes([0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 
0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20]); - let val = Lq::new(nd, lq, "testns", "testdb"); + let val = Lq::new(nd, lq); let enc = Lq::encode(&val).unwrap(); assert_eq!( enc, b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\ - !lq\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\ - *testns\0*testdb\0" + !lq\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" ); - let dec = Lq::decode(&enc).unwrap(); assert_eq!(val, dec); } #[test] - fn prefix_nd() { + fn test_prefix() { use super::*; - let nd = Uuid::from_bytes([ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, - 0x0f, 0x10, - ]); - let val = prefix_nd(&nd); - assert_eq!(val, b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x00"); + #[rustfmt::skip] + let nd = Uuid::from_bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10]); + let val = super::prefix(nd); + assert_eq!( + val, + b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\ + !lq\x00" + ); } #[test] - fn suffix_nd() { + fn test_suffix() { use super::*; - let nd = Uuid::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let val = suffix_nd(&nd); - assert_eq!(val, b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\xff"); + #[rustfmt::skip] + let nd = Uuid::from_bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10]); + let val = super::suffix(nd); + assert_eq!( + val, + b"/$\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\ + !lq\xff" + ); } } diff --git a/core/src/key/root/ac.rs b/core/src/key/root/ac.rs index cf965fba..f566eb3e 100644 --- a/core/src/key/root/ac.rs +++ b/core/src/key/root/ac.rs @@ -1,5 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +//! Stores a DEFINE ACCESS ON ROOT config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -29,9 +30,9 @@ pub fn suffix() -> Vec { k } -impl KeyRequirements for Ac<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::Access +impl Categorise for Ac<'_> { + fn categorise(&self) -> Category { + Category::Access } } diff --git a/core/src/key/root/all.rs b/core/src/key/root/all.rs index dad42e25..b3687587 100644 --- a/core/src/key/root/all.rs +++ b/core/src/key/root/all.rs @@ -1,6 +1,6 @@ //! Stores the key prefix for all keys -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -20,9 +20,9 @@ impl Default for Kv { } } -impl KeyRequirements for Kv { - fn key_category(&self) -> KeyCategory { - KeyCategory::Root +impl Categorise for Kv { + fn categorise(&self) -> Category { + Category::Root } } diff --git a/core/src/key/root/hb.rs b/core/src/key/root/hb.rs deleted file mode 100644 index fb543e99..00000000 --- a/core/src/key/root/hb.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! 
Stores a heartbeat per registered cluster node -use crate::dbs::node::{KeyTimestamp, Timestamp}; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; -use derive::Key; -use serde::{Deserialize, Serialize}; -use uuid::Uuid; - -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Key)] -#[non_exhaustive] -pub struct Hb { - __: u8, - _a: u8, - _b: u8, - _c: u8, - pub hb: Timestamp, - _d: u8, - #[serde(with = "uuid::serde::compact")] - pub nd: Uuid, -} - -impl KeyRequirements for Hb { - fn key_category(&self) -> KeyCategory { - KeyCategory::Heartbeat - } -} - -impl Hb { - pub fn new(hb: Timestamp, nd: Uuid) -> Self { - Self { - __: b'/', - _a: b'!', - _b: b'h', - _c: b'b', - hb, - _d: b'/', - nd, - } - } - - pub fn prefix() -> Vec { - let mut k = crate::key::root::all::new().encode().unwrap(); - k.extend_from_slice(&[b'!', b'h', b'b', 0x00]); - k - } - - pub fn suffix(ts: &Timestamp) -> Vec { - // Add one to timestamp so we get a complete range inclusive of provided timestamp - // Also convert type - let tskey: KeyTimestamp = KeyTimestamp { - value: ts.value + 1, - }; - let mut k = crate::key::root::all::new().encode().unwrap(); - k.extend_from_slice(&[b'!', b'h', b'b']); - k.extend_from_slice(tskey.encode().unwrap().as_ref()); - k - } -} - -impl From for Hb { - fn from(ts: Timestamp) -> Self { - let empty_uuid = uuid::Uuid::nil(); - Self::new(ts, empty_uuid) - } -} - -#[cfg(test)] -mod tests { - #[test] - fn key() { - use super::*; - #[rustfmt::skip] - let val = Hb::new( - Timestamp { value: 123 }, - Uuid::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]) - ); - let enc = Hb::encode(&val).unwrap(); - assert_eq!( - enc, - b"/!hb\x00\x00\x00\x00\x00\x00\x00\x7b/\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"); - let dec = Hb::decode(&enc).unwrap(); - assert_eq!(val, dec); - } - - #[test] - fn prefix() { - use super::*; - let actual = Hb::prefix(); - assert_eq!(actual, b"/!hb\x00") - } - - #[test] - fn suffix() { - use super::*; - let ts: Timestamp = Timestamp { - value: 456, - }; - let actual = Hb::suffix(&ts); - assert_eq!(actual, b"/!hb\x00\x00\x00\x00\x00\x00\x01\xc9") // 457, because we add 1 to the timestamp - } -} diff --git a/core/src/key/root/mod.rs b/core/src/key/root/mod.rs index 4d011716..898147e5 100644 --- a/core/src/key/root/mod.rs +++ b/core/src/key/root/mod.rs @@ -1,6 +1,5 @@ pub mod ac; pub mod all; -pub mod hb; pub mod nd; pub mod ni; pub mod ns; diff --git a/core/src/key/root/nd.rs b/core/src/key/root/nd.rs index 1028c878..ee89f12e 100644 --- a/core/src/key/root/nd.rs +++ b/core/src/key/root/nd.rs @@ -1,6 +1,6 @@ //! 
Stores cluster membership information -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -18,9 +18,25 @@ pub struct Nd { pub nd: Uuid, } -impl KeyRequirements for Nd { - fn key_category(&self) -> KeyCategory { - KeyCategory::Node +pub fn new(nd: Uuid) -> Nd { + Nd::new(nd) +} + +pub fn prefix() -> Vec { + let mut k = crate::key::root::all::new().encode().unwrap(); + k.extend_from_slice(&[b'!', b'n', b'd', 0x00]); + k +} + +pub fn suffix() -> Vec { + let mut k = crate::key::root::all::new().encode().unwrap(); + k.extend_from_slice(&[b'!', b'n', b'd', 0xff]); + k +} + +impl Categorise for Nd { + fn categorise(&self) -> Category { + Category::Node } } @@ -34,18 +50,6 @@ impl Nd { nd, } } - - pub fn prefix() -> Vec { - let mut k = crate::key::root::all::new().encode().unwrap(); - k.extend_from_slice(&[b'!', b'n', b'd', 0x00]); - k - } - - pub fn suffix() -> Vec { - let mut k = crate::key::root::all::new().encode().unwrap(); - k.extend_from_slice(&[b'!', b'n', b'd', 0xff]); - k - } } #[cfg(test)] @@ -61,13 +65,13 @@ mod tests { #[test] fn test_prefix() { - let val = super::Nd::prefix(); + let val = super::prefix(); assert_eq!(val, b"/!nd\0") } #[test] fn test_suffix() { - let val = super::Nd::suffix(); + let val = super::suffix(); assert_eq!(val, b"/!nd\xff") } } diff --git a/core/src/key/root/ni.rs b/core/src/key/root/ni.rs index 872e987d..0b3cdcda 100644 --- a/core/src/key/root/ni.rs +++ b/core/src/key/root/ni.rs @@ -1,6 +1,6 @@ //! Stores namespace ID generator state -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -19,9 +19,9 @@ impl Default for Ni { } } -impl KeyRequirements for Ni { - fn key_category(&self) -> KeyCategory { - KeyCategory::NamespaceIdentifier +impl Categorise for Ni { + fn categorise(&self) -> Category { + Category::NamespaceIdentifier } } diff --git a/core/src/key/root/ns.rs b/core/src/key/root/ns.rs index 1168fb13..aed59a1e 100644 --- a/core/src/key/root/ns.rs +++ b/core/src/key/root/ns.rs @@ -1,6 +1,6 @@ //! Stores a DEFINE NAMESPACE config definition -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -30,9 +30,9 @@ pub fn suffix() -> Vec { k } -impl KeyRequirements for Ns<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::Namespace +impl Categorise for Ns<'_> { + fn categorise(&self) -> Category { + Category::Namespace } } diff --git a/core/src/key/root/us.rs b/core/src/key/root/us.rs index 4cf97386..d5b7679f 100644 --- a/core/src/key/root/us.rs +++ b/core/src/key/root/us.rs @@ -1,5 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +//! 
Stores a DEFINE USER ON ROOT config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -29,9 +30,9 @@ pub fn suffix() -> Vec { k } -impl KeyRequirements for Us<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::User +impl Categorise for Us<'_> { + fn categorise(&self) -> Category { + Category::User } } diff --git a/core/src/key/table/all.rs b/core/src/key/table/all.rs index 60b6a02b..f6955cd4 100644 --- a/core/src/key/table/all.rs +++ b/core/src/key/table/all.rs @@ -1,6 +1,6 @@ //! Stores the key prefix for all keys under a table -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -20,9 +20,9 @@ pub fn new<'a>(ns: &'a str, db: &'a str, tb: &'a str) -> Table<'a> { Table::new(ns, db, tb) } -impl KeyRequirements for Table<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::TableRoot +impl Categorise for Table<'_> { + fn categorise(&self) -> Category { + Category::TableRoot } } diff --git a/core/src/key/table/ev.rs b/core/src/key/table/ev.rs index c52f1a25..b5b5d2a4 100644 --- a/core/src/key/table/ev.rs +++ b/core/src/key/table/ev.rs @@ -1,6 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; -/// Stores a DEFINE EVENT config definition +//! Stores a DEFINE EVENT config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -36,9 +36,9 @@ pub fn suffix(ns: &str, db: &str, tb: &str) -> Vec { k } -impl KeyRequirements for Ev<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::TableEvent +impl Categorise for Ev<'_> { + fn categorise(&self) -> Category { + Category::TableEvent } } diff --git a/core/src/key/table/fd.rs b/core/src/key/table/fd.rs index f1735eaa..3a269ea5 100644 --- a/core/src/key/table/fd.rs +++ b/core/src/key/table/fd.rs @@ -1,6 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; -/// Stores a DEFINE FIELD config definition +//! Stores a DEFINE FIELD config definition +use crate::key::category::Categorise; +use crate::key::category::Category; use derive::Key; use serde::{Deserialize, Serialize}; @@ -36,9 +36,9 @@ pub fn suffix(ns: &str, db: &str, tb: &str) -> Vec { k } -impl KeyRequirements for Fd<'_> { - fn key_category(&self) -> KeyCategory { - KeyCategory::TableField +impl Categorise for Fd<'_> { + fn categorise(&self) -> Category { + Category::TableField } } diff --git a/core/src/key/table/ft.rs b/core/src/key/table/ft.rs index da259e25..13b85f66 100644 --- a/core/src/key/table/ft.rs +++ b/core/src/key/table/ft.rs @@ -1,6 +1,6 @@ -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; -/// Stores a DEFINE TABLE AS config definition +//! 
Stores a DEFINE TABLE AS config definition
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
@@ -36,9 +36,9 @@ pub fn suffix(ns: &str, db: &str, tb: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Ft<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::TableView
+impl Categorise for Ft<'_> {
+	fn categorise(&self) -> Category {
+		Category::TableView
 	}
 }
 
diff --git a/core/src/key/table/ix.rs b/core/src/key/table/ix.rs
index f504f487..def868fd 100644
--- a/core/src/key/table/ix.rs
+++ b/core/src/key/table/ix.rs
@@ -1,6 +1,6 @@
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
-/// Stores a DEFINE INDEX config definition
+//! Stores a DEFINE INDEX config definition
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
@@ -36,9 +36,9 @@ pub fn suffix(ns: &str, db: &str, tb: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Ix<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::IndexDefinition
+impl Categorise for Ix<'_> {
+	fn categorise(&self) -> Category {
+		Category::IndexDefinition
 	}
 }
 
diff --git a/core/src/key/table/lq.rs b/core/src/key/table/lq.rs
index 4003605e..6a29a475 100644
--- a/core/src/key/table/lq.rs
+++ b/core/src/key/table/lq.rs
@@ -1,6 +1,6 @@
 //! Stores a LIVE SELECT query definition on the table
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use derive::Key;
 use serde::{Deserialize, Serialize};
 use uuid::Uuid;
@@ -38,18 +38,13 @@ pub fn prefix(ns: &str, db: &str, tb: &str) -> Vec<u8> {
 
 pub fn suffix(ns: &str, db: &str, tb: &str) -> Vec<u8> {
 	let mut k = super::all::new(ns, db, tb).encode().unwrap();
-	k.extend_from_slice(&[b'!', b'l', b'q']);
-	k.extend_from_slice(Uuid::max().as_ref());
-	// We need the extra byte here because `getr()` only supports half-open ranges
-	// so it wouldn't match max UUIDs because it doesn't check for equal matches
-	// on the upper bound. Adding an extra byte to bring max into range as well.
-	k.push(0x00);
+	k.extend_from_slice(&[b'!', b'l', b'q', 0xff]);
 	k
 }
 
-impl KeyRequirements for Lq<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::TableLiveQuery
+impl Categorise for Lq<'_> {
+	fn categorise(&self) -> Category {
+		Category::TableLiveQuery
 	}
 }
 
@@ -73,8 +68,6 @@ impl<'a> Lq<'a> {
 
 #[cfg(test)]
 mod tests {
-	use crate::key::debug;
-
 	#[test]
 	fn key() {
 		use super::*;
 		#[rustfmt::skip]
 		let live_query_id = Uuid::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
 		let val = Lq::new("testns", "testdb", "testtb", live_query_id);
 		let enc = Lq::encode(&val).unwrap();
-		println!("{:?}", debug::sprint_key(&enc));
 		assert_eq!(
 			enc,
 			b"/*testns\x00*testdb\x00*testtb\x00!lq\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
 		);
@@ -101,6 +93,6 @@
 	#[test]
 	fn suffix() {
 		let val = super::suffix("testns", "testdb", "testtb");
-		assert_eq!(val, b"/*testns\x00*testdb\x00*testtb\x00!lq\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00")
+		assert_eq!(val, b"/*testns\x00*testdb\x00*testtb\x00!lq\xff")
	}
 }
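The new suffix above is worth a note: instead of encoding a maximum UUID plus a trailing 0x00 byte, the upper bound is now just the `!lq` marker followed by a single 0xff byte. A minimal sketch of why the 0x00/0xff sentinels bound the scan range, using the key layout from the test vectors above (byte vectors compare lexicographically in Rust, just as keys do in the underlying engines):

fn main() {
	// Scan bounds as produced by prefix() and suffix() above
	let beg = b"/*testns\x00*testdb\x00*testtb\x00!lq\x00".to_vec();
	let end = b"/*testns\x00*testdb\x00*testtb\x00!lq\xff".to_vec();
	// A stored live query key: the table prefix, the `!lq` marker,
	// then the 16-byte live query UUID
	let key = b"/*testns\x00*testdb\x00*testtb\x00!lq\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10".to_vec();
	// The 0x00 sentinel sorts below every UUID-suffixed key, and the
	// 0xff sentinel above any UUID that does not itself begin with 0xff,
	// so a half-open scan over beg..end returns the stored keys
	assert!(beg < key && key < end);
}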
diff --git a/core/src/key/thing/mod.rs b/core/src/key/thing/mod.rs
index d748521d..96285b52 100644
--- a/core/src/key/thing/mod.rs
+++ b/core/src/key/thing/mod.rs
@@ -1,6 +1,6 @@
 //! Stores a record document
-use crate::key::error::KeyCategory;
-use crate::key::key_req::KeyRequirements;
+use crate::key::category::Categorise;
+use crate::key::category::Category;
 use crate::sql::id::Id;
 use derive::Key;
 use serde::{Deserialize, Serialize};
@@ -35,9 +35,9 @@ pub fn suffix(ns: &str, db: &str, tb: &str) -> Vec<u8> {
 	k
 }
 
-impl KeyRequirements for Thing<'_> {
-	fn key_category(&self) -> KeyCategory {
-		KeyCategory::Thing
+impl Categorise for Thing<'_> {
+	fn categorise(&self) -> Category {
+		Category::Thing
 	}
 }
 
diff --git a/core/src/kvs/api.rs b/core/src/kvs/api.rs
new file mode 100644
index 00000000..af50b595
--- /dev/null
+++ b/core/src/kvs/api.rs
@@ -0,0 +1,328 @@
+use super::kv::Add;
+use super::tr::Check;
+use crate::cnf::NORMAL_FETCH_SIZE;
+use crate::err::Error;
+use crate::kvs::batch::Batch;
+use crate::kvs::Key;
+use crate::kvs::Val;
+use crate::vs::Versionstamp;
+use std::fmt::Debug;
+use std::ops::Range;
+
+pub trait Transaction {
+	/// Specify how we should handle unclosed transactions.
+	///
+	/// If a transaction is not cancelled or rolled back then
+	/// this can cause issues on some storage engine
+	/// implementations. In tests we can ignore unhandled
+	/// transactions, whilst in development we should panic
+	/// so that any unintended behaviour is detected, and in
+	/// production we should only log a warning.
+	fn check_level(&mut self, check: Check);
+
+	/// Check if transaction is finished.
+	///
+	/// If the transaction has been cancelled or committed,
+	/// then this function will return [`true`], and any further
+	/// calls to functions on this transaction will result
+	/// in an [`Error::TxFinished`] error.
+	fn closed(&self) -> bool;
+
+	/// Check if transaction is writeable.
+	///
+	/// If the transaction has been marked as a writeable
+	/// transaction, then this function will return [`true`].
+	/// This function can be used to check whether a transaction
+	/// allows data to be modified, and if not then the function
+	/// will return an [`Error::TxReadonly`] error.
+	fn writeable(&self) -> bool;
+
+	/// Cancel a transaction.
+	///
+	/// This reverses all changes made within the transaction.
+	async fn cancel(&mut self) -> Result<(), Error>;
+
+	/// Commit a transaction.
+	///
+	/// This attempts to commit all changes made within the transaction.
+	async fn commit(&mut self) -> Result<(), Error>;
+
+	/// Check if a key exists in the datastore.
+	async fn exists<K>(&mut self, key: K) -> Result<bool, Error>
+	where
+		K: Into<Key> + Debug;
+
+	/// Fetch a key from the datastore.
+	async fn get<K>(&mut self, key: K) -> Result<Option<Val>, Error>
+	where
+		K: Into<Key> + Debug;
+
+	/// Insert or update a key in the datastore.
+	async fn set<K, V>(&mut self, key: K, val: V) -> Result<(), Error>
+	where
+		K: Into<Key> + Debug,
+		V: Into<Val> + Debug;
+
+	/// Insert a key if it doesn't exist in the datastore.
+	async fn put<K, V>(&mut self, key: K, val: V) -> Result<(), Error>
+	where
+		K: Into<Key> + Debug,
+		V: Into<Val> + Debug;
+
+	/// Update a key in the datastore if the current value matches a condition.
+	async fn putc<K, V>(&mut self, key: K, val: V, chk: Option<V>) -> Result<(), Error>
+	where
+		K: Into<Key> + Debug,
+		V: Into<Val> + Debug;
+
+	/// Delete a key from the datastore.
+	async fn del<K>(&mut self, key: K) -> Result<(), Error>
+	where
+		K: Into<Key> + Debug;
+
+	/// Delete a key from the datastore if the current value matches a condition.
+	async fn delc<K, V>(&mut self, key: K, chk: Option<V>) -> Result<(), Error>
+	where
+		K: Into<Key> + Debug,
+		V: Into<Val> + Debug;
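The conditional primitives putc and delc above are what the higher-level layers use for compare-and-set style updates. A minimal sketch of their semantics over an ordered map; this is a self-contained toy, not the trait above, and the string errors stand in for the crate's Error variants:

use std::collections::BTreeMap;

type Key = Vec<u8>;
type Val = Vec<u8>;

struct ToyStore {
	data: BTreeMap<Key, Val>,
}

impl ToyStore {
	/// `put` semantics: insert only if the key does not already exist.
	fn put(&mut self, key: Key, val: Val) -> Result<(), String> {
		if self.data.contains_key(&key) {
			return Err("KeyAlreadyExists".into());
		}
		self.data.insert(key, val);
		Ok(())
	}
	/// `putc` semantics: update only if the current value matches `chk`.
	fn putc(&mut self, key: Key, val: Val, chk: Option<Val>) -> Result<(), String> {
		if self.data.get(&key) == chk.as_ref() {
			self.data.insert(key, val);
			Ok(())
		} else {
			Err("TxConditionNotMet".into())
		}
	}
}

fn main() {
	let mut tx = ToyStore {
		data: BTreeMap::new(),
	};
	tx.put(b"k".to_vec(), b"v1".to_vec()).unwrap();
	// Succeeds: the stored value still matches the check value
	tx.putc(b"k".to_vec(), b"v2".to_vec(), Some(b"v1".to_vec())).unwrap();
	// Fails: the stored value is now "v2", so the condition no longer holds
	assert!(tx.putc(b"k".to_vec(), b"v3".to_vec(), Some(b"v1".to_vec())).is_err());
}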
+	/// Retrieve a specific range of keys from the datastore.
+	///
+	/// This function fetches the full range of keys without values, in a single request to the underlying datastore.
+	async fn keys<K>(&mut self, rng: Range<K>, limit: u32) -> Result<Vec<Key>, Error>
+	where
+		K: Into<Key> + Debug;
+
+	/// Retrieve a specific range of keys from the datastore.
+	///
+	/// This function fetches the full range of key-value pairs, in a single request to the underlying datastore.
+	async fn scan<K>(&mut self, rng: Range<K>, limit: u32) -> Result<Vec<(Key, Val)>, Error>
+	where
+		K: Into<Key> + Debug;
+
+	/// Fetch many keys from the datastore.
+	///
+	/// This function fetches the values for all matching keys from the underlying datastore concurrently.
+	async fn getm<K>(&mut self, keys: Vec<K>) -> Result<Vec<Val>, Error>
+	where
+		K: Into<Key> + Debug,
+	{
+		// Check to see if transaction is closed
+		if self.closed() {
+			return Err(Error::TxFinished);
+		}
+		// Continue with function logic
+		let mut out = Vec::with_capacity(keys.len());
+		for key in keys.into_iter() {
+			if let Some(val) = self.get(key).await? {
+				out.push(val);
+			} else {
+				out.push(vec![]);
+			}
+		}
+		Ok(out)
+	}
+
+	/// Retrieve a range of prefixed keys from the datastore.
+	///
+	/// This function fetches all matching key-value pairs from the underlying datastore in grouped batches.
+	async fn getp<K>(&mut self, key: K) -> Result<Vec<(Key, Val)>, Error>
+	where
+		K: Into<Key> + Debug,
+	{
+		// Check to see if transaction is closed
+		if self.closed() {
+			return Err(Error::TxFinished);
+		}
+		// Continue with function logic
+		let beg: Key = key.into();
+		let end: Key = beg.clone().add(0xff);
+		self.getr(beg..end).await
+	}
+
+	/// Retrieve a range of keys from the datastore.
+	///
+	/// This function fetches all matching key-value pairs from the underlying datastore in grouped batches.
+	async fn getr<K>(&mut self, rng: Range<K>) -> Result<Vec<(Key, Val)>, Error>
+	where
+		K: Into<Key> + Debug,
+	{
+		// Check to see if transaction is closed
+		if self.closed() {
+			return Err(Error::TxFinished);
+		}
+		// Continue with function logic
+		let mut out = vec![];
+		let beg: Key = rng.start.into();
+		let end: Key = rng.end.into();
+		let mut next = Some(beg..end);
+		while let Some(rng) = next {
+			let res = self.batch(rng, *NORMAL_FETCH_SIZE, true).await?;
+			next = res.next;
+			for v in res.values.into_iter() {
+				out.push(v);
+			}
+		}
+		Ok(out)
+	}
+
+	/// Delete a range of prefixed keys from the datastore.
+	///
+	/// This function deletes all matching key-value pairs from the underlying datastore in grouped batches.
+	async fn delp<K>(&mut self, key: K) -> Result<(), Error>
+	where
+		K: Into<Key> + Debug,
+	{
+		// Check to see if transaction is closed
+		if self.closed() {
+			return Err(Error::TxFinished);
+		}
+		// Check to see if transaction is writable
+		if !self.writeable() {
+			return Err(Error::TxReadonly);
+		}
+		// Continue with function logic
+		let beg: Key = key.into();
+		let end: Key = beg.clone().add(0xff);
+		self.delr(beg..end).await
+	}
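Both getr above and delr below drive the batch scanner that follows them: each pass scans at most a fixed number of keys, then resumes from the last key seen with a 0x00 byte appended, which is the smallest key sorting strictly after it. A self-contained sketch of that resume-key loop over an ordered map (illustrative only; the real logic is the batch method below):

use std::collections::BTreeMap;
use std::ops::Bound;

type Key = Vec<u8>;

/// Scan up to `batch` keys from `start` (inclusive) to `end` (exclusive),
/// returning the keys plus an optional resume point for the next pass.
fn scan_batch(
	data: &BTreeMap<Key, Vec<u8>>,
	start: Key,
	end: &Key,
	batch: usize,
) -> (Vec<Key>, Option<Key>) {
	let keys: Vec<Key> = data
		.range((Bound::Included(start), Bound::Excluded(end.clone())))
		.take(batch)
		.map(|(k, _)| k.clone())
		.collect();
	if keys.len() < batch {
		// The range is consumed, so there is nothing to resume from
		(keys, None)
	} else {
		// Append 0x00 to the last key seen to form its smallest successor
		let mut next = keys.last().unwrap().clone();
		next.push(0x00);
		(keys, Some(next))
	}
}

fn main() {
	let mut data = BTreeMap::new();
	for i in 0u8..5 {
		data.insert(vec![b'k', i], vec![i]);
	}
	let end = vec![b'k', 0xff];
	let (first, next) = scan_batch(&data, vec![b'k', 0x00], &end, 3);
	assert_eq!(first.len(), 3);
	let (rest, done) = scan_batch(&data, next.unwrap(), &end, 3);
	assert_eq!(rest.len(), 2);
	assert!(done.is_none());
}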
+	/// Delete a range of keys from the datastore.
+	///
+	/// This function deletes all matching key-value pairs from the underlying datastore in grouped batches.
+	async fn delr<K>(&mut self, rng: Range<K>) -> Result<(), Error>
+	where
+		K: Into<Key> + Debug,
+	{
+		// Check to see if transaction is closed
+		if self.closed() {
+			return Err(Error::TxFinished);
+		}
+		// Check to see if transaction is writable
+		if !self.writeable() {
+			return Err(Error::TxReadonly);
+		}
+		// Continue with function logic
+		let beg: Key = rng.start.into();
+		let end: Key = rng.end.into();
+		let mut next = Some(beg..end);
+		while let Some(rng) = next {
+			let res = self.batch(rng, *NORMAL_FETCH_SIZE, false).await?;
+			next = res.next;
+			for (k, _) in res.values.into_iter() {
+				self.del(k).await?;
+			}
+		}
+		Ok(())
+	}
+
+	/// Retrieve a batched scan over a specific range of keys in the datastore.
+	///
+	/// This function fetches keys or key-value pairs, in batches, with multiple requests to the underlying datastore.
+	async fn batch<K>(&mut self, rng: Range<K>, batch: u32, values: bool) -> Result<Batch, Error>
+	where
+		K: Into<Key> + Debug,
+	{
+		// Check to see if transaction is closed
+		if self.closed() {
+			return Err(Error::TxFinished);
+		}
+		// Continue with function logic
+		let beg: Key = rng.start.into();
+		let end: Key = rng.end.into();
+		// Scan for the next batch
+		let res = if values {
+			self.scan(beg..end.clone(), batch).await?
+		} else {
+			self.keys(beg..end.clone(), batch)
+				.await?
+				.into_iter()
+				.map(|k| (k, vec![]))
+				.collect::<Vec<(Key, Val)>>()
+		};
+		// Check if range is consumed
+		if res.len() < batch as usize && batch > 0 {
+			Ok(Batch {
+				next: None,
+				values: res,
+			})
+		} else {
+			match res.last() {
+				Some((k, _)) => Ok(Batch {
+					next: Some(Range {
+						start: k.clone().add(0x00),
+						end,
+					}),
+					values: res,
+				}),
+				// We have checked the length above,
+				// so there is guaranteed to always
+				// be a last item in the vector.
+				// This is therefore unreachable.
+				None => unreachable!(),
+			}
+		}
+	}
+
+	/// Obtain a new change timestamp for a key
+	/// which is replaced with the current timestamp when the transaction is committed.
+	/// NOTE: This should be called when composing the change feed entries for this transaction,
+	/// which should be done immediately before the transaction commit.
+	/// This keeps the commit delay (pessimistic) or conflict rate (optimistic) for other transactions as low as possible.
+	async fn get_timestamp<K>(&mut self, key: K) -> Result<Versionstamp, Error>
+	where
+		K: Into<Key> + Debug,
+	{
+		// Check to see if transaction is closed
+		if self.closed() {
+			return Err(Error::TxFinished);
+		}
+		// Calculate the version key
+		let key = key.into();
+		// Calculate the version number
+		let ver = match self.get(key.as_slice()).await? {
+			Some(prev) => {
+				let res: Result<[u8; 10], Error> = match prev.as_slice().try_into() {
+					Ok(ba) => Ok(ba),
+					Err(e) => Err(Error::Tx(e.to_string())),
+				};
+				crate::vs::try_to_u64_be(res?)? + 1
+			}
+			None => 1,
+		};
+		// Convert the timestamp to a versionstamp
+		let verbytes = crate::vs::u64_to_versionstamp(ver);
+		// Store the timestamp to prevent other transactions from committing
+		self.set(key.as_slice(), verbytes.to_vec()).await?;
+		// Return the uint64 representation of the timestamp as the result
+		Ok(verbytes)
+	}
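get_timestamp treats the stored value as a 10-byte versionstamp whose leading bytes hold a big-endian counter, so incrementing the counter also preserves byte-wise key ordering. A sketch of the two conversions relied on above; the exact layout (an 8-byte big-endian counter followed by two zero bytes) is an assumption here, standing in for crate::vs::u64_to_versionstamp and crate::vs::try_to_u64_be:

type Versionstamp = [u8; 10];

fn u64_to_versionstamp(v: u64) -> Versionstamp {
	// Big-endian counter in the first 8 bytes, suffix left as zeroes
	let mut vs = [0u8; 10];
	vs[..8].copy_from_slice(&v.to_be_bytes());
	vs
}

fn try_to_u64_be(vs: Versionstamp) -> Result<u64, String> {
	let bytes: [u8; 8] = vs[..8].try_into().map_err(|_| "invalid versionstamp".to_string())?;
	Ok(u64::from_be_bytes(bytes))
}

fn main() {
	// Each get_timestamp call bumps the stored counter by one, so the
	// versionstamps handed out for a given key are strictly monotonic
	let prev = u64_to_versionstamp(41);
	let next = u64_to_versionstamp(try_to_u64_be(prev).unwrap() + 1);
	assert!(next > prev); // big-endian layout preserves byte-wise ordering
	assert_eq!(try_to_u64_be(next).unwrap(), 42);
}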
+	/// Insert the versionstamped key into the datastore.
+	async fn set_versionstamp<K, V>(
+		&mut self,
+		ts_key: K,
+		prefix: K,
+		suffix: K,
+		val: V,
+	) -> Result<(), Error>
+	where
+		K: Into<Key> + Debug,
+		V: Into<Val> + Debug,
+	{
+		// Check to see if transaction is closed
+		if self.closed() {
+			return Err(Error::TxFinished);
+		}
+		// Check to see if transaction is writable
+		if !self.writeable() {
+			return Err(Error::TxReadonly);
+		}
+		// Continue with function logic
+		let ts = self.get_timestamp(ts_key).await?;
+		let mut k: Vec<u8> = prefix.into();
+		k.append(&mut ts.to_vec());
+		k.append(&mut suffix.into());
+		self.set(k, val).await
+	}
+}
diff --git a/core/src/kvs/batch.rs b/core/src/kvs/batch.rs
new file mode 100644
index 00000000..7fdc7d15
--- /dev/null
+++ b/core/src/kvs/batch.rs
@@ -0,0 +1,10 @@
+use super::Key;
+use super::Val;
+use std::ops::Range;
+
+/// A batch scan result returned from the [`Transaction::batch`] or [`Transactor::batch`] functions.
+#[derive(Debug)]
+pub struct Batch {
+	pub next: Option<Range<Key>>,
+	pub values: Vec<(Key, Val)>,
+}
diff --git a/core/src/kvs/cache.rs b/core/src/kvs/cache.rs
index 6819d0a7..e12f2ee0 100644
--- a/core/src/kvs/cache.rs
+++ b/core/src/kvs/cache.rs
@@ -1,5 +1,5 @@
-use crate::idg::u32::U32;
-use crate::kvs::kv::Key;
+use super::Key;
+use crate::dbs::node::Node;
 use crate::sql::statements::DefineAccessStatement;
 use crate::sql::statements::DefineAnalyzerStatement;
 use crate::sql::statements::DefineDatabaseStatement;
@@ -13,61 +13,247 @@ use crate::sql::statements::DefineParamStatement;
 use crate::sql::statements::DefineTableStatement;
 use crate::sql::statements::DefineUserStatement;
 use crate::sql::statements::LiveStatement;
-use std::collections::HashMap;
+use crate::sql::Value;
+use quick_cache::Weighter;
+use std::any::Any;
 use std::sync::Arc;
 
+#[derive(Clone)]
+pub(super) struct EntryWeighter;
+
+impl Weighter<Key, Entry> for EntryWeighter {
+	fn weight(&self, _key: &Key, val: &Entry) -> u32 {
+		match val {
+			// Value entries all have the same weight,
+			// and can be evicted whenever necessary.
+			// We could improve this, by calculating
+			// the precise weight of a Value (when
+			// deserialising), and using this size to
+			// determine the actual cache weight.
+			Entry::Val(_) => 1,
+			// We don't want to evict other entries
+			// so we set the weight to 0 which will
+			// prevent entries being evicted, unless
+			// specifically removed from the cache.
+			_ => 0,
+		}
+	}
+}
+
 #[derive(Clone)]
 #[non_exhaustive]
-pub enum Entry {
-	// Single definitions
-	Db(Arc<DefineDatabaseStatement>),
-	Fc(Arc<DefineFunctionStatement>),
-	Ix(Arc<DefineIndexStatement>),
-	Ml(Arc<DefineModelStatement>),
-	Ns(Arc<DefineNamespaceStatement>),
-	Pa(Arc<DefineParamStatement>),
-	Tb(Arc<DefineTableStatement>),
-	// Multi definitions
-	Acs(Arc<[DefineAccessStatement]>),
-	Azs(Arc<[DefineAnalyzerStatement]>),
-	Dbs(Arc<[DefineDatabaseStatement]>),
-	Das(Arc<[DefineAccessStatement]>),
-	Dus(Arc<[DefineUserStatement]>),
-	Evs(Arc<[DefineEventStatement]>),
-	Fcs(Arc<[DefineFunctionStatement]>),
-	Fds(Arc<[DefineFieldStatement]>),
-	Fts(Arc<[DefineTableStatement]>),
-	Ixs(Arc<[DefineIndexStatement]>),
-	Lvs(Arc<[LiveStatement]>),
-	Mls(Arc<[DefineModelStatement]>),
+pub(super) enum Entry {
+	/// A cached entry of any type
+	Any(Arc<dyn Any + Send + Sync>),
+	/// A cached record document content
+	Val(Arc<Value>),
+	/// A slice of Node specified at the root.
+	Nds(Arc<[Node]>),
+	/// A slice of DefineUserStatement specified at the root.
+	Rus(Arc<[DefineUserStatement]>),
+	/// A slice of DefineAccessStatement specified at the root.
+	Ras(Arc<[DefineAccessStatement]>),
+	/// A slice of DefineNamespaceStatement specified on a namespace.
Nss(Arc<[DefineNamespaceStatement]>), - Nas(Arc<[DefineAccessStatement]>), + /// A slice of DefineUserStatement specified on a namespace. Nus(Arc<[DefineUserStatement]>), + /// A slice of DefineAccessStatement specified on a namespace. + Nas(Arc<[DefineAccessStatement]>), + /// A slice of DefineDatabaseStatement specified on a namespace. + Dbs(Arc<[DefineDatabaseStatement]>), + /// A slice of DefineAnalyzerStatement specified on a namespace. + Azs(Arc<[DefineAnalyzerStatement]>), + /// A slice of DefineAccessStatement specified on a database. + Das(Arc<[DefineAccessStatement]>), + /// A slice of DefineUserStatement specified on a database. + Dus(Arc<[DefineUserStatement]>), + /// A slice of DefineFunctionStatement specified on a database. + Fcs(Arc<[DefineFunctionStatement]>), + /// A slice of DefineTableStatement specified on a database. + Fts(Arc<[DefineTableStatement]>), + /// A slice of DefineModelStatement specified on a database. + Mls(Arc<[DefineModelStatement]>), + /// A slice of DefineParamStatement specified on a database. Pas(Arc<[DefineParamStatement]>), + /// A slice of DefineTableStatement specified on a database. Tbs(Arc<[DefineTableStatement]>), - // Sequences - Seq(U32), + /// A slice of DefineEventStatement specified on a table. + Evs(Arc<[DefineEventStatement]>), + /// A slice of DefineFieldStatement specified on a table. + Fds(Arc<[DefineFieldStatement]>), + /// A slice of DefineIndexStatement specified on a table. + Ixs(Arc<[DefineIndexStatement]>), + /// A slice of LiveStatement specified on a table. + Lvs(Arc<[LiveStatement]>), } -#[derive(Default)] -#[non_exhaustive] -pub struct Cache(pub HashMap); - -impl Cache { - /// Set a key in the cache - pub fn set(&mut self, key: Key, val: Entry) { - self.0.insert(key, val); +impl Entry { + /// Converts this cache entry into a single entry of arbitrary type. + /// This panics if called on a cache entry that is not an [`Entry::Any`]. + pub(super) fn into_type(self: Entry) -> Arc { + match self { + Entry::Any(v) => v.downcast::().unwrap(), + _ => unreachable!(), + } } - /// Get a key from the cache - pub fn get(&mut self, key: &Key) -> Option { - self.0.get(key).cloned() + /// Converts this cache entry into a slice of [`Node`]. + /// This panics if called on a cache entry that is not an [`Entry::Nds`]. + pub(super) fn into_nds(self) -> Arc<[Node]> { + match self { + Entry::Nds(v) => v, + _ => unreachable!(), + } } - /// Delete a key from the cache - pub fn del(&mut self, key: &Key) -> Option { - self.0.remove(key) + /// Converts this cache entry into a slice of [`DefineUserStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Rus`]. + pub(super) fn into_rus(self) -> Arc<[DefineUserStatement]> { + match self { + Entry::Rus(v) => v, + _ => unreachable!(), + } } - /// Clears a cache completely - pub fn clear(&mut self) { - self.0.clear() + /// Converts this cache entry into a slice of [`DefineAccessStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Ras`]. + pub(super) fn into_ras(self) -> Arc<[DefineAccessStatement]> { + match self { + Entry::Ras(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineNamespaceStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Nss`]. + pub(super) fn into_nss(self) -> Arc<[DefineNamespaceStatement]> { + match self { + Entry::Nss(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineAccessStatement`]. 
+ /// This panics if called on a cache entry that is not an [`Entry::Nas`]. + pub(super) fn into_nas(self) -> Arc<[DefineAccessStatement]> { + match self { + Entry::Nas(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineUserStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Nus`]. + pub(super) fn into_nus(self) -> Arc<[DefineUserStatement]> { + match self { + Entry::Nus(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineDatabaseStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Dbs`]. + pub(super) fn into_dbs(self) -> Arc<[DefineDatabaseStatement]> { + match self { + Entry::Dbs(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineAccessStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Das`]. + pub(super) fn into_das(self) -> Arc<[DefineAccessStatement]> { + match self { + Entry::Das(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineUserStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Dus`]. + pub(super) fn into_dus(self) -> Arc<[DefineUserStatement]> { + match self { + Entry::Dus(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineAnalyzerStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Azs`]. + pub(super) fn into_azs(self) -> Arc<[DefineAnalyzerStatement]> { + match self { + Entry::Azs(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineFunctionStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Fcs`]. + pub(super) fn into_fcs(self) -> Arc<[DefineFunctionStatement]> { + match self { + Entry::Fcs(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineParamStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Pas`]. + pub(super) fn into_pas(self) -> Arc<[DefineParamStatement]> { + match self { + Entry::Pas(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineModelStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Mls`]. + pub(super) fn into_mls(self) -> Arc<[DefineModelStatement]> { + match self { + Entry::Mls(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineTableStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Tbs`]. + pub(super) fn into_tbs(self) -> Arc<[DefineTableStatement]> { + match self { + Entry::Tbs(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineEventStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Evs`]. + pub(super) fn into_evs(self) -> Arc<[DefineEventStatement]> { + match self { + Entry::Evs(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineFieldStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Fds`]. + pub(super) fn into_fds(self) -> Arc<[DefineFieldStatement]> { + match self { + Entry::Fds(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineIndexStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Ixs`]. 
+ pub(super) fn into_ixs(self) -> Arc<[DefineIndexStatement]> { + match self { + Entry::Ixs(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`DefineTableStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Fts`]. + pub(super) fn into_fts(self) -> Arc<[DefineTableStatement]> { + match self { + Entry::Fts(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a slice of [`LiveStatement`]. + /// This panics if called on a cache entry that is not an [`Entry::Lvs`]. + pub(super) fn into_lvs(self) -> Arc<[LiveStatement]> { + match self { + Entry::Lvs(v) => v, + _ => unreachable!(), + } + } + /// Converts this cache entry into a single [`Value`]. + /// This panics if called on a cache entry that is not an [`Entry::Val`]. + pub(super) fn into_val(self) -> Arc { + match self { + Entry::Val(v) => v, + _ => unreachable!(), + } } } diff --git a/core/src/kvs/clock.rs b/core/src/kvs/clock.rs index 990b5f87..06a724cd 100644 --- a/core/src/kvs/clock.rs +++ b/core/src/kvs/clock.rs @@ -21,6 +21,9 @@ pub enum SizedClock { } impl SizedClock { + pub(crate) fn system() -> Self { + Self::System(Default::default()) + } pub async fn now(&self) -> Timestamp { match self { SizedClock::System(c) => c.now(), diff --git a/core/src/kvs/ds.rs b/core/src/kvs/ds.rs index 7b942ff5..35d0bd9d 100644 --- a/core/src/kvs/ds.rs +++ b/core/src/kvs/ds.rs @@ -1,8 +1,32 @@ -use std::collections::{BTreeMap, BTreeSet}; +use super::tr::Transactor; +use super::tx::Transaction; +use crate::cf; +use crate::ctx::Context; +#[cfg(feature = "jwks")] +use crate::dbs::capabilities::NetTarget; +use crate::dbs::{ + Attach, Capabilities, Executor, Notification, Options, Response, Session, Variables, +}; +use crate::err::Error; +#[cfg(feature = "jwks")] +use crate::iam::jwks::JwksCache; +use crate::iam::{Action, Auth, Error as IamError, Resource, Role}; +use crate::idx::trees::store::IndexStores; +use crate::kvs::clock::SizedClock; +#[allow(unused_imports)] +use crate::kvs::clock::SystemClock; +use crate::kvs::{LockType, LockType::*, TransactionType, TransactionType::*}; +use crate::sql::{statements::DefineUserStatement, Base, Query, Value}; +use crate::syn; +use crate::vs::{conv, Versionstamp}; +use channel::{Receiver, Sender}; +use futures::Future; +use reblessive::TreeStack; use std::fmt; #[cfg(any( feature = "kv-mem", feature = "kv-surrealkv", + feature = "kv-file", feature = "kv-rocksdb", feature = "kv-fdb", feature = "kv-tikv", @@ -12,51 +36,19 @@ use std::sync::Arc; use std::time::Duration; #[cfg(not(target_arch = "wasm32"))] use std::time::{SystemTime, UNIX_EPOCH}; - -use channel::{Receiver, Sender}; -use futures::{lock::Mutex, Future}; -use reblessive::{tree::Stk, TreeStack}; +#[cfg(feature = "jwks")] use tokio::sync::RwLock; use tracing::instrument; use tracing::trace; - +use uuid::Uuid; #[cfg(target_arch = "wasm32")] use wasmtimer::std::{SystemTime, UNIX_EPOCH}; -use super::tx::Transaction; -use crate::cf; -use crate::ctx::Context; -#[cfg(feature = "jwks")] -use crate::dbs::capabilities::NetTarget; -use crate::dbs::{ - node::Timestamp, Attach, Capabilities, Executor, Notification, Options, Response, Session, - Variables, -}; -use crate::err::Error; -#[cfg(feature = "jwks")] -use crate::iam::jwks::JwksCache; -use crate::iam::{Action, Auth, Error as IamError, Resource, Role}; -use crate::idx::trees::store::IndexStores; -use crate::key::root::hb::Hb; -use crate::kvs::clock::SizedClock; -#[allow(unused_imports)] -use crate::kvs::clock::SystemClock; -use 
crate::kvs::lq_cf::LiveQueryTracker; -use crate::kvs::lq_structs::{LqValue, TrackedResult, UnreachableLqType}; -use crate::kvs::lq_v2_fut::process_lq_notifications; -use crate::kvs::{LockType, LockType::*, TransactionType, TransactionType::*}; -use crate::options::EngineOptions; -use crate::sql::{self, statements::DefineUserStatement, Base, Query, Uuid, Value}; -use crate::syn; -use crate::vs::{conv, Oracle, Versionstamp}; +const TARGET: &str = "surrealdb::core::kvs::tr"; // If there are an infinite number of heartbeats, then we want to go batch-by-batch spread over several checks -const HEARTBEAT_BATCH_SIZE: u32 = 1000; const LQ_CHANNEL_SIZE: usize = 100; -// The batch size used for non-paged operations (i.e. if there are more results, they are ignored) -const NON_PAGED_BATCH_SIZE: u32 = 100_000; - // The role assigned to the initial user created when starting the server with credentials for the first time const INITIAL_USER_ROLE: &str = "owner"; @@ -78,14 +70,10 @@ pub struct Datastore { transaction_timeout: Option, // Capabilities for this datastore capabilities: Capabilities, - pub(super) engine_options: EngineOptions, - // The versionstamp oracle for this datastore. - // Used only in some datastores, such as tikv. - versionstamp_oracle: Arc>, // Whether this datastore enables live query notifications to subscribers pub(super) notification_channel: Option<(Sender, Receiver)>, // Clock for tracking time. It is read only and accessible to all transactions. It is behind a mutex as tests may write to it. - clock: Arc, + pub(super) clock: Arc, // The index store cache index_stores: IndexStores, #[cfg(feature = "jwks")] @@ -100,13 +88,8 @@ pub struct Datastore { ))] // The temporary directory temporary_directory: Option>, - pub(crate) lq_cf_store: Arc>, } -/// We always want to be circulating the live query information -/// And we will sometimes have an error attached but still not want to lose the LQ. -pub(crate) type BootstrapOperationResult = (LqValue, Option); - #[allow(clippy::large_enum_variant)] pub(super) enum Inner { #[cfg(feature = "kv-mem")] @@ -184,80 +167,55 @@ impl Datastore { /// # } /// ``` pub async fn new(path: &str) -> Result { - Self::new_full_impl(path, None).await + Self::new_with_clock(path, None).await } - #[allow(dead_code)] - #[cfg(test)] - pub async fn new_full( + #[allow(unused_variables)] + pub async fn new_with_clock( path: &str, - clock_override: Option>, + clock: Option>, ) -> Result { - Self::new_full_impl(path, clock_override).await - } - - #[allow(dead_code)] - async fn new_full_impl( - path: &str, - #[allow(unused_variables)] clock_override: Option>, - ) -> Result { - #[allow(unused_variables)] - let default_clock: Arc = Arc::new(SizedClock::System(SystemClock::new())); - - // removes warning if no storage is enabled. 
- #[cfg(not(any( - feature = "kv-mem", - feature = "kv-rocksdb", - feature = "kv-indxdb", - feature = "kv-tikv", - feature = "kv-fdb", - feature = "kv-surrealkv" - )))] - let _ = (clock_override, default_clock); - // Initiate the desired datastore let (inner, clock): (Result, Arc) = match path { + // Initiate an in-memory datastore "memory" => { #[cfg(feature = "kv-mem")] { - info!("Starting kvs store in {}", path); + info!(target: TARGET, "Starting kvs store in {}", path); let v = super::mem::Datastore::new().await.map(Inner::Mem); - let default_clock = Arc::new(SizedClock::System(SystemClock::new())); - let clock = clock_override.unwrap_or(default_clock); - info!("Started kvs store in {}", path); - Ok((v, clock)) + let c = clock.unwrap_or_else(|| Arc::new(SizedClock::system())); + info!(target: TARGET, "Started kvs store in {}", path); + Ok((v, c)) } #[cfg(not(feature = "kv-mem"))] return Err(Error::Ds("Cannot connect to the `memory` storage engine as it is not enabled in this build of SurrealDB".to_owned())); } - // Parse and initiate an File database + // Parse and initiate a File datastore s if s.starts_with("file:") => { #[cfg(feature = "kv-rocksdb")] { - info!("Starting kvs store at {}", path); + info!(target: TARGET, "Starting kvs store at {}", path); let s = s.trim_start_matches("file://"); let s = s.trim_start_matches("file:"); let v = super::rocksdb::Datastore::new(s).await.map(Inner::RocksDB); - let default_clock = Arc::new(SizedClock::System(SystemClock::new())); - let clock = clock_override.unwrap_or(default_clock); - info!("Started kvs store at {}", path); - Ok((v, clock)) + let c = clock.unwrap_or_else(|| Arc::new(SizedClock::system())); + info!(target: TARGET, "Started kvs store at {}", path); + Ok((v, c)) } #[cfg(not(feature = "kv-rocksdb"))] return Err(Error::Ds("Cannot connect to the `rocksdb` storage engine as it is not enabled in this build of SurrealDB".to_owned())); } - // Parse and initiate an RocksDB database + // Parse and initiate a RocksDB datastore s if s.starts_with("rocksdb:") => { #[cfg(feature = "kv-rocksdb")] { - info!("Starting kvs store at {}", path); + info!(target: TARGET, "Starting kvs store at {}", path); let s = s.trim_start_matches("rocksdb://"); let s = s.trim_start_matches("rocksdb:"); let v = super::rocksdb::Datastore::new(s).await.map(Inner::RocksDB); - info!("Started kvs store at {}", path); - let default_clock = Arc::new(SizedClock::System(SystemClock::new())); - let clock = clock_override.unwrap_or(default_clock); - Ok((v, clock)) + let c = clock.unwrap_or_else(|| Arc::new(SizedClock::system())); + info!(target: TARGET, "Started kvs store at {}", path); + Ok((v, c)) } #[cfg(not(feature = "kv-rocksdb"))] return Err(Error::Ds("Cannot connect to the `rocksdb` storage engine as it is not enabled in this build of SurrealDB".to_owned())); @@ -266,71 +224,65 @@ impl Datastore { s if s.starts_with("indxdb:") => { #[cfg(feature = "kv-indxdb")] { - info!("Starting kvs store at {}", path); + info!(target: TARGET, "Starting kvs store at {}", path); let s = s.trim_start_matches("indxdb://"); let s = s.trim_start_matches("indxdb:"); let v = super::indxdb::Datastore::new(s).await.map(Inner::IndxDB); - info!("Started kvs store at {}", path); - let default_clock = Arc::new(SizedClock::System(SystemClock::new())); - let clock = clock_override.unwrap_or(default_clock); - Ok((v, clock)) + let c = clock.unwrap_or_else(|| Arc::new(SizedClock::system())); + info!(target: TARGET, "Started kvs store at {}", path); + Ok((v, c)) } #[cfg(not(feature = "kv-indxdb"))] 
return Err(Error::Ds("Cannot connect to the `indxdb` storage engine as it is not enabled in this build of SurrealDB".to_owned())); } - // Parse and initiate a TiKV database + // Parse and initiate a TiKV datastore s if s.starts_with("tikv:") => { #[cfg(feature = "kv-tikv")] { - info!("Connecting to kvs store at {}", path); + info!(target: TARGET, "Connecting to kvs store at {}", path); let s = s.trim_start_matches("tikv://"); let s = s.trim_start_matches("tikv:"); let v = super::tikv::Datastore::new(s).await.map(Inner::TiKV); - info!("Connected to kvs store at {}", path); - let default_clock = Arc::new(SizedClock::System(SystemClock::new())); - let clock = clock_override.unwrap_or(default_clock); - Ok((v, clock)) + let c = clock.unwrap_or_else(|| Arc::new(SizedClock::system())); + info!(target: TARGET, "Connected to kvs store at {}", path); + Ok((v, c)) } #[cfg(not(feature = "kv-tikv"))] return Err(Error::Ds("Cannot connect to the `tikv` storage engine as it is not enabled in this build of SurrealDB".to_owned())); } - // Parse and initiate a FoundationDB database + // Parse and initiate a FoundationDB datastore s if s.starts_with("fdb:") => { #[cfg(feature = "kv-fdb")] { - info!("Connecting to kvs store at {}", path); + info!(target: TARGET, "Connecting to kvs store at {}", path); let s = s.trim_start_matches("fdb://"); let s = s.trim_start_matches("fdb:"); let v = super::fdb::Datastore::new(s).await.map(Inner::FoundationDB); - info!("Connected to kvs store at {}", path); - let default_clock = Arc::new(SizedClock::System(SystemClock::new())); - let clock = clock_override.unwrap_or(default_clock); - Ok((v, clock)) + let c = clock.unwrap_or_else(|| Arc::new(SizedClock::system())); + info!(target: TARGET, "Connected to kvs store at {}", path); + Ok((v, c)) } #[cfg(not(feature = "kv-fdb"))] return Err(Error::Ds("Cannot connect to the `foundationdb` storage engine as it is not enabled in this build of SurrealDB".to_owned())); } - // Parse and initiate a SurrealKV database + // Parse and initiate a SurrealKV datastore s if s.starts_with("surrealkv:") => { #[cfg(feature = "kv-surrealkv")] { - info!("Starting kvs store at {}", path); + info!(target: TARGET, "Starting kvs store at {}", path); let s = s.trim_start_matches("surrealkv://"); let s = s.trim_start_matches("surrealkv:"); let v = super::surrealkv::Datastore::new(s).await.map(Inner::SurrealKV); - info!("Started to kvs store at {}", path); - let default_clock = Arc::new(SizedClock::System(SystemClock::new())); - let clock = clock_override.unwrap_or(default_clock); - Ok((v, clock)) + let c = clock.unwrap_or_else(|| Arc::new(SizedClock::system())); + info!(target: TARGET, "Started to kvs store at {}", path); + Ok((v, c)) } #[cfg(not(feature = "kv-surrealkv"))] return Err(Error::Ds("Cannot connect to the `surrealkv` storage engine as it is not enabled in this build of SurrealDB".to_owned())); } // The datastore path is not valid _ => { - // use clock_override and default_clock to remove warning when no kv is enabled. 
-				let _ = default_clock;
-				info!("Unable to load the specified datastore {}", path);
+				info!(target: TARGET, "Unable to load the specified datastore {}", path);
 				Err(Error::Ds("Unable to load the specified datastore".into()))
 			}
 		}?;
@@ -338,15 +290,13 @@ impl Datastore {
 		inner.map(|inner| Self {
 			id: Uuid::new_v4(),
 			inner,
+			clock,
 			strict: false,
 			auth_enabled: false,
 			query_timeout: None,
 			transaction_timeout: None,
 			notification_channel: None,
 			capabilities: Capabilities::default(),
-			engine_options: EngineOptions::default(),
-			versionstamp_oracle: Arc::new(Mutex::new(Oracle::systime_counter())),
-			clock,
 			index_stores: IndexStores::default(),
 			#[cfg(feature = "jwks")]
 			jwks_cache: Arc::new(RwLock::new(JwksCache::new())),
@@ -358,7 +308,6 @@ impl Datastore {
 				feature = "kv-tikv",
 			))]
 			temporary_directory: None,
-			lq_cf_store: Arc::new(RwLock::new(LiveQueryTracker::new())),
 		})
 	}
@@ -411,14 +360,9 @@ impl Datastore {
 		feature = "kv-fdb",
 		feature = "kv-tikv",
 	))]
-	pub fn with_temporary_directory(mut self, path: PathBuf) -> Self {
-		self.temporary_directory = Some(Arc::new(path));
-		self
-	}
-
-	/// Set the engine options for the datastore
-	pub fn with_engine_options(mut self, engine_options: EngineOptions) -> Self {
-		self.engine_options = engine_options;
+	/// Set a temporary directory for ordering of large result sets
+	pub fn with_temporary_directory(mut self, path: Option<PathBuf>) -> Self {
+		self.temporary_directory = path.map(Arc::new);
 		self
 	}
@@ -431,6 +375,10 @@ impl Datastore {
 		self.auth_enabled
 	}
 
+	pub fn id(&self) -> Uuid {
+		self.id
+	}
+
 	/// Does the datastore allow connections to a network target?
 	#[cfg(feature = "jwks")]
 	pub(crate) fn allows_network_target(&self, net_target: &NetTarget) -> bool {
@@ -442,379 +390,47 @@ impl Datastore {
 		&self.jwks_cache
 	}
 
-	/// Setup the initial credentials
-	/// Trigger the `unreachable definition` compilation error, probably due to this issue:
-	/// https://github.com/rust-lang/rust/issues/111370
-	#[allow(unreachable_code, unused_variables)]
-	pub async fn setup_initial_creds(&self, username: &str, password: &str) -> Result<(), Error> {
-		// Start a new writeable transaction
-		let txn = self.transaction(Write, Optimistic).await?.rollback_with_panic().enclose();
-		// Fetch the root users from the storage
-		let users = txn.lock().await.all_root_users().await;
-		// Process credentials, depending on existing users
-		match users {
-			Ok(v) if v.is_empty() => {
-				// Display information in the logs
-				info!("Credentials were provided, and no root users were found. The root user '{}' will be created", username);
-				// Create and save a new root user
-				let stm =
-					DefineUserStatement::from((Base::Root, username, password, INITIAL_USER_ROLE));
-				let ctx = Context::default().set_transaction(txn.clone());
-				let opt = Options::new().with_auth(Arc::new(Auth::for_root(Role::Owner)));
-				let _ = stm.compute(&ctx, &opt, None).await?;
-				// We added a new user, so commit the transaction
-				txn.lock().await.commit().await?;
-				// Everything ok
-				Ok(())
-			}
-			Ok(_) => {
-				// Display warnings in the logs
-				warn!("Credentials were provided, but existing root users were found. 
The root user '{}' will not be created", username); - warn!("Consider removing the --user and --pass arguments from the server start command"); - // We didn't write anything, so just rollback - txn.lock().await.cancel().await?; - // Everything ok - Ok(()) - } - Err(e) => { - // There was an unexpected error, so rollback - txn.lock().await.cancel().await?; - // Return any error - Err(e) - } - } - } - - // Initialise bootstrap with implicit values intended for runtime - // An error indicates that a failure happened, but that does not mean that the bootstrap - // completely failed. It may have partially completed. It certainly has side-effects - // that weren't reversed, as it tries to bootstrap and garbage collect to the best of its - // ability. - // NOTE: If you get rust mutex deadlocks, check your transactions around this method. - // This should be called before any transactions are made in release mode - // In tests, it should be outside any other transaction - in isolation. - // We cannot easily systematise this, since we aren't counting transactions created. + // Initialise the cluster and run bootstrap utilities + #[instrument(err, level = "debug", target = "surrealdb::core::kvs::ds", skip_all)] pub async fn bootstrap(&self) -> Result<(), Error> { - // First we clear unreachable state that could exist by upgrading from - // previous beta versions - trace!("Clearing unreachable state"); - let mut tx = self.transaction(Write, Optimistic).await?; - match self.clear_unreachable_state(&mut tx).await { - Ok(_) => tx.commit().await, - Err(e) => { - let msg = format!("Error clearing unreachable cluster state at bootstrap: {:?}", e); - error!(msg); - tx.cancel().await?; - Err(Error::Tx(msg)) - } - }?; - - trace!("Bootstrapping {}", self.id); - let mut tx = self.transaction(Write, Optimistic).await?; - let archived = match self.register_remove_and_archive(&mut tx, &self.id).await { - Ok(archived) => { - tx.commit().await?; - archived - } - Err(e) => { - error!("Error bootstrapping mark phase: {:?}", e); - tx.cancel().await?; - return Err(e); - } - }; - // Filtered includes all lqs that should be used in subsequent step - // Currently that is all of them, no matter the error encountered - let mut filtered: Vec = vec![]; - // err is used to aggregate all errors across all stages - let mut err = vec![]; - for res in archived { - match res { - (lq, Some(e)) => { - filtered.push(lq); - err.push(e); - } - (lq, None) => { - filtered.push(lq); - } - } - } - - let mut tx = self.transaction(Write, Optimistic).await?; - let val = self.remove_archived(&mut tx, filtered).await; - let resolve_err = match val { - Ok(_) => tx.commit().await, - Err(e) => { - error!("Error bootstrapping sweep phase: {:?}", e); - match tx.cancel().await { - Ok(_) => Err(e), - Err(e) => { - // We have a nested error - Err(Error::Tx(format!("Error bootstrapping sweep phase: {:?} and error cancelling transaction: {:?}", e, e))) - } - } - } - }; - if let Err(e) = resolve_err { - err.push(e); - } - if !err.is_empty() { - error!("Error bootstrapping sweep phase: {:?}", err); - return Err(Error::Tx(format!("Error bootstrapping sweep phase: {:?}", err))); - } + // Insert this node in the cluster + self.insert_node(self.id).await?; + // Mark expired nodes as archived + self.expire_nodes().await?; + // Everything ok Ok(()) } - // Node registration + "mark" stage of mark-and-sweep gc - pub async fn register_remove_and_archive( - &self, - tx: &mut Transaction, - node_id: &Uuid, - ) -> Result, Error> { - trace!("Registering node {}", 
node_id);
-		let timestamp = tx.clock().await;
-		self.register_membership(tx, node_id, timestamp).await?;
-		// Determine the timeout for when a cluster node is expired
-		let ts_expired = (&timestamp - &sql::duration::Duration::from_secs(5))?;
-		let dead = self.remove_dead_nodes(tx, &ts_expired).await?;
-		trace!("Archiving dead nodes: {:?}", dead);
-		self.archive_dead_lqs(tx, &dead, node_id).await
-	}
-
-	// Adds entries to the KV store indicating membership information
-	pub async fn register_membership(
-		&self,
-		tx: &mut Transaction,
-		node_id: &Uuid,
-		timestamp: Timestamp,
-	) -> Result<(), Error> {
-		tx.set_nd(node_id.0).await?;
-		tx.set_hb(timestamp, node_id.0).await?;
-		Ok(())
-	}
-
-	/// Delete dead heartbeats and nodes
-	/// Returns node IDs
-	pub async fn remove_dead_nodes(
-		&self,
-		tx: &mut Transaction,
-		ts: &Timestamp,
-	) -> Result<Vec<Uuid>, Error> {
-		let hbs = self.delete_dead_heartbeats(tx, ts).await?;
-		trace!("Found {} expired heartbeats", hbs.len());
-		let mut nodes = vec![];
-		for hb in hbs {
-			trace!("Deleting node {}", &hb.nd);
-			// TODO should be delr in case of nested entries
-			tx.del_nd(hb.nd).await?;
-			nodes.push(crate::sql::uuid::Uuid::from(hb.nd));
+	/// Setup the initial cluster access credentials
+	#[instrument(err, level = "debug", target = "surrealdb::core::kvs::ds", skip_all)]
+	pub async fn setup_initial_creds(&self, user: &str, pass: &str) -> Result<(), Error> {
+		// Start a new writeable transaction
+		let txn = self.transaction(Write, Optimistic).await?.enclose();
+		// Fetch the root users from the storage
+		let users = catch!(txn, txn.all_root_users());
+		// Process credentials, depending on existing users
+		if users.is_empty() {
+			// Display information in the logs
+			info!(target: TARGET, "Credentials were provided, and no root users were found. The root user '{user}' will be created");
+			// Create a new root user definition
+			let stm = DefineUserStatement::from((Base::Root, user, pass, INITIAL_USER_ROLE));
+			let opt = Options::new().with_auth(Arc::new(Auth::for_root(Role::Owner)));
+			let ctx = Context::default().with_transaction(txn.clone());
+			catch!(txn, stm.compute(&ctx, &opt, None));
+			// We added a user, so commit the transaction
+			txn.commit().await
+		} else {
+			// Display information in the logs
+			warn!(target: TARGET, "Credentials were provided, but existing root users were found. 
The root user '{user}' will not be created"); + warn!(target: TARGET, "Consider removing the --user and --pass arguments from the server start command"); + // We didn't write anything, so just rollback + txn.cancel().await } - Ok(nodes) - } - - /// Accepts cluster IDs - /// Archives related live queries - /// Returns live query keys that can be used for deletes - /// - /// The reason we archive first is to stop other nodes from picking it up for further updates - /// This means it will be easier to wipe the range in a subsequent transaction - pub async fn archive_dead_lqs( - &self, - tx: &mut Transaction, - nodes: &[Uuid], - this_node_id: &Uuid, - ) -> Result, Error> { - let mut archived = vec![]; - for nd in nodes.iter() { - trace!("Archiving node {}", &nd); - // Scan on node prefix for LQ space - let node_lqs = tx.scan_ndlq(nd, NON_PAGED_BATCH_SIZE).await?; - trace!("Found {} LQ entries for {:?}", node_lqs.len(), nd); - for lq in node_lqs { - trace!("Archiving query {:?}", &lq); - let node_archived_lqs = - match self.archive_lv_for_node(tx, &lq.nd, *this_node_id).await { - Ok(lq) => lq, - Err(e) => { - error!("Error archiving lqs during bootstrap phase: {:?}", e); - vec![] - } - }; - // We need to add lv nodes not found so that they can be deleted in second stage - for lq_value in node_archived_lqs { - archived.push(lq_value); - } - } - } - Ok(archived) - } - - pub async fn remove_archived( - &self, - tx: &mut Transaction, - archived: Vec, - ) -> Result<(), Error> { - trace!("Gone into removing archived: {:?}", archived.len()); - for lq in archived { - // Delete the cluster key, used for finding LQ associated with a node - let key = crate::key::node::lq::new(lq.nd.0, lq.lq.0, &lq.ns, &lq.db); - tx.del(key).await?; - // Delete the table key, used for finding LQ associated with a table - let key = crate::key::table::lq::new(&lq.ns, &lq.db, &lq.tb, lq.lq.0); - tx.del(key).await?; - } - Ok(()) - } - - pub async fn clear_unreachable_state(&self, tx: &mut Transaction) -> Result<(), Error> { - // Scan nodes - let cluster = tx.scan_nd(NON_PAGED_BATCH_SIZE).await?; - trace!("Found {} nodes", cluster.len()); - let mut unreachable_nodes = BTreeMap::new(); - for cl in &cluster { - unreachable_nodes.insert(cl.name.clone(), cl.clone()); - } - // Scan all heartbeats - let end_of_time = Timestamp { - // We remove one, because the scan range adds one - value: u64::MAX - 1, - }; - let hbs = tx.scan_hb(&end_of_time, NON_PAGED_BATCH_SIZE).await?; - trace!("Found {} heartbeats", hbs.len()); - for hb in hbs { - match unreachable_nodes.remove(&hb.nd.to_string()) { - None => { - // Didnt exist in cluster and should be deleted - tx.del_hb(hb.hb, hb.nd).await?; - } - Some(_) => {} - } - } - // Remove unreachable nodes - for (_, cl) in unreachable_nodes { - trace!("Removing unreachable node {}", cl.name); - tx.del_nd( - uuid::Uuid::parse_str(&cl.name).map_err(|e| { - Error::Unimplemented(format!("cluster id was not uuid: {:?}", e)) - })?, - ) - .await?; - } - // Scan node live queries for every node - let mut nd_lq_set: BTreeSet = BTreeSet::new(); - for cl in &cluster { - let nds = tx.scan_ndlq(&uuid::Uuid::parse_str(&cl.name).map_err(|e| { - Error::Unimplemented(format!("cluster id was not uuid when parsing to aggregate cluster live queries: {:?}", e)) - })?, NON_PAGED_BATCH_SIZE).await?; - nd_lq_set.extend(nds.into_iter().map(UnreachableLqType::Nd)); - } - trace!("Found {} node live queries", nd_lq_set.len()); - // Scan tables for all live queries - // let mut tb_lqs: Vec = vec![]; - let mut tb_lq_set: BTreeSet 
= BTreeSet::new(); - for ndlq in &nd_lq_set { - let lq = ndlq.get_inner(); - let tbs = tx.scan_tblq(&lq.ns, &lq.db, &lq.tb, NON_PAGED_BATCH_SIZE).await?; - tb_lq_set.extend(tbs.into_iter().map(UnreachableLqType::Tb)); - } - trace!("Found {} table live queries", tb_lq_set.len()); - // Find and delete missing - for missing in nd_lq_set.symmetric_difference(&tb_lq_set) { - match missing { - UnreachableLqType::Nd(ndlq) => { - warn!("Deleting ndlq {:?}", &ndlq); - tx.del_ndlq(ndlq.nd.0, ndlq.lq.0, &ndlq.ns, &ndlq.db).await?; - } - UnreachableLqType::Tb(tblq) => { - warn!("Deleting tblq {:?}", &tblq); - tx.del_tblq(&tblq.ns, &tblq.db, &tblq.tb, tblq.lq.0).await?; - } - } - } - trace!("Successfully cleared cluster of unreachable state"); - Ok(()) - } - - // Garbage collection task to run when a client disconnects from a surrealdb node - // i.e. we know the node, we are not performing a full wipe on the node - // and the wipe must be fully performed by this node - pub async fn garbage_collect_dead_session( - &self, - live_queries: &[uuid::Uuid], - ) -> Result<(), Error> { - let mut tx = self.transaction(Write, Optimistic).await?; - - // Find all the LQs we own, so that we can get the ns/ds from provided uuids - // We may improve this in future by tracking in web layer - let lqs = tx.scan_ndlq(&self.id, NON_PAGED_BATCH_SIZE).await?; - let mut hits = vec![]; - for lq_value in lqs { - if live_queries.contains(&lq_value.lq) { - hits.push(lq_value.clone()); - let lq = crate::key::node::lq::Lq::new( - lq_value.nd.0, - lq_value.lq.0, - lq_value.ns.as_str(), - lq_value.db.as_str(), - ); - tx.del(lq).await?; - trace!("Deleted lq {:?} as part of session garbage collection", lq_value.clone()); - } - } - - // Now delete the table entries for the live queries - for lq in hits { - let lv = - crate::key::table::lq::new(lq.ns.as_str(), lq.db.as_str(), lq.tb.as_str(), lq.lq.0); - tx.del(lv.clone()).await?; - trace!("Deleted lv {:?} as part of session garbage collection", lv); - } - tx.commit().await - } - - // Returns a list of live query IDs - pub async fn archive_lv_for_node( - &self, - tx: &mut Transaction, - nd: &Uuid, - this_node_id: Uuid, - ) -> Result, Error> { - let lqs = tx.all_lq(nd).await?; - trace!("Archiving lqs and found {} LQ entries for {}", lqs.len(), nd); - let mut ret: Vec = vec![]; - for lq in lqs { - let lv_res = - tx.get_tb_live(lq.ns.as_str(), lq.db.as_str(), lq.tb.as_str(), &lq.lq).await; - if let Err(e) = lv_res { - error!("Error getting live query for node {}: {:?}", nd, e); - ret.push((lq, Some(e))); - continue; - } - let lv = lv_res.unwrap(); - let archived_lvs = lv.clone().archive(this_node_id); - tx.putc_tblq(&lq.ns, &lq.db, &lq.tb, archived_lvs, Some(lv)).await?; - ret.push((lq, None)); - } - Ok(ret) - } - - /// Given a timestamp, delete all the heartbeats that have expired - /// Return the removed heartbeats as they will contain node information - pub async fn delete_dead_heartbeats( - &self, - tx: &mut Transaction, - ts: &Timestamp, - ) -> Result, Error> { - let dead = tx.scan_hb(ts, HEARTBEAT_BATCH_SIZE).await?; - // Delete the heartbeat and everything nested - tx.delr_hb(dead.clone(), NON_PAGED_BATCH_SIZE).await?; - for dead_node in dead.clone() { - tx.del_nd(dead_node.nd).await?; - } - Ok::, Error>(dead) } // tick is called periodically to perform maintenance tasks. // This is called every TICK_INTERVAL. 
+ #[instrument(level = "debug", skip(self))] pub async fn tick(&self) -> Result<(), Error> { let now = SystemTime::now().duration_since(UNIX_EPOCH).map_err(|e| { Error::Internal(format!("Clock may have gone backwards: {:?}", e.duration())) @@ -827,12 +443,20 @@ impl Datastore { // tick_at is the utility function that is called by tick. // It is handy for testing, because it allows you to specify the timestamp, // without depending on a system clock. + #[instrument(level = "debug", skip(self))] pub async fn tick_at(&self, ts: u64) -> Result<(), Error> { - trace!("Ticking at timestamp {} ({:?})", ts, conv::u64_to_versionstamp(ts)); + trace!(target: TARGET, "Ticking at timestamp {ts} ({:?})", conv::u64_to_versionstamp(ts)); let _vs = self.save_timestamp_for_versionstamp(ts).await?; self.garbage_collect_stale_change_feeds(ts).await?; - // TODO Add LQ GC - // TODO Add Node GC? + // Update this node in the cluster + self.update_node(self.id).await?; + // Mark expired nodes as archived + self.expire_nodes().await?; + // Cleanup expired nodes data + self.cleanup_nodes().await?; + // Garbage collect other data + self.garbage_collect().await?; + // Everything ok Ok(()) } @@ -842,8 +466,8 @@ impl Datastore { &self, ts: u64, ) -> Result, Error> { - let mut tx = self.transaction(Write, Optimistic).await?; - match self.save_timestamp_for_versionstamp_impl(ts, &mut tx).await { + let tx = self.transaction(Write, Optimistic).await?; + match self.save_timestamp_for_versionstamp_impl(ts, &tx).await { Ok(vs) => Ok(vs), Err(e) => { match tx.cancel().await { @@ -858,41 +482,10 @@ impl Datastore { } } - /// Poll change feeds for live query notifications - pub async fn process_lq_notifications( - &self, - stk: &mut Stk, - ctx: &Context<'_>, - opt: &Options, - ) -> Result<(), Error> { - process_lq_notifications(self, ctx, stk, opt).await - } - - /// Add and kill live queries being track on the datastore - /// These get polled by the change feed tick - pub(crate) async fn handle_postprocessing_of_statements( - &self, - lqs: &Vec, - ) -> Result<(), Error> { - // Lock the local live queries - let mut lq_cf_store = self.lq_cf_store.write().await; - for lq in lqs { - match lq { - TrackedResult::LiveQuery(lq) => { - lq_cf_store.register_live_query(lq, Versionstamp::default()).unwrap(); - } - TrackedResult::KillQuery(kill_entry) => { - lq_cf_store.unregister_live_query(kill_entry); - } - } - } - Ok(()) - } - async fn save_timestamp_for_versionstamp_impl( &self, ts: u64, - tx: &mut Transaction, + tx: &Transaction, ) -> Result, Error> { let mut vs: Option = None; let nses = tx.all_ns().await?; @@ -904,7 +497,7 @@ impl Datastore { for db in dbs { let db = db.name.as_str(); // TODO(SUR-341): This is incorrect, it's a [ns,db] to vs pair - vs = Some(tx.set_timestamp_for_versionstamp(ts, ns, db, true).await?); + vs = Some(tx.lock().await.set_timestamp_for_versionstamp(ts, ns, db).await?); } } tx.commit().await?; @@ -913,8 +506,8 @@ impl Datastore { // garbage_collect_stale_change_feeds deletes all change feed entries that are older than the watermarks. 
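A sketch of how `tick_at` can drive maintenance deterministically in tests (hypothetical code, not part of the patch; assumes `kv-mem`):

```rust
// Hypothetical crate-internal test driving maintenance manually.
async fn tick_example() -> Result<(), Error> {
    let ds = Datastore::new("memory").await?;
    // Pass explicit unix-epoch timestamps instead of reading a system clock,
    // so versionstamps and node expiry can be advanced step by step.
    ds.tick_at(1_700_000_000).await?;
    ds.tick_at(1_700_000_060).await?;
    Ok(())
}
```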
 	// garbage_collect_stale_change_feeds deletes all change feed entries that are older than the watermarks.
 	pub(crate) async fn garbage_collect_stale_change_feeds(&self, ts: u64) -> Result<(), Error> {
-		let mut tx = self.transaction(Write, Optimistic).await?;
-		if let Err(e) = self.garbage_collect_stale_change_feeds_impl(ts, &mut tx).await {
+		let tx = self.transaction(Write, Optimistic).await?;
+		if let Err(e) = self.garbage_collect_stale_change_feeds_impl(&tx, ts).await {
 			return match tx.cancel().await {
 				Ok(_) => {
 					Err(e)
@@ -929,42 +522,14 @@ impl Datastore {
 
 	async fn garbage_collect_stale_change_feeds_impl(
 		&self,
+		tx: &Transaction,
 		ts: u64,
-		tx: &mut Transaction,
 	) -> Result<(), Error> {
-		// TODO Make gc batch size/limit configurable?
-		cf::gc_all_at(tx, ts, Some(100)).await?;
+		cf::gc_all_at(tx, ts).await?;
 		tx.commit().await?;
 		Ok(())
 	}
 
-	// Creates a heartbeat entry for the member indicating to the cluster
-	// that the node is alive.
-	// This is the preferred way of creating heartbeats inside the database, so try to use this.
-	pub async fn heartbeat(&self) -> Result<(), Error> {
-		let mut tx = self.transaction(Write, Optimistic).await?;
-		let timestamp = tx.clock().await;
-		self.heartbeat_full(&mut tx, timestamp, self.id).await?;
-		tx.commit().await
-	}
-
-	// Creates a heartbeat entry for the member indicating to the cluster
-	// that the node is alive. Intended for testing.
-	// This includes all dependencies that are hard to control and is done in such a way for testing.
-	// Inside the database, try to use the heartbeat() function instead.
-	pub async fn heartbeat_full(
-		&self,
-		tx: &mut Transaction,
-		timestamp: Timestamp,
-		node_id: Uuid,
-	) -> Result<(), Error> {
-		tx.set_hb(timestamp, node_id.0).await
-	}
-
-	// -----
-	// End cluster helpers, storage functions here
-	// -----
-
 	/// Create a new transaction on this datastore
 	///
 	/// ```rust,no_run
@@ -985,66 +550,60 @@ impl Datastore {
 		write: TransactionType,
 		lock: LockType,
 	) -> Result<Transaction, Error> {
+		// Specify if the transaction is writeable
 		#[allow(unused_variables)]
 		let write = match write {
 			Read => false,
 			Write => true,
 		};
-
+		// Specify if the transaction is lockable
 		#[allow(unused_variables)]
 		let lock = match lock {
 			Pessimistic => true,
 			Optimistic => false,
 		};
-
+		// Create a new transaction on the datastore
 		#[allow(unused_variables)]
 		let inner = match &self.inner {
 			#[cfg(feature = "kv-mem")]
 			Inner::Mem(v) => {
 				let tx = v.transaction(write, lock).await?;
-				super::tx::Inner::Mem(tx)
+				super::tr::Inner::Mem(tx)
 			}
 			#[cfg(feature = "kv-rocksdb")]
 			Inner::RocksDB(v) => {
 				let tx = v.transaction(write, lock).await?;
-				super::tx::Inner::RocksDB(tx)
+				super::tr::Inner::RocksDB(tx)
 			}
 			#[cfg(feature = "kv-indxdb")]
 			Inner::IndxDB(v) => {
 				let tx = v.transaction(write, lock).await?;
-				super::tx::Inner::IndxDB(tx)
+				super::tr::Inner::IndxDB(tx)
 			}
 			#[cfg(feature = "kv-tikv")]
 			Inner::TiKV(v) => {
 				let tx = v.transaction(write, lock).await?;
-				super::tx::Inner::TiKV(tx)
+				super::tr::Inner::TiKV(tx)
 			}
 			#[cfg(feature = "kv-fdb")]
 			Inner::FoundationDB(v) => {
 				let tx = v.transaction(write, lock).await?;
-				super::tx::Inner::FoundationDB(tx)
+				super::tr::Inner::FoundationDB(tx)
 			}
 			#[cfg(feature = "kv-surrealkv")]
 			Inner::SurrealKV(v) => {
 				let tx = v.transaction(write, lock).await?;
-				super::tx::Inner::SurrealKV(tx)
+				super::tr::Inner::SurrealKV(tx)
 			}
 			#[allow(unreachable_patterns)]
 			_ => unreachable!(),
 		};
-
-		let (send, recv): (Sender<Notification>, Receiver<Notification>) =
-			channel::bounded(LQ_CHANNEL_SIZE);
-
-		Ok(Transaction {
+		Ok(Transaction::new(Transactor {
 			inner,
-			cache: super::cache::Cache::default(),
+			stash: super::stash::Stash::default(),
 			cf: cf::Writer::new(),
-			vso: 
self.versionstamp_oracle.clone(), clock: self.clock.clone(), - prepared_async_events: (Arc::new(send), Arc::new(recv)), - engine_options: self.engine_options, - }) + })) } /// Parse and execute an SQL query @@ -1116,7 +675,7 @@ impl Datastore { } // Create a new query options let opt = Options::default() - .with_id(self.id.0) + .with_id(self.id) .with_ns(sess.ns()) .with_db(sess.db()) .with_live(sess.live()) @@ -1148,15 +707,7 @@ impl Datastore { // Store the query variables let ctx = vars.attach(ctx)?; // Process all statements - let res = exe.execute(ctx, opt, ast).await; - match res { - Ok((responses, lives)) => { - // Register live queries - self.handle_postprocessing_of_statements(&lives).await?; - Ok(responses) - } - Err(e) => Err(e), - } + exe.execute(ctx, opt, ast).await } /// Ensure a SQL [`Value`] is fully computed @@ -1188,9 +739,6 @@ impl Datastore { if sess.expired() { return Err(Error::ExpiredSession); } - - let mut stack = TreeStack::new(); - // Check if anonymous actors can compute values when auth is enabled // TODO(sgirones): Check this as part of the authorisation layer if sess.au.is_anon() && self.auth_enabled && !self.capabilities.allows_guest_access() { @@ -1201,9 +749,11 @@ impl Datastore { } .into()); } + // Create a new memory stack + let mut stack = TreeStack::new(); // Create a new query options let opt = Options::default() - .with_id(self.id.0) + .with_id(self.id) .with_ns(sess.ns()) .with_db(sess.db()) .with_live(sess.live()) @@ -1228,16 +778,16 @@ impl Datastore { let ctx = vars.attach(ctx)?; // Start a new transaction let txn = self.transaction(val.writeable().into(), Optimistic).await?.enclose(); - // Set the context transaction - let ctx = ctx.set_transaction(txn.clone()); + // Store the transaction + let ctx = ctx.with_transaction(txn.clone()); // Compute the value let res = stack.enter(|stk| val.compute(stk, &ctx, &opt, None)).finish().await; // Store any data match (res.is_ok(), val.writeable()) { // If the compute was successful, then commit if writeable - (true, true) => txn.lock().await.commit().await?, + (true, true) => txn.commit().await?, // Cancel if the compute was an error, or if readonly - (_, _) => txn.lock().await.cancel().await?, + (_, _) => txn.cancel().await?, }; // Return result res @@ -1276,11 +826,11 @@ impl Datastore { if sess.expired() { return Err(Error::ExpiredSession); } - + // Create a new memory stack let mut stack = TreeStack::new(); // Create a new query options let opt = Options::default() - .with_id(self.id.0) + .with_id(self.id) .with_ns(sess.ns()) .with_db(sess.db()) .with_live(sess.live()) @@ -1305,16 +855,16 @@ impl Datastore { let ctx = vars.attach(ctx)?; // Start a new transaction let txn = self.transaction(val.writeable().into(), Optimistic).await?.enclose(); - let ctx = ctx.set_transaction(txn.clone()); - + // Store the transaction + let ctx = ctx.with_transaction(txn.clone()); // Compute the value let res = stack.enter(|stk| val.compute(stk, &ctx, &opt, None)).finish().await; // Store any data match (res.is_ok(), val.writeable()) { // If the compute was successful, then commit if writeable - (true, true) => txn.lock().await.commit().await?, + (true, true) => txn.commit().await?, // Cancel if the compute was an error, or if readonly - (_, _) => txn.lock().await.cancel().await?, + (_, _) => txn.cancel().await?, }; // Return result res @@ -1345,14 +895,18 @@ impl Datastore { } /// Performs a database import from SQL - #[instrument(level = "debug", skip(self, sess, sql))] + #[instrument(level = "debug", skip_all)] pub 
async fn import(&self, sql: &str, sess: &Session) -> Result<Vec<Response>, Error> {
+		// Check if the session has expired
+		if sess.expired() {
+			return Err(Error::ExpiredSession);
+		}
 		// Execute the SQL import
 		self.execute(sql, sess, None).await
 	}
 
 	/// Performs a full database export as SQL
-	#[instrument(level = "debug", skip(self, sess, chn))]
+	#[instrument(level = "debug", skip_all)]
 	pub async fn export(
 		&self,
 		sess: &Session,
@@ -1365,7 +919,7 @@ impl Datastore {
 		// Retrieve the provided NS and DB
 		let (ns, db) = crate::iam::check::check_ns_db(sess)?;
 		// Create a new readonly transaction
-		let mut txn = self.transaction(Read, Optimistic).await?;
+		let txn = self.transaction(Read, Optimistic).await?;
 		// Return an async export job
 		Ok(async move {
 			// Process the export
@@ -1421,7 +975,7 @@ mod test {
 	let dbs = Datastore::new("memory").await.unwrap().with_capabilities(Capabilities::all());
 
 	let opt = Options::default()
-		.with_id(dbs.id.0)
+		.with_id(dbs.id)
 		.with_ns(Some("test".into()))
 		.with_db(Some("test".into()))
 		.with_live(false)
@@ -1435,8 +989,9 @@ mod test {
 	// Set context capabilities
 	ctx.add_capabilities(dbs.capabilities.clone());
 	// Start a new transaction
-	let txn = dbs.transaction(val.writeable().into(), Optimistic).await?.enclose();
-	let ctx = ctx.set_transaction(txn);
+	let txn = dbs.transaction(val.writeable().into(), Optimistic).await?;
+	// Store the transaction
+	let ctx = ctx.with_transaction(txn.enclose());
 	// Compute the value
 	let mut stack = reblessive::tree::TreeStack::new();
 	let res = stack.enter(|stk| val.compute(stk, &ctx, &opt, None)).finish().await.unwrap();
diff --git a/core/src/kvs/export.rs b/core/src/kvs/export.rs
new file mode 100644
index 00000000..1940674c
--- /dev/null
+++ b/core/src/kvs/export.rs
@@ -0,0 +1,208 @@
+use super::Transaction;
+use crate::cnf::EXPORT_BATCH_SIZE;
+use crate::err::Error;
+use crate::sql::paths::EDGE;
+use crate::sql::paths::IN;
+use crate::sql::paths::OUT;
+use crate::sql::Value;
+use channel::Sender;
+
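Before the method itself, a sketch of how the export stream might be consumed (hypothetical, not part of the patch; assumes an async runtime and the `channel` crate used by the surrounding code):

```rust
// Hypothetical consumer: drain exported SQL bytes while they are produced.
async fn dump(ds: &Datastore) -> Result<(), Error> {
    let (chn, rx) = channel::bounded::<Vec<u8>>(10);
    let txn = ds.transaction(TransactionType::Read, LockType::Optimistic).await?;
    let producer = async move { txn.export("test", "test", chn).await };
    let consumer = async {
        // The loop ends when the sender is dropped by the producer
        while let Ok(bytes) = rx.recv().await {
            print!("{}", String::from_utf8_lossy(&bytes));
        }
    };
    let (res, _) = futures::join!(producer, consumer);
    res
}
```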
+impl Transaction {
+	/// Writes the full database contents as binary SQL.
+	pub async fn export(&self, ns: &str, db: &str, chn: Sender<Vec<u8>>) -> Result<(), Error> {
+		// Output OPTIONS
+		{
+			chn.send(bytes!("-- ------------------------------")).await?;
+			chn.send(bytes!("-- OPTION")).await?;
+			chn.send(bytes!("-- ------------------------------")).await?;
+			chn.send(bytes!("")).await?;
+			chn.send(bytes!("OPTION IMPORT;")).await?;
+			chn.send(bytes!("")).await?;
+		}
+		// Output USERS
+		{
+			let dus = self.all_db_users(ns, db).await?;
+			if !dus.is_empty() {
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("-- USERS")).await?;
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("")).await?;
+				for us in dus.iter() {
+					chn.send(bytes!(format!("{us};"))).await?;
+				}
+				chn.send(bytes!("")).await?;
+			}
+		}
+		// Output ACCESSES
+		{
+			let dts = self.all_db_accesses(ns, db).await?;
+			if !dts.is_empty() {
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("-- ACCESSES")).await?;
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("")).await?;
+				for dt in dts.iter() {
+					chn.send(bytes!(format!("{dt};"))).await?;
+				}
+				chn.send(bytes!("")).await?;
+			}
+		}
+		// Output PARAMS
+		{
+			let pas = self.all_db_params(ns, db).await?;
+			if !pas.is_empty() {
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("-- PARAMS")).await?;
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("")).await?;
+				for pa in pas.iter() {
+					chn.send(bytes!(format!("{pa};"))).await?;
+				}
+				chn.send(bytes!("")).await?;
+			}
+		}
+		// Output FUNCTIONS
+		{
+			let fcs = self.all_db_functions(ns, db).await?;
+			if !fcs.is_empty() {
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("-- FUNCTIONS")).await?;
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("")).await?;
+				for fc in fcs.iter() {
+					chn.send(bytes!(format!("{fc};"))).await?;
+				}
+				chn.send(bytes!("")).await?;
+			}
+		}
+		// Output ANALYZERS
+		{
+			let azs = self.all_db_analyzers(ns, db).await?;
+			if !azs.is_empty() {
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("-- ANALYZERS")).await?;
+				chn.send(bytes!("-- ------------------------------")).await?;
+				chn.send(bytes!("")).await?;
+				for az in azs.iter() {
+					chn.send(bytes!(format!("{az};"))).await?;
+				}
+				chn.send(bytes!("")).await?;
+			}
+		}
+		// Output TABLES
+		{
+			let tbs = self.all_tb(ns, db).await?;
+			if !tbs.is_empty() {
+				for tb in tbs.iter() {
+					// Output TABLE
+					chn.send(bytes!("-- ------------------------------")).await?;
+					chn.send(bytes!(format!("-- TABLE: {}", tb.name))).await?;
+					chn.send(bytes!("-- ------------------------------")).await?;
+					chn.send(bytes!("")).await?;
+					chn.send(bytes!(format!("{tb};"))).await?;
+					chn.send(bytes!("")).await?;
+					// Output FIELDS
+					let fds = self.all_tb_fields(ns, db, &tb.name).await?;
+					if !fds.is_empty() {
+						for fd in fds.iter() {
+							chn.send(bytes!(format!("{fd};"))).await?;
+						}
+						chn.send(bytes!("")).await?;
+					}
+					// Output INDEXES
+					let ixs = self.all_tb_indexes(ns, db, &tb.name).await?;
+					if !ixs.is_empty() {
+						for ix in ixs.iter() {
+							chn.send(bytes!(format!("{ix};"))).await?;
+						}
+						chn.send(bytes!("")).await?;
+					}
+					// Output EVENTS
+					let evs = self.all_tb_events(ns, db, &tb.name).await?;
+					if !evs.is_empty() {
+						for ev in evs.iter() {
+							chn.send(bytes!(format!("{ev};"))).await?;
+						}
+						chn.send(bytes!("")).await?;
+					}
+				}
+				// Start transaction
+				chn.send(bytes!("-- ------------------------------")).await?;
------------------------------")).await?; + chn.send(bytes!("-- TRANSACTION")).await?; + chn.send(bytes!("-- ------------------------------")).await?; + chn.send(bytes!("")).await?; + chn.send(bytes!("BEGIN TRANSACTION;")).await?; + chn.send(bytes!("")).await?; + // Records to be exported, categorised by the type of INSERT statement + let mut records_normal: Vec = + Vec::with_capacity(*EXPORT_BATCH_SIZE as usize); + let mut records_relate: Vec = + Vec::with_capacity(*EXPORT_BATCH_SIZE as usize); + // Output TABLE data + for tb in tbs.iter() { + // Start records + chn.send(bytes!("-- ------------------------------")).await?; + chn.send(bytes!(format!("-- TABLE DATA: {}", tb.name))).await?; + chn.send(bytes!("-- ------------------------------")).await?; + chn.send(bytes!("")).await?; + // Fetch records + let beg = crate::key::thing::prefix(ns, db, &tb.name); + let end = crate::key::thing::suffix(ns, db, &tb.name); + let mut next = Some(beg..end); + while let Some(rng) = next { + // Get the next batch of records + let batch = self.batch(rng, *EXPORT_BATCH_SIZE, true).await?; + // Set the next scan range + next = batch.next; + // Check there are records + if batch.values.is_empty() { + break; + } + // Categorize the record types + for (_, v) in batch.values.into_iter() { + // Parse the key and the value + let v: Value = (&v).into(); + // Check if this is a graph edge + match (v.pick(&*EDGE), v.pick(&*IN), v.pick(&*OUT)) { + // This is a graph edge record + (Value::Bool(true), Value::Thing(_), Value::Thing(_)) => { + records_relate.push(v.to_string()); + } + // This is a normal record + _ => { + records_normal.push(v.to_string()); + } + } + } + // Add batches of INSERT statements + if !records_normal.is_empty() { + let values = records_normal.join(", "); + let sql = format!("INSERT [ {values} ];"); + chn.send(bytes!(sql)).await?; + records_normal.clear(); + } + // Add batches of INSERT RELATION statements + if !records_relate.is_empty() { + let values = records_relate.join(", "); + let sql = format!("INSERT RELATION [ {values} ];"); + chn.send(bytes!(sql)).await?; + records_relate.clear() + } + // Fetch more records + continue; + } + chn.send(bytes!("")).await?; + } + // Commit transaction + chn.send(bytes!("-- ------------------------------")).await?; + chn.send(bytes!("-- TRANSACTION")).await?; + chn.send(bytes!("-- ------------------------------")).await?; + chn.send(bytes!("")).await?; + chn.send(bytes!("COMMIT TRANSACTION;")).await?; + chn.send(bytes!("")).await?; + } + } + // Everything exported + Ok(()) + } +} diff --git a/core/src/kvs/fdb/mod.rs b/core/src/kvs/fdb/mod.rs index 3f1bb95c..1e2971eb 100644 --- a/core/src/kvs/fdb/mod.rs +++ b/core/src/kvs/fdb/mod.rs @@ -6,31 +6,32 @@ use crate::err::Error; use crate::kvs::Check; use crate::kvs::Key; use crate::kvs::Val; -use crate::vs::{u64_to_versionstamp, Versionstamp}; +use crate::vs::Versionstamp; use foundationdb::options::DatabaseOption; -use futures::TryStreamExt; +use foundationdb::options::MutationType; +use foundationdb::Database; +use foundationdb::RangeOption; +use foundationdb::Transaction as Tx; +use futures::StreamExt; +use once_cell::sync::Lazy; +use std::fmt::Debug; use std::ops::Range; use std::sync::Arc; -// We use it to work-around the fact that foundationdb-rs' Transaction -// have incompatible lifetimes for the cancel and the commit methods. 
-// More concretely, fdb-rs's cancel/commit takes the receiver as just `self`,
-// which result in it moves and drops the receiver on the function call,
-// which results in a compile error on cancel/commit that takes the self as `&mut self` which doesn't drop
-// self or the fdb-rs Transaction it contains.
-//
-// We use mutex from the futures crate instead of the std's due to https://rust-lang.github.io/wg-async/vision/submitted_stories/status_quo/alan_thinks_he_needs_async_locks.html.
-use crate::key::error::KeyCategory;
-use foundationdb::options::MutationType;
-use futures::lock::Mutex;
-use once_cell::sync::Lazy;
-// In case you're curious why FDB store doesn't work as you've expected,
-// run a few queries via surrealdb-sql or via the REST API, and
-// run the following command to what have been saved to FDB:
-// fdbcli --exec 'getrangekeys \x00 \xff'
+const TIMESTAMP: [u8; 10] = [0x00; 10];
+
 #[non_exhaustive]
 pub struct Datastore {
-	db: foundationdb::Database,
+	db: Database,
+	// The Database stored above, relies on the
+	// foundationdb network being booted before
+	// the client can be used. The return result
+	// of the foundationdb::boot method is a
+	// handle which must be dropped before the
+	// program exits. This handle is stored on
+	// the database so that it is held for the
+	// duration of the programme. This pointer must
+	// be declared last, so that it is dropped last.
 	_fdbnet: Arc<foundationdb::api::NetworkAutoStop>,
 }
@@ -45,7 +46,7 @@ pub struct Transaction {
 	/// Should we check unhandled transactions?
 	check: Check,
 	/// The underlying datastore transaction
-	inner: Arc<Mutex<Option<foundationdb::Transaction>>>,
+	inner: Option<Tx>,
 }
 
 impl Drop for Transaction {
@@ -81,15 +82,19 @@ impl Drop for Transaction {
 impl Datastore {
 	/// Open a new database
 	///
-	/// path must be an empty string or a local file path to a FDB cluster file.
-	/// An empty string results in using the default cluster file placed
-	/// at a system-dependent location defined by FDB.
-	/// See https://apple.github.io/foundationdb/administration.html#default-cluster-file for more information on that.
+	/// The `path` argument can be a local file path to a FoundationDB
+	/// cluster file, or an empty string. If specified as an empty
+	/// string, then the default cluster file placed at a system
+	/// dependent location (defined by FoundationDB) will be used.
+	/// See https://apple.github.io/foundationdb/administration.html
+	/// for more information on cluster connection files.
 	pub(crate) async fn new(path: &str) -> Result<Datastore, Error> {
+		// Initialize the FoundationDB Client API
 		static FDBNET: Lazy<Arc<foundationdb::api::NetworkAutoStop>> =
 			Lazy::new(|| Arc::new(unsafe { foundationdb::boot() }));
+		// Store the network cancellation handle
 		let _fdbnet = (*FDBNET).clone();
-
+		// Configure and setup the database
 		match foundationdb::Database::from_path(path) {
 			Ok(db) => {
 				// Set the transaction timeout
@@ -128,10 +133,10 @@ impl Datastore {
 		match self.db.create_trx() {
 			Ok(inner) => Ok(Transaction {
 				done: false,
+				lock,
 				check,
 				write,
-				lock,
-				inner: Arc::new(Mutex::new(Some(inner))),
+				inner: Some(inner),
 			}),
 			Err(e) => Err(Error::Tx(e.to_string())),
 		}
@@ -139,27 +144,39 @@ impl Datastore {
 }
 
 impl Transaction {
-	/// Behaviour if unclosed
-	pub(crate) fn check_level(&mut self, check: Check) {
-		self.check = check;
-	}
-	/// Check if closed
-	pub(crate) fn closed(&self) -> bool {
-		self.done
-	}
-	/// We use lock=true to enable the tikv's own pessimistic inner (https://docs.pingcap.com/tidb/v4.0/pessimistic-transaction)
-	/// for tikv kvs.
-	/// FDB's standard transaction(snapshot=false) behaves like a tikv perssimistic inner
-	/// by automatically retrying on conflict at the fdb client layer.
-	/// So in fdb kvs we assume that lock=true is basically a request to
-	/// use the standard fdb inner to make transactions Serializable.
-	/// In case the inner is write, we assume the user never wants to lose serializability
-	/// so we go with the standard fdb serializable inner in that case too.
+	/// Each transaction uses `lock=true` to behave similarly to pessimistic
+	/// locks in the same way that pessimistic transactions work in TiKV.
+	/// Standard transactions in FoundationDB (where `snapshot=false`) behave
+	/// like a TiKV pessimistic transaction, by automatically retrying
+	/// on commit conflicts at the client layer. In FoundationDB we assume
+	/// that `lock=true` is effectively specifying that we should ensure
+	/// transactions are serializable. If the transaction is writeable, we also
+	/// assume that the user never wants to lose serializability, so we go with
+	/// the standard FoundationDB serializable mode in that scenario.
+	#[inline(always)]
 	fn snapshot(&self) -> bool {
 		!self.write && !self.lock
 	}
+}
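To make the rule above concrete (editorial illustration, not patch content):

```rust
// snapshot() = !write && !lock, so:
//   write=false, lock=false -> snapshot read (relaxed, non-conflicting)
//   write=false, lock=true  -> serializable read
//   write=true,  lock=any   -> serializable (writes never use snapshot reads)
```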
+
+impl super::api::Transaction for Transaction {
+	/// Behaviour if unclosed
+	fn check_level(&mut self, check: Check) {
+		self.check = check;
+	}
+
+	/// Check if closed
+	fn closed(&self) -> bool {
+		self.done
+	}
+
+	/// Check if writeable
+	fn writeable(&self) -> bool {
+		self.write
+	}
+
 	/// Cancel a transaction
-	pub(crate) async fn cancel(&mut self) -> Result<(), Error> {
+	async fn cancel(&mut self) -> Result<(), Error> {
 		// Check to see if transaction is closed
 		if self.done {
 			return Err(Error::TxFinished);
@@ -167,25 +184,16 @@ impl Transaction {
 		// Mark this transaction as done
 		self.done = true;
 		// Cancel this transaction
-		//
-		// To overcome the limitation in the rust fdb client that
-		// it's `cancel` and `commit` methods require you to move the
-		// whole inner object to the method, we wrap it inside a Arc<Mutex<Option<Transaction>>>
-		// so that we can atomically `take` the inner out of the container and
-		// replace it with the new `reset`ed inner.
-		let inner = match self.inner.lock().await.take() {
-			Some(inner) => {
-				let tc = inner.cancel();
-				tc.reset()
-			}
-			_ => return Err(Error::Ds("Unexpected error".to_string())),
+		match self.inner.take() {
+			Some(inner) => inner.cancel().reset(),
+			None => unreachable!(),
 		};
-		self.inner = Arc::new(Mutex::new(Some(inner)));
 		// Continue
 		Ok(())
 	}
+
 	/// Commit a transaction
-	pub(crate) async fn commit(&mut self) -> Result<(), Error> {
+	async fn commit(&mut self) -> Result<(), Error> {
 		// Check to see if transaction is closed
 		if self.done {
 			return Err(Error::TxFinished);
@@ -196,217 +204,56 @@ impl Transaction {
 		}
 		// Mark this transaction as done
 		self.done = true;
-		// Cancel this transaction
-		//
-		// To overcome the limitation in the rust fdb client that
-		// it's `cancel` and `commit` methods require you to move the
-		// whole inner object to the method, we wrap it inside a Arc<Mutex<Option<Transaction>>>
-		// so that we can atomically `take` the inner out of the container and
-		// replace it with the new `reset`ed inner.
-		let r = match self.inner.lock().await.take() {
-			Some(inner) => inner.commit().await,
-			_ => return Err(Error::Ds("Unexpected error".to_string())),
+		// Commit this transaction
+		match self.inner.take() {
+			Some(inner) => inner.commit().await?,
+			None => unreachable!(),
 		};
-		match r {
-			Ok(_r) => {}
-			Err(e) => {
-				return Err(Error::Tx(format!("Transaction commit error: {}", e)));
-			}
-		}
 		// Continue
 		Ok(())
 	}
+
 	/// Check if a key exists
-	pub(crate) async fn exi<K>(&mut self, key: K) -> Result<bool, Error>
+	async fn exists<K>(&mut self, key: K) -> Result<bool, Error>
 	where
-		K: Into<Key>,
+		K: Into<Key> + Debug,
 	{
 		// Check to see if transaction is closed
 		if self.done {
 			return Err(Error::TxFinished);
 		}
 		// Check the key
-		let key: Vec<u8> = key.into();
-		let key: &[u8] = &key[..];
-		let inner = self.inner.lock().await;
-		let inner = inner.as_ref().unwrap();
-		// Assuming the `lock` argument passed to the datastore creation function
-		// is meant for conducting a pessimistic lock on the underlying kv store to
-		// make the transaction serializable, we use the inverse of it to enable the snapshot isolation
-		// on the get request.
-		// See https://apple.github.io/foundationdb/api-c.html#snapshot-reads for more information on how the snapshot get is supposed to work in FDB.
-		inner
-			.get(key, self.snapshot())
-			.await
-			.map(|v| v.is_some())
-			.map_err(|e| Error::Tx(format!("Unable to get kv from FoundationDB: {}", e)))
-	}
-	/// Fetch a key from the database
-	pub(crate) async fn get<K>(&mut self, key: K) -> Result<Option<Val>, Error>
-	where
-		K: Into<Key>,
-	{
-		// Check to see if transaction is closed
-		if self.done {
-			return Err(Error::TxFinished);
-		}
-		// Get the key
-		let key: Vec<u8> = key.into();
-		let key = &key[..];
-		let inner = self.inner.lock().await;
-		let inner = inner.as_ref().unwrap();
-		// Assuming the `lock` argument passed to the datastore creation function
-		// is meant for conducting a pessimistic lock on the underlying kv store to
-		// make the transaction serializable, we use the inverse of it to enable the snapshot isolation
-		// on the get request.
-		// See https://apple.github.io/foundationdb/api-c.html#snapshot-reads for more information on how the snapshot get is supposed to work in FDB.
-		inner
-			.get(key, self.snapshot())
-			.await
-			.map(|v| v.as_ref().map(|v| v.to_vec()))
-			.map_err(|e| Error::Tx(format!("Unable to get kv from FoundationDB: {}", e)))
-	}
-	/// Obtain a new change timestamp for a key
-	/// which is replaced with the current timestamp when the transaction is committed.
-	/// NOTE: This should be called when composing the change feed entries for this transaction,
-	/// which should be done immediately before the transaction commit.
-	/// That is to keep other transactions commit delay(pessimistic) or conflict(optimistic) as less as possible.
-	#[allow(unused)]
-	pub(crate) async fn get_timestamp(&mut self) -> Result<Versionstamp, Error> {
-		// Check to see if transaction is closed
-		if self.done {
-			return Err(Error::TxFinished);
-		}
-		let inner = self.inner.lock().await;
-		let inner = inner.as_ref().unwrap();
-		let res = inner
-			.get_read_version()
-			.await
-			.map_err(|e| Error::Tx(format!("Unable to get read version from FDB: {}", e)))?;
-		let res: u64 = res.try_into().unwrap();
-		let res = u64_to_versionstamp(res);
-
-		// Return the uint64 representation of the timestamp as the result
+		let res = self.inner.as_ref().unwrap().get(&key.into(), self.snapshot()).await?.is_some();
+		// Return result
 		Ok(res)
 	}
-	/// Inserts or update a key in the database
-	pub(crate) async fn set<K, V>(&mut self, key: K, val: V) -> Result<(), Error>
+
+	/// Fetch a key from the database
+	async fn get<K>(&mut self, key: K) -> Result<Option<Val>, Error>
 	where
-		K: Into<Key>,
-		V: Into<Val>,
+		K: Into<Key> + Debug,
 	{
 		// Check to see if transaction is closed
 		if self.done {
 			return Err(Error::TxFinished);
 		}
-		// Check to see if transaction is writable
-		if !self.write {
-			return Err(Error::TxReadonly);
-		}
-		// Set the key
-		let key: Vec<u8> = key.into();
-		let key = &key[..];
-		let val: Vec<u8> = val.into();
-		let val = &val[..];
-		let inner = self.inner.lock().await;
-		let inner = inner.as_ref().unwrap();
-		inner.set(key, val);
-		// Return result
-		Ok(())
-	}
-	/// Insert a key if it doesn't exist in the database
-	///
-	/// This function is used when the client sent a CREATE query,
-	/// where the key is derived from namespace, database, table name,
-	/// and either an auto-generated record ID or a the record ID specified by the client
-	/// after the colon in the CREATE query's first argument.
-	///
-	/// Suppose you've sent a query like `CREATE author:john SET ...` with
-	/// the namespace `test` and the database `test`-
-	/// You'll see SurrealDB sets a value to the key `/*test\x00*test\x00*author\x00*\x00\x00\x00\x01john\x00`.
-	pub(crate) async fn put<K, V>(
-		&mut self,
-		category: KeyCategory,
-		key: K,
-		val: V,
-	) -> Result<(), Error>
-	where
-		K: Into<Key>,
-		V: Into<Val>,
-	{
-		// Check to see if transaction is closed
-		if self.done {
-			return Err(Error::TxFinished);
-		}
-		// Check to see if transaction is writable
-		if !self.write {
-			return Err(Error::TxReadonly);
-		}
-		let key: Vec<u8> = key.into();
-		if self.exi(key.clone().as_slice()).await? {
-			return Err(Error::TxKeyAlreadyExistsCategory(category));
-		}
-		// Set the key
-		let key: &[u8] = &key[..];
-		let val: Vec<u8> = val.into();
-		let val: &[u8] = &val[..];
-		let inner = self.inner.lock().await;
-		let inner = inner.as_ref().unwrap();
-		inner.set(key, val);
-		// Return result
-		Ok(())
-	}
-	/// Insert a key if it doesn't exist in the database
-	pub(crate) async fn putc<K, V>(&mut self, key: K, val: V, chk: Option<V>) -> Result<(), Error>
-	where
-		K: Into<Key>,
-		V: Into<Val>,
-	{
-		// Check to see if transaction is closed
-		if self.done {
-			return Err(Error::TxFinished);
-		}
-		// Check to see if transaction is writable
-		if !self.write {
-			return Err(Error::TxReadonly);
-		}
 		// Get the key
-		let key: Vec<u8> = key.into();
-		let key: &[u8] = key.as_slice();
-		// Get the val
-		let val: Vec<u8> = val.into();
-		let val: &[u8] = val.as_slice();
-		// Get the check
-		let chk = chk.map(Into::into);
-		// Delete the key
-		let inner = self.inner.lock().await;
-		let inner = inner.as_ref().unwrap();
-		// Assuming the `lock` argument passed to the datastore creation function
-		// is meant for conducting a pessimistic lock on the underlying kv store to
-		// make the transaction serializable, we use the inverse of it to enable the snapshot isolation
-		// on the get request.
-		// See https://apple.github.io/foundationdb/api-c.html#snapshot-reads for more information on how the snapshot get is supposed to work in FDB.
-		let res = inner.get(key, false).await;
-		let res = res.map_err(|e| Error::Tx(format!("Unable to get kv from FoundationDB: {}", e)));
-		match (res, chk) {
-			(Ok(Some(v)), Some(w)) if *v.as_ref() == w => inner.set(key, val),
-			(Ok(None), None) => inner.set(key, val),
-			(Err(e), _) => return Err(e),
-			_ => return Err(Error::TxConditionNotMet),
-		};
+		let res = self
+			.inner
+			.as_ref()
+			.unwrap()
+			.get(&key.into(), self.snapshot())
+			.await?
+			.map(|v| v.to_vec());
 		// Return result
-		Ok(())
+		Ok(res)
 	}
-	// Sets the value for a versionstamped key prefixed with the user-supplied key.
-	pub(crate) async fn set_versionstamped_key<K, V>(
-		&mut self,
-		prefix: K,
-		suffix: K,
-		val: V,
-	) -> Result<(), Error>
+
+	/// Inserts or update a key in the database
+	async fn set<K, V>(&mut self, key: K, val: V) -> Result<(), Error>
 	where
-		K: Into<Key>,
-		V: Into<Val>,
+		K: Into<Key> + Debug,
+		V: Into<Val> + Debug,
 	{
 		// Check to see if transaction is closed
 		if self.done {
@@ -417,33 +264,16 @@ impl Transaction {
 			return Err(Error::TxReadonly);
 		}
 		// Set the key
-		let mut k: Vec<u8> = prefix.into();
-		let pos = k.len();
-		let pos: u32 = pos.try_into().unwrap();
-		// The incomplete versionstamp is 10 bytes long.
-		// See the documentation of SetVersionstampedKey for more information.
-		let mut ts_placeholder: Vec<u8> =
-			vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
-		k.append(&mut ts_placeholder);
-		k.append(&mut suffix.into());
-		// FDB's SetVersionstampedKey expects the parameter, the start position of the 10-bytes placeholder
-		// to be replaced by the versionstamp, to be in little endian.
-		let mut posbs: Vec<u8> = pos.to_le_bytes().to_vec();
-		k.append(&mut posbs);
+		self.inner.as_ref().unwrap().set(&key.into(), &val.into());
+		// Return result
+		Ok(())
+	}
 
-		let key: &[u8] = &k[..];
-		let val: Vec<u8> = val.into();
-		let val: &[u8] = &val[..];
-		let inner = self.inner.lock().await;
-		let inner = inner.as_ref().unwrap();
-		inner.atomic_op(key, val, MutationType::SetVersionstampedKey);
-		// Return result
-		Ok(())
-	}
-	/// Delete a key
-	pub(crate) async fn del<K>(&mut self, key: K) -> Result<(), Error>
+	/// Insert a key if it doesn't exist in the database
+	async fn put<K, V>(&mut self, key: K, val: V) -> Result<(), Error>
 	where
-		K: Into<Key>,
+		K: Into<Key> + Debug,
+		V: Into<Val> + Debug,
 	{
 		// Check to see if transaction is closed
 		if self.done {
 			return Err(Error::TxFinished);
 		}
 		// Check to see if transaction is writable
 		if !self.write {
 			return Err(Error::TxReadonly);
 		}
-		// Delete the key
-		let key: Vec<u8> = key.into();
-		let key: &[u8] = key.as_slice();
-		let inner = self.inner.lock().await;
-		let inner = inner.as_ref().unwrap();
-		inner.clear(key);
+		// Get the transaction
+		let inner = self.inner.as_ref().unwrap();
+		// Get the arguments
+		let key = key.into();
+		let val = val.into();
+		// Set the key if empty
+		match inner.get(&key, self.snapshot()).await? {
+			None => inner.set(&key, &val),
+			_ => return Err(Error::TxKeyAlreadyExists),
+		};
 		// Return result
 		Ok(())
 	}
-	/// Delete a key
-	pub(crate) async fn delc<K, V>(&mut self, key: K, chk: Option<V>) -> Result<(), Error>
+
+	/// Insert a key if the current value matches a condition.
+	async fn putc<K, V>(&mut self, key: K, val: V, chk: Option<V>) -> Result<(), Error>
 	where
-		K: Into<Key>,
-		V: Into<Val>,
+		K: Into<Key> + Debug,
+		V: Into<Val> + Debug,
 	{
 		// Check to see if transaction is closed
 		if self.done {
 			return Err(Error::TxFinished);
 		}
 		// Check to see if transaction is writable
 		if !self.write {
 			return Err(Error::TxReadonly);
 		}
-		let key: Vec<u8> = key.into();
-		let key: &[u8] = key.as_slice();
-		// Get the check
-		let chk: Option<Val> = chk.map(Into::into);
-		// Delete the key
-		let inner = self.inner.lock().await;
-		let inner = inner.as_ref().unwrap();
-		let res = inner
-			.get(key, false)
-			.await
-			.map_err(|e| Error::Tx(format!("FoundationDB inner failure: {}", e)));
-		match (res, chk) {
-			(Ok(Some(v)), Some(w)) if *v.as_ref() == w => inner.clear(key),
-			(Ok(None), None) => inner.clear(key),
+		// Get the transaction
+		let inner = self.inner.as_ref().unwrap();
+		// Get the arguments
+		let key = key.into();
+		let val = val.into();
+		let chk = chk.map(Into::into);
+		// Set the key if valid
+		match (inner.get(&key, self.snapshot()).await?, chk) {
+			(Some(v), Some(w)) if *v.as_ref() == w => inner.set(&key, &val),
+			(None, None) => inner.set(&key, &val),
 			_ => return Err(Error::TxConditionNotMet),
 		};
 		// Return result
 		Ok(())
 	}
-	/// Retrieve a range of keys from the databases
-	pub(crate) async fn scan<K>(
-		&mut self,
-		rng: Range<K>,
-		limit: u32,
-	) -> Result<Vec<(Key, Val)>, Error>
+
+	/// Delete a key
+	async fn del<K>(&mut self, key: K) -> Result<(), Error>
 	where
-		K: Into<Key>,
+		K: Into<Key> + Debug,
 	{
 		// Check to see if transaction is closed
 		if self.done {
 			return Err(Error::TxFinished);
 		}
+		// Check to see if transaction is writable
+		if !self.write {
+			return Err(Error::TxReadonly);
+		}
+		// Remove the key
+		self.inner.as_ref().unwrap().clear(&key.into());
+		// Return result
+		Ok(())
+	}
+
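A sketch of the write semantics implemented above (hypothetical keys and values; `Key` and `Val` are byte vectors, so `&str` converts via `Into`, and `tx` is an assumed writeable transaction):

```rust
tx.put("demo", "a").await?;                  // ok: the key was absent
assert!(tx.put("demo", "b").await.is_err()); // Err(TxKeyAlreadyExists)
tx.putc("demo", "b", Some("a")).await?;      // ok: current value is "a"
assert!(tx.putc("demo", "c", Some("x")).await.is_err()); // Err(TxConditionNotMet)
tx.delc("demo", Some("b")).await?;           // delc (below): deletes while the value matches
```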
+ async fn delc(&mut self, key: K, chk: Option) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Check to see if transaction is writable + if !self.write { + return Err(Error::TxReadonly); + } + // Get the transaction + let inner = self.inner.as_ref().unwrap(); + // Get the arguments + let key = key.into(); + let chk = chk.map(Into::into); + // Delete the key if valid + match (inner.get(&key, self.snapshot()).await?, chk) { + (Some(v), Some(w)) if *v.as_ref() == w => inner.clear(&key), + (None, None) => inner.clear(&key), + _ => return Err(Error::TxConditionNotMet), + }; + // Return result + Ok(()) + } + + /// Delete a range of keys from the databases + async fn delr(&mut self, rng: Range) -> Result<(), Error> + where + K: Into + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Check to see if transaction is writable + if !self.write { + return Err(Error::TxReadonly); + } + // Delete the key range + self.inner.as_ref().unwrap().clear_range(&rng.start.into(), &rng.end.into()); + // Return result + Ok(()) + } + + /// Retrieve a range of keys from the databases + async fn keys(&mut self, rng: Range, limit: u32) -> Result, Error> + where + K: Into + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Get the transaction + let inner = self.inner.as_ref().unwrap(); // Convert the range to bytes let rng: Range = Range { start: rng.start.into(), end: rng.end.into(), }; - // Scan the keys - let begin: Vec = rng.start; - let end: Vec = rng.end; - let opt = foundationdb::RangeOption { - limit: Some(limit.try_into().unwrap()), - ..foundationdb::RangeOption::from((begin.as_slice(), end.as_slice())) + // Create result set + let mut res = vec![]; + // Set the key range + let opt = RangeOption { + limit: Some(limit as usize), + ..RangeOption::from((rng.start.as_slice(), rng.end.as_slice())) }; - let inner = self.inner.lock().await; - let inner = inner.as_ref().unwrap(); - // Assuming the `lock` argument passed to the datastore creation function - // is meant for conducting a pessimistic lock on the underlying kv store to - // make the transaction serializable, we use the inverse of it to enable the snapshot isolation - // on the get request. - // See https://apple.github.io/foundationdb/api-c.html#snapshot-reads for more information on how the snapshot get is supposed to work in FDB. 
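The new `putc` and `delc` above share one compare-then-act rule: the mutation proceeds only when the stored value equals the supplied check value, or when both are absent; anything else fails the condition. A toy sketch of that rule against a plain `HashMap` (the function shape and error type are stand-ins, not the crate's types):

```rust
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum TxError {
    ConditionNotMet,
}

/// Write `val` under `key` only if the stored value equals `chk`,
/// or if both the stored value and `chk` are absent.
fn putc(
    store: &mut HashMap<Vec<u8>, Vec<u8>>,
    key: Vec<u8>,
    val: Vec<u8>,
    chk: Option<Vec<u8>>,
) -> Result<(), TxError> {
    let permitted = match (store.get(&key), &chk) {
        // Current value matches the supplied check value
        (Some(v), Some(w)) => v == w,
        // No current value, and none was expected
        (None, None) => true,
        // Anything else fails the condition
        _ => false,
    };
    if permitted {
        store.insert(key, val);
        Ok(())
    } else {
        Err(TxError::ConditionNotMet)
    }
}

fn main() {
    let mut store = HashMap::new();
    // First write: no existing value expected
    putc(&mut store, b"k".to_vec(), b"v1".to_vec(), None).unwrap();
    // Conditional update: must supply the current value
    putc(&mut store, b"k".to_vec(), b"v2".to_vec(), Some(b"v1".to_vec())).unwrap();
    // A stale check value is rejected
    let res = putc(&mut store, b"k".to_vec(), b"v3".to_vec(), Some(b"v1".to_vec()));
    assert_eq!(res, Err(TxError::ConditionNotMet));
}
```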
- let mut stream = inner.get_ranges_keyvalues(opt, self.snapshot()); - let mut res: Vec<(Key, Val)> = vec![]; - loop { - let x = stream.try_next().await; - match x { - Ok(Some(v)) => { - let x = (Key::from(v.key()), Val::from(v.value())); - res.push(x) - } - Ok(None) => break, - Err(e) => return Err(Error::Tx(format!("GetRanges failed: {}", e))), + // Create the scan request + let mut req = inner.get_ranges(opt, self.snapshot()); + // Scan the keys in the iterator + while let Some(val) = req.next().await { + for v in val?.into_iter() { + res.push(Key::from(v.key())); } } + // Return result Ok(res) } - /// Delete a range of keys from the databases - pub(crate) async fn delr(&mut self, rng: Range) -> Result<(), Error> + /// Retrieve a range of keys from the databases + async fn scan(&mut self, rng: Range, limit: u32) -> Result, Error> where - K: Into, + K: Into + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Get the transaction + let inner = self.inner.as_ref().unwrap(); + // Convert the range to bytes + let rng: Range = Range { + start: rng.start.into(), + end: rng.end.into(), + }; + // Create result set + let mut res = vec![]; + // Set the key range + let opt = RangeOption { + limit: Some(limit as usize), + ..RangeOption::from((rng.start.as_slice(), rng.end.as_slice())) + }; + // Create the scan request + let mut req = inner.get_ranges(opt, self.snapshot()); + // Scan the keys in the iterator + while let Some(val) = req.next().await { + for v in val?.into_iter() { + res.push((Key::from(v.key()), Val::from(v.value()))); + } + } + // Return result + Ok(res) + } + + /// Obtain a new change timestamp for a key + async fn get_timestamp(&mut self, _: K) -> Result { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Get the current read version + let res = self.inner.as_ref().unwrap().get_read_version().await?; + // Convert to a version stamp + let res = crate::vs::u64_to_versionstamp(res as u64); + // Return result + Ok(res) + } + + // Sets the value for a versionstamped key prefixed with the user-supplied key. 
+ async fn set_versionstamp( + &mut self, + _: K, + prefix: K, + suffix: K, + val: V, + ) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -556,11 +498,21 @@ impl Transaction { if !self.write { return Err(Error::TxReadonly); } - let begin: &[u8] = &rng.start.into(); - let end: &[u8] = &rng.end.into(); - let inner = self.inner.lock().await; - let inner = inner.as_ref().unwrap(); - inner.clear_range(begin, end); + // Build the key starting with the prefix + let mut key: Vec = prefix.into(); + // Get the position of the timestamp + let pos = key.len() as u32; + // Append the timestamp placeholder + key.extend_from_slice(&TIMESTAMP); + // Append the suffix to the key + key.extend(suffix.into()); + // Append the 4 byte placeholder position in little endian + key.append(&mut pos.to_le_bytes().to_vec()); + // Convert the value + let val = val.into(); + // Set the versionstamp key + self.inner.as_ref().unwrap().atomic_op(&key, &val, MutationType::SetVersionstampedKey); + // Return result Ok(()) } } diff --git a/core/src/kvs/indxdb/mod.rs b/core/src/kvs/indxdb/mod.rs index f150f3e8..e62ad002 100644 --- a/core/src/kvs/indxdb/mod.rs +++ b/core/src/kvs/indxdb/mod.rs @@ -4,7 +4,6 @@ use crate::err::Error; use crate::kvs::Check; use crate::kvs::Key; use crate::kvs::Val; -use crate::vs::{try_to_u64_be, u64_to_versionstamp, Versionstamp}; use std::ops::Range; #[non_exhaustive] @@ -56,7 +55,7 @@ impl Drop for Transaction { impl Datastore { /// Open a new database - pub(crate) async fn new(path: &str) -> Result { + pub async fn new(path: &str) -> Result { match indxdb::db::new(path).await { Ok(db) => Ok(Datastore { db, @@ -65,7 +64,7 @@ impl Datastore { } } /// Start a new transaction - pub(crate) async fn transaction(&self, write: bool, _: bool) -> Result { + pub async fn transaction(&self, write: bool, _: bool) -> Result { // Specify the check level #[cfg(not(debug_assertions))] let check = Check::Warn; @@ -84,17 +83,24 @@ impl Datastore { } } -impl Transaction { +impl super::api::Transaction for Transaction { /// Behaviour if unclosed - pub(crate) fn check_level(&mut self, check: Check) { + fn check_level(&mut self, check: Check) { self.check = check; } + /// Check if closed - pub(crate) fn closed(&self) -> bool { + fn closed(&self) -> bool { self.done } + + /// Check if writeable + fn writeable(&self) -> bool { + self.write + } + /// Cancel a transaction - pub(crate) async fn cancel(&mut self) -> Result<(), Error> { + async fn cancel(&mut self) -> Result<(), Error> { // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); @@ -106,8 +112,9 @@ impl Transaction { // Continue Ok(()) } + /// Commit a transaction - pub(crate) async fn commit(&mut self) -> Result<(), Error> { + async fn commit(&mut self) -> Result<(), Error> { // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); @@ -123,8 +130,9 @@ impl Transaction { // Continue Ok(()) } + /// Check if a key exists - pub(crate) async fn exi(&mut self, key: K) -> Result + async fn exists(&mut self, key: K) -> Result where K: Into, { @@ -137,8 +145,9 @@ impl Transaction { // Return result Ok(res) } + /// Fetch a key from the database - pub(crate) async fn get(&mut self, key: K) -> Result, Error> + async fn get(&mut self, key: K) -> Result, Error> where K: Into, { @@ -151,70 +160,9 @@ impl Transaction { // Return result Ok(res) } - /// Obtain a new change timestamp for a key - /// which is replaced with the current 
timestamp when the transaction is committed. - /// NOTE: This should be called when composing the change feed entries for this transaction, - /// which should be done immediately before the transaction commit. - /// That is to keep other transactions commit delay(pessimistic) or conflict(optimistic) as less as possible. - #[allow(unused)] - pub(crate) async fn get_timestamp(&mut self, key: K) -> Result - where - K: Into, - { - // Check to see if transaction is closed - if self.done { - return Err(Error::TxFinished); - } - // Write the timestamp to the "last-write-timestamp" key - // to ensure that no other transactions can commit with older timestamps. - let k: Key = key.into(); - let prev = self.inner.get(k.clone()).await?; - let ver = match prev { - Some(prev) => { - let slice = prev.as_slice(); - let res: Result<[u8; 10], Error> = match slice.try_into() { - Ok(ba) => Ok(ba), - Err(e) => Err(Error::Ds(e.to_string())), - }; - let array = res?; - let prev: u64 = try_to_u64_be(array)?; - prev + 1 - } - None => 1, - }; - let verbytes = u64_to_versionstamp(ver); - - self.inner.put(k, verbytes.to_vec()).await?; - // Return the uint64 representation of the timestamp as the result - Ok(verbytes) - } - /// Obtain a new key that is suffixed with the change timestamp - pub(crate) async fn get_versionstamped_key( - &mut self, - ts_key: K, - prefix: K, - suffix: K, - ) -> Result, Error> - where - K: Into, - { - // Check to see if transaction is closed - if self.done { - return Err(Error::TxFinished); - } - // Check to see if transaction is writable - if !self.write { - return Err(Error::TxReadonly); - } - let ts = self.get_timestamp(ts_key).await?; - let mut k: Vec = prefix.into(); - k.append(&mut ts.to_vec()); - k.append(&mut suffix.into()); - Ok(k) - } /// Insert or update a key in the database - pub(crate) async fn set(&mut self, key: K, val: V) -> Result<(), Error> + async fn set(&mut self, key: K, val: V) -> Result<(), Error> where K: Into, V: Into, @@ -232,8 +180,9 @@ impl Transaction { // Return result Ok(()) } + /// Insert a key if it doesn't exist in the database - pub(crate) async fn put(&mut self, key: K, val: V) -> Result<(), Error> + async fn put(&mut self, key: K, val: V) -> Result<(), Error> where K: Into, V: Into, @@ -251,8 +200,9 @@ impl Transaction { // Return result Ok(()) } - /// Insert a key if it doesn't exist in the database - pub(crate) async fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> + + /// Insert a key if the current value matches a condition + async fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> where K: Into, V: Into, @@ -270,8 +220,9 @@ impl Transaction { // Return result Ok(()) } + /// Delete a key - pub(crate) async fn del(&mut self, key: K) -> Result<(), Error> + async fn del(&mut self, key: K) -> Result<(), Error> where K: Into, { @@ -288,8 +239,9 @@ impl Transaction { // Return result Ok(res) } - /// Delete a key - pub(crate) async fn delc(&mut self, key: K, chk: Option) -> Result<(), Error> + + /// Delete a key if the current value matches a condition + async fn delc(&mut self, key: K, chk: Option) -> Result<(), Error> where K: Into, V: Into, @@ -307,12 +259,29 @@ impl Transaction { // Return result Ok(res) } + /// Retrieve a range of keys from the databases - pub(crate) async fn scan( - &mut self, - rng: Range, - limit: u32, - ) -> Result, Error> + async fn keys(&mut self, rng: Range, limit: u32) -> Result, Error> + where + K: Into, + { + // Check to see if transaction is closed + if self.done { + return 
Err(Error::TxFinished); + } + // Convert the range to bytes + let rng: Range = Range { + start: rng.start.into(), + end: rng.end.into(), + }; + // Scan the keys + let res = self.inner.keys(rng, limit).await?; + // Return result + Ok(res) + } + + /// Retrieve a range of keys from the databases + async fn scan(&mut self, rng: Range, limit: u32) -> Result, Error> where K: Into, { diff --git a/core/src/kvs/kv.rs b/core/src/kvs/kv.rs index 2facab94..b803a0f1 100644 --- a/core/src/kvs/kv.rs +++ b/core/src/kvs/kv.rs @@ -4,15 +4,6 @@ pub type Key = Vec; /// The value part of a key-value pair. An alias for [`Vec`]. pub type Val = Vec; -/// Used to determine the behaviour when a transaction is not handled correctly -#[derive(Default)] -pub(crate) enum Check { - #[default] - None, - Warn, - Panic, -} - /// This trait appends an element to a collection, and allows chaining pub(super) trait Add { fn add(self, v: T) -> Self; diff --git a/core/src/kvs/live.rs b/core/src/kvs/live.rs new file mode 100644 index 00000000..368ac503 --- /dev/null +++ b/core/src/kvs/live.rs @@ -0,0 +1,19 @@ +use derive::Store; +use revision::revisioned; +use serde::{Deserialize, Serialize}; + +#[revisioned(revision = 1)] +#[derive(Clone, Debug, Default, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Store)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[non_exhaustive] +pub struct Live { + // TODO: optimisation this should probably be a &str + /// The namespace in which this LIVE query exists + pub ns: String, + // TODO: optimisation this should probably be a &str + /// The database in which this LIVE query exists + pub db: String, + // TODO: optimisation this should probably be a &str + /// The table in which this LIVE query exists + pub tb: String, +} diff --git a/core/src/kvs/lq_cf.rs b/core/src/kvs/lq_cf.rs deleted file mode 100644 index 95528191..00000000 --- a/core/src/kvs/lq_cf.rs +++ /dev/null @@ -1,388 +0,0 @@ -use crate::dbs::node::Timestamp; -use std::collections::BTreeMap; - -use crate::kvs::lq_structs::{KillEntry, LqEntry, LqIndexKey, LqIndexValue, LqSelector}; -use crate::vs::{conv, Versionstamp}; - -/// We often want to increment by 1, but the 2 least significant bytes are unused -const ONE_SHIFTED: u128 = 1 << 16; - -/// The datastore needs to track live queries that it owns as an engine. The db API and drivers -/// start tasks that poll the database for changes that are broadcast to relevant live queries. -/// -/// This struct tracks live queries against change feeds so that the correct watermarks are used -/// across differently versioned live queries. It provides convenience, correctness and separation -/// of concerns. 
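The `LiveQueryTracker` deleted here kept one watermark per change-feed selector, holding the earliest not-yet-read versionstamp across every live query on that selector, so registering a new live query could only ever move the shared watermark backwards. A toy sketch of that rule, with `u128` values standing in for the real 10-byte versionstamps:

```rust
// A toy sketch of the watermark bookkeeping described above. Registering a
// live query may only rewind a change-feed watermark, never advance it, so
// no query misses changes recorded before the shared watermark.
use std::collections::BTreeMap;

type Selector = String; // stands in for the (ns, db, tb) selector

fn register(watermarks: &mut BTreeMap<Selector, u128>, selector: Selector, vs: u128) {
    watermarks
        .entry(selector)
        .and_modify(|current| {
            // An earlier live query pulls the shared watermark back
            if vs < *current {
                *current = vs;
            }
        })
        .or_insert(vs);
}

fn main() {
    let mut watermarks = BTreeMap::new();
    register(&mut watermarks, "ns/db/tb".into(), 40);
    // A second query starting earlier rewinds the watermark
    register(&mut watermarks, "ns/db/tb".into(), 25);
    assert_eq!(watermarks["ns/db/tb"], 25);
    // A later query leaves it untouched
    register(&mut watermarks, "ns/db/tb".into(), 90);
    assert_eq!(watermarks["ns/db/tb"], 25);
}
```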
-pub(crate) struct LiveQueryTracker { - // Map of Live Query identifier (ns+db+tb) for change feed tracking - // the mapping is to a list of affected live queries - local_live_queries: BTreeMap, - // Set of tracked change feeds with associated watermarks - // This is updated with new/removed live queries and improves cf request performance - // The Versionstamp associated is scanned inclusive of first value, so it must contain the earliest NOT read value - // So if VS=2 has been processed, the correct value here is VS=3 - cf_watermarks: BTreeMap, -} - -impl LiveQueryTracker { - pub(crate) const fn new() -> Self { - Self { - local_live_queries: BTreeMap::new(), - cf_watermarks: BTreeMap::new(), - } - } - - /// Add another Live Query to track, given the Versionstamp to stream from - pub(crate) fn register_live_query( - &mut self, - lq_index_key: &LqEntry, - live_query_vs: Versionstamp, - ) -> Result<(), &'static str> { - // See if we are already tracking the query - let k = lq_index_key.as_key(); - if self.local_live_queries.contains_key(&k) { - return Err("Live query registered twice"); - } - let v = lq_index_key.as_value(live_query_vs, Timestamp::default()); - let selector = k.selector.clone(); - self.local_live_queries.insert(k, v); - - // Check if we need to add a watermark for change feeds - match self.cf_watermarks.get(&selector) { - Some(existing_watermark) => { - // if we are tracking a later watermark than the one committed, then we need to move the watermark backwards - // Each individual live query will track its own watermark, so they will not get picked up when replaying older events - let current_u128 = conv::to_u128_be(*existing_watermark); - let proposed_u128 = conv::to_u128_be(live_query_vs); - if proposed_u128 < current_u128 { - self.cf_watermarks.insert(selector, live_query_vs); - } - } - None => { - // This default watermark is bad - it will catch up from the start of the change feed - self.cf_watermarks.insert(selector, live_query_vs); - } - } - Ok(()) - } - - pub(crate) fn unregister_live_query(&mut self, kill_entry: &KillEntry) { - // Because the information available from a kill statement is limited, we need to find a relevant kill query - let found: Option<(LqIndexKey, LqIndexValue)> = self - .local_live_queries - .iter() - .filter(|(k, _)| { - // Get all the live queries in the ns/db pair. 
We don't know table - k.selector.ns == kill_entry.ns && k.selector.db == kill_entry.db - }) - .filter_map(|(k, v)| { - if v.stm.id == kill_entry.live_id { - Some((k.clone(), v.clone())) - } else { - None - } - }) - .next(); - match found { - None => { - // TODO(SUR-336): Make Live Query ID validation available at statement level, perhaps via transaction - warn!( - "Could not find live query {:?} to kill in ns/db pair {:?} / {:?}", - &kill_entry, &kill_entry.ns, &kill_entry.db - ); - } - Some(found) => { - self.local_live_queries.remove(&found.0); - // TODO remove the watermarks - } - }; - } - - /// This will update the watermark of all live queries, regardless of their individual state - pub(crate) fn update_watermark_live_query( - &mut self, - live_query: &LqIndexKey, - watermark: &Versionstamp, - ) -> Result<(), &'static str> { - let lq_data = self.local_live_queries.get_mut(live_query).ok_or("Live query not found")?; - let current_lq_vs = conv::to_u128_be(lq_data.vs); - let proposed_vs = conv::to_u128_be(*watermark); - if proposed_vs >= current_lq_vs { - // We now need to increase the watermark so that scanning does not pick up the current observed - let new_proposed = proposed_vs + ONE_SHIFTED; - lq_data.vs = conv::try_u128_to_versionstamp(new_proposed) - .map_err(|_| "Could not convert to versionstamp")?; - - // We need to drop the borrow and keep the data - let lq_data = lq_data.clone(); - - // Since we modified, we now check if we need to update the change feed watermark - let valid_lqs = Self::live_queries_for_selector_impl( - &self.local_live_queries, - &live_query.selector, - ); - // Find the minimum watermark - let min_watermark = - valid_lqs.iter().map(|(_, v)| conv::to_u128_be(v.vs)).min().unwrap(); - // Get the current watermark - let current_watermark = - conv::to_u128_be(*self.cf_watermarks.get(&live_query.selector).unwrap()); - if min_watermark > current_watermark { - self.cf_watermarks.insert(live_query.selector.clone(), lq_data.vs); - } - } - Ok(()) - } - - pub(crate) fn get_watermarks(&self) -> &BTreeMap { - &self.cf_watermarks - } - - /// This is to iterate the change feed trackers by index - /// It is useful in situations where you want to hold a mutable reference, but still need - /// to iterate over it normally - /// This will break if values are added or removed, so keep the write lock while iterating - /// This can be improved by having droppable trackers/iterators returned - pub(crate) fn get_watermark_by_enum_index( - &self, - index: usize, - ) -> Option<(&LqSelector, &Versionstamp)> { - self.cf_watermarks.iter().nth(index) - } - - pub(crate) fn is_empty(&self) -> bool { - self.local_live_queries.is_empty() - } - - /// Find the necessary Live Query information for a given selector - pub(crate) fn live_queries_for_selector( - &self, - selector: &LqSelector, - ) -> Vec<(LqIndexKey, LqIndexValue)> { - Self::live_queries_for_selector_impl(&self.local_live_queries, selector) - } - - fn live_queries_for_selector_impl( - local_live_queries: &BTreeMap, - selector: &LqSelector, - ) -> Vec<(LqIndexKey, LqIndexValue)> { - local_live_queries - .iter() - .filter(|(k, _)| k.selector == *selector) - .map(|(k, v)| (k.clone(), v.clone())) - .collect() - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::sql::statements::LiveStatement; - use crate::sql::{Table, Uuid, Value}; - use std::str::FromStr; - - const NS: &str = "test_namespace"; - const DB: &str = "test_database"; - const TB: &str = "test_table"; - const DEFAULT_WATERMARK: [u8; 10] = [0; 10]; - - #[test] - fn 
registering_lq_tracks_cf() { - let mut tracker = LiveQueryTracker::new(); - assert!(tracker.is_empty()); - let lq_entry = an_lq_entry( - Uuid::from_str("36a35c76-8912-4b28-987a-4dcf276422c0").unwrap(), - NS, - DB, - TB, - ); - tracker.register_live_query(&lq_entry, DEFAULT_WATERMARK).unwrap(); - - assert_eq!(tracker.get_watermarks().len(), 1); - } - - #[test] - fn can_progress_a_live_query() { - let mut tracker = LiveQueryTracker::new(); - assert!(tracker.is_empty()); - let lq_entry = an_lq_entry( - Uuid::from_str("ffac79b6-39e7-45bb-901c-2cda393e4f8a").unwrap(), - NS, - DB, - TB, - ); - - // We set any watermark to start with - tracker.register_live_query(&lq_entry, DEFAULT_WATERMARK).unwrap(); - assert_tracker_has_watermark( - &tracker, - NS.to_string(), - DB.to_string(), - TB.to_string(), - DEFAULT_WATERMARK, - ); - - // Progress the watermark - let proposed_watermark = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - tracker.update_watermark_live_query(&lq_entry.as_key(), &proposed_watermark).unwrap(); - let new_watermark = increment_versionstamp(proposed_watermark); - assert_tracker_has_watermark( - &tracker, - NS.to_string(), - DB.to_string(), - TB.to_string(), - new_watermark, - ); - } - - #[test] - fn progressed_live_queries_that_get_removed_clear_cf_watermark() { - let mut tracker = LiveQueryTracker::new(); - assert!(tracker.is_empty()); - - // Add lq - let lq_entry = an_lq_entry( - Uuid::from_str("97d28595-0297-4b77-9806-58ec726e21f1").unwrap(), - NS, - DB, - TB, - ); - tracker.register_live_query(&lq_entry, DEFAULT_WATERMARK).unwrap(); - - // Check watermark - let lq_selector = LqSelector { - ns: NS.to_string(), - db: DB.to_string(), - tb: TB.to_string(), - }; - assert_tracker_has_watermark( - &tracker, - lq_selector.ns.clone(), - lq_selector.db.clone(), - lq_selector.tb.clone(), - DEFAULT_WATERMARK, - ); - - // Progress watermark - let proposed_watermark = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - tracker.update_watermark_live_query(&lq_entry.as_key(), &proposed_watermark).unwrap(); - let mut modified_watermark = proposed_watermark; - modified_watermark[7] += 1; - assert_tracker_has_watermark( - &tracker, - lq_selector.ns.clone(), - lq_selector.db.clone(), - lq_selector.tb.clone(), - modified_watermark, - ); - } - - #[test] - fn two_live_queries_one_in_catchup() { - let mut tracker = LiveQueryTracker::new(); - assert!(tracker.is_empty()); - - // Add lq - let lq1 = an_lq_entry( - Uuid::from_str("4b93a192-9f5f-4014-aa2e-93ecff8ad2e6").unwrap(), - NS, - DB, - TB, - ); - tracker.register_live_query(&lq1, DEFAULT_WATERMARK).unwrap(); - - // Check watermark is "default" - let wms = tracker.get_watermarks(); - assert_eq!(wms.len(), 1); - let (selector, watermark) = wms.iter().next().unwrap(); - assert_eq!( - selector, - &LqSelector { - ns: NS.to_string(), - db: DB.to_string(), - tb: TB.to_string(), - } - ); - assert_eq!(watermark, &DEFAULT_WATERMARK); - - // Progress the watermark - let progressed_watermark = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - tracker.update_watermark_live_query(&lq1.as_key(), &progressed_watermark).unwrap(); - - // Add a second live query - let lq2 = an_lq_entry( - Uuid::from_str("ec023004-c657-49f9-8688-33e4ab490fd2").unwrap(), - NS, - DB, - TB, - ); - - // Check the watermark is shared - it has moved backwards - tracker.register_live_query(&lq2, DEFAULT_WATERMARK).unwrap(); - assert_tracker_has_watermark( - &tracker, - NS.to_string(), - DB.to_string(), - TB.to_string(), - DEFAULT_WATERMARK, - ); - - // But the individual live query watermarks are intact - let tracked_live_queries = 
tracker.live_queries_for_selector(&LqSelector { - ns: NS.to_string(), - db: DB.to_string(), - tb: TB.to_string(), - }); - let progressed_watermark = increment_versionstamp(progressed_watermark); - assert_eq!(tracked_live_queries.len(), 2); - assert_eq!(tracked_live_queries[0].1.vs, progressed_watermark); - assert_eq!(tracked_live_queries[1].1.vs, DEFAULT_WATERMARK); - } - - /// Fixture to provide necessary data for a tracked live query - fn an_lq_entry(live_id: Uuid, ns: &str, db: &str, tb: &str) -> LqEntry { - LqEntry { - live_id, - ns: ns.to_string(), - db: db.to_string(), - stm: LiveStatement { - id: live_id, - node: Default::default(), - expr: Default::default(), - what: Value::Table(Table(tb.to_string())), - cond: None, - fetch: None, - archived: None, - session: None, - auth: None, - }, - } - } - - /// Validate there is only a single watermark with the given data - fn assert_tracker_has_watermark( - tracker: &LiveQueryTracker, - ns: String, - db: String, - tb: String, - vs: Versionstamp, - ) { - assert_eq!(tracker.get_watermarks().len(), 1); - let (selector, watermark) = tracker.get_watermarks().iter().next().unwrap(); - assert_eq!( - selector, - &LqSelector { - ns, - db, - tb - } - ); - assert_eq!(watermark, &vs); - } - - fn increment_versionstamp(vs: Versionstamp) -> Versionstamp { - let u128_be = conv::to_u128_be(vs); - let incremented = u128_be + ONE_SHIFTED; - conv::try_u128_to_versionstamp(incremented).unwrap() - } -} diff --git a/core/src/kvs/lq_structs.rs b/core/src/kvs/lq_structs.rs deleted file mode 100644 index dbd61b4c..00000000 --- a/core/src/kvs/lq_structs.rs +++ /dev/null @@ -1,138 +0,0 @@ -use crate::dbs::node::Timestamp; -use crate::sql::statements::LiveStatement; -use crate::sql::Uuid; -use crate::vs::Versionstamp; -use std::cmp::Ordering; - -/// Used for cluster logic to move LQ data to LQ cleanup code -/// Not a stored struct; Used only in this module -/// -/// This struct is public because it is used in Live Query errors for v1. 
-/// V1 is now deprecated and the struct can be made non-public -#[derive(Debug, Clone, Eq, PartialEq)] -#[non_exhaustive] -pub struct LqValue { - pub nd: Uuid, - pub ns: String, - pub db: String, - pub tb: String, - pub lq: Uuid, -} - -/// Used to track unreachable live queries in v1 -#[derive(Debug)] -pub(crate) enum UnreachableLqType { - Nd(LqValue), - Tb(LqValue), -} - -impl UnreachableLqType { - pub(crate) fn get_inner(&self) -> &LqValue { - match self { - UnreachableLqType::Nd(lq) => lq, - UnreachableLqType::Tb(lq) => lq, - } - } -} - -impl PartialEq for UnreachableLqType { - fn eq(&self, other: &Self) -> bool { - self.get_inner().lq == other.get_inner().lq - } -} - -impl Eq for UnreachableLqType {} - -impl PartialOrd for UnreachableLqType { - fn partial_cmp(&self, other: &Self) -> Option { - Option::Some(self.get_inner().lq.cmp(&other.get_inner().lq)) - } -} - -impl Ord for UnreachableLqType { - fn cmp(&self, other: &Self) -> Ordering { - self.get_inner().lq.cmp(&other.get_inner().lq) - } -} - -/// LqSelector is used for tracking change-feed backed queries in a common baseline -/// The intention is to have a collection of live queries that can have batch operations performed on them -/// This reduces the number of change feed queries -#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Debug)] -pub(crate) struct LqSelector { - pub(crate) ns: String, - pub(crate) db: String, - pub(crate) tb: String, -} - -/// This is an internal-only helper struct for organising the keys of how live queries are accessed -/// Because we want immutable keys, we cannot put mutable things in such as ts and vs -#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Debug)] -pub(crate) struct LqIndexKey { - pub(crate) selector: LqSelector, - pub(crate) lq: Uuid, -} - -/// Internal only struct -/// This can be assumed to have a mutable reference -#[derive(Eq, PartialEq, Clone, Debug)] -pub(crate) struct LqIndexValue { - pub(crate) stm: LiveStatement, - pub(crate) vs: Versionstamp, - // TODO(phughk, pre-2.0): unused? 
added because we have access to timestamp checkpoints but they arent used and this can be deleted - pub(crate) ts: Timestamp, -} - -/// Stores all data required for tracking a live query -/// Can be used to derive various in-memory map indexes and values -#[derive(Debug)] -#[cfg_attr(test, derive(PartialEq, Clone))] -pub(crate) struct LqEntry { - pub(crate) live_id: Uuid, - pub(crate) ns: String, - pub(crate) db: String, - pub(crate) stm: LiveStatement, -} - -/// This is a type representing information that is tracked outside of a datastore -/// For example, live query IDs need to be tracked by websockets so they are closed correctly on closing a connection -#[derive(Debug)] -#[cfg_attr(test, derive(PartialEq, Clone))] -#[allow(dead_code)] -pub(crate) enum TrackedResult { - LiveQuery(LqEntry), - KillQuery(KillEntry), -} - -/// KillEntry is a type that is used to hold the data necessary to kill a live query -/// It is not used for any indexing -#[derive(Debug)] -#[cfg_attr(test, derive(PartialEq, Clone))] -pub(crate) struct KillEntry { - pub(crate) live_id: Uuid, - pub(crate) ns: String, - pub(crate) db: String, -} - -impl LqEntry { - /// Treat like an into from a borrow - pub(crate) fn as_key(&self) -> LqIndexKey { - let tb = self.stm.what.to_string(); - LqIndexKey { - selector: LqSelector { - ns: self.ns.clone(), - db: self.db.clone(), - tb, - }, - lq: self.live_id, - } - } - - pub(crate) fn as_value(&self, vs: Versionstamp, ts: Timestamp) -> LqIndexValue { - LqIndexValue { - stm: self.stm.clone(), - vs, - ts, - } - } -} diff --git a/core/src/kvs/lq_v2_doc.rs b/core/src/kvs/lq_v2_doc.rs deleted file mode 100644 index 01f1572c..00000000 --- a/core/src/kvs/lq_v2_doc.rs +++ /dev/null @@ -1,420 +0,0 @@ -use std::borrow::Cow; - -use crate::cf::TableMutation; -use crate::dbs::Workable; -use crate::doc::Document; -use crate::err::Error; -use crate::sql::{Array, Object, Value}; - -const EMPTY_DOC: Value = Value::None; - -/// Construct a document from a Change Feed mutation -/// This is required to perform document operations such as live query notifications -pub(in crate::kvs) fn construct_document( - mutation: &TableMutation, -) -> Result, Error> { - match mutation { - TableMutation::Set(id, current_value) => { - let doc = Document::new_artificial( - Some(id), - None, - Cow::Borrowed(current_value), - Cow::Owned(EMPTY_DOC), - Workable::Normal, - ); - Ok(Some(doc)) - } - TableMutation::Del(id) => { - let fake_previous_value_because_we_need_the_id_and_del_doesnt_store_value = - Value::Object(Object::from(map! 
{ - "id" => Value::Thing(id.clone()), - })); - let doc = Document::new_artificial( - Some(id), - None, - Cow::Owned(Value::None), - Cow::Owned(fake_previous_value_because_we_need_the_id_and_del_doesnt_store_value), - Workable::Normal, - ); - Ok(Some(doc)) - } - TableMutation::Def(_) => Ok(None), - TableMutation::SetWithDiff(id, current_value, operations) => { - // We need a previous value otherwise the Value::compute function won't work correctly - // This is also how IDs are carried into notifications, not via doc.rid - let mut copy = current_value.clone(); - copy.patch(Value::Array(Array( - operations.iter().map(|op| Value::Object(Object::from(op.clone()))).collect(), - )))?; - let doc = Document::new_artificial( - Some(id), - None, - Cow::Borrowed(current_value), - Cow::Owned(copy), - Workable::Normal, - ); - trace!("Constructed artificial document: {:?}, is_new={}", doc, doc.is_new()); - // TODO(SUR-328): reverse diff and apply to doc to retrieve original version of doc - Ok(Some(doc)) - } - TableMutation::DelWithOriginal(id, val) => { - let doc = Document::new_artificial( - Some(id), - None, - Cow::Owned(Value::None), - Cow::Borrowed(val), - Workable::Normal, - ); - Ok(Some(doc)) - } - } -} - -#[cfg(test)] -mod test { - use crate::cf::TableMutation; - use crate::kvs::lq_v2_doc::construct_document; - use crate::sql::statements::DefineTableStatement; - use crate::sql::{Idiom, Object, Operation, Strand, Thing, Value}; - - #[test] - fn test_construct_document_create() { - let thing = Thing::from(("table", "id")); - let value = Value::Strand(Strand::from("value")); - let tb_mutation = TableMutation::Set(thing.clone(), value); - let doc = construct_document(&tb_mutation).unwrap(); - let doc = doc.unwrap(); - assert!(doc.is_new()); - assert!(doc.initial_doc().is_none()); - assert!(doc.current_doc().is_some()); - } - - #[test] - fn test_construct_document_empty_value_is_valid() { - let thing = Thing::from(("table", "id")); - let value = Value::None; - let tb_mutation = TableMutation::Set(thing.clone(), value); - let doc = construct_document(&tb_mutation).unwrap(); - let doc = doc.unwrap(); - assert!(!doc.is_new()); - // This is actually invalid data - we are going to treat it as delete though - assert!(doc.is_delete()); - assert!(doc.initial_doc().is_none()); - assert!(doc.current_doc().is_none()); - } - - #[test] - fn test_construct_document_update() { - let thing = Thing::from(("table", "id")); - let current_value = Value::Object(Object(map! { - "first_field".to_string() => Value::Strand(Strand::from("first_value")), - "second_field".to_string() => Value::Strand(Strand::from("second_value")), - })); - let operations = vec![ - Operation::Remove { - path: Idiom::from("first_field"), - }, - Operation::Replace { - path: Idiom::from("second_field"), - value: Value::Strand(Strand::from("original_value")), - }, - Operation::Add { - path: Idiom::from("third_field"), - value: Value::Strand(Strand::from("third_value")), - }, - ]; - let expected_original = Value::Object(Object(map! 
{ - "second_field".to_string() => Value::Strand(Strand::from("original_value")), - "third_field".to_string() => Value::Strand(Strand::from("third_value")), - })); - let tb_mutation = - TableMutation::SetWithDiff(thing.clone(), current_value.clone(), operations); - let doc = construct_document(&tb_mutation).unwrap(); - let doc = doc.unwrap(); - assert!(!doc.is_new()); - assert!(!doc.is_delete()); - assert_eq!(doc.initial_doc(), &expected_original, "{:?}", doc.initial_doc()); - assert_eq!(doc.current_doc(), ¤t_value, "{:?}", doc.current_doc()); - } - - #[test] - fn test_construct_document_delete() { - let thing = Thing::from(("table", "id")); - let tb_mutation = TableMutation::Del(thing.clone()); - let doc = construct_document(&tb_mutation).unwrap(); - let doc = doc.unwrap(); - // The previous and current doc values are "None", so technically this is a new doc as per - // current == None - assert!(!doc.is_new(), "{:?}", doc); - assert!(doc.is_delete(), "{:?}", doc); - assert!(doc.current_doc().is_none()); - assert!(doc.initial_doc().is_some()); - match doc.initial_doc() { - Value::Object(o) => { - assert!(o.contains_key("id")); - assert_eq!(o.get("id").unwrap(), &Value::Thing(thing)); - } - _ => panic!("Initial doc should be an object"), - } - } - - #[test] - fn test_construct_document_delete_with_original() { - let thing = Thing::from(("table", "id")); - let original = Value::Object(Object(map! { - "id".to_string() => Value::Thing(thing.clone()), - "some_key".to_string() => Value::Strand(Strand::from("some_value")), - })); - let tb_mutation = TableMutation::DelWithOriginal(thing.clone(), original); - let doc = construct_document(&tb_mutation).unwrap(); - let doc = doc.unwrap(); - // The previous and current doc values are "None", so technically this is a new doc as per - // current == None - assert!(!doc.is_new(), "{:?}", doc); - assert!(doc.is_delete(), "{:?}", doc); - assert!(doc.current_doc().is_none()); - assert!(doc.initial_doc().is_some()); - match doc.initial_doc() { - Value::Object(o) => { - assert!(o.contains_key("id")); - assert_eq!(o.get("id").unwrap(), &Value::Thing(thing)); - } - _ => panic!("Initial doc should be an object"), - } - } - - #[test] - fn test_construct_document_none_for_schema() { - let tb_mutation = TableMutation::Def(DefineTableStatement::default()); - let doc = construct_document(&tb_mutation).unwrap(); - assert!(doc.is_none()); - } -} - -#[cfg(feature = "kv-mem")] -#[cfg(test)] -mod test_check_lqs_and_send_notifications { - use std::collections::BTreeMap; - use std::sync::Arc; - - use crate::cf::TableMutation; - use crate::ctx::Context; - use crate::dbs::fuzzy_eq::FuzzyEq; - use crate::dbs::{Action, Notification, Options, Session, Statement}; - use crate::fflags::FFLAGS; - use crate::iam::{Auth, Role}; - use crate::kvs::lq_v2_doc::construct_document; - use crate::kvs::LockType::Optimistic; - use crate::kvs::{Datastore, TransactionType}; - use crate::sql::paths::{OBJ_PATH_ACCESS, OBJ_PATH_AUTH, OBJ_PATH_TOKEN}; - use crate::sql::statements::{CreateStatement, DeleteStatement, LiveStatement}; - use crate::sql::{Fields, Object, Strand, Table, Thing, Uuid, Value, Values}; - use channel::Sender; - use futures::executor::block_on; - use once_cell::sync::Lazy; - use reblessive::TreeStack; - use TransactionType::Write; - - static SETUP: Lazy> = Lazy::new(|| Arc::new(block_on(setup_test_suite_init()))); - - struct TestSuite { - ns: String, - db: String, - tb: String, - } - - async fn setup_test_suite_init() -> TestSuite { - let ds = 
Datastore::new("memory").await.unwrap(); - let ns = "the_namespace"; - let db = "the_database"; - let tb = "the_table"; - - // First we define levels of permissions and schemas and required CF - let vars = Some(BTreeMap::new()); - ds.execute( - &format!( - " - USE NAMESPACE {ns}; - USE DATABASE {db}; - DEFINE TABLE {tb} CHANGEFEED 1m INCLUDE ORIGINAL PERMISSIONS FULL; - " - ), - &Session::owner(), - vars, - ) - .await - .unwrap() - .into_iter() - .map(|r| r.result.unwrap()) - .for_each(drop); - - TestSuite { - ns: ns.to_string(), - db: db.to_string(), - tb: tb.to_string(), - } - } - - #[test_log::test(tokio::test)] - async fn test_create() { - if !FFLAGS.change_feed_live_queries.enabled_test { - return; - } - - // Setup channels used for listening to LQs - let (sender, receiver) = channel::unbounded(); - let opt = a_usable_options(&sender); - - // WHEN: - // Construct document we are validating - let record_id = Thing::from((SETUP.tb.as_str(), "id")); - let value = Value::Strand(Strand::from("value")); - let tb_mutation = TableMutation::Set(record_id.clone(), value); - let doc = construct_document(&tb_mutation).unwrap().unwrap(); - - // AND: - // Perform "live query" on the constructed doc that we are checking - let live_statement = a_live_query_statement(); - let executed_statement = a_create_statement(); - let mut tx = Datastore::new("memory") - .await - .unwrap() - .transaction(Write, Optimistic) - .await - .unwrap() - .enclose(); - let ctx = Context::background().set_transaction(tx.clone()); - let mut stack = TreeStack::new(); - stack.enter(|stk| async { - doc.check_lqs_and_send_notifications( - stk, - &ctx, - &opt, - &Statement::Create(&executed_statement), - &[&live_statement], - &sender, - ) - .await - .unwrap(); - }); - tx.lock().await.commit().await.unwrap(); - - // THEN: - let notification = receiver.try_recv().expect("There should be a notification"); - assert!( - notification.fuzzy_eq(&Notification::new( - Uuid::default(), - Action::Create, - Value::Strand(Strand::from("value")) - )), - "{:?}", - notification - ); - assert!(receiver.try_recv().is_err()); - } - - #[test_log::test(tokio::test)] - async fn test_delete() { - if !FFLAGS.change_feed_live_queries.enabled_test { - return; - } - - // Setup channels used for listening to LQs - let (sender, receiver) = channel::unbounded(); - let opt = a_usable_options(&sender); - - // WHEN: - // Construct document we are validating - let record_id = Thing::from((SETUP.tb.as_str(), "id")); - let value = Value::Strand(Strand::from("value")); - let tb_mutation = TableMutation::Set(record_id.clone(), value); - let doc = construct_document(&tb_mutation).unwrap().unwrap(); - - // AND: - // Perform "live query" on the constructed doc that we are checking - let live_statement = a_live_query_statement(); - let executed_statement = a_delete_statement(); - let mut tx = Datastore::new("memory") - .await - .unwrap() - .transaction(Write, Optimistic) - .await - .unwrap() - .enclose(); - let ctx = Context::background().set_transaction(tx.clone()); - let mut stack = TreeStack::new(); - stack.enter(|stk| async { - doc.check_lqs_and_send_notifications( - stk, - &ctx, - &opt, - &Statement::Delete(&executed_statement), - &[&live_statement], - &sender, - ) - .await - .unwrap(); - }); - tx.lock().await.commit().await.unwrap(); - - // THEN: - let notification = receiver.try_recv().expect("There should be a notification"); - // TODO(SUR-349): Delete value should be the object that was just deleted - let expected_value = Value::Object(Object::default()); - 
assert!( - notification.fuzzy_eq(&Notification::new( - Uuid::default(), - Action::Delete, - expected_value - )), - "{:?}", - notification - ); - assert!(receiver.try_recv().is_err()); - } - - // Live queries will have authentication info associated with them - // This is a way to fake that - fn a_live_query_statement() -> LiveStatement { - let mut stm = LiveStatement::new(Fields::all()); - let mut session: BTreeMap = BTreeMap::new(); - session.insert(OBJ_PATH_ACCESS.to_string(), Value::Strand(Strand::from("access"))); - session.insert(OBJ_PATH_AUTH.to_string(), Value::Strand(Strand::from("auth"))); - session.insert(OBJ_PATH_TOKEN.to_string(), Value::Strand(Strand::from("token"))); - let session = Value::Object(Object::from(session)); - stm.session = Some(session); - stm.auth = Some(Auth::for_db(Role::Owner, "namespace", "database")); - stm - } - - // Fake a create statement that does not involve parsing the query - fn a_create_statement() -> CreateStatement { - CreateStatement { - only: false, - what: Values(vec![Value::Table(Table::from(SETUP.tb.clone()))]), - data: None, - output: None, - timeout: None, - parallel: false, - } - } - - fn a_delete_statement() -> DeleteStatement { - DeleteStatement { - only: false, - what: Values(vec![Value::Table(Table::from(SETUP.tb.clone()))]), - cond: None, - output: None, - timeout: None, - parallel: false, - } - } - - fn a_usable_options(sender: &Sender) -> Options { - let mut ctx = Context::default(); - ctx.add_notifications(Some(sender)); - Options::default() - .with_ns(Some(SETUP.ns.clone().into())) - .with_db(Some(SETUP.db.clone().into())) - } -} diff --git a/core/src/kvs/lq_v2_fut.rs b/core/src/kvs/lq_v2_fut.rs deleted file mode 100644 index 6c9c8f68..00000000 --- a/core/src/kvs/lq_v2_fut.rs +++ /dev/null @@ -1,205 +0,0 @@ -use crate::cf; -use crate::cf::ChangeSet; -use crate::ctx::Context; -use crate::dbs::{Options, Statement}; -use crate::err::Error; -use crate::fflags::FFLAGS; -use crate::kvs::lq_cf::LiveQueryTracker; -use crate::kvs::lq_structs::{LqIndexKey, LqIndexValue, LqSelector}; -use crate::kvs::lq_v2_doc::construct_document; -use crate::kvs::LockType::Optimistic; -use crate::kvs::TransactionType::Read; -use crate::kvs::{Datastore, Transaction}; -use crate::sql::statements::show::ShowSince; -use crate::vs::conv; -use reblessive::tree::Stk; -use std::collections::BTreeMap; -use std::sync::Arc; -use tokio::sync::RwLock; - -/// Poll change feeds for live query notifications -pub async fn process_lq_notifications( - ds: &Datastore, - ctx: &Context<'_>, - stk: &mut Stk, - opt: &Options, -) -> Result<(), Error> { - // Runtime feature gate, as it is not production-ready - if !FFLAGS.change_feed_live_queries.enabled() { - return Ok(()); - } - // Return if there are no live queries - if ds.notification_channel.is_none() { - trace!("Channels is none, short-circuiting"); - return Ok(()); - } - if ds.lq_cf_store.read().await.is_empty() { - // This is safe - just a shortcut - trace!("No live queries, short-circuiting"); - return Ok(()); - } - - // Change map includes a mapping of selector to changesets, ordered by versionstamp - let mut relevant_changesets: BTreeMap> = BTreeMap::new(); - { - let tx = ds.transaction(Read, Optimistic).await?; - populate_relevant_changesets( - tx, - ds.lq_cf_store.clone(), - ds.engine_options.live_query_catchup_size, - &mut relevant_changesets, - ) - .await?; - }; - - for (selector, change_sets) in relevant_changesets { - // find matching live queries - let lq_pairs = 
ds.lq_cf_store.read().await.live_queries_for_selector(&selector); - - // Find relevant changes - #[cfg(debug_assertions)] - trace!("There are {} change sets", change_sets.len()); - #[cfg(debug_assertions)] - trace!( - "\n{}", - change_sets - .iter() - .enumerate() - .map(|(i, x)| format!("[{i}] {:?}", x)) - .collect::>() - .join("\n") - ); - for change_set in change_sets { - process_change_set_for_notifications(ds, ctx, stk, opt, change_set, &lq_pairs).await?; - } - } - trace!("Finished process lq successfully"); - Ok(()) -} -async fn populate_relevant_changesets( - mut tx: Transaction, - live_query_tracker: Arc>, - catchup_size: u32, - relevant_changesets: &mut BTreeMap>, -) -> Result<(), Error> { - let live_query_tracker = live_query_tracker.write().await; - let tracked_cfs = live_query_tracker.get_watermarks().len(); - // We are going to track the latest observed versionstamp here - for current in 0..tracked_cfs { - // The reason we iterate this way (len+index) is because we "know" that the list won't change, but we - // want mutable access to it so we can update it while iterating - let (selector, vs) = live_query_tracker.get_watermark_by_enum_index(current).unwrap(); - - // Read the change feed for the selector - #[cfg(debug_assertions)] - trace!( - "Checking for new changes for ns={} db={} tb={} vs={:?}", - selector.ns, - selector.db, - selector.tb, - vs - ); - let res = cf::read( - &mut tx, - &selector.ns, - &selector.db, - // Technically, we can not fetch by table and do the per-table filtering this side. - // That is an improvement though - Some(&selector.tb), - ShowSince::versionstamp(vs), - Some(catchup_size), - ) - .await?; - // Confirm we do need to change watermark - this is technically already handled by the cf range scan - if res.is_empty() { - #[cfg(debug_assertions)] - trace!( - "There were no changes in the change feed for {:?} from versionstamp {:?}", - selector, - conv::versionstamp_to_u64(vs) - ) - } - if let Some(change_set) = res.last() { - if conv::versionstamp_to_u64(&change_set.0) > conv::versionstamp_to_u64(vs) { - #[cfg(debug_assertions)] - trace!("Adding a change set for lq notification processing"); - // This does not guarantee a notification, as a changeset an include many tables and many changes - relevant_changesets.insert(selector.clone(), res); - } - } - } - tx.cancel().await -} - -async fn process_change_set_for_notifications( - ds: &Datastore, - ctx: &Context<'_>, - stk: &mut Stk, - opt: &Options, - change_set: ChangeSet, - lq_pairs: &[(LqIndexKey, LqIndexValue)], -) -> Result<(), Error> { - #[cfg(debug_assertions)] - trace!("Moving to next change set, {:?}", change_set); - for (lq_key, lq_value) in lq_pairs.iter() { - #[cfg(debug_assertions)] - trace!("Processing live query for notification key={:?} and value={:?}", lq_key, lq_value); - let change_vs = change_set.0; - let database_mutation = &change_set.1; - for table_mutations in database_mutation.0.iter() { - if table_mutations.0 == lq_key.selector.tb { - // Create a doc of the table value - // Run the 'lives' logic on the doc, while providing live queries instead of reading from storage - // This will generate and send notifications - #[cfg(debug_assertions)] - trace!( - "There are {} table mutations being prepared for notifications", - table_mutations.1.len() - ); - for (_i, mutation) in table_mutations.1.iter().enumerate() { - #[cfg(debug_assertions)] - trace!("[{} @ {:?}] Processing table mutation: {:?} Constructing document from mutation", _i, change_vs, mutation); - if let Some(doc) = 
construct_document(mutation)? { - // We know we are only processing a single LQ at a time, so we can limit notifications to 1 - let notification_capacity = 1; - // We track notifications as a separate channel in case we want to process - // for the current state we only forward - let (local_notification_channel_sender, local_notification_channel_recv) = - channel::bounded(notification_capacity); - doc.check_lqs_and_send_notifications( - stk, - ctx, - opt, - &Statement::Live(&lq_value.stm), - [&lq_value.stm].as_slice(), - &local_notification_channel_sender, - ) - .await - .map_err(|e| { - Error::Internal(format!( - "Error checking lqs for notifications: {:?}", - e - )) - })?; - - // Send the notifications to driver or api - while let Ok(notification) = local_notification_channel_recv.try_recv() { - #[cfg(debug_assertions)] - trace!("Sending notification to client: {:?}", notification); - ds.notification_channel - .as_ref() - .unwrap() - .0 - .send(notification) - .await - .unwrap(); - } - } - // Progress the live query watermark - } - } - } - ds.lq_cf_store.write().await.update_watermark_live_query(lq_key, &change_vs).unwrap(); - } - Ok(()) -} diff --git a/core/src/kvs/mem/mod.rs b/core/src/kvs/mem/mod.rs index eb9f9b6d..26ccb86d 100644 --- a/core/src/kvs/mem/mod.rs +++ b/core/src/kvs/mem/mod.rs @@ -1,12 +1,10 @@ #![cfg(feature = "kv-mem")] use crate::err::Error; -#[cfg(debug_assertions)] -use crate::key::debug::sprint_key; use crate::kvs::Check; use crate::kvs::Key; use crate::kvs::Val; -use crate::vs::{try_to_u64_be, u64_to_versionstamp, Versionstamp}; +use std::fmt::Debug; use std::ops::Range; #[non_exhaustive] @@ -83,17 +81,24 @@ impl Datastore { } } -impl Transaction { +impl super::api::Transaction for Transaction { /// Behaviour if unclosed - pub(crate) fn check_level(&mut self, check: Check) { + fn check_level(&mut self, check: Check) { self.check = check; } + /// Check if closed - pub(crate) fn closed(&self) -> bool { + fn closed(&self) -> bool { self.done } + + /// Check if writeable + fn writeable(&self) -> bool { + self.write + } + /// Cancel a transaction - pub(crate) fn cancel(&mut self) -> Result<(), Error> { + async fn cancel(&mut self) -> Result<(), Error> { // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); @@ -105,8 +110,9 @@ impl Transaction { // Continue Ok(()) } + /// Commit a transaction - pub(crate) fn commit(&mut self) -> Result<(), Error> { + async fn commit(&mut self) -> Result<(), Error> { // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); @@ -122,10 +128,11 @@ impl Transaction { // Continue Ok(()) } + /// Check if a key exists - pub(crate) fn exi(&mut self, key: K) -> Result + async fn exists(&mut self, key: K) -> Result where - K: Into, + K: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -136,10 +143,11 @@ impl Transaction { // Return result Ok(res) } + /// Fetch a key from the database - pub(crate) fn get(&mut self, key: K) -> Result, Error> + async fn get(&mut self, key: K) -> Result, Error> where - K: Into, + K: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -150,97 +158,12 @@ impl Transaction { // Return result Ok(res) } - /// Obtain a new change timestamp for a key - /// which is replaced with the current timestamp when the transaction is committed. - /// NOTE: This should be called when composing the change feed entries for this transaction, - /// which should be done immediately before the transaction commit. 
- /// That is to keep other transactions commit delay(pessimistic) or conflict(optimistic) as less as possible. - #[allow(unused)] - pub(crate) fn get_timestamp(&mut self, key: K) -> Result - where - K: Into, - { - // Check to see if transaction is closed - if self.done { - return Err(Error::TxFinished); - } - // Write the timestamp to the "last-write-timestamp" key - // to ensure that no other transactions can commit with older timestamps. - let k: Key = key.into(); - let prev = self.inner.get(k.clone())?; - let ver = match prev { - Some(prev) => { - let slice = prev.as_slice(); - let res: Result<[u8; 10], Error> = match slice.try_into() { - Ok(ba) => { - #[cfg(debug_assertions)] - trace!( - "Previous timestamp for key {} is {}", - sprint_key(&k), - sprint_key(&ba) - ); - Ok(ba) - } - Err(e) => Err(Error::Ds(e.to_string())), - }; - let array = res?; - let prev = try_to_u64_be(array)?; - prev + 1 - } - None => 1, - }; - - let verbytes = u64_to_versionstamp(ver); - - self.inner.set(k, verbytes.to_vec())?; - // Return the uint64 representation of the timestamp as the result - Ok(verbytes) - } - /// Obtain a new key that is suffixed with the change timestamp - pub(crate) async fn get_versionstamped_key( - &mut self, - ts_key: K, - prefix: K, - suffix: K, - ) -> Result, Error> - where - K: Into, - { - // Check to see if transaction is closed - if self.done { - return Err(Error::TxFinished); - } - // Check to see if transaction is writable - if !self.write { - return Err(Error::TxReadonly); - } - - let ts_key: Key = ts_key.into(); - #[cfg(debug_assertions)] - let dbg_ts = sprint_key(&ts_key); - let prefix: Key = prefix.into(); - #[cfg(debug_assertions)] - let dbg_prefix = sprint_key(&prefix); - let mut suffix: Key = suffix.into(); - #[cfg(debug_assertions)] - let dbg_suffix = sprint_key(&suffix); - - let ts = self.get_timestamp(ts_key)?; - let mut k: Vec = prefix; - k.append(&mut ts.to_vec()); - k.append(&mut suffix); - - #[cfg(debug_assertions)] - trace!("get_versionstamped_key; prefix={dbg_prefix} ts={dbg_ts} suff={dbg_suffix}"); - - Ok(k) - } /// Insert or update a key in the database - pub(crate) fn set(&mut self, key: K, val: V) -> Result<(), Error> + async fn set(&mut self, key: K, val: V) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -255,11 +178,12 @@ impl Transaction { // Return result Ok(()) } + /// Insert a key if it doesn't exist in the database - pub(crate) fn put(&mut self, key: K, val: V) -> Result<(), Error> + async fn put(&mut self, key: K, val: V) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -274,11 +198,12 @@ impl Transaction { // Return result Ok(()) } - /// Insert a key if it doesn't exist in the database - pub(crate) fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> + + /// Insert a key if the current value matches a condition + async fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -293,10 +218,11 @@ impl Transaction { // Return result Ok(()) } + /// Delete a key - pub(crate) fn del(&mut self, key: K) -> Result<(), Error> + async fn del(&mut self, key: K) -> Result<(), Error> where - K: Into, + K: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -311,11 +237,12 @@ impl Transaction { 
// Return result Ok(()) } - /// Delete a key - pub(crate) fn delc<K, V>(&mut self, key: K, chk: Option<V>) -> Result<(), Error> + + /// Delete a key if the current value matches a condition + async fn delc<K, V>(&mut self, key: K, chk: Option<V>) -> Result<(), Error> where - K: Into<Key>, - V: Into<Val>, + K: Into<Key> + Debug, + V: Into<Val> + Debug, { // Check to see if transaction is closed if self.done { @@ -330,10 +257,31 @@ impl Transaction { // Return result Ok(()) } + /// Retrieve a range of keys from the databases - pub(crate) fn scan<K>(&mut self, rng: Range<K>, limit: u32) -> Result<Vec<(Key, Val)>, Error> + async fn keys<K>(&mut self, rng: Range<K>, limit: u32) -> Result<Vec<Key>, Error> where - K: Into<Key>, + K: Into<Key> + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Convert the range to bytes + let rng: Range<Key> = Range { + start: rng.start.into(), + end: rng.end.into(), + }; + // Scan the keys + let res = self.inner.keys(rng, limit as usize)?; + // Return result + Ok(res) + } + + /// Retrieve a range of keys from the databases + async fn scan<K>(&mut self, rng: Range<K>, limit: u32) -> Result<Vec<(Key, Val)>, Error> + where + K: Into<Key> + Debug, { // Check to see if transaction is closed if self.done { diff --git a/core/src/kvs/mod.rs b/core/src/kvs/mod.rs index adcb5f4d..fe3e5b14 100644 --- a/core/src/kvs/mod.rs +++ b/core/src/kvs/mod.rs @@ -11,9 +11,20 @@ //! - `rocksdb`: [RocksDB](https://github.com/facebook/rocksdb) an embeddable persistent key-value store for fast storage //! - `tikv`: [TiKV](https://github.com/tikv/tikv) a distributed, and transactional key-value database //! - `mem`: in-memory database + +mod api; +mod batch; mod cache; mod clock; mod ds; +mod export; +mod live; +mod node; +mod scanner; +mod stash; +mod tr; +mod tx; + mod fdb; mod indxdb; mod kv; @@ -21,16 +32,12 @@ mod mem; mod rocksdb; mod surrealkv; mod tikv; -mod tx; -pub(crate) mod lq_structs; - -mod lq_cf; -mod lq_v2_doc; -mod lq_v2_fut; #[cfg(test)] mod tests; pub use self::ds::*; pub use self::kv::*; +pub use self::live::*; +pub use self::tr::*; pub use self::tx::*; diff --git a/core/src/kvs/node.rs b/core/src/kvs/node.rs new file mode 100644 index 00000000..260663d7 --- /dev/null +++ b/core/src/kvs/node.rs @@ -0,0 +1,301 @@ +use crate::cnf::NORMAL_FETCH_SIZE; +use crate::dbs::node::Node; +use crate::err::Error; +use crate::kvs::Datastore; +use crate::kvs::Live; +use crate::kvs::LockType::*; +use crate::kvs::TransactionType::*; +use crate::sql::statements::LiveStatement; +use std::time::Duration; + +const TARGET: &str = "surrealdb::core::kvs::node"; + +impl Datastore { + /// Inserts a node for the first time into the cluster. + /// + /// This function should be run at server or database startup. + /// + /// This function ensures that this node is entered into the cluster + /// membership entries. This function must be run at server or database + /// startup, in order to write the initial entry and timestamp to storage.
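The `insert_node` function that follows relies on the first-write-wins semantics of `put`: the initial registration succeeds, and a duplicate registration surfaces as a cluster-level error. A minimal sketch of that error mapping against a toy map (the key layout here is an assumption of the sketch; the real key is built by `crate::key::root::nd`):

```rust
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum Error {
    TxKeyAlreadyExists,
    ClAlreadyExists { value: String },
}

// `put` semantics: insert only if the key does not exist yet
fn put(store: &mut HashMap<Vec<u8>, Vec<u8>>, key: Vec<u8>, val: Vec<u8>) -> Result<(), Error> {
    if store.contains_key(&key) {
        return Err(Error::TxKeyAlreadyExists);
    }
    store.insert(key, val);
    Ok(())
}

fn insert_node(store: &mut HashMap<Vec<u8>, Vec<u8>>, id: &str) -> Result<(), Error> {
    // Hypothetical node key layout, for illustration only
    let key = [b"/!nd".as_slice(), id.as_bytes()].concat();
    match put(store, key, id.as_bytes().to_vec()) {
        // Surface a duplicate registration as a cluster-level error
        Err(Error::TxKeyAlreadyExists) => Err(Error::ClAlreadyExists {
            value: id.to_string(),
        }),
        other => other,
    }
}

fn main() {
    let mut store = HashMap::new();
    let id = "00000000-0000-0000-0000-000000000001";
    insert_node(&mut store, id).unwrap();
    // Registering the same node twice is an error
    assert_eq!(
        insert_node(&mut store, id),
        Err(Error::ClAlreadyExists { value: id.to_string() })
    );
}
```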
+    #[instrument(err, level = "debug", target = "surrealdb::core::kvs::node", skip(self))]
+    pub async fn insert_node(&self, id: uuid::Uuid) -> Result<(), Error> {
+        // Log when this method is run
+        trace!(target: TARGET, "Inserting node in the cluster");
+        // Open transaction and set node data
+        let txn = self.transaction(Write, Optimistic).await?;
+        let key = crate::key::root::nd::Nd::new(id);
+        let now = self.clock.now().await;
+        let val = Node::new(id, now, false);
+        match run!(txn, txn.put(key, val)) {
+            Err(Error::TxKeyAlreadyExists) => Err(Error::ClAlreadyExists {
+                value: id.to_string(),
+            }),
+            other => other,
+        }
+    }
+
+    /// Updates an already existing node in the cluster.
+    ///
+    /// This function should be run periodically at a regular interval.
+    ///
+    /// This function updates the entry for this node with an up-to-date
+    /// timestamp. This ensures that the node is not marked as expired by any
+    /// garbage collection tasks, preventing any data cleanup for this node.
+    #[instrument(err, level = "debug", target = "surrealdb::core::kvs::node", skip(self))]
+    pub async fn update_node(&self, id: uuid::Uuid) -> Result<(), Error> {
+        // Log when this method is run
+        trace!(target: TARGET, "Updating node in the cluster");
+        // Open transaction and set node data
+        let txn = self.transaction(Write, Optimistic).await?;
+        let key = crate::key::root::nd::new(id);
+        let now = self.clock.now().await;
+        let val = Node::new(id, now, false);
+        run!(txn, txn.set(key, val))
+    }
+
+    /// Deletes a node from the cluster.
+    ///
+    /// This function should be run when a node is shutting down.
+    ///
+    /// This function marks the node as archived, ready for garbage collection.
+    /// Later on when garbage collection is running the live queries assigned
+    /// to this node will be removed, along with the node itself.
+    #[instrument(err, level = "debug", target = "surrealdb::core::kvs::node", skip(self))]
+    pub async fn delete_node(&self, id: uuid::Uuid) -> Result<(), Error> {
+        // Log when this method is run
+        trace!(target: TARGET, "Archiving node in the cluster");
+        // Open transaction and set node data
+        let txn = self.transaction(Write, Optimistic).await?;
+        let key = crate::key::root::nd::new(id);
+        let val = txn.get_node(id).await?;
+        let val = val.as_ref().archive();
+        run!(txn, txn.set(key, val))
+    }
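Taken together, these three calls are meant to be driven by the server lifecycle: register once at startup, refresh on a timer, archive on shutdown. As a rough illustration only (the tokio wiring and the elided `Datastore`/`Error` imports are assumptions, not part of this patch), a node might keep its membership entry fresh like this:

    use std::time::Duration;

    // `ds` is a handle to the refactored Datastore from this patch
    async fn run_membership(ds: &Datastore, id: uuid::Uuid) -> Result<(), Error> {
        // Register this node once at startup
        ds.insert_node(id).await?;
        // Refresh the heartbeat timestamp on a regular interval, staying
        // well inside the 30-second window checked by expire_nodes()
        let mut tick = tokio::time::interval(Duration::from_secs(10));
        loop {
            tick.tick().await;
            ds.update_node(id).await?;
        }
    }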
+    /// Expires nodes which have timed out of the cluster.
+    ///
+    /// This function should be run periodically at an interval.
+    ///
+    /// This function marks the node as archived, ready for garbage collection.
+    /// Later on when garbage collection is running the live queries assigned
+    /// to this node will be removed, along with the node itself.
+    #[instrument(err, level = "debug", target = "surrealdb::core::kvs::node", skip(self))]
+    pub async fn expire_nodes(&self) -> Result<(), Error> {
+        // Log when this method is run
+        trace!(target: TARGET, "Archiving expired nodes in the cluster");
+        // Open transaction and fetch nodes
+        let txn = self.transaction(Write, Optimistic).await?;
+        let now = self.clock.now().await;
+        let nds = catch!(txn, txn.all_nodes());
+        for nd in nds.iter() {
+            // Check that the node is active
+            if nd.is_active() {
+                // Check if the node has expired
+                if nd.hb < now - Duration::from_secs(30) {
+                    // Log the node archival
+                    trace!(target: TARGET, id = %nd.id, "Archiving node in the cluster");
+                    // Mark the node as archived
+                    let val = nd.archive();
+                    // Get the key for the node entry
+                    let key = crate::key::root::nd::new(nd.id);
+                    // Update the node entry
+                    catch!(txn, txn.set(key, val));
+                }
+            }
+        }
+        // Commit the changes
+        txn.commit().await
+    }
+
+    /// Cleans up nodes which are no longer in this cluster.
+    ///
+    /// This function should be run periodically at an interval.
+    ///
+    /// This function clears up all nodes which have been marked as archived.
+    /// When a matching node is found, all node queries, and table queries are
+    /// garbage collected, before the node itself is completely deleted.
+    #[instrument(err, level = "debug", target = "surrealdb::core::kvs::node", skip(self))]
+    pub async fn cleanup_nodes(&self) -> Result<(), Error> {
+        // Log when this method is run
+        trace!(target: TARGET, "Cleaning up archived nodes in the cluster");
+        // Fetch all of the expired nodes
+        let expired = {
+            let txn = self.transaction(Read, Optimistic).await?;
+            let nds = catch!(txn, txn.all_nodes());
+            // Filter the archived nodes
+            nds.iter().filter_map(Node::archived).collect::<Vec<_>>()
+        };
+        // Delete the live queries
+        {
+            for id in expired.iter() {
+                // Log the live query deletion
+                trace!(target: TARGET, id = %id, "Deleting live queries for node");
+                // Scan the live queries for this node
+                let txn = self.transaction(Write, Optimistic).await?;
+                let beg = crate::key::node::lq::prefix(*id);
+                let end = crate::key::node::lq::suffix(*id);
+                let mut next = Some(beg..end);
+                while let Some(rng) = next {
+                    let res = catch!(txn, txn.batch(rng, *NORMAL_FETCH_SIZE, true));
+                    next = res.next;
+                    for (k, v) in res.values.iter() {
+                        // Decode the data for this live query
+                        let val: Live = v.into();
+                        // Get the key for this node live query
+                        let nlq = crate::key::node::lq::Lq::decode(k)?;
+                        // Check that the node for this query is archived
+                        if expired.contains(&nlq.nd) {
+                            // Get the key for this table live query
+                            let tlq = crate::key::table::lq::new(&val.ns, &val.db, &val.tb, nlq.lq);
+                            // Delete the table live query
+                            catch!(txn, txn.del(tlq));
+                            // Delete the node live query
+                            catch!(txn, txn.del(nlq));
+                        }
+                    }
+                }
+                // Commit the changes
+                txn.commit().await?;
+            }
+        }
+        // Delete the expired nodes
+        {
+            let txn = self.transaction(Write, Optimistic).await?;
+            // Loop over the nodes and delete
+            for id in expired.iter() {
+                // Log the node deletion
+                trace!(target: TARGET, id = %id, "Deleting node from the cluster");
+                // Get the key for the node entry
+                let key = crate::key::root::nd::new(*id);
+                // Delete the cluster node entry
+                catch!(txn, txn.del(key));
+            }
+            // Commit the changes
+            txn.commit().await?;
+        }
+        // Everything was successful
+        Ok(())
+    }
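The only tunable in the expiry check above is the 30-second heartbeat window. A minimal, self-contained model of the predicate, using plain millisecond integers in place of `dbs::node::Timestamp` (an assumption made purely for illustration):

    use std::time::Duration;

    // A node is considered expired when its last heartbeat is more
    // than 30 seconds older than the current cluster time
    fn is_expired(hb_ms: u64, now_ms: u64) -> bool {
        hb_ms < now_ms.saturating_sub(Duration::from_secs(30).as_millis() as u64)
    }

    fn main() {
        assert!(is_expired(123_000, 160_000)); // 37s stale: archived
        assert!(!is_expired(140_000, 160_000)); // 20s stale: still active
    }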
+    /// Clean up all other miscellaneous data.
+    ///
+    /// This function should be run periodically at an interval.
+    ///
+    /// This function clears up all data which might have been missed from
+    /// previous cleanup runs, or when previous runs failed. This function
+    /// currently deletes all live queries, for nodes which no longer exist
+    /// in the cluster, from all namespaces, databases, and tables. It uses
+    /// a number of transactions in order to prevent failure of large or
+    /// long-running transactions on distributed storage engines.
+    #[instrument(err, level = "debug", target = "surrealdb::core::kvs::node", skip(self))]
+    pub async fn garbage_collect(&self) -> Result<(), Error> {
+        // Log when this method is run
+        trace!(target: TARGET, "Garbage collecting all miscellaneous data");
+        // Fetch expired nodes
+        let expired = {
+            let txn = self.transaction(Read, Optimistic).await?;
+            let nds = catch!(txn, txn.all_nodes());
+            // Filter the archived nodes
+            nds.iter().filter_map(Node::archived).collect::<Vec<_>>()
+        };
+        // Fetch all namespaces
+        let nss = {
+            let txn = self.transaction(Read, Optimistic).await?;
+            catch!(txn, txn.all_ns())
+        };
+        // Loop over all namespaces
+        for ns in nss.iter() {
+            // Log the namespace
+            trace!(target: TARGET, "Garbage collecting data in namespace {}", ns.name);
+            // Fetch all databases
+            let dbs = {
+                let txn = self.transaction(Read, Optimistic).await?;
+                catch!(txn, txn.all_db(&ns.name))
+            };
+            // Loop over all databases
+            for db in dbs.iter() {
+                // Log the database
+                trace!(target: TARGET, "Garbage collecting data in database {}/{}", ns.name, db.name);
+                // Fetch all tables
+                let tbs = {
+                    let txn = self.transaction(Read, Optimistic).await?;
+                    catch!(txn, txn.all_tb(&ns.name, &db.name))
+                };
+                // Loop over all tables
+                for tb in tbs.iter() {
+                    // Log the table
+                    trace!(target: TARGET, "Garbage collecting data in table {}/{}/{}", ns.name, db.name, tb.name);
+                    // Iterate over the table live queries
+                    let txn = self.transaction(Write, Optimistic).await?;
+                    let beg = crate::key::table::lq::prefix(&ns.name, &db.name, &tb.name);
+                    let end = crate::key::table::lq::suffix(&ns.name, &db.name, &tb.name);
+                    let mut next = Some(beg..end);
+                    while let Some(rng) = next {
+                        let res = catch!(txn, txn.batch(rng, *NORMAL_FETCH_SIZE, false));
+                        next = res.next;
+                        for (k, v) in res.values.iter() {
+                            // Decode the LIVE query statement
+                            let stm: LiveStatement = v.into();
+                            // Get the key for this table live query
+                            let tlq = crate::key::table::lq::Lq::decode(k)?;
+                            // Get the node id and the live query id
+                            let (nid, lid) = (stm.node.0, stm.id.0);
+                            // Check that the node for this query is archived
+                            if expired.contains(&stm.node) {
+                                // Get the key for this node live query
+                                let nlq = crate::key::node::lq::new(nid, lid);
+                                // Delete the node live query
+                                catch!(txn, txn.del(nlq));
+                                // Delete the table live query
+                                catch!(txn, txn.del(tlq));
+                            }
+                        }
+                    }
+                    // Commit the changes
+                    txn.commit().await?;
+                }
+            }
+        }
+        // All ok
+        Ok(())
+    }
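Both this pass and `cleanup_nodes` above rely on the same batched-range idiom: fetch one bounded page of a key range, process it, then resume from the `next` range the batch call hands back. A standalone sketch of that shape, with a hypothetical `fetch_batch` standing in for `Transaction::batch`:

    use std::ops::Range;

    struct Batch {
        next: Option<Range<Vec<u8>>>,    // Some(range) while more pages remain
        values: Vec<(Vec<u8>, Vec<u8>)>, // the key-value pairs in this page
    }

    // Stub standing in for a paged range read against the key-value store
    fn fetch_batch(_rng: Range<Vec<u8>>, _limit: u32) -> Batch {
        Batch { next: None, values: Vec::new() }
    }

    fn main() {
        let mut next = Some(vec![0x00u8]..vec![0xffu8]);
        while let Some(rng) = next {
            let res = fetch_batch(rng, 1000);
            next = res.next;
            for (_k, _v) in res.values.iter() {
                // process or delete each entry inside a bounded transaction
            }
        }
    }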
+    /// Clean up the live queries for a disconnected connection.
+    ///
+    /// This function should be run when a WebSocket disconnects.
+    ///
+    /// This function clears up the live queries on the current node, which
+    /// are specified by unique live query UUIDs. This is necessary when a
+    /// WebSocket disconnects, and any associated live queries need to be
+    /// cleaned up and removed.
+    #[instrument(err, level = "debug", target = "surrealdb::core::kvs::node", skip(self))]
+    pub async fn delete_queries(&self, ids: Vec<uuid::Uuid>) -> Result<(), Error> {
+        // Log when this method is run
+        trace!(target: TARGET, "Deleting live queries for a connection");
+        // Open a writeable transaction
+        let txn = self.transaction(Write, Optimistic).await?;
+        // Loop over the live query unique ids
+        for id in ids.into_iter() {
+            // Get the key for this node live query
+            let nlq = crate::key::node::lq::new(self.id(), id);
+            // Fetch the LIVE meta data node entry
+            if let Some(val) = catch!(txn, txn.get(nlq)) {
+                // Decode the data for this live query
+                let lq: Live = val.into();
+                // Get the key for this node live query
+                let nlq = crate::key::node::lq::new(self.id(), id);
+                // Get the key for this table live query
+                let tlq = crate::key::table::lq::new(&lq.ns, &lq.db, &lq.tb, id);
+                // Delete the table live query
+                catch!(txn, txn.del(tlq));
+                // Delete the node live query
+                catch!(txn, txn.del(nlq));
+            }
+        }
+        // Commit the changes
+        txn.commit().await?;
+        // All ok
+        Ok(())
+    }
+}
diff --git a/core/src/kvs/rocksdb/mod.rs b/core/src/kvs/rocksdb/mod.rs
index 02ee927f..8aed8627 100644
--- a/core/src/kvs/rocksdb/mod.rs
+++ b/core/src/kvs/rocksdb/mod.rs
@@ -3,16 +3,14 @@ mod cnf;
use crate::err::Error;
-use crate::key::error::KeyCategory;
use crate::kvs::Check;
use crate::kvs::Key;
use crate::kvs::Val;
-use crate::vs::{try_to_u64_be, u64_to_versionstamp, Versionstamp};
-use futures::lock::Mutex;
use rocksdb::{
    DBCompactionStyle, DBCompressionType, LogLevel, OptimisticTransactionDB,
    OptimisticTransactionOptions, Options, ReadOptions, WriteOptions,
};
+use std::fmt::Debug;
use std::ops::Range;
use std::pin::Pin;
use std::sync::Arc;
@@ -32,13 +30,13 @@ pub struct Transaction {
    /// Should we check unhandled transactions?
    check: Check,
    /// The underlying datastore transaction
-   inner: Arc<Mutex<Option<rocksdb::Transaction<'static, OptimisticTransactionDB>>>>,
+   inner: Option<rocksdb::Transaction<'static, OptimisticTransactionDB>>,
    /// The read options containing the Snapshot
    ro: ReadOptions,
    // The above, supposedly 'static transaction
    // actually points here, so we need to ensure
    // the memory is kept alive. This pointer must
-   // be declared last, so that it is dropped last
+   // be declared last, so that it is dropped last.
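    // (Field order is load-bearing here: Rust drops struct fields in
    // declaration order, so `inner` above is always dropped before `_db`
    // below, and the transaction can never outlive the database memory
    // it points into.)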
_db: Pin>, } @@ -154,24 +152,31 @@ impl Datastore { done: false, write, check, - inner: Arc::new(Mutex::new(Some(inner))), + inner: Some(inner), ro, _db: self.db.clone(), }) } } -impl Transaction { +impl super::api::Transaction for Transaction { /// Behaviour if unclosed - pub(crate) fn check_level(&mut self, check: Check) { + fn check_level(&mut self, check: Check) { self.check = check; } + /// Check if closed - pub(crate) fn closed(&self) -> bool { + fn closed(&self) -> bool { self.done } + + /// Check if writeable + fn writeable(&self) -> bool { + self.write + } + /// Cancel a transaction - pub(crate) async fn cancel(&mut self) -> Result<(), Error> { + async fn cancel(&mut self) -> Result<(), Error> { // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); @@ -179,15 +184,16 @@ impl Transaction { // Mark this transaction as done self.done = true; // Cancel this transaction - match self.inner.lock().await.take() { + match self.inner.as_ref() { Some(inner) => inner.rollback()?, None => unreachable!(), }; // Continue Ok(()) } + /// Commit a transaction - pub(crate) async fn commit(&mut self) -> Result<(), Error> { + async fn commit(&mut self) -> Result<(), Error> { // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); @@ -198,110 +204,50 @@ impl Transaction { } // Mark this transaction as done self.done = true; - // Cancel this transaction - match self.inner.lock().await.take() { + // Commit this transaction + match self.inner.take() { Some(inner) => inner.commit()?, None => unreachable!(), }; // Continue Ok(()) } + /// Check if a key exists - pub(crate) async fn exi(&mut self, key: K) -> Result + async fn exists(&mut self, key: K) -> Result where - K: Into, + K: Into + Debug, { // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); } // Check the key - let res = - self.inner.lock().await.as_ref().unwrap().get_opt(key.into(), &self.ro)?.is_some(); + let res = self.inner.as_ref().unwrap().get_opt(key.into(), &self.ro)?.is_some(); // Return result Ok(res) } + /// Fetch a key from the database - pub(crate) async fn get(&mut self, key: K) -> Result, Error> + async fn get(&mut self, key: K) -> Result, Error> where - K: Into, + K: Into + Debug, { // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); } // Get the key - let res = self.inner.lock().await.as_ref().unwrap().get_opt(key.into(), &self.ro)?; + let res = self.inner.as_ref().unwrap().get_opt(key.into(), &self.ro)?; // Return result Ok(res) } - /// Obtain a new change timestamp for a key - /// which is replaced with the current timestamp when the transaction is committed. - /// NOTE: This should be called when composing the change feed entries for this transaction, - /// which should be done immediately before the transaction commit. - /// That is to keep other transactions commit delay(pessimistic) or conflict(optimistic) as less as possible. - #[allow(unused)] - pub(crate) async fn get_timestamp(&mut self, key: K) -> Result - where - K: Into, - { - // Check to see if transaction is closed - if self.done { - return Err(Error::TxFinished); - } - // Write the timestamp to the "last-write-timestamp" key - // to ensure that no other transactions can commit with older timestamps. 
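For context on the code being deleted here: each backend kept a 10-byte versionstamp per timestamp key, a big-endian u64 counter in the first 8 bytes with 2 trailing zero bytes, and `get_versionstamped_key` spliced it between a prefix and a suffix. A minimal model of that layout (re-implemented here as an assumption for illustration; the real helpers live in `crate::vs`):

    // Encode a u64 counter as a 10-byte versionstamp (8 BE bytes + 2 zero bytes)
    fn u64_to_versionstamp(v: u64) -> [u8; 10] {
        let mut vs = [0u8; 10];
        vs[..8].copy_from_slice(&v.to_be_bytes());
        vs
    }

    // Build a key of the form `prefix + versionstamp + suffix`
    fn versionstamped_key(prefix: &[u8], vs: [u8; 10], suffix: &[u8]) -> Vec<u8> {
        let mut k = prefix.to_vec();
        k.extend_from_slice(&vs);
        k.extend_from_slice(suffix);
        k
    }

    fn main() {
        let k = versionstamped_key(b"/cf", u64_to_versionstamp(1), b"!");
        assert_eq!(k.len(), 3 + 10 + 1); // keys sort by prefix, then by counter
    }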
- let k: Key = key.into(); - let prev = self.inner.lock().await.as_ref().unwrap().get_opt(k.clone(), &self.ro)?; - let ver = match prev { - Some(prev) => { - let slice = prev.as_slice(); - let res: Result<[u8; 10], Error> = match slice.try_into() { - Ok(ba) => Ok(ba), - Err(e) => Err(Error::Ds(e.to_string())), - }; - let array = res?; - let prev = try_to_u64_be(array)?; - prev + 1 - } - None => 1, - }; - let verbytes = u64_to_versionstamp(ver); - - self.inner.lock().await.as_ref().unwrap().put(k, verbytes)?; - // Return the uint64 representation of the timestamp as the result - Ok(verbytes) - } - /// Obtain a new key that is suffixed with the change timestamp - pub(crate) async fn get_versionstamped_key( - &mut self, - ts_key: K, - prefix: K, - suffix: K, - ) -> Result, Error> - where - K: Into, - { - // Check to see if transaction is closed - if self.done { - return Err(Error::TxFinished); - } - // Check to see if transaction is writable - if !self.write { - return Err(Error::TxReadonly); - } - let ts = self.get_timestamp(ts_key).await?; - let mut k: Vec = prefix.into(); - k.append(&mut ts.to_vec()); - k.append(&mut suffix.into()); - Ok(k) - } /// Insert or update a key in the database - pub(crate) async fn set(&mut self, key: K, val: V) -> Result<(), Error> + async fn set(&mut self, key: K, val: V) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -312,20 +258,16 @@ impl Transaction { return Err(Error::TxReadonly); } // Set the key - self.inner.lock().await.as_ref().unwrap().put(key.into(), val.into())?; + self.inner.as_ref().unwrap().put(key.into(), val.into())?; // Return result Ok(()) } + /// Insert a key if it doesn't exist in the database - pub(crate) async fn put( - &mut self, - category: KeyCategory, - key: K, - val: V, - ) -> Result<(), Error> + async fn put(&mut self, key: K, val: V) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -336,24 +278,24 @@ impl Transaction { return Err(Error::TxReadonly); } // Get the transaction - let inner = self.inner.lock().await; - let inner = inner.as_ref().unwrap(); + let inner = self.inner.as_ref().unwrap(); // Get the arguments let key = key.into(); let val = val.into(); // Set the key if empty match inner.get_opt(&key, &self.ro)? 
{ None => inner.put(key, val)?, - _ => return Err(Error::TxKeyAlreadyExistsCategory(category)), + _ => return Err(Error::TxKeyAlreadyExists), }; // Return result Ok(()) } - /// Insert a key if it doesn't exist in the database - pub(crate) async fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> + + /// Insert a key if the current value matches a condition + async fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -364,8 +306,7 @@ impl Transaction { return Err(Error::TxReadonly); } // Get the transaction - let inner = self.inner.lock().await; - let inner = inner.as_ref().unwrap(); + let inner = self.inner.as_ref().unwrap(); // Get the arguments let key = key.into(); let val = val.into(); @@ -379,10 +320,11 @@ impl Transaction { // Return result Ok(()) } + /// Delete a key - pub(crate) async fn del(&mut self, key: K) -> Result<(), Error> + async fn del(&mut self, key: K) -> Result<(), Error> where - K: Into, + K: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -393,15 +335,16 @@ impl Transaction { return Err(Error::TxReadonly); } // Remove the key - self.inner.lock().await.as_ref().unwrap().delete(key.into())?; + self.inner.as_ref().unwrap().delete(key.into())?; // Return result Ok(()) } - /// Delete a key - pub(crate) async fn delc(&mut self, key: K, chk: Option) -> Result<(), Error> + + /// Delete a key if the current value matches a condition + async fn delc(&mut self, key: K, chk: Option) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -412,8 +355,7 @@ impl Transaction { return Err(Error::TxReadonly); } // Get the transaction - let inner = self.inner.lock().await; - let inner = inner.as_ref().unwrap(); + let inner = self.inner.as_ref().unwrap(); // Get the arguments let key = key.into(); let chk = chk.map(Into::into); @@ -426,22 +368,68 @@ impl Transaction { // Return result Ok(()) } + /// Retrieve a range of keys from the databases - pub(crate) async fn scan( - &mut self, - rng: Range, - limit: u32, - ) -> Result, Error> + async fn keys(&mut self, rng: Range, limit: u32) -> Result, Error> where - K: Into, + K: Into + Debug, { // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); } // Get the transaction - let inner = self.inner.lock().await; - let inner = inner.as_ref().unwrap(); + let inner = self.inner.as_ref().unwrap(); + // Convert the range to bytes + let rng: Range = Range { + start: rng.start.into(), + end: rng.end.into(), + }; + // Create result set + let mut res = vec![]; + // Set the key range + let beg = rng.start.as_slice(); + let end = rng.end.as_slice(); + // Set the ReadOptions with the snapshot + let mut ro = ReadOptions::default(); + ro.set_snapshot(&inner.snapshot()); + // Create the iterator + let mut iter = inner.raw_iterator_opt(ro); + // Seek to the start key + iter.seek(&rng.start); + // Scan the keys in the iterator + while iter.valid() { + // Check the scan limit + if res.len() < limit as usize { + // Get the key and value + let k = iter.key(); + // Check the key and value + if let Some(k) = k { + if k >= beg && k < end { + res.push(k.to_vec()); + iter.next(); + continue; + } + } + } + // Exit + break; + } + // Return result + Ok(res) + } + + /// Retrieve a range of keys from the databases + async fn scan(&mut self, rng: Range, limit: u32) -> 
Result, Error> + where + K: Into + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Get the transaction + let inner = self.inner.as_ref().unwrap(); // Convert the range to bytes let rng: Range = Range { start: rng.start.into(), diff --git a/core/src/kvs/scanner.rs b/core/src/kvs/scanner.rs new file mode 100644 index 00000000..6d6b4f50 --- /dev/null +++ b/core/src/kvs/scanner.rs @@ -0,0 +1,110 @@ +use super::tx::Transaction; +use super::Key; +use super::Val; +use crate::cnf::MAX_STREAM_BATCH_SIZE; +use crate::err::Error; +use futures::stream::Stream; +use futures::Future; +use futures::FutureExt; +use std::collections::VecDeque; +use std::ops::Range; +use std::pin::Pin; +use std::task::{Context, Poll}; + +type Output = Result, Error>; + +pub(super) struct Scanner<'a> { + /// The store which started this range scan + store: &'a Transaction, + /// The number of keys to fetch at once + batch: u32, + // The key range for this range scan + range: Range, + // The results from the last range scan + results: VecDeque<(Key, Val)>, + /// The currently running future to be polled + future: Option + 'a>>>, + /// Whether this stream should try to fetch more + exhausted: bool, +} + +impl<'a> Scanner<'a> { + pub fn new(store: &'a Transaction, batch: u32, range: Range) -> Self { + Scanner { + store, + batch, + range, + future: None, + results: VecDeque::new(), + exhausted: false, + } + } +} + +impl<'a> Stream for Scanner<'a> { + type Item = Result<(Key, Val), Error>; + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + // If we have results, return the first one + if let Some(v) = self.results.pop_front() { + return Poll::Ready(Some(Ok(v))); + } + // If we won't fetch more results then exit + if self.exhausted { + return Poll::Ready(None); + } + // Check if there is no pending future task + if self.future.is_none() { + // Set the max number of results to fetch + let num = std::cmp::min(*MAX_STREAM_BATCH_SIZE, self.batch); + // Clone the range to use when scanning + let range = self.range.clone(); + // Prepare a future to scan for results + self.future = Some(Box::pin(self.store.scan(range, num))); + } + // Try to resolve the future + match self.future.as_mut().unwrap().poll_unpin(cx) { + // The future has now completed fully + Poll::Ready(result) => { + // Drop the completed asynchronous future + self.future = None; + // Check the result of the finished future + match result { + // The range was fetched successfully + Ok(v) => match v.is_empty() { + // There are no more results to stream + true => { + // Mark this stream as complete + Poll::Ready(None) + } + // There are results which need streaming + false => { + // We fetched the last elements in the range + if v.len() < self.batch as usize { + self.exhausted = true; + } + // Get the last element of the results + let last = v.last().unwrap(); + // Start the next scan from the last result + self.range.start.clone_from(&last.0); + // Ensure we don't see the last result again + self.range.start.push(0xff); + // Store the fetched range results + self.results.extend(v); + // Remove the first result to return + let item = self.results.pop_front().unwrap(); + // Return the first result + Poll::Ready(Some(Ok(item))) + } + }, + // Return the received error + Err(error) => Poll::Ready(Some(Err(error))), + } + } + // The future has not yet completed + Poll::Pending => Poll::Pending, + } + } +} diff --git a/core/src/kvs/stash.rs b/core/src/kvs/stash.rs new file mode 
100644 index 00000000..b5772682 --- /dev/null +++ b/core/src/kvs/stash.rs @@ -0,0 +1,17 @@ +use crate::idg::u32::U32; +use crate::kvs::kv::Key; +use std::collections::HashMap; + +#[derive(Default)] +pub(super) struct Stash(pub HashMap); + +impl Stash { + /// Set a key in the cache + pub fn set(&mut self, key: Key, val: U32) { + self.0.insert(key, val); + } + /// Get a key from the cache + pub fn get(&mut self, key: &Key) -> Option { + self.0.get(key).cloned() + } +} diff --git a/core/src/kvs/surrealkv/mod.rs b/core/src/kvs/surrealkv/mod.rs index 436bf4cc..fbd6f4f4 100644 --- a/core/src/kvs/surrealkv/mod.rs +++ b/core/src/kvs/surrealkv/mod.rs @@ -1,12 +1,10 @@ #![cfg(feature = "kv-surrealkv")] use crate::err::Error; -use crate::key::error::KeyCategory; use crate::kvs::Check; use crate::kvs::Key; use crate::kvs::Val; -use crate::vs::{try_to_u64_be, u64_to_versionstamp, Versionstamp}; - +use std::fmt::Debug; use std::ops::Range; use surrealkv::Options; use surrealkv::Store; @@ -92,307 +90,241 @@ impl Datastore { } } -impl Transaction { - /// Sets the behavior of the transaction if it's not closed. - pub(crate) fn set_check_level(&mut self, check: Check) { +impl super::api::Transaction for Transaction { + /// Behaviour if unclosed + fn check_level(&mut self, check: Check) { self.check = check; } - /// Checks if the transaction is closed. - pub(crate) fn is_closed(&self) -> bool { + /// Check if closed + fn closed(&self) -> bool { self.done } + /// Check if writeable + fn writeable(&self) -> bool { + self.write + } + /// Cancels the transaction. - pub(crate) async fn cancel(&mut self) -> Result<(), Error> { - // If the transaction is already closed, return an error. - if self.is_closed() { + async fn cancel(&mut self) -> Result<(), Error> { + // Check to see if transaction is closed + if self.done { return Err(Error::TxFinished); } - // Mark the transaction as done. self.done = true; - // Rollback the transaction. self.inner.rollback(); - + // Continue Ok(()) } /// Commits the transaction. - pub(crate) async fn commit(&mut self) -> Result<(), Error> { - // If the transaction is already closed or is read-only, return an error. - if self.is_closed() { + async fn commit(&mut self) -> Result<(), Error> { + // Check to see if transaction is closed + if self.done { return Err(Error::TxFinished); - } else if !self.write { + } + // Check to see if transaction is writable + if !self.write { return Err(Error::TxReadonly); } - // Mark the transaction as done. self.done = true; - // Commit the transaction. - self.inner.commit().await.map_err(Into::into) + self.inner.commit().await?; + // Continue + Ok(()) } /// Checks if a key exists in the database. - pub(crate) async fn exists(&mut self, key: K) -> Result + async fn exists(&mut self, key: K) -> Result where - K: Into, + K: Into + Debug, { - // If the transaction is already closed, return an error. - if self.is_closed() { + // Check to see if transaction is closed + if self.done { return Err(Error::TxFinished); } - - // Check if the key exists in the database. - self.inner - .get(key.into().as_slice()) - .map(|opt| opt.is_some()) - .map_err(|e| Error::Tx(format!("Unable to get kv from SurrealKV: {}", e))) - } - - /// Fetches a value from the database by key. - pub(crate) async fn get(&mut self, key: K) -> Result, Error> - where - K: Into, - { - // If the transaction is already closed, return an error. - if self.is_closed() { - return Err(Error::TxFinished); - } - - // Fetch the value from the database. 
- let res = self.inner.get(key.into().as_slice())?; - + // Check the key + let res = self.inner.get(&key.into())?.is_some(); + // Return result Ok(res) } - /// Obtains a new change timestamp for a key. - /// This timestamp is replaced with the current timestamp when the transaction is committed. - /// This method should be called when composing the change feed entries for this transaction, - /// which should be done immediately before the transaction commit. - /// This is to minimize the delay or conflict of other transactions. - #[allow(unused)] - pub(crate) async fn get_timestamp(&mut self, key: K) -> Result + /// Fetch a key from the database + async fn get(&mut self, key: K) -> Result, Error> where - K: Into, + K: Into + Debug, { - // If the transaction is already closed, return an error. - if self.is_closed() { + // Check to see if transaction is closed + if self.done { return Err(Error::TxFinished); } + // Fetch the value from the database. + let res = self.inner.get(&key.into())?; + // Return result + Ok(res) + } - // Convert the key into a vector. - let key_vec = key.into(); - let k = key_vec.as_slice(); + /// Insert or update a key in the database + async fn set(&mut self, key: K, val: V) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Check to see if transaction is writable + if !self.write { + return Err(Error::TxReadonly); + } + // Set the key + self.inner.set(&key.into(), &val.into())?; + // Return result + Ok(()) + } - // Get the previous value of the key. - let prev = self.inner.get(k)?; - - // Calculate the new version. - let ver = match prev { - Some(prev) => { - let slice = prev.as_slice(); - let res: Result<[u8; 10], Error> = match slice.try_into() { - Ok(ba) => Ok(ba), - Err(e) => Err(Error::Ds(e.to_string())), - }; - let array = res?; - let prev: u64 = try_to_u64_be(array)?; - prev + 1 - } - None => 1, + /// Insert a key if it doesn't exist in the database + async fn put(&mut self, key: K, val: V) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Check to see if transaction is writable + if !self.write { + return Err(Error::TxReadonly); + } + // Get the arguments + let key = key.into(); + let val = val.into(); + // Set the key if empty + match self.inner.get(&key)? { + None => self.inner.set(&key, &val)?, + _ => return Err(Error::TxKeyAlreadyExists), }; - - // Convert the version to a versionstamp. - let verbytes = u64_to_versionstamp(ver); - - // Set the new versionstamp. - self.inner.set(k, verbytes.as_slice())?; - - // Return the versionstamp. - Ok(verbytes) + // Return result + Ok(()) } - /// Obtains a new key that is suffixed with the change timestamp. - pub(crate) async fn get_versionstamped_key( - &mut self, - ts_key: K, - prefix: K, - suffix: K, - ) -> Result, Error> + /// Insert a key if the current value matches a condition + async fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> where - K: Into, + K: Into + Debug, + V: Into + Debug, { - // If the transaction is already closed or is read-only, return an error. - if self.is_closed() { - return Err(Error::TxFinished); - } else if !self.write { - return Err(Error::TxReadonly); - } - - // Get the timestamp. - let ts = self.get_timestamp(ts_key).await?; - - // Create the new key. 
- let mut k: Vec = prefix.into(); - k.append(&mut ts.to_vec()); - k.append(&mut suffix.into()); - - // Return the new key. - Ok(k) - } - - /// Inserts or updates a key in the database. - pub(crate) async fn set(&mut self, key: K, val: V) -> Result<(), Error> - where - K: Into, - V: Into, - { - // If the transaction is already closed or is read-only, return an error. - if self.is_closed() { - return Err(Error::TxFinished); - } else if !self.write { - return Err(Error::TxReadonly); - } - - // Set the key. - self.inner.set(key.into().as_slice(), &val.into()).map_err(Into::into) - } - - /// Inserts a key-value pair into the database if the key doesn't already exist. - pub(crate) async fn put( - &mut self, - category: KeyCategory, - key: K, - val: V, - ) -> Result<(), Error> - where - K: Into, - V: Into, - { - // Ensure the transaction is open and writable. + // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); } + // Check to see if transaction is writable if !self.write { return Err(Error::TxReadonly); } - - // Check if the key already exists. - let key: Vec = key.into(); - if self.exists(key.clone().as_slice()).await? { - return Err(Error::TxKeyAlreadyExistsCategory(category)); - } - - // Insert the key-value pair. - self.inner.set(&key, &val.into()).map_err(Into::into) - } - - /// Inserts a key-value pair into the database if the key doesn't already exist, - /// or if the existing value matches the provided check value. - pub(crate) async fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> - where - K: Into, - V: Into, - { - // Ensure the transaction is open and writable. - if self.done { - return Err(Error::TxFinished); - } - if !self.write { - return Err(Error::TxReadonly); - } - - // Convert the check value. + // Get the arguments + let key = key.into(); + let val = val.into(); let chk = chk.map(Into::into); - - // Insert the key-value pair if the key doesn't exist or the existing value matches the check value. - let key_slice = key.into(); - let val_vec = val.into(); - let res = self.inner.get(key_slice.as_slice())?; - - match (res, chk) { - (Some(v), Some(w)) if v == w => self.inner.set(key_slice.as_slice(), &val_vec)?, - (None, None) => self.inner.set(key_slice.as_slice(), &val_vec)?, + // Set the key if valid + match (self.inner.get(&key)?, chk) { + (Some(v), Some(w)) if v == w => self.inner.set(&key, &val)?, + (None, None) => self.inner.set(&key, &val)?, _ => return Err(Error::TxConditionNotMet), }; - + // Return result Ok(()) } /// Deletes a key from the database. - pub(crate) async fn del(&mut self, key: K) -> Result<(), Error> + async fn del(&mut self, key: K) -> Result<(), Error> where - K: Into, + K: Into + Debug, { - // Ensure the transaction is open and writable. + // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); } + // Check to see if transaction is writable if !self.write { return Err(Error::TxReadonly); } - - // Delete the key. - let key_slice = key.into(); - self.inner.delete(key_slice.as_slice()).map_err(Into::into) + // Remove the key + self.inner.delete(&key.into())?; + // Return result + Ok(()) } - /// Deletes a key from the database if the existing value matches the provided check value. 
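The conditional `putc`/`delc` rewrites in this file all share one decision table: the operation proceeds when the stored value equals the supplied check, or when both are absent; anything else is `Error::TxConditionNotMet`. A minimal, engine-independent model of that check:

    // Models the (current value, check value) match used by putc and delc
    fn check_matches(current: Option<&[u8]>, chk: Option<&[u8]>) -> bool {
        match (current, chk) {
            (Some(v), Some(w)) => v == w, // value present and equal to the check
            (None, None) => true,         // key absent and no check supplied
            _ => false,                   // otherwise: Error::TxConditionNotMet
        }
    }

    fn main() {
        assert!(check_matches(Some(b"v1".as_slice()), Some(b"v1".as_slice())));
        assert!(!check_matches(Some(b"v1".as_slice()), Some(b"v2".as_slice())));
        assert!(check_matches(None, None));
        assert!(!check_matches(None, Some(b"v1".as_slice())));
    }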
- pub(crate) async fn delc(&mut self, key: K, chk: Option) -> Result<(), Error> + /// Delete a key if the current value matches a condition + async fn delc(&mut self, key: K, chk: Option) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { - // Ensure the transaction is open and writable. + // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); } + // Check to see if transaction is writable if !self.write { return Err(Error::TxReadonly); } - - // Convert the check value. - let chk: Option = chk.map(Into::into); - - // Delete the key if the existing value matches the check value. - let key_slice = key.into(); - let res = self.inner.get(key_slice.as_slice())?; - - match (res, chk) { - (Some(v), Some(w)) if v == w => self.inner.delete(key_slice.as_slice())?, - (None, None) => self.inner.delete(key_slice.as_slice())?, + // Get the arguments + let key = key.into(); + let chk = chk.map(Into::into); + // Delete the key if valid + match (self.inner.get(&key)?, chk) { + (Some(v), Some(w)) if v == w => self.inner.delete(&key)?, + (None, None) => self.inner.delete(&key)?, _ => return Err(Error::TxConditionNotMet), }; - + // Return result Ok(()) } /// Retrieves a range of key-value pairs from the database. - pub(crate) async fn scan( - &mut self, - rng: Range, - limit: u32, - ) -> Result, Error> + async fn keys(&mut self, rng: Range, limit: u32) -> Result, Error> where - K: Into, + K: Into + Debug, { - // Ensure the transaction is open. + // Check to see if transaction is closed if self.done { return Err(Error::TxFinished); } + // Set the key range + let beg = rng.start.into(); + let end = rng.end.into(); + // Retrieve the scan range + let res = self.inner.scan(beg.as_slice()..end.as_slice(), Some(limit as usize))?; + // Convert the keys and values + let res = res.into_iter().map(|kv| Key::from(kv.0)).collect(); + // Return result + Ok(res) + } - // Convert the range to byte slices. - let start_range = rng.start.into(); - let end_range = rng.end.into(); - - // Retrieve the key-value pairs. - let res = - self.inner.scan(start_range.as_slice()..end_range.as_slice(), Some(limit as usize))?; + /// Retrieves a range of key-value pairs from the database. 
+ async fn scan(&mut self, rng: Range, limit: u32) -> Result, Error> + where + K: Into + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Set the key range + let beg = rng.start.into(); + let end = rng.end.into(); + // Retrieve the scan range + let res = self.inner.scan(beg.as_slice()..end.as_slice(), Some(limit as usize))?; + // Convert the keys and values let res = res.into_iter().map(|kv| (Key::from(kv.0), kv.1)).collect(); - + // Return result Ok(res) } } diff --git a/core/src/kvs/tests/cluster_init.rs b/core/src/kvs/tests/cluster_init.rs deleted file mode 100644 index ae916263..00000000 --- a/core/src/kvs/tests/cluster_init.rs +++ /dev/null @@ -1,397 +0,0 @@ -use futures::lock::Mutex; -use std::collections::BTreeSet; -use std::sync::Arc; - -use crate::ctx::context; - -use crate::dbs::{Options, Session}; -use crate::iam::{Auth, Role}; -use crate::kvs::lq_structs::{LqValue, UnreachableLqType}; -use crate::kvs::{LockType::*, TransactionType::*}; -use crate::sql; -use crate::sql::statements::LiveStatement; -use crate::sql::Value::Table; -use crate::sql::{Fields, Value}; -use test_log::test; -use uuid; - -#[tokio::test] -#[serial] -async fn expired_nodes_are_garbage_collected() { - let old_node = Uuid::parse_str("2ea6d33f-4c0a-417a-ab04-1fa9869f9a65").unwrap(); - let new_node = Uuid::parse_str("fbfb3487-71fe-4749-b3aa-1cc0a5380cdd").unwrap(); - let old_time = Timestamp { - value: 123000, - }; - let fake_clock = FakeClock::new(old_time); - let fake_clock = Arc::new(SizedClock::Fake(fake_clock)); - let mut test = init(new_node, fake_clock.clone()).await.unwrap(); - - // Set up the first node at an early timestamp - test.db = test.db.with_node_id(sql::Uuid::from(old_node)); - test.db.bootstrap().await.unwrap(); - - // Throw in some stray nodes and heartbeats - let mut tx = test.db.transaction(Write, Optimistic).await.unwrap(); - let corrupt_node_1 = Uuid::parse_str("5a65fe57-7ac3-4b13-a31f-6376d3b484c8").unwrap(); - let corrupt_node_2 = Uuid::parse_str("eb94a0b4-70ea-482f-a7dd-dc02132be846").unwrap(); - tx.set_nd(corrupt_node_1).await.unwrap(); - tx.set_hb(old_time, corrupt_node_2).await.unwrap(); - tx.commit().await.unwrap(); - - // Set up second node at a later timestamp - let new_time = Timestamp { - value: 567000, - }; - set_fake_clock(fake_clock.clone(), new_time).await; - test.db = test.db.with_node_id(sql::Uuid::from(new_node)); - test.db.bootstrap().await.unwrap(); - - // Now scan the heartbeats to validate there is only one node left - let mut tx = test.db.transaction(Write, Optimistic).await.unwrap(); - let scanned = tx.scan_hb(&new_time, 100).await.unwrap(); - assert_eq!(scanned.len(), 1); - for hb in scanned.iter() { - assert_eq!(&hb.nd, &new_node); - } - - // And scan the nodes to verify its just the latest also - let scanned = tx.scan_nd(100).await.unwrap(); - assert_eq!(scanned.len(), 1); - for cl in scanned.iter() { - assert_eq!(&cl.name, &new_node.to_string()); - } - - tx.commit().await.unwrap(); -} - -#[tokio::test] -#[serial] -async fn expired_nodes_get_live_queries_archived() { - let old_node = Uuid::parse_str("c756ed5a-3b19-4303-bce2-5e0edf72e66b").unwrap(); - let old_time = Timestamp { - value: 123000, - }; - let fake_clock = FakeClock::new(old_time); - let fake_clock = Arc::new(SizedClock::Fake(fake_clock)); - let mut test = init(old_node, fake_clock.clone()).await.unwrap(); - - // Set up the first node at an early timestamp - test.db = test.db.with_node_id(sql::Uuid::from(old_node)).with_notifications(); - 
test.db.bootstrap().await.unwrap(); - - // Set up live query - let ses = Session::owner() - .with_ns(test.test_str("testns").as_str()) - .with_db(test.test_str("testdb").as_str()); - let table = "my_table"; - let lq = LiveStatement { - id: sql::Uuid(Uuid::parse_str("da60fa34-902d-4110-b810-7d435267a9f8").unwrap()), - node: crate::sql::uuid::Uuid::from(old_node), - expr: Fields(vec![sql::Field::All], false), - what: Table(sql::Table::from(table)), - cond: None, - fetch: None, - archived: Some(crate::sql::uuid::Uuid::from(old_node)), - session: Some(Value::None), - auth: Some(Auth::for_root(Role::Owner)), - }; - let ctx = context::Context::background(); - let (sender, _) = channel::unbounded(); - let opt = Options::new() - .with_ns(ses.ns()) - .with_db(ses.db()) - .with_auth(Arc::new(Default::default())) - .with_live(true) - .with_id(old_node); - let opt = Options::new_with_sender(&opt, sender); - let tx = Arc::new(Mutex::new(test.db.transaction(Write, Optimistic).await.unwrap())); - let ctx = ctx.set_transaction(tx); - let res = { - let mut stack = reblessive::tree::TreeStack::new(); - stack.enter(|stk| lq.compute(stk, &ctx, &opt, None)).finish().await.unwrap() - }; - match res { - Value::Uuid(_) => {} - _ => { - panic!("Not a uuid: {:?}", res); - } - } - ctx.tx_lock().await.commit().await.unwrap(); - - // Set up second node at a later timestamp - let new_node = Uuid::parse_str("04da7d4c-0086-4358-8318-49f0bb168fa7").unwrap(); - let new_time = Timestamp { - value: 456000, - }; - set_fake_clock(fake_clock.clone(), new_time).await; - test.db = test.db.with_node_id(sql::Uuid::from(new_node)); - test.db.bootstrap().await.unwrap(); - - // Now validate lq was removed - let mut tx = test.db.transaction(Write, Optimistic).await.unwrap(); - let scanned = tx - .all_tb_lives(ses.ns().unwrap().as_ref(), ses.db().unwrap().as_ref(), table) - .await - .unwrap(); - assert_eq!(scanned.len(), 0); - tx.commit().await.unwrap(); -} - -#[test(tokio::test)] -#[serial] -async fn single_live_queries_are_garbage_collected() { - // Test parameters - let mut stack = reblessive::tree::TreeStack::new(); - let ctx = context::Context::background(); - let node_id = Uuid::parse_str("b1a08614-a826-4581-938d-bea17f00e253").unwrap(); - let time = Timestamp { - value: 123000, - }; - let fake_clock = FakeClock::new(time); - let fake_clock = Arc::new(SizedClock::Fake(fake_clock)); - let mut test = init(node_id, fake_clock).await.unwrap(); - let namespace = "test_namespace"; - let database = "test_db"; - let table = "test_table"; - let options = Options::default() - .with_required( - node_id, - Some(Arc::from(namespace)), - Some(Arc::from(database)), - Arc::new(Auth::for_root(Role::Owner)), - ) - .with_live(true); - - // We do standard cluster init - trace!("Bootstrapping node {}", node_id); - test.db = test.db.with_node_id(crate::sql::uuid::Uuid::from(node_id)); - test.db.bootstrap().await.unwrap(); - - // We set up 2 live queries, one of which we want to garbage collect - trace!("Setting up live queries"); - let tx = Arc::new(Mutex::new(test.db.transaction(Write, Optimistic).await.unwrap())); - let ctx = ctx.set_transaction(tx); - let live_query_to_delete = Uuid::parse_str("8aed07c4-9683-480e-b1e4-f0db8b331530").unwrap(); - let live_st = LiveStatement { - id: sql::Uuid(live_query_to_delete), - node: sql::uuid::Uuid::from(node_id), - expr: Fields(vec![sql::Field::All], false), - what: Table(sql::Table::from(table)), - cond: None, - fetch: None, - archived: None, - session: Some(Value::None), - auth: 
Some(Auth::for_root(Role::Owner)), - }; - stack - .enter(|stk| live_st.compute(stk, &ctx, &options, None)) - .finish() - .await - .map_err(|e| format!("Error computing live statement: {:?} {:?}", live_st, e)) - .unwrap(); - let live_query_to_keep = Uuid::parse_str("adea762a-17db-4810-a4a2-c54babfdaf23").unwrap(); - let live_st = LiveStatement { - id: sql::Uuid(live_query_to_keep), - node: sql::Uuid::from(node_id), - expr: Fields(vec![sql::Field::All], false), - what: Table(sql::Table::from(table)), - cond: None, - fetch: None, - archived: None, - session: Some(Value::None), - auth: Some(Auth::for_root(Role::Owner)), - }; - stack - .enter(|stk| live_st.compute(stk, &ctx, &options, None)) - .finish() - .await - .map_err(|e| format!("Error computing live statement: {:?} {:?}", live_st, e)) - .unwrap(); - ctx.tx_lock().await.commit().await.unwrap(); - - // Subject: Perform the action we are testing - trace!("Garbage collecting dead sessions"); - test.db.garbage_collect_dead_session(&[live_query_to_delete]).await.unwrap(); - - // Validate - trace!("Validating live queries"); - let mut tx = test.db.transaction(Write, Optimistic).await.unwrap(); - let scanned = tx.all_tb_lives(namespace, database, table).await.unwrap(); - assert_eq!(scanned.len(), 1, "The scanned values are {:?}", scanned); - assert_eq!(&scanned[0].id.0, &live_query_to_keep); - let scanned = tx.all_lq(&node_id).await.unwrap(); - assert_eq!(scanned.len(), 1); - assert_eq!(&scanned[0].lq, &sql::Uuid::from(live_query_to_keep)); - tx.commit().await.unwrap(); -} - -#[test(tokio::test)] -#[serial] -async fn bootstrap_does_not_error_on_missing_live_queries() { - // Test parameters - let mut stack = reblessive::tree::TreeStack::new(); - let ctx = context::Context::background(); - let old_node_id = Uuid::parse_str("5f644f02-7c1a-4f8b-babd-bd9e92c1836a").unwrap(); - let t1 = Timestamp { - value: 123_000, - }; - let t2 = Timestamp { - value: 456_000, - }; - let fake_clock = FakeClock::new(t1); - let fake_clock = Arc::new(SizedClock::Fake(fake_clock)); - let test = init(old_node_id, fake_clock.clone()).await.unwrap(); - let namespace = "test_namespace_0A8BD08BE4F2457BB9F145557EF19605"; - let database_owned = format!("test_db_{:?}", test.kvs); - let database = database_owned.as_str(); - let table = "test_table"; - let options = Options::default() - .with_required( - old_node_id, - Some(Arc::from(namespace)), - Some(Arc::from(database)), - Arc::new(Auth::for_root(Role::Owner)), - ) - .with_live(true); - - // We do standard cluster init - trace!("Bootstrapping node {}", old_node_id); - test.db.bootstrap().await.unwrap(); - - // We set up 2 live queries, one of which we want to garbage collect - trace!("Setting up live queries"); - let tx = Arc::new(Mutex::new(test.db.transaction(Write, Optimistic).await.unwrap())); - let ctx = ctx.set_transaction(tx); - let live_query_to_corrupt = Uuid::parse_str("d4cee7ce-5c78-4a30-9fa9-2444d58029f6").unwrap(); - let live_st = LiveStatement { - id: sql::Uuid(live_query_to_corrupt), - node: sql::uuid::Uuid::from(old_node_id), - expr: Fields(vec![sql::Field::All], false), - what: Table(sql::Table::from(table)), - cond: None, - fetch: None, - archived: None, - session: Some(Value::None), - auth: Some(Auth::for_root(Role::Owner)), - }; - stack - .enter(|stk| live_st.compute(stk, &ctx, &options, None)) - .finish() - .await - .map_err(|e| format!("Error computing live statement: {:?} {:?}", live_st, e)) - .unwrap(); - - // Now we corrupt the live query entry by leaving the node entry in but removing the table entry 
- let key = crate::key::table::lq::new(namespace, database, table, live_query_to_corrupt); - ctx.tx_lock().await.del(key).await.unwrap(); - ctx.tx_lock().await.commit().await.unwrap(); - - // Subject: Perform the action we are testing - trace!("Bootstrapping"); - let new_node_id = Uuid::parse_str("53f7355d-5be1-4a94-9803-5192b59c5244").unwrap(); - - // There should not be an error - set_fake_clock(fake_clock.clone(), t2).await; - let second_node = test.db.with_node_id(crate::sql::uuid::Uuid::from(new_node_id)); - match second_node.bootstrap().await { - Ok(_) => { - // The behaviour has now changed to remove all broken entries without raising errors - } - Err(e) => { - panic!("Bootstrapping should not generate errors: {:?}", e) - } - } - - // Verify node live query was deleted - let mut tx = second_node.transaction(Write, Optimistic).await.unwrap(); - let found = tx - .scan_ndlq(&old_node_id, 100) - .await - .map_err(|e| format!("Error scanning ndlq: {:?}", e)) - .unwrap(); - assert_eq!(0, found.len(), "Found: {:?}", found); - let found = tx - .scan_ndlq(&new_node_id, 100) - .await - .map_err(|e| format!("Error scanning ndlq: {:?}", e)) - .unwrap(); - assert_eq!(0, found.len(), "Found: {:?}", found); - - // Verify table live query does not exist - let found = tx - .scan_tblq(namespace, database, table, 100) - .await - .map_err(|e| format!("Error scanning tblq: {:?}", e)) - .unwrap(); - assert_eq!(0, found.len(), "Found: {:?}", found); - tx.cancel().await.unwrap(); -} - -#[test(tokio::test)] -async fn test_asymmetric_difference() { - let nd1 = Uuid::parse_str("7da0b3bb-1811-4c0e-8d8d-5fc08b8200a5").unwrap(); - let nd2 = Uuid::parse_str("8fd394df-7f96-4395-9c9a-3abf1e2386ea").unwrap(); - let nd3 = Uuid::parse_str("aa53cb74-1d6b-44df-b063-c495e240ae9e").unwrap(); - let ns1 = "namespace_one"; - let ns2 = "namespace_two"; - let ns3 = "namespace_three"; - let db1 = "database_one"; - let db2 = "database_two"; - let db3 = "database_three"; - let tb1 = "table_one"; - let tb2 = "table_two"; - let tb3 = "table_three"; - let lq1 = Uuid::parse_str("95f0e060-d301-4dfc-9d35-f150e802873b").unwrap(); - let lq2 = Uuid::parse_str("acf60c04-5819-4a23-9874-aeb0ae1be425").unwrap(); - let lq3 = Uuid::parse_str("5d591ae7-db79-4e4f-aa02-a83a4a25ce3f").unwrap(); - let left_set = BTreeSet::from_iter(vec![ - UnreachableLqType::Nd(LqValue { - nd: nd1.into(), - ns: ns1.to_string(), - db: db1.to_string(), - tb: tb1.to_string(), - lq: lq1.into(), - }), - UnreachableLqType::Nd(LqValue { - nd: nd2.into(), - ns: ns2.to_string(), - db: db2.to_string(), - tb: tb2.to_string(), - lq: lq2.into(), - }), - ]); - - let right_set = BTreeSet::from_iter(vec![ - UnreachableLqType::Tb(LqValue { - nd: nd2.into(), - ns: ns2.to_string(), - db: db2.to_string(), - tb: tb2.to_string(), - lq: lq2.into(), - }), - UnreachableLqType::Tb(LqValue { - nd: nd3.into(), - ns: ns3.to_string(), - db: db3.to_string(), - tb: tb3.to_string(), - lq: lq3.into(), - }), - ]); - - let diff = left_set.symmetric_difference(&right_set); - // TODO but also poorman's count - let mut count = 0; - for _ in diff { - count += 1; - } - assert_ne!(count, 0); -} - -async fn set_fake_clock(fake_clock: Arc, time: Timestamp) { - let clock = match &*fake_clock { - SizedClock::Fake(f) => f, - _ => panic!("Clock is not fake"), - }; - clock.set(time).await; -} diff --git a/core/src/kvs/tests/hb.rs b/core/src/kvs/tests/hb.rs deleted file mode 100644 index e12691cb..00000000 --- a/core/src/kvs/tests/hb.rs +++ /dev/null @@ -1,35 +0,0 @@ -#[tokio::test] -#[serial] -async fn 
write_scan_hb() { - let nd = uuid::Uuid::parse_str("e80540d4-2869-4bf3-ae27-790a538c53f3").unwrap(); - let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default()))); - let test = init(nd, clock).await.unwrap(); - - // Add 2 nodes - let mut tx = test.db.transaction(Write, Optimistic).await.unwrap(); - let t1 = tx.clock().await; - let t2 = Timestamp { - value: t1.value + 1, - }; - let t3 = Timestamp { - value: t2.value + 1, - }; - tx.set_hb(t1, Uuid::parse_str("6d1210a0-9224-4813-8090-ded787d51894").unwrap()).await.unwrap(); - tx.set_hb(t2, Uuid::parse_str("b80ff454-c3e7-46a9-a0b0-7b40e9a62626").unwrap()).await.unwrap(); - tx.commit().await.unwrap(); - - // Scan in batches of 1 - let mut tx = test.db.transaction(Write, Optimistic).await.unwrap(); - let vals_lim = tx.scan_hb(&t3, 1).await.unwrap(); - tx.cancel().await.unwrap(); - - // Scan in batches of 100k - let mut tx = test.db.transaction(Write, Optimistic).await.unwrap(); - let vals_no_lim = tx.scan_hb(&t3, 100_000).await.unwrap(); - tx.cancel().await.unwrap(); - - // Assert equal - assert_eq!(vals_lim, vals_no_lim); - assert_eq!(vals_lim.len(), 2); - assert_eq!(vals_no_lim.len(), 2); -} diff --git a/core/src/kvs/tests/helper.rs b/core/src/kvs/tests/helper.rs index fd3ce99b..54a24053 100644 --- a/core/src/kvs/tests/helper.rs +++ b/core/src/kvs/tests/helper.rs @@ -1,45 +1,12 @@ use crate::dbs::node::Timestamp; -use crate::err::Error; use crate::kvs::clock::{FakeClock, SizedClock}; - -#[non_exhaustive] -pub struct TestContext { - pub(crate) db: Datastore, - pub(crate) kvs: Kvs, - // A string identifier for this context. - // It will usually be a uuid or combination of uuid and fixed string identifier. - // It is useful for separating test setups when environments are shared. - pub(crate) context_id: String, -} - -/// TestContext is a container for an initialised test context -/// Anything stateful (such as storage layer and logging) can be tied with this -impl TestContext { - // Use this to generate strings that have the test uuid associated with it - pub fn test_str(&self, prefix: &str) -> String { - format!("{}-{}", prefix, self.context_id) - } -} - -/// Initialise logging and prepare a useable datastore -/// In the future it would be nice to handle multiple datastores -pub(crate) async fn init(node_id: Uuid, clock: Arc) -> Result { - let (db, kvs) = new_ds(node_id, clock).await; - Ok(TestContext { - db, - kvs, - context_id: node_id.to_string(), // The context does not always have to be a uuid - }) -} - -/// Scan the entire storage layer displaying keys -/// Useful to debug scans ;) -async fn _debug_scan(tx: &mut Transaction, message: &str) { - let r = tx.scan_paged(ScanPage::from(vec![0]..vec![u8::MAX]), u32::MAX).await.unwrap(); - let r = r.values; - println!("START OF RANGE SCAN - {}", message); - for (k, _v) in r.iter() { - println!("{}", crate::key::debug::sprint_key(k)); - } - println!("END OF RANGE SCAN - {}", message); -} +use crate::kvs::tests::{ClockType, Kvs}; +use crate::kvs::Datastore; +use crate::kvs::LockType; +use crate::kvs::LockType::*; +use crate::kvs::Transaction; +use crate::kvs::TransactionType; +use crate::kvs::TransactionType::*; +use serial_test::serial; +use std::sync::Arc; +use uuid::Uuid; diff --git a/core/src/kvs/tests/lq.rs b/core/src/kvs/tests/lq.rs deleted file mode 100644 index 9b6a569c..00000000 --- a/core/src/kvs/tests/lq.rs +++ /dev/null @@ -1,83 +0,0 @@ -use crate::kvs::lq_structs::{LqIndexKey, LqIndexValue, LqSelector}; -use uuid::Uuid; - -#[tokio::test] -#[serial] -async fn 
scan_node_lq() {
-    let node_id = Uuid::parse_str("63bb5c1a-b14e-4075-a7f8-680267fbe136").unwrap();
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let test = init(node_id, clock).await.unwrap();
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-    let namespace = "test_namespace";
-    let database = "test_database";
-    let live_query_id = Uuid::from_bytes([
-        0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
-        0x1F,
-    ]);
-    let key = crate::key::node::lq::new(node_id, live_query_id, namespace, database);
-    trace!(
-        "Inserting key: {}",
-        key.encode()
-            .unwrap()
-            .iter()
-            .flat_map(|byte| std::ascii::escape_default(*byte))
-            .map(|byte| byte as char)
-            .collect::<String>()
-    );
-    tx.putc(key, "value", None).await.unwrap();
-    tx.commit().await.unwrap();
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-
-    let res = tx.scan_ndlq(&node_id, 100).await.unwrap();
-    assert_eq!(res.len(), 1);
-    for val in res {
-        assert_eq!(val.nd.0, node_id.clone());
-        assert_eq!(val.ns, namespace);
-        assert_eq!(val.db, database);
-        assert_eq!(val.lq.0, live_query_id.clone());
-    }
-
-    tx.commit().await.unwrap();
-}
-
-#[test_log::test(tokio::test)]
-async fn live_params_are_evaluated() {
-    if !crate::fflags::FFLAGS.change_feed_live_queries.enabled() {
-        return;
-    }
-    let node_id = Uuid::parse_str("9cb22db9-1851-4781-8847-d781a3f373ae").unwrap();
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let test = init(node_id, clock).await.unwrap();
-
-    let sess = Session::owner().with_ns("test_namespace").with_db("test_database");
-    let params = map! {
-        "expected_table".to_string() => Value::Table(sql::Table("test_table".to_string())),
-    };
-    test.db.execute("DEFINE TABLE expected_table CHANGEFEED 10m INCLUDE ORIGINAL; LIVE SELECT * FROM $expected_table", &sess, Some(params)).await.unwrap();
-    let mut res = test.db.lq_cf_store.read().await.live_queries_for_selector(&LqSelector {
-        ns: "test_namespace".to_string(),
-        db: "test_database".to_string(),
-        tb: "test_table".to_string(),
-    });
-    assert_eq!(res.len(), 1);
-    // We remove the unknown value
-    res[0].0.lq = Default::default();
-    assert_eq!(
-        res,
-        vec![(
-            LqIndexKey {
-                selector: LqSelector {
-                    ns: "test_namespace".to_string(),
-                    db: "test_database".to_string(),
-                    tb: "test_table".to_string(),
-                },
-                lq: Default::default(),
-            },
-            LqIndexValue {
-                stm: Default::default(),
-                vs: [0; 10],
-                ts: Default::default(),
-            }
-        )]
-    )
-}
diff --git a/core/src/kvs/tests/mod.rs b/core/src/kvs/tests/mod.rs
index 94f2a114..5897e448 100644
--- a/core/src/kvs/tests/mod.rs
+++ b/core/src/kvs/tests/mod.rs
@@ -31,239 +31,147 @@ type ClockType = Arc<SizedClock>;
 
 #[cfg(feature = "kv-mem")]
 mod mem {
 
-    use crate::kvs::tests::{ClockType, Kvs};
-    use crate::kvs::Datastore;
-    use crate::kvs::LockType;
-    use crate::kvs::Transaction;
-    use crate::kvs::TransactionType;
-    use serial_test::serial;
-
-    async fn new_ds(node_id: Uuid, clock_override: ClockType) -> (Datastore, Kvs) {
-        (
-            Datastore::new_full("memory", Some(clock_override))
-                .await
-                .unwrap()
-                .with_node_id(crate::sql::Uuid::from(node_id)),
-            Kvs::Mem,
-        )
+    async fn new_ds(id: Uuid, clock: ClockType) -> (Datastore, Kvs) {
+        // Use a memory datastore instance
+        let path = "memory";
+        // Setup the in-memory datastore
+        let ds = Datastore::new_with_clock(path, Some(clock)).await.unwrap().with_node_id(id);
+        // Return the datastore
+        (ds, Kvs::Mem)
     }
 
     async fn new_tx(write: TransactionType, lock: LockType) -> Transaction {
-        // Shared node id for one-off transactions
-        // We should delete this, node IDs should be known.
-        let new_tx_uuid = Uuid::parse_str("361893b5-a041-40c0-996c-c3a8828ef06b").unwrap();
+        let nodeid = Uuid::new_v4();
         let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-        new_ds(new_tx_uuid, clock).await.0.transaction(write, lock).await.unwrap()
+        new_ds(nodeid, clock).await.0.transaction(write, lock).await.unwrap()
     }
 
-    include!("cluster_init.rs");
-    include!("hb.rs");
     include!("helper.rs");
-    include!("lq.rs");
-    include!("nq.rs");
     include!("raw.rs");
     include!("snapshot.rs");
-    include!("tb.rs");
     include!("multireader.rs");
     include!("timestamp_to_versionstamp.rs");
-    include!("nd.rs");
-    include!("ndlq.rs");
-    include!("tblq.rs");
-    include!("tbnt.rs");
-    include!("tx_test.rs");
 }
 
 #[cfg(feature = "kv-rocksdb")]
 mod rocksdb {
 
-    use crate::kvs::tests::{ClockType, Kvs};
-    use crate::kvs::Datastore;
-    use crate::kvs::LockType;
-    use crate::kvs::Transaction;
-    use crate::kvs::TransactionType;
-    use serial_test::serial;
     use temp_dir::TempDir;
 
-    async fn new_ds(node_id: Uuid, clock_override: ClockType) -> (Datastore, Kvs) {
+    async fn new_ds(id: Uuid, clock: ClockType) -> (Datastore, Kvs) {
+        // Setup the temporary data storage path
         let path = TempDir::new().unwrap().path().to_string_lossy().to_string();
-        (
-            Datastore::new_full(format!("rocksdb:{path}").as_str(), Some(clock_override))
-                .await
-                .unwrap()
-                .with_node_id(sql::Uuid::from(node_id)),
-            Kvs::Rocksdb,
-        )
+        let path = format!("rocksdb:{path}");
+        // Setup the RocksDB datastore
+        let ds = Datastore::new_with_clock(&path, Some(clock)).await.unwrap().with_node_id(id);
+        // Return the datastore
+        (ds, Kvs::Rocksdb)
     }
 
     async fn new_tx(write: TransactionType, lock: LockType) -> Transaction {
-        // Shared node id for one-off transactions
-        // We should delete this, node IDs should be known.
-        let new_tx_uuid = Uuid::parse_str("22358e5e-87bd-4040-8c63-01db896191ab").unwrap();
+        let nodeid = Uuid::new_v4();
         let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-        new_ds(new_tx_uuid, clock).await.0.transaction(write, lock).await.unwrap()
+        new_ds(nodeid, clock).await.0.transaction(write, lock).await.unwrap()
     }
 
-    include!("cluster_init.rs");
-    include!("hb.rs");
     include!("helper.rs");
-    include!("lq.rs");
-    include!("nq.rs");
     include!("raw.rs");
     include!("snapshot.rs");
-    include!("tb.rs");
     include!("multireader.rs");
     include!("multiwriter_different_keys.rs");
     include!("multiwriter_same_keys_conflict.rs");
     include!("timestamp_to_versionstamp.rs");
-    include!("nd.rs");
-    include!("ndlq.rs");
-    include!("tblq.rs");
-    include!("tbnt.rs");
-    include!("tx_test.rs");
+}
+
+#[cfg(feature = "kv-surrealkv")]
+mod surrealkv {
+
+    use temp_dir::TempDir;
+
+    async fn new_ds(id: Uuid, clock: ClockType) -> (Datastore, Kvs) {
+        // Setup the temporary data storage path
+        let path = TempDir::new().unwrap().path().to_string_lossy().to_string();
+        let path = format!("surrealkv:{path}");
+        // Setup the SurrealKV datastore
+        let ds = Datastore::new_with_clock(&path, Some(clock)).await.unwrap().with_node_id(id);
+        // Return the datastore
+        (ds, Kvs::SurrealKV)
+    }
+
+    async fn new_tx(write: TransactionType, lock: LockType) -> Transaction {
+        let nodeid = Uuid::new_v4();
+        let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
+        let (ds, _) = new_ds(nodeid, clock).await;
+        ds.transaction(write, lock).await.unwrap()
+    }
+
+    include!("raw.rs");
+    include!("helper.rs");
+    include!("snapshot.rs");
+    include!("multireader.rs");
+    include!("multiwriter_different_keys.rs");
+    include!("multiwriter_same_keys_allow.rs");
+    include!("timestamp_to_versionstamp.rs");
 }
 
 #[cfg(feature = "kv-tikv")]
 mod tikv {
 
-    use crate::kvs::tests::{ClockType, Kvs};
-    use crate::kvs::Transaction;
-    use crate::kvs::{Datastore, LockType, TransactionType};
-    use serial_test::serial;
-
-    async fn new_ds(node_id: Uuid, clock_override: ClockType) -> (Datastore, Kvs) {
-        let ds = Datastore::new_full("tikv:127.0.0.1:2379", Some(clock_override))
-            .await
-            .unwrap()
-            .with_node_id(sql::uuid::Uuid(node_id));
+    async fn new_ds(id: Uuid, clock: ClockType) -> (Datastore, Kvs) {
+        // Setup the cluster connection string
+        let path = "tikv:127.0.0.1:2379";
+        // Setup the TiKV datastore
+        let ds = Datastore::new_with_clock(path, Some(clock)).await.unwrap().with_node_id(id);
         // Clear any previous test entries
-        let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-        tx.delp(vec![], u32::MAX).await.unwrap();
+        let tx = ds.transaction(Write, Optimistic).await.unwrap();
+        tx.delp(vec![]).await.unwrap();
         tx.commit().await.unwrap();
         // Return the datastore
         (ds, Kvs::Tikv)
     }
 
     async fn new_tx(write: TransactionType, lock: LockType) -> Transaction {
-        // Shared node id for one-off transactions
-        // We should delete this, node IDs should be known.
-        let new_tx_uuid = Uuid::parse_str("18717a0f-0ab0-421e-b20c-e69fb03e90a3").unwrap();
+        let nodeid = Uuid::new_v4();
         let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-        new_ds(new_tx_uuid, clock).await.0.transaction(write, lock).await.unwrap()
+        new_ds(nodeid, clock).await.0.transaction(write, lock).await.unwrap()
    }
 
-    include!("cluster_init.rs");
-    include!("hb.rs");
     include!("helper.rs");
-    include!("lq.rs");
-    include!("nq.rs");
     include!("raw.rs");
     include!("snapshot.rs");
-    include!("tb.rs");
     include!("multireader.rs");
     include!("multiwriter_different_keys.rs");
     include!("multiwriter_same_keys_conflict.rs");
     include!("timestamp_to_versionstamp.rs");
-    include!("nd.rs");
-    include!("ndlq.rs");
-    include!("tblq.rs");
-    include!("tbnt.rs");
-    include!("tx_test.rs");
 }
 
 #[cfg(feature = "kv-fdb")]
 mod fdb {
 
-    use crate::kvs::tests::{ClockType, Kvs};
-    use crate::kvs::Transaction;
-    use crate::kvs::{Datastore, LockType, TransactionType};
-    use serial_test::serial;
-
-    async fn new_ds(node_id: Uuid, clock_override: ClockType) -> (Datastore, Kvs) {
-        let ds = Datastore::new_full("fdb:/etc/foundationdb/fdb.cluster", Some(clock_override))
-            .await
-            .unwrap()
-            .with_node_id(sql::Uuid::from(node_id));
+    async fn new_ds(id: Uuid, clock: ClockType) -> (Datastore, Kvs) {
+        // Setup the cluster connection string
+        let path = "fdb:/etc/foundationdb/fdb.cluster";
+        // Setup the FoundationDB datastore
+        let ds = Datastore::new_with_clock(path, Some(clock)).await.unwrap().with_node_id(id);
         // Clear any previous test entries
-        let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-        tx.delp(vec![], u32::MAX).await.unwrap();
+        let tx = ds.transaction(Write, Optimistic).await.unwrap();
+        tx.delp(vec![]).await.unwrap();
         tx.commit().await.unwrap();
         // Return the datastore
         (ds, Kvs::Fdb)
     }
 
     async fn new_tx(write: TransactionType, lock: LockType) -> Transaction {
-        // Shared node id for one-off transactions
-        // We should delete this, node IDs should be known.
-        let new_tx_uuid = Uuid::parse_str("50f5bdf5-8abe-406b-8002-a79c942f510f").unwrap();
+        let nodeid = Uuid::new_v4();
         let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-        new_ds(new_tx_uuid, clock).await.0.transaction(write, lock).await.unwrap()
+        new_ds(nodeid, clock).await.0.transaction(write, lock).await.unwrap()
     }
 
-    include!("cluster_init.rs");
-    include!("hb.rs");
     include!("helper.rs");
-    include!("lq.rs");
-    include!("nq.rs");
     include!("raw.rs");
     include!("snapshot.rs");
-    include!("tb.rs");
     include!("multireader.rs");
     include!("multiwriter_different_keys.rs");
     include!("multiwriter_same_keys_allow.rs");
     include!("timestamp_to_versionstamp.rs");
-    include!("nd.rs");
-    include!("ndlq.rs");
-    include!("tblq.rs");
-    include!("tbnt.rs");
-    include!("tx_test.rs");
-}
-
-#[cfg(feature = "kv-surrealkv")]
-mod surrealkv {
-
-    use crate::kvs::tests::{ClockType, Kvs};
-    use crate::kvs::Datastore;
-    use crate::kvs::LockType;
-    use crate::kvs::Transaction;
-    use crate::kvs::TransactionType;
-    use serial_test::serial;
-    use temp_dir::TempDir;
-
-    async fn new_ds(node_id: Uuid, clock_override: ClockType) -> (Datastore, Kvs) {
-        let path = TempDir::new().unwrap().path().to_string_lossy().to_string();
-        (
-            Datastore::new_full(format!("surrealkv:{path}").as_str(), Some(clock_override))
-                .await
-                .unwrap()
-                .with_node_id(sql::Uuid::from(node_id)),
-            Kvs::SurrealKV,
-        )
-    }
-
-    async fn new_tx(write: TransactionType, lock: LockType) -> Transaction {
-        // Shared node id for one-off transactions
-        // We should delete this, node IDs should be known.
-        let new_tx_uuid = Uuid::parse_str("22358e5e-87bd-4040-8c63-01db896191ab").unwrap();
-        let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-        let (ds, _) = new_ds(new_tx_uuid, clock).await;
-        ds.transaction(write, lock).await.unwrap()
-    }
-
-    include!("raw.rs");
-    include!("cluster_init.rs");
-    include!("hb.rs");
-    include!("helper.rs");
-    include!("lq.rs");
-    include!("nq.rs");
-    include!("snapshot.rs");
-    include!("tb.rs");
-    include!("multireader.rs");
-    include!("multiwriter_different_keys.rs");
-    include!("multiwriter_same_keys_allow.rs");
-    include!("timestamp_to_versionstamp.rs");
-    include!("nd.rs");
-    include!("ndlq.rs");
-    include!("tblq.rs");
-    include!("tbnt.rs");
-    include!("tx_test.rs");
-}
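With the per-backend modules reduced to the same two helpers, every included test file is written against an identical surface: `new_ds` builds a `Datastore` with an injected fake clock and node id, and `new_tx` opens a throwaway transaction on a fresh store. A minimal sketch of the shape such a test takes; the test body here is illustrative only, not part of this patch:

```rust
// Sketch only: how an include!()-ed test body consumes the per-backend
// helpers above. new_ds()/new_tx() resolve to whichever backend module
// the file is included into (memory, RocksDB, SurrealKV, TiKV, FDB).
#[tokio::test]
#[serial]
async fn example_roundtrip() {
    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
    let (ds, _) = new_ds(Uuid::new_v4(), clock).await;
    // Write a key through the raw inner transaction
    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
    tx.set("example", "value").await.unwrap();
    tx.commit().await.unwrap();
}
```

Because the bodies are pulled in with `include!`, removing a suite from the lists above (as happens with `cluster_init.rs`, `hb.rs`, `lq.rs`, `nd.rs` and friends) removes it from every backend at once.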
diff --git a/core/src/kvs/tests/multireader.rs b/core/src/kvs/tests/multireader.rs
index c80df0bc..e68f2da7 100644
--- a/core/src/kvs/tests/multireader.rs
+++ b/core/src/kvs/tests/multireader.rs
@@ -6,19 +6,19 @@ async fn multireader() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Insert an initial key
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx.set("test", "some text").await.unwrap();
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx1 = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx1 = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx1.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"some text");
     // Create a readonly transaction
-    let mut tx2 = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx2 = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx2.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"some text");
     // Create a readonly transaction
-    let mut tx3 = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx3 = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx3.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"some text");
     // Cancel both readonly transactions
diff --git a/core/src/kvs/tests/multiwriter_different_keys.rs b/core/src/kvs/tests/multiwriter_different_keys.rs
index e161576b..657cda68 100644
--- a/core/src/kvs/tests/multiwriter_different_keys.rs
+++ b/core/src/kvs/tests/multiwriter_different_keys.rs
@@ -6,24 +6,24 @@ async fn multiwriter_different_keys() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Insert an initial key
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx.set("test", "some text").await.unwrap();
     tx.commit().await.unwrap();
     // Create a writeable transaction
-    let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx1.set("test1", "other text 1").await.unwrap();
     // Create a writeable transaction
-    let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx2.set("test2", "other text 2").await.unwrap();
     // Create a writeable transaction
-    let mut tx3 = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx3 = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx3.set("test3", "other text 3").await.unwrap();
     // Cancel both writeable transactions
     tx1.commit().await.unwrap();
     tx2.commit().await.unwrap();
     tx3.commit().await.unwrap();
     // Check that the key was updated ok
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"some text");
     let val = tx.get("test1").await.unwrap().unwrap();
diff --git a/core/src/kvs/tests/multiwriter_same_keys_allow.rs b/core/src/kvs/tests/multiwriter_same_keys_allow.rs
index ad3a7b5a..08b96fa2 100644
--- a/core/src/kvs/tests/multiwriter_same_keys_allow.rs
+++ b/core/src/kvs/tests/multiwriter_same_keys_allow.rs
@@ -6,33 +6,33 @@ async fn multiwriter_same_keys_allow() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Insert an initial key
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx.set("test", "some text").await.unwrap();
     tx.commit().await.unwrap();
     // Create a writeable transaction
-    let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx1.set("test", "other text 1").await.unwrap();
     // Create a writeable transaction
-    let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx2.set("test", "other text 2").await.unwrap();
     // Create a writeable transaction
-    let mut tx3 = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx3 = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx3.set("test", "other text 3").await.unwrap();
     // Cancel both writeable transactions
     assert!(tx1.commit().await.is_ok());
     assert!(tx2.commit().await.is_ok());
     assert!(tx3.commit().await.is_ok());
     // Check that the key was updated ok
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"other text 3");
     tx.cancel().await.unwrap();
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx.set("test", "original text").await.unwrap();
     tx.commit().await.unwrap();
     // Check that the key was updated ok
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"original text");
     tx.cancel().await.unwrap();
diff --git a/core/src/kvs/tests/multiwriter_same_keys_conflict.rs b/core/src/kvs/tests/multiwriter_same_keys_conflict.rs
index d30e1f8b..499b68c1 100644
--- a/core/src/kvs/tests/multiwriter_same_keys_conflict.rs
+++ b/core/src/kvs/tests/multiwriter_same_keys_conflict.rs
@@ -6,33 +6,33 @@ async fn multiwriter_same_keys_conflict() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Insert an initial key
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx.set("test", "some text").await.unwrap();
     tx.commit().await.unwrap();
     // Create a writeable transaction
-    let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx1 = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx1.set("test", "other text 1").await.unwrap();
     // Create a writeable transaction
-    let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx2 = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx2.set("test", "other text 2").await.unwrap();
     // Create a writeable transaction
-    let mut tx3 = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx3 = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx3.set("test", "other text 3").await.unwrap();
     // Cancel both writeable transactions
     assert!(tx1.commit().await.is_ok());
     assert!(tx2.commit().await.is_err());
     assert!(tx3.commit().await.is_err());
     // Check that the key was updated ok
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"other text 1");
     tx.cancel().await.unwrap();
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx.set("test", "original text").await.unwrap();
     tx.commit().await.unwrap();
     // Check that the key was updated ok
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"original text");
     tx.cancel().await.unwrap();
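Taken together, the multiwriter suites pin down each backend's commit semantics under `Optimistic` locking: writers that touched the same key race, the first commit wins, and the losers either also succeed with last-write-wins (the `_allow` variant, included by SurrealKV and FoundationDB) or fail at commit time (the `_conflict` variant, included by RocksDB and TiKV). A hedged sketch of the retry loop a caller would need on a conflicting backend; the helper name is hypothetical:

```rust
// Illustrative only: retrying an optimistic write that lost a commit
// race. On backends tested with multiwriter_same_keys_conflict.rs the
// losing commit() returns an error rather than silently winning.
async fn set_with_retry(ds: &Datastore, key: &str, val: &str) -> Result<(), Error> {
    loop {
        let mut tx = ds.transaction(Write, Optimistic).await?.inner();
        tx.set(key, val).await?;
        match tx.commit().await {
            Ok(()) => return Ok(()),
            // Assume a conflict; retry from a fresh snapshot
            Err(_) => continue,
        }
    }
}
```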
diff --git a/core/src/kvs/tests/nd.rs b/core/src/kvs/tests/nd.rs
deleted file mode 100644
index ed282f53..00000000
--- a/core/src/kvs/tests/nd.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-#[tokio::test]
-#[serial]
-async fn write_scan_nd() {
-    let nd = uuid::Uuid::parse_str("6a6a4e59-3e86-431d-884f-8f433781e4e9").unwrap();
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let test = init(nd, clock).await.unwrap();
-
-    // Add 2 nodes
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-    tx.set_nd(Uuid::parse_str("83d9b3c0-f3c4-45be-9ef9-9d48502fecb1").unwrap()).await.unwrap();
-    tx.set_nd(Uuid::parse_str("cbefc4fe-8ba0-4898-ab69-782e3ebc06f9").unwrap()).await.unwrap();
-    tx.commit().await.unwrap();
-
-    // Scan in batches of 1
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-    let res_many_batches = tx.scan_nd(1).await.unwrap();
-    tx.cancel().await.unwrap();
-
-    // Scan in batches of 100k
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-    let res_single_batch = tx.scan_nd(100_000).await.unwrap();
-    tx.cancel().await.unwrap();
-
-    // Assert equal
-    assert_eq!(res_many_batches, res_single_batch);
-    assert_eq!(res_many_batches.len(), 2);
-    assert_eq!(res_single_batch.len(), 2);
-}
diff --git a/core/src/kvs/tests/ndlq.rs b/core/src/kvs/tests/ndlq.rs
deleted file mode 100644
index 5fb6990b..00000000
--- a/core/src/kvs/tests/ndlq.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-#[tokio::test]
-#[serial]
-async fn write_scan_ndlq() {
-    let nd = uuid::Uuid::parse_str("7a17446f-721f-4855-8fc7-81086752ca44").unwrap();
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let test = init(nd, clock).await.unwrap();
-
-    // Write some data
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-    let ns = "namespace";
-    let db = "database";
-    let tb = "table";
-    let lq =
-        sql::Uuid::from(uuid::Uuid::parse_str("4c3dca4b-ec08-4e3e-b23a-6b03b5cdc3fc").unwrap());
-    tx.putc_ndlq(nd, lq.0, ns, db, tb, None).await.unwrap();
-    tx.commit().await.unwrap();
-
-    // Verify scan
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-    let res_many_batches = tx.scan_ndlq(&nd, 1).await.unwrap();
-    let res_single_batch = tx.scan_ndlq(&nd, 100_000).await.unwrap();
-    tx.commit().await.unwrap();
-    assert_eq!(
-        res_many_batches,
-        vec![LqValue {
-            nd: sql::Uuid::from(nd),
-            ns: ns.to_string(),
-            db: db.to_string(),
-            tb: tb.to_string(),
-            lq
-        }]
-    );
-    assert_eq!(res_many_batches, res_single_batch);
-}
diff --git a/core/src/kvs/tests/nq.rs b/core/src/kvs/tests/nq.rs
deleted file mode 100644
index c546d2df..00000000
--- a/core/src/kvs/tests/nq.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-#[tokio::test]
-#[serial]
-async fn archive_lv_for_node_archives() {
-    let node_id = Uuid::parse_str("9ab2d498-757f-48cc-8c07-a7d337997445").unwrap();
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let test = init(node_id, clock).await.unwrap();
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-    let namespace = "test_namespace";
-    let database = "test_database";
-    let table = "test_table";
-    tx.set_nd(node_id).await.unwrap();
-
-    let lv_id = crate::sql::uuid::Uuid::from(Uuid::from_bytes([
-        0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
-        0x1F,
-    ]));
-
-    let key = crate::key::node::lq::new(node_id, lv_id.0, namespace, database);
-    tx.putc(key, table, None).await.unwrap();
-
-    let mut stm = LiveStatement::from_source_parts(Fields::all(), Table(table.into()), None, None);
-    stm.id = lv_id;
-    tx.putc_tblq(namespace, database, table, stm, None).await.unwrap();
-
-    let this_node_id = crate::sql::uuid::Uuid::from(Uuid::from_bytes([
-        0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E,
-        0x2F,
-    ]));
-    // We commit after setup because otherwise in memory does not have read your own writes
-    // i.e. setup data is part of same transaction as required implementation checks
-    tx.commit().await.unwrap();
-
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-    let results = test
-        .db
-        .archive_lv_for_node(&mut tx, &sql::uuid::Uuid(node_id), this_node_id)
-        .await
-        .unwrap();
-    assert_eq!(results.len(), 1);
-    tx.commit().await.unwrap();
-    let (lq, opt_err) = &results[0];
-    match opt_err {
-        None => {
-            //expected
-        }
-        Some(err) => {
-            panic!("Unexpected error: {:?}", err);
-        }
-    }
-    assert_eq!(lq.nd, sql::uuid::Uuid(node_id));
-    assert_eq!(lq.ns, namespace);
-    assert_eq!(lq.db, database);
-    assert_eq!(lq.tb, table);
-    assert_eq!(lq.lq, lv_id);
-
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-    let lv = tx.all_tb_lives(namespace, database, table).await.unwrap();
-    assert_eq!(lv.len(), 1, "{:?}", lv);
-    assert_eq!(lv[0].archived, Some(this_node_id));
-    tx.commit().await.unwrap();
-}
diff --git a/core/src/kvs/tests/raw.rs b/core/src/kvs/tests/raw.rs
index ae185e50..ffc2d096 100644
--- a/core/src/kvs/tests/raw.rs
+++ b/core/src/kvs/tests/raw.rs
@@ -1,30 +1,27 @@
-use crate::key::error::KeyCategory::Unknown;
-
 #[tokio::test]
 #[serial]
 async fn initialise() {
-    let mut tx = new_tx(Write, Optimistic).await;
-    assert!(tx.put(Unknown, "test", "ok").await.is_ok());
+    let mut tx = new_tx(Write, Optimistic).await.inner();
+    assert!(tx.put("test", "ok").await.is_ok());
     tx.commit().await.unwrap();
 }
 
 #[tokio::test]
 #[serial]
-async fn exi() {
+async fn exists() {
     // Create a new datastore
     let node_id = Uuid::parse_str("463a5008-ee1d-43db-9662-5e752b6ea3f9").unwrap();
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.put(Unknown, "test", "ok").await.is_ok());
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test", "ok").await.is_ok());
     tx.commit().await.unwrap();
-    // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
-    let val = tx.exi("test").await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
+    let val = tx.exists("test").await.unwrap();
     assert!(val);
-    let val = tx.exi("none").await.unwrap();
+    let val = tx.exists("none").await.unwrap();
     assert!(!val);
     tx.cancel().await.unwrap();
 }
@@ -37,11 +34,11 @@ async fn get() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.put(Unknown, "test", "ok").await.is_ok());
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test", "ok").await.is_ok());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(matches!(val.as_deref(), Some(b"ok")));
     let val = tx.get("none").await.unwrap();
@@ -57,20 +54,20 @@ async fn set() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     assert!(tx.set("test", "one").await.is_ok());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(matches!(val.as_deref(), Some(b"one")));
     tx.cancel().await.unwrap();
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     assert!(tx.set("test", "two").await.is_ok());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(matches!(val.as_deref(), Some(b"two")));
     tx.cancel().await.unwrap();
@@ -84,47 +81,25 @@ async fn put() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.put(Unknown, "test", "one").await.is_ok());
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test", "one").await.is_ok());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(matches!(val.as_deref(), Some(b"one")));
     tx.cancel().await.unwrap();
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.put(Unknown, "test", "two").await.is_err());
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test", "two").await.is_err());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(matches!(val.as_deref(), Some(b"one")));
     tx.cancel().await.unwrap();
 }
 
-#[tokio::test]
-#[serial]
-async fn del() {
-    // Create a new datastore
-    let node_id = Uuid::parse_str("e0acb360-9187-401f-8192-f870b09e2c9e").unwrap();
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let (ds, _) = new_ds(node_id, clock).await;
-    // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.put(Unknown, "test", "one").await.is_ok());
-    tx.commit().await.unwrap();
-    // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.del("test").await.is_ok());
-    tx.commit().await.unwrap();
-    // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
-    let val = tx.get("test").await.unwrap();
-    assert!(val.as_deref().is_none());
-    tx.cancel().await.unwrap();
-}
-
 #[tokio::test]
 #[serial]
 async fn putc() {
@@ -133,34 +108,56 @@ async fn putc() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.put(Unknown, "test", "one").await.is_ok());
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test", "one").await.is_ok());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(matches!(val.as_deref(), Some(b"one")));
     tx.cancel().await.unwrap();
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     assert!(tx.putc("test", "two", Some("one")).await.is_ok());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(matches!(val.as_deref(), Some(b"two")));
     tx.cancel().await.unwrap();
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     assert!(tx.putc("test", "tre", Some("one")).await.is_err());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(matches!(val.as_deref(), Some(b"two")));
     tx.cancel().await.unwrap();
 }
 
+#[tokio::test]
+#[serial]
+async fn del() {
+    // Create a new datastore
+    let node_id = Uuid::parse_str("e0acb360-9187-401f-8192-f870b09e2c9e").unwrap();
+    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
+    let (ds, _) = new_ds(node_id, clock).await;
+    // Create a writeable transaction
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test", "one").await.is_ok());
+    tx.commit().await.unwrap();
+    // Create a writeable transaction
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.del("test").await.is_ok());
+    tx.commit().await.unwrap();
+    // Create a readonly transaction
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
+    let val = tx.get("test").await.unwrap();
+    assert!(val.as_deref().is_none());
+    tx.cancel().await.unwrap();
+}
+
 #[tokio::test]
 #[serial]
 async fn delc() {
@@ -169,29 +166,70 @@ async fn delc() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.put(Unknown, "test", "one").await.is_ok());
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test", "one").await.is_ok());
     tx.commit().await.unwrap();
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     assert!(tx.delc("test", Some("two")).await.is_err());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(matches!(val.as_deref(), Some(b"one")));
     tx.cancel().await.unwrap();
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     assert!(tx.delc("test", Some("one")).await.is_ok());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap();
     assert!(val.as_deref().is_none());
     tx.cancel().await.unwrap();
 }
 
+#[tokio::test]
+#[serial]
+async fn keys() {
+    // Create a new datastore
+    let node_id = Uuid::parse_str("83b81cc2-9609-4533-bede-c170ab9f7bbe").unwrap();
+    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
+    let (ds, _) = new_ds(node_id, clock).await;
+    // Create a writeable transaction
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test1", "1").await.is_ok());
+    assert!(tx.put("test2", "2").await.is_ok());
+    assert!(tx.put("test3", "3").await.is_ok());
+    assert!(tx.put("test4", "4").await.is_ok());
+    assert!(tx.put("test5", "5").await.is_ok());
+    tx.commit().await.unwrap();
+    // Create a readonly transaction
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
+    let val = tx.keys("test1".."test9", u32::MAX).await.unwrap();
+    assert_eq!(val.len(), 5);
+    assert_eq!(val[0], b"test1");
+    assert_eq!(val[1], b"test2");
+    assert_eq!(val[2], b"test3");
+    assert_eq!(val[3], b"test4");
+    assert_eq!(val[4], b"test5");
+    tx.cancel().await.unwrap();
+    // Create a readonly transaction
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
+    let val = tx.keys("test2".."test4", u32::MAX).await.unwrap();
+    assert_eq!(val.len(), 2);
+    assert_eq!(val[0], b"test2");
+    assert_eq!(val[1], b"test3");
+    tx.cancel().await.unwrap();
+    // Create a readonly transaction
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
+    let val = tx.keys("test1".."test9", 2).await.unwrap();
+    assert_eq!(val.len(), 2);
+    assert_eq!(val[0], b"test1");
+    assert_eq!(val[1], b"test2");
+    tx.cancel().await.unwrap();
+}
+
 #[tokio::test]
 #[serial]
 async fn scan() {
@@ -200,15 +238,15 @@ async fn scan() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.put(Unknown, "test1", "1").await.is_ok());
-    assert!(tx.put(Unknown, "test2", "2").await.is_ok());
-    assert!(tx.put(Unknown, "test3", "3").await.is_ok());
-    assert!(tx.put(Unknown, "test4", "4").await.is_ok());
-    assert!(tx.put(Unknown, "test5", "5").await.is_ok());
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test1", "1").await.is_ok());
+    assert!(tx.put("test2", "2").await.is_ok());
+    assert!(tx.put("test3", "3").await.is_ok());
+    assert!(tx.put("test4", "4").await.is_ok());
+    assert!(tx.put("test5", "5").await.is_ok());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.scan("test1".."test9", u32::MAX).await.unwrap();
     assert_eq!(val.len(), 5);
     assert_eq!(val[0].0, b"test1");
@@ -223,7 +261,7 @@ async fn scan() {
     assert_eq!(val[4].1, b"5");
     tx.cancel().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.scan("test2".."test4", u32::MAX).await.unwrap();
     assert_eq!(val.len(), 2);
     assert_eq!(val[0].0, b"test2");
@@ -232,7 +270,7 @@ async fn scan() {
     assert_eq!(val[1].1, b"3");
     tx.cancel().await.unwrap();
     // Create a readonly transaction
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.scan("test1".."test9", 2).await.unwrap();
     assert_eq!(val.len(), 2);
     assert_eq!(val[0].0, b"test1");
@@ -244,70 +282,53 @@
 
 #[tokio::test]
 #[serial]
-async fn scan_paged() {
+async fn batch() {
     // Create a new datastore
     let node_id = Uuid::parse_str("6572a13c-a7a0-4e19-be62-18acb4e854f5").unwrap();
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Create a writeable transaction
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    assert!(tx.put(Unknown, "test1", "1").await.is_ok());
-    assert!(tx.put(Unknown, "test2", "2").await.is_ok());
-    assert!(tx.put(Unknown, "test3", "3").await.is_ok());
-    assert!(tx.put(Unknown, "test4", "4").await.is_ok());
-    assert!(tx.put(Unknown, "test5", "5").await.is_ok());
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    assert!(tx.put("test1", "1").await.is_ok());
+    assert!(tx.put("test2", "2").await.is_ok());
+    assert!(tx.put("test3", "3").await.is_ok());
+    assert!(tx.put("test4", "4").await.is_ok());
+    assert!(tx.put("test5", "5").await.is_ok());
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let tx = ds.transaction(Read, Optimistic).await.unwrap();
-
-    async_defer!(let tx = (tx) defer {
-        tx.cancel().await.unwrap();
-    } after {
-        let val =
-            tx.scan_paged(ScanPage::from("test1".into().."test9".into()), u32::MAX).await.unwrap();
-        let val = val.values;
-        assert_eq!(val.len(), 5);
-        assert_eq!(val[0].0, b"test1");
-        assert_eq!(val[0].1, b"1");
-        assert_eq!(val[1].0, b"test2");
-        assert_eq!(val[1].1, b"2");
-        assert_eq!(val[2].0, b"test3");
-        assert_eq!(val[2].1, b"3");
-        assert_eq!(val[3].0, b"test4");
-        assert_eq!(val[3].1, b"4");
-        assert_eq!(val[4].0, b"test5");
-        assert_eq!(val[4].1, b"5");
-    })
-    .await;
-
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
+    let res = tx.batch("test1".as_bytes().."test9".as_bytes(), u32::MAX, true).await.unwrap();
+    let val = res.values;
+    assert_eq!(val.len(), 5);
+    assert_eq!(val[0].0, b"test1");
+    assert_eq!(val[0].1, b"1");
+    assert_eq!(val[1].0, b"test2");
+    assert_eq!(val[1].1, b"2");
+    assert_eq!(val[2].0, b"test3");
+    assert_eq!(val[2].1, b"3");
+    assert_eq!(val[3].0, b"test4");
+    assert_eq!(val[3].1, b"4");
+    assert_eq!(val[4].0, b"test5");
+    assert_eq!(val[4].1, b"5");
+    tx.cancel().await.unwrap();
     // Create a readonly transaction
-    let tx = ds.transaction(Read, Optimistic).await.unwrap();
-    async_defer!(let tx = (tx) defer {
-        tx.cancel().await.unwrap();
-    } after {
-        let val =
-            tx.scan_paged(ScanPage::from("test2".into().."test4".into()), u32::MAX).await.unwrap();
-        let val = val.values;
-        assert_eq!(val.len(), 2);
-        assert_eq!(val[0].0, b"test2");
-        assert_eq!(val[0].1, b"2");
-        assert_eq!(val[1].0, b"test3");
-        assert_eq!(val[1].1, b"3");
-    })
-    .await;
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
+    let res = tx.batch("test2".as_bytes().."test4".as_bytes(), u32::MAX, true).await.unwrap();
+    let val = res.values;
+    assert_eq!(val.len(), 2);
+    assert_eq!(val[0].0, b"test2");
+    assert_eq!(val[0].1, b"2");
+    assert_eq!(val[1].0, b"test3");
+    assert_eq!(val[1].1, b"3");
+    tx.cancel().await.unwrap();
     // Create a readonly transaction
-    let tx = ds.transaction(Read, Optimistic).await.unwrap();
-    async_defer!(let tx = (tx) defer {
-        tx.cancel().await.unwrap();
-    } after {
-        let val =
-            tx.scan_paged(ScanPage::from("test2".into().."test4".into()), u32::MAX).await.unwrap();
-        let val = val.values;
-        assert_eq!(val.len(), 2);
-        assert_eq!(val[0].0, b"test2");
-        assert_eq!(val[0].1, b"2");
-        assert_eq!(val[1].0, b"test3");
-        assert_eq!(val[1].1, b"3");
-    })
-    .await;
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
+    let res = tx.batch("test2".as_bytes().."test4".as_bytes(), u32::MAX, true).await.unwrap();
+    let val = res.values;
+    assert_eq!(val.len(), 2);
+    assert_eq!(val[0].0, b"test2");
+    assert_eq!(val[0].1, b"2");
+    assert_eq!(val[1].0, b"test3");
+    assert_eq!(val[1].1, b"3");
+    tx.cancel().await.unwrap();
 }
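The rewritten test also shows the replacement for `scan_paged`: `batch` takes an explicit byte range, a limit, and a flag selecting whether values are fetched alongside keys, and returns a result whose `values` field holds the `(Key, Val)` pairs, so the `async_defer!` scaffolding is no longer needed. A sketch of a single batched read; only the `values` field is confirmed by the test, everything else is assumption:

```rust
// Sketch: one page of a batched range read, mirroring the test above.
// The third argument (true) asks for values as well as keys.
let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
let res = tx.batch("test1".as_bytes().."test9".as_bytes(), u32::MAX, true).await.unwrap();
for (k, v) in res.values {
    // Keys and values are raw bytes at this layer of the store
    println!("{} => {}", String::from_utf8_lossy(&k), String::from_utf8_lossy(&v));
}
tx.cancel().await.unwrap();
```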
diff --git a/core/src/kvs/tests/sequences.rs b/core/src/kvs/tests/sequences.rs
new file mode 100644
index 00000000..dc753884
--- /dev/null
+++ b/core/src/kvs/tests/sequences.rs
@@ -0,0 +1,47 @@
+#[tokio::test]
+#[serial]
+async fn sequences() {
+    // Create a new datastore
+    let node_id = Uuid::parse_str("b7afc077-2123-476f-bee0-43d7504f1e0a").unwrap();
+    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
+    let (ds, _) = new_ds(node_id, clock).await;
+    // Test separate sequences
+    let mut txn = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    let nsid = txn.get_next_ns_id().await.unwrap();
+    txn.complete_changes(false).await.unwrap();
+    txn.commit().await.unwrap();
+    assert_eq!(nsid, 0);
+    // Test separate sequences
+    let mut txn = ds.transaction(Write, Optimistic).await.unwrap();
+    let dbid = txn.get_next_db_id(nsid).await.unwrap();
+    txn.complete_changes(false).await.unwrap();
+    txn.commit().await.unwrap();
+    assert_eq!(dbid, 0);
+    // Test separate sequences
+    let mut txn = ds.transaction(Write, Optimistic).await.unwrap();
+    let tbid1 = txn.get_next_tb_id(nsid, dbid).await.unwrap();
+    txn.complete_changes(false).await.unwrap();
+    txn.commit().await.unwrap();
+    assert_eq!(tbid1, 0);
+    // Test separate sequences
+    let mut txn = ds.transaction(Write, Optimistic).await.unwrap();
+    let tbid2 = txn.get_next_tb_id(nsid, dbid).await.unwrap();
+    txn.complete_changes(false).await.unwrap();
+    txn.commit().await.unwrap();
+    assert_eq!(tbid2, 1);
+    // Test separate sequences
+    let mut txn = ds.transaction(Write, Optimistic).await.unwrap();
+    txn.remove_tb_id(nsid, dbid, tbid1).await.unwrap();
+    txn.complete_changes(false).await.unwrap();
+    txn.commit().await.unwrap();
+    // Test separate sequences
+    let mut txn = ds.transaction(Write, Optimistic).await.unwrap();
+    txn.remove_db_id(nsid, dbid).await.unwrap();
+    txn.complete_changes(false).await.unwrap();
+    txn.commit().await.unwrap();
+    // Test separate sequences
+    let mut txn = ds.transaction(Write, Optimistic).await.unwrap();
+    txn.remove_ns_id(nsid).await.unwrap();
+    txn.complete_changes(false).await.unwrap();
+    txn.commit().await.unwrap();
+}
diff --git a/core/src/kvs/tests/snapshot.rs b/core/src/kvs/tests/snapshot.rs
index c199dde2..e04c97f3 100644
--- a/core/src/kvs/tests/snapshot.rs
+++ b/core/src/kvs/tests/snapshot.rs
@@ -6,24 +6,24 @@ async fn snapshot() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Insert an initial key
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     tx.set("test", "some text").await.unwrap();
     tx.commit().await.unwrap();
     // Create a readonly transaction
-    let mut tx1 = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx1 = ds.transaction(Read, Optimistic).await.unwrap().inner();
     // Check that the key was inserted ok
     let val = tx1.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"some text");
     // Create a new writeable transaction
-    let mut txw = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut txw = ds.transaction(Write, Optimistic).await.unwrap().inner();
     // Update the test key content
     txw.set("test", "other text").await.unwrap();
     // Create a readonly transaction
-    let mut tx2 = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx2 = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx2.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"some text");
     // Create a readonly transaction
-    let mut tx3 = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx3 = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx3.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"some text");
     // Update the test key content
@@ -38,7 +38,7 @@ async fn snapshot() {
     // Commit the writable transaction
     txw.commit().await.unwrap();
     // Check that the key was updated ok
-    let mut tx = ds.transaction(Read, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Read, Optimistic).await.unwrap().inner();
     let val = tx.get("test").await.unwrap().unwrap();
     assert_eq!(val, b"extra text");
     tx.cancel().await.unwrap();
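The snapshot suite is the isolation counterpart to the multiwriter suites: readers opened before a writer commits keep seeing the value from their own snapshot. A compressed sketch of the guarantee the assertions rely on (illustrative, not part of the patch):

```rust
// Sketch: a reader opened while a write is in flight keeps its view.
let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
tx.set("test", "some text").await.unwrap();
tx.commit().await.unwrap();
let mut txw = ds.transaction(Write, Optimistic).await.unwrap().inner();
txw.set("test", "other text").await.unwrap();
let mut txr = ds.transaction(Read, Optimistic).await.unwrap().inner();
// The uncommitted write is invisible to the reader, and on a
// snapshot-isolated backend it stays invisible after the commit too
assert_eq!(txr.get("test").await.unwrap().as_deref(), Some(&b"some text"[..]));
txw.commit().await.unwrap();
assert_eq!(txr.get("test").await.unwrap().as_deref(), Some(&b"some text"[..]));
txr.cancel().await.unwrap();
```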
diff --git a/core/src/kvs/tests/tb.rs b/core/src/kvs/tests/tb.rs
deleted file mode 100644
index 9ab651f1..00000000
--- a/core/src/kvs/tests/tb.rs
+++ /dev/null
@@ -1,89 +0,0 @@
-use crate::key::database::tb;
-use crate::key::database::tb::Tb;
-use crate::kvs::ScanPage;
-use crate::sql::statements::DefineTableStatement;
-use crate::sql::TableType;
-
-#[tokio::test]
-#[serial]
-async fn table_definitions_can_be_scanned() {
-    // Setup
-    let node_id = Uuid::parse_str("f7b2ba17-90ed-45f9-9aa2-906c6ba0c289").unwrap();
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let test = init(node_id, clock).await.unwrap();
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-
-    // Create a table definition
-    let namespace = "test_namespace";
-    let database = "test_database";
-    let table = "test_table";
-    let key = Tb::new(namespace, database, table);
-    let value = DefineTableStatement {
-        name: Default::default(),
-        drop: false,
-        full: false,
-        id: None,
-        view: None,
-        permissions: Default::default(),
-        changefeed: None,
-        ..Default::default()
-    };
-    tx.set(&key, &value).await.unwrap();
-
-    // Validate with scan
-    match tx
-        .scan_paged(
-            ScanPage::from(tb::prefix(namespace, database)..tb::suffix(namespace, database)),
-            1000,
-        )
-        .await
-    {
-        Ok(scan) => {
-            assert_eq!(scan.values.len(), 1);
-            let read = DefineTableStatement::from(&scan.values[0].1);
-            assert_eq!(&read, &value);
-        }
-        Err(e) => panic!("{:?}", e),
-    }
-    tx.commit().await.unwrap();
-}
-
-#[tokio::test]
-#[serial]
-async fn table_definitions_can_be_deleted() {
-    // Setup
-    let node_id = Uuid::parse_str("13c0e650-1710-489e-bb80-f882bce50b56").unwrap();
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let test = init(node_id, clock).await.unwrap();
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-
-    // Create a table definition
-    let namespace = "test_namespace";
-    let database = "test_database";
-    let table = "test_table";
-    let key = Tb::new(namespace, database, table);
-    let value = DefineTableStatement {
-        name: Default::default(),
-        drop: false,
-        full: false,
-        id: None,
-        view: None,
-        permissions: Default::default(),
-        changefeed: None,
-        comment: None,
-        if_not_exists: false,
-        kind: TableType::Any,
-    };
-    tx.set(&key, &value).await.unwrap();
-
-    // Validate delete
-    tx.del(&key).await.unwrap();
-
-    // Should not exist
-    match tx.get(&key).await {
-        Ok(None) => {}
-        Ok(Some(o)) => panic!("Should not exist but was {:?}", o),
-        Err(e) => panic!("Unexpected error on get {:?}", e),
-    };
-    tx.commit().await.unwrap();
-}
diff --git a/core/src/kvs/tests/tblq.rs b/core/src/kvs/tests/tblq.rs
deleted file mode 100644
index f3a3b9cd..00000000
--- a/core/src/kvs/tests/tblq.rs
+++ /dev/null
@@ -1,53 +0,0 @@
-#[tokio::test]
-#[serial]
-async fn write_scan_tblq() {
-    let node_id = uuid::uuid!("0bee25e0-34d7-448c-abc0-48cdf3db3a53");
-    let live_ids = [
-        uuid::Uuid::nil(),
-        uuid::uuid!("b5aab54e-d1ef-4a14-b537-9206dcde2209"),
-        uuid::Uuid::new_v4(),
-        uuid::Uuid::max(),
-    ];
-
-    for live_id in live_ids {
-        let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-        let test = init(node_id, clock).await.unwrap();
-
-        // Write some data
-        let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-        let ns = "namespace";
-        let db = "database";
-        let tb = "table";
-        let live_id = sql::Uuid::from(live_id);
-        let live_stm = LiveStatement {
-            id: live_id,
-            node: sql::Uuid::from(node_id),
-            expr: Default::default(),
-            what: Default::default(),
-            cond: None,
-            fetch: None,
-            archived: None,
-            session: Some(Value::None),
-            auth: None,
-        };
-        tx.putc_tblq(ns, db, tb, live_stm, None).await.unwrap();
-        tx.commit().await.unwrap();
-
-        // Verify scan
-        let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-        let res_many_batches = tx.scan_tblq(ns, db, tb, 1).await.unwrap();
-        let res_single_batch = tx.scan_tblq(ns, db, tb, 100_000).await.unwrap();
-        tx.commit().await.unwrap();
-        assert_eq!(
-            res_many_batches,
-            vec![LqValue {
-                nd: sql::Uuid::from(node_id),
-                ns: ns.to_string(),
-                db: db.to_string(),
-                tb: tb.to_string(),
-                lq: live_id
-            }]
-        );
-        assert_eq!(res_many_batches, res_single_batch);
-    }
-}
diff --git a/core/src/kvs/tests/timestamp_to_versionstamp.rs b/core/src/kvs/tests/timestamp_to_versionstamp.rs
index c896e902..1de06ca2 100644
--- a/core/src/kvs/tests/timestamp_to_versionstamp.rs
+++ b/core/src/kvs/tests/timestamp_to_versionstamp.rs
@@ -17,27 +17,27 @@ async fn timestamp_to_versionstamp() {
     let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
     let (ds, _) = new_ds(node_id, clock).await;
     // Give the current versionstamp a timestamp of 0
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    tx.set_timestamp_for_versionstamp(0, "myns", "mydb", true).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    tx.set_timestamp_for_versionstamp(0, "myns", "mydb").await.unwrap();
     tx.commit().await.unwrap();
     // Get the versionstamp for timestamp 0
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     let vs1 = tx.get_versionstamp_from_timestamp(0, "myns", "mydb", true).await.unwrap().unwrap();
     tx.commit().await.unwrap();
     // Give the current versionstamp a timestamp of 1
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    tx.set_timestamp_for_versionstamp(1, "myns", "mydb", true).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    tx.set_timestamp_for_versionstamp(1, "myns", "mydb").await.unwrap();
     tx.commit().await.unwrap();
     // Get the versionstamp for timestamp 1
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     let vs2 = tx.get_versionstamp_from_timestamp(1, "myns", "mydb", true).await.unwrap().unwrap();
     tx.commit().await.unwrap();
     // Give the current versionstamp a timestamp of 2
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
-    tx.set_timestamp_for_versionstamp(2, "myns", "mydb", true).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
+    tx.set_timestamp_for_versionstamp(2, "myns", "mydb").await.unwrap();
     tx.commit().await.unwrap();
     // Get the versionstamp for timestamp 2
-    let mut tx = ds.transaction(Write, Optimistic).await.unwrap();
+    let mut tx = ds.transaction(Write, Optimistic).await.unwrap().inner();
     let vs3 = tx.get_versionstamp_from_timestamp(2, "myns", "mydb", true).await.unwrap().unwrap();
     tx.commit().await.unwrap();
     assert!(vs1 < vs2);
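The API change drops the trailing `lock` flag from `set_timestamp_for_versionstamp`, and the property the test asserts is monotonicity: later timestamps map to strictly greater versionstamps. That holds because a versionstamp is 10 bytes, an 8-byte big-endian transaction version plus a 2-byte suffix, so byte-wise ordering agrees with numeric ordering of the version. The `u64_to_versionstamp` helper in `core/src/vs` (also visible in the TiKV code removed later in this patch) makes the layout explicit:

```rust
// Sketch: versionstamps order like the u64 versions they encode,
// because the version is stored big-endian in the first 8 bytes.
let vs1 = crate::vs::u64_to_versionstamp(1);
let vs2 = crate::vs::u64_to_versionstamp(2);
assert_eq!(vs1, [0, 0, 0, 0, 0, 0, 0, 1, 0, 0]);
assert!(vs1 < vs2);
```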
diff --git a/core/src/kvs/tests/tx_test.rs b/core/src/kvs/tests/tx_test.rs
index 6822a52c..5ee29681 100644
--- a/core/src/kvs/tests/tx_test.rs
+++ b/core/src/kvs/tests/tx_test.rs
@@ -1,67 +1,6 @@
-use crate::key::debug::sprint_key;
-use crate::key::error::KeyCategory;
-use crate::kvs::lq_structs::{KillEntry, LqEntry, TrackedResult};
+use crate::key::debug::sprint;
 use crate::sql::Strand;
 
-#[tokio::test]
-#[serial]
-async fn live_queries_sent_to_tx_are_received() {
-    let node_id = uuid::uuid!("d0f1a200-e24e-44fe-98c1-2271a5781da7");
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let test = init(node_id, clock).await.unwrap();
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-
-    // Create live query data
-    let lq_entry = LqEntry {
-        live_id: sql::Uuid::new_v4(),
-        ns: "namespace".to_string(),
-        db: "database".to_string(),
-        stm: LiveStatement {
-            id: sql::Uuid::new_v4(),
-            node: sql::Uuid::from(node_id),
-            expr: Default::default(),
-            what: Default::default(),
-            cond: None,
-            fetch: None,
-            archived: None,
-            session: Some(Value::None),
-            auth: None,
-        },
-    };
-    tx.pre_commit_register_async_event(TrackedResult::LiveQuery(lq_entry.clone())).unwrap();
-
-    tx.commit().await.unwrap();
-
-    // Verify data
-    let live_queries = tx.consume_pending_live_queries();
-    assert_eq!(live_queries.len(), 1);
-    assert_eq!(live_queries[0], TrackedResult::LiveQuery(lq_entry));
-}
-#[tokio::test]
-#[serial]
-async fn kill_queries_sent_to_tx_are_received() {
-    let node_id = uuid::uuid!("1cae3d33-64e6-4867-bf17-d095c1b842d7");
-    let clock = Arc::new(SizedClock::Fake(FakeClock::new(Timestamp::default())));
-    let test = init(node_id, clock).await.unwrap();
-    let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
-
-    let kill_entry = KillEntry {
-        live_id: uuid::uuid!("f396c0cb-01ca-4213-a72d-b0240f6d00b2").into(),
-        ns: "some_ns".to_string(),
-        db: "some_db".to_string(),
-    };
-
-    // Create live query data
-    tx.pre_commit_register_async_event(TrackedResult::KillQuery(kill_entry.clone())).unwrap();
-
-    tx.commit().await.unwrap();
-
-    // Verify data
-    let live_queries = tx.consume_pending_live_queries();
-    assert_eq!(live_queries.len(), 1);
-    assert_eq!(live_queries[0], TrackedResult::KillQuery(kill_entry));
-}
-
 #[tokio::test]
 #[serial]
 async fn delr_range_correct() {
@@ -72,9 +11,7 @@ async fn delr_range_correct() {
     // Create some data
     let mut tx = test.db.transaction(Write, Optimistic).await.unwrap();
     tx.putc(b"hugh\x00\x10", Value::Strand(Strand::from("0010")), None).await.unwrap();
-    tx.put(KeyCategory::ChangeFeed, b"hugh\x00\x10\x10", Value::Strand(Strand::from("001010")))
-        .await
-        .unwrap();
+    tx.put(b"hugh\x00\x10\x10", Value::Strand(Strand::from("001010"))).await.unwrap();
     tx.putc(b"hugh\x00\x20", Value::Strand(Strand::from("0020")), None).await.unwrap();
     tx.commit().await.unwrap();
 
@@ -137,6 +74,6 @@ async fn set_versionstamp_is_incremental() {
         b"prefix\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00suffix",
         b"prefix\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00suffix",
     ];
-    assert_eq!(found[0].0, expected_keys[0], "key was {}", sprint_key(&found[0].0));
-    assert_eq!(found[1].0, expected_keys[1], "key was {}", sprint_key(&found[1].0));
+    assert_eq!(found[0].0, expected_keys[0], "key was {}", sprint(&found[0].0));
+    assert_eq!(found[1].0, expected_keys[1], "key was {}", sprint(&found[1].0));
 }
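`set_versionstamp_is_incremental` (only the tail of its diff is shown) spells out the on-disk layout that the renamed `sprint` helper is used to debug-print: each generated key is the literal prefix, the 10-byte versionstamp, then the literal suffix. A sketch of composing such a key by hand, matching `expected_keys[0]` above:

```rust
// Sketch: the "prefix" + versionstamp + "suffix" key layout asserted
// by the test, built manually with the same vs helper.
let vs = crate::vs::u64_to_versionstamp(1);
let mut key = b"prefix".to_vec();
key.extend_from_slice(&vs);
key.extend_from_slice(b"suffix");
assert_eq!(key, b"prefix\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00suffix");
```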
diff --git a/core/src/kvs/tikv/mod.rs b/core/src/kvs/tikv/mod.rs
index f8e5a4b5..c3568cd9 100644
--- a/core/src/kvs/tikv/mod.rs
+++ b/core/src/kvs/tikv/mod.rs
@@ -1,19 +1,21 @@
 #![cfg(feature = "kv-tikv")]
 
 use crate::err::Error;
-use crate::key::error::KeyCategory;
 use crate::kvs::Check;
 use crate::kvs::Key;
 use crate::kvs::Val;
-use crate::vs::{try_to_u64_be, u64_to_versionstamp, Versionstamp};
+use crate::vs::Versionstamp;
+use std::fmt::Debug;
 use std::ops::Range;
+use std::pin::Pin;
+use std::sync::Arc;
 use tikv::CheckLevel;
 use tikv::TimestampExt;
 use tikv::TransactionOptions;
 
 #[non_exhaustive]
 pub struct Datastore {
-    db: tikv::TransactionClient,
+    db: Pin<Arc<tikv::TransactionClient>>,
 }
 
 #[non_exhaustive]
@@ -26,6 +28,11 @@ pub struct Transaction {
     check: Check,
     /// The underlying datastore transaction
     inner: tikv::Transaction,
+    // The above, supposedly 'static transaction
+    // actually points here, so we need to ensure
+    // the memory is kept alive. This pointer must
+    // be declared last, so that it is dropped last.
+    db: Pin<Arc<tikv::TransactionClient>>,
 }
 
 impl Drop for Transaction {
@@ -63,18 +70,13 @@ impl Datastore {
     pub(crate) async fn new(path: &str) -> Result<Datastore, Error> {
         match tikv::TransactionClient::new(vec![path]).await {
             Ok(db) => Ok(Datastore {
-                db,
+                db: Arc::pin(db),
             }),
             Err(e) => Err(Error::Ds(e.to_string())),
         }
     }
     /// Start a new transaction
     pub(crate) async fn transaction(&self, write: bool, lock: bool) -> Result<Transaction, Error> {
-        // TiKV currently has issues with pessimistic locks. Panic in development.
-        #[cfg(debug_assertions)]
-        if lock {
-            panic!("There are issues with pessimistic locking in TiKV");
-        }
         // Set whether this should be an optimistic or pessimistic transaction
         let mut opt = if lock {
             TransactionOptions::new_pessimistic()
@@ -99,23 +101,31 @@ impl Datastore {
                 check,
                 write,
                 inner,
+                db: self.db.clone(),
             }),
             Err(e) => Err(Error::Tx(e.to_string())),
         }
     }
 }
 
-impl Transaction {
+impl super::api::Transaction for Transaction {
     /// Behaviour if unclosed
-    pub(crate) fn check_level(&mut self, check: Check) {
+    fn check_level(&mut self, check: Check) {
         self.check = check;
     }
+
     /// Check if closed
-    pub(crate) fn closed(&self) -> bool {
+    fn closed(&self) -> bool {
         self.done
     }
+
+    /// Check if writeable
+    fn writeable(&self) -> bool {
+        self.write
+    }
+
     /// Cancel a transaction
-    pub(crate) async fn cancel(&mut self) -> Result<(), Error> {
+    async fn cancel(&mut self) -> Result<(), Error> {
         // Check to see if transaction is closed
         if self.done {
             return Err(Error::TxFinished);
         }
@@ -129,8 +139,9 @@ impl Transaction {
         // Continue
         Ok(())
     }
+
     /// Commit a transaction
-    pub(crate) async fn commit(&mut self) -> Result<(), Error> {
+    async fn commit(&mut self) -> Result<(), Error> {
         // Check to see if transaction is closed
         if self.done {
             return Err(Error::TxFinished);
         }
@@ -151,80 +162,11 @@ impl Transaction {
         // Continue
         Ok(())
     }
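The new `db: Pin<Arc<tikv::TransactionClient>>` field exists because `tikv::Transaction` is only notionally `'static`: as the added comment says, it points back into the client it was created from. Keeping a pinned, reference-counted handle inside each `Transaction`, declared after `inner` so it is dropped last, makes every transaction keep its client alive. The same pattern in miniature, with hypothetical types standing in for the TiKV ones:

```rust
use std::pin::Pin;
use std::sync::Arc;

// Illustrative only: the owner-outlives-borrower arrangement used by
// the TiKV Transaction above; Client and Session are hypothetical.
struct Client;
struct Session; // in the real code this (logically) borrows the client

struct Tx {
    inner: Session,
    // Field order matters: declared last, dropped last, so the client
    // outlives the session that points into it.
    _db: Pin<Arc<Client>>,
}

fn open(db: &Pin<Arc<Client>>) -> Tx {
    Tx {
        inner: Session,
        _db: db.clone(),
    }
}
```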
- let k: Key = key.into(); - if lock { - let prev = self.inner.get(k.clone()).await?; - if let Some(prev) = prev { - let slice = prev.as_slice(); - let res: Result<[u8; 10], Error> = match slice.try_into() { - Ok(ba) => Ok(ba), - Err(e) => Err(Error::Ds(e.to_string())), - }; - let array = res?; - let prev = try_to_u64_be(array)?; - if prev >= ver { - return Err(Error::TxFailure); - } - } - self.inner.put(k, verbytes.to_vec()).await?; - } - // Return the uint64 representation of the timestamp as the result - Ok(u64_to_versionstamp(ver)) - } - /// Obtain a new key that is suffixed with the change timestamp - #[allow(unused)] - pub(crate) async fn get_versionstamped_key( - &mut self, - ts_key: K, - prefix: K, - suffix: K, - ) -> Result, Error> - where - K: Into, - { - // Check to see if transaction is closed - if self.done { - return Err(Error::TxFinished); - } - // Check to see if transaction is writable - if !self.write { - return Err(Error::TxReadonly); - } - let ts = self.get_timestamp(ts_key, false).await?; - let mut k: Vec = prefix.into(); - k.append(&mut ts.to_vec()); - k.append(&mut suffix.into()); - Ok(k) - } /// Check if a key exists - pub(crate) async fn exi(&mut self, key: K) -> Result + async fn exists(&mut self, key: K) -> Result where - K: Into, + K: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -235,10 +177,11 @@ impl Transaction { // Return result Ok(res) } + /// Fetch a key from the database - pub(crate) async fn get(&mut self, key: K) -> Result, Error> + async fn get(&mut self, key: K) -> Result, Error> where - K: Into, + K: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -249,11 +192,12 @@ impl Transaction { // Return result Ok(res) } + /// Insert or update a key in the database - pub(crate) async fn set(&mut self, key: K, val: V) -> Result<(), Error> + async fn set(&mut self, key: K, val: V) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -268,16 +212,12 @@ impl Transaction { // Return result Ok(()) } + /// Insert a key if it doesn't exist in the database - pub(crate) async fn put( - &mut self, - category: KeyCategory, - key: K, - val: V, - ) -> Result<(), Error> + async fn put(&mut self, key: K, val: V) -> Result<(), Error> where - K: Into, - V: Into, + K: Into + Debug, + V: Into + Debug, { // Check to see if transaction is closed if self.done { @@ -294,16 +234,17 @@ impl Transaction { // Set the key if empty match self.inner.key_exists(key.clone()).await? 
{
 false => self.inner.put(key, val).await?,
- _ => return Err(Error::TxKeyAlreadyExistsCategory(category)),
+ _ => return Err(Error::TxKeyAlreadyExists),
 };
 // Return result
 Ok(())
 }
- /// Insert a key if it doesn't exist in the database
- pub(crate) async fn putc<K, V>(&mut self, key: K, val: V, chk: Option<Val>) -> Result<(), Error>
+
+ /// Insert a key if the current value matches a condition
+ async fn putc<K, V>(&mut self, key: K, val: V, chk: Option<Val>) -> Result<(), Error>
 where
- K: Into<Key>,
- V: Into<Val>,
+ K: Into<Key> + Debug,
+ V: Into<Val> + Debug,
 {
 // Check to see if transaction is closed
 if self.done {
@@ -328,10 +269,11 @@ impl Transaction {
 // Return result
 Ok(())
 }
+
 /// Delete a key
- pub(crate) async fn del<K>(&mut self, key: K) -> Result<(), Error>
+ async fn del<K>(&mut self, key: K) -> Result<(), Error>
 where
- K: Into<Key>,
+ K: Into<Key> + Debug,
 {
 // Check to see if transaction is closed
 if self.done {
@@ -346,11 +288,12 @@ impl Transaction {
 // Return result
 Ok(())
 }
- /// Delete a key
- pub(crate) async fn delc<K, V>(&mut self, key: K, chk: Option<V>) -> Result<(), Error>
+
+ /// Delete a key if the current value matches a condition
+ async fn delc<K, V>(&mut self, key: K, chk: Option<V>) -> Result<(), Error>
 where
- K: Into<Key>,
- V: Into<Val>,
+ K: Into<Key> + Debug,
+ V: Into<Val> + Debug,
 {
 // Check to see if transaction is closed
 if self.done {
@@ -373,34 +316,11 @@ impl Transaction {
 // Return result
 Ok(())
 }
- /// Retrieve a range of keys from the database
- pub(crate) async fn scan<K>(
- &mut self,
- rng: Range<K>,
- limit: u32,
- ) -> Result<Vec<(Key, Val)>, Error>
- where
- K: Into<Key>,
- {
- // Check to see if transaction is closed
- if self.done {
- return Err(Error::TxFinished);
- }
- // Convert the range to bytes
- let rng: Range<Key> = Range {
- start: rng.start.into(),
- end: rng.end.into(),
- };
- // Scan the keys
- let res = self.inner.scan(rng, limit).await?;
- let res = res.map(|kv| (Key::from(kv.0), kv.1)).collect();
- // Return result
- Ok(res)
- }
+
 /// Delete a range of keys from the database
- pub(crate) async fn delr<K>(&mut self, rng: Range<K>, limit: u32) -> Result<(), Error>
+ async fn delr<K>(&mut self, rng: Range<K>) -> Result<(), Error>
 where
- K: Into<Key>,
+ K: Into<Key> + Debug,
 {
 // Check to see if transaction is closed
 if self.done {
@@ -410,18 +330,81 @@ impl Transaction {
 if !self.write {
 return Err(Error::TxReadonly);
 }
+ // Delete the key range
+ self.db.unsafe_destroy_range(rng.start.into()..rng.end.into()).await?;
+ // Return result
+ Ok(())
+ }
+
+ /// Retrieve a range of keys from the database
+ async fn keys<K>(&mut self, rng: Range<K>, limit: u32) -> Result<Vec<Key>, Error>
+ where
+ K: Into<Key> + Debug,
+ {
+ // Check to see if transaction is closed
+ if self.done {
+ return Err(Error::TxFinished);
+ }
 // Convert the range to bytes
 let rng: Range<Key> = Range {
 start: rng.start.into(),
 end: rng.end.into(),
 };
 // Scan the keys
- let res = self.inner.scan_keys(rng, limit).await?;
- // Delete all the keys
- for key in res {
- self.inner.delete(key).await?;
- }
+ let res = self.inner.scan_keys(rng, limit).await?.map(Key::from).collect();
 // Return result
- Ok(())
+ Ok(res)
+ }
+
+ /// Retrieve a range of key-value pairs from the database
+ async fn scan<K>(&mut self, rng: Range<K>, limit: u32) -> Result<Vec<(Key, Val)>, Error>
+ where
+ K: Into<Key> + Debug,
+ {
+ // Check to see if transaction is closed
+ if self.done {
+ return Err(Error::TxFinished);
+ }
+ // Convert the range to bytes
+ let rng: Range<Key> = Range {
+ start: rng.start.into(),
+ end: rng.end.into(),
+ };
+ // Scan the keys
+ let res = self.inner.scan(rng, limit).await?.map(|kv| (Key::from(kv.0), kv.1)).collect();
+ // Return result
+ Ok(res)
+ }
+
+ ///
Obtain a new change timestamp for a key + async fn get_timestamp(&mut self, key: K) -> Result + where + K: Into + Debug, + { + // Check to see if transaction is closed + if self.done { + return Err(Error::TxFinished); + } + // Calculate the version key + let key = key.into(); + // Get the transaction version + let ver = self.inner.current_timestamp().await?.version(); + // Calculate the previous version value + if let Some(prev) = self.get(key.as_slice()).await? { + let res: Result<[u8; 10], Error> = match prev.as_slice().try_into() { + Ok(ba) => Ok(ba), + Err(e) => Err(Error::Tx(e.to_string())), + }; + let prev = crate::vs::try_to_u64_be(res?)?; + if prev >= ver { + return Err(Error::TxFailure); + } + }; + // Convert the timestamp to a versionstamp + let verbytes = crate::vs::u64_to_versionstamp(ver); + // Store the timestamp to prevent other transactions from committing + self.set(key.as_slice(), verbytes.to_vec()).await?; + // Return the uint64 representation of the timestamp as the result + Ok(verbytes) } } diff --git a/core/src/kvs/tr.rs b/core/src/kvs/tr.rs new file mode 100644 index 00000000..1376619d --- /dev/null +++ b/core/src/kvs/tr.rs @@ -0,0 +1,645 @@ +use super::api::Transaction; +use super::Key; +use super::Val; +use crate::cf; +use crate::dbs::node::Timestamp; +use crate::err::Error; +use crate::idg::u32::U32; +#[cfg(debug_assertions)] +use crate::key::debug::sprint; +use crate::kvs::batch::Batch; +use crate::kvs::clock::SizedClock; +use crate::kvs::stash::Stash; +use crate::sql; +use crate::sql::thing::Thing; +use crate::sql::Value; +use crate::vs::Versionstamp; +use sql::statements::DefineTableStatement; +use std::borrow::Cow; +use std::fmt; +use std::fmt::Debug; +use std::ops::Range; +use std::sync::Arc; + +#[cfg(debug_assertions)] +const TARGET: &str = "surrealdb::core::kvs::tr"; + +/// Used to determine the behaviour when a transaction is not closed correctly +#[derive(Default)] +pub enum Check { + #[default] + None, + Warn, + Panic, +} + +/// Specifies whether the transaction is read-only or writeable. +#[derive(Copy, Clone)] +pub enum TransactionType { + Read, + Write, +} + +impl From for TransactionType { + fn from(value: bool) -> Self { + match value { + true => TransactionType::Write, + false => TransactionType::Read, + } + } +} + +/// Specifies whether the transaction is optimistic or pessimistic. +#[derive(Copy, Clone)] +pub enum LockType { + Pessimistic, + Optimistic, +} + +impl From for LockType { + fn from(value: bool) -> Self { + match value { + true => LockType::Pessimistic, + false => LockType::Optimistic, + } + } +} + +/// A set of undoable updates and requests against a dataset. 
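// ---------------------------------------------------------------------
// A standalone sketch (not part of the applied patch) of the 10-byte
// versionstamp arithmetic that the TiKV `get_timestamp` above relies on,
// assuming `crate::vs` packs the u64 version big-endian into the first
// 8 bytes and leaves a 2-byte suffix. Names here are illustrative.
// ---------------------------------------------------------------------
type Versionstamp = [u8; 10];

// Assumed layout: 8 bytes of big-endian u64 version, then 2 suffix bytes.
fn u64_to_versionstamp(v: u64) -> Versionstamp {
    let mut vs = [0u8; 10];
    vs[..8].copy_from_slice(&v.to_be_bytes());
    vs
}

fn try_to_u64_be(vs: Versionstamp) -> u64 {
    u64::from_be_bytes(vs[..8].try_into().unwrap())
}

fn main() {
    // The monotonicity check from `get_timestamp`: a previously stored
    // versionstamp must be strictly less than the new version, otherwise
    // the transaction fails rather than letting time move backwards.
    let prev = u64_to_versionstamp(41);
    let ver = 42u64;
    assert!(try_to_u64_be(prev) < ver);
    let next = u64_to_versionstamp(ver);
    assert_eq!(try_to_u64_be(next), 42);
}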
+#[allow(dead_code)] +#[non_exhaustive] +pub struct Transactor { + pub(super) inner: Inner, + pub(super) stash: Stash, + pub(super) cf: cf::Writer, + pub(super) clock: Arc, +} + +#[allow(clippy::large_enum_variant)] +pub(super) enum Inner { + #[cfg(feature = "kv-mem")] + Mem(super::mem::Transaction), + #[cfg(feature = "kv-rocksdb")] + RocksDB(super::rocksdb::Transaction), + #[cfg(feature = "kv-indxdb")] + IndxDB(super::indxdb::Transaction), + #[cfg(feature = "kv-tikv")] + TiKV(super::tikv::Transaction), + #[cfg(feature = "kv-fdb")] + FoundationDB(super::fdb::Transaction), + #[cfg(feature = "kv-surrealkv")] + SurrealKV(super::surrealkv::Transaction), +} + +impl fmt::Display for Transactor { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + #![allow(unused_variables)] + match &self.inner { + #[cfg(feature = "kv-mem")] + Inner::Mem(_) => write!(f, "memory"), + #[cfg(feature = "kv-rocksdb")] + Inner::RocksDB(_) => write!(f, "rocksdb"), + #[cfg(feature = "kv-indxdb")] + Inner::IndxDB(_) => write!(f, "indxdb"), + #[cfg(feature = "kv-tikv")] + Inner::TiKV(_) => write!(f, "tikv"), + #[cfg(feature = "kv-fdb")] + Inner::FoundationDB(_) => write!(f, "fdb"), + #[cfg(feature = "kv-surrealkv")] + Inner::SurrealKV(_) => write!(f, "surrealkv"), + #[allow(unreachable_patterns)] + _ => unreachable!(), + } + } +} + +macro_rules! expand_inner { + ( $v:expr, $arm:pat_param => $b:block ) => { + match $v { + #[cfg(feature = "kv-mem")] + Inner::Mem($arm) => $b, + #[cfg(feature = "kv-rocksdb")] + Inner::RocksDB($arm) => $b, + #[cfg(feature = "kv-indxdb")] + Inner::IndxDB($arm) => $b, + #[cfg(feature = "kv-tikv")] + Inner::TiKV($arm) => $b, + #[cfg(feature = "kv-fdb")] + Inner::FoundationDB($arm) => $b, + #[cfg(feature = "kv-surrealkv")] + Inner::SurrealKV($arm) => $b, + #[allow(unreachable_patterns)] + _ => unreachable!(), + } + }; +} + +impl Transactor { + // -------------------------------------------------- + // Integral methods + // -------------------------------------------------- + + /// Specify how we should handle unclosed transactions. + /// + /// If a transaction is not cancelled or rolled back then + /// this can cause issues on some storage engine + /// implementations. In tests we can ignore unhandled + /// transactions, whilst in development we should panic + /// so that any unintended behaviour is detected, and in + /// production we should only log a warning. + pub(crate) fn check_level(&mut self, check: Check) { + #[cfg(debug_assertions)] + trace!(target: TARGET, "check_level"); + expand_inner!(&mut self.inner, v => { v.check_level(check) }) + } + + /// Check if transaction is finished. + /// + /// If the transaction has been cancelled or committed, + /// then this function will return [`true`], and any further + /// calls to functions on this transaction will result + /// in a [`Error::TxFinished`] error. + pub async fn closed(&self) -> bool { + #[cfg(debug_assertions)] + trace!(target: TARGET, "closed"); + expand_inner!(&self.inner, v => { v.closed() }) + } + + /// Cancel a transaction. + /// + /// This reverses all changes made within the transaction. + pub async fn cancel(&mut self) -> Result<(), Error> { + #[cfg(debug_assertions)] + trace!(target: TARGET, "cancel"); + expand_inner!(&mut self.inner, v => { v.cancel().await }) + } + + /// Commit a transaction. + /// + /// This attempts to commit all changes made within the transaction. 
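// ---------------------------------------------------------------------
// A compressed, self-contained sketch (not part of the applied patch) of
// the dispatch pattern behind `expand_inner!` above: one macro arm per
// feature-gated backend lets every `Transactor` method be written once
// instead of once per storage engine. Two stand-in backends shown here.
// ---------------------------------------------------------------------
struct Mem;
impl Mem {
    fn name(&self) -> &'static str {
        "memory"
    }
}

struct RocksDb;
impl RocksDb {
    fn name(&self) -> &'static str {
        "rocksdb"
    }
}

enum Inner {
    Mem(Mem),
    RocksDb(RocksDb),
}

// Match every variant and run the same body against the bound value.
macro_rules! expand_inner {
    ( $v:expr, $arm:pat_param => $b:block ) => {
        match $v {
            Inner::Mem($arm) => $b,
            Inner::RocksDb($arm) => $b,
        }
    };
}

fn backend_name(inner: &Inner) -> &'static str {
    expand_inner!(inner, v => { v.name() })
}

fn main() {
    assert_eq!(backend_name(&Inner::Mem(Mem)), "memory");
    assert_eq!(backend_name(&Inner::RocksDb(RocksDb)), "rocksdb");
}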
+ pub async fn commit(&mut self) -> Result<(), Error> { + #[cfg(debug_assertions)] + trace!(target: TARGET, "commit"); + expand_inner!(&mut self.inner, v => { v.commit().await }) + } + + /// Check if a key exists in the datastore. + pub async fn exists(&mut self, key: K) -> Result + where + K: Into + Debug, + { + let key = key.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "exists {}", sprint(&key)); + expand_inner!(&mut self.inner, v => { v.exists(key).await }) + } + + /// Fetch a key from the datastore. + pub async fn get(&mut self, key: K) -> Result, Error> + where + K: Into + Debug, + { + let key = key.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "get {}", sprint(&key)); + expand_inner!(&mut self.inner, v => { v.get(key).await }) + } + + /// Fetch many keys from the datastore. + pub async fn getm(&mut self, keys: Vec) -> Result, Error> + where + K: Into + Debug, + { + let keys = keys.into_iter().map(Into::into).collect::>(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "getm {}", keys.iter().map(sprint).collect::>().join(" + ")); + expand_inner!(&mut self.inner, v => { v.getm(keys).await }) + } + + /// Retrieve a specific range of keys from the datastore. + /// + /// This function fetches all matching key-value pairs from the underlying datastore in grouped batches. + pub async fn getr(&mut self, rng: Range) -> Result, Error> + where + K: Into + Debug, + { + let beg: Key = rng.start.into(); + let end: Key = rng.end.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "getr {}..{}", sprint(&beg), sprint(&end)); + expand_inner!(&mut self.inner, v => { v.getr(beg..end).await }) + } + + /// Retrieve a specific prefixed range of keys from the datastore. + /// + /// This function fetches all matching key-value pairs from the underlying datastore in grouped batches. + pub async fn getp(&mut self, key: K) -> Result, Error> + where + K: Into + Debug, + { + let key: Key = key.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "getp {}", sprint(&key)); + expand_inner!(&mut self.inner, v => { v.getp(key).await }) + } + + /// Insert or update a key in the datastore. + pub async fn set(&mut self, key: K, val: V) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + let key = key.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "set {} => {:?}", sprint(&key), val); + expand_inner!(&mut self.inner, v => { v.set(key, val).await }) + } + + /// Insert a key if it doesn't exist in the datastore. + pub async fn put(&mut self, key: K, val: V) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + let key = key.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "put {} => {:?}", sprint(&key), val); + expand_inner!(&mut self.inner, v => { v.put(key, val).await }) + } + + /// Update a key in the datastore if the current value matches a condition. + pub async fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + let key = key.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "putc {} if {:?} => {:?}", sprint(&key), chk, val); + expand_inner!(&mut self.inner, v => { v.putc(key, val, chk).await }) + } + + /// Delete a key from the datastore. 
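// ---------------------------------------------------------------------
// A self-contained sketch (not part of the applied patch) of the prefix
// semantics behind `getp`/`delp` above: the upper bound is derived by
// appending 0xff to the prefix, mirroring how the replaced implementation
// in tx.rs computed `beg.clone().add(0xff)`. A BTreeMap stands in for the
// datastore.
// ---------------------------------------------------------------------
use std::collections::BTreeMap;
use std::ops::Range;

type Key = Vec<u8>;
type Val = Vec<u8>;

// Derive the scan range for a key prefix: prefix..prefix+0xff.
fn prefix_range(prefix: &[u8]) -> Range<Key> {
    let start = prefix.to_vec();
    let mut end = prefix.to_vec();
    end.push(0xff);
    start..end
}

// getp-style lookup: fetch every pair whose key starts with the prefix.
fn getp(store: &BTreeMap<Key, Val>, prefix: &[u8]) -> Vec<(Key, Val)> {
    store
        .range(prefix_range(prefix))
        .map(|(k, v)| (k.clone(), v.clone()))
        .collect()
}

fn main() {
    let mut store = BTreeMap::new();
    store.insert(b"tb/a".to_vec(), b"1".to_vec());
    store.insert(b"tb/b".to_vec(), b"2".to_vec());
    store.insert(b"tc/a".to_vec(), b"3".to_vec());
    assert_eq!(getp(&store, b"tb/").len(), 2); // only keys under "tb/"
}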
+ pub async fn del(&mut self, key: K) -> Result<(), Error> + where + K: Into + Debug, + { + let key = key.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "del {}", sprint(&key)); + expand_inner!(&mut self.inner, v => { v.del(key).await }) + } + + /// Delete a key from the datastore if the current value matches a condition. + pub async fn delc(&mut self, key: K, chk: Option) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + let key = key.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "delc {} if {:?}", sprint(&key), chk); + expand_inner!(&mut self.inner, v => { v.delc(key, chk).await }) + } + + /// Delete a range of keys from the datastore. + /// + /// This function deletes all matching key-value pairs from the underlying datastore in grouped batches. + pub async fn delr(&mut self, rng: Range) -> Result<(), Error> + where + K: Into + Debug, + { + let beg: Key = rng.start.into(); + let end: Key = rng.end.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "delr {}..{}", sprint(&beg), sprint(&end)); + expand_inner!(&mut self.inner, v => { v.delr(beg..end).await }) + } + + /// Delete a prefixed range of keys from the datastore. + /// + /// This function deletes all matching key-value pairs from the underlying datastore in grouped batches. + pub async fn delp(&mut self, key: K) -> Result<(), Error> + where + K: Into + Debug, + { + let key: Key = key.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "delp {}", sprint(&key)); + expand_inner!(&mut self.inner, v => { v.delp(key).await }) + } + + /// Retrieve a specific range of keys from the datastore. + /// + /// This function fetches the full range of keys without values, in a single request to the underlying datastore. + pub async fn keys(&mut self, rng: Range, limit: u32) -> Result, Error> + where + K: Into + Debug, + { + let beg: Key = rng.start.into(); + let end: Key = rng.end.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "keys {}..{} (limit: {limit})", sprint(&beg), sprint(&end)); + expand_inner!(&mut self.inner, v => { v.keys(beg..end, limit).await }) + } + + /// Retrieve a specific range of keys from the datastore. + /// + /// This function fetches the full range of key-value pairs, in a single request to the underlying datastore. + pub async fn scan(&mut self, rng: Range, limit: u32) -> Result, Error> + where + K: Into + Debug, + { + let beg: Key = rng.start.into(); + let end: Key = rng.end.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "scan {}..{} (limit: {limit})", sprint(&beg), sprint(&end)); + expand_inner!(&mut self.inner, v => { v.scan(beg..end, limit).await }) + } + + /// Retrieve a batched scan over a specific range of keys in the datastore. + /// + /// This function fetches keys or key-value pairs, in batches, with multiple requests to the underlying datastore. + pub async fn batch( + &mut self, + rng: Range, + batch: u32, + values: bool, + ) -> Result + where + K: Into + Debug, + { + let beg: Key = rng.start.into(); + let end: Key = rng.end.into(); + #[cfg(debug_assertions)] + trace!(target: TARGET, "batch {}..{} (batch: {batch})", sprint(&beg), sprint(&end)); + expand_inner!(&mut self.inner, v => { v.batch(beg..end, batch, values).await }) + } + + /// Obtain a new change timestamp for a key + /// which is replaced with the current timestamp when the transaction is committed. 
+ /// NOTE: This should be called when composing the change feed entries for this transaction,
+ /// which should be done immediately before the transaction commit.
+ /// That is to keep the commit delay (pessimistic) or conflict rate (optimistic) of other transactions as low as possible.
+ #[allow(unused)]
+ pub async fn get_timestamp<K>(&mut self, key: K) -> Result<Versionstamp, Error>
+ where
+ K: Into<Key> + Debug,
+ {
+ // We convert to a byte slice as it's easier to work with at this level
+ let key = key.into();
+ #[cfg(debug_assertions)]
+ trace!(target: TARGET, "get_timestamp {}", sprint(&key));
+ expand_inner!(&mut self.inner, v => { v.get_timestamp(key).await })
+ }
+
+ /// Insert or update a versionstamped key in the datastore.
+ #[allow(unused_variables)]
+ pub async fn set_versionstamped<K, V>(
+ &mut self,
+ ts_key: K,
+ prefix: K,
+ suffix: K,
+ val: V,
+ ) -> Result<(), Error>
+ where
+ K: Into<Key> + Debug,
+ V: Into<Val> + Debug,
+ {
+ let ts_key = ts_key.into();
+ let prefix = prefix.into();
+ let suffix = suffix.into();
+ #[cfg(debug_assertions)]
+ trace!(
+ target: TARGET,
+ "set_versionstamp ts={} prefix={} suffix={}",
+ sprint(&ts_key),
+ sprint(&prefix),
+ sprint(&suffix)
+ );
+ expand_inner!(&mut self.inner, v => { v.set_versionstamp(ts_key, prefix, suffix, val).await })
+ }
+
+ // --------------------------------------------------
+ // Additional methods
+ // --------------------------------------------------
+
+ /// Clock retrieves the current timestamp, without guaranteeing
+ /// monotonicity in all implementations.
+ ///
+ /// It is used for the unreliable ordering of events, as well as
+ /// for the handling of timeouts, in operations that are not
+ /// guaranteed to be correct, but it also allows for lexicographical ordering.
+ ///
+ /// Public for tests, but not required for usage from a user perspective.
+ pub async fn clock(&self) -> Timestamp {
+ self.clock.now().await
+ }
+
+ // record_change will record the change in the changefeed if enabled.
+ // To actually persist the recorded changes into the underlying kvs,
+ // you must call the `complete_changes` function and then commit the transaction.
+ #[allow(clippy::too_many_arguments)]
+ pub(crate) fn record_change(
+ &mut self,
+ ns: &str,
+ db: &str,
+ tb: &str,
+ id: &Thing,
+ previous: Cow<'_, Value>,
+ current: Cow<'_, Value>,
+ store_difference: bool,
+ ) {
+ self.cf.record_cf_change(ns, db, tb, id.clone(), previous, current, store_difference)
+ }
+
+ // Records the table (re)definition in the changefeed if enabled.
+ pub(crate) fn record_table_change(
+ &mut self,
+ ns: &str,
+ db: &str,
+ tb: &str,
+ dt: &DefineTableStatement,
+ ) {
+ self.cf.define_table(ns, db, tb, dt)
+ }
+
+ pub(crate) async fn get_idg(&mut self, key: &Key) -> Result<U32, Error> {
+ Ok(if let Some(v) = self.stash.get(key) {
+ v
+ } else {
+ let val = self.get(key.clone()).await?;
+ if let Some(val) = val {
+ U32::new(key.clone(), Some(val)).await?
+ } else {
+ U32::new(key.clone(), None).await?
+ } + }) + } + + /// Gets the next namespace id + pub(crate) async fn get_next_ns_id(&mut self) -> Result { + let key = crate::key::root::ni::Ni::default().encode().unwrap(); + let mut seq = self.get_idg(&key).await?; + let nid = seq.get_next_id(); + self.stash.set(key, seq.clone()); + let (k, v) = seq.finish().unwrap(); + self.set(k, v).await?; + Ok(nid) + } + + /// Gets the next database id for the given namespace + pub(crate) async fn get_next_db_id(&mut self, ns: u32) -> Result { + let key = crate::key::namespace::di::new(ns).encode().unwrap(); + let mut seq = self.get_idg(&key).await?; + let nid = seq.get_next_id(); + self.stash.set(key, seq.clone()); + let (k, v) = seq.finish().unwrap(); + self.set(k, v).await?; + Ok(nid) + } + + /// Gets the next table id for the given namespace and database + pub(crate) async fn get_next_tb_id(&mut self, ns: u32, db: u32) -> Result { + let key = crate::key::database::ti::new(ns, db).encode().unwrap(); + let mut seq = self.get_idg(&key).await?; + let nid = seq.get_next_id(); + self.stash.set(key, seq.clone()); + let (k, v) = seq.finish().unwrap(); + self.set(k, v).await?; + Ok(nid) + } + + /// Removes the given namespace from the sequence. + #[allow(unused)] + pub(crate) async fn remove_ns_id(&mut self, ns: u32) -> Result<(), Error> { + let key = crate::key::root::ni::Ni::default().encode().unwrap(); + let mut seq = self.get_idg(&key).await?; + seq.remove_id(ns); + self.stash.set(key, seq.clone()); + let (k, v) = seq.finish().unwrap(); + self.set(k, v).await?; + Ok(()) + } + + /// Removes the given database from the sequence. + #[allow(unused)] + pub(crate) async fn remove_db_id(&mut self, ns: u32, db: u32) -> Result<(), Error> { + let key = crate::key::namespace::di::new(ns).encode().unwrap(); + let mut seq = self.get_idg(&key).await?; + seq.remove_id(db); + self.stash.set(key, seq.clone()); + let (k, v) = seq.finish().unwrap(); + self.set(k, v).await?; + Ok(()) + } + + /// Removes the given table from the sequence. + #[allow(unused)] + pub(crate) async fn remove_tb_id(&mut self, ns: u32, db: u32, tb: u32) -> Result<(), Error> { + let key = crate::key::database::ti::new(ns, db).encode().unwrap(); + let mut seq = self.get_idg(&key).await?; + seq.remove_id(tb); + self.stash.set(key, seq.clone()); + let (k, v) = seq.finish().unwrap(); + self.set(k, v).await?; + Ok(()) + } + + // complete_changes will complete the changefeed recording for the given namespace and database. + // + // Under the hood, this function calls the transaction's `set_versionstamped_key` for each change. + // Every change must be recorded by calling this struct's `record_change` function beforehand. + // If there were no preceding `record_change` function calls for this transaction, this function will do nothing. + // + // This function should be called only after all the changes have been made to the transaction. + // Otherwise, changes are missed in the change feed. + // + // This function should be called immediately before calling the commit function to guarantee that + // the lock, if needed by lock=true, is held only for the duration of the commit, not the entire transaction. + // + // This function is here because it needs access to mutably borrow the transaction. + // + // Lastly, you should set lock=true if you want the changefeed to be correctly ordered for + // non-FDB backends. 
+ // complete_changes will complete the changefeed recording for the given namespace and database.
+ //
+ // Under the hood, this function calls the transaction's `set_versionstamped` function for each change.
+ // Every change must be recorded by calling this struct's `record_change` function beforehand.
+ // If there were no preceding `record_change` function calls for this transaction, this function will do nothing.
+ //
+ // This function should be called only after all the changes have been made to the transaction.
+ // Otherwise, changes will be missing from the change feed.
+ //
+ // This function should be called immediately before calling the commit function to guarantee that
+ // the lock, if needed by lock=true, is held only for the duration of the commit, not the entire transaction.
+ //
+ // This function is here because it needs access to mutably borrow the transaction.
+ //
+ // Lastly, you should set lock=true if you want the changefeed to be correctly ordered for
+ // non-FDB backends.
+ pub(crate) async fn complete_changes(&mut self, _lock: bool) -> Result<(), Error> {
+ let changes = self.cf.get();
+ for (tskey, prefix, suffix, v) in changes {
+ self.set_versionstamped(tskey, prefix, suffix, v).await?
+ }
+ Ok(())
+ }
+
+ // set_timestamp_for_versionstamp correlates the given timestamp with the current versionstamp.
+ // This allows get_versionstamp_from_timestamp to obtain the versionstamp from the timestamp later.
+ pub(crate) async fn set_timestamp_for_versionstamp(
+ &mut self,
+ ts: u64,
+ ns: &str,
+ db: &str,
+ ) -> Result<Versionstamp, Error> {
+ // This also works as an advisory lock on the ts keys, so that there are
+ // no other concurrent transactions that can write to the ts_key or the keys after it.
+ let key = crate::key::database::vs::new(ns, db);
+ let vst = self.get_timestamp(key).await?;
+ #[cfg(debug_assertions)]
+ trace!(
+ "Setting timestamp {} for versionstamp {:?} in ns: {}, db: {}",
+ ts,
+ crate::vs::conv::versionstamp_to_u64(&vst),
+ ns,
+ db
+ );
+
+ // Ensure there are no keys after the ts_key
+ // Otherwise we can go back in time!
+ let ts_key = crate::key::database::ts::new(ns, db, ts);
+ let begin = ts_key.encode()?;
+ let end = crate::key::database::ts::suffix(ns, db);
+ let ts_pairs: Vec<(Vec<u8>, Vec<u8>)> = self.getr(begin..end).await?;
+ let latest_ts_pair = ts_pairs.last();
+ if let Some((k, _)) = latest_ts_pair {
+ #[cfg(debug_assertions)]
+ trace!(
+ "There already was a greater committed timestamp {} in ns: {}, db: {}, found: {}",
+ ts,
+ ns,
+ db,
+ sprint(k)
+ );
+ let k = crate::key::database::ts::Ts::decode(k)?;
+ let latest_ts = k.ts;
+ if latest_ts >= ts {
+ return Err(Error::Internal(
+ "ts is less than or equal to the latest ts".to_string(),
+ ));
+ }
+ }
+ self.set(ts_key, vst).await?;
+ Ok(vst)
+ }
+
+ pub(crate) async fn get_versionstamp_from_timestamp(
+ &mut self,
+ ts: u64,
+ ns: &str,
+ db: &str,
+ _lock: bool,
+ ) -> Result<Option<Versionstamp>, Error> {
+ let start = crate::key::database::ts::prefix(ns, db);
+ let ts_key = crate::key::database::ts::new(ns, db, ts + 1);
+ let end = ts_key.encode()?;
+ let ts_pairs = self.getr(start..end).await?;
+ let latest_ts_pair = ts_pairs.last();
+ if let Some((_, v)) = latest_ts_pair {
+ if v.len() == 10 {
+ let mut sl = [0u8; 10];
+ sl.copy_from_slice(v);
+ return Ok(Some(sl));
+ } else {
+ return Err(Error::Internal("versionstamp is not 10 bytes".to_string()));
+ }
+ }
+ Ok(None)
+ }
+}
diff --git a/core/src/kvs/tx.rs b/core/src/kvs/tx.rs
index 3adf6e62..eb75a0f2 100644
--- a/core/src/kvs/tx.rs
+++ b/core/src/kvs/tx.rs
@@ -1,194 +1,76 @@
-use std::borrow::Cow;
-use std::fmt;
+use super::batch::Batch;
+use super::tr::Check;
+use super::Convert;
+use super::Key;
+use super::Val;
+use crate::cnf::NORMAL_FETCH_SIZE;
+use crate::cnf::TRANSACTION_CACHE_SIZE;
+use crate::dbs::node::Node;
+use crate::err::Error;
+use crate::kvs::cache::Entry;
+use crate::kvs::cache::EntryWeighter;
+use crate::kvs::scanner::Scanner;
+use crate::kvs::Transactor;
+use crate::sql::statements::DefineAccessStatement;
+use crate::sql::statements::DefineAnalyzerStatement;
+use crate::sql::statements::DefineDatabaseStatement;
+use crate::sql::statements::DefineEventStatement;
+use crate::sql::statements::DefineFieldStatement;
+use crate::sql::statements::DefineFunctionStatement;
+use crate::sql::statements::DefineIndexStatement;
+use crate::sql::statements::DefineModelStatement;
+use crate::sql::statements::DefineNamespaceStatement;
+use crate::sql::statements::DefineParamStatement;
+use crate::sql::statements::DefineTableStatement;
+use
crate::sql::statements::DefineUserStatement; +use crate::sql::statements::LiveStatement; +use crate::sql::Id; +use crate::sql::Permissions; +use crate::sql::Value; +use futures::lock::Mutex; +use futures::lock::MutexGuard; +use futures::stream::Stream; +use quick_cache::sync::Cache; use std::fmt::Debug; use std::ops::Range; use std::sync::Arc; - -use channel::{Receiver, Sender}; -use futures::lock::Mutex; use uuid::Uuid; -use sql::permission::Permissions; -use sql::statements::DefineAccessStatement; -use sql::statements::DefineAnalyzerStatement; -use sql::statements::DefineDatabaseStatement; -use sql::statements::DefineEventStatement; -use sql::statements::DefineFieldStatement; -use sql::statements::DefineFunctionStatement; -use sql::statements::DefineIndexStatement; -use sql::statements::DefineModelStatement; -use sql::statements::DefineNamespaceStatement; -use sql::statements::DefineParamStatement; -use sql::statements::DefineTableStatement; -use sql::statements::DefineUserStatement; -use sql::statements::LiveStatement; - -use crate::cf; -use crate::cnf::EXPORT_BATCH_SIZE; -use crate::dbs::node::ClusterMembership; -use crate::dbs::node::Timestamp; -use crate::err::Error; -use crate::idg::u32::U32; #[cfg(debug_assertions)] -use crate::key::debug::sprint_key; -use crate::key::error::KeyCategory; -use crate::key::key_req::KeyRequirements; -use crate::kvs::cache::Cache; -use crate::kvs::cache::Entry; -use crate::kvs::clock::SizedClock; -use crate::kvs::lq_structs::{LqValue, TrackedResult}; -use crate::kvs::Check; -use crate::options::EngineOptions; -use crate::sql; -use crate::sql::paths::EDGE; -use crate::sql::paths::IN; -use crate::sql::paths::OUT; -use crate::sql::thing::Thing; -use crate::sql::Strand; -use crate::sql::Value; -use crate::vs::Oracle; -use crate::vs::Versionstamp; +const TARGET: &str = "surrealdb::core::kvs::tx"; -use super::kv::Add; -use super::kv::Convert; -use super::Key; -use super::Val; - -#[derive(Copy, Clone, Debug)] -#[non_exhaustive] -pub enum Limit { - Unlimited, - Limited(u32), -} - -#[non_exhaustive] -pub struct ScanPage -where - K: Into + Debug, -{ - pub range: Range, - pub limit: Limit, -} - -impl From>> for ScanPage> { - fn from(value: Range>) -> Self { - ScanPage { - range: value, - limit: Limit::Unlimited, - } - } -} - -#[non_exhaustive] -pub struct ScanResult -where - K: Into + Debug, -{ - pub next_page: Option>, - pub values: Vec<(Key, Val)>, -} - -/// A set of undoable updates and requests against a dataset. 
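// ---------------------------------------------------------------------
// A standalone sketch (not part of the applied patch) of the continuation
// logic inside the removed `ScanPage`/`scan_paged` machinery: each batch
// resumed from `last_key + 0x00`, the smallest key strictly after the last
// one seen. The new `batch`/`stream` APIs replace this loop; a BTreeMap
// stands in for the datastore here.
// ---------------------------------------------------------------------
use std::collections::BTreeMap;

type Key = Vec<u8>;
type Val = Vec<u8>;

fn scan_batched(store: &BTreeMap<Key, Val>, mut start: Key, end: Key, batch: usize) -> Vec<Key> {
    let mut out = Vec::new();
    loop {
        // Fetch one page of at most `batch` keys.
        let page: Vec<Key> = store
            .range(start.clone()..end.clone())
            .take(batch)
            .map(|(k, _)| k.clone())
            .collect();
        if page.is_empty() {
            break;
        }
        // Resume immediately after the last key seen: appending a 0x00
        // byte yields the next possible key in byte order.
        start = page.last().unwrap().clone();
        start.push(0x00);
        out.extend(page);
    }
    out
}

fn main() {
    let mut store = BTreeMap::new();
    for i in 0u8..10 {
        store.insert(vec![i], vec![i]);
    }
    // Four pages of three, three, three and one key.
    assert_eq!(scan_batched(&store, vec![0], vec![0xff], 3).len(), 10);
}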
-#[allow(dead_code)] #[non_exhaustive] pub struct Transaction { - pub(super) inner: Inner, - pub(super) cache: Cache, - pub(super) cf: cf::Writer, - pub(super) vso: Arc>, - pub(super) clock: Arc, - pub(super) prepared_async_events: (Arc>, Arc>), - pub(super) engine_options: EngineOptions, -} - -#[allow(clippy::large_enum_variant)] -pub(super) enum Inner { - #[cfg(feature = "kv-mem")] - Mem(super::mem::Transaction), - #[cfg(feature = "kv-rocksdb")] - RocksDB(super::rocksdb::Transaction), - #[cfg(feature = "kv-indxdb")] - IndxDB(super::indxdb::Transaction), - #[cfg(feature = "kv-tikv")] - TiKV(super::tikv::Transaction), - #[cfg(feature = "kv-fdb")] - FoundationDB(super::fdb::Transaction), - #[cfg(feature = "kv-surrealkv")] - SurrealKV(super::surrealkv::Transaction), -} - -#[derive(Copy, Clone)] -#[non_exhaustive] -pub enum TransactionType { - Read, - Write, -} - -impl From for TransactionType { - fn from(value: bool) -> Self { - match value { - true => TransactionType::Write, - false => TransactionType::Read, - } - } -} - -#[non_exhaustive] -pub enum LockType { - Pessimistic, - Optimistic, -} - -impl fmt::Display for Transaction { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - #![allow(unused_variables)] - match &self.inner { - #[cfg(feature = "kv-mem")] - Inner::Mem(_) => write!(f, "memory"), - #[cfg(feature = "kv-rocksdb")] - Inner::RocksDB(_) => write!(f, "rocksdb"), - #[cfg(feature = "kv-indxdb")] - Inner::IndxDB(_) => write!(f, "indxdb"), - #[cfg(feature = "kv-tikv")] - Inner::TiKV(_) => write!(f, "tikv"), - #[cfg(feature = "kv-fdb")] - Inner::FoundationDB(_) => write!(f, "fdb"), - #[cfg(feature = "kv-surrealkv")] - Inner::SurrealKV(_) => write!(f, "surrealkv"), - #[allow(unreachable_patterns)] - _ => unreachable!(), - } - } + /// The underlying transactor + tx: Mutex, + /// The query cache for this store + cache: Cache, } impl Transaction { - // -------------------------------------------------- - // Configuration methods - // -------------------------------------------------- - - pub fn rollback_with_warning(mut self) -> Self { - self.check_level(Check::Warn); - self + /// Create a new query store + pub fn new(tx: Transactor) -> Transaction { + Transaction { + tx: Mutex::new(tx), + cache: Cache::with_weighter(*TRANSACTION_CACHE_SIZE, 10_000, EntryWeighter), + } } - pub fn rollback_with_panic(mut self) -> Self { - self.check_level(Check::Panic); - self + /// Retrieve the underlying transaction + pub fn inner(self) -> Transactor { + self.tx.into_inner() } - pub fn rollback_and_ignore(mut self) -> Self { - self.check_level(Check::None); - self + /// Enclose this transaction in an [`Arc`] + pub fn enclose(self) -> Arc { + Arc::new(self) } - pub fn enclose(self) -> Arc> { - Arc::new(Mutex::new(self)) + /// Retrieve the underlying transaction + pub async fn lock(&self) -> MutexGuard<'_, Transactor> { + self.tx.lock().await } - // -------------------------------------------------- - // Integral methods - // -------------------------------------------------- - /// Check if transaction is finished. /// /// If the transaction has been cancelled or committed, @@ -196,3015 +78,1493 @@ impl Transaction { /// calls to functions on this transaction will result /// in a [`Error::TxFinished`] error. pub async fn closed(&self) -> bool { - #[cfg(debug_assertions)] - trace!("Closed"); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.closed(), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. 
- } => v.closed(), - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.closed(), - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.closed(), - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.closed(), - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.is_closed(), - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + self.lock().await.closed().await } /// Cancel a transaction. /// /// This reverses all changes made within the transaction. - pub async fn cancel(&mut self) -> Result<(), Error> { - #[cfg(debug_assertions)] - trace!("Cancel"); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.cancel(), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.cancel().await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.cancel().await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.cancel().await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.cancel().await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.cancel().await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + pub async fn cancel(&self) -> Result<(), Error> { + self.lock().await.cancel().await } /// Commit a transaction. /// /// This attempts to commit all changes made within the transaction. - pub async fn commit(&mut self) -> Result<(), Error> { - #[cfg(debug_assertions)] - trace!("Commit"); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.commit(), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.commit().await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.commit().await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.commit().await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.commit().await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. 
- } => v.commit().await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } - } - - /// From the existing transaction, consume all the remaining live query registration events and return them synchronously - /// This function does not check that a transaction was committed, but the intention is to consume from this - /// only once the transaction is committed - pub(crate) fn consume_pending_live_queries(&self) -> Vec { - let mut tracked_results: Vec = - Vec::with_capacity(self.engine_options.new_live_queries_per_transaction as usize); - while let Ok(tracked_result) = self.prepared_async_events.1.try_recv() { - tracked_results.push(tracked_result); - } - tracked_results - } - - /// Sends an async operation, such as a new live query, to the transaction which is forwarded - /// only once committed and removed once a transaction is aborted - // allow(dead_code) because this is used in v2, but not v1 - #[allow(dead_code)] - pub(crate) fn pre_commit_register_async_event( - &mut self, - lq_entry: TrackedResult, - ) -> Result<(), Error> { - self.prepared_async_events.0.try_send(lq_entry).map_err(|_send_err| { - Error::Internal("Prepared lq failed to add lq to channel".to_string()) - }) - } - - /// Delete a key from the datastore. - #[allow(unused_variables)] - pub async fn del(&mut self, key: K) -> Result<(), Error> - where - K: Into + Debug, - { - let key = key.into(); - #[cfg(debug_assertions)] - trace!("Del {}", sprint_key(&key)); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.del(key), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.del(key).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.del(key).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.del(key).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.del(key).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.del(key).await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + pub async fn commit(&self) -> Result<(), Error> { + self.lock().await.commit().await } /// Check if a key exists in the datastore. - #[allow(unused_variables)] - pub async fn exi(&mut self, key: K) -> Result + pub async fn exists(&self, key: K) -> Result where - K: Into + Debug + AsRef<[u8]>, + K: Into + Debug, { - #[cfg(debug_assertions)] - trace!("Exi {}", sprint_key(&key)); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.exi(key), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.exi(key).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.exi(key).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.exi(key).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.exi(key).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.exists(key).await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + self.lock().await.exists(key).await } /// Fetch a key from the datastore. 
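// ---------------------------------------------------------------------
// A standalone sketch (not part of the applied patch) of the wrapper shape
// introduced by the new tx.rs `Transaction`: a shell around a mutex-guarded
// `Transactor`, so callers can share one `&Transaction` while each
// operation briefly takes the lock and delegates. Uses the `futures` crate,
// as the patch itself does; the counter stands in for real datastore work.
// ---------------------------------------------------------------------
use futures::executor::block_on;
use futures::lock::{Mutex, MutexGuard};

struct Transactor {
    counter: u64,
}

struct Transaction {
    tx: Mutex<Transactor>,
}

impl Transaction {
    fn new(tx: Transactor) -> Self {
        Transaction {
            tx: Mutex::new(tx),
        }
    }
    // Retrieve the underlying transactor, as `Transaction::lock` does above.
    async fn lock(&self) -> MutexGuard<'_, Transactor> {
        self.tx.lock().await
    }
    // Each method locks, delegates, and releases: note `&self`, not `&mut self`.
    async fn bump(&self) -> u64 {
        let mut guard = self.lock().await;
        guard.counter += 1;
        guard.counter
    }
}

fn main() {
    let tx = Transaction::new(Transactor {
        counter: 0,
    });
    assert_eq!(block_on(tx.bump()), 1);
    assert_eq!(block_on(tx.bump()), 2);
}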
- #[allow(unused_variables)] - pub async fn get(&mut self, key: K) -> Result, Error> + pub async fn get(&self, key: K) -> Result, Error> where K: Into + Debug, { - let key = key.into(); - #[cfg(debug_assertions)] - trace!("Get {}", sprint_key(&key)); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.get(key), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.get(key).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.get(key).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.get(key).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.get(key).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.get(key).await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + self.lock().await.get(key).await } - /// Insert or update a key in the datastore. - #[allow(unused_variables)] - pub async fn set(&mut self, key: K, val: V) -> Result<(), Error> - where - K: Into + Debug, - V: Into + Debug, - { - let key = key.into(); - #[cfg(debug_assertions)] - trace!("Set {} => {:?}", sprint_key(&key), val); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.set(key, val), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.set(key, val).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.set(key, val).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.set(key, val).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.set(key, val).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.set(key, val).await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } - } - - /// Obtain a new change timestamp for a key - /// which is replaced with the current timestamp when the transaction is committed. - /// NOTE: This should be called when composing the change feed entries for this transaction, - /// which should be done immediately before the transaction commit. - /// That is to keep other transactions commit delay(pessimistic) or conflict(optimistic) as less as possible. - #[allow(unused)] - pub async fn get_timestamp(&mut self, key: K, lock: bool) -> Result + /// Retrieve a batch set of keys from the datastore. + pub async fn getm(&self, keys: Vec) -> Result, Error> where K: Into + Debug, { - // We convert to byte slice as its easier at this level - let key = key.into(); - #[cfg(debug_assertions)] - trace!("Get Timestamp {}", sprint_key(&key)); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.get_timestamp(key), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.get_timestamp(key).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.get_timestamp(key).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.get_timestamp(key, lock).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.get_timestamp().await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. 
- } => v.get_timestamp(key).await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + self.lock().await.getm(keys).await } - #[allow(unused)] - async fn get_non_monotonic_versionstamp(&mut self) -> Result { - Ok(self.vso.lock().await.now()) - } - - #[allow(unused)] - async fn get_non_monotonic_versionstamped_key( - &mut self, - prefix: K, - suffix: K, - ) -> Result, Error> - where - K: Into, - { - let prefix: Key = prefix.into(); - let suffix: Key = suffix.into(); - let ts = self.get_non_monotonic_versionstamp().await?; - let mut k: Vec = prefix.clone(); - k.append(&mut ts.to_vec()); - k.append(&mut suffix.clone()); - Ok(k) - } - - /// Insert or update a key in the datastore. - #[allow(unused_variables)] - pub async fn set_versionstamped_key( - &mut self, - ts_key: K, - prefix: K, - suffix: K, - val: V, - ) -> Result<(), Error> + /// Retrieve a specific prefix of keys from the datastore. + /// + /// This function fetches key-value pairs from the underlying datastore in grouped batches. + pub async fn getp(&self, key: K) -> Result, Error> where K: Into + Debug, - V: Into + Debug, { - let ts_key = ts_key.into(); - let prefix = prefix.into(); - let suffix = suffix.into(); - #[cfg(debug_assertions)] - trace!( - "Set Versionstamped Key ts={} prefix={} suffix={}", - sprint_key(&prefix), - sprint_key(&ts_key), - sprint_key(&suffix) - ); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => { - let k = v.get_versionstamped_key(ts_key, prefix, suffix).await?; - v.set(k, val) - } - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => { - let k = v.get_versionstamped_key(ts_key, prefix, suffix).await?; - v.set(k, val).await - } - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => { - let k = v.get_versionstamped_key(ts_key, prefix, suffix).await?; - v.set(k, val).await - } - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => { - let k = v.get_versionstamped_key(ts_key, prefix, suffix).await?; - v.set(k, val).await - } - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.set_versionstamped_key(prefix, suffix, val).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => { - let k = v.get_versionstamped_key(ts_key, prefix, suffix).await?; - v.set(k, val).await - } - #[allow(unreachable_patterns)] - _ => unreachable!(), - } - } - - /// Insert a key if it doesn't exist in the datastore. - #[allow(unused_variables)] - pub async fn put(&mut self, category: KeyCategory, key: K, val: V) -> Result<(), Error> - where - K: Into + Debug, - V: Into + Debug, - { - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.put(key, val), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.put(category, key, val).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.put(key, val).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.put(category, key, val).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.put(category, key, val).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. 
- } => v.put(category, key, val).await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + self.lock().await.getp(key).await } /// Retrieve a specific range of keys from the datastore. /// - /// This function fetches the full range of key-value pairs, in a single request to the underlying datastore. - #[allow(unused_variables)] - pub async fn scan(&mut self, rng: Range, limit: u32) -> Result, Error> + /// This function fetches key-value pairs from the underlying datastore in grouped batches. + pub async fn getr(&self, rng: Range) -> Result, Error> where K: Into + Debug, { - let rng = Range { - start: rng.start.into(), - end: rng.end.into(), - }; - #[cfg(debug_assertions)] - trace!("Scan {} - {}", sprint_key(&rng.start), sprint_key(&rng.end)); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.scan(rng, limit), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.scan(rng, limit).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.scan(rng, limit).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.scan(rng, limit).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.scan(rng, limit).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.scan(rng, limit).await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + self.lock().await.getr(rng).await } - /// Retrieve a specific range of keys from the datastore. - /// - /// This function fetches the full range of key-value pairs, in a single request to the underlying datastore. - #[allow(unused_variables)] - pub async fn scan_paged( - &mut self, - page: ScanPage, - batch_limit: u32, - ) -> Result, Error> - where - K: Into + From> + AsRef<[u8]> + Debug + Clone, - { - #[cfg(debug_assertions)] - trace!("Scan paged {} - {}", sprint_key(&page.range.start), sprint_key(&page.range.end)); - let range = page.range.clone(); - let res = match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.scan(range, batch_limit), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.scan(range, batch_limit).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.scan(range, batch_limit).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.scan(range, batch_limit).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.scan(range, batch_limit).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.scan(range, batch_limit).await, - #[allow(unreachable_patterns)] - _ => Err(Error::MissingStorageEngine), - }; - // Construct next page - res.map(|tup_vec: Vec<(Key, Val)>| { - if tup_vec.len() < batch_limit as usize { - ScanResult { - next_page: None, - values: tup_vec, - } - } else { - let (mut rng, limit) = (page.range, page.limit); - rng.start = match tup_vec.last() { - Some((k, _)) => K::from(k.clone().add(0)), - None => rng.start, - }; - ScanResult { - next_page: Some(ScanPage { - range: rng, - limit, - }), - values: tup_vec, - } - } - }) - } - - /// Update a key in the datastore if the current value matches a condition. 
- #[allow(unused_variables)] - pub async fn putc(&mut self, key: K, val: V, chk: Option) -> Result<(), Error> + /// Delete a key from the datastore. + pub async fn del(&self, key: K) -> Result<(), Error> where K: Into + Debug, - V: Into + Debug, { - let key = key.into(); - #[cfg(debug_assertions)] - trace!("Putc {} if {:?} => {:?}", sprint_key(&key), chk, val); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.putc(key, val, chk), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.putc(key, val, chk).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.putc(key, val, chk).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.putc(key, val, chk).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.putc(key, val, chk).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.putc(key, val, chk).await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + self.lock().await.del(key).await } /// Delete a key from the datastore if the current value matches a condition. - #[allow(unused_variables)] - pub async fn delc(&mut self, key: K, chk: Option) -> Result<(), Error> + pub async fn delc(&self, key: K, chk: Option) -> Result<(), Error> where K: Into + Debug, V: Into + Debug, { - let key = key.into(); - #[cfg(debug_assertions)] - trace!("Delc {} if {:?}", sprint_key(&key), chk); - match self { - #[cfg(feature = "kv-mem")] - Transaction { - inner: Inner::Mem(v), - .. - } => v.delc(key, chk), - #[cfg(feature = "kv-rocksdb")] - Transaction { - inner: Inner::RocksDB(v), - .. - } => v.delc(key, chk).await, - #[cfg(feature = "kv-indxdb")] - Transaction { - inner: Inner::IndxDB(v), - .. - } => v.delc(key, chk).await, - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.delc(key, chk).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.delc(key, chk).await, - #[cfg(feature = "kv-surrealkv")] - Transaction { - inner: Inner::SurrealKV(v), - .. - } => v.delc(key, chk).await, - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + self.lock().await.delc(key, chk).await } - // -------------------------------------------------- - // Superjacent methods - // -------------------------------------------------- + /// Delete a range of keys from the datastore. + /// + /// This function deletes entries from the underlying datastore in grouped batches. + pub async fn delr(&self, rng: Range) -> Result<(), Error> + where + K: Into + Debug, + { + self.lock().await.delr(rng).await + } + + /// Delete a prefix of keys from the datastore. + /// + /// This function deletes entries from the underlying datastore in grouped batches. + pub async fn delp(&self, key: K) -> Result<(), Error> + where + K: Into + Debug, + { + self.lock().await.delp(key).await + } + + /// Insert or update a key in the datastore. + pub async fn set(&self, key: K, val: V) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + self.lock().await.set(key, val).await + } + + /// Insert a key if it doesn't exist in the datastore. + pub async fn put(&self, key: K, val: V) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + self.lock().await.put(key, val).await + } + + /// Update a key in the datastore if the current value matches a condition. 
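// ---------------------------------------------------------------------
// A self-contained sketch (not part of the applied patch) of the `putc`
// condition implemented by the backends earlier in this patch: write only
// when the stored value equals `chk`, or, when `chk` is None, only when
// the key is absent. A HashMap stands in for the datastore and the error
// name is illustrative.
// ---------------------------------------------------------------------
use std::collections::HashMap;

type Key = Vec<u8>;
type Val = Vec<u8>;

#[derive(Debug, PartialEq)]
enum TxError {
    ConditionNotMet,
}

fn putc(
    store: &mut HashMap<Key, Val>,
    key: Key,
    val: Val,
    chk: Option<Val>,
) -> Result<(), TxError> {
    // Compare the current value against the expected one, if any.
    let matches = match (store.get(&key), &chk) {
        (Some(current), Some(expected)) => current == expected,
        (None, None) => true,
        _ => false,
    };
    if matches {
        store.insert(key, val);
        Ok(())
    } else {
        Err(TxError::ConditionNotMet)
    }
}

fn main() {
    let mut store = HashMap::new();
    assert!(putc(&mut store, b"k".to_vec(), b"a".to_vec(), None).is_ok());
    assert!(putc(&mut store, b"k".to_vec(), b"b".to_vec(), Some(b"a".to_vec())).is_ok());
    assert_eq!(
        putc(&mut store, b"k".to_vec(), b"c".to_vec(), Some(b"a".to_vec())),
        Err(TxError::ConditionNotMet)
    );
}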
+ pub async fn putc(&self, key: K, val: V, chk: Option) -> Result<(), Error> + where + K: Into + Debug, + V: Into + Debug, + { + self.lock().await.putc(key, val, chk).await + } /// Retrieve a specific range of keys from the datastore. /// - /// This function fetches key-value pairs from the underlying datastore in batches of 1000. - pub async fn getr(&mut self, rng: Range, limit: u32) -> Result, Error> + /// This function fetches the full range of keys, in a single request to the underlying datastore. + pub async fn keys(&self, rng: Range, limit: u32) -> Result, Error> where K: Into + Debug, { - let beg: Key = rng.start.into(); - let end: Key = rng.end.into(); - #[cfg(debug_assertions)] - trace!("Getr {}..{} (limit: {limit})", sprint_key(&beg), sprint_key(&end)); - let mut out: Vec<(Key, Val)> = vec![]; - let mut next_page = Some(ScanPage { - range: beg..end, - limit: Limit::Limited(limit), - }); - // Start processing - while let Some(page) = next_page { - // Get records batch - let res = self.scan_paged(page, 1000).await?; - next_page = res.next_page; - let res = res.values; - // Exit when settled - if res.is_empty() { - break; - } - // Loop over results - for (k, v) in res.into_iter() { - // Delete - out.push((k, v)); - } - } - Ok(out) + self.lock().await.keys(rng, limit).await } - /// Delete a range of keys from the datastore. + + /// Retrieve a specific range of keys from the datastore. /// - /// This function fetches key-value pairs from the underlying datastore in batches of 1000. - pub async fn delr(&mut self, rng: Range, limit: u32) -> Result<(), Error> + /// This function fetches the full range of key-value pairs, in a single request to the underlying datastore. + pub async fn scan(&self, rng: Range, limit: u32) -> Result, Error> where K: Into + Debug, { - let rng = Range { - start: rng.start.into(), - end: rng.end.into(), - }; - #[cfg(debug_assertions)] - trace!("Delr {}..{} (limit: {limit})", sprint_key(&rng.start), sprint_key(&rng.end)); - match self { - #[cfg(feature = "kv-tikv")] - Transaction { - inner: Inner::TiKV(v), - .. - } => v.delr(rng, limit).await, - #[cfg(feature = "kv-fdb")] - Transaction { - inner: Inner::FoundationDB(v), - .. - } => v.delr(rng).await, - #[allow(unreachable_patterns)] - _ => self._delr(rng, limit).await, - } + self.lock().await.scan(rng, limit).await } - /// Delete a range of keys from the datastore. + /// Retrieve a batched scan over a specific range of keys in the datastore. /// - /// This function fetches key-value pairs from the underlying datastore in batches of 1000. - async fn _delr(&mut self, rng: Range, limit: u32) -> Result<(), Error> + /// This function fetches the key-value pairs in batches, with multiple requests to the underlying datastore. 
+ pub async fn batch(&self, rng: Range, batch: u32, values: bool) -> Result where K: Into + Debug, { - let beg: Key = rng.start.into(); - let end: Key = rng.end.into(); - // Start processing - let mut next_page = Some(ScanPage { - range: beg..end, - limit: Limit::Limited(limit), - }); - while let Some(page) = next_page { - // Get records batch - let res = self.scan_paged(page, limit).await?; - next_page = res.next_page; - let res = res.values; - // Exit when settled - if res.is_empty() { - #[cfg(debug_assertions)] - trace!("Delr page was empty"); - break; - } - // Loop over results - for (k, _) in res.into_iter() { - // Delete - #[cfg(debug_assertions)] - trace!("Delr key {}", sprint_key(&k)); - self.del(k).await?; - } - } - Ok(()) + self.lock().await.batch(rng, batch, values).await } - /// Retrieve a specific prefix of keys from the datastore. + + /// Retrieve a stream over a specific range of keys in the datastore. /// - /// This function fetches key-value pairs from the underlying datastore in batches of 1000. - pub async fn getp(&mut self, key: K, limit: u32) -> Result, Error> + /// This function fetches the key-value pairs in batches, with multiple requests to the underlying datastore. + pub fn stream(&self, rng: Range) -> impl Stream> + '_ where K: Into + Debug, { - let beg: Key = key.into(); - let end: Key = beg.clone().add(0xff); - #[cfg(debug_assertions)] - trace!("Getp {}-{} (limit: {limit})", sprint_key(&beg), sprint_key(&end)); - let mut out: Vec<(Key, Val)> = vec![]; - // Start processing - let mut next_page = Some(ScanPage { - range: beg..end, - limit: Limit::Limited(limit), - }); - while let Some(page) = next_page { - let res = self.scan_paged(page, 1000).await?; - next_page = res.next_page; - // Get records batch - let res = res.values; - // Exit when settled - if res.is_empty() { - break; - }; - // Loop over results - for (k, v) in res.into_iter() { - // Delete - out.push((k, v)); - } - } - Ok(out) - } - /// Delete a prefix of keys from the datastore. - /// - /// This function fetches key-value pairs from the underlying datastore in batches of 1000. - pub async fn delp(&mut self, key: K, limit: u32) -> Result<(), Error> - where - K: Into + Debug, - { - let beg: Key = key.into(); - let end: Key = beg.clone().add(0xff); - #[cfg(debug_assertions)] - trace!("Delp {}-{} (limit: {limit})", sprint_key(&beg), sprint_key(&end)); - let min = beg.clone(); - let max = end.clone(); - self.delr(min..max, limit).await?; - Ok(()) - } - - // -------------------------------------------------- - // Superimposed methods - // -------------------------------------------------- - - /// Clear any cache entry for the specified key. - pub async fn clr(&mut self, key: K) -> Result<(), Error> - where - K: Into, - { - let key: Key = key.into(); - self.cache.del(&key); - Ok(()) - } - - // Register cluster membership - // NOTE: Setting cluster membership sets the heartbeat - // Remember to set the heartbeat as well - pub async fn set_nd(&mut self, id: Uuid) -> Result<(), Error> { - let key = crate::key::root::nd::Nd::new(id); - match self.get_nd(id).await? 
{ - Some(_) => Err(Error::ClAlreadyExists { - value: id.to_string(), - }), - None => { - let value = ClusterMembership { - name: id.to_string(), - heartbeat: self.clock().await, - }; - self.put(key.key_category(), key, value).await?; - Ok(()) - } - } - } - - // Retrieve cluster information - pub async fn get_nd(&mut self, id: Uuid) -> Result, Error> { - let key = crate::key::root::nd::Nd::new(id); - let val = self.get(key).await?; - match val { - Some(v) => Ok(Some::(v.into())), - None => Ok(None), - } - } - - /// Clock retrieves the current timestamp, without guaranteeing - /// monotonicity in all implementations. - /// - /// It is used for unreliable ordering of events as well as - /// handling of timeouts. Operations that are not guaranteed to be correct. - /// But also allows for lexicographical ordering. - /// - /// Public for tests, but not required for usage from a user perspective. - pub async fn clock(&self) -> Timestamp { - // Use a timestamp oracle if available - // Match, because we cannot have sized traits or async traits - self.clock.now().await - } - - // Set heartbeat - pub async fn set_hb(&mut self, timestamp: Timestamp, id: Uuid) -> Result<(), Error> { - let key = crate::key::root::hb::Hb::new(timestamp, id); - // We do not need to do a read, we always want to overwrite - let key_enc = key.encode()?; - self.put( - key.key_category(), - key_enc, - ClusterMembership { - name: id.to_string(), - heartbeat: timestamp, + Scanner::new( + self, + *NORMAL_FETCH_SIZE, + Range { + start: rng.start.into(), + end: rng.end.into(), }, ) - .await?; - Ok(()) } - pub async fn del_hb(&mut self, timestamp: Timestamp, id: Uuid) -> Result<(), Error> { - let key = crate::key::root::hb::Hb::new(timestamp, id); - self.del(key).await?; - Ok(()) + // -------------------------------------------------- + // Rollback methods + // -------------------------------------------------- + + /// Warn if this transaction is dropped without proper handling. + pub async fn rollback_with_warning(self) -> Self { + self.tx.lock().await.check_level(Check::Warn); + self } - // Delete a cluster registration entry - pub async fn del_nd(&mut self, node: Uuid) -> Result<(), Error> { - let key = crate::key::root::nd::Nd::new(node); - let key_enc = key.encode()?; - self.del(key_enc).await + /// Panic if this transaction is dropped without proper handling. + pub async fn rollback_with_panic(self) -> Self { + self.tx.lock().await.check_level(Check::Panic); + self } - // Delete the live query notification registry on the table - pub async fn del_ndlq(&mut self, nd: Uuid, lq: Uuid, ns: &str, db: &str) -> Result<(), Error> { - let key = crate::key::node::lq::Lq::new(nd, lq, ns, db); - let key_enc = key.encode()?; - self.del(key_enc).await + /// Do nothing if this transaction is dropped without proper handling. 
+ pub async fn rollback_and_ignore(self) -> Self { + self.tx.lock().await.check_level(Check::None); + self } - // Scans up until the heartbeat timestamp and returns the discovered nodes - pub async fn scan_hb( - &mut self, - time_to: &Timestamp, - batch_size: u32, - ) -> Result, Error> { - let beg = crate::key::root::hb::Hb::prefix(); - let end = crate::key::root::hb::Hb::suffix(time_to); - let mut out: Vec = vec![]; - // Start processing - let mut next_page = Some(ScanPage::from(beg..end)); - while let Some(page) = next_page { - let res = self.scan_paged(page, batch_size).await?; - next_page = res.next_page; - for (k, _) in res.values.into_iter() { - out.push(crate::key::root::hb::Hb::decode(k.as_slice())?); - } - } - Ok(out) - } + // -------------------------------------------------- + // Cache methods + // -------------------------------------------------- - /// scan_nd will scan all the cluster membership registers - /// setting limit to 0 will result in scanning all entries - pub async fn scan_nd(&mut self, batch_size: u32) -> Result, Error> { - let beg = crate::key::root::nd::Nd::prefix(); - let end = crate::key::root::nd::Nd::suffix(); - let mut out: Vec = vec![]; - // Start processing - let mut next_page = Some(ScanPage::from(beg..end)); - while let Some(page) = next_page { - let res = self.scan_paged(page, batch_size).await?; - next_page = res.next_page; - for (_, v) in res.values.into_iter() { - out.push(v.into()); - } - } - Ok(out) - } - - pub async fn delr_hb( - &mut self, - ts: Vec, - limit: u32, - ) -> Result<(), Error> { - trace!("delr_hb: ts={:?} limit={:?}", ts, limit); - for hb in ts.into_iter() { - self.del(hb).await?; - } - Ok(()) - } - - pub async fn del_tblq(&mut self, ns: &str, db: &str, tb: &str, lv: Uuid) -> Result<(), Error> { - trace!("del_lv: ns={:?} db={:?} tb={:?} lv={:?}", ns, db, tb, lv); - let key = crate::key::table::lq::new(ns, db, tb, lv); - self.cache.del(&key.clone().into()); - self.del(key).await - } - - pub async fn scan_ndlq<'a>( - &mut self, - node: &Uuid, - batch_size: u32, - ) -> Result, Error> { - let beg = crate::key::node::lq::prefix_nd(node); - let end = crate::key::node::lq::suffix_nd(node); - let mut out: Vec = vec![]; - let mut next_page = Some(ScanPage::from(beg..end)); - while let Some(page) = next_page { - let res = self.scan_paged(page, batch_size).await?; - next_page = res.next_page; - for (key, value) in res.values.into_iter() { - let lv = crate::key::node::lq::Lq::decode(key.as_slice())?; - let tb: String = String::from_utf8(value).unwrap(); - out.push(LqValue { - nd: lv.nd.into(), - ns: lv.ns.to_string(), - db: lv.db.to_string(), - tb, - lq: lv.lq.into(), - }); - } - } - Ok(out) - } - - pub async fn scan_tblq<'a>( - &mut self, - ns: &str, - db: &str, - tb: &str, - batch_size: u32, - ) -> Result, Error> { - let beg = crate::key::table::lq::prefix(ns, db, tb); - let end = crate::key::table::lq::suffix(ns, db, tb); - let mut out: Vec = vec![]; - let mut next_page = Some(ScanPage::from(beg..end)); - while let Some(page) = next_page { - let res = self.scan_paged(page, batch_size).await?; - next_page = res.next_page; - for (key, value) in res.values.into_iter() { - let lv = crate::key::table::lq::Lq::decode(key.as_slice())?; - let val: LiveStatement = value.into(); - out.push(LqValue { - nd: val.node, - ns: lv.ns.to_string(), - db: lv.db.to_string(), - tb: lv.tb.to_string(), - lq: val.id, - }); - } - } - Ok(out) - } - - /// Add live query to table - pub async fn putc_tblq( - &mut self, - ns: &str, - db: &str, - tb: &str, - live_stm: 
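The three rollback helpers above do not roll anything back themselves; they set the level of the check (`Check::Warn`, `Check::Panic`, or `Check::None`) that fires if the transaction is later dropped without an explicit commit or cancel. A sketch of the intended pattern, assuming a `Datastore` handle `ds` and the crate's `Write` and `Optimistic` transaction options:

    // Panic on drop if this transaction is never finished.
    let tx = ds.transaction(Write, Optimistic).await?.rollback_with_panic().await;
    tx.set(key, val).await?;
    // Without this commit (or an explicit cancel), dropping `tx` would now panic.
    tx.commit().await?;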
LiveStatement, - expected: Option, - ) -> Result<(), Error> { - let key = crate::key::table::lq::new(ns, db, tb, live_stm.id.0); - let key_enc = crate::key::table::lq::Lq::encode(&key)?; + pub async fn all_nodes(&self) -> Result, Error> { + // Log this function call in development #[cfg(debug_assertions)] - trace!("putc_tblq ({:?}): key={:?}", &live_stm.id, sprint_key(&key_enc)); - self.putc(key_enc, live_stm, expected).await - } - - pub async fn putc_ndlq( - &mut self, - nd: Uuid, - lq: Uuid, - ns: &str, - db: &str, - tb: &str, - chk: Option<&str>, - ) -> Result<(), Error> { - let key = crate::key::node::lq::new(nd, lq, ns, db); - self.putc(key, tb, chk).await - } - - /// Retrieve all ROOT users. - pub async fn all_root_users(&mut self) -> Result, Error> { - let beg = crate::key::root::us::prefix(); - let end = crate::key::root::us::suffix(); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - Ok(val) - } - - /// Retrieve all ROOT access method definitions. - pub async fn all_root_accesses(&mut self) -> Result, Error> { - let key = crate::key::root::ac::prefix(); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Acs(v) = e { - v - } else { - unreachable!(); + trace!(target: TARGET, "all_nodes"); + // Continue with the function logic + let key = crate::key::root::nd::prefix(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::root::nd::suffix(); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Nds(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::root::ac::prefix(); - let end = crate::key::root::ac::suffix(); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Acs(Arc::clone(&val))); - val - }) + } + .into_nds()) } - /// Retrieve all ROOT access method definitions in redacted form. - pub async fn all_root_accesses_redacted( - &mut self, - ) -> Result, Error> { - let accesses = self.all_root_accesses().await?; - let redacted: Vec<_> = accesses.iter().map(|statement| statement.redacted()).collect(); - Ok(Arc::from(redacted)) + /// Retrieve all ROOT level users in a datastore. + pub async fn all_root_users(&self) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_root_users"); + // Continue with the function logic + let key = crate::key::root::us::prefix(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::root::us::suffix(); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Rus(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_rus()) + } + + /// Retrieve all ROOT level accesses in a datastore. 
+ pub async fn all_root_accesses(&self) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_root_accesses"); + // Continue with the function logic + let key = crate::key::root::ac::prefix(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::root::ac::suffix(); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Ras(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_ras()) } /// Retrieve all namespace definitions in a datastore. - pub async fn all_ns(&mut self) -> Result, Error> { + pub async fn all_ns(&self) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_ns"); + // Continue with the function logic let key = crate::key::root::ns::prefix(); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Nss(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::root::ns::suffix(); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Nss(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::root::ns::prefix(); - let end = crate::key::root::ns::suffix(); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Nss(Arc::clone(&val))); - val - }) + } + .into_nss()) } /// Retrieve all namespace user definitions for a specific namespace. - pub async fn all_ns_users(&mut self, ns: &str) -> Result, Error> { + pub async fn all_ns_users(&self, ns: &str) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_ns_users {ns}"); + // Continue with the function logic let key = crate::key::namespace::us::prefix(ns); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Nus(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::namespace::us::suffix(ns); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Nus(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::namespace::us::prefix(ns); - let end = crate::key::namespace::us::suffix(ns); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Nus(Arc::clone(&val))); - val - }) + } + .into_nus()) } - /// Retrieve all namespace access method definitions. - pub async fn all_ns_accesses( - &mut self, - ns: &str, - ) -> Result, Error> { + /// Retrieve all namespace access definitions for a specific namespace. 
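All of the `all_*` methods in this section share one read-through shape built on the cache's `get_value_or_guard_async`: a hit returns the cached `Entry` directly, while a miss hands back a guard so that only the task holding it performs the range read and publishes the entry for concurrent readers. A generic sketch of that shape (the `fetch_definitions` helper and the choice of the `Entry::Nss` variant are illustrative):

    let entry = match cache.get_value_or_guard_async(&key).await {
        // Cache hit: reuse the shared entry.
        Ok(entry) => entry,
        // Cache miss: we hold the guard, so we perform the single range read.
        Err(guard) => {
            let val = fetch_definitions(key..end).await?; // illustrative range read
            let entry = Entry::Nss(Arc::clone(&val));
            // Publish the entry; concurrent waiters receive it from the guard.
            let _ = guard.insert(entry.clone());
            entry
        }
    };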
+ pub async fn all_ns_accesses(&self, ns: &str) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_ns_accesses {ns}"); + // Continue with the function logic let key = crate::key::namespace::ac::prefix(ns); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Nas(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::namespace::ac::suffix(ns); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Nas(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::namespace::ac::prefix(ns); - let end = crate::key::namespace::ac::suffix(ns); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Nas(Arc::clone(&val))); - val - }) - } - - /// Retrieve all namespace access method definitions in redacted form. - pub async fn all_ns_accesses_redacted( - &mut self, - ns: &str, - ) -> Result, Error> { - let accesses = self.all_ns_accesses(ns).await?; - let redacted: Vec<_> = accesses.iter().map(|statement| statement.redacted()).collect(); - Ok(Arc::from(redacted)) + } + .into_nas()) } /// Retrieve all database definitions for a specific namespace. - pub async fn all_db(&mut self, ns: &str) -> Result, Error> { + pub async fn all_db(&self, ns: &str) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_db {ns}"); + // Continue with the function logic let key = crate::key::namespace::db::prefix(ns); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Dbs(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::namespace::db::suffix(ns); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Dbs(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::namespace::db::prefix(ns); - let end = crate::key::namespace::db::suffix(ns); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Dbs(Arc::clone(&val))); - val - }) + } + .into_dbs()) } /// Retrieve all database user definitions for a specific database. 
pub async fn all_db_users( - &mut self, + &self, ns: &str, db: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_db_users {ns} {db}"); + // Continue with the function logic let key = crate::key::database::us::prefix(ns, db); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Dus(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::database::us::suffix(ns, db); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Dus(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::database::us::prefix(ns, db); - let end = crate::key::database::us::suffix(ns, db); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Dus(Arc::clone(&val))); - val - }) + } + .into_dus()) } - /// Retrieve all database access method definitions. + /// Retrieve all database access definitions for a specific database. pub async fn all_db_accesses( - &mut self, + &self, ns: &str, db: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_db_accesses {ns} {db}"); + // Continue with the function logic let key = crate::key::database::ac::prefix(ns, db); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Das(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::database::ac::suffix(ns, db); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Das(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::database::ac::prefix(ns, db); - let end = crate::key::database::ac::suffix(ns, db); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Das(Arc::clone(&val))); - val - }) - } - - /// Retrieve all database access method definitions in redacted form. - pub async fn all_db_accesses_redacted( - &mut self, - ns: &str, - db: &str, - ) -> Result, Error> { - let accesses = self.all_db_accesses(ns, db).await?; - let redacted: Vec<_> = accesses.iter().map(|statement| statement.redacted()).collect(); - Ok(Arc::from(redacted)) + } + .into_das()) } /// Retrieve all analyzer definitions for a specific database. 
pub async fn all_db_analyzers( - &mut self, + &self, ns: &str, db: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_db_analyzers {ns} {db}"); + // Continue with the function logic let key = crate::key::database::az::prefix(ns, db); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Azs(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::database::az::suffix(ns, db); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Azs(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::database::az::prefix(ns, db); - let end = crate::key::database::az::suffix(ns, db); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Azs(Arc::clone(&val))); - val - }) + } + .into_azs()) } /// Retrieve all function definitions for a specific database. pub async fn all_db_functions( - &mut self, + &self, ns: &str, db: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_db_functions {ns} {db}"); + // Continue with the function logic let key = crate::key::database::fc::prefix(ns, db); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Fcs(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::database::fc::suffix(ns, db); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Fcs(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::database::fc::prefix(ns, db); - let end = crate::key::database::fc::suffix(ns, db); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Fcs(Arc::clone(&val))); - val - }) + } + .into_fcs()) } /// Retrieve all param definitions for a specific database. pub async fn all_db_params( - &mut self, + &self, ns: &str, db: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_db_params {ns} {db}"); + // Continue with the function logic let key = crate::key::database::pa::prefix(ns, db); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Pas(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::database::pa::suffix(ns, db); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Pas(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::database::pa::prefix(ns, db); - let end = crate::key::database::pa::suffix(ns, db); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Pas(Arc::clone(&val))); - val - }) + } + .into_pas()) } /// Retrieve all model definitions for a specific database. 
pub async fn all_db_models( - &mut self, + &self, ns: &str, db: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_db_models {ns} {db}"); + // Continue with the function logic let key = crate::key::database::ml::prefix(ns, db); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Mls(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::database::ml::suffix(ns, db); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Mls(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::database::ml::prefix(ns, db); - let end = crate::key::database::ml::suffix(ns, db); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Mls(Arc::clone(&val))); - val - }) + } + .into_mls()) } /// Retrieve all table definitions for a specific database. - pub async fn all_tb( - &mut self, - ns: &str, - db: &str, - ) -> Result, Error> { + pub async fn all_tb(&self, ns: &str, db: &str) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_tb {ns} {db}"); + // Continue with the function logic let key = crate::key::database::tb::prefix(ns, db); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Tbs(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::database::tb::suffix(ns, db); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Tbs(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::database::tb::prefix(ns, db); - let end = crate::key::database::tb::suffix(ns, db); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Tbs(Arc::clone(&val))); - val - }) + } + .into_tbs()) } /// Retrieve all event definitions for a specific table. pub async fn all_tb_events( - &mut self, + &self, ns: &str, db: &str, tb: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_tb_events {ns} {db} {tb}"); + // Continue with the function logic let key = crate::key::table::ev::prefix(ns, db, tb); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Evs(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::table::ev::suffix(ns, db, tb); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Evs(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::table::ev::prefix(ns, db, tb); - let end = crate::key::table::ev::suffix(ns, db, tb); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Evs(Arc::clone(&val))); - val - }) + } + .into_evs()) } /// Retrieve all field definitions for a specific table. 
pub async fn all_tb_fields( - &mut self, + &self, ns: &str, db: &str, tb: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_tb_fields {ns} {db} {tb}"); + // Continue with the function logic let key = crate::key::table::fd::prefix(ns, db, tb); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Fds(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::table::fd::suffix(ns, db, tb); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Fds(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::table::fd::prefix(ns, db, tb); - let end = crate::key::table::fd::suffix(ns, db, tb); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Fds(Arc::clone(&val))); - val - }) + } + .into_fds()) } /// Retrieve all index definitions for a specific table. pub async fn all_tb_indexes( - &mut self, + &self, ns: &str, db: &str, tb: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_tb_indexes {ns} {db} {tb}"); + // Continue with the function logic let key = crate::key::table::ix::prefix(ns, db, tb); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Ixs(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::table::ix::suffix(ns, db, tb); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Ixs(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::table::ix::prefix(ns, db, tb); - let end = crate::key::table::ix::suffix(ns, db, tb); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Ixs(Arc::clone(&val))); - val - }) + } + .into_ixs()) } /// Retrieve all view definitions for a specific table. pub async fn all_tb_views( - &mut self, + &self, ns: &str, db: &str, tb: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_tb_views {ns} {db} {tb}"); + // Continue with the function logic let key = crate::key::table::ft::prefix(ns, db, tb); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Fts(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::table::ft::suffix(ns, db, tb); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Fts(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::table::ft::prefix(ns, db, tb); - let end = crate::key::table::ft::suffix(ns, db, tb); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Fts(Arc::clone(&val))); - val - }) + } + .into_fts()) } /// Retrieve all live definitions for a specific table. 
pub async fn all_tb_lives( - &mut self, + &self, ns: &str, db: &str, tb: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "all_tb_lives {ns} {db} {tb}"); + // Continue with the function logic let key = crate::key::table::lq::prefix(ns, db, tb); - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Lvs(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let end = crate::key::table::lq::suffix(ns, db, tb); + let val = self.getr(key..end).await?; + let val = val.convert().into(); + let val = Entry::Lvs(Arc::clone(&val)); + let _ = cache.insert(val.clone()); + val } - } else { - let beg = crate::key::table::lq::prefix(ns, db, tb); - let end = crate::key::table::lq::suffix(ns, db, tb); - let val = self.getr(beg..end, u32::MAX).await?; - let val = val.convert().into(); - self.cache.set(key, Entry::Lvs(Arc::clone(&val))); - val - }) - } - - pub async fn all_lq(&mut self, nd: &uuid::Uuid) -> Result, Error> { - let beg = crate::key::node::lq::prefix_nd(nd); - let end = crate::key::node::lq::suffix_nd(nd); - let lq_pairs = self.getr(beg..end, u32::MAX).await?; - let mut lqs = vec![]; - for (key, value) in lq_pairs { - let lq_key = crate::key::node::lq::Lq::decode(key.as_slice())?; - trace!("Value is {:?}", &value); - let lq_value = String::from_utf8(value).map_err(|e| { - Error::Internal(format!("Failed to decode a value while reading LQ: {}", e)) - })?; - let lqv = LqValue { - nd: (*nd).into(), - ns: lq_key.ns.to_string(), - db: lq_key.db.to_string(), - tb: lq_value, - lq: lq_key.lq.into(), - }; - lqs.push(lqv); } - Ok(lqs) - } - - /// Retrieve a specific user definition from ROOT. - pub async fn get_root_user(&mut self, user: &str) -> Result { - let key = crate::key::root::us::new(user); - let val = self.get(key).await?.ok_or(Error::UserRootNotFound { - value: user.to_owned(), - })?; - Ok(val.into()) - } - - /// Retrieve a specific root access method definition. - pub async fn get_root_access(&mut self, ac: &str) -> Result { - let key = crate::key::root::ac::new(ac); - let val = self.get(key).await?.ok_or(Error::AccessRootNotFound { - value: ac.to_owned(), - })?; - Ok(val.into()) + .into_lvs()) } /// Retrieve a specific namespace definition. - pub async fn get_ns(&mut self, ns: &str) -> Result { - let key = crate::key::root::ns::new(ns); - let val = self.get(key).await?.ok_or(Error::NsNotFound { - value: ns.to_owned(), - })?; - Ok(val.into()) + pub async fn get_node(&self, id: Uuid) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_node {id}"); + // Continue with the function logic + let key = crate::key::root::nd::new(id).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::NdNotFound { + value: id.to_string(), + })?; + let val: Node = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) } - /// Retrieve a specific user definition from a namespace. + /// Retrieve a specific namespace user definition. 
+    pub async fn get_root_user(&self, user: &str) -> Result<Arc<DefineUserStatement>, Error> {
+        // Log this function call in development
+        #[cfg(debug_assertions)]
+        trace!(target: TARGET, "get_root_user {user}");
+        // Continue with the function logic
+        let key = crate::key::root::us::new(user).encode()?;
+        let res = self.cache.get_value_or_guard_async(&key).await;
+        Ok(match res {
+            Ok(val) => val,
+            Err(cache) => {
+                let val = self.get(key).await?.ok_or(Error::UserRootNotFound {
+                    value: user.to_owned(),
+                })?;
+                let val: DefineUserStatement = val.into();
+                let val = Entry::Any(Arc::new(val));
+                let _ = cache.insert(val.clone());
+                val
+            }
+        }
+        .into_type())
+    }
+
+    /// Retrieve a specific root access definition.
+    pub async fn get_root_access(&self, user: &str) -> Result<Arc<DefineAccessStatement>, Error> {
+        // Log this function call in development
+        #[cfg(debug_assertions)]
+        trace!(target: TARGET, "get_root_access {user}");
+        // Continue with the function logic
+        let key = crate::key::root::ac::new(user).encode()?;
+        let res = self.cache.get_value_or_guard_async(&key).await;
+        Ok(match res {
+            Ok(val) => val,
+            Err(cache) => {
+                let val = self.get(key).await?.ok_or(Error::AccessRootNotFound {
+                    value: user.to_owned(),
+                })?;
+                let val: DefineAccessStatement = val.into();
+                let val = Entry::Any(Arc::new(val));
+                let _ = cache.insert(val.clone());
+                val
+            }
+        }
+        .into_type())
+    }
+
+    /// Retrieve a specific namespace definition.
+    pub async fn get_ns(&self, ns: &str) -> Result<Arc<DefineNamespaceStatement>, Error> {
+        // Log this function call in development
+        #[cfg(debug_assertions)]
+        trace!(target: TARGET, "get_ns {ns}");
+        // Continue with the function logic
+        let key = crate::key::root::ns::new(ns).encode()?;
+        let res = self.cache.get_value_or_guard_async(&key).await;
+        Ok(match res {
+            Ok(val) => val,
+            Err(cache) => {
+                let val = self.get(key).await?.ok_or(Error::NsNotFound {
+                    value: ns.to_owned(),
+                })?;
+                let val: DefineNamespaceStatement = val.into();
+                let val = Entry::Any(Arc::new(val));
+                let _ = cache.insert(val.clone());
+                val
+            }
+        }
+        .into_type())
+    }
+
+    /// Retrieve a specific namespace user definition.
     pub async fn get_ns_user(
-        &mut self,
+        &self,
         ns: &str,
         user: &str,
-    ) -> Result<DefineUserStatement, Error> {
-        let key = crate::key::namespace::us::new(ns, user);
-        let val = self.get(key).await?.ok_or(Error::UserNsNotFound {
-            value: user.to_owned(),
-            ns: ns.to_owned(),
-        })?;
-        Ok(val.into())
+    ) -> Result<Arc<DefineUserStatement>, Error> {
+        // Log this function call in development
+        #[cfg(debug_assertions)]
+        trace!(target: TARGET, "get_ns_user {ns} {user}");
+        // Continue with the function logic
+        let key = crate::key::namespace::us::new(ns, user).encode()?;
+        let res = self.cache.get_value_or_guard_async(&key).await;
+        Ok(match res {
+            Ok(val) => val,
+            Err(cache) => {
+                let val = self.get(key).await?.ok_or(Error::UserNsNotFound {
+                    value: user.to_owned(),
+                    ns: ns.to_owned(),
+                })?;
+                let val: DefineUserStatement = val.into();
+                let val = Entry::Any(Arc::new(val));
+                let _ = cache.insert(val.clone());
+                val
+            }
+        }
+        .into_type())
     }

-    /// Retrieve a specific namespace access method definition.
+    /// Retrieve a specific namespace access definition.
pub async fn get_ns_access( - &mut self, + &self, ns: &str, - ac: &str, - ) -> Result { - let key = crate::key::namespace::ac::new(ns, ac); - let val = self.get(key).await?.ok_or(Error::AccessNsNotFound { - value: ac.to_owned(), - ns: ns.to_owned(), - })?; - Ok(val.into()) + na: &str, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_ns_access {ns} {na}"); + // Continue with the function logic + let key = crate::key::namespace::ac::new(ns, na).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::AccessNsNotFound { + value: na.to_owned(), + ns: ns.to_owned(), + })?; + let val: DefineAccessStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) } /// Retrieve a specific database definition. - pub async fn get_db(&mut self, ns: &str, db: &str) -> Result { - let key = crate::key::namespace::db::new(ns, db); - let val = self.get(key).await?.ok_or(Error::DbNotFound { - value: db.to_owned(), - })?; - Ok(val.into()) + pub async fn get_db(&self, ns: &str, db: &str) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_db {ns} {db}"); + // Continue with the function logic + let key = crate::key::namespace::db::new(ns, db).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::DbNotFound { + value: db.to_owned(), + })?; + let val: DefineDatabaseStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) } /// Retrieve a specific user definition from a database. pub async fn get_db_user( - &mut self, + &self, ns: &str, db: &str, user: &str, - ) -> Result { - let key = crate::key::database::us::new(ns, db, user); - let val = self.get(key).await?.ok_or(Error::UserDbNotFound { - value: user.to_owned(), - ns: ns.to_owned(), - db: db.to_owned(), - })?; - Ok(val.into()) + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_db_user {ns} {db} {user}"); + // Continue with the function logic + let key = crate::key::database::us::new(ns, db, user).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::UserDbNotFound { + value: user.to_owned(), + ns: ns.to_owned(), + db: db.to_owned(), + })?; + let val: DefineUserStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) + } + + /// Retrieve a specific database access definition. 
+ pub async fn get_db_access( + &self, + ns: &str, + db: &str, + da: &str, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_db_access {ns} {db} {da}"); + // Continue with the function logic + let key = crate::key::database::ac::new(ns, db, da).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::AccessDbNotFound { + value: da.to_owned(), + ns: ns.to_owned(), + db: db.to_owned(), + })?; + let val: DefineAccessStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) } /// Retrieve a specific model definition from a database. pub async fn get_db_model( - &mut self, - ns: &str, - db: &str, - ml: &str, - vn: &str, - ) -> Result { - let key = crate::key::database::ml::new(ns, db, ml, vn); - let val = self.get(key).await?.ok_or(Error::MlNotFound { - value: format!("{ml}<{vn}>"), - })?; - Ok(val.into()) - } - - /// Retrieve a specific database access method definition. - pub async fn get_db_access( - &mut self, - ns: &str, - db: &str, - ac: &str, - ) -> Result { - let key = crate::key::database::ac::new(ns, db, ac); - let val = self.get(key).await?.ok_or(Error::AccessDbNotFound { - value: ac.to_owned(), - ns: ns.to_owned(), - db: db.to_owned(), - })?; - Ok(val.into()) - } - - /// Retrieve a specific analyzer definition. - pub async fn get_db_analyzer( - &mut self, - ns: &str, - db: &str, - az: &str, - ) -> Result { - let key = crate::key::database::az::new(ns, db, az); - let val = self.get(key).await?.ok_or(Error::AzNotFound { - value: az.to_owned(), - })?; - Ok(val.into()) - } - - /// Retrieve a specific function definition from a database. - pub async fn get_db_function( - &mut self, - ns: &str, - db: &str, - fc: &str, - ) -> Result { - let key = crate::key::database::fc::new(ns, db, fc); - let val = self.get(key).await?.ok_or(Error::FcNotFound { - value: fc.to_owned(), - })?; - Ok(val.into()) - } - - /// Retrieve a specific function definition from a database. - pub async fn get_db_param( - &mut self, - ns: &str, - db: &str, - pa: &str, - ) -> Result { - let key = crate::key::database::pa::new(ns, db, pa); - let val = self.get(key).await?.ok_or(Error::PaNotFound { - value: pa.to_owned(), - })?; - Ok(val.into()) - } - - /// Return the table stored at the lq address - pub async fn get_lq( - &mut self, - nd: Uuid, - ns: &str, - db: &str, - lq: Uuid, - ) -> Result { - let key = crate::key::node::lq::new(nd, lq, ns, db); - let val = self.get(key).await?.ok_or(Error::LqNotFound { - value: lq.to_string(), - })?; - Value::from(val).convert_to_strand() - } - - /// Retrieve a specific table definition. - pub async fn get_tb( - &mut self, - ns: &str, - db: &str, - tb: &str, - ) -> Result { - let key = crate::key::database::tb::new(ns, db, tb); - let val = self.get(key).await?.ok_or(Error::TbNotFound { - value: tb.to_owned(), - })?; - Ok(val.into()) - } - - /// Retrieve a live query for a table. 
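Unlike the `all_*` list entries, each single-definition getter in this section caches its result as an untyped `Entry::Any(Arc::new(...))` and downcasts on the way out with `.into_type()`, inferred from the method's return type. A sketch of that shape (the standalone bindings are illustrative; inside the patch the same two steps happen across the cache-miss arm and the final `Ok(...)`):

    // Store: erase the concrete statement type behind the Any variant.
    let entry = Entry::Any(Arc::new(DefineDatabaseStatement::default()));
    // Load: recover the concrete type; a variant mismatch would be a logic error.
    let db: Arc<DefineDatabaseStatement> = entry.into_type();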
- pub async fn get_tb_live( - &mut self, - ns: &str, - db: &str, - tb: &str, - lv: &Uuid, - ) -> Result { - let key = crate::key::table::lq::new(ns, db, tb, *lv); - let key_enc = crate::key::table::lq::Lq::encode(&key)?; - #[cfg(debug_assertions)] - trace!("Getting lv ({:?}) {}", lv, sprint_key(&key_enc)); - let val = self.get(key_enc).await?.ok_or(Error::LvNotFound { - value: lv.to_string(), - })?; - Ok(val.into()) - } - - /// Retrieve an event for a table. - pub async fn get_tb_event( - &mut self, - ns: &str, - db: &str, - tb: &str, - ev: &str, - ) -> Result { - let key = crate::key::table::ev::new(ns, db, tb, ev); - let key_enc = crate::key::table::ev::Ev::encode(&key)?; - #[cfg(debug_assertions)] - trace!("Getting ev ({:?}) {}", ev, sprint_key(&key_enc)); - let val = self.get(key_enc).await?.ok_or(Error::EvNotFound { - value: ev.to_string(), - })?; - Ok(val.into()) - } - - /// Retrieve an event for a table. - pub async fn get_tb_field( - &mut self, - ns: &str, - db: &str, - tb: &str, - fd: &str, - ) -> Result { - let key = crate::key::table::fd::new(ns, db, tb, fd); - let key_enc = crate::key::table::fd::Fd::encode(&key)?; - #[cfg(debug_assertions)] - trace!("Getting fd ({:?}) {}", fd, sprint_key(&key_enc)); - let val = self.get(key_enc).await?.ok_or(Error::FdNotFound { - value: fd.to_string(), - })?; - Ok(val.into()) - } - - /// Retrieve an event for a table. - pub async fn get_tb_index( - &mut self, - ns: &str, - db: &str, - tb: &str, - ix: &str, - ) -> Result { - let key = crate::key::table::ix::new(ns, db, tb, ix); - let key_enc = crate::key::table::ix::Ix::encode(&key)?; - #[cfg(debug_assertions)] - trace!("Getting ix ({:?}) {}", ix, sprint_key(&key_enc)); - let val = self.get(key_enc).await?.ok_or(Error::IxNotFound { - value: ix.to_string(), - })?; - Ok(val.into()) - } - - /// Add a namespace with a default configuration, only if we are in dynamic mode. - pub async fn add_ns( - &mut self, - ns: &str, - strict: bool, - ) -> Result { - match self.get_ns(ns).await { - Err(Error::NsNotFound { - value, - }) => match strict { - false => { - let key = crate::key::root::ns::new(ns); - let val = DefineNamespaceStatement { - name: ns.to_owned().into(), - ..Default::default() - }; - self.put(key.key_category(), key, &val).await?; - Ok(val) - } - true => Err(Error::NsNotFound { - value, - }), - }, - Err(e) => Err(e), - Ok(v) => Ok(v), - } - } - - /// Add a database with a default configuration, only if we are in dynamic mode. - pub async fn add_db( - &mut self, - ns: &str, - db: &str, - strict: bool, - ) -> Result { - match self.get_db(ns, db).await { - Err(Error::DbNotFound { - value, - }) => match strict { - false => { - let key = crate::key::namespace::db::new(ns, db); - let val = DefineDatabaseStatement { - name: db.to_owned().into(), - ..Default::default() - }; - self.put(key.key_category(), key, &val).await?; - Ok(val) - } - true => Err(Error::DbNotFound { - value, - }), - }, - Err(e) => Err(e), - Ok(v) => Ok(v), - } - } - - /// Add a table with a default configuration, only if we are in dynamic mode. 
- pub async fn add_tb( - &mut self, - ns: &str, - db: &str, - tb: &str, - strict: bool, - ) -> Result { - match self.get_tb(ns, db, tb).await { - Err(Error::TbNotFound { - value, - }) => match strict { - false => { - let key = crate::key::database::tb::new(ns, db, tb); - let val = DefineTableStatement { - name: tb.to_owned().into(), - permissions: Permissions::none(), - ..Default::default() - }; - self.put(key.key_category(), key, &val).await?; - Ok(val) - } - true => Err(Error::TbNotFound { - value, - }), - }, - Err(e) => Err(e), - Ok(v) => Ok(v), - } - } - - /// Retrieve and cache a specific namespace definition. - pub async fn get_and_cache_ns( - &mut self, - ns: &str, - ) -> Result, Error> { - let key = crate::key::root::ns::new(ns).encode()?; - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Ns(v) = e { - v - } else { - unreachable!(); - } - } else { - let val = self.get(key.clone()).await?.ok_or(Error::NsNotFound { - value: ns.to_owned(), - })?; - let val: Arc = Arc::new(val.into()); - self.cache.set(key, Entry::Ns(Arc::clone(&val))); - val - }) - } - - /// Retrieve and cache a specific database definition. - pub async fn get_and_cache_db( - &mut self, - ns: &str, - db: &str, - ) -> Result, Error> { - let key = crate::key::namespace::db::new(ns, db).encode()?; - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Db(v) = e { - v - } else { - unreachable!(); - } - } else { - let val = self.get(key.clone()).await?.ok_or(Error::DbNotFound { - value: db.to_owned(), - })?; - let val: Arc = Arc::new(val.into()); - self.cache.set(key, Entry::Db(Arc::clone(&val))); - val - }) - } - - /// Retrieve and cache a specific table definition. - pub async fn get_and_cache_tb( - &mut self, - ns: &str, - db: &str, - tb: &str, - ) -> Result, Error> { - let key = crate::key::database::tb::new(ns, db, tb).encode()?; - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Tb(v) = e { - v - } else { - unreachable!(); - } - } else { - let val = self.get(key.clone()).await?.ok_or(Error::TbNotFound { - value: tb.to_owned(), - })?; - let val: Arc = Arc::new(val.into()); - self.cache.set(key, Entry::Tb(Arc::clone(&val))); - val - }) - } - - /// Retrieve a specific function definition. - pub async fn get_and_cache_db_function( - &mut self, - ns: &str, - db: &str, - fc: &str, - ) -> Result, Error> { - let key = crate::key::database::fc::new(ns, db, fc).encode()?; - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Fc(v) = e { - v - } else { - unreachable!(); - } - } else { - let val = self.get(key.clone()).await?.ok_or(Error::FcNotFound { - value: fc.to_owned(), - })?; - let val: Arc = Arc::new(val.into()); - self.cache.set(key, Entry::Fc(Arc::clone(&val))); - val - }) - } - - /// Retrieve a specific param definition. - pub async fn get_and_cache_db_param( - &mut self, - ns: &str, - db: &str, - pa: &str, - ) -> Result, Error> { - let key = crate::key::database::pa::new(ns, db, pa).encode()?; - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Pa(v) = e { - v - } else { - unreachable!(); - } - } else { - let val = self.get(key.clone()).await?.ok_or(Error::PaNotFound { - value: pa.to_owned(), - })?; - let val: Arc = Arc::new(val.into()); - self.cache.set(key, Entry::Pa(Arc::clone(&val))); - val - }) - } - - /// Retrieve a specific model definition. 
- pub async fn get_and_cache_db_model( - &mut self, + &self, ns: &str, db: &str, ml: &str, vn: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_db_model {ns} {db} {ml} {vn}"); + // Continue with the function logic let key = crate::key::database::ml::new(ns, db, ml, vn).encode()?; - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Ml(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::MlNotFound { + value: format!("{ml}<{vn}>"), + })?; + let val: DefineModelStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val } - } else { - let val = self.get(key.clone()).await?.ok_or(Error::MlNotFound { - value: format!("{ml}<{vn}>"), - })?; - let val: Arc = Arc::new(val.into()); - self.cache.set(key, Entry::Ml(Arc::clone(&val))); - val - }) + } + .into_type()) } - /// Retrieve a specific table index definition. - pub async fn get_and_cache_tb_index( - &mut self, + /// Retrieve a specific analyzer definition. + pub async fn get_db_analyzer( + &self, + ns: &str, + db: &str, + az: &str, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_db_analyzer {ns} {db} {az}"); + // Continue with the function logic + let key = crate::key::database::az::new(ns, db, az).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::AzNotFound { + value: az.to_owned(), + })?; + let val: DefineAnalyzerStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) + } + + /// Retrieve a specific function definition from a database. + pub async fn get_db_function( + &self, + ns: &str, + db: &str, + fc: &str, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_db_function {ns} {db} {fc}"); + // Continue with the function logic + let key = crate::key::database::fc::new(ns, db, fc).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::FcNotFound { + value: fc.to_owned(), + })?; + let val: DefineFunctionStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) + } + + /// Retrieve a specific function definition from a database. + pub async fn get_db_param( + &self, + ns: &str, + db: &str, + pa: &str, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_db_param {ns} {db} {pa}"); + // Continue with the function logic + let key = crate::key::database::pa::new(ns, db, pa).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::PaNotFound { + value: pa.to_owned(), + })?; + let val: DefineParamStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) + } + + /// Retrieve a specific table definition. 
+ pub async fn get_tb( + &self, + ns: &str, + db: &str, + tb: &str, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_tb {ns} {db} {tb}"); + // Continue with the function logic + let key = crate::key::database::tb::new(ns, db, tb).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::TbNotFound { + value: tb.to_owned(), + })?; + let val: DefineTableStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) + } + + /// Retrieve an event for a table. + pub async fn get_tb_event( + &self, + ns: &str, + db: &str, + tb: &str, + ev: &str, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_tb_event {ns} {db} {tb} {ev}"); + // Continue with the function logic + let key = crate::key::table::ev::new(ns, db, tb, ev).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::EvNotFound { + value: ev.to_owned(), + })?; + let val: DefineEventStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) + } + + /// Retrieve a field for a table. + pub async fn get_tb_field( + &self, + ns: &str, + db: &str, + tb: &str, + fd: &str, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_tb_field {ns} {db} {tb} {fd}"); + // Continue with the function logic + let key = crate::key::table::fd::new(ns, db, tb, fd).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::FdNotFound { + value: fd.to_owned(), + })?; + let val: DefineFieldStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + } + .into_type()) + } + + /// Retrieve an index for a table. + pub async fn get_tb_index( + &self, ns: &str, db: &str, tb: &str, ix: &str, ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_tb_index {ns} {db} {tb} {ix}"); + // Continue with the function logic let key = crate::key::table::ix::new(ns, db, tb, ix).encode()?; - Ok(if let Some(e) = self.cache.get(&key) { - if let Entry::Ix(v) = e { - v - } else { - unreachable!(); + let res = self.cache.get_value_or_guard_async(&key).await; + Ok(match res { + Ok(val) => val, + Err(cache) => { + let val = self.get(key).await?.ok_or(Error::IxNotFound { + value: ix.to_owned(), + })?; + let val: DefineIndexStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val } - } else { - let val = self.get(key.clone()).await?.ok_or(Error::IxNotFound { - value: ix.to_owned(), - })?; - let val: Arc = Arc::new(val.into()); - self.cache.set(key, Entry::Ix(Arc::clone(&val))); - val - }) + } + .into_type()) } - /// Add a namespace with a default configuration, only if we are in dynamic mode. - pub async fn add_and_cache_ns( - &mut self, + /// Fetch a specific record value. 
+ pub async fn get_record( + &self, ns: &str, - strict: bool, - ) -> Result, Error> { - match self.get_and_cache_ns(ns).await { - Err(Error::NsNotFound { - value, - }) => match strict { - false => { - let key = crate::key::root::ns::new(ns); - let val = DefineNamespaceStatement { - name: ns.to_owned().into(), - ..Default::default() - }; - self.put(key.key_category(), key, &val).await?; - Ok(Arc::new(val)) + db: &str, + tb: &str, + id: &Id, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_record {ns} {db} {tb} {id}"); + // Continue with the function logic + let key = crate::key::thing::new(ns, db, tb, id).encode()?; + let res = self.cache.get_value_or_guard_async(&key).await; + match res { + // The entry is in the cache + Ok(val) => Ok(val.into_val()), + // The entry is not in the cache + Err(cache) => match self.get(key).await? { + // The value exists in the datastore + Some(val) => { + let val = Entry::Val(Arc::new(val.into())); + let _ = cache.insert(val.clone()); + Ok(val.into_val()) } - true => Err(Error::NsNotFound { - value, - }), + // The value is not in the datastore + None => Ok(Arc::new(Value::None)), }, - Err(e) => Err(e), - Ok(v) => Ok(v), } } - /// Add a database with a default configuration, only if we are in dynamic mode. - pub async fn add_and_cache_db( - &mut self, + pub async fn set_record( + &self, + ns: &str, + db: &str, + tb: &str, + id: &Id, + val: Value, + ) -> Result<(), Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "set_record {ns} {db} {tb} {id} {val}"); + // Continue with the function logic + let key = crate::key::thing::new(ns, db, tb, id); + let enc = crate::key::thing::new(ns, db, tb, id).encode()?; + // Set the value in the datastore + self.set(&key, &val).await?; + // Set the value in the cache + self.cache.insert(enc, Entry::Val(Arc::new(val))); + // Return nothing + Ok(()) + } + + /// Get or add a namespace with a default configuration, only if we are in dynamic mode. + pub async fn get_or_add_ns( + &self, + ns: &str, + strict: bool, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_or_add_ns {ns}"); + // Continue with the function logic + self.get_or_add_ns_upwards(ns, strict, false).await + } + + /// Get or add a database with a default configuration, only if we are in dynamic mode. + pub async fn get_or_add_db( + &self, ns: &str, db: &str, strict: bool, ) -> Result, Error> { - match self.get_and_cache_db(ns, db).await { - Err(Error::DbNotFound { - value, - }) => match strict { - false => { - let key = crate::key::namespace::db::new(ns, db); - let val = DefineDatabaseStatement { - name: db.to_owned().into(), - ..Default::default() - }; - self.put(key.key_category(), key, &val).await?; - Ok(Arc::new(val)) - } - true => Err(Error::DbNotFound { - value, - }), - }, - Err(e) => Err(e), - Ok(v) => Ok(v), - } + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_or_add_db {ns} {db}"); + // Continue with the function logic + self.get_or_add_db_upwards(ns, db, strict, false).await } - /// Add a table with a default configuration, only if we are in dynamic mode. - pub async fn add_and_cache_tb( - &mut self, + /// Get or add a table with a default configuration, only if we are in dynamic mode. 
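Note that `set_record` above writes through to both the datastore and the transaction-level cache, so a subsequent `get_record` for the same record is served from the cache without another datastore read. A sketch, assuming `ns`, `db`, `tb`, an `Id`, and a `Value` are already in scope:

    // Write the record and prime the transaction cache in one call...
    tx.set_record(ns, db, tb, &id, val.clone()).await?;
    // ...so this immediate read hits the cache rather than the datastore.
    let got: Arc<Value> = tx.get_record(ns, db, tb, &id).await?;
    assert_eq!(*got, val);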
+ pub async fn get_or_add_tb( + &self, ns: &str, db: &str, tb: &str, strict: bool, ) -> Result, Error> { - match self.get_and_cache_tb(ns, db, tb).await { - Err(Error::TbNotFound { - value, - }) => match strict { - false => { - let key = crate::key::database::tb::new(ns, db, tb); - let val = DefineTableStatement { - name: tb.to_owned().into(), - permissions: Permissions::none(), - ..Default::default() - }; - self.put(key.key_category(), key, &val).await?; - Ok(Arc::new(val)) - } - true => Err(Error::TbNotFound { - value, - }), - }, - Err(e) => Err(e), - Ok(v) => Ok(v), - } + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "get_or_add_tb {ns} {db} {tb}"); + // Continue with the function logic + self.get_or_add_tb_upwards(ns, db, tb, strict, false).await } - /// Retrieve and cache a specific table definition. - pub async fn check_ns_db_tb( - &mut self, + /// Ensures that a table, database, and namespace are all fully defined. + #[inline(always)] + pub async fn ensure_ns_db_tb( + &self, + ns: &str, + db: &str, + tb: &str, + strict: bool, + ) -> Result, Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "ensure_ns_db_tb {ns} {db} {tb}"); + // Continue with the function logic + self.get_or_add_tb_upwards(ns, db, tb, strict, true).await + } + + /// Ensure a specific table (and database, and namespace) exist. + #[inline(always)] + pub(crate) async fn check_ns_db_tb( + &self, ns: &str, db: &str, tb: &str, strict: bool, ) -> Result<(), Error> { + // Log this function call in development + #[cfg(debug_assertions)] + trace!(target: TARGET, "check_ns_db_tb {ns} {db} {tb}"); + // Continue with the function logic match strict { // Strict mode is disabled false => Ok(()), // Strict mode is enabled true => { - self.get_and_cache_ns(ns).await?; - self.get_and_cache_db(ns, db).await?; - self.get_and_cache_tb(ns, db, tb).await?; - Ok(()) - } - } - } - - // -------------------------------------------------- - // Additional methods - // -------------------------------------------------- - - /// Writes the full database contents as binary SQL. 
-	pub async fn export(&mut self, ns: &str, db: &str, chn: Sender<Vec<u8>>) -> Result<(), Error> {
-		// Output OPTIONS
-		{
-			chn.send(bytes!("-- ------------------------------")).await?;
-			chn.send(bytes!("-- OPTION")).await?;
-			chn.send(bytes!("-- ------------------------------")).await?;
-			chn.send(bytes!("")).await?;
-			chn.send(bytes!("OPTION IMPORT;")).await?;
-			chn.send(bytes!("")).await?;
-		}
-		// Output USERS
-		{
-			let dus = self.all_db_users(ns, db).await?;
-			if !dus.is_empty() {
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("-- USERS")).await?;
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("")).await?;
-				for us in dus.iter() {
-					chn.send(bytes!(format!("{us};"))).await?;
-				}
-				chn.send(bytes!("")).await?;
-			}
-		}
-		// Output ACCESSES
-		{
-			let dts = self.all_db_accesses(ns, db).await?;
-			if !dts.is_empty() {
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("-- ACCESSES")).await?;
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("")).await?;
-				for dt in dts.iter() {
-					chn.send(bytes!(format!("{dt};"))).await?;
-				}
-				chn.send(bytes!("")).await?;
-			}
-		}
-		// Output PARAMS
-		{
-			let pas = self.all_db_params(ns, db).await?;
-			if !pas.is_empty() {
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("-- PARAMS")).await?;
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("")).await?;
-				for pa in pas.iter() {
-					chn.send(bytes!(format!("{pa};"))).await?;
-				}
-				chn.send(bytes!("")).await?;
-			}
-		}
-		// Output FUNCTIONS
-		{
-			let fcs = self.all_db_functions(ns, db).await?;
-			if !fcs.is_empty() {
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("-- FUNCTIONS")).await?;
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("")).await?;
-				for fc in fcs.iter() {
-					chn.send(bytes!(format!("{fc};"))).await?;
-				}
-				chn.send(bytes!("")).await?;
-			}
-		}
-		// Output ANALYZERS
-		{
-			let azs = self.all_db_analyzers(ns, db).await?;
-			if !azs.is_empty() {
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("-- ANALYZERS")).await?;
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("")).await?;
-				for az in azs.iter() {
-					chn.send(bytes!(format!("{az};"))).await?;
-				}
-				chn.send(bytes!("")).await?;
-			}
-		}
-		// Output TABLES
-		{
-			let tbs = self.all_tb(ns, db).await?;
-			if !tbs.is_empty() {
-				for tb in tbs.iter() {
-					// Output TABLE
-					chn.send(bytes!("-- ------------------------------")).await?;
-					chn.send(bytes!(format!("-- TABLE: {}", tb.name))).await?;
-					chn.send(bytes!("-- ------------------------------")).await?;
-					chn.send(bytes!("")).await?;
-					chn.send(bytes!(format!("{tb};"))).await?;
-					chn.send(bytes!("")).await?;
-					// Output FIELDS
-					let fds = self.all_tb_fields(ns, db, &tb.name).await?;
-					if !fds.is_empty() {
-						for fd in fds.iter() {
-							chn.send(bytes!(format!("{fd};"))).await?;
-						}
-						chn.send(bytes!("")).await?;
-					}
-					// Output INDEXES
-					let ixs = self.all_tb_indexes(ns, db, &tb.name).await?;
-					if !ixs.is_empty() {
-						for ix in ixs.iter() {
-							chn.send(bytes!(format!("{ix};"))).await?;
-						}
-						chn.send(bytes!("")).await?;
-					}
-					// Output EVENTS
-					let evs = self.all_tb_events(ns, db, &tb.name).await?;
-					if !evs.is_empty() {
-						for ev in evs.iter() {
-							chn.send(bytes!(format!("{ev};"))).await?;
-						}
-						chn.send(bytes!("")).await?;
-					}
-				}
-				// Start transaction
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("-- TRANSACTION")).await?;
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("")).await?;
-				chn.send(bytes!("BEGIN TRANSACTION;")).await?;
-				chn.send(bytes!("")).await?;
-				// Records to be exported, categorised by the type of INSERT statement
-				let mut exported_normal: Vec<String> =
-					Vec::with_capacity(*EXPORT_BATCH_SIZE as usize);
-				let mut exported_relation: Vec<String> =
-					Vec::with_capacity(*EXPORT_BATCH_SIZE as usize);
-				// Output TABLE data
-				for tb in tbs.iter() {
-					// Start records
-					chn.send(bytes!("-- ------------------------------")).await?;
-					chn.send(bytes!(format!("-- TABLE DATA: {}", tb.name))).await?;
-					chn.send(bytes!("-- ------------------------------")).await?;
-					chn.send(bytes!("")).await?;
-					// Fetch records
-					let beg = crate::key::thing::prefix(ns, db, &tb.name);
-					let end = crate::key::thing::suffix(ns, db, &tb.name);
-					let mut nxt: Option<ScanPage<Vec<u8>>> = Some(ScanPage::from(beg..end));
-					while nxt.is_some() {
-						let res = self.scan_paged(nxt.unwrap(), *EXPORT_BATCH_SIZE).await?;
-						nxt = res.next_page;
-						let res = res.values;
-						if res.is_empty() {
-							break;
-						}
-
-						// Categorize results
-						for (_, v) in res.into_iter() {
-							// Parse the key and the value
-							let v: Value = (&v).into();
-							// Check if this is a graph edge
-							match (v.pick(&*EDGE), v.pick(&*IN), v.pick(&*OUT)) {
-								// This is a graph edge record
-								(Value::Bool(true), Value::Thing(_), Value::Thing(_)) => {
-									exported_relation.push(v.to_string());
-								}
-								// This is a normal record
-								_ => {
-									exported_normal.push(v.to_string());
+				// Check that the table exists
+				match self.get_tb(ns, db, tb).await {
+					Err(Error::TbNotFound {
+						value: tb,
+					}) => {
+						// If not, check the database exists
+						match self.get_db(ns, db).await {
+							Err(Error::DbNotFound {
+								value: db,
+							}) => {
+								// If not, check the namespace exists
+								match self.get_ns(ns).await {
+									Err(Error::NsNotFound {
+										value: ns,
+									}) => Err(Error::NsNotFound {
+										value: ns,
+									}),
+									// Return any other errors
+									Err(err) => Err(err),
+									// Namespace does exist
+									Ok(_) => Err(Error::DbNotFound {
+										value: db,
+									}),
 								}
 							}
+							// Return any other errors
+							Err(err) => Err(err),
+							// Database does exist
+							Ok(_) => Err(Error::TbNotFound {
+								value: tb,
+							}),
 						}
-
-						// Add batches of INSERT statements
-						// No need to chunk here, the scan is limited to 1000
-						if !exported_normal.is_empty() {
-							let values = exported_normal.join(", ");
-							let sql = format!("INSERT [ {values} ];");
-							chn.send(bytes!(sql)).await?;
-							exported_normal.clear();
-						}
-
-						// Add batches of INSERT RELATION statements
-						// No need to chunk here, the scan is limited to 1000
-						if !exported_relation.is_empty() {
-							let values = exported_relation.join(", ");
-							let sql = format!("INSERT RELATION [ {values} ];");
-							chn.send(bytes!(sql)).await?;
-							exported_relation.clear()
-						}
-
-						continue;
 					}
-					chn.send(bytes!("")).await?;
+					// Return any other errors
+					Err(err) => Err(err),
+					// Table does exist
+					Ok(_) => Ok(()),
 				}
-				// Commit transaction
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("-- TRANSACTION")).await?;
-				chn.send(bytes!("-- ------------------------------")).await?;
-				chn.send(bytes!("")).await?;
-				chn.send(bytes!("COMMIT TRANSACTION;")).await?;
-				chn.send(bytes!("")).await?;
 			}
 		}
-		// Everything exported
-		Ok(())
 	}
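The nested matches above exist so that, in strict mode, the error reports the outermost missing level of the hierarchy rather than always claiming the table was not found. A sketch of the resulting behaviour from a caller inside the crate (the namespace, database, and table names are illustrative):

    // Given a defined namespace "app" but no database "main":
    match txn.check_ns_db_tb("app", "main", "person", true).await {
        // The check drills down and reports the database as the missing
        // level, not the table that was originally requested
        Err(Error::DbNotFound { value }) => assert_eq!(value, "main"),
        // With strict mode disabled the check would simply return Ok(())
        other => unreachable!("unexpected hierarchy state: {other:?}"),
    }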
-	// change will record the change in the changefeed if enabled.
-	// To actually persist the record changes into the underlying kvs,
-	// you must call the `complete_changes` function and then commit the transaction.
-	pub(crate) fn clear_cache(&mut self) {
+	/// Clears all keys from the transaction cache.
+	#[inline(always)]
+	pub fn clear(&self) {
 		self.cache.clear()
 	}

-	// change will record the change in the changefeed if enabled.
-	// To actually persist the record changes into the underlying kvs,
-	// you must call the `complete_changes` function and then commit the transaction.
-	#[allow(clippy::too_many_arguments)]
-	pub(crate) fn record_change(
-		&mut self,
-		ns: &str,
-		db: &str,
-		tb: &str,
-		id: &Thing,
-		previous: Cow<'_, Value>,
-		current: Cow<'_, Value>,
-		store_difference: bool,
-	) {
-		self.cf.record_cf_change(ns, db, tb, id.clone(), previous, current, store_difference)
-	}
-
-	// Records the table (re)definition in the changefeed if enabled.
-	pub(crate) fn record_table_change(
-		&mut self,
-		ns: &str,
-		db: &str,
-		tb: &str,
-		dt: &DefineTableStatement,
-	) {
-		self.cf.define_table(ns, db, tb, dt)
-	}
-
-	pub(crate) async fn get_idg(&mut self, key: Key) -> Result<U32, Error> {
-		let seq = if let Some(e) = self.cache.get(&key) {
-			if let Entry::Seq(v) = e {
-				v
-			} else {
-				unreachable!();
-			}
-		} else {
-			let val = self.get(key.clone()).await?;
-			if let Some(val) = val {
-				U32::new(key.clone(), Some(val)).await?
-			} else {
-				U32::new(key.clone(), None).await?
-			}
-		};
-
-		Ok(seq)
-	}
-
-	// get_next_db_id will get the next db id for the given namespace.
-	pub(crate) async fn get_next_db_id(&mut self, ns: u32) -> Result<u32, Error> {
-		let key = crate::key::namespace::di::new(ns).encode().unwrap();
-		let mut seq = if let Some(e) = self.cache.get(&key) {
-			if let Entry::Seq(v) = e {
-				v
-			} else {
-				unreachable!();
-			}
-		} else {
-			let val = self.get(key.clone()).await?;
-			if let Some(val) = val {
-				U32::new(key.clone(), Some(val)).await?
-			} else {
-				U32::new(key.clone(), None).await?
-			}
-		};
-
-		let id = seq.get_next_id();
-
-		self.cache.set(key.clone(), Entry::Seq(seq.clone()));
-		let (k, v) = seq.finish().unwrap();
-		self.set(k, v).await?;
-
-		Ok(id)
-	}
-
-	// remove_db_id removes the given db id from the sequence.
-	#[allow(unused)]
-	pub(crate) async fn remove_db_id(&mut self, ns: u32, db: u32) -> Result<(), Error> {
-		let key = crate::key::namespace::di::new(ns).encode().unwrap();
-		let mut seq = self.get_idg(key.clone()).await?;
-
-		seq.remove_id(db);
-
-		self.cache.set(key.clone(), Entry::Seq(seq.clone()));
-		let (k, v) = seq.finish().unwrap();
-		self.set(k, v).await?;
-
-		Ok(())
-	}
-
-	// get_next_tb_id will get the next tb id for the given namespace and database.
-	pub(crate) async fn get_next_tb_id(&mut self, ns: u32, db: u32) -> Result<u32, Error> {
-		let key = crate::key::database::ti::new(ns, db).encode().unwrap();
-		let mut seq = self.get_idg(key.clone()).await?;
-
-		let id = seq.get_next_id();
-
-		self.cache.set(key.clone(), Entry::Seq(seq.clone()));
-		let (k, v) = seq.finish().unwrap();
-		self.set(k, v).await?;
-
-		Ok(id)
-	}
-
-	// remove_tb_id removes the given tb id from the sequence.
-	#[allow(unused)]
-	pub(crate) async fn remove_tb_id(&mut self, ns: u32, db: u32, tb: u32) -> Result<(), Error> {
-		let key = crate::key::database::ti::new(ns, db).encode().unwrap();
-		let mut seq = self.get_idg(key.clone()).await?;
-
-		seq.remove_id(tb);
-
-		self.cache.set(key.clone(), Entry::Seq(seq.clone()));
-		let (k, v) = seq.finish().unwrap();
-		self.set(k, v).await?;
-
-		Ok(())
-	}
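Each of the removed id-sequence helpers repeats the same load-or-create, take-an-id, write-back pattern around the `U32` sequence type; condensed into a single sketch (cache handling via `Entry::Seq` elided, and the helper name is hypothetical):

    async fn next_id(tx: &mut Transaction, key: Key) -> Result<u32, Error> {
        // Load the existing sequence state from the datastore, or start a new one
        let mut seq = match tx.get(key.clone()).await? {
            Some(val) => U32::new(key.clone(), Some(val)).await?,
            None => U32::new(key, None).await?,
        };
        // Claim the next available id
        let id = seq.get_next_id();
        // Persist the updated sequence state back to the datastore
        let (k, v) = seq.finish().unwrap();
        tx.set(k, v).await?;
        Ok(id)
    }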
-	// get_next_ns_id will get the next ns id.
-	pub(crate) async fn get_next_ns_id(&mut self) -> Result<u32, Error> {
-		let key = crate::key::root::ni::Ni::default().encode().unwrap();
-		let mut seq = if let Some(e) = self.cache.get(&key) {
-			if let Entry::Seq(v) = e {
-				v
-			} else {
-				unreachable!();
-			}
-		} else {
-			let val = self.get(key.clone()).await?;
-			if let Some(val) = val {
-				U32::new(key.clone(), Some(val)).await?
-			} else {
-				U32::new(key.clone(), None).await?
-			}
-		};
-
-		let id = seq.get_next_id();
-
-		self.cache.set(key.clone(), Entry::Seq(seq.clone()));
-		let (k, v) = seq.finish().unwrap();
-		self.set(k, v).await?;
-
-		Ok(id)
-	}
-
-	// remove_ns_id removes the given ns id from the sequence.
-	#[allow(unused)]
-	pub(crate) async fn remove_ns_id(&mut self, ns: u32) -> Result<(), Error> {
-		let key = crate::key::root::ni::Ni::default().encode().unwrap();
-		let mut seq = self.get_idg(key.clone()).await?;
-
-		seq.remove_id(ns);
-
-		self.cache.set(key.clone(), Entry::Seq(seq.clone()));
-		let (k, v) = seq.finish().unwrap();
-		self.set(k, v).await?;
-
-		Ok(())
-	}
-
-	// complete_changes will complete the changefeed recording for the given namespace and database.
-	//
-	// Under the hood, this function calls the transaction's `set_versionstamped_key` for each change.
-	// Every change must be recorded by calling this struct's `record_change` function beforehand.
-	// If there were no preceding `record_change` function calls for this transaction, this function will do nothing.
-	//
-	// This function should be called only after all the changes have been made to the transaction.
-	// Otherwise, changes are missed in the change feed.
-	//
-	// This function should be called immediately before calling the commit function to guarantee that
-	// the lock, if needed by lock=true, is held only for the duration of the commit, not the entire transaction.
-	//
-	// This function is here because it needs access to mutably borrow the transaction.
-	//
-	// Lastly, you should set lock=true if you want the changefeed to be correctly ordered for
-	// non-FDB backends.
-	pub(crate) async fn complete_changes(&mut self, _lock: bool) -> Result<(), Error> {
-		let changes = self.cf.get();
-		for (tskey, prefix, suffix, v) in changes {
-			self.set_versionstamped_key(tskey, prefix, suffix, v).await?
-		}
-		Ok(())
-	}
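The contract described in these comments, for the changefeed API being removed here, amounts to a fixed call order; a sketch under that contract (argument values illustrative, crate-internal types assumed):

    use std::borrow::Cow;

    async fn write_with_changefeed(
        tx: &mut Transaction,
        id: &Thing,
        old: &Value,
        new: &Value,
    ) -> Result<(), Error> {
        // 1. Record each change while the transaction is still being built
        tx.record_change("app", "main", "person", id, Cow::Borrowed(old), Cow::Borrowed(new), false);
        // 2. Turn the recorded changes into versionstamped keys immediately
        //    before committing; lock=true keeps non-FDB backends correctly ordered
        tx.complete_changes(true).await?;
        // 3. Commit, so any advisory lock is held only for the commit itself
        tx.commit().await
    }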
-	// set_timestamp_for_versionstamp correlates the given timestamp with the current versionstamp.
-	// This allows get_versionstamp_from_timestamp to obtain the versionstamp from the timestamp later.
-	pub(crate) async fn set_timestamp_for_versionstamp(
-		&mut self,
-		ts: u64,
-		ns: &str,
-		db: &str,
-		lock: bool,
-	) -> Result<Versionstamp, Error> {
-		// This also works as an advisory lock on the ts keys so that there are
-		// no other concurrent transactions that can write to the ts_key or the keys after it.
-		let vs = self.get_timestamp(crate::key::database::vs::new(ns, db), lock).await?;
-		#[cfg(debug_assertions)]
-		trace!(
-			"Setting timestamp {} for versionstamp {:?} in ns: {}, db: {}",
-			ts,
-			crate::vs::conv::versionstamp_to_u64(&vs),
-			ns,
-			db
-		);
-
-		// Ensure there are no keys after the ts_key
-		// Otherwise we can go back in time!
-		let ts_key = crate::key::database::ts::new(ns, db, ts);
-		let begin = ts_key.encode()?;
-		let end = crate::key::database::ts::suffix(ns, db);
-		let ts_pairs: Vec<(Vec<u8>, Vec<u8>)> = self.getr(begin..end, u32::MAX).await?;
-		let latest_ts_pair = ts_pairs.last();
-		if let Some((k, _)) = latest_ts_pair {
-			#[cfg(debug_assertions)]
-			trace!(
-				"There already was a greater committed timestamp {} in ns: {}, db: {} found: {}",
-				ts,
-				ns,
-				db,
-				sprint_key(k)
-			);
-			let k = crate::key::database::ts::Ts::decode(k)?;
-			let latest_ts = k.ts;
-			if latest_ts >= ts {
-				return Err(Error::Internal(
-					"ts is less than or equal to the latest ts".to_string(),
-				));
-			}
-		}
-		self.set(ts_key, vs).await?;
-		Ok(vs)
-	}
-
-	pub(crate) async fn get_versionstamp_from_timestamp(
-		&mut self,
-		ts: u64,
-		ns: &str,
-		db: &str,
-		_lock: bool,
-	) -> Result<Option<Versionstamp>, Error> {
-		let start = crate::key::database::ts::prefix(ns, db);
-		let ts_key = crate::key::database::ts::new(ns, db, ts + 1);
-		let end = ts_key.encode()?;
-		let ts_pairs = self.getr(start..end, u32::MAX).await?;
-		let latest_ts_pair = ts_pairs.last();
-		if let Some((_, v)) = latest_ts_pair {
-			if v.len() == 10 {
-				let mut sl = [0u8; 10];
-				sl.copy_from_slice(v);
-				return Ok(Some(sl));
-			} else {
-				return Err(Error::Internal("versionstamp is not 10 bytes".to_string()));
-			}
-		}
-		Ok(None)
-	}
-
 	// --------------------------------------------------
 	// Private methods
 	// --------------------------------------------------

-	#[allow(unused_variables)]
-	fn check_level(&mut self, check: Check) {
-		#![allow(unused_variables)]
-		match self {
-			#[cfg(feature = "kv-mem")]
-			Transaction {
-				inner: Inner::Mem(ref mut v),
-				..
-			} => v.check_level(check),
-			#[cfg(feature = "kv-rocksdb")]
-			Transaction {
-				inner: Inner::RocksDB(ref mut v),
-				..
-			} => v.check_level(check),
-			#[cfg(feature = "kv-indxdb")]
-			Transaction {
-				inner: Inner::IndxDB(ref mut v),
-				..
-			} => v.check_level(check),
-			#[cfg(feature = "kv-tikv")]
-			Transaction {
-				inner: Inner::TiKV(ref mut v),
-				..
-			} => v.check_level(check),
-			#[cfg(feature = "kv-fdb")]
-			Transaction {
-				inner: Inner::FoundationDB(ref mut v),
-				..
-			} => v.check_level(check),
-			#[cfg(feature = "kv-surrealkv")]
-			Transaction {
-				inner: Inner::SurrealKV(v),
-				..
-			} => v.set_check_level(check),
-			#[allow(unreachable_patterns)]
-			_ => unreachable!(),
-		}
-	}
-
-	#[cfg(debug_assertions)]
-	#[allow(unused)]
-	#[doc(hidden)]
-	pub async fn print_all(&mut self) {
-		let mut next_page =
-			Some(ScanPage::from(crate::key::root::ns::prefix()..b"\xff\xff\xff".to_vec()));
-		println!("Start print all");
-		while next_page.is_some() {
-			let res = self.scan_paged(next_page.unwrap(), 1000).await.unwrap();
-			for (k, _) in res.values {
-				println!("{}", sprint_key(&k));
+	/// Get or add a namespace with a default configuration, only if we are in dynamic mode.
+	async fn get_or_add_ns_upwards(
+		&self,
+		ns: &str,
+		strict: bool,
+		_upwards: bool,
+	) -> Result<Arc<DefineNamespaceStatement>, Error> {
+		let key = crate::key::root::ns::new(ns);
+		let enc = crate::key::root::ns::new(ns).encode()?;
+		let res = self.cache.get_value_or_guard_async(&enc).await;
+		Ok(match res {
+			// The entry is in the cache
+			Ok(val) => val,
+			// The entry is not in the cache
+			Err(cache) => {
+				// Try to fetch the value from the datastore
+				let res = self.get(&key).await?.ok_or(Error::NsNotFound {
+					value: ns.to_owned(),
+				});
+				// Check whether the value exists in the datastore
+				match res {
+					// Store a new default value in the datastore
+					Err(Error::NsNotFound {
+						..
+ }) if !strict => { + let val = DefineNamespaceStatement { + name: ns.to_owned().into(), + ..Default::default() + }; + let val = { + self.put(&key, &val).await?; + Entry::Any(Arc::new(val)) + }; + let _ = cache.insert(val.clone()); + val + } + // Store the fetched value in the cache + Ok(val) => { + let val: DefineNamespaceStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + // Throw any received errors + Err(err) => Err(err)?, + } } - next_page = res.next_page; } - println!("End print all"); - } -} - -#[cfg(test)] -#[cfg(feature = "kv-mem")] -mod tests { - use crate::key::database::all::All; - use crate::key::database::tb::Tb; - use crate::{ - kvs::{Datastore, LockType::*, TransactionType::*}, - sql::{statements::DefineUserStatement, Base}, - }; - - #[tokio::test] - async fn test_get_root_user() { - let ds = Datastore::new("memory").await.unwrap(); - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - - // Retrieve non-existent KV user - let res = txn.get_root_user("nonexistent").await; - assert_eq!(res.err().unwrap().to_string(), "The root user 'nonexistent' does not exist"); - - // Create KV user and retrieve it - let data = DefineUserStatement { - name: "user".into(), - base: Base::Root, - ..Default::default() - }; - let key = crate::key::root::us::new("user"); - txn.set(key, data.to_owned()).await.unwrap(); - let res = txn.get_root_user("user").await.unwrap(); - assert_eq!(res, data); - txn.commit().await.unwrap() + .into_type()) } - #[tokio::test] - async fn test_get_ns_user() { - let ds = Datastore::new("memory").await.unwrap(); - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - - // Retrieve non-existent NS user - let res = txn.get_ns_user("ns", "nonexistent").await; - assert_eq!( - res.err().unwrap().to_string(), - "The user 'nonexistent' does not exist in the namespace 'ns'" - ); - - // Create NS user and retrieve it - let data = DefineUserStatement { - name: "user".into(), - base: Base::Ns, - ..Default::default() - }; - - let key = crate::key::namespace::us::new("ns", "user"); - txn.set(key, data.to_owned()).await.unwrap(); - let res = txn.get_ns_user("ns", "user").await.unwrap(); - assert_eq!(res, data); - txn.commit().await.unwrap(); - } - - #[tokio::test] - async fn test_get_db_user() { - let ds = Datastore::new("memory").await.unwrap(); - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - - // Retrieve non-existent DB user - let res = txn.get_db_user("ns", "db", "nonexistent").await; - assert_eq!( - res.err().unwrap().to_string(), - "The user 'nonexistent' does not exist in the database 'db'" - ); - - // Create DB user and retrieve it - let data = DefineUserStatement { - name: "user".into(), - base: Base::Db, - ..Default::default() - }; - - let key = crate::key::database::us::new("ns", "db", "user"); - txn.set(key, data.to_owned()).await.unwrap(); - let res = txn.get_db_user("ns", "db", "user").await.unwrap(); - assert_eq!(res, data); - txn.commit().await.unwrap(); - } - - #[tokio::test] - async fn test_all_root_users() { - let ds = Datastore::new("memory").await.unwrap(); - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - - // When there are no users - let res = txn.all_root_users().await.unwrap(); - assert_eq!(res.len(), 0); - - // When there are users - let data = DefineUserStatement { - name: "user".into(), - base: Base::Root, - ..Default::default() - }; - - let key1 = crate::key::root::us::new("user1"); - let key2 = 
crate::key::root::us::new("user2"); - txn.set(key1, data.to_owned()).await.unwrap(); - txn.set(key2, data.to_owned()).await.unwrap(); - let res = txn.all_root_users().await.unwrap(); - - assert_eq!(res.len(), 2); - assert_eq!(res[0], data); - txn.commit().await.unwrap(); - } - - #[tokio::test] - async fn test_all_ns_users() { - let ds = Datastore::new("memory").await.unwrap(); - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - - // When there are no users - let res = txn.all_ns_users("ns").await.unwrap(); - assert_eq!(res.len(), 0); - - // When there are users - let data = DefineUserStatement { - name: "user".into(), - base: Base::Ns, - ..Default::default() - }; - - let key1 = crate::key::namespace::us::new("ns", "user1"); - let key2 = crate::key::namespace::us::new("ns", "user2"); - txn.set(key1, data.to_owned()).await.unwrap(); - txn.set(key2, data.to_owned()).await.unwrap(); - - txn.cache.clear(); - - let res = txn.all_ns_users("ns").await.unwrap(); - - assert_eq!(res.len(), 2); - assert_eq!(res[0], data); - txn.commit().await.unwrap(); - } - - #[tokio::test] - async fn test_all_db_users() { - let ds = Datastore::new("memory").await.unwrap(); - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - - // When there are no users - let res = txn.all_db_users("ns", "db").await.unwrap(); - assert_eq!(res.len(), 0); - - // When there are users - let data = DefineUserStatement { - name: "user".into(), - base: Base::Db, - ..Default::default() - }; - - let key1 = crate::key::database::us::new("ns", "db", "user1"); - let key2 = crate::key::database::us::new("ns", "db", "user2"); - txn.set(key1, data.to_owned()).await.unwrap(); - txn.set(key2, data.to_owned()).await.unwrap(); - - txn.cache.clear(); - - let res = txn.all_db_users("ns", "db").await.unwrap(); - - assert_eq!(res.len(), 2); - assert_eq!(res[0], data); - txn.commit().await.unwrap(); - } - - #[tokio::test] - async fn test_seqs() { - let ds = Datastore::new("memory").await.unwrap(); - - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - let nsid = txn.get_next_ns_id().await.unwrap(); - txn.complete_changes(false).await.unwrap(); - txn.commit().await.unwrap(); - assert_eq!(nsid, 0); - - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - let dbid = txn.get_next_db_id(nsid).await.unwrap(); - txn.complete_changes(false).await.unwrap(); - txn.commit().await.unwrap(); - assert_eq!(dbid, 0); - - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - let tbid1 = txn.get_next_tb_id(nsid, dbid).await.unwrap(); - txn.complete_changes(false).await.unwrap(); - txn.commit().await.unwrap(); - assert_eq!(tbid1, 0); - - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - let tbid2 = txn.get_next_tb_id(nsid, dbid).await.unwrap(); - txn.complete_changes(false).await.unwrap(); - txn.commit().await.unwrap(); - assert_eq!(tbid2, 1); - - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - txn.remove_tb_id(nsid, dbid, tbid1).await.unwrap(); - txn.complete_changes(false).await.unwrap(); - txn.commit().await.unwrap(); - - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - txn.remove_db_id(nsid, dbid).await.unwrap(); - txn.complete_changes(false).await.unwrap(); - txn.commit().await.unwrap(); - - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - txn.remove_ns_id(nsid).await.unwrap(); - txn.complete_changes(false).await.unwrap(); - txn.commit().await.unwrap(); - } - - #[tokio::test] - async fn test_delp() { - let ds = 
Datastore::new("memory").await.unwrap(); - // Create entries - { - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - for i in 0..2500 { - let t = format!("{i}"); - let tb = Tb::new("test", "test", &t); - txn.set(tb, vec![]).await.unwrap(); + /// Get or add a database with a default configuration, only if we are in dynamic mode. + async fn get_or_add_db_upwards( + &self, + ns: &str, + db: &str, + strict: bool, + upwards: bool, + ) -> Result, Error> { + let key = crate::key::namespace::db::new(ns, db); + let enc = crate::key::namespace::db::new(ns, db).encode()?; + let res = self.cache.get_value_or_guard_async(&enc).await; + Ok(match res { + // The entry is in the cache + Ok(val) => val, + // The entry is not in the cache + Err(cache) => { + // Try to fetch the value from the datastore + let res = self.get(&key).await?.ok_or(Error::DbNotFound { + value: db.to_owned(), + }); + // Check whether the value exists in the datastore + match res { + // Store a new default value in the datastore + Err(Error::DbNotFound { + .. + }) if !strict => { + // First ensure that a namespace exists + if upwards { + self.get_or_add_ns_upwards(ns, strict, upwards).await?; + } + // Next, dynamically define the database + let val = DefineDatabaseStatement { + name: db.to_owned().into(), + ..Default::default() + }; + let val = { + self.put(&key, &val).await?; + Entry::Any(Arc::new(val)) + }; + let _ = cache.insert(val.clone()); + val + } + // Check to see that the hierarchy exists + Err(Error::TbNotFound { + value, + }) if strict => { + self.get_ns(ns).await?; + Err(Error::DbNotFound { + value, + })? + } + // Store the fetched value in the cache + Ok(val) => { + let val: DefineDatabaseStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + // Throw any received errors + Err(err) => Err(err)?, + } } - txn.commit().await.unwrap(); } + .into_type()) + } - let beg = crate::key::database::tb::prefix("test", "test"); - let end = crate::key::database::tb::suffix("test", "test"); - let rng = beg..end; - - // Check we have the table keys - { - let mut txn = ds.transaction(Read, Optimistic).await.unwrap(); - let res = txn.getr(rng.clone(), u32::MAX).await.unwrap(); - assert_eq!(res.len(), 2500); - } - - // Delete using the prefix - { - let mut txn = ds.transaction(Write, Optimistic).await.unwrap(); - let all = All::new("test", "test"); - txn.delp(all, u32::MAX).await.unwrap(); - txn.commit().await.unwrap(); - } - - // Check we don't have any table key anymore - { - let mut txn = ds.transaction(Read, Optimistic).await.unwrap(); - let res = txn.getr(rng, u32::MAX).await.unwrap(); - assert_eq!(res.len(), 0); + /// Get or add a table with a default configuration, only if we are in dynamic mode. + async fn get_or_add_tb_upwards( + &self, + ns: &str, + db: &str, + tb: &str, + strict: bool, + upwards: bool, + ) -> Result, Error> { + let key = crate::key::database::tb::new(ns, db, tb); + let enc = crate::key::database::tb::new(ns, db, tb).encode()?; + let res = self.cache.get_value_or_guard_async(&enc).await; + Ok(match res { + // The entry is in the cache + Ok(val) => val, + // The entry is not in the cache + Err(cache) => { + // Try to fetch the value from the datastore + let res = self.get(&key).await?.ok_or(Error::TbNotFound { + value: tb.to_owned(), + }); + // Check whether the value exists in the datastore + match res { + // Store a new default value in the datastore + Err(Error::TbNotFound { + .. 
+ }) if !strict => { + // First ensure that a database exists + if upwards { + self.get_or_add_db_upwards(ns, db, strict, upwards).await?; + } + // Next, dynamically define the table + let val = DefineTableStatement { + name: tb.to_owned().into(), + permissions: Permissions::none(), + ..Default::default() + }; + let val = { + self.put(&key, &val).await?; + Entry::Any(Arc::new(val)) + }; + let _ = cache.insert(val.clone()); + val + } + // Check to see that the hierarchy exists + Err(Error::TbNotFound { + value, + }) if strict => { + self.get_ns(ns).await?; + self.get_db(ns, db).await?; + Err(Error::TbNotFound { + value, + })? + } + // Store the fetched value in the cache + Ok(val) => { + let val: DefineTableStatement = val.into(); + let val = Entry::Any(Arc::new(val)); + let _ = cache.insert(val.clone()); + val + } + // Throw any received errors + Err(err) => Err(err)?, + } + } } - } -} - -#[cfg(all(test, feature = "kv-mem"))] -mod tx_test { - use crate::kvs::lq_structs::{LqEntry, TrackedResult}; - use crate::kvs::Datastore; - use crate::kvs::LockType::Optimistic; - use crate::kvs::TransactionType::Write; - use crate::sql; - use crate::sql::statements::LiveStatement; - use crate::sql::Value; - - #[tokio::test] - pub async fn lqs_can_be_submitted_and_read() { - let ds = Datastore::new("memory").await.unwrap(); - let mut tx = ds.transaction(Write, Optimistic).await.unwrap(); - - // Create live query data - let node_id = uuid::uuid!("d2715187-9d1a-49a5-9b0a-b496035b6c21"); - let lq_entry = LqEntry { - live_id: sql::Uuid::new_v4(), - ns: "namespace".to_string(), - db: "database".to_string(), - stm: LiveStatement { - id: sql::Uuid::new_v4(), - node: sql::uuid::Uuid(node_id), - expr: Default::default(), - what: Default::default(), - cond: None, - fetch: None, - archived: None, - session: Some(Value::None), - auth: None, - }, - }; - tx.pre_commit_register_async_event(TrackedResult::LiveQuery(lq_entry.clone())).unwrap(); - - tx.commit().await.unwrap(); - - // Verify data - let live_queries = tx.consume_pending_live_queries(); - assert_eq!(live_queries.len(), 1); - assert_eq!(live_queries[0], TrackedResult::LiveQuery(lq_entry)); + .into_type()) } } diff --git a/core/src/mac/mod.rs b/core/src/mac/mod.rs index ca042cb6..f3356520 100644 --- a/core/src/mac/mod.rs +++ b/core/src/mac/mod.rs @@ -1,6 +1,4 @@ /// Converts some text into a new line byte string -#[macro_export] -#[doc(hidden)] macro_rules! bytes { ($expression:expr) => { format!("{}\n", $expression).into_bytes() @@ -8,8 +6,6 @@ macro_rules! bytes { } /// Creates a new b-tree map of key-value pairs -#[macro_export] -#[doc(hidden)] macro_rules! map { ($($k:expr $(, if let $grant:pat = $check:expr)? $(, if $guard:expr)? => $v:expr),* $(,)? $( => $x:expr )?) => {{ let mut m = ::std::collections::BTreeMap::new(); @@ -19,15 +15,62 @@ macro_rules! map { }}; } +/// Extends a b-tree map of key-value pairs +macro_rules! mrg { + ($($m:expr, $x:expr)+) => {{ + $($m.extend($x.iter().map(|(k, v)| (k.clone(), v.clone())));)+ + $($m)+ + }}; +} + /// Matches on a specific config environment -#[macro_export] -#[doc(hidden)] macro_rules! get_cfg { ($i:ident : $($s:expr),+) => ( let $i = || { $( if cfg!($i=$s) { return $s; } );+ "unknown"}; ) } +/// Runs a method on a transaction, ensuring that the transaction +/// is cancelled and rolled back if the initial function fails. 
+/// This can be used to ensure that the use of the `?` operator to +/// fail fast and return an error from a function does not leave +/// a transaction in an uncommitted state without rolling back. +macro_rules! catch { + ($txn:ident, $default:expr) => { + match $default.await { + Err(e) => { + let _ = $txn.cancel().await; + return Err(e); + } + Ok(v) => v, + } + }; +} + +/// Runs a method on a transaction, ensuring that the transaction +/// is cancelled and rolled back if the initial function fails, or +/// committed successfully if the initial function succeeds. This +/// can be used to ensure that the use of the `?` operator to fail +/// fast and return an error from a function does not leave a +/// transaction in an uncommitted state without rolling back. +macro_rules! run { + ($txn:ident, $default:expr) => { + match $default.await { + Err(e) => { + let _ = $txn.cancel().await; + Err(e) + } + Ok(v) => match $txn.commit().await { + Err(e) => { + let _ = $txn.cancel().await; + Err(e) + } + Ok(_) => Ok(v), + }, + } + }; +} + /// A macro that allows lazily parsing a value from the environment variable, /// with a fallback default value if the variable is not set or parsing fails. /// @@ -77,7 +120,6 @@ macro_rules! lazy_env_parse_or_else { } #[cfg(test)] -#[macro_export] macro_rules! async_defer{ (let $bind:ident = ($capture:expr) defer { $($d:tt)* } after { $($t:tt)* }) => { async { diff --git a/core/src/rpc/rpc_context.rs b/core/src/rpc/rpc_context.rs index 95fe79c8..f993ed95 100644 --- a/core/src/rpc/rpc_context.rs +++ b/core/src/rpc/rpc_context.rs @@ -11,13 +11,6 @@ use crate::{ use super::{method::Method, response::Data, rpc_error::RpcError}; -macro_rules! mrg { - ($($m:expr, $x:expr)+) => {{ - $($m.extend($x.iter().map(|(k, v)| (k.clone(), v.clone())));)+ - $($m)+ - }}; -} - #[allow(async_fn_in_trait)] pub trait RpcContext { fn kvs(&self) -> &Datastore; diff --git a/core/src/sql/function.rs b/core/src/sql/function.rs index 5e504600..c39fa187 100644 --- a/core/src/sql/function.rs +++ b/core/src/sql/function.rs @@ -210,14 +210,7 @@ impl Function { // Check this function is allowed ctx.check_allowed_function(name.as_str())?; // Get the function definition - let val = { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Get the function definition - let val = run.get_and_cache_db_function(opt.ns()?, opt.db()?, s).await?; - drop(run); - val - }; + let val = ctx.tx().get_db_function(opt.ns()?, opt.db()?, s).await?; // Check permissions if opt.check_perms(Action::View)? { match &val.permissions { diff --git a/core/src/sql/kind.rs b/core/src/sql/kind.rs index 03c7e33c..17434359 100644 --- a/core/src/sql/kind.rs +++ b/core/src/sql/kind.rs @@ -38,10 +38,16 @@ impl Default for Kind { } impl Kind { - fn is_any(&self) -> bool { + // Returns true if this type is an `any` + pub(crate) fn is_any(&self) -> bool { matches!(self, Kind::Any) } + // Returns true if this type is a record + pub(crate) fn is_record(&self) -> bool { + matches!(self, Kind::Record(_)) + } + // return the kind of the contained value. // // For example: for `array` or `set` this returns `number`. 
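A sketch of how these two macros are meant to wrap fallible transaction logic inside the crate (`do_work` is a hypothetical helper; the datastore and transaction calls are the API introduced in this patch):

    use crate::err::Error;
    use crate::kvs::{Datastore, Transaction, LockType::Optimistic, TransactionType::Write};
    use crate::sql::Value;

    async fn do_work(txn: &Transaction) -> Result<Value, Error> {
        // ... any fallible reads or writes against the transaction ...
        Ok(Value::None)
    }

    async fn apply(ds: &Datastore) -> Result<Value, Error> {
        let txn = ds.transaction(Write, Optimistic).await?;
        // catch! cancels the transaction and returns early if this step fails
        let _interim = catch!(txn, do_work(&txn));
        // run! cancels on failure, or commits and returns the value on success
        run!(txn, do_work(&txn))
    }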
diff --git a/core/src/sql/model.rs b/core/src/sql/model.rs index d9d3b92c..d285e034 100644 --- a/core/src/sql/model.rs +++ b/core/src/sql/model.rs @@ -66,15 +66,7 @@ impl Model { // Check this function is allowed ctx.check_allowed_function(name.as_str())?; // Get the model definition - let val = { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Get the function definition - let val = - run.get_and_cache_db_model(opt.ns()?, opt.db()?, &self.name, &self.version).await?; - drop(run); - val - }; + let val = ctx.tx().get_db_model(opt.ns()?, opt.db()?, &self.name, &self.version).await?; // Calculate the model path let path = format!( "ml/{}/{}/{}-{}-{}.surml", diff --git a/core/src/sql/param.rs b/core/src/sql/param.rs index a3a86995..8e337e65 100644 --- a/core/src/sql/param.rs +++ b/core/src/sql/param.rs @@ -69,12 +69,10 @@ impl Param { Some(v) => v.compute(stk, ctx, opt, doc).await, // The param has not been set locally None => { - let val = { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Get the param definition - run.get_and_cache_db_param(opt.ns()?, opt.db()?, v).await - }; + // Ensure a database is set + opt.valid_for_db()?; + // Fetch a defined param if set + let val = ctx.tx().get_db_param(opt.ns()?, opt.db()?, v).await; // Check if the param has been set globally match val { // The param has been set globally @@ -104,7 +102,11 @@ impl Param { val.value.compute(stk, ctx, opt, doc).await } // The param has not been set globally - Err(_) => Ok(Value::None), + Err(Error::PaNotFound { + .. + }) => Ok(Value::None), + // There was another request error + Err(e) => Err(e), } } }, diff --git a/core/src/sql/permission.rs b/core/src/sql/permission.rs index 79f08d11..75beaa59 100644 --- a/core/src/sql/permission.rs +++ b/core/src/sql/permission.rs @@ -144,21 +144,16 @@ impl PermissionKind { } #[revisioned(revision = 1)] -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash)] +#[derive(Clone, Debug, Default, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[non_exhaustive] pub enum Permission { None, + #[default] Full, Specific(Value), } -impl Default for Permission { - fn default() -> Self { - Self::Full - } -} - impl Permission { pub fn is_none(&self) -> bool { matches!(self, Permission::None) diff --git a/core/src/sql/statements/analyze.rs b/core/src/sql/statements/analyze.rs index 07bacd1c..66b8e3c8 100644 --- a/core/src/sql/statements/analyze.rs +++ b/core/src/sql/statements/analyze.rs @@ -38,13 +38,8 @@ impl AnalyzeStatement { // Allowed to run? 
opt.is_allowed(Action::View, ResourceKind::Index, &Base::Db)?; // Read the index - let ix = ctx - .tx_lock() - .await - .get_and_cache_tb_index(opt.ns()?, opt.db()?, tb, idx) - .await?; + let ix = ctx.tx().get_tb_index(opt.ns()?, opt.db()?, tb, idx).await?; let ikb = IndexKeyBase::new(opt.ns()?, opt.db()?, &ix)?; - // Index operation dispatching let value: Value = match &ix.index { Index::Search(p) => { @@ -54,16 +49,16 @@ impl AnalyzeStatement { ft.statistics(ctx).await?.into() } Index::MTree(p) => { - let mut tx = ctx.tx_lock().await; + let tx = ctx.tx(); let mt = MTreeIndex::new( ctx.get_index_stores(), - &mut tx, + &tx, ikb, p, TransactionType::Read, ) .await?; - mt.statistics(&mut tx).await?.into() + mt.statistics(&tx).await?.into() } _ => { return Err(Error::FeatureNotYetImplemented { diff --git a/core/src/sql/statements/define/access.rs b/core/src/sql/statements/define/access.rs index bf4370f1..c970ab98 100644 --- a/core/src/sql/statements/define/access.rs +++ b/core/src/sql/statements/define/access.rs @@ -57,16 +57,15 @@ impl DefineAccessStatement { opt: &Options, _doc: Option<&CursorDoc<'_>>, ) -> Result { + // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Actor, &self.base)?; - + // Check the statement type match &self.base { Base::Root => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Fetch the transaction + let txn = ctx.tx(); // Check if access method already exists - if run.get_root_access(&self.name).await.is_ok() { + if txn.get_root_access(&self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -77,24 +76,25 @@ impl DefineAccessStatement { } // Process the statement let key = crate::key::root::ac::new(&self.name); - run.set( + txn.set( key, DefineAccessStatement { + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } Base::Ns => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if access method already exists - if run.get_ns_access(opt.ns()?, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_ns_access(opt.ns()?, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -106,25 +106,26 @@ impl DefineAccessStatement { } // Process the statement let key = crate::key::namespace::ac::new(opt.ns()?, &self.name); - run.add_ns(opt.ns()?, opt.strict).await?; - run.set( + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.set( key, DefineAccessStatement { + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } Base::Db => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if access method already exists - if run.get_db_access(opt.ns()?, opt.db()?, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_db_access(opt.ns()?, opt.db()?, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -137,16 +138,19 @@ impl DefineAccessStatement { } // Process the statement let key = crate::key::database::ac::new(opt.ns()?, opt.db()?, &self.name); - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, 
opt.db()?, opt.strict).await?; - run.set( + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; + txn.set( key, DefineAccessStatement { + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/define/analyzer.rs b/core/src/sql/statements/define/analyzer.rs index be1185e4..de2fa16f 100644 --- a/core/src/sql/statements/define/analyzer.rs +++ b/core/src/sql/statements/define/analyzer.rs @@ -34,12 +34,10 @@ impl DefineAnalyzerStatement { ) -> Result { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Analyzer, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if analyzer already exists - if run.get_db_analyzer(opt.ns()?, opt.db()?, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_db_analyzer(opt.ns()?, opt.db()?, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -50,21 +48,20 @@ impl DefineAnalyzerStatement { } // Process the statement let key = crate::key::database::az::new(opt.ns()?, opt.db()?, &self.name); - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; - // Persist the definition - run.set( + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; + txn.set( key, DefineAnalyzerStatement { - // Don't persist the "IF NOT EXISTS" clause to schema + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; - // Release the transaction - drop(run); // Do we really need this? - // Ok all good + // Clear the cache + txn.clear(); + // Ok all good Ok(Value::None) } } diff --git a/core/src/sql/statements/define/database.rs b/core/src/sql/statements/define/database.rs index 3e567bfa..8d08134f 100644 --- a/core/src/sql/statements/define/database.rs +++ b/core/src/sql/statements/define/database.rs @@ -33,12 +33,10 @@ impl DefineDatabaseStatement { ) -> Result { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Database, &Base::Ns)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if database already exists - if run.get_db(opt.ns()?, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_db(opt.ns()?, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -49,27 +47,23 @@ impl DefineDatabaseStatement { } // Process the statement let key = crate::key::namespace::db::new(opt.ns()?, &self.name); - let ns = run.add_ns(opt.ns()?, opt.strict).await?; - // Set the id - if self.id.is_none() && ns.id.is_some() { - // Set the id - let db = DefineDatabaseStatement { - id: Some(run.get_next_db_id(ns.id.unwrap()).await?), + let ns = txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.set( + key, + DefineDatabaseStatement { + id: if self.id.is_none() && ns.id.is_some() { + Some(txn.lock().await.get_next_db_id(ns.id.unwrap()).await?) 
+ } else { + None + }, + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() - }; - - run.set(key, db).await?; - } else { - run.set( - key, - DefineDatabaseStatement { - if_not_exists: false, - ..self.clone() - }, - ) - .await?; - } + }, + ) + .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/define/event.rs b/core/src/sql/statements/define/event.rs index aa117efa..45a76e77 100644 --- a/core/src/sql/statements/define/event.rs +++ b/core/src/sql/statements/define/event.rs @@ -34,12 +34,10 @@ impl DefineEventStatement { ) -> Result { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Event, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if event already exists - if run.get_tb_event(opt.ns()?, opt.db()?, &self.what, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_tb_event(opt.ns()?, opt.db()?, &self.what, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -50,20 +48,20 @@ impl DefineEventStatement { } // Process the statement let key = crate::key::table::ev::new(opt.ns()?, opt.db()?, &self.what, &self.name); - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; - run.add_tb(opt.ns()?, opt.db()?, &self.what, opt.strict).await?; - run.set( + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; + txn.get_or_add_tb(opt.ns()?, opt.db()?, &self.what, opt.strict).await?; + txn.set( key, DefineEventStatement { + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; // Clear the cache - let key = crate::key::table::ev::prefix(opt.ns()?, opt.db()?, &self.what); - run.clr(key).await?; + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/define/field.rs b/core/src/sql/statements/define/field.rs index a8b344a5..9a5b0447 100644 --- a/core/src/sql/statements/define/field.rs +++ b/core/src/sql/statements/define/field.rs @@ -44,13 +44,15 @@ impl DefineFieldStatement { ) -> Result { // Allowed to run? 
opt.is_allowed(Action::Edit, ResourceKind::Field, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if field already exists + // Get the NS and DB + let ns = opt.ns()?; + let db = opt.db()?; + // Fetch the transaction + let txn = ctx.tx(); + // Get the name of the field let fd = self.name.to_string(); - if run.get_tb_field(opt.ns()?, opt.db()?, &self.what, &fd).await.is_ok() { + // Check if the definition exists + if txn.get_tb_field(ns, db, &self.what, &fd).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -60,14 +62,14 @@ impl DefineFieldStatement { } } // Process the statement - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; - - let tb = run.add_tb(opt.ns()?, opt.db()?, &self.what, opt.strict).await?; - let key = crate::key::table::fd::new(opt.ns()?, opt.db()?, &self.what, &fd); - run.set( + let key = crate::key::table::fd::new(ns, db, &self.what, &fd); + txn.get_or_add_ns(ns, opt.strict).await?; + txn.get_or_add_db(ns, db, opt.strict).await?; + txn.get_or_add_tb(ns, db, &self.what, opt.strict).await?; + txn.set( key, DefineFieldStatement { + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, @@ -75,7 +77,7 @@ impl DefineFieldStatement { .await?; // find existing field definitions. - let fields = run.all_tb_fields(opt.ns()?, opt.db()?, &self.what).await.ok(); + let fields = txn.all_tb_fields(ns, db, &self.what).await.ok(); // Process possible recursive_definitions. if let Some(mut cur_kind) = self.kind.as_ref().and_then(|x| x.inner_kind()) { @@ -84,10 +86,9 @@ impl DefineFieldStatement { let new_kind = cur_kind.inner_kind(); name.0.push(Part::All); + // Get the name of the field let fd = name.to_string(); - let key = crate::key::table::fd::new(opt.ns()?, opt.db()?, &self.what, &fd); - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; + let key = crate::key::table::fd::new(ns, db, &self.what, &fd); // merge the new definition with possible existing definitions. 
let statement = if let Some(existing) = @@ -108,7 +109,7 @@ impl DefineFieldStatement { } }; - run.set(key, statement).await?; + txn.set(key, statement).await?; if let Some(new_kind) = new_kind { cur_kind = new_kind; @@ -117,52 +118,66 @@ impl DefineFieldStatement { } } } - - let new_tb = match (fd.as_str(), tb.kind.clone(), self.kind.clone()) { - ("in", TableType::Relation(rel), Some(dk)) => { - if !matches!(dk, Kind::Record(_)) { - return Err(Error::Thrown("in field on a relation must be a record".into())); - }; - if rel.from.as_ref() != Some(&dk) { - Some(DefineTableStatement { - kind: TableType::Relation(Relation { - from: Some(dk), - ..rel - }), - ..tb - }) - } else { - None + // If this is an `in` field then check relation definitions + if fd.as_str() == "in" { + // Get the table definition that this field belongs to + let tb = txn.get_tb(ns, db, &self.what).await?; + // The table is marked as TYPE RELATION + if let TableType::Relation(ref relation) = tb.kind { + // Check if a field TYPE has been specified + if let Some(kind) = self.kind.as_ref() { + // The `in` field must be a record type + if !kind.is_record() { + return Err(Error::Thrown( + "in field on a relation must be a record".into(), + )); + } + // Add the TYPE to the DEFINE TABLE statement + if relation.from.as_ref() != self.kind.as_ref() { + let key = crate::key::database::tb::new(ns, db, &self.what); + let val = DefineTableStatement { + kind: TableType::Relation(Relation { + from: self.kind.to_owned(), + ..relation.to_owned() + }), + ..tb.as_ref().to_owned() + }; + txn.set(key, val).await?; + } + } + } + } + // If this is an `out` field then check relation definitions + if fd.as_str() == "out" { + // Get the table definition that this field belongs to + let tb = txn.get_tb(ns, db, &self.what).await?; + // The table is marked as TYPE RELATION + if let TableType::Relation(ref relation) = tb.kind { + // Check if a field TYPE has been specified + if let Some(kind) = self.kind.as_ref() { + // The `out` field must be a record type + if !kind.is_record() { + return Err(Error::Thrown( + "out field on a relation must be a record".into(), + )); + } + // Add the TYPE to the DEFINE TABLE statement + if relation.from.as_ref() != self.kind.as_ref() { + let key = crate::key::database::tb::new(ns, db, &self.what); + let val = DefineTableStatement { + kind: TableType::Relation(Relation { + to: self.kind.to_owned(), + ..relation.to_owned() + }), + ..tb.as_ref().to_owned() + }; + txn.set(key, val).await?; + } } } - ("out", TableType::Relation(rel), Some(dk)) => { - if !matches!(dk, Kind::Record(_)) { - return Err(Error::Thrown("out field on a relation must be a record".into())); - }; - if rel.to.as_ref() != Some(&dk) { - Some(DefineTableStatement { - kind: TableType::Relation(Relation { - to: Some(dk), - ..rel - }), - ..tb - }) - } else { - None - } - } - _ => None, - }; - if let Some(tb) = new_tb { - let key = crate::key::database::tb::new(opt.ns()?, opt.db()?, &self.what); - run.set(key, &tb).await?; - let key = crate::key::table::ft::prefix(opt.ns()?, opt.db()?, &self.what); - run.clr(key).await?; } - // Clear the cache - let key = crate::key::table::fd::prefix(opt.ns()?, opt.db()?, &self.what); - run.clr(key).await?; + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/define/function.rs b/core/src/sql/statements/define/function.rs index 6e3c22cb..43d3bd3d 100644 --- a/core/src/sql/statements/define/function.rs +++ b/core/src/sql/statements/define/function.rs @@ -35,12 +35,10 @@ impl 
DefineFunctionStatement { ) -> Result { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Function, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if function already exists - if run.get_db_function(opt.ns()?, opt.db()?, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_db_function(opt.ns()?, opt.db()?, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -51,17 +49,19 @@ impl DefineFunctionStatement { } // Process the statement let key = crate::key::database::fc::new(opt.ns()?, opt.db()?, &self.name); - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; - run.set( + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; + txn.set( key, DefineFunctionStatement { - // Don't persist the "IF NOT EXISTS" clause to schema + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/define/index.rs b/core/src/sql/statements/define/index.rs index 749f804f..6ab7488d 100644 --- a/core/src/sql/statements/define/index.rs +++ b/core/src/sql/statements/define/index.rs @@ -4,9 +4,8 @@ use crate::doc::CursorDoc; use crate::err::Error; use crate::iam::{Action, ResourceKind}; use crate::sql::statements::info::InfoStructure; -use crate::sql::{ - statements::UpdateStatement, Base, Ident, Idioms, Index, Part, Strand, Value, Values, -}; +use crate::sql::statements::UpdateStatement; +use crate::sql::{Base, Ident, Idioms, Index, Output, Part, Strand, Value, Values}; use derive::Store; use reblessive::tree::Stk; use revision::revisioned; @@ -39,12 +38,10 @@ impl DefineIndexStatement { ) -> Result { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Index, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if index already exists - if run.get_tb_index(opt.ns()?, opt.db()?, &self.what, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_tb_index(opt.ns()?, opt.db()?, &self.what, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -53,17 +50,15 @@ impl DefineIndexStatement { }); } } - // If we are strict, check that the table exists - run.check_ns_db_tb(opt.ns()?, opt.db()?, &self.what, opt.strict).await?; // Does the table exists? - match run.get_and_cache_tb(opt.ns()?, opt.db()?, &self.what).await { + match txn.get_tb(opt.ns()?, opt.db()?, &self.what).await { Ok(db) => { // Are we SchemaFull? 
if db.full { // Check that the fields exists for idiom in self.cols.iter() { if let Some(Part::Field(id)) = idiom.first() { - run.get_tb_field(opt.ns()?, opt.db()?, &self.what, id).await?; + txn.get_tb_field(opt.ns()?, opt.db()?, &self.what, id).await?; } } } @@ -75,34 +70,28 @@ impl DefineIndexStatement { // Any other error should be returned Err(e) => return Err(e), } - // Process the statement let key = crate::key::table::ix::new(opt.ns()?, opt.db()?, &self.what, &self.name); - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; - run.add_tb(opt.ns()?, opt.db()?, &self.what, opt.strict).await?; - run.set( + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; + txn.get_or_add_tb(opt.ns()?, opt.db()?, &self.what, opt.strict).await?; + txn.set( key, DefineIndexStatement { - // Don't persist the "IF NOT EXISTS" clause to schema + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; - // Remove the index data - let key = crate::key::index::all::new(opt.ns()?, opt.db()?, &self.what, &self.name); - run.delp(key, u32::MAX).await?; // Clear the cache - let key = crate::key::table::ix::prefix(opt.ns()?, opt.db()?, &self.what); - run.clr(key).await?; - // Release the transaction - drop(run); + txn.clear(); // Force queries to run let opt = &opt.new_with_force(Force::Index(Arc::new([self.clone()]))); // Update the index data let stm = UpdateStatement { what: Values(vec![Value::Table(self.what.clone().into())]), + output: Some(Output::None), ..UpdateStatement::default() }; stm.compute(stk, ctx, opt, doc).await?; diff --git a/core/src/sql/statements/define/model.rs b/core/src/sql/statements/define/model.rs index 19da435d..2f7439ae 100644 --- a/core/src/sql/statements/define/model.rs +++ b/core/src/sql/statements/define/model.rs @@ -25,6 +25,48 @@ pub struct DefineModelStatement { pub if_not_exists: bool, } +impl DefineModelStatement { + /// Process this type returning a computed simple Value + pub(crate) async fn compute( + &self, + ctx: &Context<'_>, + opt: &Options, + _doc: Option<&CursorDoc<'_>>, + ) -> Result { + // Allowed to run? + opt.is_allowed(Action::Edit, ResourceKind::Model, &Base::Db)?; + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_db_model(opt.ns()?, opt.db()?, &self.name, &self.version).await.is_ok() { + if self.if_not_exists { + return Ok(Value::None); + } else { + return Err(Error::MlAlreadyExists { + value: self.name.to_string(), + }); + } + } + // Process the statement + let key = crate::key::database::ml::new(opt.ns()?, opt.db()?, &self.name, &self.version); + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; + txn.set( + key, + DefineModelStatement { + // Don't persist the `IF NOT EXISTS` clause to schema + if_not_exists: false, + ..self.clone() + }, + ) + .await?; + // Clear the cache + txn.clear(); + // Ok all good + Ok(Value::None) + } +} + impl fmt::Display for DefineModelStatement { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "DEFINE MODEL")?; @@ -46,50 +88,6 @@ impl fmt::Display for DefineModelStatement { } } -impl DefineModelStatement { - /// Process this type returning a computed simple Value - pub(crate) async fn compute( - &self, - ctx: &Context<'_>, - opt: &Options, - _doc: Option<&CursorDoc<'_>>, - ) -> Result { - // Allowed to run? 
- opt.is_allowed(Action::Edit, ResourceKind::Model, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if model already exists - if run.get_db_model(opt.ns()?, opt.db()?, &self.name, &self.version).await.is_ok() { - if self.if_not_exists { - return Ok(Value::None); - } else { - return Err(Error::MlAlreadyExists { - value: self.name.to_string(), - }); - } - } - // Process the statement - let key = crate::key::database::ml::new(opt.ns()?, opt.db()?, &self.name, &self.version); - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; - run.set( - key, - DefineModelStatement { - // Don't persist the "IF NOT EXISTS" clause to schema - if_not_exists: false, - ..self.clone() - }, - ) - .await?; - // Store the model file - // TODO - // Ok all good - Ok(Value::None) - } -} - impl InfoStructure for DefineModelStatement { fn structure(self) -> Value { Value::from(map! { diff --git a/core/src/sql/statements/define/namespace.rs b/core/src/sql/statements/define/namespace.rs index e8067b89..6a23f78f 100644 --- a/core/src/sql/statements/define/namespace.rs +++ b/core/src/sql/statements/define/namespace.rs @@ -32,14 +32,10 @@ impl DefineNamespaceStatement { ) -> Result<Value, Error> { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Namespace, &Base::Root)?; - // Process the statement - let key = crate::key::root::ns::new(&self.name); - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if namespace already exists - if run.get_ns(&self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_ns(&self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -48,24 +44,24 @@ impl DefineNamespaceStatement { }); } } - if self.id.is_none() { - // Set the id - let ns = DefineNamespaceStatement { - id: Some(run.get_next_ns_id().await?), + // Process the statement + let key = crate::key::root::ns::new(&self.name); + txn.set( + key, + DefineNamespaceStatement { + id: if self.id.is_none() { + Some(txn.lock().await.get_next_ns_id().await?) + } else { + None + }, + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() - }; - run.set(key, ns).await?; - } else { - run.set( - key, - DefineNamespaceStatement { - if_not_exists: false, - ..self.clone() - }, - ) - .await?; - } + }, + ) + .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/define/param.rs b/core/src/sql/statements/define/param.rs index ed6464ce..53b2f925 100644 --- a/core/src/sql/statements/define/param.rs +++ b/core/src/sql/statements/define/param.rs @@ -36,12 +36,10 @@ impl DefineParamStatement { ) -> Result<Value, Error> { // Allowed to run?
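The namespace.rs hunk also illustrates the two-level API this patch introduces: plain reads and writes go through the shared `Transaction` handle, while sequence generation still needs exclusive access and therefore passes through an async `lock()` to the inner transactor. A sketch of just the id-assignment step, assuming those semantics:

```rust
// Assign a namespace id only when the statement does not carry one yet.
let id = if self.id.is_none() {
    // Take the inner transactor lock just long enough to bump the sequence,
    // releasing it before the `set` that persists the definition.
    Some(txn.lock().await.get_next_ns_id().await?)
} else {
    // Matches the hunk above: no new id is allocated in this case.
    None
};
```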
opt.is_allowed(Action::Edit, ResourceKind::Parameter, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if param already exists - if run.get_db_param(opt.ns()?, opt.db()?, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_db_param(opt.ns()?, opt.db()?, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -52,19 +50,21 @@ impl DefineParamStatement { } // Process the statement let key = crate::key::database::pa::new(opt.ns()?, opt.db()?, &self.name); - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; - run.set( + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; + txn.set( key, DefineParamStatement { // Compute the param value: self.value.compute(stk, ctx, opt, doc).await?, - // Don't persist the "IF NOT EXISTS" clause to schema + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/define/table.rs b/core/src/sql/statements/define/table.rs index 82b5b7b4..d658bfbf 100644 --- a/core/src/sql/statements/define/table.rs +++ b/core/src/sql/statements/define/table.rs @@ -4,14 +4,14 @@ use crate::dbs::{Force, Options}; use crate::doc::CursorDoc; use crate::err::Error; use crate::iam::{Action, ResourceKind}; +use crate::sql::fmt::{is_pretty, pretty_indent}; +use crate::sql::paths::{IN, OUT}; use crate::sql::statements::info::InfoStructure; use crate::sql::{ - changefeed::ChangeFeed, - fmt::{is_pretty, pretty_indent}, - statements::UpdateStatement, - Base, Ident, Permissions, Strand, Value, Values, View, + changefeed::ChangeFeed, statements::UpdateStatement, Base, Ident, Output, Permissions, Strand, + Value, Values, View, }; -use crate::sql::{Idiom, Kind, Part, TableType}; +use crate::sql::{Idiom, Kind, TableType}; use derive::Store; use reblessive::tree::Stk; use revision::revisioned; @@ -48,12 +48,10 @@ impl DefineTableStatement { ) -> Result<Value, Error> { // Allowed to run?
opt.is_allowed(Action::Edit, ResourceKind::Table, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if table already exists - if run.get_tb(opt.ns()?, opt.db()?, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_tb(opt.ns()?, opt.db()?, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -64,67 +62,69 @@ impl DefineTableStatement { } // Process the statement let key = crate::key::database::tb::new(opt.ns()?, opt.db()?, &self.name); - let ns = run.add_ns(opt.ns()?, opt.strict).await?; - let db = run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; - let dt = if self.id.is_none() && ns.id.is_some() && db.id.is_some() { - DefineTableStatement { - id: Some(run.get_next_tb_id(ns.id.unwrap(), db.id.unwrap()).await?), - if_not_exists: false, - ..self.clone() - } - } else { - DefineTableStatement { - if_not_exists: false, - ..self.clone() - } + let ns = txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + let db = txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; + let dt = DefineTableStatement { + id: if self.id.is_none() && ns.id.is_some() && db.id.is_some() { + Some(txn.lock().await.get_next_tb_id(ns.id.unwrap(), db.id.unwrap()).await?) + } else { + None + }, + // Don't persist the `IF NOT EXISTS` clause to schema + if_not_exists: false, + ..self.clone() }; + txn.set(key, &dt).await?; + // Add table relational fields if let TableType::Relation(rel) = &self.kind { - let tb: &str = &self.name; - let in_kind = rel.from.clone().unwrap_or(Kind::Record(vec![])); - let out_kind = rel.to.clone().unwrap_or(Kind::Record(vec![])); - let in_key = crate::key::table::fd::new(opt.ns()?, opt.db()?, tb, "in"); - let out_key = crate::key::table::fd::new(opt.ns()?, opt.db()?, tb, "out"); - run.set( - in_key, - DefineFieldStatement { - name: Idiom(vec![Part::from("in")]), - what: tb.into(), - kind: Some(in_kind), - ..Default::default() - }, - ) - .await?; - run.set( - out_key, - DefineFieldStatement { - name: Idiom(vec![Part::from("out")]), - what: tb.into(), - kind: Some(out_kind), - ..Default::default() - }, - ) - .await?; + // Set the `in` field as a DEFINE FIELD definition + { + let key = crate::key::table::fd::new(opt.ns()?, opt.db()?, &self.name, "in"); + let val = rel.from.clone().unwrap_or(Kind::Record(vec![])); + txn.set( + key, + DefineFieldStatement { + name: Idiom::from(IN.to_vec()), + what: self.name.to_owned(), + kind: Some(val), + ..Default::default() + }, + ) + .await?; + } + // Set the `out` field as a DEFINE FIELD definition + { + let key = crate::key::table::fd::new(opt.ns()?, opt.db()?, &self.name, "out"); + let val = rel.to.clone().unwrap_or(Kind::Record(vec![])); + txn.set( + key, + DefineFieldStatement { + name: Idiom::from(OUT.to_vec()), + what: self.name.to_owned(), + kind: Some(val), + ..Default::default() + }, + ) + .await?; + } + } + // Clear the cache + txn.clear(); + // Record definition change + if dt.changefeed.is_some() { + txn.lock().await.record_table_change(opt.ns()?, opt.db()?, &self.name, &dt); } - - let tb_key = crate::key::table::fd::prefix(opt.ns()?, opt.db()?, &self.name); - run.clr(tb_key).await?; - run.set(key, &dt).await?; // Check if table is a view if let Some(view) = &self.view { // Remove the table data let key = crate::key::table::all::new(opt.ns()?, opt.db()?, &self.name); - run.delp(key, u32::MAX).await?; + txn.delp(key).await?; // Process each foreign table for v in 
view.what.0.iter() { // Save the view config let key = crate::key::table::ft::new(opt.ns()?, opt.db()?, v, &self.name); - run.set(key, self).await?; - // Clear the cache - let key = crate::key::table::ft::prefix(opt.ns()?, opt.db()?, v); - run.clr(key).await?; + txn.set(key, self).await?; } - // Release the transaction - drop(run); // Force queries to run let opt = &opt.new_with_force(Force::Table(Arc::new([dt]))); // Process each foreign table @@ -132,14 +132,14 @@ impl DefineTableStatement { // Process the view data let stm = UpdateStatement { what: Values(vec![Value::Table(v.clone())]), + output: Some(Output::None), ..UpdateStatement::default() }; stm.compute(stk, ctx, opt, doc).await?; } - } else if dt.changefeed.is_some() { - run.record_table_change(opt.ns()?, opt.db()?, self.name.0.as_str(), &dt); } - + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/define/user.rs b/core/src/sql/statements/define/user.rs index 0003222d..dfe9a27b 100644 --- a/core/src/sql/statements/define/user.rs +++ b/core/src/sql/statements/define/user.rs @@ -105,15 +105,13 @@ impl DefineUserStatement { ) -> Result<Value, Error> { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Actor, &self.base)?; - + // Check the statement type match self.base { Base::Root => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if user already exists - if run.get_root_user(&self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_root_user(&self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -124,25 +122,25 @@ impl DefineUserStatement { } // Process the statement let key = crate::key::root::us::new(&self.name); - run.set( + txn.set( key, DefineUserStatement { - // Don't persist the "IF NOT EXISTS" clause to schema + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } Base::Ns => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if user already exists - if run.get_ns_user(opt.ns()?, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_ns_user(opt.ns()?, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -154,26 +152,26 @@ impl DefineUserStatement { } // Process the statement let key = crate::key::namespace::us::new(opt.ns()?, &self.name); - run.add_ns(opt.ns()?, opt.strict).await?; - run.set( + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.set( key, DefineUserStatement { - // Don't persist the "IF NOT EXISTS" clause to schema + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } Base::Db => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); - // Check if user already exists - if run.get_db_user(opt.ns()?, opt.db()?, &self.name).await.is_ok() { + // Fetch the transaction + let txn = ctx.tx(); + // Check if the definition exists + if txn.get_db_user(opt.ns()?, opt.db()?, &self.name).await.is_ok() { if self.if_not_exists { return Ok(Value::None); } else { @@ -186,17 +184,19 @@ impl DefineUserStatement { } // Process the statement let key =
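For `TYPE RELATION` tables, the table.rs hunk now persists implicit `in` and `out` field definitions, typed as record links constrained to the declared `FROM`/`TO` tables. A condensed equivalent of the two blocks above, folding them into one loop (illustrative only; the patch writes them out separately):

```rust
// Each relation table gets `in` and `out` fields typed as record links.
if let TableType::Relation(rel) = &self.kind {
    for (fd, path, kind) in [
        ("in", IN.to_vec(), rel.from.clone()),
        ("out", OUT.to_vec(), rel.to.clone()),
    ] {
        let key = crate::key::table::fd::new(opt.ns()?, opt.db()?, &self.name, fd);
        txn.set(
            key,
            DefineFieldStatement {
                name: Idiom::from(path),
                what: self.name.to_owned(),
                // Fall back to an unconstrained record link when no
                // FROM/TO table list was declared.
                kind: Some(kind.unwrap_or(Kind::Record(vec![]))),
                ..Default::default()
            },
        )
        .await?;
    }
}
```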
crate::key::database::us::new(opt.ns()?, opt.db()?, &self.name); - run.add_ns(opt.ns()?, opt.strict).await?; - run.add_db(opt.ns()?, opt.db()?, opt.strict).await?; - run.set( + txn.get_or_add_ns(opt.ns()?, opt.strict).await?; + txn.get_or_add_db(opt.ns()?, opt.db()?, opt.strict).await?; + txn.set( key, DefineUserStatement { - // Don't persist the "IF NOT EXISTS" clause to schema + // Don't persist the `IF NOT EXISTS` clause to schema if_not_exists: false, ..self.clone() }, ) .await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/info.rs b/core/src/sql/statements/info.rs index ed7e38f6..c9523037 100644 --- a/core/src/sql/statements/info.rs +++ b/core/src/sql/statements/info.rs @@ -9,6 +9,7 @@ use derive::Store; use revision::revisioned; use serde::{Deserialize, Serialize}; use std::fmt; +use std::sync::Arc; #[revisioned(revision = 2)] #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Store, Hash)] @@ -70,307 +71,238 @@ impl InfoStatement { opt: &Options, _doc: Option<&CursorDoc<'_>>, ) -> Result<Value, Error> { - // Allowed to run? match self { - InfoStatement::Root(false) => { + InfoStatement::Root(structured) => { // Allowed to run? opt.is_allowed(Action::View, ResourceKind::Any, &Base::Root)?; - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); // Create the result set - let mut res = Object::default(); - // Process the namespaces - let mut tmp = Object::default(); - for v in run.all_ns().await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("namespaces".to_owned(), tmp.into()); - // Process the users - let mut tmp = Object::default(); - for v in run.all_root_users().await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("users".to_owned(), tmp.into()); - // Process the accesses - let mut tmp = Object::default(); - for v in run.all_root_accesses_redacted().await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("accesses".to_owned(), tmp.into()); - // Ok all good - Value::from(res).ok() + Ok(match structured { + true => Value::from(map! { + "accesses".to_string() => process(txn.all_root_accesses().await?.iter().map(|v| v.redacted()).collect()), + "namespaces".to_string() => process(txn.all_ns().await?), + "nodes".to_string() => process(txn.all_nodes().await?), + "users".to_string() => process(txn.all_root_users().await?), + }), + false => Value::from(map! { + "accesses".to_string() => { + let mut out = Object::default(); + for v in txn.all_root_accesses().await?.iter().map(|v| v.redacted()) { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "namespaces".to_string() => { + let mut out = Object::default(); + for v in txn.all_ns().await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "nodes".to_string() => { + let mut out = Object::default(); + for v in txn.all_nodes().await?.iter() { + out.insert(v.id.to_string(), v.to_string().into()); + } + out.into() + }, + "users".to_string() => { + let mut out = Object::default(); + for v in txn.all_root_users().await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + }), + }) } - InfoStatement::Ns(false) => { + InfoStatement::Ns(structured) => { // Allowed to run?
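The user.rs hunk repeats the same flow three times because user definitions live at three levels; only the key namespace and the existence getter change between the branches. In outline, using the key paths from the hunks above (a sketch, not the patch's exact control flow):

```rust
// One storage location per level; everything else is identical.
match self.base {
    // Root-level users live under the root key space.
    Base::Root => txn.set(crate::key::root::us::new(&self.name), self.clone()).await?,
    // Namespace-level users are scoped by namespace.
    Base::Ns => txn.set(crate::key::namespace::us::new(opt.ns()?, &self.name), self.clone()).await?,
    // Database-level users are scoped by namespace and database.
    Base::Db => {
        txn.set(crate::key::database::us::new(opt.ns()?, opt.db()?, &self.name), self.clone())
            .await?
    }
    _ => return Err(Error::InvalidLevel(self.base.to_string())),
}
```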
opt.is_allowed(Action::View, ResourceKind::Any, &Base::Ns)?; - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the NS + let ns = opt.ns()?; + // Get the transaction + let txn = ctx.tx(); // Create the result set - let mut res = Object::default(); - // Process the databases - let mut tmp = Object::default(); - for v in run.all_db(opt.ns()?).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("databases".to_owned(), tmp.into()); - // Process the users - let mut tmp = Object::default(); - for v in run.all_ns_users(opt.ns()?).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("users".to_owned(), tmp.into()); - // Process the accesses - let mut tmp = Object::default(); - for v in run.all_ns_accesses_redacted(opt.ns()?).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("accesses".to_owned(), tmp.into()); - // Ok all good - Value::from(res).ok() + Ok(match structured { + true => Value::from(map! { + "accesses".to_string() => process(txn.all_ns_accesses(ns).await?.iter().map(|v| v.redacted()).collect()), + "databases".to_string() => process(txn.all_db(ns).await?), + "users".to_string() => process(txn.all_ns_users(ns).await?), + }), + false => Value::from(map! { + "accesses".to_string() => { + let mut out = Object::default(); + for v in txn.all_ns_accesses(ns).await?.iter().map(|v| v.redacted()) { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "databases".to_string() => { + let mut out = Object::default(); + for v in txn.all_db(ns).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "users".to_string() => { + let mut out = Object::default(); + for v in txn.all_ns_users(ns).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + }), + }) } - InfoStatement::Db(false) => { + InfoStatement::Db(structured) => { // Allowed to run? 
opt.is_allowed(Action::View, ResourceKind::Any, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the NS and DB + let ns = opt.ns()?; + let db = opt.db()?; + // Get the transaction + let txn = ctx.tx(); // Create the result set - let mut res = Object::default(); - // Process the users - let mut tmp = Object::default(); - for v in run.all_db_users(opt.ns()?, opt.db()?).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("users".to_owned(), tmp.into()); - // Process the functions - let mut tmp = Object::default(); - for v in run.all_db_functions(opt.ns()?, opt.db()?).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("functions".to_owned(), tmp.into()); - // Process the models - let mut tmp = Object::default(); - for v in run.all_db_models(opt.ns()?, opt.db()?).await?.iter() { - tmp.insert(format!("{}<{}>", v.name, v.version), v.to_string().into()); - } - res.insert("models".to_owned(), tmp.into()); - // Process the params - let mut tmp = Object::default(); - for v in run.all_db_params(opt.ns()?, opt.db()?).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("params".to_owned(), tmp.into()); - // Process the accesses - let mut tmp = Object::default(); - for v in run.all_db_accesses_redacted(opt.ns()?, opt.db()?).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("accesses".to_owned(), tmp.into()); - // Process the tables - let mut tmp = Object::default(); - for v in run.all_tb(opt.ns()?, opt.db()?).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("tables".to_owned(), tmp.into()); - // Process the analyzers - let mut tmp = Object::default(); - for v in run.all_db_analyzers(opt.ns()?, opt.db()?).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("analyzers".to_owned(), tmp.into()); - // Ok all good - Value::from(res).ok() + Ok(match structured { + true => Value::from(map! { + "accesses".to_string() => process(txn.all_db_accesses(ns, db).await?.iter().map(|v| v.redacted()).collect()), + "analyzers".to_string() => process(txn.all_db_analyzers(ns, db).await?), + "functions".to_string() => process(txn.all_db_functions(ns, db).await?), + "models".to_string() => process(txn.all_db_models(ns, db).await?), + "params".to_string() => process(txn.all_db_params(ns, db).await?), + "tables".to_string() => process(txn.all_tb(ns, db).await?), + "users".to_string() => process(txn.all_db_users(ns, db).await?), + }), + false => Value::from(map! 
{ + "accesses".to_string() => { + let mut out = Object::default(); + for v in txn.all_db_accesses(ns, db).await?.iter().map(|v| v.redacted()) { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "analyzers".to_string() => { + let mut out = Object::default(); + for v in txn.all_db_analyzers(ns, db).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "functions".to_string() => { + let mut out = Object::default(); + for v in txn.all_db_functions(ns, db).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "models".to_string() => { + let mut out = Object::default(); + for v in txn.all_db_models(ns, db).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "params".to_string() => { + let mut out = Object::default(); + for v in txn.all_db_params(ns, db).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "tables".to_string() => { + let mut out = Object::default(); + for v in txn.all_tb(ns, db).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "users".to_string() => { + let mut out = Object::default(); + for v in txn.all_db_users(ns, db).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + }), + }) } - InfoStatement::Tb(tb, false) => { + InfoStatement::Tb(tb, structured) => { // Allowed to run? opt.is_allowed(Action::View, ResourceKind::Any, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the NS and DB + let ns = opt.ns()?; + let db = opt.db()?; + // Get the transaction + let txn = ctx.tx(); // Create the result set - let mut res = Object::default(); - // Process the events - let mut tmp = Object::default(); - for v in run.all_tb_events(opt.ns()?, opt.db()?, tb).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("events".to_owned(), tmp.into()); - // Process the fields - let mut tmp = Object::default(); - for v in run.all_tb_fields(opt.ns()?, opt.db()?, tb).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("fields".to_owned(), tmp.into()); - // Process the tables - let mut tmp = Object::default(); - for v in run.all_tb_views(opt.ns()?, opt.db()?, tb).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("tables".to_owned(), tmp.into()); - // Process the indexes - let mut tmp = Object::default(); - for v in run.all_tb_indexes(opt.ns()?, opt.db()?, tb).await?.iter() { - tmp.insert(v.name.to_string(), v.to_string().into()); - } - res.insert("indexes".to_owned(), tmp.into()); - // Process the live queries - let mut tmp = Object::default(); - for v in run.all_tb_lives(opt.ns()?, opt.db()?, tb).await?.iter() { - tmp.insert(v.id.to_raw(), v.to_string().into()); - } - res.insert("lives".to_owned(), tmp.into()); - // Ok all good - Value::from(res).ok() + Ok(match structured { + true => Value::from(map! { + "events".to_string() => process(txn.all_tb_events(ns, db, tb).await?), + "fields".to_string() => process(txn.all_tb_fields(ns, db, tb).await?), + "indexes".to_string() => process(txn.all_tb_indexes(ns, db, tb).await?), + "lives".to_string() => process(txn.all_tb_lives(ns, db, tb).await?), + "tables".to_string() => process(txn.all_tb_views(ns, db, tb).await?), + }), + false => Value::from(map! 
{ + "events".to_string() => { + let mut out = Object::default(); + for v in txn.all_tb_events(ns, db, tb).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "fields".to_string() => { + let mut out = Object::default(); + for v in txn.all_tb_fields(ns, db, tb).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "indexes".to_string() => { + let mut out = Object::default(); + for v in txn.all_tb_indexes(ns, db, tb).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + "lives".to_string() => { + let mut out = Object::default(); + for v in txn.all_tb_lives(ns, db, tb).await?.iter() { + out.insert(v.id.to_string(), v.to_string().into()); + } + out.into() + }, + "tables".to_string() => { + let mut out = Object::default(); + for v in txn.all_tb_views(ns, db, tb).await?.iter() { + out.insert(v.name.to_string(), v.to_string().into()); + } + out.into() + }, + }), + }) } - InfoStatement::User(user, base, false) => { + InfoStatement::User(user, base, structured) => { + // Get the base type let base = base.clone().unwrap_or(opt.selected_base()?); // Allowed to run? opt.is_allowed(Action::View, ResourceKind::Actor, &base)?; - - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); // Process the user let res = match base { - Base::Root => run.get_root_user(user).await?, - Base::Ns => run.get_ns_user(opt.ns()?, user).await?, - Base::Db => run.get_db_user(opt.ns()?, opt.db()?, user).await?, + Base::Root => txn.get_root_user(user).await?, + Base::Ns => txn.get_ns_user(opt.ns()?, user).await?, + Base::Db => txn.get_db_user(opt.ns()?, opt.db()?, user).await?, _ => return Err(Error::InvalidLevel(base.to_string())), }; // Ok all good - Value::from(res.to_string()).ok() - } - InfoStatement::Root(true) => { - // Allowed to run? - opt.is_allowed(Action::View, ResourceKind::Any, &Base::Root)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Create the result set - let mut res = Object::default(); - // Process the namespaces - res.insert("namespaces".to_owned(), process_arr(run.all_ns().await?)); - // Process the users - res.insert("users".to_owned(), process_arr(run.all_root_users().await?)); - // Ok all good - Value::from(res).ok() - } - InfoStatement::Ns(true) => { - // Allowed to run? - opt.is_allowed(Action::View, ResourceKind::Any, &Base::Ns)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Create the result set - let mut res = Object::default(); - // Process the databases - res.insert("databases".to_owned(), process_arr(run.all_db(opt.ns()?).await?)); - // Process the users - res.insert("users".to_owned(), process_arr(run.all_ns_users(opt.ns()?).await?)); - // Process the accesses - res.insert( - "accesses".to_owned(), - process_arr(run.all_ns_accesses_redacted(opt.ns()?).await?), - ); - // Ok all good - Value::from(res).ok() - } - InfoStatement::Db(true) => { - // Allowed to run? 
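With the `structured` flag folded into each arm, the only duplication left in info.rs is the name-to-string loop in the legacy branches. A small helper along these lines could factor it out; this is a hypothetical refactor for illustration, not part of the patch:

```rust
// Hypothetical helper: collapse "iterate definitions, key by name, render
// to string" into one call (`key` picks the map key, e.g. name or id).
fn as_object<T, F>(items: &[T], key: F) -> Value
where
    T: std::fmt::Display,
    F: Fn(&T) -> String,
{
    let mut out = Object::default();
    for v in items {
        out.insert(key(v), v.to_string().into());
    }
    out.into()
}

// Usage inside a non-structured arm, e.g.:
// "params".to_string() => as_object(&txn.all_db_params(ns, db).await?, |v| v.name.to_string()),
```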
- opt.is_allowed(Action::View, ResourceKind::Any, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Create the result set - let mut res = Object::default(); - // Process the users - res.insert( - "users".to_owned(), - process_arr(run.all_db_users(opt.ns()?, opt.db()?).await?), - ); - // Process the accesses - res.insert( - "accesses".to_owned(), - process_arr(run.all_db_accesses(opt.ns()?, opt.db()?).await?), - ); - // Process the functions - res.insert( - "functions".to_owned(), - process_arr(run.all_db_functions(opt.ns()?, opt.db()?).await?), - ); - // Process the models - res.insert( - "models".to_owned(), - process_arr(run.all_db_models(opt.ns()?, opt.db()?).await?), - ); - // Process the params - res.insert( - "params".to_owned(), - process_arr(run.all_db_params(opt.ns()?, opt.db()?).await?), - ); - // Process the accesses - res.insert( - "accesses".to_owned(), - process_arr(run.all_db_accesses_redacted(opt.ns()?, opt.db()?).await?), - ); - // Process the tables - res.insert( - "tables".to_owned(), - process_arr(run.all_tb(opt.ns()?, opt.db()?).await?), - ); - // Process the analyzers - res.insert( - "analyzers".to_owned(), - process_arr(run.all_db_analyzers(opt.ns()?, opt.db()?).await?), - ); - // Ok all good - Value::from(res).ok() - } - InfoStatement::Tb(tb, true) => { - // Allowed to run? - opt.is_allowed(Action::View, ResourceKind::Any, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Create the result set - let mut res = Object::default(); - // Process the events - res.insert( - "events".to_owned(), - process_arr(run.all_tb_events(opt.ns()?, opt.db()?, tb).await?), - ); - // Process the fields - res.insert( - "fields".to_owned(), - process_arr(run.all_tb_fields(opt.ns()?, opt.db()?, tb).await?), - ); - // Process the tables - res.insert( - "tables".to_owned(), - process_arr(run.all_tb_views(opt.ns()?, opt.db()?, tb).await?), - ); - // Process the indexes - res.insert( - "indexes".to_owned(), - process_arr(run.all_tb_indexes(opt.ns()?, opt.db()?, tb).await?), - ); - // Process the live queries - res.insert( - "lives".to_owned(), - process_arr(run.all_tb_lives(opt.ns()?, opt.db()?, tb).await?), - ); - // Ok all good - Value::from(res).ok() - } - InfoStatement::User(user, base, true) => { - let base = base.clone().unwrap_or(opt.selected_base()?); - // Allowed to run? 
- opt.is_allowed(Action::View, ResourceKind::Actor, &base)?; - - // Claim transaction - let mut run = ctx.tx_lock().await; - // Process the user - let res = match base { - Base::Root => run.get_root_user(user).await?, - Base::Ns => run.get_ns_user(opt.ns()?, user).await?, - Base::Db => run.get_db_user(opt.ns()?, opt.db()?, user).await?, - _ => return Err(Error::InvalidLevel(base.to_string())), - }; - // Ok all good - Ok(res.structure()) + Ok(match structured { + true => res.as_ref().clone().structure(), + false => Value::from(res.to_string()), + }) } } } @@ -399,8 +331,6 @@ impl fmt::Display for InfoStatement { } } -use std::sync::Arc; - pub(crate) trait InfoStructure { fn structure(self) -> Value; } @@ -417,7 +347,7 @@ impl InfoStatement { } } -fn process_arr<T>(a: Arc<[T]>) -> Value +fn process<T>(a: Arc<[T]>) -> Value where T: InfoStructure + Clone, { diff --git a/core/src/sql/statements/kill.rs b/core/src/sql/statements/kill.rs index d0972d0b..0f366494 100644 --- a/core/src/sql/statements/kill.rs +++ b/core/src/sql/statements/kill.rs @@ -1,18 +1,14 @@ -use std::fmt; - -use derive::Store; -use reblessive::tree::Stk; -use revision::revisioned; -use serde::{Deserialize, Serialize}; - use crate::ctx::Context; use crate::dbs::Options; use crate::doc::CursorDoc; use crate::err::Error; -use crate::fflags::FFLAGS; -use crate::kvs::lq_structs::{KillEntry, TrackedResult}; -use crate::sql::Uuid; +use crate::kvs::Live; use crate::sql::Value; +use derive::Store; +use reblessive::tree::Stk; +use revision::revisioned; +use serde::{Deserialize, Serialize}; +use std::fmt; #[revisioned(revision = 1)] #[derive(Clone, Debug, Default, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Store, Hash)] @@ -38,80 +34,40 @@ impl KillStatement { // Valid options? opt.valid_for_db()?; // Resolve live query id - let live_query_id = match &self.id { - Value::Uuid(id) => *id, - Value::Param(param) => match param.compute(stk, ctx, opt, None).await? { - Value::Uuid(id) => id, - Value::Strand(id) => match uuid::Uuid::try_parse(&id) { - Ok(id) => Uuid(id), - _ => { - return Err(Error::KillStatement { - value: - "KILL received a parameter that could not be converted to a UUID" - .to_string(), - }); - } - }, - _ => { - return Err(Error::KillStatement { - value: "KILL received a parameter that was not expected".to_string(), - }); - } - }, - Value::Strand(maybe_id) => match uuid::Uuid::try_parse(maybe_id) { - Ok(id) => Uuid(id), - _ => { - return Err(Error::KillStatement { - value: "KILL received a Strand that could not be converted to a UUID" - .to_string(), - }); - } - }, - _ => { + let lid = match self.id.compute(stk, ctx, opt, None).await?.convert_to_uuid() { + Err(_) => { return Err(Error::KillStatement { - value: "Unhandled type for KILL statement".to_string(), - }); + value: self.id.to_string(), + }) } + Ok(id) => id, }; - // Claim transaction - let mut run = ctx.tx_lock().await; - if FFLAGS.change_feed_live_queries.enabled() { - run.pre_commit_register_async_event(TrackedResult::KillQuery(KillEntry { - live_id: live_query_id, - ns: opt.ns()?.to_string(), - db: opt.db()?.to_string(), - }))?; - } else { - // Fetch the live query key - let key = crate::key::node::lq::new(opt.id()?, live_query_id.0, opt.ns()?, opt.db()?); - // Fetch the live query key if it exists - match run.get(key).await?
{ - Some(val) => match std::str::from_utf8(&val) { - Ok(tb) => { - // Delete the node live query - let key = crate::key::node::lq::new( - opt.id()?, - live_query_id.0, - opt.ns()?, - opt.db()?, - ); - run.del(key).await?; - // Delete the table live query - let key = - crate::key::table::lq::new(opt.ns()?, opt.db()?, tb, live_query_id.0); - run.del(key).await?; - } - _ => { - return Err(Error::KillStatement { - value: self.id.to_string(), - }); - } - }, - None => { - return Err(Error::KillStatement { - value: "KILL statement uuid did not exist".to_string(), - }); - } + // Get the Node ID + let nid = opt.id()?; + // Get the LIVE ID + let lid = lid.0; + // Get the transaction + let txn = ctx.tx(); + // Lock the transaction + let mut txn = txn.lock().await; + // Fetch the live query key + let key = crate::key::node::lq::new(nid, lid); + // Fetch the live query key if it exists + match txn.get(key).await? { + Some(val) => { + // Decode the data for this live query + let val: Live = val.into(); + // Delete the node live query + let key = crate::key::node::lq::new(nid, lid); + txn.del(key).await?; + // Delete the table live query + let key = crate::key::table::lq::new(&val.ns, &val.db, &val.tb, lid); + txn.del(key).await?; + } + None => { + return Err(Error::KillStatement { + value: self.id.to_string(), + }); } } // Return the query id @@ -124,53 +80,3 @@ impl fmt::Display for KillStatement { write!(f, "KILL {}", self.id) } } - -#[cfg(test)] -mod test { - use std::str::FromStr; - - use crate::ctx::Context; - use crate::dbs::Options; - use crate::fflags::FFLAGS; - use crate::kvs::lq_structs::{KillEntry, TrackedResult}; - use crate::kvs::{Datastore, LockType, TransactionType}; - use crate::sql::statements::KillStatement; - use crate::sql::uuid::Uuid; - - #[test_log::test(tokio::test)] - async fn kill_handles_uuid_event_registration() { - if !FFLAGS.change_feed_live_queries.enabled() { - return; - } - let res = KillStatement { - id: Uuid::from_str("8f92f057-c739-4bf2-9d0c-a74d01299efc").unwrap().into(), - }; - let ctx = Context::default(); - let opt = Options::new() - .with_id(uuid::Uuid::from_str("8c41d9f7-a627-40f7-86f5-59d56cd765c6").unwrap()) - .with_live(true) - .with_db(Some("database".into())) - .with_ns(Some("namespace".into())); - let ds = Datastore::new("memory").await.unwrap(); - let tx = - ds.transaction(TransactionType::Write, LockType::Optimistic).await.unwrap().enclose(); - let ctx = ctx.set_transaction(tx.clone()); - - let mut stack = reblessive::tree::TreeStack::new(); - - stack.enter(|stk| res.compute(stk, &ctx, &opt, None)).finish().await.unwrap(); - - let mut tx = tx.lock().await; - tx.commit().await.unwrap(); - - // Validate sent - assert_eq!( - tx.consume_pending_live_queries(), - vec![TrackedResult::KillQuery(KillEntry { - live_id: Uuid::from_str("8f92f057-c739-4bf2-9d0c-a74d01299efc").unwrap(), - ns: "namespace".to_string(), - db: "database".to_string(), - })] - ); - } -} diff --git a/core/src/sql/statements/live.rs b/core/src/sql/statements/live.rs index 9c4146b4..a7fbfa2b 100644 --- a/core/src/sql/statements/live.rs +++ b/core/src/sql/statements/live.rs @@ -1,20 +1,18 @@ use crate::ctx::Context; use crate::dbs::Options; use crate::doc::CursorDoc; -use crate::err::{Error, LiveQueryCause}; -use crate::fflags::FFLAGS; +use crate::err::Error; use crate::iam::Auth; -use crate::kvs::lq_structs::{LqEntry, TrackedResult}; +use crate::kvs::Live; use crate::sql::statements::info::InfoStructure; -use crate::sql::{Cond, Fetchs, Fields, Table, Uuid, Value}; +use crate::sql::{Cond, 
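The kill.rs rewrite above replaces the hand-written type dispatch with a single `compute` plus `convert_to_uuid` call, and it relies on the stored `Live` record rather than encoding the namespace and database into the node key. The resulting flow, condensed from the hunk with commentary:

```rust
// The KILL argument may be a uuid, strand, or param: compute it first,
// then convert, instead of matching on every value type by hand.
let lid = match self.id.compute(stk, ctx, opt, None).await?.convert_to_uuid() {
    Ok(id) => id,
    Err(_) => {
        return Err(Error::KillStatement {
            value: self.id.to_string(),
        })
    }
};
// Both registrations are deleted: the node-level key no longer embeds the
// namespace and database, which instead come from the stored `Live` record.
let txn = ctx.tx();
let mut txn = txn.lock().await;
let key = crate::key::node::lq::new(opt.id()?, lid.0);
if let Some(val) = txn.get(key).await? {
    let live: Live = val.into();
    txn.del(crate::key::node::lq::new(opt.id()?, lid.0)).await?;
    txn.del(crate::key::table::lq::new(&live.ns, &live.db, &live.tb, lid.0)).await?;
}
```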
Fetchs, Fields, Uuid, Value}; use derive::Store; -use futures::lock::MutexGuard; use reblessive::tree::Stk; use revision::revisioned; use serde::{Deserialize, Serialize}; use std::fmt; -#[revisioned(revision = 2)] +#[revisioned(revision = 1)] #[derive(Clone, Debug, Default, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Store, Hash)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[non_exhaustive] @@ -25,26 +23,18 @@ pub struct LiveStatement { pub what: Value, pub cond: Option<Cond>, pub fetch: Option<Fetchs>, - // When a live query is marked for archiving, this will - // be set to the node ID that archived the query. This - // is an internal property, set by the database runtime. - // This is optional, and is only set when archived. - // - // This is deprecated from 2.0 - pub(crate) archived: Option<Uuid>, - // When a live query is created, we must also store the - // authenticated session of the user who made the query, - // so we can check it later when sending notifications. - // This is optional as it is only set by the database - // runtime when storing the live query to storage. - #[revision(start = 2)] - pub(crate) session: Option<Value>, // When a live query is created, we must also store the // authenticated session of the user who made the query, // so we can check it later when sending notifications. // This is optional as it is only set by the database // runtime when storing the live query to storage. pub(crate) auth: Option<Auth>, + // When a live query is created, we must also store the + // authenticated session of the user who made the query, + // so we can check it later when sending notifications. + // This is optional as it is only set by the database + // runtime when storing the live query to storage. + pub(crate) session: Option<Value>, } impl LiveStatement { @@ -94,96 +84,49 @@ impl LiveStatement { let mut stm = LiveStatement { // Use the current session authentication // for when we store the LIVE Statement - session: ctx.value("session").cloned(), + auth: Some(opt.auth.as_ref().clone()), // Use the current session authentication // for when we store the LIVE Statement - auth: Some(opt.auth.as_ref().clone()), + session: ctx.value("session").cloned(), // Clone the rest of the original fields // from the LIVE statement to the new one ..self.clone() }; + // Get the id let id = stm.id.0; - match FFLAGS.change_feed_live_queries.enabled() { - true => { - let mut run = ctx.tx_lock().await; - match stm.what.compute(stk, ctx, opt, doc).await? { - Value::Table(tb) => { - // We modify the table as it can be a $PARAM and the compute evaluates that - let mut stm = stm; - stm.what = Value::Table(tb.clone()); - - let ns = opt.ns()?.to_string(); - let db = opt.db()?.to_string(); - self.validate_change_feed_valid(&mut run, &ns, &db, &tb).await?; - // Send the live query registration hook to the transaction pre-commit channel - run.pre_commit_register_async_event(TrackedResult::LiveQuery(LqEntry { - live_id: stm.id, - ns, - db, - stm, - }))?; - } - v => { - return Err(Error::LiveStatement { - value: v.to_string(), - }); - } - } - Ok(id.into()) - } - false => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Process the live query table - match stm.what.compute(stk, ctx, opt, doc).await?
{ - Value::Table(tb) => { - // Store the current Node ID - stm.node = nid.into(); - // Insert the node live query - run.putc_ndlq(nid, id, opt.ns()?, opt.db()?, tb.as_str(), None).await?; - // Insert the table live query - run.putc_tblq(opt.ns()?, opt.db()?, &tb, stm, None).await?; - } - v => { - return Err(Error::LiveStatement { - value: v.to_string(), - }); - } + // Process the live query table + match stm.what.compute(stk, ctx, opt, doc).await? { + Value::Table(tb) => { + // Store the current Node ID + stm.node = nid.into(); + // Get the NS and DB + let ns = opt.ns()?; + let db = opt.db()?; + // Store the live info + let lq = Live { + ns: ns.to_string(), + db: db.to_string(), + tb: tb.to_string(), }; - // Return the query id - Ok(id.into()) + // Get the transaction + let txn = ctx.tx(); + // Lock the transaction + let mut txn = txn.lock().await; + // Insert the node live query + let key = crate::key::node::lq::new(nid, id); + txn.put(key, lq).await?; + // Insert the table live query + let key = crate::key::table::lq::new(ns, db, &tb, id); + txn.put(key, stm).await?; } - } - } - - async fn validate_change_feed_valid( - &self, - tx: &mut MutexGuard<'_, crate::kvs::Transaction>, - ns: &str, - db: &str, - tb: &Table, - ) -> Result<(), Error> { - // Find the table definition - let tb_definition = tx.get_and_cache_tb(ns, db, tb).await.map_err(|e| match e { - Error::TbNotFound { - value: _tb, - } => Error::LiveQueryError(LiveQueryCause::MissingChangeFeed), - _ => e, - })?; - // check it has a change feed - let cf = tb_definition - .changefeed - .ok_or(Error::LiveQueryError(LiveQueryCause::MissingChangeFeed))?; - // check the change feed includes the original - required for differentiating between CREATE and UPDATE - if !cf.store_diff { - return Err(Error::LiveQueryError(LiveQueryCause::ChangeFeedNoOriginal)); - } - Ok(()) - } - - pub(crate) fn archive(mut self, node_id: Uuid) -> LiveStatement { - self.archived = Some(node_id); - self + v => { + return Err(Error::LiveStatement { + value: v.to_string(), + }); + } + }; + // Return the query id + Ok(id.into()) } } diff --git a/core/src/sql/statements/rebuild.rs b/core/src/sql/statements/rebuild.rs index 146c56f8..2a01ffad 100644 --- a/core/src/sql/statements/rebuild.rs +++ b/core/src/sql/statements/rebuild.rs @@ -71,31 +71,19 @@ impl RebuildIndexStatement { let future = async { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Index, &Base::Db)?; - // Get the index definition - let ix = ctx - .tx_lock() - .await - .get_and_cache_tb_index( - opt.ns()?, - opt.db()?, - self.what.as_str(), - self.name.as_str(), - ) - .await?; - - // Remove the index - let remove = RemoveIndexStatement { + let ix = ctx.tx().get_tb_index(opt.ns()?, opt.db()?, &self.what, &self.name).await?; + // Create the remove statement + let stm = RemoveIndexStatement { name: self.name.clone(), what: self.what.clone(), if_exists: false, }; - remove.compute(ctx, opt).await?; - + // Execute the remove statement + stm.compute(ctx, opt).await?; // Rebuild the index ix.compute(stk, ctx, opt, doc).await?; - - // Return the result object + // Ok all good Ok(Value::None) } .await; diff --git a/core/src/sql/statements/remove/access.rs b/core/src/sql/statements/remove/access.rs index fcf4f170..d5c79cc3 100644 --- a/core/src/sql/statements/remove/access.rs +++ b/core/src/sql/statements/remove/access.rs @@ -25,44 +25,44 @@ impl RemoveAccessStatement { let future = async { // Allowed to run?
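Registration is now the mirror image of KILL: one locked transaction writes the node-level entry (used for cleanup) and the table-level entry (used for notification fan-out), and the feature-flagged change-feed path and `archived` marker are gone entirely. Condensed from the live.rs hunk above:

```rust
// Bind the target namespace and database once.
let ns = opt.ns()?;
let db = opt.db()?;
// The Live record stores where the query points, so KILL can find the
// table-level entry from the node-level key alone.
let lq = Live {
    ns: ns.to_string(),
    db: db.to_string(),
    tb: tb.to_string(),
};
let txn = ctx.tx();
let mut txn = txn.lock().await;
// Node-level entry: lets a node enumerate and garbage-collect its queries.
txn.put(crate::key::node::lq::new(nid, id), lq).await?;
// Table-level entry: lets writers fan out notifications for the table.
txn.put(crate::key::table::lq::new(ns, db, &tb, id), stm).await?;
```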
opt.is_allowed(Action::Edit, ResourceKind::Actor, &self.base)?; - + // Check the statement type match &self.base { Base::Root => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let ac = run.get_root_access(&self.name).await?; + let ac = txn.get_root_access(&self.name).await?; // Delete the definition let key = crate::key::root::ac::new(&ac.name); - run.del(key).await?; + txn.del(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } Base::Ns => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let ac = run.get_ns_access(opt.ns()?, &self.name).await?; + let ac = txn.get_ns_access(opt.ns()?, &self.name).await?; // Delete the definition let key = crate::key::namespace::ac::new(opt.ns()?, &ac.name); - run.del(key).await?; + txn.del(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } Base::Db => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let ac = run.get_db_access(opt.ns()?, opt.db()?, &self.name).await?; + let ac = txn.get_db_access(opt.ns()?, opt.db()?, &self.name).await?; // Delete the definition let key = crate::key::database::ac::new(opt.ns()?, opt.db()?, &ac.name); - run.del(key).await?; + txn.del(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/analyzer.rs b/core/src/sql/statements/remove/analyzer.rs index 02540386..b77be9b1 100644 --- a/core/src/sql/statements/remove/analyzer.rs +++ b/core/src/sql/statements/remove/analyzer.rs @@ -23,15 +23,15 @@ impl RemoveAnalyzerStatement { let future = async { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Analyzer, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let az = run.get_db_analyzer(opt.ns()?, opt.db()?, &self.name).await?; + let az = txn.get_db_analyzer(opt.ns()?, opt.db()?, &self.name).await?; // Delete the definition let key = crate::key::database::az::new(opt.ns()?, opt.db()?, &az.name); - run.del(key).await?; + txn.del(key).await?; + // Clear the cache + txn.clear(); // TODO Check that the analyzer is not used in any schema // Ok all good Ok(Value::None) diff --git a/core/src/sql/statements/remove/database.rs b/core/src/sql/statements/remove/database.rs index ee76f5ff..7985d56f 100644 --- a/core/src/sql/statements/remove/database.rs +++ b/core/src/sql/statements/remove/database.rs @@ -24,20 +24,20 @@ impl RemoveDatabaseStatement { let future = async { // Allowed to run? 
opt.is_allowed(Action::Edit, ResourceKind::Database, &Base::Ns)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Remove index store - ctx.get_index_stores().database_removed(&mut run, opt.ns()?, &self.name).await?; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); + // Remove the index stores + ctx.get_index_stores().database_removed(&txn, opt.ns()?, &self.name).await?; // Get the definition - let db = run.get_db(opt.ns()?, &self.name).await?; + let db = txn.get_db(opt.ns()?, &self.name).await?; // Delete the definition let key = crate::key::namespace::db::new(opt.ns()?, &db.name); - run.del(key).await?; + txn.del(key).await?; // Delete the resource data let key = crate::key::database::all::new(opt.ns()?, &db.name); - run.delp(key, u32::MAX).await?; + txn.delp(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/event.rs b/core/src/sql/statements/remove/event.rs index 9fe1258b..dd99add2 100644 --- a/core/src/sql/statements/remove/event.rs +++ b/core/src/sql/statements/remove/event.rs @@ -25,18 +25,15 @@ impl RemoveEventStatement { let future = async { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Event, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let ev = run.get_tb_event(opt.ns()?, opt.db()?, &self.what, &self.name).await?; + let ev = txn.get_tb_event(opt.ns()?, opt.db()?, &self.what, &self.name).await?; // Delete the definition let key = crate::key::table::ev::new(opt.ns()?, opt.db()?, &ev.what, &ev.name); - run.del(key).await?; + txn.del(key).await?; // Clear the cache - let key = crate::key::table::ev::prefix(opt.ns()?, opt.db()?, &ev.what); - run.clr(key).await?; + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/field.rs b/core/src/sql/statements/remove/field.rs index 476f3c8e..34dac5d9 100644 --- a/core/src/sql/statements/remove/field.rs +++ b/core/src/sql/statements/remove/field.rs @@ -25,20 +25,17 @@ impl RemoveFieldStatement { let future = async { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Field, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); + // Get the field name + let na = self.name.to_string(); // Get the definition - let fd_name = self.name.to_string(); - let fd = run.get_tb_field(opt.ns()?, opt.db()?, &self.what, &fd_name).await?; + let fd = txn.get_tb_field(opt.ns()?, opt.db()?, &self.what, &na).await?; // Delete the definition - let fd_name = fd.name.to_string(); - let key = crate::key::table::fd::new(opt.ns()?, opt.db()?, &self.what, &fd_name); - run.del(key).await?; + let key = crate::key::table::fd::new(opt.ns()?, opt.db()?, &fd.what, &na); + txn.del(key).await?; // Clear the cache - let key = crate::key::table::fd::prefix(opt.ns()?, opt.db()?, &self.what); - run.clr(key).await?; + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/function.rs b/core/src/sql/statements/remove/function.rs index 57b3e383..68f2f60c 100644 --- a/core/src/sql/statements/remove/function.rs +++ b/core/src/sql/statements/remove/function.rs @@ -24,15 +24,15 @@ impl RemoveFunctionStatement { let future = async { // Allowed to run? 
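Note the prefix-deletion change that recurs through these REMOVE hunks: `delp` no longer takes a `u32::MAX` batch limit, it simply clears everything under the given prefix. From the database branch above, with commentary:

```rust
// Resolve the definition so removing a missing database errors early.
let db = txn.get_db(opt.ns()?, &self.name).await?;
// Delete the definition key point-wise.
txn.del(crate::key::namespace::db::new(opt.ns()?, &db.name)).await?;
// Then clear every key under the database's `all` prefix;
// previously this was `run.delp(key, u32::MAX).await?`.
txn.delp(crate::key::database::all::new(opt.ns()?, &db.name)).await?;
// Drop cached definitions only after the deletes have gone through.
txn.clear();
```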
opt.is_allowed(Action::Edit, ResourceKind::Function, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let fc = run.get_db_function(opt.ns()?, opt.db()?, &self.name).await?; + let fc = txn.get_db_function(opt.ns()?, opt.db()?, &self.name).await?; // Delete the definition let key = crate::key::database::fc::new(opt.ns()?, opt.db()?, &fc.name); - run.del(key).await?; + txn.del(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/index.rs b/core/src/sql/statements/remove/index.rs index 56e9731d..0414b796 100644 --- a/core/src/sql/statements/remove/index.rs +++ b/core/src/sql/statements/remove/index.rs @@ -25,23 +25,20 @@ impl RemoveIndexStatement { let future = async { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Index, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); // Clear the index store cache ctx.get_index_stores() - .index_removed(&mut run, opt.ns()?, opt.db()?, &self.what, &self.name) + .index_removed(&txn, opt.ns()?, opt.db()?, &self.what, &self.name) .await?; - // Clear the cache - run.clear_cache(); // Delete the definition let key = crate::key::table::ix::new(opt.ns()?, opt.db()?, &self.what, &self.name); - run.del(key).await?; + txn.del(key).await?; // Remove the index data let key = crate::key::index::all::new(opt.ns()?, opt.db()?, &self.what, &self.name); - run.delp(key, u32::MAX).await?; + txn.delp(key).await?; // Clear the cache - let key = crate::key::table::ix::prefix(opt.ns()?, opt.db()?, &self.what); - run.clr(key).await?; + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/model.rs b/core/src/sql/statements/remove/model.rs index 3e54389d..b4632fab 100644 --- a/core/src/sql/statements/remove/model.rs +++ b/core/src/sql/statements/remove/model.rs @@ -25,16 +25,16 @@ impl RemoveModelStatement { let future = async { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Model, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); + // Get the defined model + let ml = txn.get_db_model(opt.ns()?, opt.db()?, &self.name, &self.version).await?; // Delete the definition - let key = - crate::key::database::ml::new(opt.ns()?, opt.db()?, &self.name, &self.version); - run.del(key).await?; - // Remove the model file - // TODO + let key = crate::key::database::ml::new(opt.ns()?, opt.db()?, &ml.name, &ml.version); + txn.del(key).await?; + // Clear the cache + txn.clear(); + // TODO Remove the model file from storage // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/namespace.rs b/core/src/sql/statements/remove/namespace.rs index 8450ba04..c8161b72 100644 --- a/core/src/sql/statements/remove/namespace.rs +++ b/core/src/sql/statements/remove/namespace.rs @@ -24,20 +24,20 @@ impl RemoveNamespaceStatement { let future = async { // Allowed to run? 
opt.is_allowed(Action::Edit, ResourceKind::Namespace, &Base::Root)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Delete index stores instance - ctx.get_index_stores().namespace_removed(&mut run, &self.name).await?; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); + // Remove the index stores + ctx.get_index_stores().namespace_removed(&txn, &self.name).await?; // Get the definition - let ns = run.get_ns(&self.name).await?; + let ns = txn.get_ns(&self.name).await?; // Delete the definition let key = crate::key::root::ns::new(&ns.name); - run.del(key).await?; + txn.del(key).await?; // Delete the resource data let key = crate::key::namespace::all::new(&ns.name); - run.delp(key, u32::MAX).await?; + txn.delp(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/param.rs b/core/src/sql/statements/remove/param.rs index 98228a05..f728e70c 100644 --- a/core/src/sql/statements/remove/param.rs +++ b/core/src/sql/statements/remove/param.rs @@ -24,15 +24,15 @@ impl RemoveParamStatement { let future = async { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Parameter, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let pa = run.get_db_param(opt.ns()?, opt.db()?, &self.name).await?; + let pa = txn.get_db_param(opt.ns()?, opt.db()?, &self.name).await?; // Delete the definition let key = crate::key::database::pa::new(opt.ns()?, opt.db()?, &pa.name); - run.del(key).await?; + txn.del(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/table.rs b/core/src/sql/statements/remove/table.rs index 3e85050c..be3584e5 100644 --- a/core/src/sql/statements/remove/table.rs +++ b/core/src/sql/statements/remove/table.rs @@ -24,31 +24,29 @@ impl RemoveTableStatement { let future = async { // Allowed to run? 
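As with the DEFINE statements, every REMOVE now follows one shape: resolve the definition (so removing a missing resource still errors), delete the definition key and any data under the resource prefix, evict related in-memory index state through the shared handle, and clear the cache last. Sketched for tables, using only the calls from the hunks above and below:

```rust
let txn = ctx.tx();
// Evict in-memory index state via the shared handle (no `&mut` guard).
ctx.get_index_stores().table_removed(&txn, opt.ns()?, opt.db()?, &self.name).await?;
// Resolve first: removal of an unknown table surfaces TbNotFound here.
let tb = txn.get_tb(opt.ns()?, opt.db()?, &self.name).await?;
// Delete the definition key, then everything under the table prefix.
txn.del(crate::key::database::tb::new(opt.ns()?, opt.db()?, &self.name)).await?;
txn.delp(crate::key::table::all::new(opt.ns()?, opt.db()?, &self.name)).await?;
// Views also drop their per-foreign-table config entries.
if let Some(view) = &tb.view {
    for v in view.what.0.iter() {
        txn.del(crate::key::table::ft::new(opt.ns()?, opt.db()?, v, &self.name)).await?;
    }
}
// Invalidate cached definitions only after all deletes have gone through.
txn.clear();
```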
opt.is_allowed(Action::Edit, ResourceKind::Table, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); // Remove the index stores - ctx.get_index_stores() - .table_removed(&mut run, opt.ns()?, opt.db()?, &self.name) - .await?; - // Clear the cache - run.clear_cache(); + ctx.get_index_stores().table_removed(&txn, opt.ns()?, opt.db()?, &self.name).await?; // Get the defined table - let tb = run.get_tb(opt.ns()?, opt.db()?, &self.name).await?; + let tb = txn.get_tb(opt.ns()?, opt.db()?, &self.name).await?; // Delete the definition let key = crate::key::database::tb::new(opt.ns()?, opt.db()?, &self.name); - run.del(key).await?; + txn.del(key).await?; // Remove the resource data let key = crate::key::table::all::new(opt.ns()?, opt.db()?, &self.name); - run.delp(key, u32::MAX).await?; + txn.delp(key).await?; // Check if this is a foreign table if let Some(view) = &tb.view { // Process each foreign table for v in view.what.0.iter() { // Save the view config let key = crate::key::table::ft::new(opt.ns()?, opt.db()?, v, &self.name); - run.del(key).await?; + txn.del(key).await?; } } + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/remove/user.rs b/core/src/sql/statements/remove/user.rs index 4d4d995f..af202db3 100644 --- a/core/src/sql/statements/remove/user.rs +++ b/core/src/sql/statements/remove/user.rs @@ -25,44 +25,44 @@ impl RemoveUserStatement { let future = async { // Allowed to run? opt.is_allowed(Action::Edit, ResourceKind::Actor, &self.base)?; - + // Check the statement type match self.base { Base::Root => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let us = run.get_root_user(&self.name).await?; + let us = txn.get_root_user(&self.name).await?; // Process the statement let key = crate::key::root::us::new(&us.name); - run.del(key).await?; + txn.del(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } Base::Ns => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let us = run.get_ns_user(opt.ns()?, &self.name).await?; + let us = txn.get_ns_user(opt.ns()?, &self.name).await?; // Delete the definition let key = crate::key::namespace::us::new(opt.ns()?, &us.name); - run.del(key).await?; + txn.del(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } Base::Db => { - // Claim transaction - let mut run = ctx.tx_lock().await; - // Clear the cache - run.clear_cache(); + // Get the transaction + let txn = ctx.tx(); // Get the definition - let us = run.get_db_user(opt.ns()?, opt.db()?, &self.name).await?; + let us = txn.get_db_user(opt.ns()?, opt.db()?, &self.name).await?; // Delete the definition let key = crate::key::database::us::new(opt.ns()?, opt.db()?, &us.name); - run.del(key).await?; + txn.del(key).await?; + // Clear the cache + txn.clear(); // Ok all good Ok(Value::None) } diff --git a/core/src/sql/statements/show.rs b/core/src/sql/statements/show.rs index bb46b0c8..d524f54d 100644 --- a/core/src/sql/statements/show.rs +++ b/core/src/sql/statements/show.rs @@ -32,8 +32,7 @@ impl ShowSince { } } -// ShowStatement is used to show changes in a table or database via -// the SHOW CHANGES statement. 
+/// A SHOW CHANGES statement for displaying changes made to a table or database. #[revisioned(revision = 1)] #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Store, Hash)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] @@ -52,29 +51,26 @@ impl ShowStatement { opt: &Options, _doc: Option<&CursorDoc<'_>>, ) -> Result<Value, Error> { - // Selected DB? + // Allowed to run? opt.is_allowed(Action::View, ResourceKind::Table, &Base::Db)?; - // Claim transaction - let mut run = ctx.tx_lock().await; + // Get the transaction + let txn = ctx.tx(); // Process the show query - let tb = self.table.as_deref(); let r = crate::cf::read( - &mut run, + &txn, opt.ns()?, opt.db()?, - tb.map(|x| x.as_str()), + self.table.as_deref().map(String::as_str), self.since.clone(), self.limit, ) .await?; // Return the changes - let mut a = Vec::<Value>::new(); + let mut a: Vec<Value> = Vec::new(); for r in r.iter() { - let v: Value = r.clone().into_value(); - a.push(v); + a.push(r.clone().into_value()); } - let v: Value = Value::Array(crate::sql::array::Array(a)); - Ok(v) + Ok(a.into()) } } diff --git a/core/src/sql/uuid.rs b/core/src/sql/uuid.rs index 4564ca39..82ba3444 100644 --- a/core/src/sql/uuid.rs +++ b/core/src/sql/uuid.rs @@ -9,9 +9,7 @@ use std::str::FromStr; pub(crate) const TOKEN: &str = "$surrealdb::private::sql::Uuid"; #[revisioned(revision = 1)] -#[derive( - Clone, Copy, Debug, Default, Eq, Ord, PartialEq, PartialOrd, Serialize, Deserialize, Hash, -)] +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash)] #[serde(rename = "$surrealdb::private::sql::Uuid")] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[non_exhaustive] diff --git a/core/src/sql/value/serde/ser/statement/live.rs b/core/src/sql/value/serde/ser/statement/live.rs index b08f80a7..2d367bea 100644 --- a/core/src/sql/value/serde/ser/statement/live.rs +++ b/core/src/sql/value/serde/ser/statement/live.rs @@ -48,7 +48,6 @@ pub struct SerializeLiveStatement { what: Value, cond: Option<Cond>, fetch: Option<Fetchs>, - archived: Option<Uuid>, session: Option<Value>, auth: Option<Auth>, } @@ -80,9 +79,6 @@ impl serde::ser::SerializeStruct for SerializeLiveStatement { "fetch" => { self.fetch = value.serialize(ser::fetchs::opt::Serializer.wrap())?; } - "archived" => { - self.archived = value.serialize(ser::uuid::opt::Serializer.wrap())?.map(Uuid); - } "session" => { self.session = None; } @@ -104,7 +100,6 @@ impl serde::ser::SerializeStruct for SerializeLiveStatement { what: self.what, cond: self.cond, fetch: self.fetch, - archived: self.archived, session: None, auth: None, }) diff --git a/core/src/vs/mod.rs b/core/src/vs/mod.rs index 1d53ee27..c93ba377 100644 --- a/core/src/vs/mod.rs +++ b/core/src/vs/mod.rs @@ -17,10 +17,8 @@ pub type Versionstamp = [u8; 10]; pub(crate) mod conv; -pub(crate) mod oracle; pub use self::conv::*; -pub use self::oracle::*; /// Generate S-tuples of valid, sequenced versionstamps within range. /// The limit is used, because these are combinatorics - without an upper bound, combinations aren't possible.
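For orientation, the `Versionstamp` kept above is a plain 10-byte array. A minimal, illustrative sketch of one plausible packing — assuming the FoundationDB-style split of an 8-byte transaction stamp followed by a 2-byte batch sequence; `pack_versionstamp` is a hypothetical name, not the `vs::conv` API:

```rust
// Hypothetical packing sketch; the real conversions live in `vs::conv`.
type Versionstamp = [u8; 10];

fn pack_versionstamp(tx: u64, seq: u16) -> Versionstamp {
    let mut vs = [0u8; 10];
    vs[..8].copy_from_slice(&tx.to_be_bytes()); // 8-byte transaction stamp (assumed layout)
    vs[8..].copy_from_slice(&seq.to_be_bytes()); // 2-byte batch sequence (assumed layout)
    vs
}

fn main() {
    // Big-endian packing makes lexicographic byte order match numeric order,
    // which is what makes versionstamps usable as sequenced keys.
    assert!(pack_versionstamp(7, u16::MAX) < pack_versionstamp(8, 0));
}
```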
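Stepping back: the statement hunks earlier in this section (REMOVE NAMESPACE, PARAM, TABLE, USER, and SHOW CHANGES) all apply the same migration — the exclusive `ctx.tx_lock().await` guard becomes a shared `ctx.tx()` handle, `delp` drops its `u32::MAX` limit argument, and the cache is cleared once after the writes instead of up front. A hedged sketch of that shape, where `Txn` and its fields are stand-ins rather than the crate's real transaction type:

```rust
use std::cell::RefCell;
use std::collections::BTreeMap;

// Stand-in for the shared transaction handle; not SurrealDB's real API.
struct Txn {
    kv: RefCell<BTreeMap<Vec<u8>, Vec<u8>>>,
    cache: RefCell<Vec<String>>, // stand-in for the per-transaction definition cache
}

impl Txn {
    // Operations take &self: callers share one handle instead of holding a mutex guard.
    fn del(&self, key: &[u8]) {
        self.kv.borrow_mut().remove(key);
    }
    // `delp` now takes only a prefix; the old explicit limit is gone.
    fn delp(&self, prefix: &[u8]) {
        self.kv.borrow_mut().retain(|k, _| !k.starts_with(prefix));
    }
    fn clear(&self) {
        self.cache.borrow_mut().clear();
    }
}

fn remove_resource(txn: &Txn, key: &[u8], prefix: &[u8]) {
    // Delete the definition
    txn.del(key);
    // Delete the resource data
    txn.delp(prefix);
    // Clear the cache only after the deletions, mirroring the reordering above
    txn.clear();
}
```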
diff --git a/lib/Cargo.toml b/lib/Cargo.toml index cddae98c..a99f8db8 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -29,15 +29,8 @@ protocol-ws = ["dep:tokio-tungstenite", "dep:trice", "tokio/time"] kv-mem = ["surrealdb-core/kv-mem", "tokio/time"] kv-indxdb = ["surrealdb-core/kv-indxdb"] kv-rocksdb = ["surrealdb-core/kv-rocksdb", "tokio/time"] -kv-tikv = ["surrealdb-core/kv-tikv"] -kv-fdb-5_1 = ["surrealdb-core/kv-fdb-5_1", "kv-fdb"] -kv-fdb-5_2 = ["surrealdb-core/kv-fdb-5_2", "kv-fdb"] -kv-fdb-6_0 = ["surrealdb-core/kv-fdb-6_0", "kv-fdb"] -kv-fdb-6_1 = ["surrealdb-core/kv-fdb-6_1", "kv-fdb"] -kv-fdb-6_2 = ["surrealdb-core/kv-fdb-6_2", "kv-fdb"] -kv-fdb-6_3 = ["surrealdb-core/kv-fdb-6_3", "kv-fdb"] -kv-fdb-7_0 = ["surrealdb-core/kv-fdb-7_0", "kv-fdb"] -kv-fdb-7_1 = ["surrealdb-core/kv-fdb-7_1", "kv-fdb"] +kv-tikv = ["surrealdb-core/kv-tikv", "tokio/time"] +kv-fdb = ["surrealdb-core/kv-fdb", "tokio/time"] kv-surrealkv = ["surrealdb-core/kv-surrealkv", "tokio/time"] scripting = ["surrealdb-core/scripting"] http = ["surrealdb-core/http"] @@ -54,9 +47,9 @@ rustls = [ ml = ["surrealdb-core/ml"] jwks = ["surrealdb-core/jwks"] arbitrary = ["surrealdb-core/arbitrary"] - -# Private features -kv-fdb = ["tokio/time"] +# Special features +kv-fdb-7_1 = ["surrealdb-core/kv-fdb-7_1"] +kv-fdb-7_3 = ["surrealdb-core/kv-fdb-7_3"] [package.metadata.docs.rs] rustdoc-args = ["--cfg", "docsrs"] diff --git a/lib/benches/README.md b/lib/benches/README.md index 08fcda09..4ddea67e 100644 --- a/lib/benches/README.md +++ b/lib/benches/README.md @@ -29,17 +29,6 @@ $ cargo make bench-lib-rocksdb $ cargo make bench-sdk-rocksdb ``` -* FoundationDB datastore using the lib or the SDK - * Start FoundationDB - ``` - $ docker run -ti -e FDB_NETWORKING_MODE=host --net=host foundationdb/foundationdb:7.1.30 - ``` - * Run the benchmarks - ```console - $ cargo make bench-lib-rocksdb - $ cargo make bench-sdk-rocksdb - ``` - * WebSocket remote server using the SDK * Start SurrealDB server ``` diff --git a/lib/benches/hash_trie_btree.rs b/lib/benches/hash_trie_btree.rs index fdbb0b30..8ea63e52 100644 --- a/lib/benches/hash_trie_btree.rs +++ b/lib/benches/hash_trie_btree.rs @@ -4,7 +4,7 @@ use radix_trie::{Trie, TrieCommon, TrieKey}; use std::collections::{BTreeMap, HashMap}; use std::hash::Hash; use std::time::Duration; -use surrealdb::key::table::ix; +// use surrealdb::key::table::ix; use surrealdb::sql::{value, Array, Id, Thing}; // Common use case: VectorSearch @@ -23,7 +23,8 @@ fn bench_hash_trie_btree_large_vector(c: &mut Criterion) { g.finish(); } -fn bench_hash_trie_btree_ix_key(c: &mut Criterion) { +// TODO: @emmanuel-keller this is disabled because `ix` is now private +/*fn bench_hash_trie_btree_ix_key(c: &mut Criterion) { const N: usize = 100_000; let mut samples = Vec::with_capacity(N); for i in 0..N { @@ -36,7 +37,7 @@ fn bench_hash_trie_btree_ix_key(c: &mut Criterion) { bench_trie(&mut g, &samples); bench_btree(&mut g, &samples); g.finish(); -} +}*/ fn bench_hash_trie_btree_small_string(c: &mut Criterion) { const N: usize = 100_000; @@ -192,7 +193,7 @@ fn bench_btree_get<K, V>(samples: &[(K, V)], map: &BTreeMap<K, V>) { criterion_group!( benches, bench_hash_trie_btree_large_vector, - bench_hash_trie_btree_ix_key, + // bench_hash_trie_btree_ix_key, bench_hash_trie_btree_small_string, bench_hash_trie_btree_thing, bench_hash_trie_btree_value diff --git a/lib/benches/index_btree.rs b/lib/benches/index_btree.rs index 2fe83283..bfc6323b 100644 --- a/lib/benches/index_btree.rs +++ b/lib/benches/index_btree.rs @@ -64,7 +64,7 @@ where
BK: BKeys + Clone + Default + Debug, { let ds = Datastore::new("memory").await.unwrap(); - let mut tx = ds.transaction(Write, Optimistic).await.unwrap(); + let tx = ds.transaction(Write, Optimistic).await.unwrap(); let mut t = BTree::<BK>::new(BState::new(100)); let np = TreeNodeProvider::Debug; let c = TreeCache::new(0, np.get_key(0), np.clone(), cache_size); @@ -72,11 +72,11 @@ where for i in 0..samples_size { let (key, payload) = sample_provider(i); // Insert the sample - t.insert(&mut tx, &mut s, key.clone(), payload).await.unwrap(); + t.insert(&tx, &mut s, key.clone(), payload).await.unwrap(); // Search for it - black_box(t.search_mut(&mut tx, &mut s, &key).await.unwrap()); + black_box(t.search_mut(&tx, &mut s, &key).await.unwrap()); } - s.finish(&mut tx).await.unwrap(); + s.finish(&tx).await.unwrap(); tx.commit().await.unwrap(); } diff --git a/lib/benches/index_mtree.rs b/lib/benches/index_mtree.rs index 4cd74a8d..7dc9d0e3 100644 --- a/lib/benches/index_mtree.rs +++ b/lib/benches/index_mtree.rs @@ -1,7 +1,6 @@ use criterion::measurement::WallTime; use criterion::{criterion_group, criterion_main, BenchmarkGroup, Criterion, Throughput}; use futures::executor::block_on; -use futures::lock::Mutex; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use reblessive::TreeStack; @@ -54,7 +53,7 @@ fn bench_index_mtree_dim_2048_full_cache(c: &mut Criterion) { async fn mtree_index( ds: &Datastore, - tx: &mut Transaction, + tx: &Transaction, dimension: usize, cache_size: usize, tt: TransactionType, @@ -144,8 +143,8 @@ async fn insert_objects( vector_size: usize, cache_size: usize, ) { - let mut tx = ds.transaction(Write, Optimistic).await.unwrap(); - let mut mt = mtree_index(ds, &mut tx, vector_size, cache_size, Write).await; + let tx = ds.transaction(Write, Optimistic).await.unwrap(); + let mut mt = mtree_index(ds, &tx, vector_size, cache_size, Write).await; let mut stack = TreeStack::new(); let mut rng = StdRng::from_entropy(); stack .enter(|stk| async { @@ -154,12 +153,12 @@ async fn insert_objects( let vector: Vec<Number> = random_object(&mut rng, vector_size); // Insert the sample let rid = Thing::from(("test", Id::from(i as i64))); - mt.index_document(stk, &mut tx, &rid, &vec![Value::from(vector)]).await.unwrap(); + mt.index_document(stk, &tx, &rid, &vec![Value::from(vector)]).await.unwrap(); } }) .finish() .await; - mt.finish(&mut tx).await.unwrap(); + mt.finish(&tx).await.unwrap(); tx.commit().await.unwrap(); } @@ -170,11 +169,9 @@ async fn knn_lookup_objects( cache_size: usize, knn: usize, ) { - let txn = Arc::new(Mutex::new(ds.transaction(Read, Optimistic).await.unwrap())); - let mut tx = txn.lock().await; - let mt = Arc::new(mtree_index(ds, &mut tx, vector_size, cache_size, Read).await); - drop(tx); - let ctx = Arc::new(Context::default().set_transaction(txn)); + let txn = ds.transaction(Read, Optimistic).await.unwrap(); + let mt = Arc::new(mtree_index(ds, &txn, vector_size, cache_size, Read).await); + let ctx = Arc::new(Context::from(txn)); let counter = Arc::new(AtomicUsize::new(0)); diff --git a/lib/benches/sdb_benches/lib/mod.rs b/lib/benches/sdb_benches/lib/mod.rs index ee1bd931..5d12692b 100644 --- a/lib/benches/sdb_benches/lib/mod.rs +++ b/lib/benches/sdb_benches/lib/mod.rs @@ -31,15 +31,6 @@ pub(super) async fn init(target: &str) { .expect("Unable to execute the query"); let _ = DB.set(Arc::new(ds)); } - #[cfg(feature = "kv-fdb")] - "lib-fdb" => { - let ds = Datastore::new("fdb:///etc/foundationdb/fdb.cluster").await.unwrap(); - // Verify it can connect to the FDB cluster - ds.execute("INFO FOR DB",
&Session::owner().with_ns("ns").with_db("db"), None) - .await - .expect("Unable to connect to FDB cluster"); - let _ = DB.set(Arc::new(ds)); - } #[cfg(feature = "kv-surrealkv")] "lib-surrealkv" => { let path = format!( diff --git a/lib/benches/sdb_benches/sdk/mod.rs b/lib/benches/sdb_benches/sdk/mod.rs index 723451d4..a15dbf23 100644 --- a/lib/benches/sdb_benches/sdk/mod.rs +++ b/lib/benches/sdb_benches/sdk/mod.rs @@ -31,11 +31,17 @@ pub(super) async fn init(target: &str) { println!("\n### Using path: {} ###\n", path); DB.connect(&path).await.unwrap(); } - #[cfg(feature = "kv-fdb")] - "sdk-fdb" => { - DB.connect("fdb:///etc/foundationdb/fdb.cluster").await.unwrap(); - // Verify it can connect to the FDB cluster - DB.health().await.expect("fdb cluster is unavailable"); + #[cfg(feature = "kv-surrealkv")] + "sdk-surrealkv" => { + let path = format!( + "surrealkv://sdk-surrealkv-{}.db", + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_millis() + ); + println!("\n### Using path: {} ###\n", path); + DB.connect(&path).await.unwrap(); } #[cfg(feature = "protocol-ws")] "sdk-ws" => { diff --git a/lib/src/api/engine/local/mod.rs b/lib/src/api/engine/local/mod.rs index 0e1230f8..31a279d8 100644 --- a/lib/src/api/engine/local/mod.rs +++ b/lib/src/api/engine/local/mod.rs @@ -448,7 +448,7 @@ async fn export( // Check the permissions level kvs.check(sess, Action::View, ResourceKind::Model.on_db(&nsv, &dbv))?; // Start a new readonly transaction - let mut tx = kvs.transaction(TransactionType::Read, LockType::Optimistic).await?; + let tx = kvs.transaction(TransactionType::Read, LockType::Optimistic).await?; // Attempt to get the model definition let info = tx.get_db_model(&nsv, &dbv, &name, &version).await?; // Export the file data in to the store @@ -792,7 +792,7 @@ async fn router( [Value::Strand(key), value] => (mem::take(&mut key.0), mem::take(value)), _ => unreachable!(), }; - let var = Some(crate::map! { + let var = Some(map! 
{ key.clone() => Value::None, => vars }); diff --git a/lib/src/api/engine/local/native.rs b/lib/src/api/engine/local/native.rs index 66ddb09a..4ab58a4b 100644 --- a/lib/src/api/engine/local/native.rs +++ b/lib/src/api/engine/local/native.rs @@ -147,10 +147,7 @@ pub(crate) async fn run_router( feature = "kv-fdb", feature = "kv-tikv", ))] - let kvs = match address.config.temporary_directory { - Some(tmp_dir) => kvs.with_temporary_directory(tmp_dir), - _ => kvs, - }; + let kvs = kvs.with_temporary_directory(address.config.temporary_directory); let kvs = Arc::new(kvs); let mut vars = BTreeMap::new(); diff --git a/lib/src/api/engine/tasks.rs b/lib/src/api/engine/tasks.rs index 61bb17bc..57b707c8 100644 --- a/lib/src/api/engine/tasks.rs +++ b/lib/src/api/engine/tasks.rs @@ -1,38 +1,31 @@ +use crate::engine::IntervalStream; +use crate::kvs::Datastore; +use crate::options::EngineOptions; +#[cfg(not(target_arch = "wasm32"))] +use crate::Error as RootError; use futures::StreamExt; -use reblessive::TreeStack; #[cfg(target_arch = "wasm32")] use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; - -#[cfg(not(target_arch = "wasm32"))] -use tokio::task::JoinHandle; - -use crate::dbs::Options; -use crate::fflags::FFLAGS; -use crate::kvs::Datastore; -use crate::options::EngineOptions; - -use crate::engine::IntervalStream; -#[cfg(not(target_arch = "wasm32"))] -use crate::Error as RootError; -use surrealdb_core::ctx::Context; -use surrealdb_core::kvs::{LockType, TransactionType}; #[cfg(not(target_arch = "wasm32"))] use tokio::spawn as spawn_future; use tokio::sync::oneshot; +#[cfg(not(target_arch = "wasm32"))] +use tokio::task::JoinHandle; #[cfg(target_arch = "wasm32")] use wasm_bindgen_futures::spawn_local as spawn_future; +/// This will be true if a task has completed #[cfg(not(target_arch = "wasm32"))] type FutureTask = JoinHandle<()>; -#[cfg(target_arch = "wasm32")] + /// This will be true if a task has completed +#[cfg(target_arch = "wasm32")] type FutureTask = Arc<AtomicBool>; pub struct Tasks { pub nd: FutureTask, - pub lq: FutureTask, } impl Tasks { @@ -49,29 +42,17 @@ impl Tasks { return Err(RootError::Db(inner_err)); } } - match self.lq.await { - Ok(_) => {} - Err(e) if e.is_cancelled() => {} - Err(e) => { - error!("Live query task failed: {}", e); - let inner_err = - crate::err::Error::NodeAgent("live query task failed and has been logged"); - return Err(RootError::Db(inner_err)); - } - }; Ok(()) } } /// Starts tasks that are required for the correct running of the engine -pub fn start_tasks(opt: &EngineOptions, dbs: Arc<Datastore>) -> (Tasks, [oneshot::Sender<()>; 2]) { +pub fn start_tasks(opt: &EngineOptions, dbs: Arc<Datastore>) -> (Tasks, [oneshot::Sender<()>; 1]) { let nd = init(opt, dbs.clone()); - let lq = live_query_change_feed(opt, dbs); - let cancellation_channels = [nd.1, lq.1]; + let cancellation_channels = [nd.1]; ( Tasks { nd: nd.0, - lq: lq.0, }, cancellation_channels, ) @@ -127,65 +108,6 @@ fn init(opt: &EngineOptions, dbs: Arc<Datastore>) -> (FutureTask, oneshot::Sende return (ret_status, tx); } -// Start live query on change feeds notification processing -fn live_query_change_feed( - opt: &EngineOptions, - dbs: Arc<Datastore>, -) -> (FutureTask, oneshot::Sender<()>) { - let tick_interval = opt.tick_interval; - - #[cfg(target_arch = "wasm32")] - let completed_status = Arc::new(AtomicBool::new(false)); - #[cfg(target_arch = "wasm32")] - let ret_status = completed_status.clone(); - - // We create a channel that can be streamed that will indicate termination - let (tx, mut rx) = oneshot::channel();
let _fut = spawn_future(async move { - let mut stack = TreeStack::new(); - - let _lifecycle = crate::dbs::LoggingLifecycle::new("live query agent task".to_string()); - if !FFLAGS.change_feed_live_queries.enabled() { - // TODO verify test fails since return without completion - #[cfg(target_arch = "wasm32")] - completed_status.store(true, Ordering::Relaxed); - return; - } - let mut ticker = interval_ticker(tick_interval).await; - - let opt = Options::default(); - loop { - tokio::select! { - v = ticker.next() => { - // ticker will never return None; - let i = v.unwrap(); - trace!("Live query agent tick: {:?}", i); - let tx = dbs.transaction(TransactionType::Write, LockType::Optimistic).await.unwrap(); - let ctx = Context::background().set_transaction(tx.enclose()); - if let Err(e) = - stack.enter(|stk| dbs.process_lq_notifications(stk, &ctx, &opt)).finish().await - { - error!("Error running node agent tick: {}", e); - break; - } - } - _ = &mut rx => { - // termination requested, - break - } - } - } - - #[cfg(target_arch = "wasm32")] - completed_status.store(true, Ordering::Relaxed); - }); - #[cfg(not(target_arch = "wasm32"))] - return (_fut, tx); - #[cfg(target_arch = "wasm32")] - return (ret_status, tx); -} - async fn interval_ticker(interval: Duration) -> IntervalStream { #[cfg(not(target_arch = "wasm32"))] use tokio::{time, time::MissedTickBehavior}; diff --git a/lib/src/api/method/insert.rs b/lib/src/api/method/insert.rs index 104cff2b..04e8e2b8 100644 --- a/lib/src/api/method/insert.rs +++ b/lib/src/api/method/insert.rs @@ -55,10 +55,7 @@ macro_rules! into_future { Resource::RecordId(record_id) => { let mut table = Table::default(); table.0 = record_id.tb.clone(); - ( - table.into(), - crate::map! { String::from("id") => record_id.into() }.into(), - ) + (table.into(), map! { String::from("id") => record_id.into() }.into()) } Resource::Object(obj) => return Err(Error::InsertOnObject(obj).into()), Resource::Array(arr) => return Err(Error::InsertOnArray(arr).into()), diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 6576d3f3..9d9a635b 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -116,6 +116,9 @@ compile_error!("The `ml` feature is not supported on the `wasm32` architecture." #[macro_use] extern crate tracing; +#[macro_use] +mod mac; + mod api; #[doc(inline)] diff --git a/lib/src/mac/mod.rs b/lib/src/mac/mod.rs new file mode 100644 index 00000000..200a18c4 --- /dev/null +++ b/lib/src/mac/mod.rs @@ -0,0 +1,9 @@ +/// Creates a new b-tree map of key-value pairs +macro_rules! map { + ($($k:expr $(, if let $grant:pat = $check:expr)? $(, if $guard:expr)? => $v:expr),* $(,)? $( => $x:expr )?) => {{ + let mut m = ::std::collections::BTreeMap::new(); + $(m.extend($x.iter().map(|(k, v)| (k.clone(), v.clone())));)? + $( $(if let $grant = $check)? $(if $guard)? { m.insert($k, $v); };)+ + m + }}; +} diff --git a/lib/tests/bootstrap.rs b/lib/tests/bootstrap.rs deleted file mode 100644 index 74e421f0..00000000 --- a/lib/tests/bootstrap.rs +++ /dev/null @@ -1,250 +0,0 @@ -/// The tests in this file are checking that bootstrapping of the database works correctly -/// They are testing edge cases that may accidentally occur with bugs - we wan't to make sure -/// the system can recover in light of these issues. -/// -/// We may want to move these tests to another suite, as they aren't testing the statements like -/// the other tests are. 
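(Aside on the `map!` macro added in lib/src/mac/mod.rs above: the new matcher accepts optional `if` / `if let` guards per entry, plus a trailing `=> base` that seeds the map from an existing one. The guard forms have no call site in this section, so the usage below is hypothetical; the macro body itself is copied from the hunk:)

```rust
/// Creates a new b-tree map of key-value pairs
macro_rules! map {
    ($($k:expr $(, if let $grant:pat = $check:expr)? $(, if $guard:expr)? => $v:expr),* $(,)? $( => $x:expr )?) => {{
        let mut m = ::std::collections::BTreeMap::new();
        $(m.extend($x.iter().map(|(k, v)| (k.clone(), v.clone())));)?
        $( $(if let $grant = $check)? $(if $guard)? { m.insert($k, $v); };)+
        m
    }};
}

fn main() {
    let base = map! { String::from("id") => 1 };
    let is_admin = true;
    let token: Option<i32> = Some(42);
    let m = map! {
        // Inserted only when the guard expression is true
        String::from("admin"), if is_admin => 2,
        // Inserted only when the pattern matches
        String::from("token"), if let Some(t) = token => t,
        // Seed the map with the entries of `base`
        => base
    };
    assert_eq!(m.len(), 3);
}
```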
-mod helpers; -mod parse; - -use helpers::new_ds; -use serial_test::serial; -use surrealdb::err::Error; -use surrealdb::kvs::LockType::Optimistic; -use surrealdb::kvs::Transaction; -use surrealdb::kvs::TransactionType::Write; -use surrealdb::sql::statements::LiveStatement; -use surrealdb::sql::Uuid; - -#[tokio::test] -#[serial] -async fn bootstrap_removes_unreachable_nodes() -> Result<(), Error> { - // Create the datastore - let dbs = new_ds().await.unwrap(); - - let mut tx = dbs.transaction(Write, Optimistic).await.unwrap(); - // Introduce missing nodes (without heartbeats) - let bad_node = uuid::Uuid::parse_str("9d8e16e4-9f6a-4704-8cf1-7cd55b937c5b").unwrap(); - tx.set_nd(bad_node).await.unwrap(); - - // Introduce a valid chain of data to confirm it is not removed from a cleanup - a_valid_notification( - &mut tx, - ValidNotificationState { - timestamp: None, - node_id: None, - live_query_id: None, - notification_id: None, - namespace: "testns".to_string(), - database: "testdb".to_string(), - table: "testtb".to_string(), - }, - ) - .await - .unwrap(); - - tx.commit().await.unwrap(); - - // Bootstrap - dbs.bootstrap().await.unwrap(); - - // Declare a function that will assert - async fn try_validate(tx: &mut Transaction, bad_node: &uuid::Uuid) -> Result<(), String> { - let res = tx.scan_nd(1000).await.map_err(|e| e.to_string())?; - tx.commit().await.map_err(|e| e.to_string())?; - for node in &res { - if node.name == bad_node.to_string() { - return Err(format!("The node name was actually the bad node {:?}", bad_node)); - } - } - // {Node generated by bootstrap} + {valid node who's uuid we don't know} - assert_eq!(res.len(), 2); - if res.len() != 2 { - return Err("Expected 2 nodes".to_string()); - } - Ok(()) - } - - // Verify the incorrect node is deleted, but self and valid still exist - let res = { - let mut err = None; - for _ in 0..5 { - let mut tx = dbs.transaction(Write, Optimistic).await.unwrap(); - let res = try_validate(&mut tx, &bad_node).await; - if res.is_ok() { - return Ok(()); - } - err = Some(res); - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - } - err.unwrap() - }; - res.unwrap(); - Ok(()) -} - -#[tokio::test] -#[serial] -async fn bootstrap_removes_unreachable_node_live_queries() -> Result<(), Error> { - // Create the datastore - let dbs = new_ds().await.unwrap(); - - // Introduce an invalid node live query - let mut tx = dbs.transaction(Write, Optimistic).await.unwrap(); - let valid_data = a_valid_notification( - &mut tx, - ValidNotificationState { - timestamp: None, - node_id: None, - live_query_id: None, - notification_id: None, - namespace: "testns".to_string(), - database: "testdb".to_string(), - table: "testtb".to_string(), - }, - ) - .await - .unwrap(); - let bad_nd_lq_id = uuid::Uuid::parse_str("67b0f588-2b95-4b6e-87f3-73d0a49034be").unwrap(); - tx.putc_ndlq( - valid_data.clone().node_id.unwrap().0, - bad_nd_lq_id, - &valid_data.namespace, - &valid_data.database, - &valid_data.table, - None, - ) - .await - .unwrap(); - tx.commit().await.unwrap(); - - // Bootstrap - dbs.bootstrap().await.unwrap(); - - // Verify node live query is deleted - let mut tx = dbs.transaction(Write, Optimistic).await.unwrap(); - let res = tx.scan_ndlq(valid_data.node_id.as_ref().unwrap(), 1000).await.unwrap(); - tx.commit().await.unwrap(); - assert_eq!(res.len(), 1, "We expect the node to be available"); - let tested_entry = res.first().unwrap(); - assert_eq!(tested_entry.lq, valid_data.live_query_id.unwrap()); - - Ok(()) -} - -#[tokio::test] -#[serial] -async fn 
bootstrap_removes_unreachable_table_live_queries() -> Result<(), Error> { - // Create the datastore - let dbs = new_ds().await.unwrap(); - - // Introduce an invalid table live query - let mut tx = dbs.transaction(Write, Optimistic).await.unwrap(); - let valid_data = a_valid_notification( - &mut tx, - ValidNotificationState { - timestamp: None, - node_id: None, - live_query_id: None, - notification_id: None, - namespace: "testns".to_string(), - database: "testdb".to_string(), - table: "testtb".to_string(), - }, - ) - .await - .unwrap(); - let bad_tb_lq_id = uuid::Uuid::parse_str("97b8fbe4-a147-4420-95dc-97db3a46c491").unwrap(); - let mut live_stm = LiveStatement::default(); - live_stm.id = bad_tb_lq_id.into(); - tx.putc_tblq(&valid_data.namespace, &valid_data.database, &valid_data.table, live_stm, None) - .await - .unwrap(); - tx.commit().await.unwrap(); - - // Bootstrap - dbs.bootstrap().await.unwrap(); - - // Verify invalid table live query is deleted - let mut tx = dbs.transaction(Write, Optimistic).await.unwrap(); - - let res = tx - .scan_tblq(&valid_data.namespace, &valid_data.database, &valid_data.table, 1000) - .await - .unwrap(); - tx.commit().await.unwrap(); - - assert_eq!(res.len(), 1, "Expected 1 table live query: {:?}", res); - let tested_entry = res.first().unwrap(); - assert_eq!(tested_entry.lq, valid_data.live_query_id.unwrap()); - Ok(()) -} - -#[tokio::test] -#[serial] -async fn bootstrap_removes_unreachable_live_query_notifications() -> Result<(), Error> { - Ok(()) -} - -/// ValidBootstrapState is a representation of a chain of information that bootstrap is concerned -/// with. It is used for two reasons -/// - sometimes we want to detect invalid data that has a valid path (notification without a live query). -/// - sometimes we want to detect existing valid data is not deleted -#[derive(Debug, Clone)] -struct ValidNotificationState { - pub timestamp: Option<u64>, - pub node_id: Option<Uuid>, - pub live_query_id: Option<Uuid>, - pub notification_id: Option<Uuid>, - pub namespace: String, - pub database: String, - pub table: String, -} - -/// Create a chain of valid state that bootstrapping should not remove. -/// As a general rule, there is no need to override the system defaults since this code is to place generic data. -/// If you see these IDs, it is because you captured this entry.
-/// So its ok to share ID between tests -async fn a_valid_notification( - tx: &mut Transaction, - args: ValidNotificationState, -) -> Result<ValidNotificationState, Error> { - let now = tx.clock().await; - let default_node_id = - Uuid::from(uuid::Uuid::parse_str("123e9d92-c975-4daf-8080-3082e83cfa9b").unwrap()); - let default_lq_id = - Uuid::from(uuid::Uuid::parse_str("ca02c2d0-31dd-4bf0-ada4-ee02b1191e0a").unwrap()); - let default_not_id = - Uuid::from(uuid::Uuid::parse_str("c952cf7d-b503-4370-802e-cd2404f2160d").unwrap()); - let entry = ValidNotificationState { - timestamp: Some(args.timestamp.unwrap_or(now.value)), - node_id: Some(args.node_id.unwrap_or(default_node_id)), - live_query_id: Some(args.live_query_id.unwrap_or(default_lq_id)), - notification_id: Some(args.notification_id.unwrap_or(default_not_id)), - ..args - }; - let mut live_stm = LiveStatement::default(); - live_stm.id = entry.live_query_id.unwrap(); - live_stm.node = entry.node_id.unwrap(); - - // Create heartbeat - tx.set_hb(entry.timestamp.unwrap().into(), entry.node_id.unwrap().0).await?; - // Create cluster node entry - tx.set_nd(entry.node_id.unwrap().0).await?; - // Create node live query registration - tx.putc_ndlq( - entry.node_id.unwrap().0, - entry.live_query_id.unwrap().0, - &entry.namespace, - &entry.database, - &entry.table, - None, - ) - .await?; - // Create table live query registration - tx.putc_tblq(&entry.namespace, &entry.database, &entry.table, live_stm, None).await?; - // TODO Create notification - // tx.putc_tbnt( - // ).await?; - Ok(entry) -} diff --git a/lib/tests/changefeeds.rs b/lib/tests/changefeeds.rs index 890ff400..735ff1c1 100644 --- a/lib/tests/changefeeds.rs +++ b/lib/tests/changefeeds.rs @@ -179,8 +179,7 @@ async fn database_change_feeds() -> Result<(), Error> { // This is neccessary to mark a point in time that can be GC'd current_time += 1; dbs.tick_at(current_time).await?; - let mut tx = dbs.transaction(Write, Optimistic).await?; - tx.print_all().await; + let tx = dbs.transaction(Write, Optimistic).await?; tx.cancel().await?; let res = &mut dbs.execute(sql, &ses, None).await?; diff --git a/lib/tests/define.rs b/lib/tests/define.rs index 3dc3811f..9bd0cc62 100644 --- a/lib/tests/define.rs +++ b/lib/tests/define.rs @@ -31,6 +31,7 @@ async fn define_statement_namespace() -> Result<(), Error> { "{ accesses: {}, namespaces: { test: 'DEFINE NAMESPACE test' }, + nodes: {}, users: {}, }", ); @@ -1254,7 +1255,7 @@ where let res = val.walk(&part); for (i, v) in res { let mut idiom = Idiom::default(); - idiom.0 = part.clone(); + idiom.0.clone_from(&part); assert_eq!(idiom, i); check(v); } @@ -1274,8 +1275,8 @@ async fn permissions_checks_define_ns() { // Define the expected results for the check statement when the test statement succeeded and when it failed let check_results = [ - vec!["{ accesses: { }, namespaces: { NS: 'DEFINE NAMESPACE NS' }, users: { } }"], - vec!["{ accesses: { }, namespaces: { }, users: { } }"], + vec!["{ accesses: { }, namespaces: { NS: 'DEFINE NAMESPACE NS' }, nodes: { }, users: { } }"], + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { } }"], ]; let test_cases = [ @@ -1439,8 +1440,8 @@ async fn permissions_checks_define_access_root() { // Define the expected results for the check statement when the test statement succeeded and when it failed let check_results = [ - vec!["{ accesses: { access: \"DEFINE ACCESS access ON ROOT TYPE JWT ALGORITHM HS512 KEY '[REDACTED]' WITH ISSUER KEY '[REDACTED]' DURATION FOR TOKEN 1h, FOR SESSION NONE\" }, namespaces: { }, users: { } }"], - vec!["{
accesses: { }, namespaces: { }, users: { } }"] + vec!["{ accesses: { access: \"DEFINE ACCESS access ON ROOT TYPE JWT ALGORITHM HS512 KEY '[REDACTED]' WITH ISSUER KEY '[REDACTED]' DURATION FOR TOKEN 1h, FOR SESSION NONE\" }, namespaces: { }, nodes: { }, users: { } }"], + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { } }"] ]; let test_cases = [ @@ -1565,8 +1566,8 @@ async fn permissions_checks_define_user_root() { // Define the expected results for the check statement when the test statement succeeded and when it failed let check_results = [ - vec!["{ accesses: { }, namespaces: { }, users: { user: \"DEFINE USER user ON ROOT PASSHASH 'secret' ROLES VIEWER DURATION FOR TOKEN 15m, FOR SESSION 6h\" } }"], - vec!["{ accesses: { }, namespaces: { }, users: { } }"] + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { user: \"DEFINE USER user ON ROOT PASSHASH 'secret' ROLES VIEWER DURATION FOR TOKEN 15m, FOR SESSION 6h\" } }"], + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { } }"] ]; let test_cases = [ diff --git a/lib/tests/info.rs b/lib/tests/info.rs index 2e80226f..8d07121c 100644 --- a/lib/tests/info.rs +++ b/lib/tests/info.rs @@ -24,10 +24,9 @@ async fn info_for_root() { let out = res.pop().unwrap().output(); assert!(out.is_ok(), "Unexpected error: {:?}", out); - let output_regex = Regex::new( - r"\{ accesses: \{ access: .* \}, namespaces: \{ NS: .* \}, users: \{ user: .* \} \}", - ) - .unwrap(); + let output_regex = + Regex::new(r"\{ accesses: \{ access: .* \}, namespaces: \{ NS: .* \}, nodes: \{ .* \}, users: \{ user: .* \} \}") + .unwrap(); let out_str = out.unwrap().to_string(); assert!( output_regex.is_match(&out_str), @@ -213,8 +212,8 @@ async fn permissions_checks_info_root() { // Define the expected results for the check statement when the test statement succeeded and when it failed let check_results = [ - vec!["{ accesses: { }, namespaces: { }, users: { } }"], - vec!["{ accesses: { }, namespaces: { }, users: { } }"], + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { } }"], + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { } }"], ]; let test_cases = [ diff --git a/lib/tests/live.rs b/lib/tests/live.rs deleted file mode 100644 index 675b266f..00000000 --- a/lib/tests/live.rs +++ /dev/null @@ -1,173 +0,0 @@ -mod helpers; -mod parse; - -use helpers::new_ds; -use surrealdb::dbs::Session; -use surrealdb::err::Error; -use surrealdb::fflags::FFLAGS; -use surrealdb::sql::Value; -use surrealdb_core::dbs::Options; - -#[tokio::test] -async fn live_query_fails_if_no_change_feed() -> Result<(), Error> { - if !FFLAGS.change_feed_live_queries.enabled() { - return Ok(()); - } - let sql = " - LIVE SELECT * FROM lq_test_123; - "; - let dbs = new_ds().await?; - let ses = Session::owner().with_ns("test").with_db("test").with_rt(true); - let res = &mut dbs.execute(sql, &ses, None).await?; - assert_eq!(res.len(), 1); - let res = res.remove(0).result; - assert!(res.is_err(), "{:?}", res); - let err = res.as_ref().err().unwrap(); - assert_eq!( - format!("{}", err), - "Failed to process Live Query: The Live Query must have a change feed for it it work" - ); - Ok(()) -} - -#[tokio::test] -async fn live_query_fails_if_change_feed_missing_diff() -> Result<(), Error> { - if !FFLAGS.change_feed_live_queries.enabled() { - return Ok(()); - } - let sql = " - DEFINE TABLE lq_test_123 CHANGEFEED 10m; - LIVE SELECT * FROM lq_test_123; - "; - let dbs = new_ds().await?; - let ses = Session::owner().with_ns("test").with_db("test").with_rt(true); - let res = 
&mut dbs.execute(sql, &ses, None).await?; - assert_eq!(res.len(), 2); - res.remove(0).result.unwrap(); - let res = res.remove(0).result; - assert!(res.is_err(), "{:?}", res); - let err = res.as_ref().err().unwrap(); - assert_eq!( - format!("{}", err), - "Failed to process Live Query: The Live Query must have a change feed that includes relative changes" - ); - Ok(()) -} - -#[tokio::test] -async fn live_query_sends_registered_lq_details() -> Result<(), Error> { - if !FFLAGS.change_feed_live_queries.enabled() { - return Ok(()); - } - let sql = " - DEFINE TABLE lq_test_123 CHANGEFEED 10m INCLUDE ORIGINAL; - LIVE SELECT * FROM lq_test_123; - "; - let mut stack = reblessive::tree::TreeStack::new(); - let dbs = new_ds().await?; - let ses = Session::owner().with_ns("test").with_db("test").with_rt(true); - let res = &mut dbs.execute(sql, &ses, None).await?; - assert_eq!(res.len(), 2); - - // Define table didnt fail - let tmp = res.remove(0).result; - assert!(tmp.is_ok()); - - // Live query returned a valid uuid - let actual = res.remove(0).result.unwrap(); - let live_id = match actual { - Value::Uuid(live_id) => live_id, - _ => panic!("Expected a UUID"), - }; - assert!(!live_id.is_nil()); - - // Create some data - let res = &mut dbs.execute("CREATE lq_test_123", &ses, None).await?; - assert_eq!(res.len(), 1); - - let result = res.remove(0); - assert!(result.result.is_ok()); - - let ctx = Default::default(); - let opt = Options::default(); - stack.enter(|stk| dbs.process_lq_notifications(stk, &ctx, &opt)).finish().await?; - - let notifications_chan = dbs.notifications().unwrap(); - - assert!(notifications_chan.try_recv().is_ok()); - assert!(notifications_chan.try_recv().is_err()); - - Ok(()) -} - -#[tokio::test] -async fn live_query_does_not_drop_notifications() -> Result<(), Error> { - let table = "lq_test_123"; - const RUNS: u16 = 1_000; - let define_stm = match FFLAGS.change_feed_live_queries.enabled() { - true => format!("DEFINE TABLE {table} CHANGEFEED 10m INCLUDE ORIGINAL;"), - false => format!("DEFINE TABLE {table};"), - }; - let sql = format!( - " - {define_stm} - LIVE SELECT * FROM lq_test_123; - " - ); - let mut stack = reblessive::tree::TreeStack::new(); - let dbs = new_ds().await?; - let ses = Session::owner().with_ns("test").with_db("test").with_rt(true); - let res = &mut dbs.execute(&sql, &ses, None).await?; - assert_eq!(res.len(), 2); - - // Define table didnt fail - let tmp = res.remove(0).result; - assert!(tmp.is_ok()); - - // Live query returned a valid uuid - let actual = res.remove(0).result.unwrap(); - let live_id = match actual { - Value::Uuid(live_id) => live_id, - _ => panic!("Expected a UUID"), - }; - assert!(!live_id.is_nil()); - - // Create some data - for i in 0..RUNS { - let test_sql = format!( - " - CREATE {table}:test_case_{i}; - UPDATE {table}:test_case_{i} SET name = 'test_case_{i}'; - DELETE {table}:test_case_{i}; - " - ); - let res = &mut dbs.execute(&test_sql, &ses, None).await?; - // All statements succeed - let expected = 3; - assert_eq!(res.len(), expected); - for _ in 0..expected { - let result = res.remove(0); - assert!(result.result.is_ok()); - } - // Process the notifications - let ctx = Default::default(); - let opt = Options::default(); - stack.enter(|stk| dbs.process_lq_notifications(stk, &ctx, &opt)).finish().await?; - - let notifications_chan = dbs.notifications().unwrap(); - - for case in 0..expected { - tokio::select! 
{ - _ = tokio::time::sleep(tokio::time::Duration::from_millis(1000)) => { - panic!("Timeout - Failed at run {} for case {}", i, case); - } - msg = notifications_chan.recv() => { - assert!(msg.is_ok(), "Failed at run {} for case {}", i, case); - } - } - } - assert!(notifications_chan.try_recv().is_err()); - } - - Ok(()) -} diff --git a/lib/tests/remove.rs b/lib/tests/remove.rs index 6b670923..19559470 100644 --- a/lib/tests/remove.rs +++ b/lib/tests/remove.rs @@ -12,7 +12,6 @@ use std::collections::HashMap; use surrealdb::dbs::Session; use surrealdb::err::Error; use surrealdb::iam::Role; -use surrealdb::kvs::{LockType::*, TransactionType::*}; use surrealdb::sql::Value; #[tokio::test] @@ -212,12 +211,6 @@ async fn remove_statement_index() -> Result<(), Error> { }", ); assert_eq!(tmp, val); - - let mut tx = dbs.transaction(Read, Optimistic).await?; - for ix in ["uniq_isbn", "idx_author", "ft_title"] { - assert_empty_prefix!(&mut tx, surrealdb::key::index::all::new("test", "test", "book", ix)); - } - // Every index store cache has been removed assert!(dbs.index_store().is_empty().await); Ok(()) @@ -589,8 +582,8 @@ async fn permissions_checks_remove_ns() { // Define the expected results for the check statement when the test statement succeeded and when it failed let check_results = [ - vec!["{ accesses: { }, namespaces: { }, users: { } }"], - vec!["{ accesses: { }, namespaces: { NS: 'DEFINE NAMESPACE NS' }, users: { } }"], + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { } }"], + vec!["{ accesses: { }, namespaces: { NS: 'DEFINE NAMESPACE NS' }, nodes: { }, users: { } }"], ]; let test_cases = [ @@ -757,8 +750,8 @@ async fn permissions_checks_remove_root_access() { // Define the expected results for the check statement when the test statement succeeded and when it failed let check_results = [ - vec!["{ accesses: { }, namespaces: { }, users: { } }"], - vec!["{ accesses: { access: \"DEFINE ACCESS access ON ROOT TYPE JWT ALGORITHM HS512 KEY '[REDACTED]' WITH ISSUER KEY '[REDACTED]' DURATION FOR TOKEN 1h, FOR SESSION NONE\" }, namespaces: { }, users: { } }"], + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { } }"], + vec!["{ accesses: { access: \"DEFINE ACCESS access ON ROOT TYPE JWT ALGORITHM HS512 KEY '[REDACTED]' WITH ISSUER KEY '[REDACTED]' DURATION FOR TOKEN 1h, FOR SESSION NONE\" }, namespaces: { }, nodes: { }, users: { } }"], ]; let test_cases = [ @@ -883,8 +876,8 @@ async fn permissions_checks_remove_root_user() { // Define the expected results for the check statement when the test statement succeeded and when it failed let check_results = [ - vec!["{ accesses: { }, namespaces: { }, users: { } }"], - vec!["{ accesses: { }, namespaces: { }, users: { user: \"DEFINE USER user ON ROOT PASSHASH 'secret' ROLES VIEWER DURATION FOR TOKEN 1h, FOR SESSION NONE\" } }"], + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { } }"], + vec!["{ accesses: { }, namespaces: { }, nodes: { }, users: { user: \"DEFINE USER user ON ROOT PASSHASH 'secret' ROLES VIEWER DURATION FOR TOKEN 1h, FOR SESSION NONE\" } }"], ]; let test_cases = [ diff --git a/lib/tests/strict.rs b/lib/tests/strict.rs index cc4fbf1c..5d2ac9af 100644 --- a/lib/tests/strict.rs +++ b/lib/tests/strict.rs @@ -235,6 +235,7 @@ async fn loose_mode_all_ok() -> Result<(), Error> { "{ accesses: {}, namespaces: { test: 'DEFINE NAMESPACE test' }, + nodes: {}, users: {}, }", ); diff --git a/lib/tests/util.rs b/lib/tests/util.rs index 0d529b2e..97519bc9 100644 --- a/lib/tests/util.rs +++ b/lib/tests/util.rs @@ -9,7 +9,7 @@ 
macro_rules! assert_empty_val { #[allow(unused_macros)] macro_rules! assert_empty_prefix { ($tx:expr, $rng:expr) => {{ - let r = $tx.getp($rng, 1).await?; + let r = $tx.getp($rng).await?; assert!(r.is_empty()); }}; } @@ -17,7 +17,7 @@ macro_rules! assert_empty_prefix { #[allow(unused_macros)] macro_rules! assert_empty_range { ($tx:expr, $rng:expr) => {{ - let r = $tx.getr($rng, 1).await?; + let r = $tx.getr($rng).await?; assert!(r.is_empty()); }}; } diff --git a/src/cli/mod.rs b/src/cli/mod.rs index fc0a6da6..4dcf652b 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -15,7 +15,9 @@ mod version; mod version_client; use crate::cli::version_client::VersionClient; -use crate::cnf::{DEBUG_BUILD_WARNING, LOGO, PKG_VERSION}; +#[cfg(debug_assertions)] +use crate::cnf::DEBUG_BUILD_WARNING; +use crate::cnf::{LOGO, PKG_VERSION}; use crate::env::RELEASE; use clap::{Parser, Subcommand}; pub use config::CF; diff --git a/src/cli/start.rs b/src/cli/start.rs index 76d26b01..941d7c0b 100644 --- a/src/cli/start.rs +++ b/src/cli/start.rs @@ -9,7 +9,7 @@ use crate::env; use crate::err::Error; use crate::net::{self, client_ip::ClientIp}; use clap::Args; -use opentelemetry::Context as TelemetryContext; +use opentelemetry::Context; use std::net::SocketAddr; use std::path::PathBuf; use std::time::Duration; @@ -153,12 +153,10 @@ pub async fn init( // Initialize opentelemetry and logging crate::telemetry::builder().with_filter(log).init(); // Start metrics subsystem - crate::telemetry::metrics::init(&TelemetryContext::current()) - .expect("failed to initialize metrics"); + crate::telemetry::metrics::init(&Context::current()).expect("failed to initialize metrics"); - // Check if a banner should be outputted + // Check if we should output a banner if !no_banner { - // Output SurrealDB logo println!("{LOGO}"); } // Clean the path @@ -168,7 +166,13 @@ pub async fn init( } else { endpoint.path }; - // Setup the cli options + // Extract the certificate and key + let (crt, key) = if let Some(val) = web { + (val.web_crt, val.web_key) + } else { + (None, None) + }; + // Setup the command-line options let _ = config::CF.set(Config { bind: listen_addresses.first().cloned().unwrap(), client_ip, @@ -176,9 +180,9 @@ pub async fn init( user, pass, no_identification_headers, - crt: web.as_ref().and_then(|x| x.web_crt.clone()), - key: web.as_ref().and_then(|x| x.web_key.clone()), engine: Some(EngineOptions::default().with_tick_interval(tick_interval)), + crt, + key, }); // This is the cancellation token propagated down to // all the async functions that needs to be stopped gracefully. diff --git a/src/cli/validator/parser/env_filter.rs b/src/cli/validator/parser/env_filter.rs index f13f8151..bb33ffd3 100644 --- a/src/cli/validator/parser/env_filter.rs +++ b/src/cli/validator/parser/env_filter.rs @@ -1,9 +1,8 @@ +use crate::telemetry::filter_from_value; use clap::builder::{NonEmptyStringValueParser, PossibleValue, TypedValueParser}; use clap::error::{ContextKind, ContextValue, ErrorKind}; use tracing_subscriber::EnvFilter; -use crate::telemetry::filter_from_value; - #[derive(Debug)] pub struct CustomEnvFilter(pub EnvFilter); diff --git a/src/cnf/mod.rs b/src/cnf/mod.rs index a7289bf8..0f761504 100644 --- a/src/cnf/mod.rs +++ b/src/cnf/mod.rs @@ -14,6 +14,7 @@ Y88b d88P Y88b 888 888 888 Y8b. 888 888 888 888 .d88P 888 d88P "; +#[cfg(debug_assertions)] pub const DEBUG_BUILD_WARNING: &str = "\ ┌─────────────────────────────────────────────────────────────────────────────┐ │ !!! THIS IS A DEBUG BUILD !!! 
│ diff --git a/src/dbs/mod.rs b/src/dbs/mod.rs index a6bbbddc..2303a88e 100644 --- a/src/dbs/mod.rs +++ b/src/dbs/mod.rs @@ -29,7 +29,7 @@ pub struct StartCommandDbsOptions { unauthenticated: bool, #[command(flatten)] #[command(next_help_heading = "Capabilities")] - caps: DbsCapabilities, + capabilities: DbsCapabilities, #[arg(help = "Sets the directory for storing temporary database files")] #[arg(env = "SURREAL_TEMPORARY_DIRECTORY", long = "temporary-directory")] #[arg(value_parser = super::cli::validator::dir_exists)] @@ -208,12 +208,14 @@ pub async fn init( query_timeout, transaction_timeout, unauthenticated, - caps, + capabilities, temporary_directory, }: StartCommandDbsOptions, ) -> Result<(), Error> { // Get local copy of options let opt = CF.get().unwrap(); + // Convert the capabilities + let capabilities = capabilities.into(); // Log specified strict mode debug!("Database strict mode is {strict_mode}"); // Log specified query timeout @@ -228,41 +230,26 @@ pub async fn init( if unauthenticated { warn!("❌🔒 IMPORTANT: Authentication is disabled. This is not recommended for production use. 🔒❌"); } - - let caps = caps.into(); - debug!("Server capabilities: {caps}"); - - #[allow(unused_mut)] + // Log the specified server capabilities + debug!("Server capabilities: {capabilities}"); // Parse and setup the desired kv datastore - let mut dbs = Datastore::new(&opt.path) + let dbs = Datastore::new(&opt.path) .await? .with_notifications() .with_strict_mode(strict_mode) .with_query_timeout(query_timeout) .with_transaction_timeout(transaction_timeout) .with_auth_enabled(!unauthenticated) - .with_capabilities(caps); - - let mut dbs = match temporary_directory { - Some(tmp_dir) => dbs.with_temporary_directory(tmp_dir), - _ => dbs, - }; - - if let Some(engine_options) = opt.engine { - dbs = dbs.with_engine_options(engine_options); + .with_temporary_directory(temporary_directory) + .with_capabilities(capabilities); + // Setup initial server auth credentials + if let (Some(user), Some(pass)) = (opt.user.as_ref(), opt.pass.as_ref()) { + dbs.setup_initial_creds(user, pass).await?; } - // Make immutable - let dbs = dbs; - + // Bootstrap the datastore dbs.bootstrap().await?; - - if let Some(user) = opt.user.as_ref() { - dbs.setup_initial_creds(user, opt.pass.as_ref().unwrap()).await?; - } - // Store database instance let _ = DB.set(Arc::new(dbs)); - // All ok Ok(()) } @@ -312,7 +299,8 @@ mod tests { .get_root_user(creds.username) .await .unwrap() - .hash; + .hash + .clone(); ds.setup_initial_creds(creds.username, creds.password).await.unwrap(); assert_eq!( @@ -324,6 +312,7 @@ mod tests { .await .unwrap() .hash + .clone() ) } diff --git a/src/mac/mod.rs b/src/mac/mod.rs index de1257c0..200a18c4 100644 --- a/src/mac/mod.rs +++ b/src/mac/mod.rs @@ -1,8 +1,9 @@ +/// Creates a new b-tree map of key-value pairs macro_rules! map { - ($($k:expr => $v:expr),* $(,)? $( => $x:expr )?) => {{ + ($($k:expr $(, if let $grant:pat = $check:expr)? $(, if $guard:expr)? => $v:expr),* $(,)? $( => $x:expr )?) => {{ let mut m = ::std::collections::BTreeMap::new(); - $(m.extend($x.iter().map(|(k, v)| (k.clone(), v.clone())));)? - $(m.insert($k, $v);)+ + $(m.extend($x.iter().map(|(k, v)| (k.clone(), v.clone())));)? + $( $(if let $grant = $check)? $(if $guard)? 
{ m.insert($k, $v); };)+ m }}; } diff --git a/src/net/health.rs b/src/net/health.rs index df6e13c6..9182bc7f 100644 --- a/src/net/health.rs +++ b/src/net/health.rs @@ -22,12 +22,24 @@ async fn handler() -> impl IntoResponse { // The transaction failed to start Err(_) => Err(Error::InvalidStorage), // The transaction was successful - Ok(mut tx) => { + Ok(tx) => { // Cancel the transaction trace!("Health endpoint cancelling transaction"); - let _ = tx.cancel().await; - // Return the response - Ok(()) + // Attempt to fetch data + match tx.get(vec![0x00]).await { + Err(_) => { + // Ensure the transaction is cancelled + let _ = tx.cancel().await; + // Return an error for this endpoint + Err(Error::InvalidStorage) + } + Ok(_) => { + // Ensure the transaction is cancelled + let _ = tx.cancel().await; + // Return success for this endpoint + Ok(()) + } + } } } } diff --git a/src/net/ml.rs b/src/net/ml.rs index c86b1983..411ed95b 100644 --- a/src/net/ml.rs +++ b/src/net/ml.rs @@ -102,7 +102,7 @@ async fn export( // Check the permissions level db.check(&session, View, Model.on_db(&nsv, &dbv))?; // Start a new readonly transaction - let mut tx = db.transaction(Read, Optimistic).await?; + let tx = db.transaction(Read, Optimistic).await?; // Attempt to get the model definition let info = tx.get_db_model(&nsv, &dbv, &name, &version).await?; // Calculate the path of the model file diff --git a/src/rpc/connection.rs b/src/rpc/connection.rs index 348733ce..fff75afb 100644 --- a/src/rpc/connection.rs +++ b/src/rpc/connection.rs @@ -71,23 +71,22 @@ impl Connection { /// Serve the RPC endpoint pub async fn serve(rpc: Arc<RwLock<Connection>>, ws: WebSocket) { - // Get the WebSocket ID + // Get the RPC lock let rpc_lock = rpc.read().await; - // Get the WebSocket ID + // Get the WebSocket id let id = rpc_lock.id; + // Get the WebSocket state let state = rpc_lock.state.clone(); - + // Log the succesful WebSocket connection + trace!("WebSocket {} connected", id); // Split the socket into sending and receiving streams let (sender, receiver) = ws.split(); // Create an internal channel for sending and receiving let internal_sender = rpc_lock.channels.0.clone(); let internal_receiver = rpc_lock.channels.1.clone(); - - // drop the lock early so rpc is free to be written to. + // Drop the lock early so rpc is free to be written to.
std::mem::drop(rpc_lock); - trace!("WebSocket {} connected", id); - if let Err(err) = telemetry::metrics::ws::on_connect() { error!("Error running metrics::ws::on_connect hook: {}", err); } @@ -126,9 +125,8 @@ impl Connection { true }); - // Garbage collect queries - if let Err(e) = DB.get().unwrap().garbage_collect_dead_session(gc.as_slice()).await { - error!("Failed to garbage collect dead sessions: {:?}", e); + if let Err(err) = DB.get().unwrap().delete_queries(gc).await { + error!("Error handling RPC connection: {}", err); } if let Err(err) = telemetry::metrics::ws::on_disconnect() { diff --git a/src/telemetry/logs/mod.rs b/src/telemetry/logs/mod.rs index 95eeb027..7ede22ae 100644 --- a/src/telemetry/logs/mod.rs +++ b/src/telemetry/logs/mod.rs @@ -1,18 +1,36 @@ +use crate::cli::validator::parser::env_filter::CustomEnvFilter; use tracing::Subscriber; use tracing_subscriber::fmt::format::FmtSpan; use tracing_subscriber::Layer; -use crate::cli::validator::parser::env_filter::CustomEnvFilter; - pub fn new(filter: CustomEnvFilter) -> Box<dyn Layer<S> + Send + Sync> where S: Subscriber + for<'a> tracing_subscriber::registry::LookupSpan<'a> + Send + Sync, { - tracing_subscriber::fmt::layer() - .compact() - .with_ansi(true) - .with_span_events(FmtSpan::NONE) - .with_writer(std::io::stderr) - .with_filter(filter.0) - .boxed() + #[cfg(not(debug_assertions))] + { + tracing_subscriber::fmt::layer() + .compact() + .with_ansi(true) + .with_target(true) + .with_span_events(FmtSpan::NONE) + .with_writer(std::io::stderr) + .with_filter(filter.0) + .boxed() + } + #[cfg(debug_assertions)] + { + tracing_subscriber::fmt::layer() + .compact() + .with_ansi(true) + .with_file(true) + .with_target(true) + .with_line_number(true) + .with_thread_ids(false) + .with_thread_names(false) + .with_span_events(FmtSpan::NONE) + .with_writer(std::io::stderr) + .with_filter(filter.0) + .boxed() + } } diff --git a/src/telemetry/mod.rs b/src/telemetry/mod.rs index 7e55c39a..a03ccc10 100644 --- a/src/telemetry/mod.rs +++ b/src/telemetry/mod.rs @@ -2,8 +2,6 @@ mod logs; pub mod metrics; pub mod traces; -use std::time::Duration; - use crate::cli::validator::parser::env_filter::CustomEnvFilter; use once_cell::sync::Lazy; use opentelemetry::metrics::MetricsError; @@ -11,8 +9,10 @@ use opentelemetry::sdk::resource::{ EnvResourceDetector, SdkProvidedResourceDetector, TelemetryResourceDetector, }; use opentelemetry::sdk::Resource; -use opentelemetry::{Context as TelemetryContext, KeyValue}; +use opentelemetry::{Context, KeyValue}; +use std::time::Duration; use tracing::{Level, Subscriber}; +use tracing_subscriber::filter::ParseError; use tracing_subscriber::prelude::*; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; @@ -72,14 +72,13 @@ impl Builder { /// Build a tracing dispatcher with the fmt subscriber (logs) and the chosen tracer subscriber pub fn build(self) -> Box<dyn Subscriber + Send + Sync> { + // Setup a registry for composing layers let registry = tracing_subscriber::registry(); - // Setup logging layer let registry = registry.with(logs::new(self.filter.clone())); - // Setup tracing layer let registry = registry.with(traces::new(self.filter)); - + // Return the registry Box::new(registry) } @@ -90,25 +89,33 @@ impl Builder { } pub fn shutdown() -> Result<(), MetricsError> { - // Flush all telemetry data + // Flush all telemetry data and block until done opentelemetry::global::shutdown_tracer_provider(); - metrics::shutdown(&TelemetryContext::current())?; - - Ok(()) + // Shutdown the metrics provider fully
metrics::shutdown(&Context::current()) } /// Create an EnvFilter from the given value. If the value is not a valid log level, it will be treated as EnvFilter directives. -pub fn filter_from_value(v: &str) -> Result<EnvFilter, tracing_subscriber::filter::ParseError> { +pub fn filter_from_value(v: &str) -> Result<EnvFilter, ParseError> { match v { // Don't show any logs at all "none" => Ok(EnvFilter::default()), - // Check if we should show all log levels - "full" => Ok(EnvFilter::default().add_directive(Level::TRACE.into())), - // Otherwise, let's only show errors + // Otherwise, let's show only errors "error" => Ok(EnvFilter::default().add_directive(Level::ERROR.into())), + // Otherwise, let's show warnings and above + "warn" => Ok(EnvFilter::default().add_directive(Level::WARN.into())), + // Otherwise, let's show info and above + "info" => Ok(EnvFilter::default().add_directive(Level::INFO.into())), + // Otherwise, let's show debugs and above + "debug" => EnvFilter::builder() + .parse("warn,surreal=debug,surrealdb=debug,surrealdb::core::kvs=info"), // Specify the log level for each code area - "warn" | "info" | "debug" | "trace" => EnvFilter::builder() - .parse(format!("error,surreal={v},surrealdb={v},surrealdb::core::kvs::tx=error")), + "trace" => EnvFilter::builder() + .parse("warn,surreal=trace,surrealdb=trace,surrealdb::core::kvs=info"), + // Check if we should show all surreal logs + "full" => EnvFilter::builder().parse("debug,surreal=trace,surrealdb=trace"), + // Check if we should show all module logs + "all" => Ok(EnvFilter::default().add_directive(Level::TRACE.into())), // Let's try to parse the custom log level _ => EnvFilter::builder().parse(v), } diff --git a/supply-chain/audits.toml b/supply-chain/audits.toml index a430e01a..b4c5fbec 100644 --- a/supply-chain/audits.toml +++ b/supply-chain/audits.toml @@ -100,6 +100,12 @@ user-id = 3987 # Rushmore Mushambi (rushmorem) start = "2023-08-29" end = "2025-01-24" +[[trusted.surrealdb-tikv-client]] +criteria = "safe-to-deploy" +user-id = 217605 # Yusuke Kuoka (mumoshu) +start = "2023-06-19" +end = "2025-07-08" + [[trusted.surrealkv]] criteria = "safe-to-deploy" user-id = 145457 # Tobie Morgan Hitchcock (tobiemh) diff --git a/supply-chain/config.toml b/supply-chain/config.toml index eb72dc4e..02c53a91 100644 --- a/supply-chain/config.toml +++ b/supply-chain/config.toml @@ -616,19 +616,19 @@ version = "0.11.0" criteria = "safe-to-deploy" [[exemptions.foundationdb]] -version = "0.8.0" +version = "0.9.0" criteria = "safe-to-deploy" [[exemptions.foundationdb-gen]] -version = "0.8.0" +version = "0.9.0" criteria = "safe-to-deploy" [[exemptions.foundationdb-macros]] -version = "0.2.0" +version = "0.3.0" criteria = "safe-to-deploy" [[exemptions.foundationdb-sys]] -version = "0.8.0" +version = "0.9.0" criteria = "safe-to-deploy" [[exemptions.fst]] @@ -647,10 +647,6 @@ criteria = "safe-to-deploy" version = "0.3.30" criteria = "safe-to-deploy" -[[exemptions.futures-concurrency]] -version = "7.5.0" -criteria = "safe-to-deploy" - [[exemptions.futures-executor]] version = "0.3.30" criteria = "safe-to-deploy" @@ -1319,10 +1315,6 @@ criteria = "safe-to-deploy" version = "0.26.0" criteria = "safe-to-deploy" -[[exemptions.quick_cache]] -version = "0.4.2" -criteria = "safe-to-deploy" - [[exemptions.radium]] version = "0.7.0" criteria = "safe-to-deploy" @@ -1604,7 +1596,7 @@ version = "0.6.0" criteria = "safe-to-deploy" [[exemptions.serde_bytes]] -version = "0.11.14" +version = "0.11.15" criteria = "safe-to-deploy" [[exemptions.serde_html_form]] @@ -1743,10 +1735,6 @@ criteria = "safe-to-deploy" version = "0.11.1" criteria =
"safe-to-deploy" -[[exemptions.surrealdb-tikv-client]] -version = "0.2.0-surreal.2" -criteria = "safe-to-deploy" - [[exemptions.symbolic-common]] version = "12.8.0" criteria = "safe-to-deploy" @@ -1819,12 +1807,16 @@ criteria = "safe-to-run" version = "0.2.15" criteria = "safe-to-run" -[[exemptions.time]] -version = "0.3.36" +[[exemptions.thiserror]] +version = "1.0.61" criteria = "safe-to-deploy" -[[exemptions.time-macros]] -version = "0.2.17" +[[exemptions.thiserror-impl]] +version = "1.0.61" +criteria = "safe-to-deploy" + +[[exemptions.time]] +version = "0.3.36" criteria = "safe-to-deploy" [[exemptions.tiny-keccak]] @@ -1836,7 +1828,7 @@ version = "1.2.1" criteria = "safe-to-deploy" [[exemptions.tokio]] -version = "1.37.0" +version = "1.38.0" criteria = "safe-to-deploy" [[exemptions.tokio-io-timeout]] @@ -1844,7 +1836,7 @@ version = "1.2.0" criteria = "safe-to-deploy" [[exemptions.tokio-macros]] -version = "2.2.0" +version = "2.3.0" criteria = "safe-to-deploy" [[exemptions.tokio-rustls]] @@ -1884,7 +1876,7 @@ version = "0.8.3" criteria = "safe-to-deploy" [[exemptions.tonic]] -version = "0.9.2" +version = "0.10.2" criteria = "safe-to-deploy" [[exemptions.tower]] diff --git a/supply-chain/imports.lock b/supply-chain/imports.lock index 91daa939..1a3e3978 100644 --- a/supply-chain/imports.lock +++ b/supply-chain/imports.lock @@ -3,7 +3,7 @@ [[unpublished.surrealdb]] version = "2.0.0" -audited_as = "1.5.2" +audited_as = "1.5.4" [[unpublished.surrealdb-core]] version = "2.0.0" @@ -66,8 +66,8 @@ user-login = "tobiemh" user-name = "Tobie Morgan Hitchcock" [[publisher.echodb]] -version = "0.6.0" -when = "2024-04-05" +version = "0.7.0" +when = "2024-06-10" user-id = 145457 user-login = "tobiemh" user-name = "Tobie Morgan Hitchcock" @@ -87,8 +87,8 @@ user-login = "Amanieu" user-name = "Amanieu d'Antras" [[publisher.indxdb]] -version = "0.4.0" -when = "2023-06-13" +version = "0.5.0" +when = "2024-06-10" user-id = 145457 user-login = "tobiemh" user-name = "Tobie Morgan Hitchcock" @@ -108,8 +108,8 @@ user-login = "rushmorem" user-name = "Rushmore Mushambi" [[publisher.revision]] -version = "0.7.0" -when = "2024-04-17" +version = "0.7.1" +when = "2024-06-19" user-id = 145457 user-login = "tobiemh" user-name = "Tobie Morgan Hitchcock" @@ -129,8 +129,8 @@ user-login = "tobiemh" user-name = "Tobie Morgan Hitchcock" [[publisher.surrealdb]] -version = "1.5.2" -when = "2024-06-06" +version = "1.5.4" +when = "2024-07-10" user-id = 145457 user-login = "tobiemh" user-name = "Tobie Morgan Hitchcock" @@ -156,9 +156,16 @@ user-id = 3987 user-login = "rushmorem" user-name = "Rushmore Mushambi" +[[publisher.surrealdb-tikv-client]] +version = "0.3.0-surreal.1" +when = "2024-06-24" +user-id = 217605 +user-login = "mumoshu" +user-name = "Yusuke Kuoka" + [[publisher.surrealkv]] -version = "0.1.5" -when = "2024-04-24" +version = "0.3.0" +when = "2024-07-04" user-id = 145457 user-login = "tobiemh" user-name = "Tobie Morgan Hitchcock" @@ -199,8 +206,8 @@ user-login = "Manishearth" user-name = "Manish Goregaokar" [[publisher.vart]] -version = "0.2.1" -when = "2024-04-24" +version = "0.4.0" +when = "2024-07-04" user-id = 145457 user-login = "tobiemh" user-name = "Tobie Morgan Hitchcock" @@ -610,18 +617,6 @@ criteria = "safe-to-deploy" version = "1.0.1" notes = "No unsafe usage or ambient capabilities" -[[audits.embark-studios.audits.thiserror]] -who = "Johan Andersson " -criteria = "safe-to-deploy" -version = "1.0.40" -notes = "Wrapper over implementation crate, found no unsafe or ambient capabilities used" - 
diff --git a/supply-chain/imports.lock b/supply-chain/imports.lock
index 91daa939..1a3e3978 100644
--- a/supply-chain/imports.lock
+++ b/supply-chain/imports.lock
@@ -3,7 +3,7 @@
 
 [[unpublished.surrealdb]]
 version = "2.0.0"
-audited_as = "1.5.2"
+audited_as = "1.5.4"
 
 [[unpublished.surrealdb-core]]
 version = "2.0.0"
@@ -66,8 +66,8 @@ user-login = "tobiemh"
 user-name = "Tobie Morgan Hitchcock"
 
 [[publisher.echodb]]
-version = "0.6.0"
-when = "2024-04-05"
+version = "0.7.0"
+when = "2024-06-10"
 user-id = 145457
 user-login = "tobiemh"
 user-name = "Tobie Morgan Hitchcock"
@@ -87,8 +87,8 @@ user-login = "Amanieu"
 user-name = "Amanieu d'Antras"
 
 [[publisher.indxdb]]
-version = "0.4.0"
-when = "2023-06-13"
+version = "0.5.0"
+when = "2024-06-10"
 user-id = 145457
 user-login = "tobiemh"
 user-name = "Tobie Morgan Hitchcock"
@@ -108,8 +108,8 @@ user-login = "rushmorem"
 user-name = "Rushmore Mushambi"
 
 [[publisher.revision]]
-version = "0.7.0"
-when = "2024-04-17"
+version = "0.7.1"
+when = "2024-06-19"
 user-id = 145457
 user-login = "tobiemh"
 user-name = "Tobie Morgan Hitchcock"
@@ -129,8 +129,8 @@ user-login = "tobiemh"
 user-name = "Tobie Morgan Hitchcock"
 
 [[publisher.surrealdb]]
-version = "1.5.2"
-when = "2024-06-06"
+version = "1.5.4"
+when = "2024-07-10"
 user-id = 145457
 user-login = "tobiemh"
 user-name = "Tobie Morgan Hitchcock"
@@ -156,9 +156,16 @@ user-id = 3987
 user-login = "rushmorem"
 user-name = "Rushmore Mushambi"
 
+[[publisher.surrealdb-tikv-client]]
+version = "0.3.0-surreal.1"
+when = "2024-06-24"
+user-id = 217605
+user-login = "mumoshu"
+user-name = "Yusuke Kuoka"
+
 [[publisher.surrealkv]]
-version = "0.1.5"
-when = "2024-04-24"
+version = "0.3.0"
+when = "2024-07-04"
 user-id = 145457
 user-login = "tobiemh"
 user-name = "Tobie Morgan Hitchcock"
@@ -199,8 +206,8 @@ user-login = "Manishearth"
 user-name = "Manish Goregaokar"
 
 [[publisher.vart]]
-version = "0.2.1"
-when = "2024-04-24"
+version = "0.4.0"
+when = "2024-07-04"
 user-id = 145457
 user-login = "tobiemh"
 user-name = "Tobie Morgan Hitchcock"
@@ -610,18 +617,6 @@ criteria = "safe-to-deploy"
 version = "1.0.1"
 notes = "No unsafe usage or ambient capabilities"
 
-[[audits.embark-studios.audits.thiserror]]
-who = "Johan Andersson "
-criteria = "safe-to-deploy"
-version = "1.0.40"
-notes = "Wrapper over implementation crate, found no unsafe or ambient capabilities used"
-
-[[audits.embark-studios.audits.thiserror-impl]]
-who = "Johan Andersson "
-criteria = "safe-to-deploy"
-version = "1.0.40"
-notes = "Found no unsafe or ambient capabilities used"
-
 [[audits.embark-studios.audits.utf8parse]]
 who = "Johan Andersson "
 criteria = "safe-to-deploy"
@@ -968,6 +963,13 @@ Previously reviewed during security review and the audit is grandparented in.
 """
 aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
 
+[[audits.google.audits.take_mut]]
+who = "David Koloski "
+criteria = "safe-to-deploy"
+version = "0.2.2"
+notes = "Reviewed on https://fxrev.dev/883543"
+aggregated-from = "https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/third_party/rust_crates/supply-chain/audits.toml?format=TEXT"
+
 [[audits.google.audits.termcolor]]
 who = "danakj@chromium.org"
 criteria = "safe-to-run"
@@ -1117,16 +1119,6 @@ who = "Ameer Ghani "
 criteria = "safe-to-deploy"
 version = "1.12.1"
 
-[[audits.isrg.audits.thiserror]]
-who = "Brandon Pitman "
-criteria = "safe-to-deploy"
-delta = "1.0.40 -> 1.0.43"
-
-[[audits.isrg.audits.thiserror-impl]]
-who = "Brandon Pitman "
-criteria = "safe-to-deploy"
-delta = "1.0.40 -> 1.0.43"
-
 [[audits.isrg.audits.untrusted]]
 who = "David Cook "
 criteria = "safe-to-deploy"
@@ -1653,6 +1645,24 @@ criteria = "safe-to-deploy"
 delta = "0.1.0 -> 0.1.1"
 aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
 
+[[audits.mozilla.audits.time-macros]]
+who = "Kershaw Chang "
+criteria = "safe-to-deploy"
+version = "0.2.6"
+aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
+
+[[audits.mozilla.audits.time-macros]]
+who = "Kershaw Chang "
+criteria = "safe-to-deploy"
+delta = "0.2.6 -> 0.2.10"
+aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
+
+[[audits.mozilla.audits.time-macros]]
+who = "Alex Franchuk "
+criteria = "safe-to-deploy"
+delta = "0.2.10 -> 0.2.18"
+aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
+
 [[audits.mozilla.audits.unicode-bidi]]
 who = "Makoto Kato "
 criteria = "safe-to-deploy"
@@ -1915,72 +1925,6 @@ criteria = "safe-to-deploy"
 delta = "2.1.0 -> 2.2.0"
 aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
 
-[[audits.zcash.audits.thiserror]]
-who = "Jack Grigg "
-criteria = "safe-to-deploy"
-delta = "1.0.43 -> 1.0.48"
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
-[[audits.zcash.audits.thiserror]]
-who = "Jack Grigg "
-criteria = "safe-to-deploy"
-delta = "1.0.48 -> 1.0.51"
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
-[[audits.zcash.audits.thiserror]]
-who = "Jack Grigg "
-criteria = "safe-to-deploy"
-delta = "1.0.51 -> 1.0.52"
-notes = "Reruns the build script if the `RUSTC_BOOTSTRAP` env variable changes."
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
-[[audits.zcash.audits.thiserror]]
-who = "Jack Grigg "
-criteria = "safe-to-deploy"
-delta = "1.0.52 -> 1.0.56"
-notes = """
-Build script changes are to refactor the existing probe into a separate file
-(which removes a filesystem write), and adjust how it gets rerun in response to
-changes in the build environment.
-"""
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
-[[audits.zcash.audits.thiserror]]
-who = "Daira-Emma Hopwood "
-criteria = "safe-to-deploy"
-delta = "1.0.56 -> 1.0.58"
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
-[[audits.zcash.audits.thiserror-impl]]
-who = "Jack Grigg "
-criteria = "safe-to-deploy"
-delta = "1.0.43 -> 1.0.48"
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
-[[audits.zcash.audits.thiserror-impl]]
-who = "Jack Grigg "
-criteria = "safe-to-deploy"
-delta = "1.0.48 -> 1.0.51"
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
-[[audits.zcash.audits.thiserror-impl]]
-who = "Jack Grigg "
-criteria = "safe-to-deploy"
-delta = "1.0.51 -> 1.0.52"
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
-[[audits.zcash.audits.thiserror-impl]]
-who = "Jack Grigg "
-criteria = "safe-to-deploy"
-delta = "1.0.52 -> 1.0.56"
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
-[[audits.zcash.audits.thiserror-impl]]
-who = "Daira-Emma Hopwood "
-criteria = "safe-to-deploy"
-delta = "1.0.56 -> 1.0.58"
-aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml"
-
 [[audits.zcash.audits.thread_local]]
 who = "Jack Grigg "
 criteria = "safe-to-deploy"
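The tests/cli_integration.rs hunk below inlines the debug-banner helper into the test body, so the logic and its call site sit under a single #[cfg(debug_assertions)] block. A minimal sketch of that gating pattern, where run_query is a hypothetical stand-in for the common::run helper the test uses:

// Sketch only: `run_query` stands in for the CLI helper; a real debug
// build of the binary prints this banner on startup.
#[allow(dead_code)]
fn run_query(_sql: &str) -> String {
    "Debug builds are not intended for production use".to_string()
}

fn main() {
    #[cfg(debug_assertions)]
    {
        // Compiled only into debug builds; a release build drops this
        // block, so the assertion never runs against a binary that does
        // not print the banner.
        let output = run_query("CREATE any:any;");
        assert!(output.contains("Debug builds are not intended for production use"));
    }
    println!("debug-banner check complete");
}

Keeping the check inside one conditionally compiled block also means release builds carry no leftover helper that is never called.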
-""" -aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml" - -[[audits.zcash.audits.thiserror]] -who = "Daira-Emma Hopwood " -criteria = "safe-to-deploy" -delta = "1.0.56 -> 1.0.58" -aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml" - -[[audits.zcash.audits.thiserror-impl]] -who = "Jack Grigg " -criteria = "safe-to-deploy" -delta = "1.0.43 -> 1.0.48" -aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml" - -[[audits.zcash.audits.thiserror-impl]] -who = "Jack Grigg " -criteria = "safe-to-deploy" -delta = "1.0.48 -> 1.0.51" -aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml" - -[[audits.zcash.audits.thiserror-impl]] -who = "Jack Grigg " -criteria = "safe-to-deploy" -delta = "1.0.51 -> 1.0.52" -aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml" - -[[audits.zcash.audits.thiserror-impl]] -who = "Jack Grigg " -criteria = "safe-to-deploy" -delta = "1.0.52 -> 1.0.56" -aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml" - -[[audits.zcash.audits.thiserror-impl]] -who = "Daira-Emma Hopwood " -criteria = "safe-to-deploy" -delta = "1.0.56 -> 1.0.58" -aggregated-from = "https://raw.githubusercontent.com/zcash/zcash/master/qa/supply-chain/audits.toml" - [[audits.zcash.audits.thread_local]] who = "Jack Grigg " criteria = "safe-to-deploy" diff --git a/tests/cli_integration.rs b/tests/cli_integration.rs index 116efd7d..5e83bd82 100644 --- a/tests/cli_integration.rs +++ b/tests/cli_integration.rs @@ -66,14 +66,6 @@ mod cli_integration { assert!(common::run("version --turbo").output().is_err()); } - fn debug_builds_contain_debug_message(addr: &str, creds: &str, ns: &Ulid, db: &Ulid) { - info!("* Debug builds contain debug message"); - let args = - format!("sql --conn http://{addr} {creds} --ns {ns} --db {db} --multi --hide-welcome"); - let res = common::run(&args).input("CREATE not_a_table:not_a_record;\n").output().unwrap(); - assert!(res.contains("Debug builds are not intended for production use")); - } - #[test(tokio::test)] async fn all_commands() { // Commands without credentials when auth is disabled, should succeed @@ -89,7 +81,14 @@ mod cli_integration { let db = Ulid::new(); #[cfg(debug_assertions)] - debug_builds_contain_debug_message(&addr, creds, &ns, &db); + { + info!("* Debug builds contain debug message"); + let args = format!( + "sql --conn http://{addr} {creds} --ns {ns} --db {db} --multi --hide-welcome" + ); + let output = common::run(&args).input("CREATE any:any;\n").output().unwrap(); + assert!(output.contains("Debug builds are not intended for production use")); + } info!("* Create a record"); {