mirror of https://github.com/aljazceru/turso.git
synced 2026-02-07 09:14:26 +01:00

Merge branch 'main' into enc-page-1

This commit is contained in:
.github/workflows/rust_perf.yml (vendored, 16 lines changed)
@@ -11,7 +11,7 @@ env:
 
 jobs:
   bench:
-    runs-on: blacksmith-4vcpu-ubuntu-2404
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
       - uses: useblacksmith/setup-node@v5

@@ -37,7 +37,7 @@ jobs:
           # on pull events isn't compatible with this workflow being required to pass branch protection.
           fail-on-alert: false
           comment-on-alert: true
-          comment-always: false
+          comment-always: true
           # Nyrkiö configuration
           # Get yours from https://nyrkio.com/docs/getting-started
           nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }}

@@ -54,7 +54,7 @@ jobs:
           nyrkio-settings-threshold: 0%
 
   clickbench:
-    runs-on: blacksmith-4vcpu-ubuntu-2404
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
       - uses: useblacksmith/setup-node@v5

@@ -77,7 +77,7 @@ jobs:
           # on pull events isn't compatible with this workflow being required to pass branch protection.
           fail-on-alert: false
           comment-on-alert: true
-          comment-always: false
+          comment-always: true
           # Nyrkiö configuration
           # Get yours from https://nyrkio.com/docs/getting-started
           nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }}

@@ -101,7 +101,7 @@ jobs:
           nyrkio-public: true
 
   tpc-h-criterion:
-    runs-on: blacksmith-4vcpu-ubuntu-2404
+    runs-on: ubuntu-latest
     env:
       DB_FILE: "perf/tpc-h/TPC-H.db"
     steps:

@@ -138,7 +138,7 @@ jobs:
           # on pull events isn't compatible with this workflow being required to pass branch protection.
           fail-on-alert: false
           comment-on-alert: true
-          comment-always: false
+          comment-always: true
           # Nyrkiö configuration
           # Get yours from https://nyrkio.com/docs/getting-started
           nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }}

@@ -155,14 +155,14 @@ jobs:
           nyrkio-settings-threshold: 0%
 
   tpc-h:
-    runs-on: blacksmith-4vcpu-ubuntu-2404
+    runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v3
       - name: TPC-H
         run: ./perf/tpc-h/benchmark.sh
 
   vfs-bench-compile:
-    runs-on: blacksmith-4vcpu-ubuntu-2404
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
       - uses: useblacksmith/rust-cache@v3
Cargo.lock (generated, 253 lines changed)
@@ -122,12 +122,6 @@ dependencies = [
  "rand 0.9.2",
 ]
 
-[[package]]
-name = "android-tzdata"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
-
 [[package]]
 name = "android_log-sys"
 version = "0.3.2"

@@ -320,9 +314,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
 [[package]]
 name = "bitflags"
-version = "2.9.0"
+version = "2.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
+checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
 dependencies = [
  "serde",
 ]

@@ -356,7 +350,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0"
 dependencies = [
  "memchr",
- "regex-automata 0.4.9",
+ "regex-automata",
  "serde",
 ]

@@ -466,11 +460,10 @@ checksum = "18758054972164c3264f7c8386f5fc6da6114cb46b619fd365d4e3b2dc3ae487"
 
 [[package]]
 name = "chrono"
-version = "0.4.40"
+version = "0.4.42"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c"
+checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2"
 dependencies = [
- "android-tzdata",
  "iana-time-zone",
  "js-sys",
  "num-traits",

@@ -518,9 +511,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.5.32"
+version = "4.5.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83"
+checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931"
 dependencies = [
  "clap_builder",
  "clap_derive",

@@ -528,9 +521,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.5.32"
+version = "4.5.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8"
+checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6"
 dependencies = [
  "anstream",
  "anstyle",

@@ -552,9 +545,9 @@ dependencies = [
 
 [[package]]
 name = "clap_derive"
-version = "4.5.32"
+version = "4.5.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7"
+checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c"
 dependencies = [
  "heck",
  "proc-macro2",

@@ -666,7 +659,7 @@ dependencies = [
  "anyhow",
  "assert_cmd",
  "ctor 0.5.0",
- "env_logger 0.10.2",
+ "env_logger 0.11.7",
  "log",
  "rand 0.9.2",
  "rand_chacha 0.9.0",

@@ -796,7 +789,7 @@ version = "0.28.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "crossterm_winapi",
  "parking_lot",
  "rustix 0.38.44",

@@ -1120,7 +1113,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
 dependencies = [
  "log",
- "regex",
 ]
 
 [[package]]

@@ -1129,11 +1121,8 @@ version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
 dependencies = [
- "humantime",
- "is-terminal",
  "log",
  "regex",
- "termcolor",
 ]
 
 [[package]]

@@ -1145,7 +1134,6 @@ dependencies = [
  "anstream",
  "anstyle",
  "env_filter",
- "jiff",
  "log",
 ]
 
 [[package]]

@@ -1514,7 +1502,7 @@ version = "0.20.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5220b8ba44c68a9a7f7a7659e864dd73692e417ef0211bea133c7b74e031eeb9"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "libc",
  "libgit2-sys",
  "log",

@@ -1605,12 +1593,6 @@ dependencies = [
  "itoa",
 ]
 
-[[package]]
-name = "humantime"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f"
-
 [[package]]
 name = "iana-time-zone"
 version = "0.1.62"

@@ -1793,9 +1775,9 @@ dependencies = [
 
 [[package]]
 name = "indexmap"
-version = "2.11.0"
+version = "2.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9"
+checksum = "206a8042aec68fa4a62e8d3f7aa4ceb508177d9324faf261e1959e495b7a1921"
 dependencies = [
  "equivalent",
  "hashbrown 0.15.2",

@@ -1815,7 +1797,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88"
 dependencies = [
  "ahash",
- "indexmap 2.11.0",
+ "indexmap 2.11.1",
  "is-terminal",
  "itoa",
  "log",

@@ -1832,7 +1814,7 @@ version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "inotify-sys",
  "libc",
 ]

@@ -1861,7 +1843,7 @@ version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3c2f96dfbc20c12b9b4f12eef60472d8c29b9c3f29463570dcb47e4a48551168"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "cfg-if",
  "libc",
 ]

@@ -1928,30 +1910,6 @@ version = "1.0.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
 
-[[package]]
-name = "jiff"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c102670231191d07d37a35af3eb77f1f0dbf7a71be51a962dcd57ea607be7260"
-dependencies = [
- "jiff-static",
- "log",
- "portable-atomic",
- "portable-atomic-util",
- "serde",
-]
-
-[[package]]
-name = "jiff-static"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4cdde31a9d349f1b1f51a0b3714a5940ac022976f4b49485fc04be052b183b4c"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.100",
-]
-
 [[package]]
 name = "jni"
 version = "0.21.1"

@@ -2089,7 +2047,7 @@ version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "libc",
  "redox_syscall",
 ]

@@ -2182,10 +2140,10 @@ dependencies = [
  "chrono",
  "clap",
  "dirs 6.0.0",
- "env_logger 0.10.2",
+ "env_logger 0.11.7",
  "garde",
  "hex",
- "indexmap 2.11.0",
+ "indexmap 2.11.1",
  "itertools 0.14.0",
  "json5",
  "log",

@@ -2194,7 +2152,7 @@ dependencies = [
  "rand 0.9.2",
  "rand_chacha 0.9.0",
  "regex",
- "regex-syntax 0.8.5",
+ "regex-syntax",
  "rusqlite",
  "schemars 1.0.4",
  "serde",

@@ -2276,11 +2234,11 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
 
 [[package]]
 name = "matchers"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
+checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9"
 dependencies = [
- "regex-automata 0.1.10",
+ "regex-automata",
 ]
 
 [[package]]

@@ -2399,7 +2357,7 @@ version = "3.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "96671d5c84cee3ae4cab96386b9f953b22569ece9677b9fdd1492550a165eca5"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "ctor 0.4.2",
  "napi-build",
  "napi-sys",

@@ -2475,7 +2433,7 @@ version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "cfg-if",
  "cfg_aliases",
  "libc",

@@ -2493,7 +2451,7 @@ version = "8.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2fee8403b3d66ac7b26aee6e40a897d85dc5ce26f44da36b8b73e987cc52e943"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "filetime",
  "fsevent-sys",
  "inotify",

@@ -2512,16 +2470,6 @@ version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d"
 
-[[package]]
-name = "nu-ansi-term"
-version = "0.46.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
-dependencies = [
- "overload",
- "winapi",
-]
-
 [[package]]
 name = "nu-ansi-term"
 version = "0.50.1"

@@ -2588,7 +2536,7 @@ version = "6.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "336b9c63443aceef14bea841b899035ae3abe89b7c486aaf4c5bd8aafedac3f0"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "libc",
  "once_cell",
  "onig_sys",

@@ -2633,12 +2581,6 @@ dependencies = [
  "log",
 ]
 
-[[package]]
-name = "overload"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
-
 [[package]]
 name = "owo-colors"
 version = "4.2.0"

@@ -2696,7 +2638,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323"
 dependencies = [
  "memchr",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "ucd-trie",
 ]

@@ -2758,7 +2700,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "eac26e981c03a6e53e0aee43c113e3202f5581d5360dae7bd2c70e800dd0451d"
 dependencies = [
  "base64",
- "indexmap 2.11.0",
+ "indexmap 2.11.1",
  "quick-xml 0.32.0",
  "serde",
  "time",

@@ -2825,15 +2767,6 @@ version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e"
 
-[[package]]
-name = "portable-atomic-util"
-version = "0.2.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507"
-dependencies = [
- "portable-atomic",
-]
-
 [[package]]
 name = "powerfmt"
 version = "0.2.0"

@@ -3176,7 +3109,7 @@ version = "0.5.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
 ]
 
 [[package]]

@@ -3198,7 +3131,7 @@ checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b"
 dependencies = [
  "getrandom 0.2.15",
  "libredox",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
 ]
 
 [[package]]

@@ -3229,17 +3162,8 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata 0.4.9",
- "regex-syntax 0.8.5",
-]
-
-[[package]]
-name = "regex-automata"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
-dependencies = [
- "regex-syntax 0.6.29",
+ "regex-automata",
+ "regex-syntax",
 ]
 
 [[package]]

@@ -3250,15 +3174,9 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-syntax 0.8.5",
+ "regex-syntax",
 ]
 
-[[package]]
-name = "regex-syntax"
-version = "0.6.29"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
-
 [[package]]
 name = "regex-syntax"
 version = "0.8.5"

@@ -3339,7 +3257,7 @@ version = "0.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "fallible-iterator",
  "fallible-streaming-iterator",
  "hashlink",

@@ -3384,7 +3302,7 @@ version = "0.38.44"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "errno",
  "libc",
  "linux-raw-sys 0.4.15",

@@ -3397,7 +3315,7 @@ version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "errno",
  "libc",
  "linux-raw-sys 0.9.3",

@@ -3416,7 +3334,7 @@ version = "15.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2ee1e066dc922e513bda599c6ccb5f3bb2b0ea5870a579448f2622993f0a9a2f"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "cfg-if",
  "clipboard-win",
  "fd-lock",

@@ -3656,7 +3574,7 @@ dependencies = [
  "anyhow",
  "garde",
  "hex",
- "indexmap 2.11.0",
+ "indexmap 2.11.1",
  "itertools 0.14.0",
  "rand 0.9.2",
  "rand_chacha 0.9.0",

@@ -3817,7 +3735,7 @@ dependencies = [
  "once_cell",
  "onig",
  "plist",
- "regex-syntax 0.8.5",
+ "regex-syntax",
  "serde",
  "serde_derive",
  "serde_json",

@@ -3845,15 +3763,6 @@ dependencies = [
  "windows-sys 0.59.0",
 ]
 
-[[package]]
-name = "termcolor"
-version = "1.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
-dependencies = [
- "winapi-util",
-]
-
 [[package]]
 name = "terminal_size"
 version = "0.4.2"

@@ -3913,11 +3822,11 @@ dependencies = [
 
 [[package]]
 name = "thiserror"
-version = "2.0.12"
+version = "2.0.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
+checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0"
 dependencies = [
- "thiserror-impl 2.0.12",
+ "thiserror-impl 2.0.16",
 ]
 
 [[package]]

@@ -3933,9 +3842,9 @@ dependencies = [
 
 [[package]]
 name = "thiserror-impl"
-version = "2.0.12"
+version = "2.0.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
+checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960"
 dependencies = [
  "proc-macro2",
  "quote",

@@ -4049,7 +3958,7 @@ version = "0.8.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae"
 dependencies = [
- "indexmap 2.11.0",
+ "indexmap 2.11.1",
  "serde",
  "serde_spanned",
  "toml_datetime",

@@ -4071,7 +3980,7 @@ version = "0.22.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e"
 dependencies = [
- "indexmap 2.11.0",
+ "indexmap 2.11.1",
  "serde",
  "serde_spanned",
  "toml_datetime",

@@ -4142,14 +4051,14 @@ dependencies = [
 
 [[package]]
 name = "tracing-subscriber"
-version = "0.3.19"
+version = "0.3.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008"
+checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5"
 dependencies = [
  "matchers",
- "nu-ansi-term 0.46.0",
+ "nu-ansi-term",
  "once_cell",
- "regex",
+ "regex-automata",
  "sharded-slab",
  "smallvec",
  "thread_local",

@@ -4162,10 +4071,10 @@ dependencies = [
 name = "turso"
 version = "0.2.0-pre.3"
 dependencies = [
- "rand 0.8.5",
- "rand_chacha 0.3.1",
+ "rand 0.9.2",
+ "rand_chacha 0.9.0",
  "tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "turso_core",
 ]

@@ -4175,7 +4084,7 @@ name = "turso-java"
 version = "0.2.0-pre.3"
 dependencies = [
  "jni",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "turso_core",
 ]
 

@@ -4191,12 +4100,12 @@ dependencies = [
  "csv",
  "ctrlc",
  "dirs 5.0.1",
- "env_logger 0.10.2",
+ "env_logger 0.11.7",
  "libc",
  "limbo_completion",
  "miette",
  "mimalloc",
- "nu-ansi-term 0.50.1",
+ "nu-ansi-term",
  "rustyline",
  "schemars 0.8.22",
  "serde",

@@ -4220,7 +4129,7 @@ dependencies = [
  "aes",
  "aes-gcm",
  "antithesis_sdk",
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "built",
  "bytemuck",
  "cfg_block",

@@ -4249,7 +4158,7 @@ dependencies = [
  "rand 0.8.5",
  "rand_chacha 0.9.0",
  "regex",
- "regex-syntax 0.8.5",
+ "regex-syntax",
  "rstest",
  "rusqlite",
  "rustix 1.0.7",

@@ -4260,12 +4169,11 @@ dependencies = [
  "strum_macros",
  "tempfile",
  "test-log",
- "thiserror 1.0.69",
+ "thiserror 2.0.16",
  "tracing",
  "turso_ext",
  "turso_macros",
  "turso_parser",
  "turso_sqlite3_parser",
  "twox-hash",
  "uncased",
  "uuid",

@@ -4324,7 +4232,7 @@ dependencies = [
 name = "turso_parser"
 version = "0.2.0-pre.3"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "criterion",
  "fallible-iterator",
  "miette",

@@ -4332,7 +4240,7 @@ dependencies = [
  "serde",
  "strum",
  "strum_macros",
- "thiserror 1.0.69",
+ "thiserror 2.0.16",
  "turso_macros",
 ]
 

@@ -4353,11 +4261,11 @@ dependencies = [
 name = "turso_sqlite3_parser"
 version = "0.2.0-pre.3"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
  "cc",
  "env_logger 0.11.7",
  "fallible-iterator",
- "indexmap 2.11.0",
+ "indexmap 2.11.1",
  "log",
  "memchr",
  "miette",

@@ -4400,7 +4308,7 @@ dependencies = [
  "serde",
  "serde_json",
  "tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tracing",
  "tracing-subscriber",

@@ -4773,9 +4681,9 @@ dependencies = [
 
 [[package]]
 name = "windows-link"
-version = "0.1.1"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
+checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65"
 
 [[package]]
 name = "windows-sys"

@@ -5006,7 +4914,26 @@ version = "0.39.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
 dependencies = [
- "bitflags 2.9.0",
+ "bitflags 2.9.4",
 ]
 
+[[package]]
+name = "write-throughput"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "futures",
+ "tokio",
+ "tracing-subscriber",
+ "turso",
+]
+
+[[package]]
+name = "write-throughput-sqlite"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "rusqlite",
+]
+
 [[package]]
Cargo.toml (24 lines changed)
@@ -30,11 +30,11 @@ members = [
     "sync/engine",
     "sql_generation",
     "whopper",
+    "perf/throughput/turso",
+    "perf/throughput/rusqlite",
 ]
 exclude = [
     "perf/latency/limbo",
-    "perf/throughput/rusqlite",
-    "perf/throughput/turso"
 ]
 
 [workspace.package]

@@ -71,10 +71,30 @@ mimalloc = { version = "0.1.47", default-features = false }
 rusqlite = { version = "0.37.0", features = ["bundled"] }
 itertools = "0.14.0"
 rand = "0.9.2"
 rand_chacha = "0.9.0"
 tracing = "0.1.41"
 schemars = "1.0.4"
 garde = "0.22"
+parking_lot = "0.12.4"
+tokio = { version = "1.0", default-features = false }
+tracing-subscriber = "0.3.20"
+futures = "0.3"
+clap = "4.5.47"
+thiserror = "2.0.16"
+tempfile = "3.20.0"
+indexmap = "2.11.1"
+miette = "7.6.0"
+bitflags = "2.9.4"
+fallible-iterator = "0.3.0"
+criterion = "0.5"
+chrono = { version = "0.4.42", default-features = false }
+hex = "0.4"
+antithesis_sdk = "0.2"
+cfg-if = "1.0.0"
+tracing-appender = "0.2.3"
+env_logger = { version = "0.11.6", default-features = false }
+regex = "1.11.1"
+regex-syntax = { version = "0.8.5", default-features = false }
 
 [profile.release]
 debug = "line-tables-only"
@@ -23,6 +23,7 @@ COPY ./extensions ./extensions/
 COPY ./macros ./macros/
 COPY ./packages ./packages/
 COPY ./parser ./parser/
+COPY ./perf/throughput/turso ./perf/throughput/turso/
 COPY ./simulator ./simulator/
 COPY ./sql_generation ./sql_generation
 COPY ./sqlite3 ./sqlite3/

@@ -63,6 +64,7 @@ COPY --from=planner /app/extensions ./extensions/
 COPY --from=planner /app/macros ./macros/
 COPY --from=planner /app/packages ./packages/
 COPY --from=planner /app/parser ./parser/
+COPY --from=planner /perf/throughput/turso ./perf/throughput/turso/
 COPY --from=planner /app/simulator ./simulator/
 COPY --from=planner /app/sql_generation ./sql_generation
 COPY --from=planner /app/sqlite3 ./sqlite3/
@@ -14,6 +14,6 @@ crate-type = ["cdylib"]
 path = "rs_src/lib.rs"
 
 [dependencies]
-turso_core = { path = "../../core", features = ["io_uring"] }
+turso_core = { workspace = true, features = ["io_uring"] }
 jni = "0.21.1"
-thiserror = "2.0.9"
+thiserror = { workspace = true }
@@ -14,7 +14,7 @@ crate-type = ["cdylib", "lib"]
 turso_core = { workspace = true }
 napi = { version = "3.1.3", default-features = false, features = ["napi6"] }
 napi-derive = { version = "3.1.1", default-features = true }
-tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
+tracing-subscriber = { workspace = true, features = ["env-filter"] }
 tracing.workspace = true
 
 [features]
@@ -81,8 +81,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-android-arm64')
       const bindingPackageVersion = require('@tursodatabase/database-android-arm64/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -97,8 +97,8 @@ function requireNative() {
     try {
      const binding = require('@tursodatabase/database-android-arm-eabi')
       const bindingPackageVersion = require('@tursodatabase/database-android-arm-eabi/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -117,8 +117,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-win32-x64-msvc')
       const bindingPackageVersion = require('@tursodatabase/database-win32-x64-msvc/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -133,8 +133,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-win32-ia32-msvc')
       const bindingPackageVersion = require('@tursodatabase/database-win32-ia32-msvc/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -149,8 +149,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-win32-arm64-msvc')
       const bindingPackageVersion = require('@tursodatabase/database-win32-arm64-msvc/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -168,8 +168,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-darwin-universal')
       const bindingPackageVersion = require('@tursodatabase/database-darwin-universal/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -184,8 +184,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-darwin-x64')
       const bindingPackageVersion = require('@tursodatabase/database-darwin-x64/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -200,8 +200,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-darwin-arm64')
       const bindingPackageVersion = require('@tursodatabase/database-darwin-arm64/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -220,8 +220,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-freebsd-x64')
       const bindingPackageVersion = require('@tursodatabase/database-freebsd-x64/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -236,8 +236,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-freebsd-arm64')
       const bindingPackageVersion = require('@tursodatabase/database-freebsd-arm64/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -257,8 +257,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-x64-musl')
       const bindingPackageVersion = require('@tursodatabase/database-linux-x64-musl/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -273,8 +273,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-x64-gnu')
       const bindingPackageVersion = require('@tursodatabase/database-linux-x64-gnu/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -291,8 +291,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-arm64-musl')
       const bindingPackageVersion = require('@tursodatabase/database-linux-arm64-musl/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -307,8 +307,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-arm64-gnu')
       const bindingPackageVersion = require('@tursodatabase/database-linux-arm64-gnu/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -325,8 +325,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-arm-musleabihf')
       const bindingPackageVersion = require('@tursodatabase/database-linux-arm-musleabihf/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -341,8 +341,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-arm-gnueabihf')
       const bindingPackageVersion = require('@tursodatabase/database-linux-arm-gnueabihf/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -359,8 +359,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-riscv64-musl')
       const bindingPackageVersion = require('@tursodatabase/database-linux-riscv64-musl/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -375,8 +375,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-riscv64-gnu')
       const bindingPackageVersion = require('@tursodatabase/database-linux-riscv64-gnu/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -392,8 +392,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-ppc64-gnu')
       const bindingPackageVersion = require('@tursodatabase/database-linux-ppc64-gnu/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -408,8 +408,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-linux-s390x-gnu')
       const bindingPackageVersion = require('@tursodatabase/database-linux-s390x-gnu/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -428,8 +428,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-openharmony-arm64')
       const bindingPackageVersion = require('@tursodatabase/database-openharmony-arm64/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -444,8 +444,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-openharmony-x64')
       const bindingPackageVersion = require('@tursodatabase/database-openharmony-x64/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {

@@ -460,8 +460,8 @@ function requireNative() {
     try {
       const binding = require('@tursodatabase/database-openharmony-arm')
       const bindingPackageVersion = require('@tursodatabase/database-openharmony-arm/package.json').version
-      if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
-        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
+      if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
+        throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
       }
       return binding
     } catch (e) {
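All of the hunks above make the same mechanical change: the expected native-binding version is bumped from 0.2.0-pre.1 to 0.2.0-pre.3 in each per-platform branch of requireNative(). As a minimal TypeScript sketch of the guard they all share (the helper name is hypothetical; the real file inlines the check for every package):

    // Hypothetical distillation of the repeated guard in requireNative().
    // A mismatch only throws when NAPI_RS_ENFORCE_VERSION_CHECK is set to a
    // value other than '0'; otherwise the binding is accepted as-is.
    const EXPECTED_VERSION = '0.2.0-pre.3'

    function checkBindingVersion(bindingPackageVersion: string): void {
        const enforce = process.env.NAPI_RS_ENFORCE_VERSION_CHECK
        if (bindingPackageVersion !== EXPECTED_VERSION && enforce && enforce !== '0') {
            throw new Error(`Native binding package version mismatch, expected ${EXPECTED_VERSION} but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
        }
    }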
@@ -146,9 +146,9 @@ impl IO for Opfs {
         if result >= 0 {
             Ok(Arc::new(OpfsFile { handle: result }))
         } else if result == -404 {
-            Err(turso_core::LimboError::InternalError(
-                "files must be created in advance for OPFS IO".to_string(),
-            ))
+            Err(turso_core::LimboError::InternalError(format!(
+                "unexpected path {path}: files must be created in advance for OPFS IO"
+            )))
         } else {
             Err(turso_core::LimboError::InternalError(format!(
                 "unexpected file lookup error: {result}"
@@ -17,7 +17,7 @@ turso_sync_engine = { workspace = true }
 turso_core = { workspace = true }
 turso_node = { workspace = true }
 genawaiter = { version = "0.99.1", default-features = false }
-tracing-subscriber = "0.3.19"
+tracing-subscriber = { workspace = true }
 
 [build-dependencies]
 napi-build = "2.2.3"
@@ -160,7 +160,7 @@ test('checkpoint', async () => {
     expect((await db1.stats()).revertWal).toBe(revertWal);
 })
 
-test('persistence', async () => {
+test('persistence-push', async () => {
     {
         const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
         await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");

@@ -203,6 +203,63 @@ test('persistence', async () => {
     }
 })
 
+test('persistence-offline', async () => {
+    {
+        const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+        await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+        await db.exec("DELETE FROM q");
+        await db.push();
+        await db.close();
+    }
+    const path = `test-${(Math.random() * 10000) | 0}.db`;
+    {
+        const db = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
+        await db.exec(`INSERT INTO q VALUES ('k1', 'v1')`);
+        await db.exec(`INSERT INTO q VALUES ('k2', 'v2')`);
+        await db.push();
+        await db.close();
+    }
+    {
+        const db = await connect({ path: path, url: "https://not-valid-url.localhost" });
+        const rows = await db.prepare("SELECT * FROM q").all();
+        const expected = [{ x: 'k1', y: 'v1' }, { x: 'k2', y: 'v2' }];
+        expect(rows.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+        await db.close();
+    }
+})
+
+test('persistence-pull-push', async () => {
+    {
+        const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
+        await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
+        await db.exec("DELETE FROM q");
+        await db.push();
+        await db.close();
+    }
+    const path1 = `test-${(Math.random() * 10000) | 0}.db`;
+    const path2 = `test-${(Math.random() * 10000) | 0}.db`;
+    const db1 = await connect({ path: path1, url: process.env.VITE_TURSO_DB_URL });
+    await db1.exec(`INSERT INTO q VALUES ('k1', 'v1')`);
+    await db1.exec(`INSERT INTO q VALUES ('k2', 'v2')`);
+    const stats1 = await db1.stats();
+
+    const db2 = await connect({ path: path2, url: process.env.VITE_TURSO_DB_URL });
+    await db2.exec(`INSERT INTO q VALUES ('k3', 'v3')`);
+    await db2.exec(`INSERT INTO q VALUES ('k4', 'v4')`);
+
+    await Promise.all([db1.push(), db2.push()]);
+    await Promise.all([db1.pull(), db2.pull()]);
+    const stats2 = await db1.stats();
+    console.info(stats1, stats2);
+    expect(stats1.revision).not.toBe(stats2.revision);
+
+    const rows1 = await db1.prepare('SELECT * FROM q').all();
+    const rows2 = await db2.prepare('SELECT * FROM q').all();
+    const expected = [{ x: 'k1', y: 'v1' }, { x: 'k2', y: 'v2' }, { x: 'k3', y: 'v3' }, { x: 'k4', y: 'v4' }];
+    expect(rows1.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+    expect(rows2.sort(localeCompare)).toEqual(expected.sort(localeCompare))
+})
+
 test('transform', async () => {
     {
         const db = await connect({
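The assertions above sort rows with a localeCompare helper defined elsewhere in the test file. A plausible sketch, inferred only from its usage here (the comparator shape is an assumption):

    // Hypothetical comparator for rows.sort(localeCompare) above:
    // orders result rows by their primary-key column x.
    const localeCompare = (a: { x: string }, b: { x: string }) => a.x.localeCompare(b.x)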
@@ -1,6 +1,6 @@
 import { registerFileAtWorker, unregisterFileAtWorker } from "@tursodatabase/database-browser-common"
 import { DatabasePromise, DatabaseOpts, NativeDatabase } from "@tursodatabase/database-common"
-import { ProtocolIo, run, SyncOpts, RunOpts, memoryIO } from "@tursodatabase/sync-common";
+import { ProtocolIo, run, SyncOpts, RunOpts, memoryIO, SyncEngineStats } from "@tursodatabase/sync-common";
 
 let BrowserIo: ProtocolIo = {
     async read(path: string): Promise<Buffer | Uint8Array | null> {

@@ -44,7 +44,7 @@ class Database extends DatabasePromise {
     async checkpoint() {
         await run(this.runOpts, this.io, this.engine, this.engine.checkpoint());
     }
-    async stats(): Promise<{ operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime: number | null }> {
+    async stats(): Promise<SyncEngineStats> {
         return (await run(this.runOpts, this.io, this.engine, this.engine.stats()));
     }
     override async close(): Promise<void> {

@@ -54,7 +54,7 @@ class Database extends DatabasePromise {
         await Promise.all([
             unregisterFileAtWorker(this.worker, this.fsPath),
             unregisterFileAtWorker(this.worker, `${this.fsPath}-wal`),
-            unregisterFileAtWorker(this.worker, `${this.fsPath}-revert`),
+            unregisterFileAtWorker(this.worker, `${this.fsPath}-wal-revert`),
             unregisterFileAtWorker(this.worker, `${this.fsPath}-info`),
             unregisterFileAtWorker(this.worker, `${this.fsPath}-changes`),
         ]);

@@ -95,7 +95,7 @@ async function connect(opts: SyncOpts, connect: (any) => any, init: () => Promis
         await Promise.all([
             registerFileAtWorker(worker, opts.path),
             registerFileAtWorker(worker, `${opts.path}-wal`),
-            registerFileAtWorker(worker, `${opts.path}-revert`),
+            registerFileAtWorker(worker, `${opts.path}-wal-revert`),
             registerFileAtWorker(worker, `${opts.path}-info`),
             registerFileAtWorker(worker, `${opts.path}-changes`),
         ]);
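Both hunks above rename the revert sidecar from `${path}-revert` to `${path}-wal-revert`, and the register and unregister lists must stay in sync. A hypothetical helper that would centralize the list (the real code spells both lists out inline):

    // Hypothetical helper: the files the browser driver registers with the
    // OPFS worker for a database at `path`, per the hunks above.
    function sidecarPaths(path: string): string[] {
        return [path, `${path}-wal`, `${path}-wal-revert`, `${path}-info`, `${path}-changes`]
    }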
@@ -1,5 +1,5 @@
 import { run, memoryIO } from "./run.js"
-import { SyncOpts, ProtocolIo, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult } from "./types.js"
+import { SyncOpts, ProtocolIo, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult, SyncEngineStats } from "./types.js"
 
 export { run, memoryIO, }
-export type { SyncOpts, ProtocolIo, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult }
+export type { SyncOpts, ProtocolIo, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult, SyncEngineStats }
@@ -44,7 +44,13 @@ export interface DatabaseRowStatement {
|
||||
values: Array<any>
|
||||
}
|
||||
|
||||
export type GeneratorResponse =
|
||||
| { type: 'IO' }
|
||||
| { type: 'Done' }
|
||||
| { type: 'SyncEngineStats', operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime: number | null }
|
||||
export interface SyncEngineStats {
|
||||
operations: number;
|
||||
mainWal: number;
|
||||
revertWal: number;
|
||||
lastPullUnixTime: number;
|
||||
lastPushUnixTime: number | null;
|
||||
revision: string | null;
|
||||
}
|
||||
|
||||
export type GeneratorResponse = { type: 'IO' } | { type: 'Done' } | ({ type: 'SyncEngineStats' } & SyncEngineStats)
|
||||
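
// Illustrative sketch, not part of the diff: one way the widened SyncEngineStats
// shape might be consumed through the new stats() signature. `logSyncStats` is a
// hypothetical helper, not part of the API; the structural parameter type stands
// in for the Database class from the bindings above.
async function logSyncStats(db: { stats(): Promise<SyncEngineStats> }): Promise<void> {
    const stats = await db.stats();
    if (stats.lastPushUnixTime === null) {
        // A null lastPushUnixTime means this replica has never pushed.
        console.info(`never pushed; ${stats.operations} local operations pending`);
    }
    console.info(`revision: ${stats.revision ?? 'none yet'}`);
}
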
@@ -81,8 +81,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-android-arm64')
const bindingPackageVersion = require('@tursodatabase/sync-android-arm64/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -97,8 +97,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-android-arm-eabi')
const bindingPackageVersion = require('@tursodatabase/sync-android-arm-eabi/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -117,8 +117,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-win32-x64-msvc')
const bindingPackageVersion = require('@tursodatabase/sync-win32-x64-msvc/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -133,8 +133,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-win32-ia32-msvc')
const bindingPackageVersion = require('@tursodatabase/sync-win32-ia32-msvc/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -149,8 +149,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-win32-arm64-msvc')
const bindingPackageVersion = require('@tursodatabase/sync-win32-arm64-msvc/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -168,8 +168,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-darwin-universal')
const bindingPackageVersion = require('@tursodatabase/sync-darwin-universal/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -184,8 +184,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-darwin-x64')
const bindingPackageVersion = require('@tursodatabase/sync-darwin-x64/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -200,8 +200,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-darwin-arm64')
const bindingPackageVersion = require('@tursodatabase/sync-darwin-arm64/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -220,8 +220,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-freebsd-x64')
const bindingPackageVersion = require('@tursodatabase/sync-freebsd-x64/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -236,8 +236,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-freebsd-arm64')
const bindingPackageVersion = require('@tursodatabase/sync-freebsd-arm64/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -257,8 +257,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-x64-musl')
const bindingPackageVersion = require('@tursodatabase/sync-linux-x64-musl/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -273,8 +273,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-x64-gnu')
const bindingPackageVersion = require('@tursodatabase/sync-linux-x64-gnu/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -291,8 +291,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-arm64-musl')
const bindingPackageVersion = require('@tursodatabase/sync-linux-arm64-musl/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -307,8 +307,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-arm64-gnu')
const bindingPackageVersion = require('@tursodatabase/sync-linux-arm64-gnu/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -325,8 +325,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-arm-musleabihf')
const bindingPackageVersion = require('@tursodatabase/sync-linux-arm-musleabihf/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -341,8 +341,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-arm-gnueabihf')
const bindingPackageVersion = require('@tursodatabase/sync-linux-arm-gnueabihf/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -359,8 +359,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-riscv64-musl')
const bindingPackageVersion = require('@tursodatabase/sync-linux-riscv64-musl/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -375,8 +375,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-riscv64-gnu')
const bindingPackageVersion = require('@tursodatabase/sync-linux-riscv64-gnu/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -392,8 +392,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-ppc64-gnu')
const bindingPackageVersion = require('@tursodatabase/sync-linux-ppc64-gnu/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -408,8 +408,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-linux-s390x-gnu')
const bindingPackageVersion = require('@tursodatabase/sync-linux-s390x-gnu/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -428,8 +428,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-openharmony-arm64')
const bindingPackageVersion = require('@tursodatabase/sync-openharmony-arm64/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -444,8 +444,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-openharmony-x64')
const bindingPackageVersion = require('@tursodatabase/sync-openharmony-x64/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {
@@ -460,8 +460,8 @@ function requireNative() {
try {
const binding = require('@tursodatabase/sync-openharmony-arm')
const bindingPackageVersion = require('@tursodatabase/sync-openharmony-arm/package.json').version
if (bindingPackageVersion !== '0.2.0-pre.1' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.1 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
if (bindingPackageVersion !== '0.2.0-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
throw new Error(`Native binding package version mismatch, expected 0.2.0-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
}
return binding
} catch (e) {

@@ -4,6 +4,14 @@ import { connect, DatabaseRowMutation, DatabaseRowTransformResult } from './prom

const localeCompare = (a, b) => a.x.localeCompare(b.x);

function cleanup(path) {
unlinkSync(path);
unlinkSync(`${path}-wal`);
unlinkSync(`${path}-info`);
unlinkSync(`${path}-changes`);
try { unlinkSync(`${path}-wal-revert`) } catch (e) { }
}

test('select-after-push', async () => {
{
const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
@@ -161,7 +169,8 @@ test('checkpoint', async () => {
expect((await db1.stats()).revertWal).toBe(revertWal);
})

test('persistence', async () => {
test('persistence-push', async () => {
{
const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
@@ -182,9 +191,11 @@ test('persistence', async () => {
const db2 = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
await db2.exec(`INSERT INTO q VALUES ('k3', 'v3')`);
await db2.exec(`INSERT INTO q VALUES ('k4', 'v4')`);
const rows = await db2.prepare('SELECT * FROM q').all();
const stmt = db2.prepare('SELECT * FROM q');
const rows = await stmt.all();
const expected = [{ x: 'k1', y: 'v1' }, { x: 'k2', y: 'v2' }, { x: 'k3', y: 'v3' }, { x: 'k4', y: 'v4' }];
expect(rows).toEqual(expected)
stmt.close();
await db2.close();
}

@@ -201,12 +212,75 @@ test('persistence', async () => {
expect(rows).toEqual(expected)
await db4.close();
}
}
finally {
cleanup(path);
}
})

test('persistence-offline', async () => {
{
const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
await db.exec("DELETE FROM q");
await db.push();
await db.close();
}
const path = `test-${(Math.random() * 10000) | 0}.db`;
try {
{
const db = await connect({ path: path, url: process.env.VITE_TURSO_DB_URL });
await db.exec(`INSERT INTO q VALUES ('k1', 'v1')`);
await db.exec(`INSERT INTO q VALUES ('k2', 'v2')`);
await db.push();
await db.close();
}
{
const db = await connect({ path: path, url: "https://not-valid-url.localhost" });
const rows = await db.prepare("SELECT * FROM q").all();
const expected = [{ x: 'k1', y: 'v1' }, { x: 'k2', y: 'v2' }];
expect(rows.sort(localeCompare)).toEqual(expected.sort(localeCompare))
await db.close();
}
} finally {
unlinkSync(path);
unlinkSync(`${path}-wal`);
unlinkSync(`${path}-info`);
unlinkSync(`${path}-changes`);
try { unlinkSync(`${path}-revert`) } catch (e) { }
cleanup(path);
}
})

test('persistence-pull-push', async () => {
{
const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL });
await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
await db.exec("DELETE FROM q");
await db.push();
await db.close();
}
const path1 = `test-${(Math.random() * 10000) | 0}.db`;
const path2 = `test-${(Math.random() * 10000) | 0}.db`;
try {
const db1 = await connect({ path: path1, url: process.env.VITE_TURSO_DB_URL });
await db1.exec(`INSERT INTO q VALUES ('k1', 'v1')`);
await db1.exec(`INSERT INTO q VALUES ('k2', 'v2')`);
const stats1 = await db1.stats();

const db2 = await connect({ path: path2, url: process.env.VITE_TURSO_DB_URL });
await db2.exec(`INSERT INTO q VALUES ('k3', 'v3')`);
await db2.exec(`INSERT INTO q VALUES ('k4', 'v4')`);

await Promise.all([db1.push(), db2.push()]);
await Promise.all([db1.pull(), db2.pull()]);
const stats2 = await db1.stats();
console.info(stats1, stats2);
expect(stats1.revision).not.toBe(stats2.revision);

const rows1 = await db1.prepare('SELECT * FROM q').all();
const rows2 = await db2.prepare('SELECT * FROM q').all();
const expected = [{ x: 'k1', y: 'v1' }, { x: 'k2', y: 'v2' }, { x: 'k3', y: 'v3' }, { x: 'k4', y: 'v4' }];
expect(rows1.sort(localeCompare)).toEqual(expected.sort(localeCompare))
expect(rows2.sort(localeCompare)).toEqual(expected.sort(localeCompare))
} finally {
cleanup(path1);
cleanup(path2);
}
})

@@ -1,5 +1,5 @@
import { DatabasePromise, DatabaseOpts, NativeDatabase } from "@tursodatabase/database-common"
import { ProtocolIo, run, SyncOpts, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult } from "@tursodatabase/sync-common";
import { ProtocolIo, run, SyncOpts, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult, SyncEngineStats } from "@tursodatabase/sync-common";
import { Database as NativeDB, SyncEngine } from "#index";
import { promises } from "node:fs";

@@ -61,7 +61,7 @@ class Database extends DatabasePromise {
async checkpoint() {
await run(this.runOpts, this.io, this.engine, this.engine.checkpoint());
}
async stats(): Promise<{ operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime: number | null }> {
async stats(): Promise<SyncEngineStats> {
return (await run(this.runOpts, this.io, this.engine, this.engine.stats()));
}
override async close(): Promise<void> {

@@ -45,6 +45,7 @@ pub enum GeneratorResponse {
revert_wal: i64,
last_pull_unix_time: i64,
last_push_unix_time: Option<i64>,
revision: Option<String>,
},
}

@@ -269,13 +269,14 @@ impl SyncEngine {
self.run(async move |coro, sync_engine| {
let sync_engine = try_read(sync_engine)?;
let sync_engine = try_unwrap(&sync_engine)?;
let changes = sync_engine.stats(coro).await?;
let stats = sync_engine.stats(coro).await?;
Ok(Some(GeneratorResponse::SyncEngineStats {
operations: changes.cdc_operations,
main_wal: changes.main_wal_size as i64,
revert_wal: changes.revert_wal_size as i64,
last_pull_unix_time: changes.last_pull_unix_time,
last_push_unix_time: changes.last_push_unix_time,
operations: stats.cdc_operations,
main_wal: stats.main_wal_size as i64,
revert_wal: stats.revert_wal_size as i64,
last_pull_unix_time: stats.last_pull_unix_time,
last_push_unix_time: stats.last_push_unix_time,
revision: stats.revision,
}))
})
}

@@ -18,10 +18,10 @@ tracing_release = ["turso_core/tracing_release"]

[dependencies]
turso_core = { workspace = true, features = ["io_uring"] }
thiserror = "2.0.9"
thiserror = { workspace = true }

[dev-dependencies]
tempfile = "3.20.0"
tokio = { version = "1.29.1", features = ["full"] }
rand = "0.8.5"
rand_chacha = "0.3.1"
tempfile = { workspace = true }
tokio = { workspace = true, features = ["full"] }
rand = { workspace = true }
rand_chacha = { workspace = true }

@@ -393,6 +393,29 @@ impl Connection {

Ok(conn.get_auto_commit())
}

/// Sets the maximum total accumulated timeout. If the duration is None or zero, we unset the busy handler for this Connection.
///
/// This API differs slightly from: https://www.sqlite.org/c3ref/busy_timeout.html
///
/// Instead of sleeping for a linear amount of time specified by the user,
/// we sleep in phases until the total amount of time is reached.
/// This means we first sleep for 1 ms; then, if we still return Busy, we sleep for 2 ms, and repeat up to a maximum of 100 ms per phase.
///
/// Example:
/// 1. Set duration to 5ms
/// 2. Step through query -> returns Busy -> sleep/yield for 1 ms
/// 3. Step through query -> returns Busy -> sleep/yield for 2 ms
/// 4. Step through query -> returns Busy -> sleep/yield for 2 ms (totaling 5 ms of sleep)
/// 5. Step through query -> returns Busy -> return Busy to user
pub fn busy_timeout(&self, duration: Option<std::time::Duration>) -> Result<()> {
let conn = self
.inner
.lock()
.map_err(|e| Error::MutexError(e.to_string()))?;
conn.busy_timeout(duration);
Ok(())
}
}

impl Debug for Connection {

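// Illustrative sketch, not part of the diff: the phased backoff schedule the
// busy_timeout doc comment above describes, isolated from the connection
// machinery. It assumes phases double from 1 ms up to a 100 ms cap; the last
// phase is clamped so accumulated sleep never exceeds the configured budget.
fn backoff_phases(budget_ms: u64) -> Vec<u64> {
    let mut phases = Vec::new();
    let (mut slept, mut phase) = (0u64, 1u64);
    while slept < budget_ms {
        // Cap each phase at 100 ms and clamp to the remaining budget.
        let step = phase.min(100).min(budget_ms - slept);
        phases.push(step);
        slept += step;
        phase = (phase * 2).min(100);
    }
    phases
}
// backoff_phases(5) == [1, 2, 2], matching steps 2-4 of the doc example.
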
@@ -19,27 +19,27 @@ path = "main.rs"

[dependencies]
anyhow.workspace = true
cfg-if = "1.0.0"
clap = { version = "4.5.31", features = ["derive"] }
cfg-if = { workspace = true }
clap = { workspace = true, features = ["derive"] }
clap_complete = { version = "=4.5.47", features = ["unstable-dynamic"] }
comfy-table = "7.1.4"
csv = "1.3.1"
ctrlc = "3.4.4"
dirs = "5.0.1"
env_logger = "0.10.1"
env_logger = { workspace = true }
libc = "0.2.172"
turso_core = { path = "../core", default-features = true, features = [] }
limbo_completion = { path = "../extensions/completion", features = ["static"] }
miette = { version = "7.4.0", features = ["fancy"] }
miette = { workspace = true, features = ["fancy"] }
nu-ansi-term = {version = "0.50.1", features = ["serde", "derive_serde_style"]}
rustyline = { version = "15.0.0", default-features = true, features = [
"derive",
] }
shlex = "1.3.0"
syntect = { git = "https://github.com/trishume/syntect.git", rev = "64644ffe064457265cbcee12a0c1baf9485ba6ee" }
tracing = "0.1.41"
tracing-appender = "0.2.3"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tracing = { workspace = true }
tracing-appender = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
toml = {version = "0.8.20", features = ["preserve_order"]}
schemars = {version = "0.8.22", features = ["preserve_order"]}
serde = { workspace = true, features = ["derive"]}

@@ -1102,7 +1102,7 @@ impl Limbo {
table_name: &str,
) -> anyhow::Result<bool> {
let sql = format!(
"SELECT sql, type, name FROM {db_prefix}.sqlite_schema WHERE type IN ('table', 'index', 'view') AND (tbl_name = '{table_name}' OR name = '{table_name}') AND name NOT LIKE 'sqlite_%' ORDER BY CASE type WHEN 'table' THEN 1 WHEN 'view' THEN 2 WHEN 'index' THEN 3 END, rowid"
"SELECT sql, type, name FROM {db_prefix}.sqlite_schema WHERE type IN ('table', 'index', 'view') AND (tbl_name = '{table_name}' OR name = '{table_name}') AND name NOT LIKE 'sqlite_%' AND name NOT LIKE '__turso_internal_%' ORDER BY CASE type WHEN 'table' THEN 1 WHEN 'view' THEN 2 WHEN 'index' THEN 3 END, rowid"
);

let mut found = false;
@@ -1135,7 +1135,7 @@ impl Limbo {
db_prefix: &str,
db_display_name: &str,
) -> anyhow::Result<()> {
let sql = format!("SELECT sql, type, name FROM {db_prefix}.sqlite_schema WHERE type IN ('table', 'index', 'view') AND name NOT LIKE 'sqlite_%' ORDER BY CASE type WHEN 'table' THEN 1 WHEN 'view' THEN 2 WHEN 'index' THEN 3 END, rowid");
let sql = format!("SELECT sql, type, name FROM {db_prefix}.sqlite_schema WHERE type IN ('table', 'index', 'view') AND name NOT LIKE 'sqlite_%' AND name NOT LIKE '__turso_internal_%' ORDER BY CASE type WHEN 'table' THEN 1 WHEN 'view' THEN 2 WHEN 'index' THEN 3 END, rowid");

match self.conn.query(&sql) {
Ok(Some(ref mut rows)) => loop {

@@ -44,36 +44,35 @@ libc = { version = "0.2.172" }
libloading = "0.8.6"

[dependencies]
antithesis_sdk = { version = "0.2.5", optional = true }
antithesis_sdk = { workspace = true, optional = true }
turso_ext = { workspace = true, features = ["core_only"] }
cfg_block = "0.1.1"
fallible-iterator = "0.3.0"
hex = "0.4.3"
turso_sqlite3_parser = { workspace = true }
thiserror = "1.0.61"
fallible-iterator = { workspace = true }
hex = { workspace = true }
thiserror = { workspace = true }
getrandom = { version = "0.2.15" }
regex = "1.11.1"
regex-syntax = { version = "0.8.5", default-features = false, features = [
regex = { workspace = true }
regex-syntax = { workspace = true, default-features = false, features = [
"unicode",
] }
chrono = { version = "0.4.38", default-features = false, features = ["clock"] }
chrono = { workspace = true, default-features = false, features = ["clock"] }
julian_day_converter = "0.4.5"
rand = "0.8.5"
libm = "0.2"
turso_macros = { workspace = true }
miette = "7.6.0"
miette = { workspace = true }
strum = { workspace = true }
parking_lot = { workspace = true }
crossbeam-skiplist = "0.1.3"
tracing = "0.1.41"
tracing = { workspace = true }
ryu = "1.0.19"
uncased = "0.9.10"
strum_macros = { workspace = true }
bitflags = "2.9.0"
bitflags = { workspace = true }
serde = { workspace = true, optional = true, features = ["derive"] }
paste = "1.0.15"
uuid = { version = "1.11.0", features = ["v4", "v7"], optional = true }
tempfile = "3.8.0"
tempfile = { workspace = true }
pack1 = { version = "1.0.0", features = ["bytemuck"] }
bytemuck = "1.23.1"
aes-gcm = { version = "0.10.3"}
@@ -83,7 +82,7 @@ aegis = "0.9.0"
twox-hash = "2.1.1"

[build-dependencies]
chrono = { version = "0.4.38", default-features = false }
chrono = { workspace = true, default-features = false }
built = { version = "0.7.5", features = ["git2", "chrono"] }

[target.'cfg(not(target_family = "windows"))'.dev-dependencies]
@@ -91,7 +90,7 @@ pprof = { version = "0.14.0", features = ["criterion", "flamegraph"] }

[dev-dependencies]
memory-stats = "1.2.0"
criterion = { version = "0.5", features = [
criterion = { workspace = true, features = [
"html_reports",
"async",
"async_futures",
@@ -101,11 +100,11 @@ rusqlite.workspace = true
quickcheck = { version = "1.0", default-features = false }
quickcheck_macros = { version = "1.0", default-features = false }
rand = "0.8.5" # Required for quickcheck
rand_chacha = "0.9.0"
env_logger = "0.11.6"
rand_chacha = { workspace = true }
env_logger = { workspace = true }
test-log = { version = "0.2.17", features = ["trace"] }
sorted-vec = "0.8.6"
mimalloc = { version = "0.1.46", default-features = false }
mimalloc = { workspace = true, default-features = false }

[[bench]]
name = "benchmark"

@@ -35,8 +35,10 @@ fn bench(c: &mut Criterion) {
let db = bench_db();
b.to_async(FuturesExecutor).iter(|| async {
let conn = db.conn.clone();
let tx_id = db.mvcc_store.begin_tx(conn.get_pager().clone());
db.mvcc_store.rollback_tx(tx_id, conn.get_pager().clone())
let tx_id = db.mvcc_store.begin_tx(conn.get_pager().clone()).unwrap();
db.mvcc_store
.rollback_tx(tx_id, conn.get_pager().clone(), &conn)
.unwrap();
})
});

@@ -44,7 +46,7 @@ fn bench(c: &mut Criterion) {
group.bench_function("begin_tx + commit_tx", |b| {
b.to_async(FuturesExecutor).iter(|| async {
let conn = &db.conn;
let tx_id = db.mvcc_store.begin_tx(conn.get_pager().clone());
let tx_id = db.mvcc_store.begin_tx(conn.get_pager().clone()).unwrap();
let mv_store = &db.mvcc_store;
let mut sm = mv_store
.commit_tx(tx_id, conn.get_pager().clone(), conn)
@@ -65,7 +67,7 @@ fn bench(c: &mut Criterion) {
group.bench_function("begin_tx-read-commit_tx", |b| {
b.to_async(FuturesExecutor).iter(|| async {
let conn = &db.conn;
let tx_id = db.mvcc_store.begin_tx(conn.get_pager().clone());
let tx_id = db.mvcc_store.begin_tx(conn.get_pager().clone()).unwrap();
db.mvcc_store
.read(
tx_id,
@@ -97,7 +99,7 @@ fn bench(c: &mut Criterion) {
group.bench_function("begin_tx-update-commit_tx", |b| {
b.to_async(FuturesExecutor).iter(|| async {
let conn = &db.conn;
let tx_id = db.mvcc_store.begin_tx(conn.get_pager().clone());
let tx_id = db.mvcc_store.begin_tx(conn.get_pager().clone()).unwrap();
db.mvcc_store
.update(
tx_id,
@@ -109,7 +111,6 @@ fn bench(c: &mut Criterion) {
data: record_data.clone(),
column_count: 1,
},
conn.get_pager().clone(),
)
.unwrap();
let mv_store = &db.mvcc_store;
@@ -129,7 +130,7 @@ fn bench(c: &mut Criterion) {
});

let db = bench_db();
let tx_id = db.mvcc_store.begin_tx(db.conn.get_pager().clone());
let tx_id = db.mvcc_store.begin_tx(db.conn.get_pager().clone()).unwrap();
db.mvcc_store
.insert(
tx_id,
@@ -158,8 +159,7 @@ fn bench(c: &mut Criterion) {
});

let db = bench_db();
let tx_id = db.mvcc_store.begin_tx(db.conn.get_pager().clone());
let conn = &db.conn;
let tx_id = db.mvcc_store.begin_tx(db.conn.get_pager().clone()).unwrap();
db.mvcc_store
.insert(
tx_id,
@@ -186,7 +186,6 @@ fn bench(c: &mut Criterion) {
data: record_data.clone(),
column_count: 1,
},
conn.get_pager().clone(),
)
.unwrap();
})

@@ -8,15 +8,15 @@
use crate::incremental::dbsp::{Delta, DeltaPair};
use crate::incremental::expr_compiler::CompiledExpression;
use crate::incremental::operator::{
EvalState, FilterOperator, FilterPredicate, IncrementalOperator, InputOperator, ProjectOperator,
create_dbsp_state_index, DbspStateCursors, EvalState, FilterOperator, FilterPredicate,
IncrementalOperator, InputOperator, ProjectOperator,
};
use crate::incremental::persistence::WriteRow;
use crate::storage::btree::BTreeCursor;
use crate::storage::btree::{BTreeCursor, BTreeKey};
// Note: logical module must be made pub(crate) in translate/mod.rs
use crate::translate::logical::{
BinaryOperator, LogicalExpr, LogicalPlan, LogicalSchema, SchemaRef,
};
use crate::types::{IOResult, SeekKey, Value};
use crate::types::{IOResult, ImmutableRecord, SeekKey, SeekOp, SeekResult, Value};
use crate::Pager;
use crate::{return_and_restore_if_io, return_if_io, LimboError, Result};
use std::collections::HashMap;
@@ -24,8 +24,120 @@ use std::fmt::{self, Display, Formatter};
use std::rc::Rc;
use std::sync::Arc;

// The state table is always a key-value store with 3 columns: key, state, and weight.
const OPERATOR_COLUMNS: usize = 3;
// The state table has 5 columns: operator_id, zset_id, element_id, value, weight
const OPERATOR_COLUMNS: usize = 5;

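// Illustrative sketch, not part of the diff: the shape of one row in the
// widened 5-column operator state table, following the field names in the
// comment above. The concrete Rust types here are assumptions for
// illustration; on disk this is simply OPERATOR_COLUMNS values in a btree
// record.
#[allow(dead_code)]
struct OperatorStateRow {
    operator_id: i64,  // which operator in the circuit owns this entry
    zset_id: i64,      // which z-set within that operator
    element_id: i64,   // element key within the z-set
    value: Vec<u8>,    // operator-specific value payload (assumed serialized)
    weight: i64,       // multiplicity of the element
}
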
/// State machine for writing rows to simple materialized views (table-only, no index)
#[derive(Debug, Default)]
pub enum WriteRowView {
#[default]
GetRecord,
Delete,
Insert {
final_weight: isize,
},
Done,
}

impl WriteRowView {
pub fn new() -> Self {
Self::default()
}

/// Write a row with weight management for table-only storage.
///
/// # Arguments
/// * `cursor` - BTree cursor for the storage
/// * `key` - The key to seek (TableRowId)
/// * `build_record` - Function that builds the record values to insert.
/// Takes the final_weight and returns the complete record values.
/// * `weight` - The weight delta to apply
pub fn write_row(
&mut self,
cursor: &mut BTreeCursor,
key: SeekKey,
build_record: impl Fn(isize) -> Vec<Value>,
weight: isize,
) -> Result<IOResult<()>> {
loop {
match self {
WriteRowView::GetRecord => {
let res = return_if_io!(cursor.seek(key.clone(), SeekOp::GE { eq_only: true }));
if !matches!(res, SeekResult::Found) {
*self = WriteRowView::Insert {
final_weight: weight,
};
} else {
let existing_record = return_if_io!(cursor.record());
let r = existing_record.ok_or_else(|| {
LimboError::InternalError(format!(
"Found key {key:?} in storage but could not read record"
))
})?;
let values = r.get_values();

// Weight is always the last value
let existing_weight = match values.last() {
Some(val) => match val.to_owned() {
Value::Integer(w) => w as isize,
_ => {
return Err(LimboError::InternalError(format!(
"Invalid weight value in storage for key {key:?}"
)))
}
},
None => {
return Err(LimboError::InternalError(format!(
"No weight value found in storage for key {key:?}"
)))
}
};

let final_weight = existing_weight + weight;
if final_weight <= 0 {
*self = WriteRowView::Delete
} else {
*self = WriteRowView::Insert { final_weight }
}
}
}
WriteRowView::Delete => {
// Mark as Done before delete to avoid retry on I/O
*self = WriteRowView::Done;
return_if_io!(cursor.delete());
}
WriteRowView::Insert { final_weight } => {
return_if_io!(cursor.seek(key.clone(), SeekOp::GE { eq_only: true }));

// Extract the row ID from the key
let key_i64 = match key {
SeekKey::TableRowId(id) => id,
_ => {
return Err(LimboError::InternalError(
"Expected TableRowId for storage".to_string(),
))
}
};

// Build the record values using the provided function
let record_values = build_record(*final_weight);

// Create an ImmutableRecord from the values
let immutable_record =
ImmutableRecord::from_values(&record_values, record_values.len());
let btree_key = BTreeKey::new_table_rowid(key_i64, Some(&immutable_record));

// Mark as Done before insert to avoid retry on I/O
*self = WriteRowView::Done;
return_if_io!(cursor.insert(&btree_key));
}
WriteRowView::Done => {
return Ok(IOResult::Done(()));
}
}
}
}
}

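// Illustrative sketch, not part of the diff: the weight-merge rule that
// WriteRowView::write_row applies when the seek finds an existing row,
// isolated from the btree cursor I/O. Some(w) corresponds to
// WriteRowView::Insert { final_weight: w } and None to WriteRowView::Delete;
// when the key is absent, write_row skips the merge and inserts with the
// incoming weight directly.
fn merged_weight(existing: isize, delta: isize) -> Option<isize> {
    let final_weight = existing + delta;
    // A non-positive multiplicity means the row leaves the view.
    (final_weight > 0).then_some(final_weight)
}
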
/// State machine for commit operations
pub enum CommitState {
@@ -36,8 +148,8 @@ pub enum CommitState {
CommitOperators {
/// Execute state for running the circuit
execute_state: Box<ExecuteState>,
/// Persistent cursor for operator state btree (internal_state_root)
state_cursor: Box<BTreeCursor>,
/// Persistent cursors for operator state (table and index)
state_cursors: Box<DbspStateCursors>,
},

/// Updating the materialized view with the delta
@@ -47,7 +159,7 @@ pub enum CommitState {
/// Current index in delta.changes being processed
current_index: usize,
/// State for writing individual rows
write_row_state: WriteRow,
write_row_state: WriteRowView,
/// Cursor for view data btree - created fresh for each row
view_cursor: Box<BTreeCursor>,
},
@@ -60,7 +172,8 @@ impl std::fmt::Debug for CommitState {
Self::CommitOperators { execute_state, .. } => f
.debug_struct("CommitOperators")
.field("execute_state", execute_state)
.field("has_state_cursor", &true)
.field("has_state_table_cursor", &true)
.field("has_state_index_cursor", &true)
.finish(),
Self::UpdateView {
delta,
@@ -221,25 +334,13 @@ impl std::fmt::Debug for DbspNode {
impl DbspNode {
fn process_node(
&mut self,
pager: Rc<Pager>,
eval_state: &mut EvalState,
root_page: usize,
commit_operators: bool,
state_cursor: Option<&mut Box<BTreeCursor>>,
cursors: &mut DbspStateCursors,
) -> Result<IOResult<Delta>> {
// Process delta using the executable operator
let op = &mut self.executable;

// Use provided cursor or create a local one
let mut local_cursor;
let cursor = if let Some(cursor) = state_cursor {
cursor.as_mut()
} else {
// Create a local cursor if none was provided
local_cursor = BTreeCursor::new_table(None, pager.clone(), root_page, OPERATOR_COLUMNS);
&mut local_cursor
};

let state = if commit_operators {
// Clone the deltas from eval_state - don't extract them
// in case we need to re-execute due to I/O
@@ -247,12 +348,12 @@ impl DbspNode {
EvalState::Init { deltas } => deltas.clone(),
_ => panic!("commit can only be called when eval_state is in Init state"),
};
let result = return_if_io!(op.commit(deltas, cursor));
let result = return_if_io!(op.commit(deltas, cursors));
// After successful commit, move state to Done
*eval_state = EvalState::Done;
result
} else {
return_if_io!(op.eval(eval_state, cursor))
return_if_io!(op.eval(eval_state, cursors))
};
Ok(IOResult::Done(state))
}
@@ -275,14 +376,20 @@ pub struct DbspCircuit {

/// Root page for the main materialized view data
pub(super) main_data_root: usize,
/// Root page for internal DBSP state
/// Root page for internal DBSP state table
pub(super) internal_state_root: usize,
/// Root page for the DBSP state table's primary key index
pub(super) internal_state_index_root: usize,
}

impl DbspCircuit {
/// Create a new empty circuit with initial empty schema
/// The actual output schema will be set when the root node is established
pub fn new(main_data_root: usize, internal_state_root: usize) -> Self {
pub fn new(
main_data_root: usize,
internal_state_root: usize,
internal_state_index_root: usize,
) -> Self {
// Start with an empty schema - will be updated when root is set
let empty_schema = Arc::new(LogicalSchema::new(vec![]));
Self {
@@ -293,6 +400,7 @@ impl DbspCircuit {
commit_state: CommitState::Init,
main_data_root,
internal_state_root,
internal_state_index_root,
}
}

@@ -326,18 +434,18 @@ impl DbspCircuit {

pub fn run_circuit(
&mut self,
pager: Rc<Pager>,
execute_state: &mut ExecuteState,
pager: &Rc<Pager>,
state_cursors: &mut DbspStateCursors,
commit_operators: bool,
state_cursor: &mut Box<BTreeCursor>,
) -> Result<IOResult<Delta>> {
if let Some(root_id) = self.root {
self.execute_node(
root_id,
pager,
pager.clone(),
execute_state,
commit_operators,
Some(state_cursor),
state_cursors,
)
} else {
Err(LimboError::ParseError(
@@ -358,7 +466,23 @@ impl DbspCircuit {
execute_state: &mut ExecuteState,
) -> Result<IOResult<Delta>> {
if let Some(root_id) = self.root {
self.execute_node(root_id, pager, execute_state, false, None)
// Create temporary cursors for execute (non-commit) operations
let table_cursor = BTreeCursor::new_table(
None,
pager.clone(),
self.internal_state_root,
OPERATOR_COLUMNS,
);
let index_def = create_dbsp_state_index(self.internal_state_index_root);
let index_cursor = BTreeCursor::new_index(
None,
pager.clone(),
self.internal_state_index_root,
&index_def,
3,
);
let mut cursors = DbspStateCursors::new(table_cursor, index_cursor);
self.execute_node(root_id, pager, execute_state, false, &mut cursors)
} else {
Err(LimboError::ParseError(
"Circuit has no root node".to_string(),
@@ -398,29 +522,42 @@ impl DbspCircuit {
let mut state = std::mem::replace(&mut self.commit_state, CommitState::Init);
match &mut state {
CommitState::Init => {
// Create state cursor when entering CommitOperators state
let state_cursor = Box::new(BTreeCursor::new_table(
// Create state cursors when entering CommitOperators state
let state_table_cursor = BTreeCursor::new_table(
None,
pager.clone(),
self.internal_state_root,
OPERATOR_COLUMNS,
);
let index_def = create_dbsp_state_index(self.internal_state_index_root);
let state_index_cursor = BTreeCursor::new_index(
None,
pager.clone(),
self.internal_state_index_root,
&index_def,
3, // Index on first 3 columns
);

let state_cursors = Box::new(DbspStateCursors::new(
state_table_cursor,
state_index_cursor,
));

self.commit_state = CommitState::CommitOperators {
execute_state: Box::new(ExecuteState::Init {
input_data: input_delta_set.clone(),
}),
state_cursor,
state_cursors,
};
}
CommitState::CommitOperators {
ref mut execute_state,
ref mut state_cursor,
ref mut state_cursors,
} => {
let delta = return_and_restore_if_io!(
&mut self.commit_state,
state,
self.run_circuit(pager.clone(), execute_state, true, state_cursor)
self.run_circuit(execute_state, &pager, state_cursors, true)
);

// Create view cursor when entering UpdateView state
@@ -434,7 +571,7 @@ impl DbspCircuit {
self.commit_state = CommitState::UpdateView {
delta,
current_index: 0,
write_row_state: WriteRow::new(),
write_row_state: WriteRowView::new(),
view_cursor,
};
}
@@ -453,7 +590,7 @@ impl DbspCircuit {

// If we're starting a new row (GetRecord state), we need a fresh cursor
// due to btree cursor state machine limitations
if matches!(write_row_state, WriteRow::GetRecord) {
if matches!(write_row_state, WriteRowView::GetRecord) {
*view_cursor = Box::new(BTreeCursor::new_table(
None,
pager.clone(),
@@ -493,7 +630,7 @@ impl DbspCircuit {
self.commit_state = CommitState::UpdateView {
delta,
current_index: *current_index + 1,
write_row_state: WriteRow::new(),
write_row_state: WriteRowView::new(),
view_cursor,
};
}
@@ -509,7 +646,7 @@ impl DbspCircuit {
pager: Rc<Pager>,
execute_state: &mut ExecuteState,
commit_operators: bool,
state_cursor: Option<&mut Box<BTreeCursor>>,
cursors: &mut DbspStateCursors,
) -> Result<IOResult<Delta>> {
loop {
match execute_state {
@@ -577,12 +714,30 @@ impl DbspCircuit {
// Get the (node_id, state) pair for the current index
let (input_node_id, input_state) = &mut input_states[*current_index];

// Create temporary cursors for the recursive call
let temp_table_cursor = BTreeCursor::new_table(
None,
pager.clone(),
self.internal_state_root,
OPERATOR_COLUMNS,
);
let index_def = create_dbsp_state_index(self.internal_state_index_root);
let temp_index_cursor = BTreeCursor::new_index(
None,
pager.clone(),
self.internal_state_index_root,
&index_def,
3,
);
let mut temp_cursors =
DbspStateCursors::new(temp_table_cursor, temp_index_cursor);

let delta = return_if_io!(self.execute_node(
*input_node_id,
pager.clone(),
input_state,
commit_operators,
None // Input nodes don't need state cursor
&mut temp_cursors
));
input_deltas.push(delta);
*current_index += 1;
@@ -595,13 +750,8 @@ impl DbspCircuit {
.get_mut(&node_id)
.ok_or_else(|| LimboError::ParseError("Node not found".to_string()))?;

let output_delta = return_if_io!(node.process_node(
pager.clone(),
eval_state,
self.internal_state_root,
commit_operators,
state_cursor,
));
let output_delta =
return_if_io!(node.process_node(eval_state, commit_operators, cursors));
return Ok(IOResult::Done(output_delta));
}
}
@@ -660,9 +810,17 @@ pub struct DbspCompiler {

impl DbspCompiler {
/// Create a new DBSP compiler
pub fn new(main_data_root: usize, internal_state_root: usize) -> Self {
pub fn new(
main_data_root: usize,
internal_state_root: usize,
internal_state_index_root: usize,
) -> Self {
Self {
circuit: DbspCircuit::new(main_data_root, internal_state_root),
circuit: DbspCircuit::new(
main_data_root,
internal_state_root,
internal_state_index_root,
),
}
}

@@ -781,9 +939,9 @@ impl DbspCompiler {
|
||||
use crate::function::AggFunc;
|
||||
use crate::incremental::operator::AggregateFunction;
|
||||
|
||||
let agg_fn = match fun {
|
||||
match fun {
|
||||
AggFunc::Count | AggFunc::Count0 => {
|
||||
AggregateFunction::Count
|
||||
aggregate_functions.push(AggregateFunction::Count);
|
||||
}
|
||||
AggFunc::Sum => {
|
||||
if args.is_empty() {
|
||||
@@ -791,7 +949,7 @@ impl DbspCompiler {
|
||||
}
|
||||
// Extract column name from the argument
|
||||
if let LogicalExpr::Column(col) = &args[0] {
|
||||
AggregateFunction::Sum(col.name.clone())
|
||||
aggregate_functions.push(AggregateFunction::Sum(col.name.clone()));
|
||||
} else {
|
||||
return Err(LimboError::ParseError(
|
||||
"Only column references are supported in aggregate functions for incremental views".to_string()
|
||||
@@ -803,36 +961,43 @@ impl DbspCompiler {
|
||||
return Err(LimboError::ParseError("AVG requires an argument".to_string()));
|
||||
}
|
||||
if let LogicalExpr::Column(col) = &args[0] {
|
||||
AggregateFunction::Avg(col.name.clone())
|
||||
aggregate_functions.push(AggregateFunction::Avg(col.name.clone()));
|
||||
} else {
|
||||
return Err(LimboError::ParseError(
|
||||
"Only column references are supported in aggregate functions for incremental views".to_string()
|
||||
));
|
||||
}
|
||||
}
|
||||
// MIN and MAX are not supported in incremental views due to storage overhead.
|
||||
// To correctly handle deletions, these operators would need to track all values
|
||||
// in each group, resulting in O(n) storage overhead. This is prohibitive for
|
||||
// large datasets. Alternative approaches like maintaining sorted indexes still
|
||||
// require O(n) storage. Until a more efficient solution is found, MIN/MAX
|
||||
// aggregations are not supported in materialized views.
|
||||
AggFunc::Min => {
|
||||
return Err(LimboError::ParseError(
|
||||
"MIN aggregation is not supported in incremental materialized views due to O(n) storage overhead required for handling deletions".to_string()
|
||||
));
|
||||
if args.is_empty() {
|
||||
return Err(LimboError::ParseError("MIN requires an argument".to_string()));
|
||||
}
|
||||
if let LogicalExpr::Column(col) = &args[0] {
|
||||
aggregate_functions.push(AggregateFunction::Min(col.name.clone()));
|
||||
} else {
|
||||
return Err(LimboError::ParseError(
|
||||
"Only column references are supported in MIN for incremental views".to_string()
|
||||
));
|
||||
}
|
||||
}
|
||||
AggFunc::Max => {
|
||||
return Err(LimboError::ParseError(
|
||||
"MAX aggregation is not supported in incremental materialized views due to O(n) storage overhead required for handling deletions".to_string()
|
||||
));
|
||||
if args.is_empty() {
|
||||
return Err(LimboError::ParseError("MAX requires an argument".to_string()));
|
||||
}
|
||||
if let LogicalExpr::Column(col) = &args[0] {
|
||||
aggregate_functions.push(AggregateFunction::Max(col.name.clone()));
|
||||
} else {
|
||||
return Err(LimboError::ParseError(
|
||||
"Only column references are supported in MAX for incremental views".to_string()
|
||||
));
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(LimboError::ParseError(
|
||||
format!("Unsupported aggregate function in DBSP compiler: {fun:?}")
|
||||
));
|
||||
}
|
||||
};
|
||||
aggregate_functions.push(agg_fn);
|
||||
}
|
||||
} else {
|
||||
return Err(LimboError::ParseError(
|
||||
"Expected aggregate function in aggregate expressions".to_string()
|
||||
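
For context on why COUNT/SUM/AVG were always cheap to maintain while MIN/MAX needed the new index machinery: in the ZSet model each delta is a (value, weight) pair, weight > 0 for inserts and < 0 for deletes, and the additive aggregates fold weights directly. A hedged, self-contained sketch of that arithmetic (not the crate's operator code):

fn apply_delta(count: &mut i64, sum: &mut i64, delta: &[(i64, i64)]) {
    for &(value, weight) in delta {
        *count += weight;          // COUNT(*) is just the total weight
        *sum += value * weight;    // SUM folds value scaled by weight
    }
}

fn main() {
    let (mut count, mut sum) = (0i64, 0i64);

    // Insert 5, 7, 9.
    apply_delta(&mut count, &mut sum, &[(5, 1), (7, 1), (9, 1)]);
    assert_eq!((count, sum), (3, 21));

    // Delete 7: a weight of -1 retracts it without rescanning the group.
    apply_delta(&mut count, &mut sum, &[(7, -1)]);
    assert_eq!((count, sum), (2, 14));
    // AVG falls out as sum / count = 7. For MIN/MAX, retracting the current
    // minimum (5) would leave us unable to answer without more state, which
    // is what the persistent sorted index in this change provides.
    println!("count={count} sum={sum} avg={}", sum / count);
}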
@@ -840,19 +1005,17 @@ impl DbspCompiler {
}
}

// Create the AggregateOperator with a unique operator_id
// Use the next_node_id as the operator_id to ensure uniqueness
let operator_id = self.circuit.next_id;

use crate::incremental::operator::AggregateOperator;
let executable: Box<dyn IncrementalOperator> = Box::new(AggregateOperator::new(
operator_id, // Use next_node_id as operator_id
group_by_columns,
operator_id,
group_by_columns.clone(),
aggregate_functions.clone(),
input_column_names,
input_column_names.clone(),
));

// Create aggregate node
let node_id = self.circuit.add_node(
let result_node_id = self.circuit.add_node(
DbspOperator::Aggregate {
group_exprs: dbsp_group_exprs,
aggr_exprs: aggregate_functions,
@@ -861,7 +1024,8 @@ impl DbspCompiler {
vec![input_id],
executable,
);
Ok(node_id)

Ok(result_node_id)
}
LogicalPlan::TableScan(scan) => {
// Create input node with InputOperator for uniform handling
@@ -1252,7 +1416,7 @@ mod tests {
}};
}

fn setup_btree_for_circuit() -> (Rc<Pager>, usize, usize) {
fn setup_btree_for_circuit() -> (Rc<Pager>, usize, usize, usize) {
let io: Arc<dyn IO> = Arc::new(MemoryIO::new());
let db = Database::open_file(io.clone(), ":memory:", false, false).unwrap();
let conn = db.connect().unwrap();
@@ -1270,13 +1434,24 @@ mod tests {
.block(|| pager.btree_create(&CreateBTreeFlags::new_table()))
.unwrap() as usize;

(pager, main_root_page, dbsp_state_page)
let dbsp_state_index_page = pager
.io
.block(|| pager.btree_create(&CreateBTreeFlags::new_index()))
.unwrap() as usize;

(
pager,
main_root_page,
dbsp_state_page,
dbsp_state_index_page,
)
}

// Macro to compile SQL to DBSP circuit
macro_rules! compile_sql {
($sql:expr) => {{
let (pager, main_root_page, dbsp_state_page) = setup_btree_for_circuit();
let (pager, main_root_page, dbsp_state_page, dbsp_state_index_page) =
setup_btree_for_circuit();
let schema = test_schema!();
let mut parser = Parser::new($sql.as_bytes());
let cmd = parser
@@ -1289,7 +1464,7 @@ mod tests {
let mut builder = LogicalPlanBuilder::new(&schema);
let logical_plan = builder.build_statement(&stmt).unwrap();
(
DbspCompiler::new(main_root_page, dbsp_state_page)
DbspCompiler::new(main_root_page, dbsp_state_page, dbsp_state_index_page)
.compile(&logical_plan)
.unwrap(),
pager,
@@ -3162,10 +3337,10 @@ mod tests {

#[test]
fn test_circuit_rowid_update_consolidation() {
let (pager, p1, p2) = setup_btree_for_circuit();
let (pager, p1, p2, p3) = setup_btree_for_circuit();

// Test that circuit properly consolidates state when rowid changes
let mut circuit = DbspCircuit::new(p1, p2);
let mut circuit = DbspCircuit::new(p1, p2, p3);

// Create a simple filter node
let schema = Arc::new(LogicalSchema::new(vec![

File diff suppressed because it is too large
@@ -1,7 +1,12 @@
use crate::incremental::operator::{AggregateFunction, AggregateState};
use crate::incremental::dbsp::HashableRow;
use crate::incremental::operator::{
generate_storage_id, AggColumnInfo, AggregateFunction, AggregateOperator, AggregateState,
DbspStateCursors, MinMaxDeltas, AGG_TYPE_MINMAX,
};
use crate::storage::btree::{BTreeCursor, BTreeKey};
use crate::types::{IOResult, SeekKey, SeekOp, SeekResult};
use crate::{return_if_io, Result, Value};
use crate::types::{IOResult, ImmutableRecord, RefValue, SeekKey, SeekOp, SeekResult};
use crate::{return_if_io, LimboError, Result, Value};
use std::collections::{HashMap, HashSet};

#[derive(Debug, Default)]
pub enum ReadRecord {
@@ -32,21 +37,22 @@ impl ReadRecord {
} else {
let record = return_if_io!(cursor.record());
let r = record.ok_or_else(|| {
crate::LimboError::InternalError(format!(
LimboError::InternalError(format!(
"Found key {key:?} in aggregate storage but could not read record"
))
})?;
let values = r.get_values();
let blob = values[1].to_owned();
// The blob is in column 3: operator_id, zset_id, element_id, value, weight
let blob = values[3].to_owned();

let (state, _group_key) = match blob {
Value::Blob(blob) => AggregateState::from_blob(&blob, aggregates)
.ok_or_else(|| {
crate::LimboError::InternalError(format!(
LimboError::InternalError(format!(
"Cannot deserialize aggregate state {blob:?}",
))
}),
_ => Err(crate::LimboError::ParseError(
_ => Err(LimboError::ParseError(
"Value in aggregator not blob".to_string(),
)),
}?;
@@ -63,8 +69,22 @@ impl ReadRecord {
pub enum WriteRow {
#[default]
GetRecord,
Delete,
Insert {
Delete {
rowid: i64,
},
DeleteIndex,
ComputeNewRowId {
final_weight: isize,
},
InsertNew {
rowid: i64,
final_weight: isize,
},
InsertIndex {
rowid: i64,
},
UpdateExisting {
rowid: i64,
final_weight: isize,
},
Done,
@@ -75,97 +95,193 @@ impl WriteRow {
Self::default()
}

/// Write a row with weight management.
/// Write a row with weight management using index for lookups.
///
/// # Arguments
/// * `cursor` - BTree cursor for the storage
/// * `key` - The key to seek (TableRowId)
/// * `build_record` - Function that builds the record values to insert.
/// Takes the final_weight and returns the complete record values.
/// * `cursors` - DBSP state cursors (table and index)
/// * `index_key` - The key to seek in the index
/// * `record_values` - The record values (without weight) to insert
/// * `weight` - The weight delta to apply
pub fn write_row<F>(
pub fn write_row(
&mut self,
cursor: &mut BTreeCursor,
key: SeekKey,
build_record: F,
cursors: &mut DbspStateCursors,
index_key: Vec<Value>,
record_values: Vec<Value>,
weight: isize,
) -> Result<IOResult<()>>
where
F: Fn(isize) -> Vec<Value>,
{
) -> Result<IOResult<()>> {
loop {
match self {
WriteRow::GetRecord => {
let res = return_if_io!(cursor.seek(key.clone(), SeekOp::GE { eq_only: true }));
// First, seek in the index to find if the row exists
let index_values = index_key.clone();
let index_record =
ImmutableRecord::from_values(&index_values, index_values.len());

let res = return_if_io!(cursors.index_cursor.seek(
SeekKey::IndexKey(&index_record),
SeekOp::GE { eq_only: true }
));

if !matches!(res, SeekResult::Found) {
*self = WriteRow::Insert {
// Row doesn't exist, we'll insert a new one
*self = WriteRow::ComputeNewRowId {
final_weight: weight,
};
} else {
let existing_record = return_if_io!(cursor.record());
// Found in index, get the rowid it points to
let rowid = return_if_io!(cursors.index_cursor.rowid());
let rowid = rowid.ok_or_else(|| {
LimboError::InternalError(
"Index cursor does not have a valid rowid".to_string(),
)
})?;

// Now seek in the table using the rowid
let table_res = return_if_io!(cursors
.table_cursor
.seek(SeekKey::TableRowId(rowid), SeekOp::GE { eq_only: true }));

if !matches!(table_res, SeekResult::Found) {
return Err(LimboError::InternalError(
"Index points to non-existent table row".to_string(),
));
}

let existing_record = return_if_io!(cursors.table_cursor.record());
let r = existing_record.ok_or_else(|| {
crate::LimboError::InternalError(format!(
"Found key {key:?} in storage but could not read record"
))
LimboError::InternalError(
"Found rowid in table but could not read record".to_string(),
)
})?;
let values = r.get_values();

// Weight is always the last value
let existing_weight = match values.last() {
// Weight is always the last value (column 4 in our 5-column structure)
let existing_weight = match values.get(4) {
Some(val) => match val.to_owned() {
Value::Integer(w) => w as isize,
_ => {
return Err(crate::LimboError::InternalError(format!(
"Invalid weight value in storage for key {key:?}"
)))
return Err(LimboError::InternalError(
"Invalid weight value in storage".to_string(),
))
}
},
None => {
return Err(crate::LimboError::InternalError(format!(
"No weight value found in storage for key {key:?}"
)))
return Err(LimboError::InternalError(
"No weight value found in storage".to_string(),
))
}
};

let final_weight = existing_weight + weight;
if final_weight <= 0 {
*self = WriteRow::Delete
// Store index_key for later deletion of index entry
*self = WriteRow::Delete { rowid }
} else {
*self = WriteRow::Insert { final_weight }
// Store the rowid for update
*self = WriteRow::UpdateExisting {
rowid,
final_weight,
}
}
}
}
WriteRow::Delete => {
WriteRow::Delete { rowid } => {
// Seek to the row and delete it
return_if_io!(cursors
.table_cursor
.seek(SeekKey::TableRowId(*rowid), SeekOp::GE { eq_only: true }));

// Transition to DeleteIndex to also delete the index entry
*self = WriteRow::DeleteIndex;
return_if_io!(cursors.table_cursor.delete());
}
WriteRow::DeleteIndex => {
// Mark as Done before delete to avoid retry on I/O
*self = WriteRow::Done;
return_if_io!(cursor.delete());
return_if_io!(cursors.index_cursor.delete());
}
WriteRow::Insert { final_weight } => {
return_if_io!(cursor.seek(key.clone(), SeekOp::GE { eq_only: true }));

// Extract the row ID from the key
let key_i64 = match key {
SeekKey::TableRowId(id) => id,
_ => {
return Err(crate::LimboError::InternalError(
"Expected TableRowId for storage".to_string(),
))
WriteRow::ComputeNewRowId { final_weight } => {
// Find the last rowid to compute the next one
return_if_io!(cursors.table_cursor.last());
let rowid = if cursors.table_cursor.is_empty() {
1
} else {
match return_if_io!(cursors.table_cursor.rowid()) {
Some(id) => id + 1,
None => {
return Err(LimboError::InternalError(
"Table cursor has rows but no valid rowid".to_string(),
))
}
}
};

// Build the record values using the provided function
let record_values = build_record(*final_weight);
// Transition to InsertNew with the computed rowid
*self = WriteRow::InsertNew {
rowid,
final_weight: *final_weight,
};
}
WriteRow::InsertNew {
rowid,
final_weight,
} => {
let rowid_val = *rowid;
let final_weight_val = *final_weight;

// Seek to where we want to insert
// The insert will position the cursor correctly
return_if_io!(cursors.table_cursor.seek(
SeekKey::TableRowId(rowid_val),
SeekOp::GE { eq_only: false }
));

// Build the complete record with weight
// Use the function parameter record_values directly
let mut complete_record = record_values.clone();
complete_record.push(Value::Integer(final_weight_val as i64));

// Create an ImmutableRecord from the values
let immutable_record = crate::types::ImmutableRecord::from_values(
&record_values,
record_values.len(),
);
let btree_key = BTreeKey::new_table_rowid(key_i64, Some(&immutable_record));
let immutable_record =
ImmutableRecord::from_values(&complete_record, complete_record.len());
let btree_key = BTreeKey::new_table_rowid(rowid_val, Some(&immutable_record));

// Transition to InsertIndex state after table insertion
*self = WriteRow::InsertIndex { rowid: rowid_val };
return_if_io!(cursors.table_cursor.insert(&btree_key));
}
WriteRow::InsertIndex { rowid } => {
// For has_rowid indexes, we need to append the rowid to the index key
// Use the function parameter index_key directly
let mut index_values = index_key.clone();
index_values.push(Value::Integer(*rowid));

// Create the index record with the rowid appended
let index_record =
ImmutableRecord::from_values(&index_values, index_values.len());
let index_btree_key = BTreeKey::new_index_key(&index_record);

// Mark as Done before index insert to avoid retry on I/O
*self = WriteRow::Done;
return_if_io!(cursors.index_cursor.insert(&index_btree_key));
}
WriteRow::UpdateExisting {
rowid,
final_weight,
} => {
// Build the complete record with weight
let mut complete_record = record_values.clone();
complete_record.push(Value::Integer(*final_weight as i64));

// Create an ImmutableRecord from the values
let immutable_record =
ImmutableRecord::from_values(&complete_record, complete_record.len());
let btree_key = BTreeKey::new_table_rowid(*rowid, Some(&immutable_record));

// Mark as Done before insert to avoid retry on I/O
*self = WriteRow::Done;
return_if_io!(cursor.insert(&btree_key));
// BTree insert with existing key will replace the old value
return_if_io!(cursors.table_cursor.insert(&btree_key));
}
WriteRow::Done => {
return Ok(IOResult::Done(()));
@@ -174,3 +290,672 @@ impl WriteRow {
}
}
}
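
A minimal in-memory model of write_row's weight handling, assuming the same merge rule as above: look the row up, add the incoming weight to the stored one, delete when the total drops to zero or below, otherwise rewrite the row with the new weight. The BTreeMap stands in for the table and index btrees.

use std::collections::BTreeMap;

fn write_row(store: &mut BTreeMap<Vec<i64>, i64>, key: Vec<i64>, weight: i64) {
    match store.get(&key).copied() {
        None if weight > 0 => {
            store.insert(key, weight); // brand new row
        }
        None => {} // retraction of a row we never stored: nothing to do
        Some(existing) => {
            let final_weight = existing + weight;
            if final_weight <= 0 {
                store.remove(&key); // fully retracted
            } else {
                store.insert(key, final_weight); // replace, like a btree upsert
            }
        }
    }
}

fn main() {
    let mut store = BTreeMap::new();
    write_row(&mut store, vec![1, 10], 1); // insert
    write_row(&mut store, vec![1, 10], 2); // two more of the same row
    assert_eq!(store[&vec![1, 10]], 3);
    write_row(&mut store, vec![1, 10], -3); // retract all copies
    assert!(store.is_empty());
}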

/// State machine for recomputing MIN/MAX values after deletion
#[derive(Debug)]
pub enum RecomputeMinMax {
ProcessElements {
/// Current column being processed
current_column_idx: usize,
/// Columns to process (combined MIN and MAX)
columns_to_process: Vec<(String, String, bool)>, // (group_key, column_name, is_min)
/// MIN/MAX deltas for checking values and weights
min_max_deltas: MinMaxDeltas,
},
Scan {
/// Columns still to process
columns_to_process: Vec<(String, String, bool)>,
/// Current index in columns_to_process (will resume from here)
current_column_idx: usize,
/// MIN/MAX deltas for checking values and weights
min_max_deltas: MinMaxDeltas,
/// Current group key being processed
group_key: String,
/// Current column name being processed
column_name: String,
/// Whether we're looking for MIN (true) or MAX (false)
is_min: bool,
/// The scan state machine for finding the new MIN/MAX
scan_state: Box<ScanState>,
},
Done,
}

impl RecomputeMinMax {
pub fn new(
min_max_deltas: MinMaxDeltas,
existing_groups: &HashMap<String, AggregateState>,
operator: &AggregateOperator,
) -> Self {
let mut groups_to_check: HashSet<(String, String, bool)> = HashSet::new();

// Remember the min_max_deltas are essentially just the only column that is affected by
// this min/max, in delta (actually ZSet - consolidated delta) format. This makes it easier
// for us to consume it in here.
//
// The most challenging case is the case where there is a retraction, since we need to go
// back to the index.
for (group_key_str, values) in &min_max_deltas {
for ((col_name, hashable_row), weight) in values {
let col_info = operator.column_min_max.get(col_name);

let value = &hashable_row.values[0];

if *weight < 0 {
// Deletion detected - check if it's the current MIN/MAX
if let Some(state) = existing_groups.get(group_key_str) {
// Check for MIN
if let Some(current_min) = state.mins.get(col_name) {
if current_min == value {
groups_to_check.insert((
group_key_str.clone(),
col_name.clone(),
true,
));
}
}
// Check for MAX
if let Some(current_max) = state.maxs.get(col_name) {
if current_max == value {
groups_to_check.insert((
group_key_str.clone(),
col_name.clone(),
false,
));
}
}
}
} else if *weight > 0 {
// If it is not found in the existing groups, then we only need to care
// about this if this is a new record being inserted
if let Some(info) = col_info {
if info.has_min {
groups_to_check.insert((group_key_str.clone(), col_name.clone(), true));
}
if info.has_max {
groups_to_check.insert((
group_key_str.clone(),
col_name.clone(),
false,
));
}
}
}
}
}

if groups_to_check.is_empty() {
// No recomputation or initialization needed
Self::Done
} else {
// Convert HashSet to Vec for indexed processing
let groups_to_check_vec: Vec<_> = groups_to_check.into_iter().collect();
Self::ProcessElements {
current_column_idx: 0,
columns_to_process: groups_to_check_vec,
min_max_deltas,
}
}
}

pub fn process(
&mut self,
existing_groups: &mut HashMap<String, AggregateState>,
operator: &AggregateOperator,
cursors: &mut DbspStateCursors,
) -> Result<IOResult<()>> {
loop {
match self {
RecomputeMinMax::ProcessElements {
current_column_idx,
columns_to_process,
min_max_deltas,
} => {
if *current_column_idx >= columns_to_process.len() {
*self = RecomputeMinMax::Done;
return Ok(IOResult::Done(()));
}

let (group_key, column_name, is_min) =
columns_to_process[*current_column_idx].clone();

// Get column index from pre-computed info
let column_index = operator
.column_min_max
.get(&column_name)
.map(|info| info.index)
.unwrap(); // Should always exist since we're processing known columns

// Get current value from existing state
let current_value = existing_groups.get(&group_key).and_then(|state| {
if is_min {
state.mins.get(&column_name).cloned()
} else {
state.maxs.get(&column_name).cloned()
}
});

// Create storage keys for index lookup
let storage_id =
generate_storage_id(operator.operator_id, column_index, AGG_TYPE_MINMAX);
let zset_id = operator.generate_group_rowid(&group_key);

// Get the values for this group from min_max_deltas
let group_values = min_max_deltas.get(&group_key).cloned().unwrap_or_default();

let columns_to_process = std::mem::take(columns_to_process);
let min_max_deltas = std::mem::take(min_max_deltas);

let scan_state = if is_min {
Box::new(ScanState::new_for_min(
current_value,
group_key.clone(),
column_name.clone(),
storage_id,
zset_id,
group_values,
))
} else {
Box::new(ScanState::new_for_max(
current_value,
group_key.clone(),
column_name.clone(),
storage_id,
zset_id,
group_values,
))
};

*self = RecomputeMinMax::Scan {
columns_to_process,
current_column_idx: *current_column_idx,
min_max_deltas,
group_key,
column_name,
is_min,
scan_state,
};
}
RecomputeMinMax::Scan {
columns_to_process,
current_column_idx,
min_max_deltas,
group_key,
column_name,
is_min,
scan_state,
} => {
// Find new value using the scan state machine
let new_value = return_if_io!(scan_state.find_new_value(cursors));

// Update the state with new value (create if doesn't exist)
let state = existing_groups.entry(group_key.clone()).or_default();

if *is_min {
if let Some(min_val) = new_value {
state.mins.insert(column_name.clone(), min_val);
} else {
state.mins.remove(column_name);
}
} else if let Some(max_val) = new_value {
state.maxs.insert(column_name.clone(), max_val);
} else {
state.maxs.remove(column_name);
}

// Move to next column
let min_max_deltas = std::mem::take(min_max_deltas);
let columns_to_process = std::mem::take(columns_to_process);
*self = RecomputeMinMax::ProcessElements {
current_column_idx: *current_column_idx + 1,
columns_to_process,
min_max_deltas,
};
}
RecomputeMinMax::Done => {
return Ok(IOResult::Done(()));
}
}
}
}
}

/// State machine for scanning through the index to find new MIN/MAX values
#[derive(Debug)]
pub enum ScanState {
CheckCandidate {
/// Current candidate value for MIN/MAX
candidate: Option<Value>,
/// Group key being processed
group_key: String,
/// Column name being processed
column_name: String,
/// Storage ID for the index seek
storage_id: i64,
/// ZSet ID for the group
zset_id: i64,
/// Group values from MinMaxDeltas: (column_name, HashableRow) -> weight
group_values: HashMap<(String, HashableRow), isize>,
/// Whether we're looking for MIN (true) or MAX (false)
is_min: bool,
},
FetchNextCandidate {
/// Current candidate to seek past
current_candidate: Value,
/// Group key being processed
group_key: String,
/// Column name being processed
column_name: String,
/// Storage ID for the index seek
storage_id: i64,
/// ZSet ID for the group
zset_id: i64,
/// Group values from MinMaxDeltas: (column_name, HashableRow) -> weight
group_values: HashMap<(String, HashableRow), isize>,
/// Whether we're looking for MIN (true) or MAX (false)
is_min: bool,
},
Done {
/// The final MIN/MAX value found
result: Option<Value>,
},
}

impl ScanState {
pub fn new_for_min(
current_min: Option<Value>,
group_key: String,
column_name: String,
storage_id: i64,
zset_id: i64,
group_values: HashMap<(String, HashableRow), isize>,
) -> Self {
Self::CheckCandidate {
candidate: current_min,
group_key,
column_name,
storage_id,
zset_id,
group_values,
is_min: true,
}
}

// Extract a new candidate from the index. It is possible that, when searching,
// we end up going into a different operator altogether. That means we have
// exhausted this operator (or group) entirely, and no good candidate was found
fn extract_new_candidate(
cursors: &mut DbspStateCursors,
index_record: &ImmutableRecord,
seek_op: SeekOp,
storage_id: i64,
zset_id: i64,
) -> Result<IOResult<Option<Value>>> {
let seek_result = return_if_io!(cursors
.index_cursor
.seek(SeekKey::IndexKey(index_record), seek_op));
if !matches!(seek_result, SeekResult::Found) {
return Ok(IOResult::Done(None));
}

let record = return_if_io!(cursors.index_cursor.record()).ok_or_else(|| {
LimboError::InternalError(
"Record found on the cursor, but could not be read".to_string(),
)
})?;

let values = record.get_values();
if values.len() < 3 {
return Ok(IOResult::Done(None));
}

let Some(rec_storage_id) = values.first() else {
return Ok(IOResult::Done(None));
};
let Some(rec_zset_id) = values.get(1) else {
return Ok(IOResult::Done(None));
};

// Check if we're still in the same group
if let (RefValue::Integer(rec_sid), RefValue::Integer(rec_zid)) =
(rec_storage_id, rec_zset_id)
{
if *rec_sid != storage_id || *rec_zid != zset_id {
return Ok(IOResult::Done(None));
}
} else {
return Ok(IOResult::Done(None));
}

// Get the value (3rd element)
Ok(IOResult::Done(values.get(2).map(|v| v.to_owned())))
}

pub fn new_for_max(
current_max: Option<Value>,
group_key: String,
column_name: String,
storage_id: i64,
zset_id: i64,
group_values: HashMap<(String, HashableRow), isize>,
) -> Self {
Self::CheckCandidate {
candidate: current_max,
group_key,
column_name,
storage_id,
zset_id,
group_values,
is_min: false,
}
}

pub fn find_new_value(
&mut self,
cursors: &mut DbspStateCursors,
) -> Result<IOResult<Option<Value>>> {
loop {
match self {
ScanState::CheckCandidate {
candidate,
group_key,
column_name,
storage_id,
zset_id,
group_values,
is_min,
} => {
// First, check if we have a candidate
if let Some(cand_val) = candidate {
// Check if the candidate is retracted (weight <= 0)
// Create a HashableRow to look up the weight
let hashable_cand = HashableRow::new(0, vec![cand_val.clone()]);
let key = (column_name.clone(), hashable_cand);
let is_retracted =
group_values.get(&key).is_some_and(|weight| *weight <= 0);

if is_retracted {
// Candidate is retracted, need to fetch next from index
*self = ScanState::FetchNextCandidate {
current_candidate: cand_val.clone(),
group_key: std::mem::take(group_key),
column_name: std::mem::take(column_name),
storage_id: *storage_id,
zset_id: *zset_id,
group_values: std::mem::take(group_values),
is_min: *is_min,
};
continue;
}
}

// Candidate is valid or we have no candidate
// Now find the best value from insertions in group_values
let mut best_from_zset = None;
for ((col, hashable_val), weight) in group_values.iter() {
if col == column_name && *weight > 0 {
let value = &hashable_val.values[0];
// Skip NULL values - they don't participate in MIN/MAX
if value == &Value::Null {
continue;
}
// This is an insertion for our column
if let Some(ref current_best) = best_from_zset {
if *is_min {
if value.cmp(current_best) == std::cmp::Ordering::Less {
best_from_zset = Some(value.clone());
}
} else if value.cmp(current_best) == std::cmp::Ordering::Greater {
best_from_zset = Some(value.clone());
}
} else {
best_from_zset = Some(value.clone());
}
}
}

// Compare candidate with best from ZSet, filtering out NULLs
let result = match (&candidate, &best_from_zset) {
(Some(cand), Some(zset_val)) if cand != &Value::Null => {
if *is_min {
if zset_val.cmp(cand) == std::cmp::Ordering::Less {
Some(zset_val.clone())
} else {
Some(cand.clone())
}
} else if zset_val.cmp(cand) == std::cmp::Ordering::Greater {
Some(zset_val.clone())
} else {
Some(cand.clone())
}
}
(Some(cand), None) if cand != &Value::Null => Some(cand.clone()),
(None, Some(zset_val)) => Some(zset_val.clone()),
(Some(cand), Some(_)) if cand == &Value::Null => best_from_zset,
_ => None,
};

*self = ScanState::Done { result };
}

ScanState::FetchNextCandidate {
current_candidate,
group_key,
column_name,
storage_id,
zset_id,
group_values,
is_min,
} => {
// Seek to the next value in the index
let index_key = vec![
Value::Integer(*storage_id),
Value::Integer(*zset_id),
current_candidate.clone(),
];
let index_record = ImmutableRecord::from_values(&index_key, index_key.len());

let seek_op = if *is_min {
SeekOp::GT // For MIN, seek greater than current
} else {
SeekOp::LT // For MAX, seek less than current
};

let new_candidate = return_if_io!(Self::extract_new_candidate(
cursors,
&index_record,
seek_op,
*storage_id,
*zset_id
));

*self = ScanState::CheckCandidate {
candidate: new_candidate,
group_key: std::mem::take(group_key),
column_name: std::mem::take(column_name),
storage_id: *storage_id,
zset_id: *zset_id,
group_values: std::mem::take(group_values),
is_min: *is_min,
};
}

ScanState::Done { result } => {
return Ok(IOResult::Done(result.clone()));
}
}
}
}
}
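
A self-contained model of ScanState's search, assuming the layout described above: the persistent index holds previously committed values in sorted order, and `pending` holds this transaction's delta as value -> weight. The new MIN is the better of (a) the smallest non-retracted indexed value and (b) the smallest freshly inserted value.

use std::collections::{BTreeSet, HashMap};

fn new_min(index: &BTreeSet<i64>, pending: &HashMap<i64, i64>) -> Option<i64> {
    // (a) walk the index in order, skipping values the delta retracts
    let from_index = index
        .iter()
        .copied()
        .find(|v| pending.get(v).map_or(true, |w| *w > 0));
    // (b) smallest value inserted in this delta
    let from_delta = pending
        .iter()
        .filter(|&(_, w)| *w > 0)
        .map(|(v, _)| *v)
        .min();
    match (from_index, from_delta) {
        (Some(a), Some(b)) => Some(a.min(b)),
        (a, b) => a.or(b),
    }
}

fn main() {
    let index: BTreeSet<i64> = [3, 5, 9].into_iter().collect();

    // Retract the current minimum (3) and insert 4 in the same delta.
    let pending: HashMap<i64, i64> = [(3, -1), (4, 1)].into_iter().collect();
    assert_eq!(new_min(&index, &pending), Some(4));

    // Retract everything: no minimum remains.
    let drain: HashMap<i64, i64> = [(3, -1), (5, -1), (9, -1)].into_iter().collect();
    assert_eq!(new_min(&index, &drain), None);
}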

/// State machine for persisting Min/Max values to storage
#[derive(Debug)]
pub enum MinMaxPersistState {
Init {
min_max_deltas: MinMaxDeltas,
group_keys: Vec<String>,
},
ProcessGroup {
min_max_deltas: MinMaxDeltas,
group_keys: Vec<String>,
group_idx: usize,
value_idx: usize,
},
WriteValue {
min_max_deltas: MinMaxDeltas,
group_keys: Vec<String>,
group_idx: usize,
value_idx: usize,
value: Value,
column_name: String,
weight: isize,
write_row: WriteRow,
},
Done,
}

impl MinMaxPersistState {
pub fn new(min_max_deltas: MinMaxDeltas) -> Self {
let group_keys: Vec<String> = min_max_deltas.keys().cloned().collect();
Self::Init {
min_max_deltas,
group_keys,
}
}

pub fn persist_min_max(
&mut self,
operator_id: usize,
column_min_max: &HashMap<String, AggColumnInfo>,
cursors: &mut DbspStateCursors,
generate_group_rowid: impl Fn(&str) -> i64,
) -> Result<IOResult<()>> {
loop {
match self {
MinMaxPersistState::Init {
min_max_deltas,
group_keys,
} => {
let min_max_deltas = std::mem::take(min_max_deltas);
let group_keys = std::mem::take(group_keys);
*self = MinMaxPersistState::ProcessGroup {
min_max_deltas,
group_keys,
group_idx: 0,
value_idx: 0,
};
}
MinMaxPersistState::ProcessGroup {
min_max_deltas,
group_keys,
group_idx,
value_idx,
} => {
// Check if we're past all groups
if *group_idx >= group_keys.len() {
*self = MinMaxPersistState::Done;
continue;
}

let group_key_str = &group_keys[*group_idx];
let values = &min_max_deltas[group_key_str]; // This should always exist

// Convert HashMap to Vec for indexed access
let values_vec: Vec<_> = values.iter().collect();

// Check if we have more values in current group
if *value_idx >= values_vec.len() {
*group_idx += 1;
*value_idx = 0;
// Continue to check if we're past all groups now
continue;
}

// Process current value and extract what we need before taking ownership
let ((column_name, hashable_row), weight) = values_vec[*value_idx];
let column_name = column_name.clone();
let value = hashable_row.values[0].clone(); // Extract the Value from HashableRow
let weight = *weight;

let min_max_deltas = std::mem::take(min_max_deltas);
let group_keys = std::mem::take(group_keys);
*self = MinMaxPersistState::WriteValue {
min_max_deltas,
group_keys,
group_idx: *group_idx,
value_idx: *value_idx,
column_name,
value,
weight,
write_row: WriteRow::new(),
};
}
MinMaxPersistState::WriteValue {
min_max_deltas,
group_keys,
group_idx,
value_idx,
value,
column_name,
weight,
write_row,
} => {
// Should have exited in the previous state
assert!(*group_idx < group_keys.len());

let group_key_str = &group_keys[*group_idx];

// Get the column index from the pre-computed map
let column_info = column_min_max
.get(&*column_name)
.expect("Column should exist in column_min_max map");
let column_index = column_info.index;

// Build the key components for MinMax storage using new encoding
let storage_id =
generate_storage_id(operator_id, column_index, AGG_TYPE_MINMAX);
let zset_id = generate_group_rowid(group_key_str);

// element_id is the actual value for Min/Max
let element_id_val = value.clone();

// Create index key
let index_key = vec![
Value::Integer(storage_id),
Value::Integer(zset_id),
element_id_val.clone(),
];

// Record values (operator_id, zset_id, element_id, unused_placeholder)
// For MIN/MAX, the element_id IS the value, so we use NULL for the 4th column
let record_values = vec![
Value::Integer(storage_id),
Value::Integer(zset_id),
element_id_val.clone(),
Value::Null, // Placeholder - not used for MIN/MAX
];

return_if_io!(write_row.write_row(
cursors,
index_key.clone(),
record_values,
*weight
));

// Move to next value
let min_max_deltas = std::mem::take(min_max_deltas);
let group_keys = std::mem::take(group_keys);
*self = MinMaxPersistState::ProcessGroup {
min_max_deltas,
group_keys,
group_idx: *group_idx,
value_idx: *value_idx + 1,
};
}
MinMaxPersistState::Done => {
return Ok(IOResult::Done(()));
}
}
}
}
}
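
An illustrative model of the (storage_id, zset_id, element_id) index key used above. Because the id pair sorts first, every operator/group owns a contiguous, value-ordered run, so "next candidate after X" is a bounded range scan. The exact bit layout of generate_storage_id is not shown here and is an assumption of this sketch.

use std::collections::BTreeSet;
use std::ops::Bound::{Excluded, Unbounded};

type Key = (i64, i64, i64); // (storage_id, zset_id, value)

fn next_candidate(index: &BTreeSet<Key>, sid: i64, zid: i64, after: i64) -> Option<i64> {
    index
        .range((Excluded((sid, zid, after)), Unbounded))
        .next()
        // A hit in a different operator or group means this run is exhausted.
        .filter(|(s, z, _)| *s == sid && *z == zid)
        .map(|(_, _, v)| *v)
}

fn main() {
    let mut index = BTreeSet::new();
    // Two groups of the same MIN/MAX operator (storage_id = 7).
    for v in [3, 5, 9] {
        index.insert((7, 1, v));
    }
    index.insert((7, 2, 4));

    assert_eq!(next_candidate(&index, 7, 1, 3), Some(5)); // strictly after 3
    assert_eq!(next_candidate(&index, 7, 1, 9), None); // run exhausted
    assert_eq!(next_candidate(&index, 7, 2, 0), Some(4));
}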
@@ -206,6 +206,7 @@ impl IncrementalView {
schema: &Schema,
main_data_root: usize,
internal_state_root: usize,
internal_state_index_root: usize,
) -> Result<DbspCircuit> {
// Build the logical plan from the SELECT statement
let mut builder = LogicalPlanBuilder::new(schema);
@@ -214,7 +215,11 @@ impl IncrementalView {
let logical_plan = builder.build_statement(&stmt)?;

// Compile the logical plan to a DBSP circuit with the storage roots
let compiler = DbspCompiler::new(main_data_root, internal_state_root);
let compiler = DbspCompiler::new(
main_data_root,
internal_state_root,
internal_state_index_root,
);
let circuit = compiler.compile(&logical_plan)?;

Ok(circuit)
@@ -271,6 +276,7 @@ impl IncrementalView {
schema: &Schema,
main_data_root: usize,
internal_state_root: usize,
internal_state_index_root: usize,
) -> Result<Self> {
let mut parser = Parser::new(sql.as_bytes());
let cmd = parser.next_cmd()?;
@@ -287,6 +293,7 @@ impl IncrementalView {
schema,
main_data_root,
internal_state_root,
internal_state_index_root,
),
_ => Err(LimboError::ParseError(format!(
"View is not a CREATE MATERIALIZED VIEW statement: {sql}"
@@ -300,6 +307,7 @@ impl IncrementalView {
schema: &Schema,
main_data_root: usize,
internal_state_root: usize,
internal_state_index_root: usize,
) -> Result<Self> {
let name = view_name.name.as_str().to_string();

@@ -327,6 +335,7 @@ impl IncrementalView {
schema,
main_data_root,
internal_state_root,
internal_state_index_root,
)
}

@@ -340,13 +349,19 @@ impl IncrementalView {
schema: &Schema,
main_data_root: usize,
internal_state_root: usize,
internal_state_index_root: usize,
) -> Result<Self> {
// Create the tracker that will be shared by all operators
let tracker = Arc::new(Mutex::new(ComputationTracker::new()));

// Compile the SELECT statement into a DBSP circuit
let circuit =
Self::try_compile_circuit(&select_stmt, schema, main_data_root, internal_state_root)?;
let circuit = Self::try_compile_circuit(
&select_stmt,
schema,
main_data_root,
internal_state_root,
internal_state_index_root,
)?;

Ok(Self {
name,

@@ -6,6 +6,10 @@ pub struct Instant {
pub micros: u32,
}

const NSEC_PER_SEC: u64 = 1_000_000_000;
const NANOS_PER_MICRO: u32 = 1_000;
const MICROS_PER_SEC: u32 = NSEC_PER_SEC as u32 / NANOS_PER_MICRO;

impl Instant {
pub fn to_system_time(self) -> SystemTime {
if self.secs >= 0 {
@@ -24,6 +28,35 @@ impl Instant {
}
}
}

pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
let mut secs = self.secs.checked_add_unsigned(other.as_secs())?;

// Micros calculations can't overflow because micros are <1B which fit
// in a u32.
let mut micros = other.subsec_micros() + self.micros;
if micros >= MICROS_PER_SEC {
micros -= MICROS_PER_SEC;
secs = secs.checked_add(1)?;
}

Some(Self { secs, micros })
}

pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
let mut secs = self.secs.checked_sub_unsigned(other.as_secs())?;

// Similar to above, micros can't overflow.
let mut micros = self.micros as i32 - other.subsec_micros() as i32;
if micros < 0 {
micros += MICROS_PER_SEC as i32;
secs = secs.checked_sub(1)?;
}
Some(Self {
secs,
micros: micros as u32,
})
}
}

impl<T: chrono::TimeZone> From<chrono::DateTime<T>> for Instant {
@@ -35,6 +68,22 @@ impl<T: chrono::TimeZone> From<chrono::DateTime<T>> for Instant {
}
}

impl std::ops::Add<Duration> for Instant {
type Output = Instant;

fn add(self, rhs: Duration) -> Self::Output {
self.checked_add_duration(&rhs).unwrap()
}
}

impl std::ops::Sub<Duration> for Instant {
type Output = Instant;

fn sub(self, rhs: Duration) -> Self::Output {
self.checked_sub_duration(&rhs).unwrap()
}
}

pub trait Clock {
fn now(&self) -> Instant;
}

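
A standalone check of the carry/borrow arithmetic in checked_add_duration and checked_sub_duration above: seconds and microseconds are kept separately, so the sub-second parts must wrap at 1_000_000 and carry into the seconds. This is a sketch mirroring the diff's logic, not the crate's type.

use std::time::Duration;

const MICROS_PER_SEC: u32 = 1_000_000;

#[derive(Debug, PartialEq, Clone, Copy)]
struct Instant {
    secs: i64,
    micros: u32, // invariant: micros < MICROS_PER_SEC
}

impl Instant {
    fn checked_add_duration(self, d: Duration) -> Option<Instant> {
        let mut secs = self.secs.checked_add_unsigned(d.as_secs())?;
        let mut micros = self.micros + d.subsec_micros(); // < 2_000_000, no overflow
        if micros >= MICROS_PER_SEC {
            micros -= MICROS_PER_SEC;
            secs = secs.checked_add(1)?; // carry
        }
        Some(Instant { secs, micros })
    }

    fn checked_sub_duration(self, d: Duration) -> Option<Instant> {
        let mut secs = self.secs.checked_sub_unsigned(d.as_secs())?;
        let mut micros = self.micros as i32 - d.subsec_micros() as i32;
        if micros < 0 {
            micros += MICROS_PER_SEC as i32;
            secs = secs.checked_sub(1)?; // borrow
        }
        Some(Instant { secs, micros: micros as u32 })
    }
}

fn main() {
    let t = Instant { secs: 10, micros: 900_000 };
    // 0.2s forward crosses a second boundary: carry into secs.
    assert_eq!(
        t.checked_add_duration(Duration::from_millis(200)),
        Some(Instant { secs: 11, micros: 100_000 })
    );
    // 0.95s back borrows from secs.
    assert_eq!(
        t.checked_sub_duration(Duration::from_micros(950_000)),
        Some(Instant { secs: 9, micros: 950_000 })
    );
}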
148
core/lib.rs
@@ -40,7 +40,6 @@ pub mod numeric;
#[cfg(not(feature = "fuzz"))]
mod numeric;

use crate::incremental::view::AllViewsTxState;
use crate::storage::checksum::CHECKSUM_REQUIRED_RESERVED_BYTES;
use crate::storage::encryption::CipherMode;
use crate::translate::pragma::TURSO_CDC_DEFAULT_TABLE_NAME;
@@ -50,6 +49,7 @@ use crate::types::{WalFrameInfo, WalState};
use crate::util::{OpenMode, OpenOptions};
use crate::vdbe::metrics::ConnectionMetrics;
use crate::vtab::VirtualTable;
use crate::{incremental::view::AllViewsTxState, translate::emitter::TransactionMode};
use core::str;
pub use error::{CompletionError, LimboError};
pub use io::clock::{Clock, Instant};
@@ -75,6 +75,7 @@ use std::{
atomic::{AtomicUsize, Ordering},
Arc, LazyLock, Mutex, Weak,
},
time::Duration,
};
#[cfg(feature = "fs")]
use storage::database::DatabaseFile;
@@ -497,7 +498,6 @@ impl Database {
),
database_schemas: RefCell::new(std::collections::HashMap::new()),
auto_commit: Cell::new(true),
mv_transactions: RefCell::new(Vec::new()),
transaction_state: Cell::new(TransactionState::None),
last_insert_rowid: Cell::new(0),
last_change: Cell::new(0),
@@ -511,7 +511,7 @@ impl Database {
closed: Cell::new(false),
attached_databases: RefCell::new(DatabaseCatalog::new()),
query_only: Cell::new(false),
mv_tx_id: Cell::new(None),
mv_tx: Cell::new(None),
view_transaction_states: AllViewsTxState::new(),
metrics: RefCell::new(ConnectionMetrics::new()),
is_nested_stmt: Cell::new(false),
@@ -519,6 +519,7 @@ impl Database {
encryption_cipher_mode: Cell::new(None),
sync_mode: Cell::new(SyncMode::Full),
data_sync_retry: Cell::new(false),
busy_timeout: Cell::new(None),
});
self.n_connections
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
@@ -978,8 +979,6 @@ pub struct Connection {
database_schemas: RefCell<std::collections::HashMap<usize, Arc<Schema>>>,
/// Whether to automatically commit transaction
auto_commit: Cell<bool>,
/// Transactions that are in progress.
mv_transactions: RefCell<Vec<crate::mvcc::database::TxID>>,
transaction_state: Cell<TransactionState>,
last_insert_rowid: Cell<i64>,
last_change: Cell<i64>,
@@ -998,7 +997,7 @@ pub struct Connection {
/// Attached databases
attached_databases: RefCell<DatabaseCatalog>,
query_only: Cell<bool>,
pub(crate) mv_tx_id: Cell<Option<crate::mvcc::database::TxID>>,
pub(crate) mv_tx: Cell<Option<(crate::mvcc::database::TxID, TransactionMode)>>,

/// Per-connection view transaction states for uncommitted changes. This represents
/// one entry per view that was touched in the transaction.
@@ -1012,6 +1011,8 @@ pub struct Connection {
encryption_cipher_mode: Cell<Option<CipherMode>>,
sync_mode: Cell<SyncMode>,
data_sync_retry: Cell<bool>,
/// User defined max accumulated Busy timeout duration
busy_timeout: Cell<Option<std::time::Duration>>,
}

impl Drop for Connection {
@@ -2158,6 +2159,83 @@ impl Connection {
}
pager.set_encryption_context(cipher_mode, key)
}

/// Sets the maximum total accumulated timeout. If the duration is None or zero, we unset the busy handler for this Connection.
///
/// This API differs slightly from: https://www.sqlite.org/c3ref/busy_timeout.html
///
/// Instead of sleeping for a linear amount of time specified by the user,
/// we will sleep in phases, until the total amount of time is reached.
/// This means we first sleep for 1 ms, then if we still return busy, we sleep for 2 ms, and repeat until a maximum of 100 ms per phase.
///
/// Example:
/// 1. Set duration to 5ms
/// 2. Step through query -> returns Busy -> sleep/yield for 1 ms
/// 3. Step through query -> returns Busy -> sleep/yield for 2 ms
/// 4. Step through query -> returns Busy -> sleep/yield for 2 ms (totaling 5 ms of sleep)
/// 5. Step through query -> returns Busy -> return Busy to user
///
/// This slight API change demonstrated better throughput in the `perf/throughput/turso` benchmark
pub fn busy_timeout(&self, mut duration: Option<std::time::Duration>) {
duration = duration.filter(|duration| !duration.is_zero());
self.busy_timeout.set(duration);
}
}

#[derive(Debug, Default)]
struct BusyTimeout {
/// Busy timeout instant
timeout: Option<Instant>,
/// Max duration of timeout set by Connection
max_duration: Duration,
/// Accumulated duration for busy timeout
///
/// It will be decremented until it reaches 0, then after that no timeout will be emitted
accum_duration: Duration,
iteration: usize,
}

impl BusyTimeout {
const DELAYS: [std::time::Duration; 12] = [
Duration::from_millis(1),
Duration::from_millis(2),
Duration::from_millis(5),
Duration::from_millis(10),
Duration::from_millis(15),
Duration::from_millis(20),
Duration::from_millis(25),
Duration::from_millis(25),
Duration::from_millis(25),
Duration::from_millis(50),
Duration::from_millis(50),
Duration::from_millis(100),
];

pub fn new(duration: std::time::Duration) -> Self {
Self {
timeout: None,
max_duration: duration,
iteration: 0,
accum_duration: duration,
}
}

pub fn initiate_timeout(&mut self, now: Instant) {
self.timeout = Self::DELAYS.get(self.iteration).and_then(|delay| {
if self.accum_duration.is_zero() {
None
} else {
let new_timeout = now + (*delay).min(self.accum_duration);
self.accum_duration = self.accum_duration.saturating_sub(*delay);
Some(new_timeout)
}
});
self.iteration = if self.iteration < Self::DELAYS.len() - 1 {
self.iteration + 1
} else {
self.iteration
};
}
}
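
A simulation of the phased schedule above, under the same rules: each phase sleeps min(next delay, remaining budget) and the budget shrinks by the full delay (saturating). With a 5 ms budget and delays [1, 2, 5, ...] this yields the 1 ms + 2 ms + 2 ms sequence from the doc comment, then gives up. The shortened DELAYS table here is for illustration only.

use std::time::Duration;

const DELAYS: [Duration; 4] = [
    Duration::from_millis(1),
    Duration::from_millis(2),
    Duration::from_millis(5),
    Duration::from_millis(10),
];

fn phases(budget: Duration) -> Vec<Duration> {
    let mut remaining = budget;
    let mut out = Vec::new();
    for delay in DELAYS {
        if remaining.is_zero() {
            break; // budget exhausted: the statement reports Busy
        }
        out.push(delay.min(remaining));
        remaining = remaining.saturating_sub(delay);
    }
    out
}

fn main() {
    let sleeps = phases(Duration::from_millis(5));
    assert_eq!(
        sleeps,
        vec![
            Duration::from_millis(1),
            Duration::from_millis(2),
            Duration::from_millis(2), // clamped to the remaining budget
        ]
    );
}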

pub struct Statement {
@@ -2173,6 +2251,8 @@ pub struct Statement {
query_mode: QueryMode,
/// Flag to show if the statement was busy
busy: bool,
/// Busy timeout instant
busy_timeout: Option<BusyTimeout>,
}

impl Statement {
@@ -2197,6 +2277,7 @@ impl Statement {
accesses_db,
query_mode,
busy: false,
busy_timeout: None,
}
}
pub fn get_query_mode(&self) -> QueryMode {
@@ -2207,8 +2288,8 @@ impl Statement {
self.program.n_change.get()
}

pub fn set_mv_tx_id(&mut self, mv_tx_id: Option<u64>) {
self.program.connection.mv_tx_id.set(mv_tx_id);
pub fn set_mv_tx(&mut self, mv_tx: Option<(u64, TransactionMode)>) {
self.program.connection.mv_tx.set(mv_tx);
}

pub fn interrupt(&mut self) {
@@ -2216,7 +2297,19 @@ impl Statement {
}

pub fn step(&mut self) -> Result<StepResult> {
let res = if !self.accesses_db {
if let Some(busy_timeout) = self.busy_timeout.as_mut() {
if let Some(timeout) = busy_timeout.timeout {
let now = self.pager.io.now();

if now < timeout {
// Yield the query as the timeout has not been reached yet
return Ok(StepResult::IO);
}
// Timeout ended now continue to query execution
}
}

let mut res = if !self.accesses_db {
self.program.step(
&mut self.state,
self.mv_store.clone(),
@@ -2257,6 +2350,18 @@ impl Statement {
self.busy = true;
}

if matches!(res, Ok(StepResult::Busy)) {
self.check_if_busy_handler_set();
if let Some(busy_timeout) = self.busy_timeout.as_mut() {
busy_timeout.initiate_timeout(self.pager.io.now());
if busy_timeout.timeout.is_some() {
// Yield instead of busy, as now we will try to wait for the timeout
// before continuing execution
res = Ok(StepResult::IO);
}
}
}

res
}

@@ -2427,6 +2532,7 @@ impl Statement {
pub fn _reset(&mut self, max_registers: Option<usize>, max_cursors: Option<usize>) {
self.state.reset(max_registers, max_cursors);
self.busy = false;
self.check_if_busy_handler_set();
}

pub fn row(&self) -> Option<&Row> {
@@ -2440,6 +2546,30 @@ impl Statement {
pub fn is_busy(&self) -> bool {
self.busy
}

/// Checks if the busy handler is set in the connection and sets the handler if needed
fn check_if_busy_handler_set(&mut self) {
let conn_busy_timeout = self
.program
.connection
.busy_timeout
.get()
.map(BusyTimeout::new);
if self.busy_timeout.is_none() {
self.busy_timeout = conn_busy_timeout;
return;
}
if let Some(conn_busy_timeout) = conn_busy_timeout {
let busy_timeout = self
.busy_timeout
.as_mut()
.expect("busy timeout was checked for None above");
// User changed max duration, so clear previous handler and set a new one
if busy_timeout.max_duration != conn_busy_timeout.max_duration {
*busy_timeout = conn_busy_timeout;
}
}
}
}

pub type Row = vdbe::Row;

@@ -46,14 +46,20 @@ impl<Clock: LogicalClock> MvccLazyCursor<Clock> {
/// Sets the cursor to the inserted row.
pub fn insert(&mut self, row: Row) -> Result<()> {
self.current_pos = CursorPosition::Loaded(row.id);
self.db.insert(self.tx_id, row).inspect_err(|_| {
self.current_pos = CursorPosition::BeforeFirst;
})?;
if self.db.read(self.tx_id, row.id)?.is_some() {
self.db.update(self.tx_id, row).inspect_err(|_| {
self.current_pos = CursorPosition::BeforeFirst;
})?;
} else {
self.db.insert(self.tx_id, row).inspect_err(|_| {
self.current_pos = CursorPosition::BeforeFirst;
})?;
}
Ok(())
}

pub fn delete(&mut self, rowid: RowID, pager: Rc<Pager>) -> Result<()> {
self.db.delete(self.tx_id, rowid, pager)?;
pub fn delete(&mut self, rowid: RowID) -> Result<()> {
self.db.delete(self.tx_id, rowid)?;
Ok(())
}

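
The insert change above turns the cursor's insert into an upsert: read first, then update if a version already exists, else insert. A stand-in model with a per-row version chain as the store (illustrative, not the MVCC database's real API):

use std::collections::HashMap;

struct Store {
    // Each row id maps to its version chain, oldest first.
    versions: HashMap<u64, Vec<String>>,
}

impl Store {
    fn upsert(&mut self, id: u64, value: String) {
        match self.versions.get_mut(&id) {
            // Row already visible: append a new version (update).
            Some(chain) => chain.push(value),
            // First sighting: start a chain (insert).
            None => {
                self.versions.insert(id, vec![value]);
            }
        }
    }
}

fn main() {
    let mut store = Store { versions: HashMap::new() };
    store.upsert(1, "a".into());
    store.upsert(1, "b".into()); // would have failed as a blind insert
    assert_eq!(store.versions[&1], vec!["a", "b"]);
}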
@@ -12,6 +12,7 @@ use crate::storage::sqlite3_ondisk::DatabaseHeader;
use crate::storage::wal::TursoRwLock;
use crate::types::IOResult;
use crate::types::ImmutableRecord;
use crate::types::SeekResult;
use crate::Completion;
use crate::IOExt;
use crate::LimboError;
@@ -27,6 +28,8 @@ use std::ops::Bound;
use std::rc::Rc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use tracing::instrument;
use tracing::Level;

#[cfg(test)]
pub mod tests;
@@ -141,20 +144,28 @@ impl std::fmt::Display for Transaction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
write!(
f,
"{{ state: {}, id: {}, begin_ts: {}, write_set: {:?}, read_set: {:?}",
"{{ state: {}, id: {}, begin_ts: {}, write_set: [",
self.state.load(),
self.tx_id,
self.begin_ts,
// FIXME: I'm sorry, we obviously shouldn't be cloning here.
self.write_set
.iter()
.map(|v| *v.value())
.collect::<Vec<RowID>>(),
self.read_set
.iter()
.map(|v| *v.value())
.collect::<Vec<RowID>>()
)
)?;

for (i, v) in self.write_set.iter().enumerate() {
if i > 0 {
write!(f, ", ")?
}
write!(f, "{:?}", *v.value())?;
}

write!(f, "], read_set: [")?;
for (i, v) in self.read_set.iter().enumerate() {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{:?}", *v.value())?;
}

write!(f, "] }}")
}
}

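The Display rewrite above drops the per-format clones by writing each element straight to the formatter. The same pattern in miniature:

use std::collections::BTreeSet;
use std::fmt;

struct WriteSet(BTreeSet<u64>);

impl fmt::Display for WriteSet {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "write_set: [")?;
        for (i, v) in self.0.iter().enumerate() {
            if i > 0 {
                write!(f, ", ")?; // separator without collecting into a Vec
            }
            write!(f, "{v}")?;
        }
        write!(f, "]")
    }
}

fn main() {
    let ws = WriteSet([3, 1, 2].into_iter().collect());
    assert_eq!(ws.to_string(), "write_set: [1, 2, 3]");
}
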
@@ -380,7 +391,7 @@ impl<Clock: LogicalClock> StateTransition for CommitStateMachine<Clock> {
type Context = MvStore<Clock>;
type SMResult = ();

#[tracing::instrument(fields(state = ?self.state), skip(self, mvcc_store))]
#[tracing::instrument(fields(state = ?self.state), skip(self, mvcc_store), level = Level::DEBUG)]
fn step(&mut self, mvcc_store: &Self::Context) -> Result<TransitionResult<Self::SMResult>> {
match self.state {
CommitState::Initial => {
@@ -476,12 +487,26 @@ impl<Clock: LogicalClock> StateTransition for CommitStateMachine<Clock> {
only if TE commits.
"""
*/
tx.state.store(TransactionState::Committed(end_ts));
tracing::trace!("commit_tx(tx_id={})", self.tx_id);
self.write_set
.extend(tx.write_set.iter().map(|v| *v.value()));
self.write_set
.sort_by(|a, b| a.table_id.cmp(&b.table_id).then(a.row_id.cmp(&b.row_id)));
if self.write_set.is_empty() {
tx.state.store(TransactionState::Committed(end_ts));
if mvcc_store.is_exclusive_tx(&self.tx_id) {
mvcc_store.release_exclusive_tx(&self.tx_id);
self.commit_coordinator.pager_commit_lock.unlock();
// FIXME: this function isn't re-entrant
self.pager
.io
.block(|| self.pager.end_tx(false, &self.connection))?;
} else {
self.pager.end_read_tx()?;
}
self.finalize(mvcc_store)?;
return Ok(TransitionResult::Done(()));
}
self.state = CommitState::BeginPagerTxn { end_ts };
Ok(TransitionResult::Continue)
}
@@ -501,6 +526,9 @@ impl<Clock: LogicalClock> StateTransition for CommitStateMachine<Clock> {
|
||||
requires_seek: true,
|
||||
};
|
||||
return Ok(TransitionResult::Continue);
|
||||
} else if mvcc_store.has_exclusive_tx() {
|
||||
// There is an exclusive transaction holding the write lock. We must abort.
|
||||
return Err(LimboError::WriteWriteConflict);
|
||||
}
|
||||
// Currently txns are queued without any heuristics whasoever. This is important because
|
||||
// we need to ensure writes to disk happen sequentially.
|
||||
@@ -535,9 +563,27 @@ impl<Clock: LogicalClock> StateTransition for CommitStateMachine<Clock> {
|
||||
})?;
|
||||
}
|
||||
}
|
||||
// We started a pager read transaction at the beginning of the MV transaction, because
|
||||
// any reads we do from the database file and WAL must uphold snapshot isolation.
|
||||
// However, now we must end and immediately restart the read transaction before committing.
|
||||
// This is because other transactions may have committed writes to the DB file or WAL,
|
||||
// and our pager must read in those changes when applying our writes; otherwise we would overwrite
|
||||
// the changes from the previous committed transactions.
|
||||
//
|
||||
// Note that this would be incredibly unsafe in the regular transaction model, but in MVCC we trust
|
||||
// the MV-store to uphold the guarantee that no write-write conflicts happened.
|
||||
self.pager.end_read_tx().expect("end_read_tx cannot fail");
|
||||
let result = self.pager.begin_read_tx()?;
|
||||
if let crate::result::LimboResult::Busy = result {
|
||||
// We cannot obtain a WAL read lock due to contention, so we must abort.
|
||||
self.commit_coordinator.pager_commit_lock.unlock();
|
||||
return Err(LimboError::WriteWriteConflict);
|
||||
}
|
||||
let result = self.pager.io.block(|| self.pager.begin_write_tx())?;
|
||||
if let crate::result::LimboResult::Busy = result {
|
||||
panic!("Pager write transaction busy, in mvcc this should never happen");
|
||||
// There is a non-CONCURRENT transaction holding the write lock. We must abort.
|
||||
self.commit_coordinator.pager_commit_lock.unlock();
|
||||
return Err(LimboError::WriteWriteConflict);
|
||||
}
|
||||
self.state = CommitState::WriteRow {
|
||||
end_ts,
|
||||
@@ -558,8 +604,10 @@ impl<Clock: LogicalClock> StateTransition for CommitStateMachine<Clock> {
|
||||
let id = &self.write_set[write_set_index];
|
||||
if let Some(row_versions) = mvcc_store.rows.get(id) {
|
||||
let row_versions = row_versions.value().read();
|
||||
// Find rows that were written by this transaction
|
||||
for row_version in row_versions.iter() {
|
||||
// Find rows that were written by this transaction.
|
||||
// Hekaton uses oldest-to-newest order for row versions, so we reverse iterate to find the newest one
|
||||
// this transaction changed.
|
||||
for row_version in row_versions.iter().rev() {
|
||||
if let TxTimestampOrID::TxID(row_tx_id) = row_version.begin {
|
||||
if row_tx_id == self.tx_id {
|
||||
let cursor = if let Some(cursor) = self.cursors.get(&id.table_id) {
|
||||
@@ -709,6 +757,9 @@ impl<Clock: LogicalClock> StateTransition for CommitStateMachine<Clock> {
|
||||
}
|
||||
CommitState::Commit { end_ts } => {
|
||||
let mut log_record = LogRecord::new(end_ts);
|
||||
let tx = mvcc_store.txs.get(&self.tx_id).unwrap();
|
||||
let tx_unlocked = tx.value();
|
||||
tx_unlocked.state.store(TransactionState::Committed(end_ts));
|
||||
for id in &self.write_set {
|
||||
if let Some(row_versions) = mvcc_store.rows.get(id) {
|
||||
let mut row_versions = row_versions.value().write();
|
||||
@@ -778,7 +829,7 @@ impl StateTransition for WriteRowStateMachine {
|
||||
type Context = ();
|
||||
type SMResult = ();
|
||||
|
||||
#[tracing::instrument(fields(state = ?self.state), skip(self, _context))]
|
||||
#[tracing::instrument(fields(state = ?self.state), skip(self, _context), level = Level::DEBUG)]
|
||||
fn step(&mut self, _context: &Self::Context) -> Result<TransitionResult<Self::SMResult>> {
|
||||
use crate::types::{IOResult, SeekKey, SeekOp};
|
||||
|
||||
@@ -881,7 +932,13 @@ impl StateTransition for DeleteRowStateMachine {
|
||||
.write()
|
||||
.seek(seek_key, SeekOp::GE { eq_only: true })?
|
||||
{
|
||||
IOResult::Done(_) => {
|
||||
IOResult::Done(seek_res) => {
|
||||
if seek_res == SeekResult::NotFound {
|
||||
crate::bail_corrupt_error!(
|
||||
"MVCC delete: rowid {} not found",
|
||||
self.rowid.row_id
|
||||
);
|
||||
}
|
||||
self.state = DeleteRowState::Delete;
|
||||
Ok(TransitionResult::Continue)
|
||||
}
|
||||
@@ -1028,9 +1085,9 @@ impl<Clock: LogicalClock> MvStore<Clock> {
|
||||
/// # Returns
|
||||
///
|
||||
/// Returns `true` if the row was successfully updated, and `false` otherwise.
|
||||
pub fn update(&self, tx_id: TxID, row: Row, pager: Rc<Pager>) -> Result<bool> {
|
||||
pub fn update(&self, tx_id: TxID, row: Row) -> Result<bool> {
|
||||
tracing::trace!("update(tx_id={}, row.id={:?})", tx_id, row.id);
|
||||
if !self.delete(tx_id, row.id, pager)? {
|
||||
if !self.delete(tx_id, row.id)? {
|
||||
return Ok(false);
|
||||
}
|
||||
self.insert(tx_id, row)?;
|
||||
@@ -1039,9 +1096,9 @@ impl<Clock: LogicalClock> MvStore<Clock> {
|
||||
|
||||
/// Inserts a row in the database with new values, previously deleting
|
||||
/// any old data if it existed. Bails on a delete error, e.g. write-write conflict.
|
||||
pub fn upsert(&self, tx_id: TxID, row: Row, pager: Rc<Pager>) -> Result<()> {
|
||||
pub fn upsert(&self, tx_id: TxID, row: Row) -> Result<()> {
|
||||
tracing::trace!("upsert(tx_id={}, row.id={:?})", tx_id, row.id);
|
||||
self.delete(tx_id, row.id, pager)?;
|
||||
self.delete(tx_id, row.id)?;
|
||||
self.insert(tx_id, row)
|
||||
}
|
||||
|
||||
@@ -1059,7 +1116,7 @@ impl<Clock: LogicalClock> MvStore<Clock> {
|
||||
///
|
||||
/// Returns `true` if the row was successfully deleted, and `false` otherwise.
|
||||
///
|
||||
pub fn delete(&self, tx_id: TxID, id: RowID, pager: Rc<Pager>) -> Result<bool> {
|
||||
pub fn delete(&self, tx_id: TxID, id: RowID) -> Result<bool> {
|
||||
tracing::trace!("delete(tx_id={}, id={:?})", tx_id, id);
|
||||
let row_versions_opt = self.rows.get(&id);
|
||||
if let Some(ref row_versions) = row_versions_opt {
|
||||
@@ -1079,7 +1136,6 @@ impl<Clock: LogicalClock> MvStore<Clock> {
|
||||
if is_write_write_conflict(&self.txs, tx, rv) {
|
||||
drop(row_versions);
|
||||
drop(row_versions_opt);
|
||||
self.rollback_tx(tx_id, pager);
|
||||
return Err(LimboError::WriteWriteConflict);
|
||||
}
|
||||
|
||||
@@ -1248,19 +1304,51 @@ impl<Clock: LogicalClock> MvStore<Clock> {
|
||||
///
|
||||
/// This is used for IMMEDIATE and EXCLUSIVE transaction types where we need
|
||||
/// to ensure exclusive write access as per SQLite semantics.
|
||||
pub fn begin_exclusive_tx(&self, pager: Rc<Pager>) -> Result<IOResult<TxID>> {
|
||||
let tx_id = self.get_tx_id();
|
||||
pub fn begin_exclusive_tx(
|
||||
&self,
|
||||
pager: Rc<Pager>,
|
||||
maybe_existing_tx_id: Option<TxID>,
|
||||
) -> Result<IOResult<TxID>> {
|
||||
self._begin_exclusive_tx(pager, false, maybe_existing_tx_id)
|
||||
}
|
||||
|
||||
/// Upgrades a read transaction to an exclusive write transaction.
|
||||
///
|
||||
/// This is used for IMMEDIATE and EXCLUSIVE transaction types where we need
|
||||
/// to ensure exclusive write access as per SQLite semantics.
|
||||
pub fn upgrade_to_exclusive_tx(
|
||||
&self,
|
||||
pager: Rc<Pager>,
|
||||
maybe_existing_tx_id: Option<TxID>,
|
||||
) -> Result<IOResult<TxID>> {
|
||||
self._begin_exclusive_tx(pager, true, maybe_existing_tx_id)
|
||||
}
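For orientation, a minimal usage sketch of the two entry points above; `read_tx_id` is a hypothetical id of an already-open read transaction, and both functions return Result<IOResult<TxID>>, which callers later in this diff drive with return_if_io!:

// BEGIN IMMEDIATE/EXCLUSIVE on a fresh connection: allocate a new tx id.
let new_tx = mv_store.begin_exclusive_tx(pager.clone(), None)?;
// Upgrade an existing read transaction in place: reuse its id and skip
// re-taking the pager read lock the read transaction already holds.
let upgraded = mv_store.upgrade_to_exclusive_tx(pager.clone(), Some(read_tx_id))?;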

/// Begins an exclusive write transaction that prevents concurrent writes.
///
/// This is used for IMMEDIATE and EXCLUSIVE transaction types where we need
/// to ensure exclusive write access as per SQLite semantics.
#[instrument(skip_all, level = Level::DEBUG)]
fn _begin_exclusive_tx(
&self,
pager: Rc<Pager>,
is_upgrade_from_read: bool,
maybe_existing_tx_id: Option<TxID>,
) -> Result<IOResult<TxID>> {
let tx_id = maybe_existing_tx_id.unwrap_or_else(|| self.get_tx_id());
let begin_ts = self.get_timestamp();

self.acquire_exclusive_tx(&tx_id)?;

// Try to acquire the pager read lock
match pager.begin_read_tx()? {
LimboResult::Busy => {
self.release_exclusive_tx(&tx_id);
return Err(LimboError::Busy);
if !is_upgrade_from_read {
match pager.begin_read_tx()? {
LimboResult::Busy => {
self.release_exclusive_tx(&tx_id);
return Err(LimboError::Busy);
}
LimboResult::Ok => {}
}
LimboResult::Ok => {}
}
let locked = self.commit_coordinator.pager_commit_lock.write();
if !locked {
@@ -1273,7 +1361,15 @@ impl<Clock: LogicalClock> MvStore<Clock> {
LimboResult::Busy => {
tracing::debug!("begin_exclusive_tx: tx_id={} failed with Busy", tx_id);
// Failed to get pager lock - release our exclusive lock
panic!("begin_exclusive_tx: tx_id={tx_id} failed with Busy, this should never happen as we were able to lock mvcc exclusive write lock");
self.commit_coordinator.pager_commit_lock.unlock();
self.release_exclusive_tx(&tx_id);
if maybe_existing_tx_id.is_none() {
// If we were upgrading an existing non-CONCURRENT mvcc transaction to write, we don't end the read tx on Busy.
// But if we were beginning a completely new non-CONCURRENT mvcc transaction, we do end it because the next time the connection
// attempts to do something, it will open a new read tx, which will fail if we don't end this one here.
pager.end_read_tx()?;
}
return Err(LimboError::Busy);
}
LimboResult::Ok => {
let tx = Transaction::new(tx_id, begin_ts);
@@ -1294,7 +1390,7 @@ impl<Clock: LogicalClock> MvStore<Clock> {
/// This function starts a new transaction in the database and returns a `TxID` value
/// that you can use to perform operations within the transaction. All changes made within the
/// transaction are isolated from other transactions until you commit the transaction.
pub fn begin_tx(&self, pager: Rc<Pager>) -> TxID {
pub fn begin_tx(&self, pager: Rc<Pager>) -> Result<TxID> {
let tx_id = self.get_tx_id();
let begin_ts = self.get_timestamp();
let tx = Transaction::new(tx_id, begin_ts);
@@ -1303,8 +1399,11 @@ impl<Clock: LogicalClock> MvStore<Clock> {

// TODO: we need to tie a pager's read transaction to a transaction ID, so that future refactors to read
// pages from WAL/DB read from a consistent state to maintain snapshot isolation.
pager.begin_read_tx().unwrap();
tx_id
let result = pager.begin_read_tx()?;
if let crate::result::LimboResult::Busy = result {
return Err(LimboError::Busy);
}
Ok(tx_id)
}

/// Commits a transaction with the specified transaction ID.
@@ -1322,7 +1421,6 @@ impl<Clock: LogicalClock> MvStore<Clock> {
pager: Rc<Pager>,
connection: &Arc<Connection>,
) -> Result<StateMachine<CommitStateMachine<Clock>>> {
tracing::trace!("commit_tx(tx_id={})", tx_id);
let state_machine: StateMachine<CommitStateMachine<Clock>> =
StateMachine::<CommitStateMachine<Clock>>::new(CommitStateMachine::new(
CommitState::Initial,
@@ -1343,21 +1441,39 @@ impl<Clock: LogicalClock> MvStore<Clock> {
/// # Arguments
///
/// * `tx_id` - The ID of the transaction to abort.
pub fn rollback_tx(&self, tx_id: TxID, pager: Rc<Pager>) {
pub fn rollback_tx(
&self,
tx_id: TxID,
pager: Rc<Pager>,
connection: &Connection,
) -> Result<()> {
let tx_unlocked = self.txs.get(&tx_id).unwrap();
let tx = tx_unlocked.value();
assert_eq!(tx.state, TransactionState::Active);
connection.mv_tx.set(None);
assert!(tx.state == TransactionState::Active || tx.state == TransactionState::Preparing);
tx.state.store(TransactionState::Aborted);
tracing::trace!("abort(tx_id={})", tx_id);
let write_set: Vec<RowID> = tx.write_set.iter().map(|v| *v.value()).collect();

if self.is_exclusive_tx(&tx_id) {
let pager_rollback_done = if self.is_exclusive_tx(&tx_id) {
self.commit_coordinator.pager_commit_lock.unlock();
self.release_exclusive_tx(&tx_id);
}
pager.io.block(|| pager.end_tx(true, connection))?;
true
} else {
false
};

for ref id in write_set {
if let Some(row_versions) = self.rows.get(id) {
let mut row_versions = row_versions.value().write();
for rv in row_versions.iter_mut() {
if rv.end == Some(TxTimestampOrID::TxID(tx_id)) {
// undo deletions by this transaction
rv.end = None;
}
}
// remove insertions by this transaction
row_versions.retain(|rv| rv.begin != TxTimestampOrID::TxID(tx_id));
if row_versions.is_empty() {
self.rows.remove(id);
@@ -1368,10 +1484,14 @@ impl<Clock: LogicalClock> MvStore<Clock> {
let tx = tx_unlocked.value();
tx.state.store(TransactionState::Terminated);
tracing::trace!("terminate(tx_id={})", tx_id);
pager.end_read_tx().unwrap();
if !pager_rollback_done {
pager.end_read_tx()?;
}
// FIXME: verify that we can already remove the transaction here!
// Maybe it's fine for snapshot isolation, but too early for serializable?
self.txs.remove(&tx_id);

Ok(())
}

/// Returns true if the given transaction is the exclusive transaction.
@@ -1379,6 +1499,11 @@ impl<Clock: LogicalClock> MvStore<Clock> {
self.exclusive_tx.read().as_ref() == Some(tx_id)
}

/// Returns true if there is an exclusive transaction ongoing.
fn has_exclusive_tx(&self) -> bool {
self.exclusive_tx.read().is_some()
}

/// Acquires the exclusive transaction lock to the given transaction ID.
fn acquire_exclusive_tx(&self, tx_id: &TxID) -> Result<()> {
let mut exclusive_tx = self.exclusive_tx.write();
@@ -1505,8 +1630,8 @@ impl<Clock: LogicalClock> MvStore<Clock> {
// we can either switch to a tree-like structure, or at least use partition_point()
// which performs a binary search for the insertion point.
let mut position = 0_usize;
for (i, v) in versions.iter().rev().enumerate() {
if self.get_begin_timestamp(&v.begin) < self.get_begin_timestamp(&row_version.begin) {
for (i, v) in versions.iter().enumerate().rev() {
if self.get_begin_timestamp(&v.begin) <= self.get_begin_timestamp(&row_version.begin) {
position = i + 1;
break;
}
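As the comment above notes, the reverse linear scan could become a binary search; a minimal sketch, assuming `versions` stays sorted by begin timestamp (which this insertion routine maintains):

// partition_point returns the first index whose predicate is false, i.e.
// one past the last version with begin <= the new version's begin --
// the same `position` the loop above computes.
let position = versions.partition_point(|v| {
    self.get_begin_timestamp(&v.begin) <= self.get_begin_timestamp(&row_version.begin)
});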
@@ -1734,7 +1859,9 @@ fn is_end_visible(
match row_version.end {
Some(TxTimestampOrID::Timestamp(rv_end_ts)) => current_tx.begin_ts < rv_end_ts,
Some(TxTimestampOrID::TxID(rv_end)) => {
let other_tx = txs.get(&rv_end).unwrap();
let other_tx = txs
.get(&rv_end)
.unwrap_or_else(|| panic!("Transaction {rv_end} not found"));
let other_tx = other_tx.value();
let visible = match other_tx.state.load() {
// V's sharp mind discovered an issue with the hekaton paper which basically states that a

@@ -95,7 +95,10 @@ pub(crate) fn generate_simple_string_row(table_id: u64, id: i64, data: &str) ->
fn test_insert_read() {
let db = MvccTestDb::new();

let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let tx1_row = generate_simple_string_row(1, 1, "Hello");
db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap();
let row = db
@@ -112,7 +115,10 @@ fn test_insert_read() {
assert_eq!(tx1_row, row);
commit_tx(db.mvcc_store.clone(), &db.conn, tx1).unwrap();

let tx2 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx2 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let row = db
.mvcc_store
.read(
@@ -130,7 +136,10 @@ fn test_insert_read() {
#[test]
fn test_read_nonexistent() {
let db = MvccTestDb::new();
let tx = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let row = db.mvcc_store.read(
tx,
RowID {
@@ -145,7 +154,10 @@ fn test_read_nonexistent() {
fn test_delete() {
let db = MvccTestDb::new();

let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let tx1_row = generate_simple_string_row(1, 1, "Hello");
db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap();
let row = db
@@ -167,7 +179,6 @@ fn test_delete() {
table_id: 1,
row_id: 1,
},
db.conn.pager.borrow().clone(),
)
.unwrap();
let row = db
@@ -183,7 +194,10 @@ fn test_delete() {
assert!(row.is_none());
commit_tx(db.mvcc_store.clone(), &db.conn, tx1).unwrap();

let tx2 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx2 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let row = db
.mvcc_store
.read(
@@ -200,7 +214,10 @@ fn test_delete() {
#[test]
fn test_delete_nonexistent() {
let db = MvccTestDb::new();
let tx = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
assert!(!db
.mvcc_store
.delete(
@@ -209,7 +226,6 @@ fn test_delete_nonexistent() {
table_id: 1,
row_id: 1
},
db.conn.pager.borrow().clone(),
)
.unwrap());
}
@@ -217,7 +233,10 @@ fn test_delete_nonexistent() {
#[test]
fn test_commit() {
let db = MvccTestDb::new();
let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let tx1_row = generate_simple_string_row(1, 1, "Hello");
db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap();
let row = db
@@ -233,9 +252,7 @@ fn test_commit() {
.unwrap();
assert_eq!(tx1_row, row);
let tx1_updated_row = generate_simple_string_row(1, 1, "World");
db.mvcc_store
.update(tx1, tx1_updated_row.clone(), db.conn.pager.borrow().clone())
.unwrap();
db.mvcc_store.update(tx1, tx1_updated_row.clone()).unwrap();
let row = db
.mvcc_store
.read(
@@ -250,7 +267,10 @@ fn test_commit() {
assert_eq!(tx1_updated_row, row);
commit_tx(db.mvcc_store.clone(), &db.conn, tx1).unwrap();

let tx2 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx2 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let row = db
.mvcc_store
.read(
@@ -270,7 +290,10 @@ fn test_commit() {
#[test]
fn test_rollback() {
let db = MvccTestDb::new();
let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let row1 = generate_simple_string_row(1, 1, "Hello");
db.mvcc_store.insert(tx1, row1.clone()).unwrap();
let row2 = db
@@ -286,9 +309,7 @@ fn test_rollback() {
.unwrap();
assert_eq!(row1, row2);
let row3 = generate_simple_string_row(1, 1, "World");
db.mvcc_store
.update(tx1, row3.clone(), db.conn.pager.borrow().clone())
.unwrap();
db.mvcc_store.update(tx1, row3.clone()).unwrap();
let row4 = db
.mvcc_store
.read(
@@ -302,8 +323,12 @@ fn test_rollback() {
.unwrap();
assert_eq!(row3, row4);
db.mvcc_store
.rollback_tx(tx1, db.conn.pager.borrow().clone());
let tx2 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
.rollback_tx(tx1, db.conn.pager.borrow().clone(), &db.conn)
.unwrap();
let tx2 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let row5 = db
.mvcc_store
.read(
@@ -322,7 +347,10 @@ fn test_dirty_write() {
let db = MvccTestDb::new();

// T1 inserts a row with ID 1, but does not commit.
let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let tx1_row = generate_simple_string_row(1, 1, "Hello");
db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap();
let row = db
@@ -340,12 +368,12 @@ fn test_dirty_write() {

let conn2 = db.db.connect().unwrap();
// T2 attempts to delete row with ID 1, but fails because T1 has not committed.
let tx2 = db.mvcc_store.begin_tx(conn2.pager.borrow().clone());
let tx2_row = generate_simple_string_row(1, 1, "World");
assert!(!db
let tx2 = db
.mvcc_store
.update(tx2, tx2_row, conn2.pager.borrow().clone())
.unwrap());
.begin_tx(conn2.pager.borrow().clone())
.unwrap();
let tx2_row = generate_simple_string_row(1, 1, "World");
assert!(!db.mvcc_store.update(tx2, tx2_row).unwrap());

let row = db
.mvcc_store
@@ -366,13 +394,19 @@ fn test_dirty_read() {
let db = MvccTestDb::new();

// T1 inserts a row with ID 1, but does not commit.
let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let row1 = generate_simple_string_row(1, 1, "Hello");
db.mvcc_store.insert(tx1, row1).unwrap();

// T2 attempts to read row with ID 1, but doesn't see one because T1 has not committed.
let conn2 = db.db.connect().unwrap();
let tx2 = db.mvcc_store.begin_tx(conn2.pager.borrow().clone());
let tx2 = db
.mvcc_store
.begin_tx(conn2.pager.borrow().clone())
.unwrap();
let row2 = db
.mvcc_store
.read(
@@ -391,14 +425,20 @@ fn test_dirty_read_deleted() {
let db = MvccTestDb::new();

// T1 inserts a row with ID 1 and commits.
let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let tx1_row = generate_simple_string_row(1, 1, "Hello");
db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap();
commit_tx(db.mvcc_store.clone(), &db.conn, tx1).unwrap();

// T2 deletes row with ID 1, but does not commit.
let conn2 = db.db.connect().unwrap();
let tx2 = db.mvcc_store.begin_tx(conn2.pager.borrow().clone());
let tx2 = db
.mvcc_store
.begin_tx(conn2.pager.borrow().clone())
.unwrap();
assert!(db
.mvcc_store
.delete(
@@ -407,13 +447,15 @@ fn test_dirty_read_deleted() {
table_id: 1,
row_id: 1
},
conn2.pager.borrow().clone(),
)
.unwrap());

// T3 reads row with ID 1, but doesn't see the delete because T2 hasn't committed.
let conn3 = db.db.connect().unwrap();
let tx3 = db.mvcc_store.begin_tx(conn3.pager.borrow().clone());
let tx3 = db
.mvcc_store
.begin_tx(conn3.pager.borrow().clone())
.unwrap();
let row = db
.mvcc_store
.read(
@@ -433,7 +475,10 @@ fn test_fuzzy_read() {
let db = MvccTestDb::new();

// T1 inserts a row with ID 1 and commits.
let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let tx1_row = generate_simple_string_row(1, 1, "First");
db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap();
let row = db
@@ -452,7 +497,10 @@ fn test_fuzzy_read() {

// T2 reads the row with ID 1 within an active transaction.
let conn2 = db.db.connect().unwrap();
let tx2 = db.mvcc_store.begin_tx(conn2.pager.borrow().clone());
let tx2 = db
.mvcc_store
.begin_tx(conn2.pager.borrow().clone())
.unwrap();
let row = db
.mvcc_store
.read(
@@ -468,11 +516,12 @@ fn test_fuzzy_read() {

// T3 updates the row and commits.
let conn3 = db.db.connect().unwrap();
let tx3 = db.mvcc_store.begin_tx(conn3.pager.borrow().clone());
let tx3_row = generate_simple_string_row(1, 1, "Second");
db.mvcc_store
.update(tx3, tx3_row, conn3.pager.borrow().clone())
let tx3 = db
.mvcc_store
.begin_tx(conn3.pager.borrow().clone())
.unwrap();
let tx3_row = generate_simple_string_row(1, 1, "Second");
db.mvcc_store.update(tx3, tx3_row).unwrap();
commit_tx(db.mvcc_store.clone(), &conn3, tx3).unwrap();

// T2 still reads the same version of the row as before.
@@ -492,9 +541,7 @@ fn test_fuzzy_read() {
// T2 tries to update the row, but fails because T3 has already committed an update to the row,
// so T2 trying to write would violate snapshot isolation if it succeeded.
let tx2_newrow = generate_simple_string_row(1, 1, "Third");
let update_result = db
.mvcc_store
.update(tx2, tx2_newrow, conn2.pager.borrow().clone());
let update_result = db.mvcc_store.update(tx2, tx2_newrow);
assert!(matches!(update_result, Err(LimboError::WriteWriteConflict)));
}

@@ -503,7 +550,10 @@ fn test_lost_update() {
let db = MvccTestDb::new();

// T1 inserts a row with ID 1 and commits.
let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let tx1_row = generate_simple_string_row(1, 1, "Hello");
db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap();
let row = db
@@ -522,22 +572,28 @@ fn test_lost_update() {

// T2 attempts to update row ID 1 within an active transaction.
let conn2 = db.db.connect().unwrap();
let tx2 = db.mvcc_store.begin_tx(conn2.pager.borrow().clone());
let tx2_row = generate_simple_string_row(1, 1, "World");
assert!(db
let tx2 = db
.mvcc_store
.update(tx2, tx2_row.clone(), conn2.pager.borrow().clone())
.unwrap());
.begin_tx(conn2.pager.borrow().clone())
.unwrap();
let tx2_row = generate_simple_string_row(1, 1, "World");
assert!(db.mvcc_store.update(tx2, tx2_row.clone()).unwrap());

// T3 also attempts to update row ID 1 within an active transaction.
let conn3 = db.db.connect().unwrap();
let tx3 = db.mvcc_store.begin_tx(conn3.pager.borrow().clone());
let tx3 = db
.mvcc_store
.begin_tx(conn3.pager.borrow().clone())
.unwrap();
let tx3_row = generate_simple_string_row(1, 1, "Hello, world!");
assert!(matches!(
db.mvcc_store
.update(tx3, tx3_row, conn3.pager.borrow().clone(),),
db.mvcc_store.update(tx3, tx3_row),
Err(LimboError::WriteWriteConflict)
));
// hack: in the actual tursodb database we rollback the mvcc tx ourselves, so manually roll it back here
db.mvcc_store
.rollback_tx(tx3, conn3.pager.borrow().clone(), &conn3)
.unwrap();

commit_tx(db.mvcc_store.clone(), &conn2, tx2).unwrap();
assert!(matches!(
@@ -546,7 +602,10 @@ fn test_lost_update() {
));

let conn4 = db.db.connect().unwrap();
let tx4 = db.mvcc_store.begin_tx(conn4.pager.borrow().clone());
let tx4 = db
.mvcc_store
.begin_tx(conn4.pager.borrow().clone())
.unwrap();
let row = db
.mvcc_store
.read(
@@ -568,19 +627,22 @@ fn test_committed_visibility() {
let db = MvccTestDb::new();

// let's add $10 to my account since I like money
let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let tx1_row = generate_simple_string_row(1, 1, "10");
db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap();
commit_tx(db.mvcc_store.clone(), &db.conn, tx1).unwrap();

// but I like more money, so let me try adding $10 more
let conn2 = db.db.connect().unwrap();
let tx2 = db.mvcc_store.begin_tx(conn2.pager.borrow().clone());
let tx2_row = generate_simple_string_row(1, 1, "20");
assert!(db
let tx2 = db
.mvcc_store
.update(tx2, tx2_row.clone(), conn2.pager.borrow().clone())
.unwrap());
.begin_tx(conn2.pager.borrow().clone())
.unwrap();
let tx2_row = generate_simple_string_row(1, 1, "20");
assert!(db.mvcc_store.update(tx2, tx2_row.clone()).unwrap());
let row = db
.mvcc_store
.read(
@@ -596,7 +658,10 @@ fn test_committed_visibility() {

// can I check how much money I have?
let conn3 = db.db.connect().unwrap();
let tx3 = db.mvcc_store.begin_tx(conn3.pager.borrow().clone());
let tx3 = db
.mvcc_store
.begin_tx(conn3.pager.borrow().clone())
.unwrap();
let row = db
.mvcc_store
.read(
@@ -616,10 +681,16 @@ fn test_committed_visibility() {
fn test_future_row() {
let db = MvccTestDb::new();

let tx1 = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx1 = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();

let conn2 = db.db.connect().unwrap();
let tx2 = db.mvcc_store.begin_tx(conn2.pager.borrow().clone());
let tx2 = db
.mvcc_store
.begin_tx(conn2.pager.borrow().clone())
.unwrap();
let tx2_row = generate_simple_string_row(1, 1, "Hello");
db.mvcc_store.insert(tx2, tx2_row).unwrap();

@@ -663,7 +734,10 @@ use crate::{MemoryIO, Statement};

fn setup_test_db() -> (MvccTestDb, u64) {
let db = MvccTestDb::new();
let tx_id = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx_id = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();

let table_id = 1;
let test_rows = [
@@ -683,13 +757,19 @@ fn setup_test_db() -> (MvccTestDb, u64) {

commit_tx(db.mvcc_store.clone(), &db.conn, tx_id).unwrap();

let tx_id = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx_id = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
(db, tx_id)
}

fn setup_lazy_db(initial_keys: &[i64]) -> (MvccTestDb, u64) {
let db = MvccTestDb::new();
let tx_id = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx_id = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();

let table_id = 1;
for i in initial_keys {
@@ -702,7 +782,10 @@ fn setup_lazy_db(initial_keys: &[i64]) -> (MvccTestDb, u64) {

commit_tx(db.mvcc_store.clone(), &db.conn, tx_id).unwrap();

let tx_id = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx_id = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
(db, tx_id)
}

@@ -866,10 +949,13 @@ fn test_cursor_with_empty_table() {
{
// FIXME: force page 1 initialization
let pager = db.conn.pager.borrow().clone();
let tx_id = db.mvcc_store.begin_tx(pager.clone());
let tx_id = db.mvcc_store.begin_tx(pager.clone()).unwrap();
commit_tx(db.mvcc_store.clone(), &db.conn, tx_id).unwrap();
}
let tx_id = db.mvcc_store.begin_tx(db.conn.pager.borrow().clone());
let tx_id = db
.mvcc_store
.begin_tx(db.conn.pager.borrow().clone())
.unwrap();
let table_id = 1; // Empty table

// Test LazyScanCursor with empty table
@@ -1092,7 +1178,7 @@ fn test_restart() {
{
let conn = db.connect();
let mvcc_store = db.get_mvcc_store();
let tx_id = mvcc_store.begin_tx(conn.pager.borrow().clone());
let tx_id = mvcc_store.begin_tx(conn.pager.borrow().clone()).unwrap();
let row = generate_simple_string_row(1, 1, "foo");

mvcc_store.insert(tx_id, row).unwrap();
@@ -1104,13 +1190,13 @@ fn test_restart() {
{
let conn = db.connect();
let mvcc_store = db.get_mvcc_store();
let tx_id = mvcc_store.begin_tx(conn.pager.borrow().clone());
let tx_id = mvcc_store.begin_tx(conn.pager.borrow().clone()).unwrap();
let row = generate_simple_string_row(1, 2, "bar");

mvcc_store.insert(tx_id, row).unwrap();
commit_tx(mvcc_store.clone(), &conn, tx_id).unwrap();

let tx_id = mvcc_store.begin_tx(conn.pager.borrow().clone());
let tx_id = mvcc_store.begin_tx(conn.pager.borrow().clone()).unwrap();
let row = mvcc_store.read(tx_id, RowID::new(1, 2)).unwrap().unwrap();
let record = get_record_value(&row);
match record.get_value(0).unwrap() {
@@ -1381,3 +1467,30 @@ fn test_batch_writes() {
}
println!("start: {start} end: {end}");
}

#[test]
fn transaction_display() {
let state = AtomicTransactionState::from(TransactionState::Preparing);
let tx_id = 42;
let begin_ts = 20250914;

let write_set = SkipSet::new();
write_set.insert(RowID::new(1, 11));
write_set.insert(RowID::new(1, 13));

let read_set = SkipSet::new();
read_set.insert(RowID::new(2, 17));
read_set.insert(RowID::new(2, 19));

let tx = Transaction {
state,
tx_id,
begin_ts,
write_set,
read_set,
};

let expected = "{ state: Preparing, id: 42, begin_ts: 20250914, write_set: [RowID { table_id: 1, row_id: 11 }, RowID { table_id: 1, row_id: 13 }], read_set: [RowID { table_id: 2, row_id: 17 }, RowID { table_id: 2, row_id: 19 }] }";
let output = format!("{tx}");
assert_eq!(output, expected);
}

@@ -65,7 +65,7 @@ mod tests {
let conn = db.get_db().connect().unwrap();
let mvcc_store = db.get_db().mv_store.as_ref().unwrap().clone();
for _ in 0..iterations {
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone());
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone()).unwrap();
let id = IDS.fetch_add(1, Ordering::SeqCst);
let id = RowID {
table_id: 1,
@@ -74,7 +74,7 @@ mod tests {
let row = generate_simple_string_row(1, id.row_id, "Hello");
mvcc_store.insert(tx, row.clone()).unwrap();
commit_tx_no_conn(&db, tx, &conn).unwrap();
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone());
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone()).unwrap();
let committed_row = mvcc_store.read(tx, id).unwrap();
commit_tx_no_conn(&db, tx, &conn).unwrap();
assert_eq!(committed_row, Some(row));
@@ -86,7 +86,7 @@ mod tests {
let conn = db.get_db().connect().unwrap();
let mvcc_store = db.get_db().mv_store.as_ref().unwrap().clone();
for _ in 0..iterations {
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone());
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone()).unwrap();
let id = IDS.fetch_add(1, Ordering::SeqCst);
let id = RowID {
table_id: 1,
@@ -95,7 +95,7 @@ mod tests {
let row = generate_simple_string_row(1, id.row_id, "World");
mvcc_store.insert(tx, row.clone()).unwrap();
commit_tx_no_conn(&db, tx, &conn).unwrap();
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone());
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone()).unwrap();
let committed_row = mvcc_store.read(tx, id).unwrap();
commit_tx_no_conn(&db, tx, &conn).unwrap();
assert_eq!(committed_row, Some(row));
@@ -127,15 +127,14 @@ mod tests {
let dropped = mvcc_store.drop_unused_row_versions();
tracing::debug!("garbage collected {dropped} versions");
}
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone());
let tx = mvcc_store.begin_tx(conn.pager.borrow().clone()).unwrap();
let id = i % 16;
let id = RowID {
table_id: 1,
row_id: id,
};
let row = generate_simple_string_row(1, id.row_id, &format!("{prefix} @{tx}"));
if let Err(e) = mvcc_store.upsert(tx, row.clone(), conn.pager.borrow().clone())
{
if let Err(e) = mvcc_store.upsert(tx, row.clone()) {
tracing::trace!("upsert failed: {e}");
failed_upserts += 1;
continue;

@@ -306,6 +306,8 @@ impl Schema {

// Store DBSP state table root pages: view_name -> dbsp_state_root_page
let mut dbsp_state_roots: HashMap<String, usize> = HashMap::new();
// Store DBSP state table index root pages: view_name -> dbsp_state_index_root_page
let mut dbsp_state_index_roots: HashMap<String, usize> = HashMap::new();
// Store materialized view info (SQL and root page) for later creation
let mut materialized_view_info: HashMap<String, (String, usize)> = HashMap::new();

@@ -357,6 +359,7 @@ impl Schema {
&mut from_sql_indexes,
&mut automatic_indices,
&mut dbsp_state_roots,
&mut dbsp_state_index_roots,
&mut materialized_view_info,
)?;
drop(record_cursor);
@@ -369,7 +372,11 @@ impl Schema {

self.populate_indices(from_sql_indexes, automatic_indices)?;

self.populate_materialized_views(materialized_view_info, dbsp_state_roots)?;
self.populate_materialized_views(
materialized_view_info,
dbsp_state_roots,
dbsp_state_index_roots,
)?;

Ok(())
}
@@ -492,6 +499,7 @@ impl Schema {
&mut self,
materialized_view_info: std::collections::HashMap<String, (String, usize)>,
dbsp_state_roots: std::collections::HashMap<String, usize>,
dbsp_state_index_roots: std::collections::HashMap<String, usize>,
) -> Result<()> {
for (view_name, (sql, main_root)) in materialized_view_info {
// Look up the DBSP state root for this view - must exist for materialized views
@@ -501,9 +509,17 @@ impl Schema {
))
})?;

// Create the IncrementalView with both root pages
let incremental_view =
IncrementalView::from_sql(&sql, self, main_root, *dbsp_state_root)?;
// Look up the DBSP state index root (may not exist for older schemas)
let dbsp_state_index_root =
dbsp_state_index_roots.get(&view_name).copied().unwrap_or(0);
// Create the IncrementalView with all root pages
let incremental_view = IncrementalView::from_sql(
&sql,
self,
main_root,
*dbsp_state_root,
dbsp_state_index_root,
)?;
let referenced_tables = incremental_view.get_referenced_table_names();

// Create a BTreeTable for the materialized view
@@ -539,6 +555,7 @@ impl Schema {
from_sql_indexes: &mut Vec<UnparsedFromSqlIndex>,
automatic_indices: &mut std::collections::HashMap<String, Vec<(String, usize)>>,
dbsp_state_roots: &mut std::collections::HashMap<String, usize>,
dbsp_state_index_roots: &mut std::collections::HashMap<String, usize>,
materialized_view_info: &mut std::collections::HashMap<String, (String, usize)>,
) -> Result<()> {
match ty {
@@ -593,12 +610,23 @@ impl Schema {
// index|sqlite_autoindex_foo_1|foo|3|
let index_name = name.to_string();
let table_name = table_name.to_string();
match automatic_indices.entry(table_name) {
std::collections::hash_map::Entry::Vacant(e) => {
e.insert(vec![(index_name, root_page as usize)]);
}
std::collections::hash_map::Entry::Occupied(mut e) => {
e.get_mut().push((index_name, root_page as usize));

// Check if this is an index for a DBSP state table
if table_name.starts_with(DBSP_TABLE_PREFIX) {
// Extract the view name from __turso_internal_dbsp_state_<viewname>
let view_name = table_name
.strip_prefix(DBSP_TABLE_PREFIX)
.unwrap()
.to_string();
dbsp_state_index_roots.insert(view_name, root_page as usize);
} else {
match automatic_indices.entry(table_name) {
std::collections::hash_map::Entry::Vacant(e) => {
e.insert(vec![(index_name, root_page as usize)]);
}
std::collections::hash_map::Entry::Occupied(mut e) => {
e.get_mut().push((index_name, root_page as usize));
}
}
}
}

@@ -2157,7 +2157,7 @@ impl BTreeCursor {
(cmp, found)
}

#[instrument(skip_all, level = Level::INFO)]
#[instrument(skip_all, level = Level::DEBUG)]
pub fn move_to(&mut self, key: SeekKey<'_>, cmp: SeekOp) -> Result<IOResult<()>> {
turso_assert!(
self.mv_cursor.is_none(),
@@ -3542,17 +3542,20 @@ impl BTreeCursor {
usable_space,
)?;
let overflow_cell_count_after = parent_contents.overflow_cells.len();
let divider_cell_is_overflow_cell =
overflow_cell_count_after > overflow_cell_count_before;
#[cfg(debug_assertions)]
BTreeCursor::validate_balance_non_root_divider_cell_insertion(
balance_info,
parent_contents,
divider_cell_insert_idx_in_parent,
divider_cell_is_overflow_cell,
page,
usable_space,
);
{
let divider_cell_is_overflow_cell =
overflow_cell_count_after > overflow_cell_count_before;

BTreeCursor::validate_balance_non_root_divider_cell_insertion(
balance_info,
parent_contents,
divider_cell_insert_idx_in_parent,
divider_cell_is_overflow_cell,
page,
usable_space,
);
}
}
tracing::debug!(
"balance_non_root(parent_overflow={})",
@@ -4625,7 +4628,7 @@ impl BTreeCursor {
}
};
let row = crate::mvcc::database::Row::new(row_id, record_buf, num_columns);
mv_cursor.borrow_mut().insert(row).unwrap();
mv_cursor.borrow_mut().insert(row)?;
}
None => todo!("Support mvcc inserts with index btrees"),
},
@@ -4655,7 +4658,7 @@ impl BTreeCursor {
pub fn delete(&mut self) -> Result<IOResult<()>> {
if let Some(mv_cursor) = &self.mv_cursor {
let rowid = mv_cursor.borrow_mut().current_row_id().unwrap();
mv_cursor.borrow_mut().delete(rowid, self.pager.clone())?;
mv_cursor.borrow_mut().delete(rowid)?;
return Ok(IOResult::Done(()));
}

@@ -552,13 +552,8 @@ enum AllocatePage1State {
#[derive(Debug, Clone)]
enum FreePageState {
Start,
AddToTrunk {
page: Arc<Page>,
trunk_page: Option<Arc<Page>>,
},
NewTrunk {
page: Arc<Page>,
},
AddToTrunk { page: Arc<Page> },
NewTrunk { page: Arc<Page> },
}

impl Pager {
@@ -1741,25 +1736,19 @@ impl Pager {
let trunk_page_id = header.freelist_trunk_page.get();

if trunk_page_id != 0 {
*state = FreePageState::AddToTrunk {
page,
trunk_page: None,
};
*state = FreePageState::AddToTrunk { page };
} else {
*state = FreePageState::NewTrunk { page };
}
}
FreePageState::AddToTrunk { page, trunk_page } => {
FreePageState::AddToTrunk { page } => {
let trunk_page_id = header.freelist_trunk_page.get();
if trunk_page.is_none() {
// Add as leaf to current trunk
let (page, c) = self.read_page(trunk_page_id as usize)?;
trunk_page.replace(page);
if let Some(c) = c {
let (trunk_page, c) = self.read_page(trunk_page_id as usize)?;
if let Some(c) = c {
if !c.is_completed() {
io_yield_one!(c);
}
}
let trunk_page = trunk_page.as_ref().unwrap();
turso_assert!(trunk_page.is_loaded(), "trunk_page should be loaded");

let trunk_page_contents = trunk_page.get_contents();
@@ -1775,7 +1764,7 @@ impl Pager {
trunk_page.get().id == trunk_page_id as usize,
"trunk page has unexpected id"
);
self.add_dirty(trunk_page);
self.add_dirty(&trunk_page);

trunk_page_contents.write_u32_no_offset(
TRUNK_PAGE_LEAF_COUNT_OFFSET,

@@ -927,7 +927,7 @@ pub fn begin_read_page(
db_file.read_page(page_idx, io_ctx, c)
}

#[instrument(skip_all, level = Level::INFO)]
#[instrument(skip_all, level = Level::DEBUG)]
pub fn finish_read_page(page_idx: usize, buffer_ref: Arc<Buffer>, page: PageRef) {
tracing::trace!("finish_read_page(page_idx = {page_idx})");
let pos = if page_idx == DatabaseHeader::PAGE_ID {

@@ -1099,14 +1099,27 @@ impl Wal for WalFile {
let epoch = shared_file.read().epoch.load(Ordering::Acquire);
frame.set_wal_tag(frame_id, epoch);
});
let shared = self.get_shared();
assert!(
shared.enabled.load(Ordering::Relaxed),
"WAL must be enabled"
);
let file = shared.file.as_ref().unwrap();
let file = {
let shared = self.get_shared();
assert!(
shared.enabled.load(Ordering::Relaxed),
"WAL must be enabled"
);
// important not to hold shared lock beyond this point to avoid deadlock scenario where:
// thread 1: takes readlock here, passes reference to shared.file to begin_read_wal_frame
// thread 2: tries to acquire write lock elsewhere
// thread 1: tries to re-acquire read lock in the completion (see 'complete' above)
//
// this causes a deadlock due to the locking policy in parking_lot:
// from https://docs.rs/parking_lot/latest/parking_lot/type.RwLock.html:
// "This lock uses a task-fair locking policy which avoids both reader and writer starvation.
// This means that readers trying to acquire the lock will block even if the lock is unlocked
// when there are writers waiting to acquire the lock.
// Because of this, attempts to recursively acquire a read lock within a single thread may result in a deadlock."
shared.file.as_ref().unwrap().clone()
};
begin_read_wal_frame(
file,
&file,
offset + WAL_FRAME_HEADER_SIZE as u64,
buffer_pool,
complete,
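The comment in the hunk above describes the hazard this refactor avoids. A minimal self-contained sketch of the same parking_lot pitfall, using toy types rather than the WAL code itself:

use parking_lot::RwLock;
use std::sync::Arc;
use std::thread;

fn main() {
    let lock = Arc::new(RwLock::new(0u32));
    let guard = lock.read(); // thread 1 holds a read lock
    let l2 = lock.clone();
    let writer = thread::spawn(move || {
        let _w = l2.write(); // thread 2 queues a write and waits
    });
    // With parking_lot's task-fair policy, a second lock.read() on this
    // thread would now block behind the queued writer, which blocks
    // behind `guard`: a deadlock. Hence it stays commented out:
    // let _again = lock.read();
    drop(guard);
    writer.join().unwrap();
}

Cloning the Arc'd file handle inside a short-lived block and dropping the guard before any re-entrant access, as the diff does, sidesteps this.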

@@ -198,6 +198,7 @@ pub fn translate_alter_table(
}
}

// TODO: All quoted ids will be quoted with `[]`, we should store some info from the parsed AST
btree.columns.push(column.clone());

let sql = btree.to_sql();

@@ -185,7 +185,7 @@ pub enum OperationMode {
DELETE,
}

#[derive(Clone, Copy, Debug)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Sqlite always considers Read transactions implicit
pub enum TransactionMode {
None,

@@ -2,7 +2,7 @@ use crate::schema::{Schema, DBSP_TABLE_PREFIX};
use crate::storage::pager::CreateBTreeFlags;
use crate::translate::emitter::Resolver;
use crate::translate::schema::{emit_schema_entry, SchemaEntryType, SQLITE_TABLEID};
use crate::util::normalize_ident;
use crate::util::{normalize_ident, PRIMARY_KEY_AUTOMATIC_INDEX_NAME_PREFIX};
use crate::vdbe::builder::{CursorType, ProgramBuilder};
use crate::vdbe::insn::{CmpInsFlags, Cookie, Insn, RegisterOrLiteral};
use crate::{Connection, Result, SymbolTable};
@@ -141,7 +141,20 @@ pub fn translate_create_materialized_view(

// Add the DBSP state table to sqlite_master (required for materialized views)
let dbsp_table_name = format!("{DBSP_TABLE_PREFIX}{normalized_view_name}");
let dbsp_sql = format!("CREATE TABLE {dbsp_table_name} (key INTEGER PRIMARY KEY, state BLOB)");
// The element_id column uses SQLite's dynamic typing system to store different value types:
// - For hash-based operators (joins, filters): stores INTEGER hash values or rowids
// - For future MIN/MAX operators: stores the actual values being compared (INTEGER, REAL, TEXT, BLOB)
// SQLite's type affinity and sorting rules ensure correct ordering within each operator's data
let dbsp_sql = format!(
"CREATE TABLE {dbsp_table_name} (\
operator_id INTEGER NOT NULL, \
zset_id INTEGER NOT NULL, \
element_id NOT NULL, \
value BLOB, \
weight INTEGER NOT NULL, \
PRIMARY KEY (operator_id, zset_id, element_id)\
)"
);

emit_schema_entry(
&mut program,
@@ -155,11 +168,37 @@ pub fn translate_create_materialized_view(
Some(dbsp_sql),
)?;

// Create automatic primary key index for the DBSP table
// Since the table has PRIMARY KEY (operator_id, zset_id, element_id), we need an index
let dbsp_index_root_reg = program.alloc_register();
program.emit_insn(Insn::CreateBtree {
db: 0,
root: dbsp_index_root_reg,
flags: CreateBTreeFlags::new_index(),
});

// Register the index in sqlite_schema
let dbsp_index_name = format!(
"{}{}_1",
PRIMARY_KEY_AUTOMATIC_INDEX_NAME_PREFIX, &dbsp_table_name
);
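// As an illustration (assuming the prefix constant resolves to the usual
// "sqlite_autoindex_" form seen in the schema rows earlier in this diff),
// a view named "v" would yield an index named
// "sqlite_autoindex___turso_internal_dbsp_state_v_1".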
emit_schema_entry(
&mut program,
&resolver,
sqlite_schema_cursor_id,
None, // cdc_table_cursor_id
SchemaEntryType::Index,
&dbsp_index_name,
&dbsp_table_name,
dbsp_index_root_reg,
None, // Automatic indexes don't store SQL
)?;

// Parse schema to load the new view and DBSP state table
program.emit_insn(Insn::ParseSchema {
db: sqlite_schema_cursor_id,
where_clause: Some(format!(
"name = '{normalized_view_name}' OR name = '{dbsp_table_name}'"
"name = '{normalized_view_name}' OR name = '{dbsp_table_name}' OR name = '{dbsp_index_name}'"
)),
});

17
core/util.rs
@@ -1,6 +1,7 @@
#![allow(unused)]
use crate::incremental::view::IncrementalView;
use crate::numeric::StrToF64;
use crate::translate::emitter::TransactionMode;
use crate::translate::expr::WalkControl;
use crate::types::IOResult;
use crate::{
@@ -150,10 +151,10 @@ pub fn parse_schema_rows(
mut rows: Statement,
schema: &mut Schema,
syms: &SymbolTable,
mv_tx_id: Option<u64>,
mv_tx: Option<(u64, TransactionMode)>,
mut existing_views: HashMap<String, Arc<Mutex<IncrementalView>>>,
) -> Result<()> {
rows.set_mv_tx_id(mv_tx_id);
rows.set_mv_tx(mv_tx);
// TODO: if we IO, these unparsed indexes are lost. Will probably need some state between
// IO runs
let mut from_sql_indexes = Vec::with_capacity(10);
@@ -162,6 +163,9 @@ pub fn parse_schema_rows(
// Store DBSP state table root pages: view_name -> dbsp_state_root_page
let mut dbsp_state_roots: std::collections::HashMap<String, usize> =
std::collections::HashMap::new();
// Store DBSP state table index root pages: view_name -> dbsp_state_index_root_page
let mut dbsp_state_index_roots: std::collections::HashMap<String, usize> =
std::collections::HashMap::new();
// Store materialized view info (SQL and root page) for later creation
let mut materialized_view_info: std::collections::HashMap<String, (String, usize)> =
std::collections::HashMap::new();
@@ -184,8 +188,9 @@ pub fn parse_schema_rows(
&mut from_sql_indexes,
&mut automatic_indices,
&mut dbsp_state_roots,
&mut dbsp_state_index_roots,
&mut materialized_view_info,
)?;
)?
}
StepResult::IO => {
// TODO: How do we ensure that the I/O we submitted to
@@ -199,7 +204,11 @@ pub fn parse_schema_rows(
}

schema.populate_indices(from_sql_indexes, automatic_indices)?;
schema.populate_materialized_views(materialized_view_info, dbsp_state_roots)?;
schema.populate_materialized_views(
materialized_view_info,
dbsp_state_roots,
dbsp_state_index_roots,
)?;

Ok(())
}

@@ -941,8 +941,8 @@ pub fn op_open_read(
let pager = program.get_pager_from_database_index(db);

let (_, cursor_type) = program.cursor_ref.get(*cursor_id).unwrap();
let mv_cursor = match program.connection.mv_tx_id.get() {
Some(tx_id) => {
let mv_cursor = match program.connection.mv_tx.get() {
Some((tx_id, _)) => {
let table_id = *root_page as u64;
let mv_store = mv_store.unwrap().clone();
let mv_cursor = Rc::new(RefCell::new(
@@ -2156,7 +2156,7 @@ pub fn op_transaction(
// In MVCC we don't have write exclusivity, therefore we just need to start a transaction if needed.
// Programs can run Transaction twice, first with read flag and then with write flag. So a single txid is enough
// for both.
if program.connection.mv_tx_id.get().is_none() {
if program.connection.mv_tx.get().is_none() {
// We allocate the first page lazily in the first transaction.
return_if_io!(pager.maybe_allocate_page1());
// TODO: when we fix MVCC enable schema cookie detection for reprepare statements
@@ -2168,23 +2168,31 @@ pub fn op_transaction(
// }
let tx_id = match tx_mode {
TransactionMode::None | TransactionMode::Read | TransactionMode::Concurrent => {
mv_store.begin_tx(pager.clone())
mv_store.begin_tx(pager.clone())?
}
TransactionMode::Write => {
return_if_io!(mv_store.begin_exclusive_tx(pager.clone()))
return_if_io!(mv_store.begin_exclusive_tx(pager.clone(), None))
}
};
conn.mv_transactions.borrow_mut().push(tx_id);
program.connection.mv_tx_id.set(Some(tx_id));
} else if updated
&& matches!(new_transaction_state, TransactionState::Write { .. })
&& matches!(tx_mode, TransactionMode::Write)
{
// For MVCC with concurrent transactions, we don't need to upgrade to exclusive.
// The existing MVCC transaction can handle both reads and writes.
// We only upgrade to exclusive for IMMEDIATE/EXCLUSIVE transaction modes.
// Since we already have an MVCC transaction from BEGIN CONCURRENT,
// we can just continue using it for writes.
program.connection.mv_tx.set(Some((tx_id, *tx_mode)));
} else if updated {
// TODO: fix tx_mode in Insn::Transaction, now each statement overrides it even if there's already a CONCURRENT Tx in progress, for example
let mv_tx_mode = program.connection.mv_tx.get().unwrap().1;
let actual_tx_mode = if mv_tx_mode == TransactionMode::Concurrent {
TransactionMode::Concurrent
} else {
*tx_mode
};
if matches!(new_transaction_state, TransactionState::Write { .. })
&& matches!(actual_tx_mode, TransactionMode::Write)
{
let (tx_id, mv_tx_mode) = program.connection.mv_tx.get().unwrap();
if mv_tx_mode == TransactionMode::Read {
return_if_io!(mv_store.upgrade_to_exclusive_tx(pager.clone(), Some(tx_id)));
} else {
return_if_io!(mv_store.begin_exclusive_tx(pager.clone(), Some(tx_id)));
}
}
}
} else {
if matches!(tx_mode, TransactionMode::Concurrent) {
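Editor's note: the recurring substitution in these hunks, mv_tx_id.get() becoming mv_tx.get() with a (tx_id, mode) tuple, lets later opcodes recover how the transaction was begun, which is what drives the upgrade-to-exclusive decision above. A self-contained sketch of the pattern (type names assumed, not the real turso_core definitions):

use std::cell::Cell;

#[derive(Clone, Copy, PartialEq, Debug)]
enum TransactionMode { None, Read, Write, Concurrent }

#[derive(Default)]
struct Connection {
    // Previously Cell<Option<u64>>; now the mode travels with the id.
    mv_tx: Cell<Option<(u64, TransactionMode)>>,
}

impl Connection {
    // Call sites that only need the id destructure and discard the mode.
    fn mv_tx_id(&self) -> Option<u64> {
        self.mv_tx.get().map(|(id, _)| id)
    }
}

fn main() {
    let conn = Connection::default();
    conn.mv_tx.set(Some((7, TransactionMode::Concurrent)));
    assert_eq!(conn.mv_tx_id(), Some(7));
    // A CONCURRENT transaction is not upgraded to exclusive on write.
    let upgrade = matches!(conn.mv_tx.get(), Some((_, m)) if m != TransactionMode::Concurrent);
    assert!(!upgrade);
}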
@@ -2292,14 +2300,20 @@ pub fn op_auto_commit(
if *auto_commit != conn.auto_commit.get() {
if *rollback {
// TODO(pere): add rollback I/O logic once we implement rollback journal
return_if_io!(pager.end_tx(true, &conn));
if let Some(mv_store) = mv_store {
if let Some((tx_id, _)) = conn.mv_tx.get() {
mv_store.rollback_tx(tx_id, pager.clone(), &conn)?;
}
} else {
return_if_io!(pager.end_tx(true, &conn));
}
conn.transaction_state.replace(TransactionState::None);
conn.auto_commit.replace(true);
} else {
conn.auto_commit.replace(*auto_commit);
}
} else {
let mvcc_tx_active = program.connection.mv_tx_id.get().is_some();
let mvcc_tx_active = program.connection.mv_tx.get().is_some();
if !mvcc_tx_active {
if !*auto_commit {
return Err(LimboError::TxError(
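Editor's note: op_auto_commit now chooses the rollback path by backend. With an MVCC store present, the transaction's versions are discarded through rollback_tx; otherwise the pager ends the transaction with rollback set. The decision table, as a small runnable sketch with hypothetical names:

#[derive(Debug, PartialEq)]
enum RollbackPath {
    MvccRollbackTx, // mv_store.rollback_tx(tx_id, pager, conn)
    PagerEndTx,     // pager.end_tx(true, conn)
    Nothing,        // MVCC enabled but no transaction to roll back
}

fn rollback_path(mvcc_enabled: bool, mv_tx_active: bool) -> RollbackPath {
    if mvcc_enabled {
        if mv_tx_active { RollbackPath::MvccRollbackTx } else { RollbackPath::Nothing }
    } else {
        RollbackPath::PagerEndTx
    }
}

fn main() {
    assert_eq!(rollback_path(true, true), RollbackPath::MvccRollbackTx);
    assert_eq!(rollback_path(true, false), RollbackPath::Nothing);
    assert_eq!(rollback_path(false, true), RollbackPath::PagerEndTx);
}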
@@ -6374,8 +6388,8 @@ pub fn op_open_write(
CursorType::BTreeIndex(index) => Some(index),
_ => None,
};
let mv_cursor = match program.connection.mv_tx_id.get() {
Some(tx_id) => {
let mv_cursor = match program.connection.mv_tx.get() {
Some((tx_id, _)) => {
let table_id = root_page;
let mv_store = mv_store.unwrap().clone();
let mv_cursor = Rc::new(RefCell::new(
@@ -6649,7 +6663,7 @@ pub fn op_parse_schema(
stmt,
schema,
&conn.syms.borrow(),
program.connection.mv_tx_id.get(),
program.connection.mv_tx.get(),
existing_views,
)
})
@@ -6664,7 +6678,7 @@ pub fn op_parse_schema(
stmt,
schema,
&conn.syms.borrow(),
program.connection.mv_tx_id.get(),
program.connection.mv_tx.get(),
existing_views,
)
})
@@ -7120,8 +7134,8 @@ pub fn op_open_ephemeral(
let root_page = return_if_io!(pager.btree_create(flag));

let (_, cursor_type) = program.cursor_ref.get(cursor_id).unwrap();
let mv_cursor = match program.connection.mv_tx_id.get() {
Some(tx_id) => {
let mv_cursor = match program.connection.mv_tx.get() {
Some((tx_id, _)) => {
let table_id = root_page as u64;
let mv_store = mv_store.unwrap().clone();
let mv_cursor = Rc::new(RefCell::new(
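Editor's note: the same Some(tx_id) to Some((tx_id, _)) rewrite appears in op_open_read, op_open_write, op_parse_schema, and op_open_ephemeral; each opcode only needs the id when building its MVCC cursor. A compact illustration of the shared pattern (MvCursor here is a stand-in, not the real type):

use std::cell::RefCell;
use std::rc::Rc;

#[derive(Clone, Copy, Debug)]
enum TransactionMode { Read, Write, Concurrent }

// Stand-in for the real MVCC cursor.
struct MvCursor { tx_id: u64, table_id: u64 }

fn open_mv_cursor(
    mv_tx: Option<(u64, TransactionMode)>,
    root_page: u64,
) -> Option<Rc<RefCell<MvCursor>>> {
    // The mode is carried in the tuple but ignored here; only the id matters.
    mv_tx.map(|(tx_id, _)| Rc::new(RefCell::new(MvCursor { tx_id, table_id: root_page })))
}

fn main() {
    let cur = open_mv_cursor(Some((1, TransactionMode::Concurrent)), 42);
    assert_eq!(cur.unwrap().borrow().table_id, 42);
    assert!(open_mv_cursor(None, 42).is_none());
}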
@@ -820,17 +820,11 @@ impl Program {
let auto_commit = conn.auto_commit.get();
if auto_commit {
// FIXME: we don't want to commit stuff from other programs.
let mut mv_transactions = conn.mv_transactions.borrow_mut();
if matches!(program_state.commit_state, CommitState::Ready) {
assert!(
mv_transactions.len() <= 1,
"for now we only support one mv transaction in single connection, {mv_transactions:?}",
);
if mv_transactions.is_empty() {
let Some((tx_id, _)) = conn.mv_tx.get() else {
return Ok(IOResult::Done(()));
}
let tx_id = mv_transactions.first().unwrap();
let state_machine = mv_store.commit_tx(*tx_id, pager.clone(), &conn).unwrap();
};
let state_machine = mv_store.commit_tx(tx_id, pager.clone(), &conn).unwrap();
program_state.commit_state = CommitState::CommitingMvcc { state_machine };
}
let CommitState::CommitingMvcc { state_machine } = &mut program_state.commit_state
@@ -840,10 +834,9 @@ impl Program {
match self.step_end_mvcc_txn(state_machine, mv_store)? {
IOResult::Done(_) => {
assert!(state_machine.is_finalized());
conn.mv_tx_id.set(None);
conn.mv_tx.set(None);
conn.transaction_state.replace(TransactionState::None);
program_state.commit_state = CommitState::Ready;
mv_transactions.clear();
return Ok(IOResult::Done(()));
}
IOResult::IO(io) => {
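Editor's note: commit runs as a resumable state machine so that a step can yield on I/O and be re-entered later. A toy version of that protocol (the real CommitState and commit_tx are richer; this only shows the Ready, then Committing, then Done shape):

enum IOResult { Done, IO }

enum CommitState { Ready, Committing { steps_left: u32 } }

fn step_commit(state: &mut CommitState) -> IOResult {
    loop {
        match state {
            CommitState::Ready => {
                // In the engine this is where commit_tx builds the state machine.
                *state = CommitState::Committing { steps_left: 2 };
            }
            CommitState::Committing { steps_left } => {
                if *steps_left == 0 {
                    // Engine equivalent: conn.mv_tx.set(None) and reset transaction state.
                    *state = CommitState::Ready;
                    return IOResult::Done;
                }
                *steps_left -= 1;
                return IOResult::IO; // yield; caller re-invokes after the I/O completes
            }
        }
    }
}

fn main() {
    let mut st = CommitState::Ready;
    let mut polls = 0;
    while let IOResult::IO = step_commit(&mut st) { polls += 1; }
    assert_eq!(polls, 2);
}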
@@ -1082,10 +1075,14 @@ pub fn handle_program_error(
LimboError::TxError(_) => {}
// Table locked errors, e.g. trying to checkpoint in an interactive transaction, do not cause a rollback.
LimboError::TableLocked => {}
// Busy errors do not cause a rollback.
LimboError::Busy => {}
_ => {
if let Some(mv_store) = mv_store {
if let Some(tx_id) = connection.mv_tx_id.get() {
mv_store.rollback_tx(tx_id, pager.clone());
if let Some((tx_id, _)) = connection.mv_tx.get() {
connection.transaction_state.replace(TransactionState::None);
connection.auto_commit.replace(true);
mv_store.rollback_tx(tx_id, pager.clone(), connection)?;
}
} else {
pager
@@ -16,4 +16,4 @@ static = []
turso_macros = { workspace = true }

getrandom = "0.3.1"
chrono = "0.4.40"
chrono = { workspace = true, default-features = true }

@@ -18,7 +18,7 @@ turso_ext = { workspace = true, features = ["static"] }
csv = "1.3.1"

[dev-dependencies]
tempfile = "3.19.1"
tempfile = { workspace = true }

[target.'cfg(not(target_family = "wasm"))'.dependencies]
mimalloc = { version = "0.1", default-features = false }

@@ -17,7 +17,7 @@ crate-type = ["cdylib", "lib"]

[dependencies]
turso_ext = { workspace = true, features = ["static"] }
regex = "1.11.1"
regex = { workspace = true }

[target.'cfg(not(target_family = "wasm"))'.dependencies]
mimalloc = { version = "0.1", default-features = false }

@@ -14,7 +14,7 @@ crate-type = ["cdylib", "lib"]
static= [ "turso_ext/static" ]

[dependencies]
env_logger = "0.11.6"
env_logger = { workspace = true }
lazy_static = "1.5.0"
turso_ext = { workspace = true, features = ["static", "vfs"] }
log = "0.4.26"

@@ -15,17 +15,17 @@ default = []
serde = ["dep:serde", "bitflags/serde"]

[dependencies]
bitflags = "2.0"
miette = "7.4.0"
bitflags = { workspace = true }
miette = { workspace = true }
strum = { workspace = true }
strum_macros = { workspace = true }
serde = { workspace = true, optional = true, features = ["derive"] }
thiserror = "1.0.61"
thiserror = { workspace = true }
turso_macros = { workspace = true }

[dev-dependencies]
fallible-iterator = "0.3"
criterion = { version = "0.5", features = ["html_reports"] }
fallible-iterator = { workspace = true }
criterion = { workspace = true, features = ["html_reports"] }

[target.'cfg(not(target_family = "windows"))'.dev-dependencies]
pprof = { version = "0.14.0", features = ["criterion", "flamegraph"] }
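Editor's note: these manifest hunks (and the many that follow) all replace crate-local version pins with references to a shared [workspace.dependencies] table, so versions are declared once at the workspace root. A sketch of the two sides of that contract; the versions shown here are illustrative, not the ones pinned in turso's root manifest:

# Root Cargo.toml (illustrative versions)
[workspace.dependencies]
tempfile = "3"
regex = "1"
tracing-subscriber = "0.3"

# Member crate Cargo.toml
[dependencies]
regex = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }

[dev-dependencies]
tempfile = { workspace = true }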
403
perf/throughput/rusqlite/Cargo.lock
generated
@@ -1,403 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4

[[package]]
name = "ahash"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75"
dependencies = [
"cfg-if",
"once_cell",
"version_check",
"zerocopy",
]

[[package]]
name = "anstream"
version = "0.6.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"is_terminal_polyfill",
"utf8parse",
]

[[package]]
name = "anstyle"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"

[[package]]
name = "anstyle-parse"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
dependencies = [
"utf8parse",
]

[[package]]
name = "anstyle-query"
version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2"
dependencies = [
"windows-sys",
]

[[package]]
name = "anstyle-wincon"
version = "3.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a"
dependencies = [
"anstyle",
"once_cell_polyfill",
"windows-sys",
]

[[package]]
name = "bitflags"
version = "2.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"

[[package]]
name = "cc"
version = "1.2.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5252b3d2648e5eedbc1a6f501e3c795e07025c1e93bbf8bbdd6eef7f447a6d54"
dependencies = [
"find-msvc-tools",
"shlex",
]

[[package]]
name = "cfg-if"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"

[[package]]
name = "clap"
version = "4.5.47"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931"
dependencies = [
"clap_builder",
"clap_derive",
]

[[package]]
name = "clap_builder"
version = "4.5.47"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6"
dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim",
]

[[package]]
name = "clap_derive"
version = "4.5.47"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn",
]

[[package]]
name = "clap_lex"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"

[[package]]
name = "colorchoice"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"

[[package]]
name = "fallible-iterator"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"

[[package]]
name = "fallible-streaming-iterator"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"

[[package]]
name = "find-msvc-tools"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d"

[[package]]
name = "hashbrown"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
dependencies = [
"ahash",
]

[[package]]
name = "hashlink"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af"
dependencies = [
"hashbrown",
]

[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"

[[package]]
name = "is_terminal_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"

[[package]]
name = "libsqlite3-sys"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f"
dependencies = [
"cc",
"pkg-config",
"vcpkg",
]

[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"

[[package]]
name = "once_cell_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad"

[[package]]
name = "pkg-config"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"

[[package]]
name = "proc-macro2"
version = "1.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
dependencies = [
"unicode-ident",
]

[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]

[[package]]
name = "rusqlite"
version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae"
dependencies = [
"bitflags",
"fallible-iterator",
"fallible-streaming-iterator",
"hashlink",
"libsqlite3-sys",
"smallvec",
]

[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

[[package]]
name = "smallvec"
version = "1.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"

[[package]]
name = "strsim"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"

[[package]]
name = "syn"
version = "2.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]

[[package]]
name = "unicode-ident"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d"

[[package]]
name = "utf8parse"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"

[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"

[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"

[[package]]
name = "windows-link"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"

[[package]]
name = "windows-sys"
version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
dependencies = [
"windows-targets",
]

[[package]]
name = "windows-targets"
version = "0.53.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
dependencies = [
"windows-link",
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]

[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"

[[package]]
name = "windows_aarch64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"

[[package]]
name = "windows_i686_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"

[[package]]
name = "windows_i686_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"

[[package]]
name = "windows_i686_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"

[[package]]
name = "windows_x86_64_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"

[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"

[[package]]
name = "windows_x86_64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"

[[package]]
name = "write-throughput"
version = "0.1.0"
dependencies = [
"clap",
"rusqlite",
]

[[package]]
name = "zerocopy"
version = "0.8.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c"
dependencies = [
"zerocopy-derive",
]

[[package]]
name = "zerocopy-derive"
version = "0.8.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
@@ -1,12 +1,12 @@
[package]
name = "write-throughput"
name = "write-throughput-sqlite"
version = "0.1.0"
edition = "2021"

[[bin]]
name = "write-throughput"
name = "write-throughput-sqlite"
path = "src/main.rs"

[dependencies]
rusqlite = { version = "0.31", features = ["bundled"] }
clap = { version = "4.0", features = ["derive"] }
rusqlite = { workspace = true }
clap = { workspace = true, features = ["derive"] }
@@ -2,7 +2,7 @@ use clap::Parser;
use rusqlite::{Connection, Result};
use std::sync::{Arc, Barrier};
use std::thread;
use std::time::{Duration, Instant};
use std::time::Instant;

#[derive(Parser)]
#[command(name = "write-throughput")]
@@ -73,7 +73,7 @@ fn main() -> Result<()> {
match handle.join() {
Ok(Ok(inserts)) => total_inserts += inserts,
Ok(Err(e)) => {
eprintln!("Thread error: {}", e);
eprintln!("Thread error: {e}");
return Err(e);
}
Err(_) => {
@@ -87,9 +87,9 @@ fn main() -> Result<()> {
let overall_throughput = (total_inserts as f64) / overall_elapsed.as_secs_f64();

println!("\n=== BENCHMARK RESULTS ===");
println!("Total inserts: {}", total_inserts);
println!("Total inserts: {total_inserts}",);
println!("Total time: {:.2}s", overall_elapsed.as_secs_f64());
println!("Overall throughput: {:.2} inserts/sec", overall_throughput);
println!("Overall throughput: {overall_throughput:.2} inserts/sec");
println!("Threads: {}", args.threads);
println!("Batch size: {}", args.batch_size);
println!("Iterations per thread: {}", args.iterations);
@@ -116,7 +116,7 @@ fn setup_database(db_path: &str) -> Result<Connection> {
[],
)?;

println!("Database created at: {}", db_path);
println!("Database created at: {db_path}");
Ok(conn)
}

@@ -144,7 +144,7 @@ fn worker_thread(

for i in 0..batch_size {
let id = thread_id * iterations * batch_size + iteration * batch_size + i;
stmt.execute([&id.to_string(), &format!("data_{}", id)])?;
stmt.execute([&id.to_string(), &format!("data_{id}")])?;
total_inserts += 1;
}
if think_ms > 0 {
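Editor's note: most of the src/main.rs churn above is the same clippy-driven cleanup, replacing positional println!/format! arguments with identifiers captured directly in the format string (stable since Rust 2021). The two forms are equivalent:

fn main() {
    let total_inserts = 10_000;
    let id = 7;
    // Before: positional argument.
    println!("Total inserts: {}", total_inserts);
    // After: captured identifier, as in the diff.
    println!("Total inserts: {total_inserts}");
    assert_eq!(format!("data_{}", id), format!("data_{id}"));
}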
2066
perf/throughput/turso/Cargo.lock
generated
File diff suppressed because it is too large
@@ -8,7 +8,8 @@ name = "write-throughput"
path = "src/main.rs"

[dependencies]
turso = { path = "../../../bindings/rust" }
clap = { version = "4.0", features = ["derive"] }
tokio = { version = "1.0", features = ["full"] }
futures = "0.3"
turso = { workspace = true }
clap = { workspace = true, features = ["derive"] }
tokio = { workspace = true, default-features = true, features = ["full"] }
futures = { workspace = true }
tracing-subscriber = { workspace = true }
@@ -1,7 +1,7 @@
use clap::{Parser, ValueEnum};
use std::sync::{Arc, Barrier};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;
use std::sync::{Arc, Barrier};
use std::time::{Duration, Instant};
use turso::{Builder, Database, Result};

#[derive(Debug, Clone, Copy, ValueEnum)]
@@ -33,10 +33,18 @@ struct Args {
help = "Per transaction think time (ms)"
)]
think: u64,

#[arg(
long = "timeout",
default_value = "30000",
help = "Busy timeout in milliseconds"
)]
timeout: u64,
}

#[tokio::main]
async fn main() -> Result<()> {
let _ = tracing_subscriber::fmt::try_init();
let args = Args::parse();

println!(
@@ -58,6 +66,8 @@ async fn main() -> Result<()> {
let start_barrier = Arc::new(Barrier::new(args.threads));
let mut handles = Vec::new();

let timeout = Duration::from_millis(args.timeout);

let overall_start = Instant::now();

for thread_id in 0..args.threads {
@@ -72,17 +82,18 @@ async fn main() -> Result<()> {
barrier,
args.mode,
args.think,
timeout,
));

handles.push(handle);
}

let mut total_inserts = 0;
for handle in handles {
for (idx, handle) in handles.into_iter().enumerate() {
match handle.await {
Ok(Ok(inserts)) => total_inserts += inserts,
Ok(Err(e)) => {
eprintln!("Thread error: {}", e);
eprintln!("Thread error {idx}: {e}");
return Err(e);
}
Err(_) => {
@@ -96,9 +107,9 @@ async fn main() -> Result<()> {
let overall_throughput = (total_inserts as f64) / overall_elapsed.as_secs_f64();

println!("\n=== BENCHMARK RESULTS ===");
println!("Total inserts: {}", total_inserts);
println!("Total inserts: {total_inserts}");
println!("Total time: {:.2}s", overall_elapsed.as_secs_f64());
println!("Overall throughput: {:.2} inserts/sec", overall_throughput);
println!("Overall throughput: {overall_throughput:.2} inserts/sec");
println!("Threads: {}", args.threads);
println!("Batch size: {}", args.batch_size);
println!("Iterations per thread: {}", args.iterations);
@@ -133,10 +144,11 @@ async fn setup_database(db_path: &str, mode: TransactionMode) -> Result<Database
)
.await?;

println!("Database created at: {}", db_path);
println!("Database created at: {db_path}");
Ok(db)
}

#[allow(clippy::too_many_arguments)]
async fn worker_thread(
thread_id: usize,
db: Database,
@@ -145,6 +157,7 @@ async fn worker_thread(
start_barrier: Arc<Barrier>,
mode: TransactionMode,
think_ms: u64,
timeout: Duration,
) -> Result<u64> {
start_barrier.wait();

@@ -155,6 +168,7 @@ async fn worker_thread(

for iteration in 0..iterations {
let conn = db.connect()?;
conn.busy_timeout(Some(timeout))?;
let total_inserts = Arc::clone(&total_inserts);
let tx_fut = async move {
let mut stmt = conn
@@ -171,7 +185,7 @@ async fn worker_thread(
let id = thread_id * iterations * batch_size + iteration * batch_size + i;
stmt.execute(turso::params::Params::Positional(vec![
turso::Value::Integer(id as i64),
turso::Value::Text(format!("data_{}", id)),
turso::Value::Text(format!("data_{id}")),
]))
.await?;
total_inserts.fetch_add(1, Ordering::Relaxed);
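Editor's note: the functional change in this benchmark is the new --timeout flag, plumbed into each worker as a busy timeout so writers back off instead of failing immediately under contention. A condensed sketch of the wiring, using only calls the diff itself shows (Builder::new_local, db.connect, conn.busy_timeout); the Connection type path is assumed from the bindings:

use std::time::Duration;
use turso::{Builder, Result};

async fn connect_with_timeout(path: &str, timeout_ms: u64) -> Result<turso::Connection> {
    let db = Builder::new_local(path).build().await?;
    let conn = db.connect()?;
    // Wait up to timeout_ms for locks before surfacing a busy error.
    conn.busy_timeout(Some(Duration::from_millis(timeout_ms)))?;
    Ok(conn)
}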
@@ -19,6 +19,7 @@ COPY extensions ./extensions/
COPY macros ./macros/
COPY sync ./sync
COPY parser ./parser/
COPY perf/throughput/turso ./perf/throughput/turso
COPY vendored ./vendored/
COPY cli ./cli/
COPY sqlite3 ./sqlite3/
@@ -43,6 +44,7 @@ COPY --from=planner /app/vendored ./vendored/
COPY --from=planner /app/extensions ./extensions/
COPY --from=planner /app/macros ./macros/
COPY --from=planner /app/parser ./parser/
COPY --from=planner /app/perf/throughput/turso ./perf/throughput/turso
COPY --from=planner /app/simulator ./simulator/
COPY --from=planner /app/packages ./packages/
COPY --from=planner /app/sql_generation ./sql_generation/
@@ -15,27 +15,27 @@ name = "limbo_sim"
path = "main.rs"

[dependencies]
turso_core = { path = "../core", features = ["simulator"]}
turso_core = { workspace = true, features = ["simulator"]}
rand = { workspace = true }
rand_chacha = "0.9.0"
rand_chacha = { workspace = true }
log = "0.4.20"
env_logger = "0.10.1"
regex = "1.11.1"
regex-syntax = { version = "0.8.5", default-features = false, features = [
env_logger = { workspace = true }
regex = { workspace = true }
regex-syntax = { workspace = true, default-features = false, features = [
"unicode",
] }
clap = { version = "4.5", features = ["derive"] }
clap = { workspace = true, features = ["derive"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
notify = "8.0.0"
rusqlite.workspace = true
dirs = "6.0.0"
chrono = { version = "0.4.40", features = ["serde"] }
chrono = { workspace = true, default-features = true, features = ["serde"] }
tracing = { workspace = true }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
anyhow.workspace = true
hex = "0.4.3"
itertools = "0.14.0"
hex = { workspace = true }
itertools = { workspace = true }
sql_generation = { workspace = true }
turso_parser = { workspace = true }
schemars = { workspace = true }
@@ -43,4 +43,4 @@ garde = { workspace = true, features = ["derive", "serde"] }
json5 = { version = "0.4.1" }
strum = { workspace = true }
parking_lot = { workspace = true }
indexmap = "2.10.0"
indexmap = { workspace = true }

@@ -10,7 +10,7 @@ repository.workspace = true
path = "lib.rs"

[dependencies]
hex = "0.4.3"
hex = { workspace = true }
serde = { workspace = true, features = ["derive"] }
turso_core = { workspace = true, features = ["simulator"] }
turso_parser = { workspace = true, features = ["serde"] }
@@ -21,7 +21,7 @@ anyhow = { workspace = true }
tracing = { workspace = true }
schemars = { workspace = true }
garde = { workspace = true, features = ["derive", "serde"] }
indexmap = { version = "2.11.0" }
indexmap = { workspace = true }

[dev-dependencies]
rand_chacha = "0.9.0"
rand_chacha = { workspace = true }
@@ -22,15 +22,15 @@ doc = false
crate-type = ["lib", "cdylib", "staticlib"]

[dependencies]
env_logger = { version = "0.11.3", default-features = false }
env_logger = { workspace = true, default-features = false }
libc = "0.2.169"
turso_core = { path = "../core", features = ["conn_raw_api"] }
tracing = "0.1.41"
tracing-appender = "0.2.3"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
turso_core = { workspace = true, features = ["conn_raw_api"] }
tracing = { workspace = true }
tracing-appender = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }

[dev-dependencies]
tempfile = "3.8.0"
tempfile = { workspace = true }

[package.metadata.capi.header]
name = "sqlite3.h"

@@ -21,12 +21,12 @@ experimental_indexes = []

[dependencies]
anarchist-readable-name-generator-lib = "0.1.0"
antithesis_sdk = "0.2.5"
clap = { version = "4.5", features = ["derive"] }
hex = "0.4"
tempfile = "3.20.0"
tokio = { version = "1.29.1", features = ["full"] }
tracing = "0.1.41"
tracing-appender = "0.2.3"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
turso = { path = "../bindings/rust" }
antithesis_sdk = { workspace = true }
clap = { workspace = true, features = ["derive"] }
hex = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true }
tracing-appender = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
turso = { workspace = true }

@@ -488,6 +488,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let plan = plan.clone();
let conn = db.lock().await.connect()?;

conn.execute("PRAGMA data_sync_retry = 1", ()).await?;

// Apply each DDL statement individually
for stmt in &plan.ddl_statements {
if opts.verbose {

@@ -22,11 +22,11 @@ roaring = "0.11.2"

[dev-dependencies]
ctor = "0.4.2"
tempfile = "3.20.0"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tokio = { version = "1.46.1", features = ["macros", "rt-multi-thread", "test-util"] }
tempfile = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
tokio = { workspace = true, features = ["macros", "rt-multi-thread", "test-util"] }
uuid = "1.17.0"
rand = "0.9.2"
rand_chacha = "0.9.0"
rand = { workspace = true }
rand_chacha = { workspace = true }
turso = { workspace = true, features = ["conn_raw_api"] }
futures = "0.3.31"
futures = { workspace = true }
@@ -246,12 +246,17 @@ impl<P: ProtocolIO> DatabaseSyncEngine<P> {
let main_conn = connect_untracked(&self.main_tape)?;
let change_id = self.meta().last_pushed_change_id_hint;
let last_pull_unix_time = self.meta().last_pull_unix_time;
let revision = self.meta().synced_revision.clone().map(|x| match x {
DatabasePullRevision::Legacy {
generation,
synced_frame_no,
} => format!("generation={generation},synced_frame_no={synced_frame_no:?}"),
DatabasePullRevision::V1 { revision } => revision,
});
let last_push_unix_time = self.meta().last_push_unix_time;
let revert_wal_path = &self.revert_db_wal_path;
let revert_wal_file = self
.io
.open_file(revert_wal_path, OpenFlags::all(), false)?;
let revert_wal_size = revert_wal_file.size()?;
let revert_wal_file = self.io.try_open(revert_wal_path)?;
let revert_wal_size = revert_wal_file.map(|f| f.size()).transpose()?.unwrap_or(0);
let main_wal_frames = main_conn.wal_state()?.max_frame;
let main_wal_size = if main_wal_frames == 0 {
0
@@ -264,6 +269,7 @@ impl<P: ProtocolIO> DatabaseSyncEngine<P> {
revert_wal_size,
last_pull_unix_time,
last_push_unix_time,
revision,
})
}

@@ -416,7 +422,6 @@ impl<P: ProtocolIO> DatabaseSyncEngine<P> {
&mut self,
coro: &Coro<Ctx>,
remote_changes: DbChangesStatus,
now: turso_core::Instant,
) -> Result<()> {
assert!(remote_changes.file_slot.is_some(), "file_slot must be set");
let changes_file = remote_changes.file_slot.as_ref().unwrap().value.clone();
@@ -436,7 +441,7 @@ impl<P: ProtocolIO> DatabaseSyncEngine<P> {
m.revert_since_wal_watermark = revert_since_wal_watermark;
m.synced_revision = Some(remote_changes.revision);
m.last_pushed_change_id_hint = 0;
m.last_pull_unix_time = now.secs;
m.last_pull_unix_time = remote_changes.time.secs;
})
.await?;
Ok(())
@@ -656,13 +661,12 @@ impl<P: ProtocolIO> DatabaseSyncEngine<P> {
}

pub async fn pull_changes_from_remote<Ctx>(&mut self, coro: &Coro<Ctx>) -> Result<()> {
let now = self.io.now();
let changes = self.wait_changes_from_remote(coro).await?;
if changes.file_slot.is_some() {
self.apply_changes_from_remote(coro, changes, now).await?;
self.apply_changes_from_remote(coro, changes).await?;
} else {
self.update_meta(coro, |m| {
m.last_pull_unix_time = now.secs;
m.last_pull_unix_time = changes.time.secs;
})
.await?;
}

@@ -67,6 +67,7 @@ pub struct SyncEngineStats {
pub revert_wal_size: u64,
pub last_pull_unix_time: i64,
pub last_push_unix_time: Option<i64>,
pub revision: Option<String>,
}

#[derive(Debug, Clone, Copy, PartialEq)]
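Editor's note: the revert-WAL stat above switches from open_file (which would create or fail) to try_open, which yields an Option of the file; the size then has to be pulled out of an Option<Result<..>>, which is exactly what the map/transpose/unwrap_or chain does. The same Option plumbing in isolation, with a stub File type:

struct File;
impl File {
    fn size(&self) -> Result<u64, String> { Ok(4096) }
}

fn main() -> Result<(), String> {
    let missing: Option<File> = None;
    let present: Option<File> = Some(File);

    // Option<File> -> Option<Result<u64>> -> Result<Option<u64>> -> u64
    let size_missing = missing.map(|f| f.size()).transpose()?.unwrap_or(0);
    let size_present = present.map(|f| f.size()).transpose()?.unwrap_or(0);

    assert_eq!(size_missing, 0);    // an absent file counts as zero bytes
    assert_eq!(size_present, 4096); // the real size when the file exists
    Ok(())
}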
@@ -19,6 +19,14 @@ do_execsql_test_on_specific_db {:memory:} alter-table-rename-column {
"CREATE INDEX i ON t (b)"
}

do_execsql_test_on_specific_db {:memory:} alter-table-rename-quoted-column {
CREATE TABLE t (a INTEGER);
ALTER TABLE t RENAME a TO "ab cd";
SELECT sql FROM sqlite_schema;
} {
"CREATE TABLE t (\"ab cd\" INTEGER)"
}

do_execsql_test_on_specific_db {:memory:} alter-table-add-column {
CREATE TABLE t (a);
INSERT INTO t VALUES (1);
@@ -74,6 +82,14 @@ do_execsql_test_on_specific_db {:memory:} alter-table-add-column-default {
"0.1|hello"
}

do_execsql_test_on_specific_db {:memory:} alter-table-add-quoted-column {
CREATE TABLE test (a);
ALTER TABLE test ADD COLUMN [b c];
SELECT sql FROM sqlite_schema;
} {
"CREATE TABLE test (a, [b c])"
}

do_execsql_test_on_specific_db {:memory:} alter-table-drop-column {
CREATE TABLE t (a, b);
INSERT INTO t VALUES (1, 1), (2, 2), (3, 3);
@@ -16,24 +16,24 @@ path = "integration/mod.rs"

[dependencies]
anyhow.workspace = true
env_logger = "0.10.1"
turso_core = { path = "../core", features = ["conn_raw_api"] }
turso = { path = "../bindings/rust", features = ["conn_raw_api"] }
tokio = { version = "1.47", features = ["full"] }
env_logger = { workspace = true }
turso_core = { workspace = true, features = ["conn_raw_api"] }
turso = { workspace = true, features = ["conn_raw_api"] }
tokio = { workspace = true, features = ["full"] }
rusqlite.workspace = true
tempfile = "3.0.7"
tempfile = { workspace = true }
log = "0.4.22"
assert_cmd = "^2"
rand_chacha = "0.9.0"
rand = "0.9.0"
rand_chacha = { workspace = true }
rand = { workspace = true }
zerocopy = "0.8.26"
ctor = "0.5.0"
twox-hash = "2.1.1"

[dev-dependencies]
test-log = { version = "0.2.17", features = ["trace"] }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tracing = "0.1.41"
tracing-subscriber = { workspace = true, features = ["env-filter"] }
tracing = { workspace = true }

[features]
encryption = ["turso_core/encryption"]
@@ -1,14 +1,14 @@
use rand::seq::IndexedRandom;
use rand::Rng;
use rand_chacha::{rand_core::SeedableRng, ChaCha8Rng};
use std::collections::HashMap;
use std::collections::BTreeMap;
use turso::{Builder, Value};

// In-memory representation of the database state
#[derive(Debug, Clone, PartialEq)]
struct DbRow {
id: i64,
other_columns: HashMap<String, Value>,
other_columns: BTreeMap<String, Value>,
}

impl std::fmt::Display for DbRow {
@@ -33,9 +33,9 @@ impl std::fmt::Display for DbRow {
#[derive(Debug, Clone)]
struct TransactionState {
// The schema this transaction can see (snapshot)
schema: HashMap<String, TableSchema>,
schema: BTreeMap<String, TableSchema>,
// The rows this transaction can see (snapshot)
visible_rows: HashMap<i64, DbRow>,
visible_rows: BTreeMap<i64, DbRow>,
// Pending changes in this transaction
pending_changes: Vec<Operation>,
}
@@ -55,19 +55,24 @@ struct TableSchema {
#[derive(Debug)]
struct ShadowDb {
// Schema
schema: HashMap<String, TableSchema>,
schema: BTreeMap<String, TableSchema>,
// Committed state (what's actually in the database)
committed_rows: HashMap<i64, DbRow>,
committed_rows: BTreeMap<i64, DbRow>,
// Transaction states
transactions: HashMap<usize, Option<TransactionState>>,
transactions: BTreeMap<usize, Option<TransactionState>>,
query_gen_options: QueryGenOptions,
}

impl ShadowDb {
fn new(initial_schema: HashMap<String, TableSchema>) -> Self {
fn new(
initial_schema: BTreeMap<String, TableSchema>,
query_gen_options: QueryGenOptions,
) -> Self {
Self {
schema: initial_schema,
committed_rows: HashMap::new(),
transactions: HashMap::new(),
committed_rows: BTreeMap::new(),
transactions: BTreeMap::new(),
query_gen_options,
}
}
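Editor's note: swapping HashMap for BTreeMap throughout the shadow model is about reproducibility. Iteration over a BTreeMap follows sorted key order, so a given seed replays the same operation sequence, while HashMap iteration order can differ between runs. For example:

use std::collections::BTreeMap;

fn main() {
    let mut rows = BTreeMap::new();
    rows.insert(3, "c");
    rows.insert(1, "a");
    rows.insert(2, "b");
    // Always [1, 2, 3], regardless of insertion order or hasher seed.
    let keys: Vec<i64> = rows.keys().copied().collect();
    assert_eq!(keys, vec![1, 2, 3]);
}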
@@ -185,7 +190,7 @@ impl ShadowDb {
&mut self,
tx_id: usize,
id: i64,
other_columns: HashMap<String, Value>,
other_columns: BTreeMap<String, Value>,
) -> Result<(), String> {
if let Some(tx_state) = self.transactions.get_mut(&tx_id) {
// Check if row exists in visible state
@@ -212,7 +217,7 @@ impl ShadowDb {
&mut self,
tx_id: usize,
id: i64,
other_columns: HashMap<String, Value>,
other_columns: BTreeMap<String, Value>,
) -> Result<(), String> {
if let Some(tx_state) = self.transactions.get_mut(&tx_id) {
// Check if row exists in visible state
@@ -388,16 +393,18 @@ impl std::fmt::Display for AlterTableOp {

#[derive(Debug, Clone)]
enum Operation {
Begin,
Begin {
concurrent: bool,
},
Commit,
Rollback,
Insert {
id: i64,
other_columns: HashMap<String, Value>,
other_columns: BTreeMap<String, Value>,
},
Update {
id: i64,
other_columns: HashMap<String, Value>,
other_columns: BTreeMap<String, Value>,
},
Delete {
id: i64,
@@ -423,7 +430,9 @@ fn value_to_sql(v: &Value) -> String {
impl std::fmt::Display for Operation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Operation::Begin => write!(f, "BEGIN"),
Operation::Begin { concurrent } => {
write!(f, "BEGIN{}", if *concurrent { " CONCURRENT" } else { "" })
}
Operation::Commit => write!(f, "COMMIT"),
Operation::Rollback => write!(f, "ROLLBACK"),
Operation::Insert { id, other_columns } => {
|
||||
/// Verify translation isolation semantics with multiple concurrent connections.
|
||||
/// This test is ignored because it still fails sometimes; unsure if it fails due to a bug in the test or a bug in the implementation.
|
||||
async fn test_multiple_connections_fuzz() {
|
||||
multiple_connections_fuzz(false).await
|
||||
multiple_connections_fuzz(FuzzOptions::default()).await
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "MVCC is currently under development, it is expected to fail"]
|
||||
// Same as test_multiple_connections_fuzz, but with MVCC enabled.
|
||||
async fn test_multiple_connections_fuzz_mvcc() {
|
||||
multiple_connections_fuzz(true).await
|
||||
let mvcc_fuzz_options = FuzzOptions {
|
||||
mvcc_enabled: true,
|
||||
max_num_connections: 2,
|
||||
query_gen_options: QueryGenOptions {
|
||||
weight_begin_deferred: 8,
|
||||
weight_begin_concurrent: 8,
|
||||
weight_commit: 8,
|
||||
weight_rollback: 8,
|
||||
weight_checkpoint: 0,
|
||||
weight_ddl: 0,
|
||||
weight_dml: 76,
|
||||
dml_gen_options: DmlGenOptions {
|
||||
weight_insert: 25,
|
||||
weight_delete: 25,
|
||||
weight_select: 25,
|
||||
weight_update: 25,
|
||||
},
|
||||
},
|
||||
..FuzzOptions::default()
|
||||
};
|
||||
multiple_connections_fuzz(mvcc_fuzz_options).await
|
||||
}
|
||||
|
||||
async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
#[derive(Debug, Clone)]
|
||||
struct FuzzOptions {
|
||||
num_iterations: usize,
|
||||
operations_per_connection: usize,
|
||||
max_num_connections: usize,
|
||||
query_gen_options: QueryGenOptions,
|
||||
mvcc_enabled: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct QueryGenOptions {
|
||||
weight_begin_deferred: usize,
|
||||
weight_begin_concurrent: usize,
|
||||
weight_commit: usize,
|
||||
weight_rollback: usize,
|
||||
weight_checkpoint: usize,
|
||||
weight_ddl: usize,
|
||||
weight_dml: usize,
|
||||
dml_gen_options: DmlGenOptions,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct DmlGenOptions {
|
||||
weight_insert: usize,
|
||||
weight_update: usize,
|
||||
weight_delete: usize,
|
||||
weight_select: usize,
|
||||
}
|
||||
|
||||
impl Default for FuzzOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
num_iterations: 50,
|
||||
operations_per_connection: 30,
|
||||
max_num_connections: 8,
|
||||
query_gen_options: QueryGenOptions::default(),
|
||||
mvcc_enabled: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for QueryGenOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
weight_begin_deferred: 10,
|
||||
weight_begin_concurrent: 0,
|
||||
weight_commit: 10,
|
||||
weight_rollback: 10,
|
||||
weight_checkpoint: 5,
|
||||
weight_ddl: 5,
|
||||
weight_dml: 55,
|
||||
dml_gen_options: DmlGenOptions::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for DmlGenOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
weight_insert: 25,
|
||||
weight_update: 25,
|
||||
weight_delete: 25,
|
||||
weight_select: 25,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn multiple_connections_fuzz(opts: FuzzOptions) {
|
||||
let (mut rng, seed) = rng_from_time_or_env();
|
||||
println!("Multiple connections fuzz test seed: {seed}");
|
||||
|
||||
const NUM_ITERATIONS: usize = 50;
|
||||
const OPERATIONS_PER_CONNECTION: usize = 30;
|
||||
const MAX_NUM_CONNECTIONS: usize = 8;
|
||||
|
||||
for iteration in 0..NUM_ITERATIONS {
|
||||
let num_connections = rng.random_range(2..=MAX_NUM_CONNECTIONS);
|
||||
for iteration in 0..opts.num_iterations {
|
||||
let num_connections = rng.random_range(2..=opts.max_num_connections);
|
||||
println!("--- Seed {seed} Iteration {iteration} ---");
|
||||
println!("Options: {opts:?}");
|
||||
// Create a fresh database for each iteration
|
||||
let tempfile = tempfile::NamedTempFile::new().unwrap();
|
||||
let db = Builder::new_local(tempfile.path().to_str().unwrap())
|
||||
.with_mvcc(mvcc_enabled)
|
||||
.with_mvcc(opts.mvcc_enabled)
|
||||
.build()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// SHARED shadow database for all connections
|
||||
let mut schema = HashMap::new();
|
||||
let mut schema = BTreeMap::new();
|
||||
schema.insert(
|
||||
"test_table".to_string(),
|
||||
TableSchema {
|
||||
@@ -525,7 +618,7 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
],
|
||||
},
|
||||
);
|
||||
let mut shared_shadow_db = ShadowDb::new(schema);
|
||||
let mut shared_shadow_db = ShadowDb::new(schema, opts.query_gen_options.clone());
|
||||
let mut next_tx_id = 0;
|
||||
|
||||
// Create connections
|
||||
@@ -544,26 +637,67 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
connections.push((conn, conn_id, None::<usize>)); // (connection, conn_id, current_tx_id)
|
||||
}
|
||||
|
||||
let is_acceptable_error = |e: &turso::Error| -> bool {
|
||||
let e_string = e.to_string();
|
||||
e_string.contains("is locked")
|
||||
|| e_string.contains("busy")
|
||||
|| e_string.contains("Write-write conflict")
|
||||
};
|
||||
let requires_rollback = |e: &turso::Error| -> bool {
|
||||
let e_string = e.to_string();
|
||||
e_string.contains("Write-write conflict")
|
||||
};
|
||||
|
||||
let handle_error = |e: &turso::Error,
|
||||
tx_id: &mut Option<usize>,
|
||||
conn_id: usize,
|
||||
op_num: usize,
|
||||
shadow_db: &mut ShadowDb| {
|
||||
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
|
||||
if requires_rollback(e) {
|
||||
if let Some(tx_id) = tx_id {
|
||||
println!("Connection {conn_id}(op={op_num}) rolling back transaction {tx_id}");
|
||||
shadow_db.rollback_transaction(*tx_id);
|
||||
}
|
||||
*tx_id = None;
|
||||
}
|
||||
if is_acceptable_error(e) {
|
||||
return;
|
||||
}
|
||||
panic!("Unexpected error: {e}");
|
||||
};
|
||||
|
||||
// Interleave operations between all connections
|
||||
for op_num in 0..OPERATIONS_PER_CONNECTION {
|
||||
for op_num in 0..opts.operations_per_connection {
|
||||
for (conn, conn_id, current_tx_id) in &mut connections {
|
||||
// Generate operation based on current transaction state
|
||||
let (operation, visible_rows) =
|
||||
generate_operation(&mut rng, *current_tx_id, &mut shared_shadow_db);
|
||||
|
||||
let is_in_tx = current_tx_id.is_some();
|
||||
let is_in_tx_str = if is_in_tx {
|
||||
format!("true(tx_id={:?})", current_tx_id.unwrap())
|
||||
} else {
|
||||
"false".to_string()
|
||||
};
|
||||
let has_snapshot = current_tx_id.is_some_and(|tx_id| {
|
||||
shared_shadow_db.transactions.get(&tx_id).unwrap().is_some()
|
||||
});
|
||||
println!("Connection {conn_id}(op={op_num}): {operation}, is_in_tx={is_in_tx}, has_snapshot={has_snapshot}");
|
||||
println!("Connection {conn_id}(op={op_num}): {operation}, is_in_tx={is_in_tx_str}, has_snapshot={has_snapshot}");
|
||||
|
||||
match operation {
|
||||
Operation::Begin => {
|
||||
Operation::Begin { concurrent } => {
|
||||
shared_shadow_db.begin_transaction(next_tx_id, false);
|
||||
if concurrent {
|
||||
// in tursodb, BEGIN CONCURRENT immediately starts a transaction.
|
||||
shared_shadow_db.take_snapshot_if_not_exists(next_tx_id);
|
||||
}
|
||||
*current_tx_id = Some(next_tx_id);
|
||||
next_tx_id += 1;
|
||||
|
||||
conn.execute("BEGIN", ()).await.unwrap();
|
||||
let query = operation.to_string();
|
||||
|
||||
conn.execute(query.as_str(), ()).await.unwrap();
|
||||
}
|
||||
Operation::Commit => {
|
||||
let Some(tx_id) = *current_tx_id else {
|
||||
@@ -578,13 +712,13 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
shared_shadow_db.commit_transaction(tx_id);
|
||||
*current_tx_id = None;
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
|
||||
// Check if it's an acceptable error
|
||||
if !e.to_string().contains("database is locked") {
|
||||
panic!("Unexpected error during commit: {e}");
|
||||
}
|
||||
}
|
||||
Err(e) => handle_error(
|
||||
&e,
|
||||
current_tx_id,
|
||||
*conn_id,
|
||||
op_num,
|
||||
&mut shared_shadow_db,
|
||||
),
|
||||
}
|
||||
}
|
||||
Operation::Rollback => {
|
||||
@@ -598,15 +732,13 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
shared_shadow_db.rollback_transaction(tx_id);
|
||||
*current_tx_id = None;
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
|
||||
// Check if it's an acceptable error
|
||||
if !e.to_string().contains("Busy")
|
||||
&& !e.to_string().contains("database is locked")
|
||||
{
|
||||
panic!("Unexpected error during rollback: {e}");
|
||||
}
|
||||
}
|
||||
Err(e) => handle_error(
|
||||
&e,
|
||||
current_tx_id,
|
||||
*conn_id,
|
||||
op_num,
|
||||
&mut shared_shadow_db,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -645,13 +777,13 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
next_tx_id += 1;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
|
||||
// Check if it's an acceptable error
|
||||
if !e.to_string().contains("database is locked") {
|
||||
panic!("Unexpected error during insert: {e}");
|
||||
}
|
||||
}
|
||||
Err(e) => handle_error(
|
||||
&e,
|
||||
current_tx_id,
|
||||
*conn_id,
|
||||
op_num,
|
||||
&mut shared_shadow_db,
|
||||
),
|
||||
}
|
||||
}
|
||||
Operation::Update { id, other_columns } => {
|
||||
@@ -683,13 +815,13 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
next_tx_id += 1;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
|
||||
// Check if it's an acceptable error
|
||||
if !e.to_string().contains("database is locked") {
|
||||
panic!("Unexpected error during update: {e}");
|
||||
}
|
||||
}
|
||||
Err(e) => handle_error(
|
||||
&e,
|
||||
current_tx_id,
|
||||
*conn_id,
|
||||
op_num,
|
||||
&mut shared_shadow_db,
|
||||
),
|
||||
}
|
||||
}
|
||||
Operation::Delete { id } => {
|
||||
@@ -716,13 +848,13 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
next_tx_id += 1;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Connection {conn_id}(op={op_num}) FAILED: {e}");
|
||||
// Check if it's an acceptable error
|
||||
if !e.to_string().contains("database is locked") {
|
||||
panic!("Unexpected error during delete: {e}");
|
||||
}
|
||||
}
|
||||
Err(e) => handle_error(
|
||||
&e,
|
||||
current_tx_id,
|
||||
*conn_id,
|
||||
op_num,
|
||||
&mut shared_shadow_db,
|
||||
),
|
||||
}
|
||||
}
|
||||
Operation::Select => {
|
||||
@@ -735,9 +867,13 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
let ok = loop {
|
||||
match rows.next().await {
|
||||
Err(e) => {
|
||||
if !e.to_string().contains("database is locked") {
|
||||
panic!("Unexpected error during select: {e}");
|
||||
}
|
||||
handle_error(
|
||||
&e,
|
||||
current_tx_id,
|
||||
*conn_id,
|
||||
op_num,
|
||||
&mut shared_shadow_db,
|
||||
);
|
||||
break false;
|
||||
}
|
||||
Ok(None) => {
|
||||
@@ -747,7 +883,7 @@ async fn multiple_connections_fuzz(mvcc_enabled: bool) {
|
||||
let Value::Integer(id) = row.get_value(0).unwrap() else {
|
||||
panic!("Unexpected value for id: {:?}", row.get_value(0));
|
||||
};
|
||||
let mut other_columns = HashMap::new();
|
||||
let mut other_columns = BTreeMap::new();
|
||||
for i in 1..columns.len() {
|
||||
let column = columns.get(i).unwrap();
|
||||
let value = row.get_value(i).unwrap();

@@ -879,120 +1015,169 @@ fn generate_operation(
            shadow_db.get_visible_rows(None) // No transaction
        }
    };
    match rng.random_range(0..100) {
        0..=9 => {
            if !in_transaction {
                (Operation::Begin, get_visible_rows())
            } else {
                let visible_rows = get_visible_rows();
                (
                    generate_data_operation(rng, &visible_rows, &schema_clone),
                    visible_rows,
                )
            }
        }
        10..=14 => {
            if in_transaction {
                (Operation::Commit, get_visible_rows())
            } else {
                let visible_rows = get_visible_rows();
                (
                    generate_data_operation(rng, &visible_rows, &schema_clone),
                    visible_rows,
                )
            }
        }
        15..=19 => {
            if in_transaction {
                (Operation::Rollback, get_visible_rows())
            } else {
                let visible_rows = get_visible_rows();
                (
                    generate_data_operation(rng, &visible_rows, &schema_clone),
                    visible_rows,
                )
            }
        }
        20..=22 => {
            let mode = match rng.random_range(0..=3) {
                0 => CheckpointMode::Passive,
                1 => CheckpointMode::Restart,
                2 => CheckpointMode::Truncate,
                3 => CheckpointMode::Full,
                _ => unreachable!(),
            };
            (Operation::Checkpoint { mode }, get_visible_rows())
        }
        23..=26 => {
            let op = match rng.random_range(0..6) {
                0..=2 => AlterTableOp::AddColumn {
                    name: format!("col_{}", rng.random_range(1..i64::MAX)),
                    ty: "TEXT".to_string(),
                },
                3..=4 => {
                    let table_schema = schema_clone.get("test_table").unwrap();
                    let columns_no_id = table_schema
                        .columns
                        .iter()
                        .filter(|c| c.name != "id")
                        .collect::<Vec<_>>();
                    if columns_no_id.is_empty() {
                        AlterTableOp::AddColumn {
                            name: format!("col_{}", rng.random_range(1..i64::MAX)),
                            ty: "TEXT".to_string(),
                        }
                    } else {
                        let column = columns_no_id.choose(rng).unwrap();
                        AlterTableOp::DropColumn {
                            name: column.name.clone(),
                        }
                    }
                }
                5 => {
                    let columns_no_id = schema_clone
                        .get("test_table")
                        .unwrap()
                        .columns
                        .iter()
                        .filter(|c| c.name != "id")
                        .collect::<Vec<_>>();
                    if columns_no_id.is_empty() {
                        AlterTableOp::AddColumn {
                            name: format!("col_{}", rng.random_range(1..i64::MAX)),
                            ty: "TEXT".to_string(),
                        }
                    } else {
                        let column = columns_no_id.choose(rng).unwrap();
                        AlterTableOp::RenameColumn {
                            old_name: column.name.clone(),
                            new_name: format!("col_{}", rng.random_range(1..i64::MAX)),
                        }
                    }
                }
                _ => unreachable!(),
            };
            (Operation::AlterTable { op }, get_visible_rows())
        }
        _ => {

    let mut start = 0;
    let range_begin_deferred = start..start + shadow_db.query_gen_options.weight_begin_deferred;
    start += shadow_db.query_gen_options.weight_begin_deferred;
    let range_begin_concurrent = start..start + shadow_db.query_gen_options.weight_begin_concurrent;
    start += shadow_db.query_gen_options.weight_begin_concurrent;
    let range_commit = start..start + shadow_db.query_gen_options.weight_commit;
    start += shadow_db.query_gen_options.weight_commit;
    let range_rollback = start..start + shadow_db.query_gen_options.weight_rollback;
    start += shadow_db.query_gen_options.weight_rollback;
    let range_checkpoint = start..start + shadow_db.query_gen_options.weight_checkpoint;
    start += shadow_db.query_gen_options.weight_checkpoint;
    let range_ddl = start..start + shadow_db.query_gen_options.weight_ddl;
    start += shadow_db.query_gen_options.weight_ddl;
    let range_dml = start..start + shadow_db.query_gen_options.weight_dml;
    start += shadow_db.query_gen_options.weight_dml;

    let random_val = rng.random_range(0..start);

    if range_begin_deferred.contains(&random_val) {
        if !in_transaction {
            (Operation::Begin { concurrent: false }, get_visible_rows())
        } else {
            let visible_rows = get_visible_rows();
            (
                generate_data_operation(rng, &visible_rows, &schema_clone),
                generate_data_operation(
                    rng,
                    &visible_rows,
                    &schema_clone,
                    &shadow_db.query_gen_options.dml_gen_options,
                ),
                visible_rows,
            )
        }
    } else if range_begin_concurrent.contains(&random_val) {
        if !in_transaction {
            (Operation::Begin { concurrent: true }, get_visible_rows())
        } else {
            let visible_rows = get_visible_rows();
            (
                generate_data_operation(
                    rng,
                    &visible_rows,
                    &schema_clone,
                    &shadow_db.query_gen_options.dml_gen_options,
                ),
                visible_rows,
            )
        }
    } else if range_commit.contains(&random_val) {
        if in_transaction {
            (Operation::Commit, get_visible_rows())
        } else {
            let visible_rows = get_visible_rows();
            (
                generate_data_operation(
                    rng,
                    &visible_rows,
                    &schema_clone,
                    &shadow_db.query_gen_options.dml_gen_options,
                ),
                visible_rows,
            )
        }
    } else if range_rollback.contains(&random_val) {
        if in_transaction {
            (Operation::Rollback, get_visible_rows())
        } else {
            let visible_rows = get_visible_rows();
            (
                generate_data_operation(
                    rng,
                    &visible_rows,
                    &schema_clone,
                    &shadow_db.query_gen_options.dml_gen_options,
                ),
                visible_rows,
            )
        }
    } else if range_checkpoint.contains(&random_val) {
        let mode = match rng.random_range(0..=3) {
            0 => CheckpointMode::Passive,
            1 => CheckpointMode::Restart,
            2 => CheckpointMode::Truncate,
            3 => CheckpointMode::Full,
            _ => unreachable!(),
        };
        (Operation::Checkpoint { mode }, get_visible_rows())
    } else if range_ddl.contains(&random_val) {
        let op = match rng.random_range(0..6) {
            0..=2 => AlterTableOp::AddColumn {
                name: format!("col_{}", rng.random_range(1..i64::MAX)),
                ty: "TEXT".to_string(),
            },
            3..=4 => {
                let table_schema = schema_clone.get("test_table").unwrap();
                let columns_no_id = table_schema
                    .columns
                    .iter()
                    .filter(|c| c.name != "id")
                    .collect::<Vec<_>>();
                if columns_no_id.is_empty() {
                    AlterTableOp::AddColumn {
                        name: format!("col_{}", rng.random_range(1..i64::MAX)),
                        ty: "TEXT".to_string(),
                    }
                } else {
                    let column = columns_no_id.choose(rng).unwrap();
                    AlterTableOp::DropColumn {
                        name: column.name.clone(),
                    }
                }
            }
            5 => {
                let columns_no_id = schema_clone
                    .get("test_table")
                    .unwrap()
                    .columns
                    .iter()
                    .filter(|c| c.name != "id")
                    .collect::<Vec<_>>();
                if columns_no_id.is_empty() {
                    AlterTableOp::AddColumn {
                        name: format!("col_{}", rng.random_range(1..i64::MAX)),
                        ty: "TEXT".to_string(),
                    }
                } else {
                    let column = columns_no_id.choose(rng).unwrap();
                    AlterTableOp::RenameColumn {
                        old_name: column.name.clone(),
                        new_name: format!("col_{}", rng.random_range(1..i64::MAX)),
                    }
                }
            }
            _ => unreachable!(),
        };
        (Operation::AlterTable { op }, get_visible_rows())
    } else if range_dml.contains(&random_val) {
        let visible_rows = get_visible_rows();
        (
            generate_data_operation(
                rng,
                &visible_rows,
                &schema_clone,
                &shadow_db.query_gen_options.dml_gen_options,
            ),
            visible_rows,
        )
    } else {
        unreachable!()
    }
}
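
The hunk above replaces the hard-coded 0..100 buckets with weights from QueryGenOptions, resolved through cumulative half-open ranges. The technique in isolation, as a minimal sketch (pick_weighted is illustrative and not part of the commit; random_range follows the rand 0.9 API used throughout the diff):

use rand::Rng;

/// Stack the weights into ranges [0, w0), [w0, w0 + w1), ... and draw once;
/// the range the draw falls into selects the variant.
fn pick_weighted<R: Rng>(rng: &mut R, weights: &[u64]) -> usize {
    let total: u64 = weights.iter().sum();
    assert!(total > 0, "at least one weight must be non-zero");
    let mut val = rng.random_range(0..total);
    for (i, &w) in weights.iter().enumerate() {
        if val < w {
            return i;
        }
        val -= w;
    }
    unreachable!("val is always below the running total")
}

A useful property of this encoding: a zero weight yields an empty range that can never match, so a whole operation class (checkpoints, say) can be disabled from QueryGenOptions without touching the generator itself.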

fn generate_data_operation(
    rng: &mut ChaCha8Rng,
    visible_rows: &[DbRow],
    schema: &HashMap<String, TableSchema>,
    schema: &BTreeMap<String, TableSchema>,
    dml_gen_options: &DmlGenOptions,
) -> Operation {
    let table_schema = schema.get("test_table").unwrap();
    let op_num = rng.random_range(0..4);
    let mut generate_insert_operation = || {
    let generate_insert_operation = |rng: &mut ChaCha8Rng| {
        let id = rng.random_range(1..i64::MAX);
        let mut other_columns = HashMap::new();
        let mut other_columns = BTreeMap::new();
        for column in table_schema.columns.iter() {
            if column.name == "id" {
                continue;
@@ -1009,61 +1194,65 @@ fn generate_data_operation(
        }
        Operation::Insert { id, other_columns }
    };
    match op_num {
        0 => {
            // Insert
            generate_insert_operation()
        }
        1 => {
            // Update
            if visible_rows.is_empty() {
                // No rows to update, try insert instead
                generate_insert_operation()
            } else {
                let columns_no_id = table_schema
                    .columns
    let mut start = 0;
    let range_insert = start..start + dml_gen_options.weight_insert;
    start += dml_gen_options.weight_insert;
    let range_update = start..start + dml_gen_options.weight_update;
    start += dml_gen_options.weight_update;
    let range_delete = start..start + dml_gen_options.weight_delete;
    start += dml_gen_options.weight_delete;
    let range_select = start..start + dml_gen_options.weight_select;
    start += dml_gen_options.weight_select;

    let random_val = rng.random_range(0..start);

    if range_insert.contains(&random_val) {
        generate_insert_operation(rng)
    } else if range_update.contains(&random_val) {
        if visible_rows.is_empty() {
            // No rows to update, try insert instead
            generate_insert_operation(rng)
        } else {
            let columns_no_id = table_schema
                .columns
                .iter()
                .filter(|c| c.name != "id")
                .collect::<Vec<_>>();
            if columns_no_id.is_empty() {
                // No columns to update, try insert instead
                return generate_insert_operation(rng);
            }
            let id = visible_rows.choose(rng).unwrap().id;
            let col_name_to_update = columns_no_id.choose(rng).unwrap().name.clone();
            let mut other_columns = BTreeMap::new();
            other_columns.insert(
                col_name_to_update.clone(),
                match columns_no_id
                    .iter()
                    .filter(|c| c.name != "id")
                    .collect::<Vec<_>>();
                if columns_no_id.is_empty() {
                    // No columns to update, try insert instead
                    return generate_insert_operation();
                }
                let id = visible_rows.choose(rng).unwrap().id;
                let col_name_to_update = columns_no_id.choose(rng).unwrap().name.clone();
                let mut other_columns = HashMap::new();
                other_columns.insert(
                    col_name_to_update.clone(),
                    match columns_no_id
                        .iter()
                        .find(|c| c.name == col_name_to_update)
                        .unwrap()
                        .ty
                        .as_str()
                    {
                        "TEXT" => Value::Text(format!("updated_{}", rng.random_range(1..i64::MAX))),
                        "INTEGER" => Value::Integer(rng.random_range(1..i64::MAX)),
                        "REAL" => Value::Real(rng.random_range(1..i64::MAX) as f64),
                        _ => Value::Null,
                    },
                );
                Operation::Update { id, other_columns }
            }
                    .find(|c| c.name == col_name_to_update)
                    .unwrap()
                    .ty
                    .as_str()
                {
                    "TEXT" => Value::Text(format!("updated_{}", rng.random_range(1..i64::MAX))),
                    "INTEGER" => Value::Integer(rng.random_range(1..i64::MAX)),
                    "REAL" => Value::Real(rng.random_range(1..i64::MAX) as f64),
                    _ => Value::Null,
                },
            );
            Operation::Update { id, other_columns }
        }
        2 => {
            // Delete
            if visible_rows.is_empty() {
                // No rows to delete, try insert instead
                generate_insert_operation()
            } else {
                let id = visible_rows.choose(rng).unwrap().id;
                Operation::Delete { id }
            }
    } else if range_delete.contains(&random_val) {
        if visible_rows.is_empty() {
            // No rows to delete, try insert instead
            generate_insert_operation(rng)
        } else {
            let id = visible_rows.choose(rng).unwrap().id;
            Operation::Delete { id }
        }
        3 => {
            // Select
            Operation::Select
        }
        _ => unreachable!(),
    } else if range_select.contains(&random_val) {
        Operation::Select
    } else {
        unreachable!()
    }
}

@@ -355,6 +355,109 @@ fn test_mvcc_concurrent_insert_basic() {
    );
}

#[test]
fn test_mvcc_update_same_row_twice() {
    let tmp_db = TempDatabase::new_with_opts(
        "test_mvcc_update_same_row_twice.db",
        turso_core::DatabaseOpts::new().with_mvcc(true),
    );
    let conn1 = tmp_db.connect_limbo();

    conn1
        .execute("CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)")
        .unwrap();

    conn1
        .execute("INSERT INTO test (id, value) VALUES (1, 'first')")
        .unwrap();

    conn1
        .execute("UPDATE test SET value = 'second' WHERE id = 1")
        .unwrap();

    let stmt = conn1
        .query("SELECT value FROM test WHERE id = 1")
        .unwrap()
        .unwrap();
    let row = helper_read_single_row(stmt);
    let Value::Text(value) = &row[0] else {
        panic!("expected text value");
    };
    assert_eq!(value.as_str(), "second");

    conn1
        .execute("UPDATE test SET value = 'third' WHERE id = 1")
        .unwrap();

    let stmt = conn1
        .query("SELECT value FROM test WHERE id = 1")
        .unwrap()
        .unwrap();
    let row = helper_read_single_row(stmt);
    let Value::Text(value) = &row[0] else {
        panic!("expected text value");
    };
    assert_eq!(value.as_str(), "third");
}

#[test]
fn test_mvcc_concurrent_conflicting_update() {
    let tmp_db = TempDatabase::new_with_opts(
        "test_mvcc_concurrent_conflicting_update.db",
        turso_core::DatabaseOpts::new().with_mvcc(true),
    );
    let conn1 = tmp_db.connect_limbo();
    let conn2 = tmp_db.connect_limbo();

    conn1
        .execute("CREATE TABLE test (id INTEGER, value TEXT)")
        .unwrap();

    conn1
        .execute("INSERT INTO test (id, value) VALUES (1, 'first')")
        .unwrap();

    conn1.execute("BEGIN CONCURRENT").unwrap();
    conn2.execute("BEGIN CONCURRENT").unwrap();

    conn1
        .execute("UPDATE test SET value = 'second' WHERE id = 1")
        .unwrap();
    let err = conn2
        .execute("UPDATE test SET value = 'third' WHERE id = 1")
        .expect_err("expected error");
    assert!(matches!(err, LimboError::WriteWriteConflict));
}

#[test]
fn test_mvcc_concurrent_conflicting_update_2() {
    let tmp_db = TempDatabase::new_with_opts(
        "test_mvcc_concurrent_conflicting_update.db",
        turso_core::DatabaseOpts::new().with_mvcc(true),
    );
    let conn1 = tmp_db.connect_limbo();
    let conn2 = tmp_db.connect_limbo();

    conn1
        .execute("CREATE TABLE test (id INTEGER, value TEXT)")
        .unwrap();

    conn1
        .execute("INSERT INTO test (id, value) VALUES (1, 'first'), (2, 'first')")
        .unwrap();

    conn1.execute("BEGIN CONCURRENT").unwrap();
    conn2.execute("BEGIN CONCURRENT").unwrap();

    conn1
        .execute("UPDATE test SET value = 'second' WHERE id = 1")
        .unwrap();
    let err = conn2
        .execute("UPDATE test SET value = 'third' WHERE id BETWEEN 0 AND 10")
        .expect_err("expected error");
    assert!(matches!(err, LimboError::WriteWriteConflict));
}
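
Together these two tests pin down first-committer-wins semantics for BEGIN CONCURRENT: once conn1 has written a row version, an overlapping write from conn2 fails immediately with WriteWriteConflict rather than blocking, whether it targets the same row or a range covering it. Application code would typically respond by retrying the whole transaction. A sketch of that pattern, hypothetical and assuming execute and LimboError behave as in the tests above:

// Hypothetical retry wrapper; not part of the commit.
fn with_concurrent_retry<T>(
    conn: &turso_core::Connection,
    mut body: impl FnMut() -> turso_core::Result<T>,
) -> turso_core::Result<T> {
    loop {
        conn.execute("BEGIN CONCURRENT")?;
        match body() {
            Ok(v) => {
                conn.execute("COMMIT")?;
                return Ok(v);
            }
            // First committer wins: the loser rolls back and replays its body.
            Err(LimboError::WriteWriteConflict) => {
                conn.execute("ROLLBACK")?;
            }
            Err(e) => {
                let _ = conn.execute("ROLLBACK");
                return Err(e);
            }
        }
    }
}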

fn helper_read_all_rows(mut stmt: turso_core::Statement) -> Vec<Vec<Value>> {
    let mut ret = Vec::new();
    loop {

@@ -27,17 +27,17 @@ serde = ["dep:serde", "indexmap/serde", "bitflags/serde"]
[dependencies]
log = "0.4.22"
memchr = "2.0"
fallible-iterator = "0.3"
bitflags = "2.0"
indexmap = "2.0"
miette = "7.4.0"
fallible-iterator = { workspace = true }
bitflags = { workspace = true }
indexmap = { workspace = true }
miette = { workspace = true }
strum = { workspace = true }
strum_macros = { workspace = true }
serde = { workspace = true, optional = true, features = ["derive"] }
smallvec = { version = "1.15.1", features = ["const_generics"] }

[dev-dependencies]
env_logger = { version = "0.11", default-features = false }
env_logger = { workspace = true, default-features = false }

[build-dependencies]
cc = "1.0"

@@ -9,10 +9,10 @@ turso_sqlite3_parser = { path = "..", default-features = false, features = [
    "YYNOERRORRECOVERY",
    "NDEBUG",
] }
fallible-iterator = "0.3"
fallible-iterator = { workspace = true }

[dev-dependencies]
criterion = "0.5"
criterion = { workspace = true }

[[bench]]
name = "sqlparser_bench"

@@ -16,14 +16,14 @@ path = "main.rs"

[dependencies]
anyhow.workspace = true
clap = { version = "4.5", features = ["derive"] }
clap = { workspace = true, features = ["derive"] }
memmap2 = "0.9"
rand = { workspace = true }
rand_chacha = "0.9.0"
rand_chacha = { workspace = true }
sql_generation = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
turso_core = { path = "../core", features = ["simulator"] }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
turso_core = { workspace = true, features = ["simulator"] }
turso_parser = { workspace = true }

[features]