Mirror of https://github.com/aljazceru/turso.git (synced 2026-02-11 03:04:22 +01:00)
Merge remote-tracking branch 'origin/main' into cdc_fail_autoincrement
8 .github/workflows/napi-sync.yml (vendored)
@@ -78,7 +78,7 @@ jobs:
.cargo-cache
target/
key: ${{ matrix.settings.target }}-cargo-${{ matrix.settings.host }}
- uses: goto-bus-stop/setup-zig@v2
- uses: mlugg/setup-zig@v2
if: ${{ matrix.settings.target == 'armv7-unknown-linux-gnueabihf' || matrix.settings.target == 'armv7-unknown-linux-musleabihf' }}
with:
version: 0.13.0
@@ -175,11 +175,13 @@ jobs:
if git log -1 --pretty=%B | grep "^Turso [0-9]\+\.[0-9]\+\.[0-9]\+$";
then
echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc
npm publish --access public
make publish-native
make publish-browser
elif git log -1 --pretty=%B | grep "^Turso [0-9]\+\.[0-9]\+\.[0-9]\+";
then
echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc
npm publish --tag next --access public
make publish-native-next
make publish-browser-next
else
echo "Not a release, skipping publish"
fi
63 .github/workflows/napi.yml (vendored)
@@ -19,6 +19,10 @@ defaults:
run:
working-directory: bindings/javascript

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

jobs:
build:
timeout-minutes: 20
@@ -27,20 +31,18 @@ jobs:
matrix:
settings:
- host: windows-latest
build: |
yarn build --target x86_64-pc-windows-msvc
yarn test
target: x86_64-pc-windows-msvc
build: yarn workspace @tursodatabase/database napi-build --target x86_64-pc-windows-msvc
- host: ubuntu-latest
target: x86_64-unknown-linux-gnu
docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-debian
build: yarn build --target x86_64-unknown-linux-gnu
build: yarn workspace @tursodatabase/database napi-build --target x86_64-unknown-linux-gnu
- host: macos-latest
target: aarch64-apple-darwin
build: yarn build --target aarch64-apple-darwin
build: yarn workspace @tursodatabase/database napi-build --target aarch64-apple-darwin
- host: blacksmith-2vcpu-ubuntu-2404-arm
target: aarch64-unknown-linux-gnu
build: yarn build --target aarch64-unknown-linux-gnu
build: yarn workspace @tursodatabase/database napi-build --target aarch64-unknown-linux-gnu
- host: ubuntu-latest
target: wasm32-wasip1-threads
setup: |
@@ -52,7 +54,7 @@ jobs:
export CMAKE_BUILD_PARALLEL_LEVEL=$(nproc)
export TARGET_CXXFLAGS="--target=wasm32-wasi-threads --sysroot=$(pwd)/wasi-sdk-25.0-x86_64-linux/share/wasi-sysroot -pthread -mllvm -wasm-enable-sjlj -lsetjmp"
export TARGET_CFLAGS="$TARGET_CXXFLAGS"
yarn build --target wasm32-wasip1-threads
yarn workspace @tursodatabase/database-browser build
name: stable - ${{ matrix.settings.target }} - node@20
runs-on: ${{ matrix.settings.host }}
steps:
@@ -78,7 +80,7 @@ jobs:
.cargo-cache
target/
key: ${{ matrix.settings.target }}-cargo-${{ matrix.settings.host }}
- uses: goto-bus-stop/setup-zig@v2
- uses: mlugg/setup-zig@v2
if: ${{ matrix.settings.target == 'armv7-unknown-linux-gnueabihf' || matrix.settings.target == 'armv7-unknown-linux-musleabihf' }}
with:
version: 0.13.0
@@ -88,6 +90,8 @@ jobs:
shell: bash
- name: Install dependencies
run: yarn install
- name: Build common
run: yarn workspace @tursodatabase/database-common build
- name: Setup node x86
uses: actions/setup-node@v4
if: matrix.settings.target == 'x86_64-pc-windows-msvc'
@@ -110,8 +114,8 @@ jobs:
with:
name: bindings-${{ matrix.settings.target }}
path: |
bindings/javascript/${{ env.APP_NAME }}.*.node
bindings/javascript/${{ env.APP_NAME }}.*.wasm
bindings/javascript/packages/native/${{ env.APP_NAME }}.*.node
bindings/javascript/packages/browser/${{ env.APP_NAME }}.*.wasm
if-no-files-found: error
test-linux-x64-gnu-binding:
name: Test bindings on Linux-x64-gnu - node@${{ matrix.node }}
@@ -131,20 +135,21 @@ jobs:
node-version: ${{ matrix.node }}
- name: Install dependencies
run: yarn install
- name: Download artifacts
- name: Build common
run: yarn workspace @tursodatabase/database-common build
- name: Download all artifacts
uses: actions/download-artifact@v4
with:
name: bindings-x86_64-unknown-linux-gnu
path: bindings/javascript
path: bindings/javascript/packages
merge-multiple: true
- name: List packages
run: ls -R .
shell: bash
- name: Test bindings
run: docker run --rm -v $(pwd):/build -w /build node:${{ matrix.node }}-slim yarn test
run: docker run --rm -v $(pwd):/build -w /build node:${{ matrix.node }}-slim yarn workspace @tursodatabase/database test
publish:
name: Publish
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/v')
permissions:
contents: read
id-token: write
@@ -156,33 +161,35 @@ jobs:
uses: useblacksmith/setup-node@v5
with:
node-version: 20
- name: Install dependencies
run: yarn install
- name: create npm dirs
run: yarn napi create-npm-dirs
- name: Download all artifacts
uses: actions/download-artifact@v4
with:
path: bindings/javascript/artifacts
- name: Move artifacts
run: yarn artifacts
- name: List packages
run: ls -R ./npm
shell: bash
path: bindings/javascript/packages
merge-multiple: true
- name: Install dependencies
run: yarn install
- name: Install dependencies
run: yarn tsc-build
- name: Publish
if: "startsWith(github.ref, 'refs/tags/v')"
run: |
npm config set provenance true
if git log -1 --pretty=%B | grep "^Turso [0-9]\+\.[0-9]\+\.[0-9]\+$";
then
echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc
npm publish --access public
npm publish --workspaces --access public
elif git log -1 --pretty=%B | grep "^Turso [0-9]\+\.[0-9]\+\.[0-9]\+";
then
echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc
npm publish --tag next --access public
npm publish --workspaces --access public --tag next
else
echo "Not a release, skipping publish"
echo "git log structure is unexpected, skip publishing"
npm publish --workspaces --dry-run
fi
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Publish (dry-run)
if: "!startsWith(github.ref, 'refs/tags/v')"
run: |
npm publish --workspaces --dry-run
3 .gitignore (vendored)
@@ -4,6 +4,7 @@

*.so
*.ipynb
*.o

# Python
.mypy_cache/
@@ -41,3 +42,5 @@ simulator.log
**/*.txt
profile.json.gz
simulator-output/
@@ -393,7 +393,7 @@ Modifiers:
| jsonb_group_array(value) | Yes | |
| json_group_object(label,value) | Yes | |
| jsonb_group_object(name,value) | Yes | |
| json_each(json) | | |
| json_each(json) | Yes | |
| json_each(json,path) | | |
| json_tree(json) | | |
| json_tree(json,path) | | |
70 Cargo.lock (generated)
@@ -667,7 +667,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
|
||||
|
||||
[[package]]
|
||||
name = "core_tester"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"assert_cmd",
|
||||
@@ -1800,9 +1800,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "indexmap"
|
||||
version = "2.10.0"
|
||||
version = "2.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661"
|
||||
checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9"
|
||||
dependencies = [
|
||||
"equivalent",
|
||||
"hashbrown 0.15.2",
|
||||
@@ -1822,7 +1822,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.11.0",
|
||||
"is-terminal",
|
||||
"itoa",
|
||||
"log",
|
||||
@@ -2126,7 +2126,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "limbo_completion"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"mimalloc",
|
||||
"turso_ext",
|
||||
@@ -2134,7 +2134,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "limbo_crypto"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"blake3",
|
||||
"data-encoding",
|
||||
@@ -2147,7 +2147,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "limbo_csv"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"csv",
|
||||
"mimalloc",
|
||||
@@ -2157,7 +2157,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "limbo_ipaddr"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"ipnetwork",
|
||||
"mimalloc",
|
||||
@@ -2166,7 +2166,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "limbo_percentile"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"mimalloc",
|
||||
"turso_ext",
|
||||
@@ -2174,7 +2174,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "limbo_regexp"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"mimalloc",
|
||||
"regex",
|
||||
@@ -2183,7 +2183,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "limbo_sim"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"chrono",
|
||||
@@ -2192,7 +2192,7 @@ dependencies = [
|
||||
"env_logger 0.10.2",
|
||||
"garde",
|
||||
"hex",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.11.0",
|
||||
"itertools 0.14.0",
|
||||
"json5",
|
||||
"log",
|
||||
@@ -2216,7 +2216,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "limbo_sqlite_test_ext"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
@@ -2774,7 +2774,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eac26e981c03a6e53e0aee43c113e3202f5581d5360dae7bd2c70e800dd0451d"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.11.0",
|
||||
"quick-xml 0.32.0",
|
||||
"serde",
|
||||
"time",
|
||||
@@ -2971,7 +2971,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "py-turso"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"pyo3",
|
||||
@@ -3666,12 +3666,13 @@ checksum = "d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d"
|
||||
|
||||
[[package]]
|
||||
name = "sql_generation"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"anarchist-readable-name-generator-lib 0.2.0",
|
||||
"anyhow",
|
||||
"garde",
|
||||
"hex",
|
||||
"indexmap 2.11.0",
|
||||
"itertools 0.14.0",
|
||||
"rand 0.9.2",
|
||||
"rand_chacha 0.9.0",
|
||||
@@ -4064,7 +4065,7 @@ version = "0.8.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae"
|
||||
dependencies = [
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.11.0",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
@@ -4086,7 +4087,7 @@ version = "0.22.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e"
|
||||
dependencies = [
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.11.0",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
@@ -4175,7 +4176,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"rand 0.8.5",
|
||||
"rand_chacha 0.3.1",
|
||||
@@ -4187,7 +4188,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso-java"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"jni",
|
||||
"thiserror 2.0.12",
|
||||
@@ -4196,7 +4197,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_cli"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"cfg-if",
|
||||
@@ -4229,7 +4230,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_core"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"aegis",
|
||||
"aes",
|
||||
@@ -4288,7 +4289,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_dart"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"flutter_rust_bridge",
|
||||
"turso_core",
|
||||
@@ -4296,7 +4297,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_ext"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"getrandom 0.3.2",
|
||||
@@ -4305,7 +4306,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_ext_tests"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"env_logger 0.11.7",
|
||||
"lazy_static",
|
||||
@@ -4316,7 +4317,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_macros"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -4325,18 +4326,19 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_node"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"napi",
|
||||
"napi-build",
|
||||
"napi-derive",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"turso_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "turso_parser"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"bitflags 2.9.0",
|
||||
"criterion",
|
||||
@@ -4352,7 +4354,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_sqlite3"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"env_logger 0.11.7",
|
||||
"libc",
|
||||
@@ -4365,13 +4367,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_sqlite3_parser"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"bitflags 2.9.0",
|
||||
"cc",
|
||||
"env_logger 0.11.7",
|
||||
"fallible-iterator",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.11.0",
|
||||
"log",
|
||||
"memchr",
|
||||
"miette",
|
||||
@@ -4383,7 +4385,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_stress"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"anarchist-readable-name-generator-lib 0.1.2",
|
||||
"antithesis_sdk",
|
||||
@@ -4399,7 +4401,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_sync_engine"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"bytes",
|
||||
@@ -4425,7 +4427,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "turso_sync_js"
|
||||
version = "0.1.5-pre.2"
|
||||
version = "0.1.5-pre.4"
|
||||
dependencies = [
|
||||
"genawaiter",
|
||||
"http",
|
||||
|
||||
34 Cargo.toml
@@ -33,29 +33,29 @@ members = [
exclude = ["perf/latency/limbo"]

[workspace.package]
version = "0.1.5-pre.2"
version = "0.1.5-pre.4"
authors = ["the Limbo authors"]
edition = "2021"
license = "MIT"
repository = "https://github.com/tursodatabase/turso"

[workspace.dependencies]
turso = { path = "bindings/rust", version = "0.1.5-pre.2" }
turso_node = { path = "bindings/javascript", version = "0.1.5-pre.2" }
limbo_completion = { path = "extensions/completion", version = "0.1.5-pre.2" }
turso_core = { path = "core", version = "0.1.5-pre.2" }
turso_sync_engine = { path = "sync/engine", version = "0.1.5-pre.2" }
limbo_crypto = { path = "extensions/crypto", version = "0.1.5-pre.2" }
limbo_csv = { path = "extensions/csv", version = "0.1.5-pre.2" }
turso_ext = { path = "extensions/core", version = "0.1.5-pre.2" }
turso_ext_tests = { path = "extensions/tests", version = "0.1.5-pre.2" }
limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.5-pre.2" }
turso_macros = { path = "macros", version = "0.1.5-pre.2" }
limbo_percentile = { path = "extensions/percentile", version = "0.1.5-pre.2" }
limbo_regexp = { path = "extensions/regexp", version = "0.1.5-pre.2" }
turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.5-pre.2" }
limbo_uuid = { path = "extensions/uuid", version = "0.1.5-pre.2" }
turso_parser = { path = "parser", version = "0.1.5-pre.2" }
turso = { path = "bindings/rust", version = "0.1.5-pre.4" }
turso_node = { path = "bindings/javascript", version = "0.1.5-pre.4" }
limbo_completion = { path = "extensions/completion", version = "0.1.5-pre.4" }
turso_core = { path = "core", version = "0.1.5-pre.4" }
turso_sync_engine = { path = "sync/engine", version = "0.1.5-pre.4" }
limbo_crypto = { path = "extensions/crypto", version = "0.1.5-pre.4" }
limbo_csv = { path = "extensions/csv", version = "0.1.5-pre.4" }
turso_ext = { path = "extensions/core", version = "0.1.5-pre.4" }
turso_ext_tests = { path = "extensions/tests", version = "0.1.5-pre.4" }
limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.5-pre.4" }
turso_macros = { path = "macros", version = "0.1.5-pre.4" }
limbo_percentile = { path = "extensions/percentile", version = "0.1.5-pre.4" }
limbo_regexp = { path = "extensions/regexp", version = "0.1.5-pre.4" }
turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.5-pre.4" }
limbo_uuid = { path = "extensions/uuid", version = "0.1.5-pre.4" }
turso_parser = { path = "parser", version = "0.1.5-pre.4" }
sql_generation = { path = "sql_generation" }
strum = { version = "0.26", features = ["derive"] }
strum_macros = "0.26"
@@ -110,6 +110,7 @@ WORKDIR /app
COPY ./antithesis-tests/bank-test/*.py /opt/antithesis/test/v1/bank-test/
COPY ./antithesis-tests/stress-composer/*.py /opt/antithesis/test/v1/stress-composer/
COPY ./antithesis-tests/stress /opt/antithesis/test/v1/stress
COPY ./antithesis-tests/stress-io_uring /opt/antithesis/test/v1/stress-io_uring
COPY ./antithesis-tests/stress-unreliable /opt/antithesis/test/v1/stress-unreliable
RUN chmod 777 -R /opt/antithesis/test/v1
@@ -35,7 +35,7 @@ cur_init.execute("CREATE TABLE schemas (schema TEXT, tbl INT)")
cur_init.execute("CREATE TABLE indexes (idx_name TEXT, tbl_name TEXT, idx_type TEXT, cols TEXT)")

try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Error connecting to database: {e}")
exit(0)

@@ -29,7 +29,7 @@ print(f"Selected table: tbl_{selected_tbl} with {tbl_schema['colCount']} columns

# Connect to the main database
try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -29,7 +29,7 @@ tbl_schema = json.loads(schema_json)

# Connect to the main database
try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -18,7 +18,7 @@ cur_init = con_init.cursor()

# Connect to the main database
try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -32,7 +32,7 @@ pk = tbl_schema["pk"]
cols = [f"col_{col}" for col in range(tbl_schema["colCount"]) if col != pk]

try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -15,7 +15,7 @@ cur_init = con_init.cursor()

# Connect to the main database
try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -24,7 +24,7 @@ selected_idx = get_random() % len(existing_schemas)
selected_tbl = existing_schemas[selected_idx][0]

try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -28,7 +28,7 @@ tbl_schema = json.loads(schema_json)
cols = ", ".join([f"col_{col}" for col in range(tbl_schema["colCount"])])

try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -28,7 +28,7 @@ tbl_schema = json.loads(schema_json)
cols = ", ".join([f"col_{col}" for col in range(tbl_schema["colCount"])])

try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -28,7 +28,7 @@ tbl_schema = json.loads(schema_json)
cols = ", ".join([f"col_{col}" for col in range(tbl_schema["colCount"])])

try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -29,7 +29,7 @@ tbl_schema = json.loads(schema_json)
tbl_name = f"tbl_{selected_tbl}"

try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)

@@ -32,7 +32,7 @@ pk = tbl_schema["pk"]
cols = [f"col_{col}" for col in range(tbl_schema["colCount"]) if col != pk]
# print(cols)
try:
con = turso.connect("stress_composer.db", experimental_indexes=True)
con = turso.connect("stress_composer.db")
except Exception as e:
print(f"Failed to open stress_composer.db. Exiting... {e}")
exit(0)
3 antithesis-tests/stress-io_uring/singleton_driver_stress.sh (Executable file)
@@ -0,0 +1,3 @@
#!/usr/bin/env bash

/bin/turso_stress --silent --nr-threads 2 --nr-iterations 10000 --vfs io_uring
2 bindings/javascript/.gitignore (vendored)
@@ -196,3 +196,5 @@ Cargo.lock

*.node
*.wasm

npm

@@ -11,3 +11,5 @@ yarn.lock
.yarn
__test__
renovate.json
examples
perf
@@ -15,9 +15,11 @@ turso_core = { workspace = true }
napi = { version = "3.1.3", default-features = false, features = ["napi6"] }
napi-derive = { version = "3.1.1", default-features = true }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tracing.workspace = true

[features]
encryption = ["turso_core/encryption"]
browser = []

[build-dependencies]
napi-build = "2.2.3"
20 bindings/javascript/Makefile (Normal file)
@@ -0,0 +1,20 @@
pack-native:
npm publish --dry-run && npm pack
pack-browser:
cp package.json package.native.json
cp package.browser.json package.json
npm publish --dry-run && npm pack; cp package.native.json package.json

publish-native:
npm publish --access public
publish-browser:
cp package.json package.native.json
cp package.browser.json package.json
npm publish --access public; cp package.native.json package.json

publish-native-next:
npm publish --tag next --access public
publish-browser-next:
cp package.json package.native.json
cp package.browser.json package.json
npm publish --tag next --access public; cp package.native.json package.json
@@ -1 +0,0 @@
export * from '@tursodatabase/database-wasm32-wasi'
@@ -1,398 +0,0 @@
|
||||
// prettier-ignore
|
||||
/* eslint-disable */
|
||||
// @ts-nocheck
|
||||
/* auto-generated by NAPI-RS */
|
||||
|
||||
import { createRequire } from 'node:module'
|
||||
const require = createRequire(import.meta.url)
|
||||
const __dirname = new URL('.', import.meta.url).pathname
|
||||
|
||||
const { readFileSync } = require('node:fs')
|
||||
let nativeBinding = null
|
||||
const loadErrors = []
|
||||
|
||||
const isMusl = () => {
|
||||
let musl = false
|
||||
if (process.platform === 'linux') {
|
||||
musl = isMuslFromFilesystem()
|
||||
if (musl === null) {
|
||||
musl = isMuslFromReport()
|
||||
}
|
||||
if (musl === null) {
|
||||
musl = isMuslFromChildProcess()
|
||||
}
|
||||
}
|
||||
return musl
|
||||
}
|
||||
|
||||
const isFileMusl = (f) => f.includes('libc.musl-') || f.includes('ld-musl-')
|
||||
|
||||
const isMuslFromFilesystem = () => {
|
||||
try {
|
||||
return readFileSync('/usr/bin/ldd', 'utf-8').includes('musl')
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
const isMuslFromReport = () => {
|
||||
let report = null
|
||||
if (typeof process.report?.getReport === 'function') {
|
||||
process.report.excludeNetwork = true
|
||||
report = process.report.getReport()
|
||||
}
|
||||
if (!report) {
|
||||
return null
|
||||
}
|
||||
if (report.header && report.header.glibcVersionRuntime) {
|
||||
return false
|
||||
}
|
||||
if (Array.isArray(report.sharedObjects)) {
|
||||
if (report.sharedObjects.some(isFileMusl)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const isMuslFromChildProcess = () => {
|
||||
try {
|
||||
return require('child_process').execSync('ldd --version', { encoding: 'utf8' }).includes('musl')
|
||||
} catch (e) {
|
||||
// If we reach this case, we don't know if the system is musl or not, so is better to just fallback to false
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
function requireNative() {
|
||||
if (process.env.NAPI_RS_NATIVE_LIBRARY_PATH) {
|
||||
try {
|
||||
nativeBinding = require(process.env.NAPI_RS_NATIVE_LIBRARY_PATH);
|
||||
} catch (err) {
|
||||
loadErrors.push(err)
|
||||
}
|
||||
} else if (process.platform === 'android') {
|
||||
if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.android-arm64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-android-arm64')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm') {
|
||||
try {
|
||||
return require('./turso.android-arm-eabi.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-android-arm-eabi')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on Android ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'win32') {
|
||||
if (process.arch === 'x64') {
|
||||
try {
|
||||
return require('./turso.win32-x64-msvc.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-win32-x64-msvc')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'ia32') {
|
||||
try {
|
||||
return require('./turso.win32-ia32-msvc.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-win32-ia32-msvc')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.win32-arm64-msvc.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-win32-arm64-msvc')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on Windows: ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'darwin') {
|
||||
try {
|
||||
return require('./turso.darwin-universal.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-darwin-universal')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
if (process.arch === 'x64') {
|
||||
try {
|
||||
return require('./turso.darwin-x64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-darwin-x64')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.darwin-arm64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-darwin-arm64')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on macOS: ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'freebsd') {
|
||||
if (process.arch === 'x64') {
|
||||
try {
|
||||
return require('./turso.freebsd-x64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-freebsd-x64')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.freebsd-arm64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-freebsd-arm64')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on FreeBSD: ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'linux') {
|
||||
if (process.arch === 'x64') {
|
||||
if (isMusl()) {
|
||||
try {
|
||||
return require('./turso.linux-x64-musl.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-x64-musl')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
return require('./turso.linux-x64-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-x64-gnu')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
}
|
||||
} else if (process.arch === 'arm64') {
|
||||
if (isMusl()) {
|
||||
try {
|
||||
return require('./turso.linux-arm64-musl.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-arm64-musl')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
return require('./turso.linux-arm64-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-arm64-gnu')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
}
|
||||
} else if (process.arch === 'arm') {
|
||||
if (isMusl()) {
|
||||
try {
|
||||
return require('./turso.linux-arm-musleabihf.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-arm-musleabihf')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
return require('./turso.linux-arm-gnueabihf.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-arm-gnueabihf')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
}
|
||||
} else if (process.arch === 'riscv64') {
|
||||
if (isMusl()) {
|
||||
try {
|
||||
return require('./turso.linux-riscv64-musl.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-riscv64-musl')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
return require('./turso.linux-riscv64-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-riscv64-gnu')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
}
|
||||
} else if (process.arch === 'ppc64') {
|
||||
try {
|
||||
return require('./turso.linux-ppc64-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-ppc64-gnu')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 's390x') {
|
||||
try {
|
||||
return require('./turso.linux-s390x-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-s390x-gnu')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on Linux: ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'openharmony') {
|
||||
if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.linux-arm64-ohos.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-arm64-ohos')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'x64') {
|
||||
try {
|
||||
return require('./turso.linux-x64-ohos.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-x64-ohos')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm') {
|
||||
try {
|
||||
return require('./turso.linux-arm-ohos.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
return require('@tursodatabase/database-linux-arm-ohos')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on OpenHarmony: ${process.arch}`))
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported OS: ${process.platform}, architecture: ${process.arch}`))
|
||||
}
|
||||
}
|
||||
|
||||
nativeBinding = requireNative()
|
||||
|
||||
if (!nativeBinding || process.env.NAPI_RS_FORCE_WASI) {
|
||||
try {
|
||||
nativeBinding = require('./turso.wasi.cjs')
|
||||
} catch (err) {
|
||||
if (process.env.NAPI_RS_FORCE_WASI) {
|
||||
loadErrors.push(err)
|
||||
}
|
||||
}
|
||||
if (!nativeBinding) {
|
||||
try {
|
||||
nativeBinding = require('@tursodatabase/database-wasm32-wasi')
|
||||
} catch (err) {
|
||||
if (process.env.NAPI_RS_FORCE_WASI) {
|
||||
loadErrors.push(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!nativeBinding) {
|
||||
if (loadErrors.length > 0) {
|
||||
throw new Error(
|
||||
`Cannot find native binding. ` +
|
||||
`npm has a bug related to optional dependencies (https://github.com/npm/cli/issues/4828). ` +
|
||||
'Please try `npm i` again after removing both package-lock.json and node_modules directory.',
|
||||
{ cause: loadErrors }
|
||||
)
|
||||
}
|
||||
throw new Error(`Failed to load native binding`)
|
||||
}
|
||||
|
||||
const { Database, Statement } = nativeBinding
|
||||
export { Database }
|
||||
export { Statement }
|
||||
4003 bindings/javascript/package-lock.json (generated): file diff suppressed because it is too large
@@ -1,64 +1,13 @@
{
"name": "@tursodatabase/database",
"version": "0.1.5-pre.2",
"repository": {
"type": "git",
"url": "https://github.com/tursodatabase/turso"
},
"description": "The Turso database library",
"module": "./dist/promise.js",
"main": "./dist/promise.js",
"type": "module",
"exports": {
".": "./dist/promise.js",
"./compat": "./dist/compat.js"
},
"files": [
"browser.js",
"index.js",
"index.d.ts",
"dist/**"
],
"types": "index.d.ts",
"napi": {
"binaryName": "turso",
"targets": [
"x86_64-unknown-linux-gnu",
"x86_64-pc-windows-msvc",
"universal-apple-darwin",
"aarch64-unknown-linux-gnu",
"wasm32-wasip1-threads"
]
},
"license": "MIT",
"devDependencies": {
"@napi-rs/cli": "^3.0.4",
"@napi-rs/wasm-runtime": "^1.0.1",
"ava": "^6.0.1",
"better-sqlite3": "^11.9.1",
"typescript": "^5.9.2"
},
"ava": {
"timeout": "3m"
},
"engines": {
"node": ">= 10"
},
"scripts": {
"artifacts": "napi artifacts",
"build": "npm exec tsc && napi build --platform --release --esm",
"build:debug": "npm exec tsc && napi build --platform",
"prepublishOnly": "npm exec tsc && napi prepublish -t npm",
"test": "true",
"universal": "napi universalize",
"version": "napi version"
"build": "npm run build --workspaces",
"tsc-build": "npm run tsc-build --workspaces",
"test": "npm run test --workspaces"
},
"packageManager": "yarn@4.9.2",
"imports": {
"#entry-point": {
"types": "./index.d.ts",
"browser": "./browser.js",
"node": "./index.js"
}
}
"workspaces": [
"packages/common",
"packages/native",
"packages/browser"
],
"version": "0.1.5-pre.4"
}
124 bindings/javascript/packages/browser/README.md (Normal file)
@@ -0,0 +1,124 @@
|
||||
<p align="center">
|
||||
<h1 align="center">Turso Database for JavaScript in Browser</h1>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a title="JavaScript" target="_blank" href="https://www.npmjs.com/package/@tursodatabase/database"><img alt="npm" src="https://img.shields.io/npm/v/@tursodatabase/database"></a>
|
||||
<a title="MIT" target="_blank" href="https://github.com/tursodatabase/turso/blob/main/LICENSE.md"><img src="http://img.shields.io/badge/license-MIT-orange.svg?style=flat-square"></a>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a title="Users Discord" target="_blank" href="https://tur.so/discord"><img alt="Chat with other users of Turso on Discord" src="https://img.shields.io/discord/933071162680958986?label=Discord&logo=Discord&style=social"></a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
## About
|
||||
|
||||
This package is the Turso embedded database library for JavaScript in Browser.
|
||||
|
||||
> **⚠️ Warning:** This software is ALPHA, only use for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now.
|
||||
|
||||
## Features
|
||||
|
||||
- **SQLite compatible:** SQLite query language and file format support ([status](https://github.com/tursodatabase/turso/blob/main/COMPAT.md)).
|
||||
- **In-process**: No network overhead, runs directly in your Node.js process
|
||||
- **TypeScript support**: Full TypeScript definitions included
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
npm install @tursodatabase/database-browser
|
||||
```
|
||||
|
||||
## Getting Started
|
||||
|
||||
### In-Memory Database
|
||||
|
||||
```javascript
|
||||
import { connect } from '@tursodatabase/database-browser';
|
||||
|
||||
// Create an in-memory database
|
||||
const db = await connect(':memory:');
|
||||
|
||||
// Create a table
|
||||
await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)');
|
||||
|
||||
// Insert data
|
||||
const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
|
||||
await insert.run('Alice', 'alice@example.com');
|
||||
await insert.run('Bob', 'bob@example.com');
|
||||
|
||||
// Query data
|
||||
const users = await db.prepare('SELECT * FROM users').all();
|
||||
console.log(users);
|
||||
// Output: [
|
||||
// { id: 1, name: 'Alice', email: 'alice@example.com' },
|
||||
// { id: 2, name: 'Bob', email: 'bob@example.com' }
|
||||
// ]
|
||||
```
|
||||
|
||||
### File-Based Database
|
||||
|
||||
```javascript
|
||||
import { connect } from '@tursodatabase/database-browser';
|
||||
|
||||
// Create or open a database file
|
||||
const db = await connect('my-database.db');
|
||||
|
||||
// Create a table
|
||||
await db.exec(`
|
||||
CREATE TABLE IF NOT EXISTS posts (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
title TEXT NOT NULL,
|
||||
content TEXT,
|
||||
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||
)
|
||||
`);
|
||||
|
||||
// Insert a post
|
||||
const insertPost = db.prepare('INSERT INTO posts (title, content) VALUES (?, ?)');
|
||||
const result = await insertPost.run('Hello World', 'This is my first blog post!');
|
||||
|
||||
console.log(`Inserted post with ID: ${result.lastInsertRowid}`);
|
||||
```
|
||||
|
||||
### Transactions
|
||||
|
||||
```javascript
|
||||
import { connect } from '@tursodatabase/database-browser';
|
||||
|
||||
const db = await connect('transactions.db');
|
||||
|
||||
// Using transactions for atomic operations
|
||||
const transaction = db.transaction(async (users) => {
|
||||
const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
|
||||
for (const user of users) {
|
||||
await insert.run(user.name, user.email);
|
||||
}
|
||||
});
|
||||
|
||||
// Execute transaction
|
||||
await transaction([
|
||||
{ name: 'Alice', email: 'alice@example.com' },
|
||||
{ name: 'Bob', email: 'bob@example.com' }
|
||||
]);
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
For complete API documentation, see [JavaScript API Reference](../../../../docs/javascript-api-reference.md).
|
||||
|
||||
## Related Packages
|
||||
|
||||
* The [@tursodatabase/serverless](https://www.npmjs.com/package/@tursodatabase/serverless) package provides a serverless driver with the same API.
|
||||
* The [@tursodatabase/sync](https://www.npmjs.com/package/@tursodatabase/sync) package provides bidirectional sync between a local Turso database and Turso Cloud.
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the [MIT license](../../LICENSE.md).
|
||||
|
||||
## Support
|
||||
|
||||
- [GitHub Issues](https://github.com/tursodatabase/turso/issues)
|
||||
- [Documentation](https://docs.turso.tech)
|
||||
- [Discord Community](https://tur.so/discord)
|
||||
@@ -1,7 +1,7 @@
|
||||
import {
|
||||
createOnMessage as __wasmCreateOnMessageForFsProxy,
|
||||
getDefaultContext as __emnapiGetDefaultContext,
|
||||
instantiateNapiModuleSync as __emnapiInstantiateNapiModuleSync,
|
||||
instantiateNapiModule as __emnapiInstantiateNapiModule,
|
||||
WASI as __WASI,
|
||||
} from '@napi-rs/wasm-runtime'
|
||||
|
||||
@@ -23,19 +23,25 @@ const __sharedMemory = new WebAssembly.Memory({
|
||||
|
||||
const __wasmFile = await fetch(__wasmUrl).then((res) => res.arrayBuffer())
|
||||
|
||||
export let MainWorker = null;
|
||||
|
||||
function panic(name) {
|
||||
throw new Error(`method ${name} must be invoked only from the main thread`);
|
||||
}
|
||||
|
||||
const {
|
||||
instance: __napiInstance,
|
||||
module: __wasiModule,
|
||||
napiModule: __napiModule,
|
||||
} = __emnapiInstantiateNapiModuleSync(__wasmFile, {
|
||||
} = await __emnapiInstantiateNapiModule(__wasmFile, {
|
||||
context: __emnapiContext,
|
||||
asyncWorkPoolSize: 4,
|
||||
asyncWorkPoolSize: 1,
|
||||
wasi: __wasi,
|
||||
onCreateWorker() {
|
||||
const worker = new Worker(new URL('./wasi-worker-browser.mjs', import.meta.url), {
|
||||
const worker = new Worker(new URL('./worker.mjs', import.meta.url), {
|
||||
type: 'module',
|
||||
})
|
||||
|
||||
MainWorker = worker;
|
||||
return worker
|
||||
},
|
||||
overwriteImports(importObject) {
|
||||
@@ -44,6 +50,13 @@ const {
|
||||
...importObject.napi,
|
||||
...importObject.emnapi,
|
||||
memory: __sharedMemory,
|
||||
is_web_worker: () => false,
|
||||
lookup_file: () => panic("lookup_file"),
|
||||
read: () => panic("read"),
|
||||
write: () => panic("write"),
|
||||
sync: () => panic("sync"),
|
||||
truncate: () => panic("truncate"),
|
||||
size: () => panic("size"),
|
||||
}
|
||||
return importObject
|
||||
},
|
||||
@@ -57,4 +70,8 @@ const {
|
||||
})
|
||||
export default __napiModule.exports
|
||||
export const Database = __napiModule.exports.Database
|
||||
export const Opfs = __napiModule.exports.Opfs
|
||||
export const OpfsFile = __napiModule.exports.OpfsFile
|
||||
export const Statement = __napiModule.exports.Statement
|
||||
export const connect = __napiModule.exports.connect
|
||||
export const initThreadPool = __napiModule.exports.initThreadPool
|
||||
44 bindings/javascript/packages/browser/package.json (Normal file)
@@ -0,0 +1,44 @@
|
||||
{
|
||||
"name": "@tursodatabase/database-browser",
|
||||
"version": "0.1.5-pre.3",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/tursodatabase/turso"
|
||||
},
|
||||
"license": "MIT",
|
||||
"main": "dist/promise.js",
|
||||
"packageManager": "yarn@4.9.2",
|
||||
"files": [
|
||||
"index.js",
|
||||
"worker.mjs",
|
||||
"turso.wasm32-wasi.wasm",
|
||||
"dist/**",
|
||||
"README.md"
|
||||
],
|
||||
"devDependencies": {
|
||||
"@napi-rs/cli": "^3.1.5",
|
||||
"@vitest/browser": "^3.2.4",
|
||||
"playwright": "^1.55.0",
|
||||
"typescript": "^5.9.2",
|
||||
"vitest": "^3.2.4"
|
||||
},
|
||||
"scripts": {
|
||||
"napi-build": "napi build --features browser --release --platform --target wasm32-wasip1-threads --no-js --manifest-path ../../Cargo.toml --output-dir . && rm index.d.ts turso.wasi* wasi* browser.js",
|
||||
"tsc-build": "npm exec tsc",
|
||||
"build": "npm run napi-build && npm run tsc-build",
|
||||
"test": "CI=1 vitest --browser=chromium --run && CI=1 vitest --browser=firefox --run"
|
||||
},
|
||||
"napi": {
|
||||
"binaryName": "turso",
|
||||
"targets": [
|
||||
"wasm32-wasip1-threads"
|
||||
]
|
||||
},
|
||||
"imports": {
|
||||
"#index": "./index.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"@napi-rs/wasm-runtime": "^1.0.3",
|
||||
"@tursodatabase/database-common": "^0.1.5-pre.3"
|
||||
}
|
||||
}
|
||||
95 bindings/javascript/packages/browser/promise.test.ts (Normal file)
@@ -0,0 +1,95 @@
|
||||
import { expect, test, afterEach } from 'vitest'
|
||||
import { connect } from './promise.js'
|
||||
|
||||
test('in-memory db', async () => {
|
||||
const db = await connect(":memory:");
|
||||
await db.exec("CREATE TABLE t(x)");
|
||||
await db.exec("INSERT INTO t VALUES (1), (2), (3)");
|
||||
const stmt = db.prepare("SELECT * FROM t WHERE x % 2 = ?");
|
||||
const rows = await stmt.all([1]);
|
||||
expect(rows).toEqual([{ x: 1 }, { x: 3 }]);
|
||||
})
|
||||
|
||||
test('on-disk db', async () => {
|
||||
const path = `test-${(Math.random() * 10000) | 0}.db`;
|
||||
const db1 = await connect(path);
|
||||
await db1.exec("CREATE TABLE t(x)");
|
||||
await db1.exec("INSERT INTO t VALUES (1), (2), (3)");
|
||||
const stmt1 = db1.prepare("SELECT * FROM t WHERE x % 2 = ?");
|
||||
expect(stmt1.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
|
||||
const rows1 = await stmt1.all([1]);
|
||||
expect(rows1).toEqual([{ x: 1 }, { x: 3 }]);
|
||||
await db1.close();
|
||||
stmt1.close();
|
||||
|
||||
const db2 = await connect(path);
|
||||
const stmt2 = db2.prepare("SELECT * FROM t WHERE x % 2 = ?");
|
||||
expect(stmt2.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
|
||||
const rows2 = await stmt2.all([1]);
|
||||
expect(rows2).toEqual([{ x: 1 }, { x: 3 }]);
|
||||
db2.close();
|
||||
})
|
||||
|
||||
test('attach', async () => {
|
||||
const path1 = `test-${(Math.random() * 10000) | 0}.db`;
|
||||
const path2 = `test-${(Math.random() * 10000) | 0}.db`;
|
||||
const db1 = await connect(path1);
|
||||
await db1.exec("CREATE TABLE t(x)");
|
||||
await db1.exec("INSERT INTO t VALUES (1), (2), (3)");
|
||||
const db2 = await connect(path2);
|
||||
await db2.exec("CREATE TABLE q(x)");
|
||||
await db2.exec("INSERT INTO q VALUES (4), (5), (6)");
|
||||
|
||||
await db1.exec(`ATTACH '${path2}' as secondary`);
|
||||
|
||||
const stmt = db1.prepare("SELECT * FROM t UNION ALL SELECT * FROM secondary.q");
|
||||
expect(stmt.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
|
||||
const rows = await stmt.all([1]);
|
||||
expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }, { x: 4 }, { x: 5 }, { x: 6 }]);
|
||||
})
|
||||
|
||||
test('blobs', async () => {
|
||||
const db = await connect(":memory:");
|
||||
const rows = await db.prepare("SELECT x'1020' as x").all();
|
||||
expect(rows).toEqual([{ x: new Uint8Array([16, 32]) }])
|
||||
})
|
||||
|
||||
|
||||
test('example-1', async () => {
|
||||
const db = await connect(':memory:');
|
||||
await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)');
|
||||
|
||||
const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
|
||||
await insert.run('Alice', 'alice@example.com');
|
||||
await insert.run('Bob', 'bob@example.com');
|
||||
|
||||
const users = await db.prepare('SELECT * FROM users').all();
|
||||
expect(users).toEqual([
|
||||
{ id: 1, name: 'Alice', email: 'alice@example.com' },
|
||||
{ id: 2, name: 'Bob', email: 'bob@example.com' }
|
||||
]);
|
||||
})
|
||||
|
||||
test('example-2', async () => {
|
||||
const db = await connect(':memory:');
|
||||
await db.exec('CREATE TABLE users (name, email)');
|
||||
// Using transactions for atomic operations
|
||||
const transaction = db.transaction(async (users) => {
|
||||
const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
|
||||
for (const user of users) {
|
||||
await insert.run(user.name, user.email);
|
||||
}
|
||||
});
|
||||
|
||||
// Execute transaction
|
||||
await transaction([
|
||||
{ name: 'Alice', email: 'alice@example.com' },
|
||||
{ name: 'Bob', email: 'bob@example.com' }
|
||||
]);
|
||||
|
||||
const rows = await db.prepare('SELECT * FROM users').all();
|
||||
expect(rows).toEqual([
|
||||
{ name: 'Alice', email: 'alice@example.com' },
|
||||
{ name: 'Bob', email: 'bob@example.com' }
|
||||
]);
|
||||
})
|
||||
78 bindings/javascript/packages/browser/promise.ts (Normal file)
@@ -0,0 +1,78 @@
|
||||
import { DatabasePromise, NativeDatabase, DatabaseOpts, SqliteError } from "@tursodatabase/database-common"
|
||||
import { connect as nativeConnect, initThreadPool, MainWorker } from "#index";
|
||||
|
||||
let workerRequestId = 0;
|
||||
class Database extends DatabasePromise {
|
||||
files: string[];
|
||||
constructor(db: NativeDatabase, files: string[], opts: DatabaseOpts = {}) {
|
||||
super(db, opts)
|
||||
this.files = files;
|
||||
}
|
||||
async close() {
|
||||
let currentId = workerRequestId;
|
||||
workerRequestId += this.files.length;
|
||||
|
||||
let tasks = [];
|
||||
for (const file of this.files) {
|
||||
(MainWorker as any).postMessage({ __turso__: "unregister", path: file, id: currentId });
|
||||
tasks.push(waitFor(currentId));
|
||||
currentId += 1;
|
||||
}
|
||||
await Promise.all(tasks);
|
||||
this.db.close();
|
||||
}
|
||||
}
|
||||
|
||||
function waitFor(id: number): Promise<any> {
|
||||
let waitResolve, waitReject;
|
||||
const callback = msg => {
|
||||
if (msg.data.id == id) {
|
||||
if (msg.data.error != null) {
|
||||
waitReject(msg.data.error)
|
||||
} else {
|
||||
waitResolve()
|
||||
}
|
||||
cleanup();
|
||||
}
|
||||
};
|
||||
const cleanup = () => (MainWorker as any).removeEventListener("message", callback);
|
||||
|
||||
(MainWorker as any).addEventListener("message", callback);
|
||||
const result = new Promise((resolve, reject) => {
|
||||
waitResolve = resolve;
|
||||
waitReject = reject;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new database connection asynchronously.
|
||||
*
|
||||
* @param {string} path - Path to the database file.
|
||||
* @param {Object} opts - Options for database behavior.
|
||||
* @returns {Promise<Database>} - A promise that resolves to a Database instance.
|
||||
*/
|
||||
async function connect(path: string, opts: DatabaseOpts = {}): Promise<Database> {
|
||||
if (path == ":memory:") {
|
||||
const db = await nativeConnect(path, { tracing: opts.tracing });
|
||||
return new Database(db, [], opts);
|
||||
}
|
||||
await initThreadPool();
|
||||
if (MainWorker == null) {
|
||||
throw new Error("panic: MainWorker is not set");
|
||||
}
|
||||
|
||||
let currentId = workerRequestId;
|
||||
workerRequestId += 2;
|
||||
|
||||
let dbHandlePromise = waitFor(currentId);
|
||||
let walHandlePromise = waitFor(currentId + 1);
|
||||
(MainWorker as any).postMessage({ __turso__: "register", path: `${path}`, id: currentId });
|
||||
(MainWorker as any).postMessage({ __turso__: "register", path: `${path}-wal`, id: currentId + 1 });
|
||||
await Promise.all([dbHandlePromise, walHandlePromise]);
|
||||
const db = await nativeConnect(path, { tracing: opts.tracing });
|
||||
const files = [path, `${path}-wal`];
|
||||
return new Database(db, files, opts);
|
||||
}
|
||||
|
||||
export { connect, Database, SqliteError }
|
||||
21 bindings/javascript/packages/browser/tsconfig.json (Normal file)
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"skipLibCheck": true,
|
||||
"declaration": true,
|
||||
"declarationMap": true,
|
||||
"module": "nodenext",
|
||||
"target": "esnext",
|
||||
"outDir": "dist/",
|
||||
"lib": [
|
||||
"es2020"
|
||||
],
|
||||
"paths": {
|
||||
"#index": [
|
||||
"./index.js"
|
||||
]
|
||||
}
|
||||
},
|
||||
"include": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
23 bindings/javascript/packages/browser/vitest.config.ts (Normal file)
@@ -0,0 +1,23 @@
|
||||
import { defineConfig } from 'vitest/config'
|
||||
|
||||
export default defineConfig({
|
||||
define: {
|
||||
'process.env.NODE_DEBUG_NATIVE': 'false',
|
||||
},
|
||||
server: {
|
||||
headers: {
|
||||
"Cross-Origin-Embedder-Policy": "require-corp",
|
||||
"Cross-Origin-Opener-Policy": "same-origin"
|
||||
},
|
||||
},
|
||||
test: {
|
||||
browser: {
|
||||
enabled: true,
|
||||
provider: 'playwright',
|
||||
instances: [
|
||||
{ browser: 'chromium' },
|
||||
{ browser: 'firefox' }
|
||||
],
|
||||
},
|
||||
},
|
||||
})
|
||||
bindings/javascript/packages/browser/worker.mjs (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
import { instantiateNapiModuleSync, MessageHandler, WASI } from '@napi-rs/wasm-runtime'
|
||||
|
||||
var fileByPath = new Map();
|
||||
var fileByHandle = new Map();
|
||||
let fileHandles = 0;
|
||||
var memory = null;
|
||||
|
||||
function getUint8ArrayFromWasm(ptr, len) {
|
||||
ptr = ptr >>> 0;
|
||||
return new Uint8Array(memory.buffer).subarray(ptr, ptr + len);
|
||||
}
|
||||
|
||||
|
||||
async function registerFile(path) {
|
||||
if (fileByPath.has(path)) {
|
||||
return;
|
||||
}
|
||||
const opfsRoot = await navigator.storage.getDirectory();
|
||||
const opfsHandle = await opfsRoot.getFileHandle(path, { create: true });
|
||||
const opfsSync = await opfsHandle.createSyncAccessHandle();
|
||||
fileHandles += 1;
|
||||
fileByPath.set(path, { handle: fileHandles, sync: opfsSync });
|
||||
fileByHandle.set(fileHandles, opfsSync);
|
||||
}
|
||||
|
||||
async function unregisterFile(path) {
|
||||
const file = fileByPath.get(path);
|
||||
if (file == null) {
|
||||
return;
|
||||
}
|
||||
fileByPath.delete(path);
|
||||
fileByHandle.delete(file.handle);
|
||||
file.sync.close();
|
||||
}
|
||||
|
||||
function lookup_file(pathPtr, pathLen) {
|
||||
try {
|
||||
const buffer = getUint8ArrayFromWasm(pathPtr, pathLen);
|
||||
const notShared = new Uint8Array(buffer.length);
|
||||
notShared.set(buffer);
|
||||
const decoder = new TextDecoder('utf-8');
|
||||
const path = decoder.decode(notShared);
|
||||
const file = fileByPath.get(path);
|
||||
if (file == null) {
|
||||
return -404;
|
||||
}
|
||||
return file.handle;
|
||||
} catch (e) {
|
||||
console.error('lookupFile', pathPtr, pathLen, e);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
function read(handle, bufferPtr, bufferLen, offset) {
|
||||
try {
|
||||
const buffer = getUint8ArrayFromWasm(bufferPtr, bufferLen);
|
||||
const file = fileByHandle.get(Number(handle));
|
||||
const result = file.read(buffer, { at: Number(offset) });
|
||||
return result;
|
||||
} catch (e) {
|
||||
console.error('read', handle, bufferPtr, bufferLen, offset, e);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
function write(handle, bufferPtr, bufferLen, offset) {
|
||||
try {
|
||||
const buffer = getUint8ArrayFromWasm(bufferPtr, bufferLen);
|
||||
const file = fileByHandle.get(Number(handle));
|
||||
const result = file.write(buffer, { at: Number(offset) });
|
||||
return result;
|
||||
} catch (e) {
|
||||
console.error('write', handle, bufferPtr, bufferLen, offset, e);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
function sync(handle) {
|
||||
try {
|
||||
const file = fileByHandle.get(Number(handle));
|
||||
file.flush();
|
||||
return 0;
|
||||
} catch (e) {
|
||||
console.error('sync', handle, e);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
function truncate(handle, size) {
|
||||
try {
|
||||
const file = fileByHandle.get(Number(handle));
|
||||
const result = file.truncate(size);
|
||||
return result;
|
||||
} catch (e) {
|
||||
console.error('truncate', handle, size, e);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
function size(handle) {
|
||||
try {
|
||||
const file = fileByHandle.get(Number(handle));
|
||||
const size = file.getSize()
|
||||
return size;
|
||||
} catch (e) {
|
||||
console.error('size', handle, e);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
const handler = new MessageHandler({
|
||||
onLoad({ wasmModule, wasmMemory }) {
|
||||
memory = wasmMemory;
|
||||
const wasi = new WASI({
|
||||
print: function () {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log.apply(console, arguments)
|
||||
},
|
||||
printErr: function () {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error.apply(console, arguments)
|
||||
},
|
||||
})
|
||||
return instantiateNapiModuleSync(wasmModule, {
|
||||
childThread: true,
|
||||
wasi,
|
||||
overwriteImports(importObject) {
|
||||
importObject.env = {
|
||||
...importObject.env,
|
||||
...importObject.napi,
|
||||
...importObject.emnapi,
|
||||
memory: wasmMemory,
|
||||
is_web_worker: () => true,
|
||||
lookup_file: lookup_file,
|
||||
read: read,
|
||||
write: write,
|
||||
sync: sync,
|
||||
truncate: truncate,
|
||||
size: size,
|
||||
}
|
||||
},
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
globalThis.onmessage = async function (e) {
|
||||
if (e.data.__turso__ == 'register') {
|
||||
try {
|
||||
await registerFile(e.data.path)
|
||||
self.postMessage({ id: e.data.id })
|
||||
} catch (error) {
|
||||
self.postMessage({ id: e.data.id, error: error });
|
||||
}
|
||||
return;
|
||||
} else if (e.data.__turso__ == 'unregister') {
|
||||
try {
|
||||
await unregisterFile(e.data.path)
|
||||
self.postMessage({ id: e.data.id })
|
||||
} catch (error) {
|
||||
self.postMessage({ id: e.data.id, error: error });
|
||||
}
|
||||
return;
|
||||
}
|
||||
handler.handle(e)
|
||||
}
|
||||
bindings/javascript/packages/common/README.md (new file, 8 lines)
@@ -0,0 +1,8 @@
## About

This package is the common JavaScript library for the Turso embedded database, shared between the final builds for Node.js and the browser.

Do not use this package directly; use `@tursodatabase/database` or `@tursodatabase/database-browser` instead.

> **⚠️ Warning:** This software is ALPHA, only use for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now.
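As a quick illustration of the split (a sketch based on the package layout described in this diff, not authoritative usage docs): Node applications import the promise API from `@tursodatabase/database`, whose default entry point is built on this common code, while the synchronous compat API is exposed under the `/compat` export of that package.

```typescript
// Promise API (default entry point of @tursodatabase/database).
import { connect } from "@tursodatabase/database";
// Synchronous compat API (the "./compat" export in the native package.json).
import { Database } from "@tursodatabase/database/compat";

// Async path: await each statement.
const adb = await connect(":memory:");
await adb.exec("CREATE TABLE t(x)");

// Sync path: calls block until the statement completes.
const sdb = new Database(":memory:");
sdb.exec("CREATE TABLE t(x)");
```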
@@ -1,12 +1,6 @@
|
||||
import { Database as NativeDB, Statement as NativeStatement } from "#entry-point";
|
||||
import { bindParams } from "./bind.js";
|
||||
|
||||
import { SqliteError } from "./sqlite-error.js";
|
||||
|
||||
// Step result constants
|
||||
const STEP_ROW = 1;
|
||||
const STEP_DONE = 2;
|
||||
const STEP_IO = 3;
|
||||
import { NativeDatabase, NativeStatement, STEP_IO, STEP_ROW, STEP_DONE } from "./types.js";
|
||||
|
||||
const convertibleErrorTypes = { TypeError };
|
||||
const CONVERTIBLE_ERROR_PREFIX = "[TURSO_CONVERT_TYPE]";
|
||||
@@ -35,7 +29,7 @@ function createErrorByName(name, message) {
|
||||
* Database represents a connection that can prepare and execute SQL statements.
|
||||
*/
|
||||
class Database {
|
||||
db: NativeDB;
|
||||
db: NativeDatabase;
|
||||
memory: boolean;
|
||||
open: boolean;
|
||||
private _inTransaction: boolean = false;
|
||||
@@ -50,15 +44,14 @@ class Database {
|
||||
* @param {boolean} [opts.fileMustExist=false] - If true, throws if database file does not exist.
|
||||
* @param {number} [opts.timeout=0] - Timeout duration in milliseconds for database operations. Defaults to 0 (no timeout).
|
||||
*/
|
||||
constructor(path: string, opts: any = {}) {
|
||||
constructor(db: NativeDatabase, opts: any = {}) {
|
||||
opts.readonly = opts.readonly === undefined ? false : opts.readonly;
|
||||
opts.fileMustExist =
|
||||
opts.fileMustExist === undefined ? false : opts.fileMustExist;
|
||||
opts.timeout = opts.timeout === undefined ? 0 : opts.timeout;
|
||||
|
||||
this.db = new NativeDB(path);
|
||||
this.db = db;
|
||||
this.memory = this.db.memory;
|
||||
const db = this.db;
|
||||
|
||||
Object.defineProperties(this, {
|
||||
inTransaction: {
|
||||
@@ -66,7 +59,7 @@ class Database {
|
||||
},
|
||||
name: {
|
||||
get() {
|
||||
return path;
|
||||
return db.path;
|
||||
},
|
||||
},
|
||||
readonly: {
|
||||
@@ -199,7 +192,7 @@ class Database {
|
||||
}
|
||||
|
||||
try {
|
||||
this.db.batch(sql);
|
||||
this.db.batchSync(sql);
|
||||
} catch (err) {
|
||||
throw convertError(err);
|
||||
}
|
||||
@@ -301,7 +294,7 @@ class Statement {
|
||||
this.stmt.reset();
|
||||
bindParams(this.stmt, bindParameters);
|
||||
for (; ;) {
|
||||
const stepResult = this.stmt.step();
|
||||
const stepResult = this.stmt.stepSync();
|
||||
if (stepResult === STEP_IO) {
|
||||
this.db.db.ioLoopSync();
|
||||
continue;
|
||||
@@ -330,7 +323,7 @@ class Statement {
|
||||
this.stmt.reset();
|
||||
bindParams(this.stmt, bindParameters);
|
||||
for (; ;) {
|
||||
const stepResult = this.stmt.step();
|
||||
const stepResult = this.stmt.stepSync();
|
||||
if (stepResult === STEP_IO) {
|
||||
this.db.db.ioLoopSync();
|
||||
continue;
|
||||
@@ -354,7 +347,7 @@ class Statement {
|
||||
bindParams(this.stmt, bindParameters);
|
||||
|
||||
while (true) {
|
||||
const stepResult = this.stmt.step();
|
||||
const stepResult = this.stmt.stepSync();
|
||||
if (stepResult === STEP_IO) {
|
||||
this.db.db.ioLoopSync();
|
||||
continue;
|
||||
@@ -378,7 +371,7 @@ class Statement {
|
||||
bindParams(this.stmt, bindParameters);
|
||||
const rows: any[] = [];
|
||||
for (; ;) {
|
||||
const stepResult = this.stmt.step();
|
||||
const stepResult = this.stmt.stepSync();
|
||||
if (stepResult === STEP_IO) {
|
||||
this.db.db.ioLoopSync();
|
||||
continue;
|
||||
@@ -417,4 +410,4 @@ class Statement {
|
||||
}
|
||||
}
|
||||
|
||||
export { Database, SqliteError }
|
||||
export { Database, Statement }
|
||||
bindings/javascript/packages/common/index.ts (new file, 6 lines)
@@ -0,0 +1,6 @@
import { NativeDatabase, NativeStatement, DatabaseOpts } from "./types.js";
import { Database as DatabaseCompat, Statement as StatementCompat } from "./compat.js";
import { Database as DatabasePromise, Statement as StatementPromise } from "./promise.js";
import { SqliteError } from "./sqlite-error.js";

export { DatabaseCompat, StatementCompat, DatabasePromise, StatementPromise, NativeDatabase, NativeStatement, SqliteError, DatabaseOpts }
bindings/javascript/packages/common/package.json (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
{
|
||||
"name": "@tursodatabase/database-common",
|
||||
"version": "0.1.5-pre.3",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/tursodatabase/turso"
|
||||
},
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"packageManager": "yarn@4.9.2",
|
||||
"files": [
|
||||
"dist/**",
|
||||
"README.md"
|
||||
],
|
||||
"devDependencies": {
|
||||
"typescript": "^5.9.2"
|
||||
},
|
||||
"scripts": {
|
||||
"tsc-build": "npm exec tsc",
|
||||
"build": "npm run tsc-build",
|
||||
"test": "echo 'no tests'"
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,6 @@
|
||||
import { Database as NativeDB, Statement as NativeStatement } from "#entry-point";
|
||||
import { bindParams } from "./bind.js";
|
||||
|
||||
import { SqliteError } from "./sqlite-error.js";
|
||||
|
||||
// Step result constants
|
||||
const STEP_ROW = 1;
|
||||
const STEP_DONE = 2;
|
||||
const STEP_IO = 3;
|
||||
import { NativeDatabase, NativeStatement, STEP_IO, STEP_ROW, STEP_DONE, DatabaseOpts } from "./types.js";
|
||||
|
||||
const convertibleErrorTypes = { TypeError };
|
||||
const CONVERTIBLE_ERROR_PREFIX = "[TURSO_CONVERT_TYPE]";
|
||||
@@ -35,7 +29,7 @@ function createErrorByName(name, message) {
|
||||
* Database represents a connection that can prepare and execute SQL statements.
|
||||
*/
|
||||
class Database {
|
||||
db: NativeDB;
|
||||
db: NativeDatabase;
|
||||
memory: boolean;
|
||||
open: boolean;
|
||||
private _inTransaction: boolean = false;
|
||||
@@ -49,19 +43,18 @@ class Database {
|
||||
* @param {boolean} [opts.fileMustExist=false] - If true, throws if database file does not exist.
|
||||
* @param {number} [opts.timeout=0] - Timeout duration in milliseconds for database operations. Defaults to 0 (no timeout).
|
||||
*/
|
||||
constructor(path: string, opts: any = {}) {
|
||||
constructor(db: NativeDatabase, opts: DatabaseOpts = {}) {
|
||||
opts.readonly = opts.readonly === undefined ? false : opts.readonly;
|
||||
opts.fileMustExist =
|
||||
opts.fileMustExist === undefined ? false : opts.fileMustExist;
|
||||
opts.timeout = opts.timeout === undefined ? 0 : opts.timeout;
|
||||
|
||||
const db = new NativeDB(path);
|
||||
this.initialize(db, opts.path, opts.readonly);
|
||||
this.initialize(db, opts.name, opts.readonly);
|
||||
}
|
||||
static create() {
|
||||
return Object.create(this.prototype);
|
||||
}
|
||||
initialize(db: NativeDB, name, readonly) {
|
||||
initialize(db: NativeDatabase, name, readonly) {
|
||||
this.db = db;
|
||||
this.memory = db.memory;
|
||||
Object.defineProperties(this, {
|
||||
@@ -112,22 +105,22 @@ class Database {
|
||||
*
|
||||
* @param {function} fn - The function to wrap in a transaction.
|
||||
*/
|
||||
transaction(fn) {
|
||||
transaction(fn: (...any) => Promise<any>) {
|
||||
if (typeof fn !== "function")
|
||||
throw new TypeError("Expected first argument to be a function");
|
||||
|
||||
const db = this;
|
||||
const wrapTxn = (mode) => {
|
||||
return (...bindParameters) => {
|
||||
db.exec("BEGIN " + mode);
|
||||
return async (...bindParameters) => {
|
||||
await db.exec("BEGIN " + mode);
|
||||
db._inTransaction = true;
|
||||
try {
|
||||
const result = fn(...bindParameters);
|
||||
db.exec("COMMIT");
|
||||
const result = await fn(...bindParameters);
|
||||
await db.exec("COMMIT");
|
||||
db._inTransaction = false;
|
||||
return result;
|
||||
} catch (err) {
|
||||
db.exec("ROLLBACK");
|
||||
await db.exec("ROLLBACK");
|
||||
db._inTransaction = false;
|
||||
throw err;
|
||||
}
|
||||
@@ -147,7 +140,7 @@ class Database {
|
||||
return properties.default.value;
|
||||
}
|
||||
|
||||
pragma(source, options) {
|
||||
async pragma(source, options) {
|
||||
if (options == null) options = {};
|
||||
|
||||
if (typeof source !== "string")
|
||||
@@ -158,8 +151,8 @@ class Database {
|
||||
|
||||
const pragma = `PRAGMA ${source}`;
|
||||
|
||||
const stmt = this.prepare(pragma);
|
||||
const results = stmt.all();
|
||||
const stmt = await this.prepare(pragma);
|
||||
const results = await stmt.all();
|
||||
|
||||
return results;
|
||||
}
|
||||
@@ -197,13 +190,13 @@ class Database {
|
||||
*
|
||||
* @param {string} sql - The SQL statement string to execute.
|
||||
*/
|
||||
exec(sql) {
|
||||
async exec(sql) {
|
||||
if (!this.open) {
|
||||
throw new TypeError("The database connection is not open");
|
||||
}
|
||||
|
||||
try {
|
||||
this.db.batch(sql);
|
||||
await this.db.batchAsync(sql);
|
||||
} catch (err) {
|
||||
throw convertError(err);
|
||||
}
|
||||
@@ -228,7 +221,7 @@ class Database {
|
||||
/**
|
||||
* Closes the database connection.
|
||||
*/
|
||||
close() {
|
||||
async close() {
|
||||
this.db.close();
|
||||
}
|
||||
}
|
||||
@@ -305,7 +298,7 @@ class Statement {
|
||||
bindParams(this.stmt, bindParameters);
|
||||
|
||||
while (true) {
|
||||
const stepResult = this.stmt.step();
|
||||
const stepResult = await this.stmt.stepAsync();
|
||||
if (stepResult === STEP_IO) {
|
||||
await this.db.db.ioLoopAsync();
|
||||
continue;
|
||||
@@ -335,7 +328,7 @@ class Statement {
|
||||
bindParams(this.stmt, bindParameters);
|
||||
|
||||
while (true) {
|
||||
const stepResult = this.stmt.step();
|
||||
const stepResult = await this.stmt.stepAsync();
|
||||
if (stepResult === STEP_IO) {
|
||||
await this.db.db.ioLoopAsync();
|
||||
continue;
|
||||
@@ -359,7 +352,7 @@ class Statement {
|
||||
bindParams(this.stmt, bindParameters);
|
||||
|
||||
while (true) {
|
||||
const stepResult = this.stmt.step();
|
||||
const stepResult = await this.stmt.stepAsync();
|
||||
if (stepResult === STEP_IO) {
|
||||
await this.db.db.ioLoopAsync();
|
||||
continue;
|
||||
@@ -384,7 +377,7 @@ class Statement {
|
||||
const rows: any[] = [];
|
||||
|
||||
while (true) {
|
||||
const stepResult = this.stmt.step();
|
||||
const stepResult = await this.stmt.stepAsync();
|
||||
if (stepResult === STEP_IO) {
|
||||
await this.db.db.ioLoopAsync();
|
||||
continue;
|
||||
@@ -421,17 +414,9 @@ class Statement {
|
||||
throw convertError(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new database connection asynchronously.
|
||||
*
|
||||
* @param {string} path - Path to the database file.
|
||||
* @param {Object} opts - Options for database behavior.
|
||||
* @returns {Promise<Database>} - A promise that resolves to a Database instance.
|
||||
*/
|
||||
async function connect(path: string, opts: any = {}): Promise<Database> {
|
||||
return new Database(path, opts);
|
||||
close() {
|
||||
this.stmt.finalize();
|
||||
}
|
||||
}
|
||||
|
||||
export { Database, SqliteError, connect }
|
||||
export { Database, Statement }
|
||||
@@ -1,17 +1,14 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"skipLibCheck": true,
|
||||
"declaration": true,
|
||||
"declarationMap": true,
|
||||
"module": "esnext",
|
||||
"target": "esnext",
|
||||
"outDir": "dist/",
|
||||
"lib": [
|
||||
"es2020"
|
||||
],
|
||||
"paths": {
|
||||
"#entry-point": [
|
||||
"./index.js"
|
||||
]
|
||||
}
|
||||
},
|
||||
"include": [
|
||||
"*"
|
||||
bindings/javascript/packages/common/types.ts (new file, 46 lines)
@@ -0,0 +1,46 @@
export interface DatabaseOpts {
    readonly?: boolean,
    fileMustExist?: boolean,
    timeout?: number
    name?: string
    tracing?: 'info' | 'debug' | 'trace'
}

export interface NativeDatabase {
    memory: boolean,
    path: string,
    new(path: string): NativeDatabase;
    batchSync(sql: string);
    batchAsync(sql: string): Promise<void>;

    ioLoopSync();
    ioLoopAsync(): Promise<void>;

    prepare(sql: string): NativeStatement;

    pluck(pluckMode: boolean);
    defaultSafeIntegers(toggle: boolean);
    totalChanges(): number;
    changes(): number;
    lastInsertRowid(): number;
    close();
}


// Step result constants
export const STEP_ROW = 1;
export const STEP_DONE = 2;
export const STEP_IO = 3;

export interface NativeStatement {
    stepAsync(): Promise<number>;
    stepSync(): number;

    pluck(pluckMode: boolean);
    safeIntegers(toggle: boolean);
    raw(toggle: boolean);
    columns(): string[];
    row(): any;
    reset();
    finalize();
}
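A condensed sketch of how these types are consumed, adapted from the promise-wrapper changes elsewhere in this diff; the helper name `drain` is hypothetical and lives inside the common package so it can import from `./types.js` directly:

```typescript
import { NativeDatabase, NativeStatement, STEP_ROW, STEP_DONE, STEP_IO } from "./types.js";

// Hypothetical helper: step a statement to completion, yielding to the
// background I/O loop whenever the native layer reports STEP_IO.
async function drain(db: NativeDatabase, stmt: NativeStatement): Promise<any[]> {
  const rows: any[] = [];
  while (true) {
    const result = await stmt.stepAsync();
    if (result === STEP_IO) {
      await db.ioLoopAsync();   // let pending I/O finish, then step again
      continue;
    }
    if (result === STEP_ROW) {
      rows.push(stmt.row());    // collect the current row in the active presentation mode
      continue;
    }
    if (result === STEP_DONE) {
      break;
    }
  }
  return rows;
}
```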
bindings/javascript/packages/native/README.md (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
<p align="center">
|
||||
<h1 align="center">Turso Database for JavaScript in Node</h1>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a title="JavaScript" target="_blank" href="https://www.npmjs.com/package/@tursodatabase/database"><img alt="npm" src="https://img.shields.io/npm/v/@tursodatabase/database"></a>
|
||||
<a title="MIT" target="_blank" href="https://github.com/tursodatabase/turso/blob/main/LICENSE.md"><img src="http://img.shields.io/badge/license-MIT-orange.svg?style=flat-square"></a>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a title="Users Discord" target="_blank" href="https://tur.so/discord"><img alt="Chat with other users of Turso on Discord" src="https://img.shields.io/discord/933071162680958986?label=Discord&logo=Discord&style=social"></a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
## About
|
||||
|
||||
This package is the Turso embedded database library for JavaScript in Node.
|
||||
|
||||
> **⚠️ Warning:** This software is ALPHA, only use for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now.
|
||||
|
||||
## Features
|
||||
|
||||
- **SQLite compatible:** SQLite query language and file format support ([status](https://github.com/tursodatabase/turso/blob/main/COMPAT.md)).
|
||||
- **In-process**: No network overhead, runs directly in your Node.js process
|
||||
- **TypeScript support**: Full TypeScript definitions included
|
||||
- **Cross-platform**: Supports Linux (x86 and arm64), macOS, and Windows; the browser is supported by the separate `@tursodatabase/database-browser` package
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
npm install @tursodatabase/database
|
||||
```
|
||||
|
||||
## Getting Started
|
||||
|
||||
### In-Memory Database
|
||||
|
||||
```javascript
|
||||
import { connect } from '@tursodatabase/database';
|
||||
|
||||
// Create an in-memory database
|
||||
const db = await connect(':memory:');
|
||||
|
||||
// Create a table
|
||||
await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)');
|
||||
|
||||
// Insert data
|
||||
const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
|
||||
await insert.run('Alice', 'alice@example.com');
|
||||
await insert.run('Bob', 'bob@example.com');
|
||||
|
||||
// Query data
|
||||
const users = await db.prepare('SELECT * FROM users').all();
|
||||
console.log(users);
|
||||
// Output: [
|
||||
// { id: 1, name: 'Alice', email: 'alice@example.com' },
|
||||
// { id: 2, name: 'Bob', email: 'bob@example.com' }
|
||||
// ]
|
||||
```
|
||||
|
||||
### File-Based Database
|
||||
|
||||
```javascript
|
||||
import { connect } from '@tursodatabase/database';
|
||||
|
||||
// Create or open a database file
|
||||
const db = await connect('my-database.db');
|
||||
|
||||
// Create a table
|
||||
await db.exec(`
|
||||
CREATE TABLE IF NOT EXISTS posts (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
title TEXT NOT NULL,
|
||||
content TEXT,
|
||||
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||
)
|
||||
`);
|
||||
|
||||
// Insert a post
|
||||
const insertPost = db.prepare('INSERT INTO posts (title, content) VALUES (?, ?)');
|
||||
const result = await insertPost.run('Hello World', 'This is my first blog post!');
|
||||
|
||||
console.log(`Inserted post with ID: ${result.lastInsertRowid}`);
|
||||
```
|
||||
|
||||
### Transactions
|
||||
|
||||
```javascript
|
||||
import { connect } from '@tursodatabase/database';
|
||||
|
||||
const db = await connect('transactions.db');
|
||||
|
||||
// Using transactions for atomic operations
|
||||
const transaction = db.transaction(async (users) => {
|
||||
const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
|
||||
for (const user of users) {
|
||||
await insert.run(user.name, user.email);
|
||||
}
|
||||
});
|
||||
|
||||
// Execute transaction
|
||||
await transaction([
|
||||
{ name: 'Alice', email: 'alice@example.com' },
|
||||
{ name: 'Bob', email: 'bob@example.com' }
|
||||
]);
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
For complete API documentation, see [JavaScript API Reference](../../../../docs/javascript-api-reference.md).
|
||||
|
||||
## Related Packages
|
||||
|
||||
* The [@tursodatabase/serverless](https://www.npmjs.com/package/@tursodatabase/serverless) package provides a serverless driver with the same API.
|
||||
* The [@tursodatabase/sync](https://www.npmjs.com/package/@tursodatabase/sync) package provides bidirectional sync between a local Turso database and Turso Cloud.
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the [MIT license](../../LICENSE.md).
|
||||
|
||||
## Support
|
||||
|
||||
- [GitHub Issues](https://github.com/tursodatabase/turso/issues)
|
||||
- [Documentation](https://docs.turso.tech)
|
||||
- [Discord Community](https://tur.so/discord)
|
||||
bindings/javascript/packages/native/compat.test.ts (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
import { unlinkSync } from "node:fs";
|
||||
import { expect, test } from 'vitest'
|
||||
import { Database } from './compat.js'
|
||||
|
||||
test('in-memory db', () => {
|
||||
const db = new Database(":memory:");
|
||||
db.exec("CREATE TABLE t(x)");
|
||||
db.exec("INSERT INTO t VALUES (1), (2), (3)");
|
||||
const stmt = db.prepare("SELECT * FROM t WHERE x % 2 = ?");
|
||||
const rows = stmt.all([1]);
|
||||
expect(rows).toEqual([{ x: 1 }, { x: 3 }]);
|
||||
})
|
||||
|
||||
test('on-disk db', () => {
|
||||
const path = `test-${(Math.random() * 10000) | 0}.db`;
|
||||
try {
|
||||
const db1 = new Database(path);
|
||||
db1.exec("CREATE TABLE t(x)");
|
||||
db1.exec("INSERT INTO t VALUES (1), (2), (3)");
|
||||
const stmt1 = db1.prepare("SELECT * FROM t WHERE x % 2 = ?");
|
||||
expect(stmt1.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
|
||||
const rows1 = stmt1.all([1]);
|
||||
expect(rows1).toEqual([{ x: 1 }, { x: 3 }]);
|
||||
db1.close();
|
||||
|
||||
const db2 = new Database(path);
|
||||
const stmt2 = db2.prepare("SELECT * FROM t WHERE x % 2 = ?");
|
||||
expect(stmt2.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
|
||||
const rows2 = stmt2.all([1]);
|
||||
expect(rows2).toEqual([{ x: 1 }, { x: 3 }]);
|
||||
db2.close();
|
||||
} finally {
|
||||
unlinkSync(path);
|
||||
unlinkSync(`${path}-wal`);
|
||||
}
|
||||
})
|
||||
|
||||
test('attach', () => {
|
||||
const path1 = `test-${(Math.random() * 10000) | 0}.db`;
|
||||
const path2 = `test-${(Math.random() * 10000) | 0}.db`;
|
||||
try {
|
||||
const db1 = new Database(path1);
|
||||
db1.exec("CREATE TABLE t(x)");
|
||||
db1.exec("INSERT INTO t VALUES (1), (2), (3)");
|
||||
const db2 = new Database(path2);
|
||||
db2.exec("CREATE TABLE q(x)");
|
||||
db2.exec("INSERT INTO q VALUES (4), (5), (6)");
|
||||
|
||||
db1.exec(`ATTACH '${path2}' as secondary`);
|
||||
|
||||
const stmt = db1.prepare("SELECT * FROM t UNION ALL SELECT * FROM secondary.q");
|
||||
expect(stmt.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
|
||||
const rows = stmt.all([1]);
|
||||
expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }, { x: 4 }, { x: 5 }, { x: 6 }]);
|
||||
} finally {
|
||||
unlinkSync(path1);
|
||||
unlinkSync(`${path1}-wal`);
|
||||
unlinkSync(path2);
|
||||
unlinkSync(`${path2}-wal`);
|
||||
}
|
||||
})
|
||||
|
||||
test('blobs', () => {
|
||||
const db = new Database(":memory:");
|
||||
const rows = db.prepare("SELECT x'1020' as x").all();
|
||||
expect(rows).toEqual([{ x: Buffer.from([16, 32]) }])
|
||||
})
|
||||
bindings/javascript/packages/native/compat.ts (new file, 10 lines)
@@ -0,0 +1,10 @@
import { DatabaseCompat, NativeDatabase, SqliteError, DatabaseOpts } from "@tursodatabase/database-common"
import { Database as NativeDB } from "#index";

class Database extends DatabaseCompat {
    constructor(path: string, opts: DatabaseOpts = {}) {
        super(new NativeDB(path, { tracing: opts.tracing }) as unknown as NativeDatabase, opts)
    }
}

export { Database, SqliteError }
@@ -8,13 +8,13 @@ export declare class Database {
|
||||
* # Arguments
|
||||
* * `path` - The path to the database file.
|
||||
*/
|
||||
constructor(path: string)
|
||||
constructor(path: string, opts?: DatabaseOpts | undefined | null)
|
||||
/** Returns whether the database is in memory-only mode. */
|
||||
get memory(): boolean
|
||||
/** Returns whether the database connection is open. */
|
||||
get open(): boolean
|
||||
/**
|
||||
* Executes a batch of SQL statements.
|
||||
* Executes a batch of SQL statements on the main thread
|
||||
*
|
||||
* # Arguments
|
||||
*
|
||||
@@ -22,7 +22,17 @@ export declare class Database {
|
||||
*
|
||||
* # Returns
|
||||
*/
|
||||
batch(sql: string): void
|
||||
batchSync(sql: string): void
|
||||
/**
|
||||
* Executes a batch of SQL statements outside of the main thread
|
||||
*
|
||||
* # Arguments
|
||||
*
|
||||
* * `sql` - The SQL statements to execute.
|
||||
*
|
||||
* # Returns
|
||||
*/
|
||||
batchAsync(sql: string): Promise<unknown>
|
||||
/**
|
||||
* Prepares a statement for execution.
|
||||
*
|
||||
@@ -105,10 +115,15 @@ export declare class Statement {
|
||||
*/
|
||||
bindAt(index: number, value: unknown): void
|
||||
/**
|
||||
* Step the statement and return result code:
|
||||
* Step the statement and return result code (executed on the main thread):
|
||||
* 1 = Row available, 2 = Done, 3 = I/O needed
|
||||
*/
|
||||
step(): number
|
||||
stepSync(): number
|
||||
/**
|
||||
* Step the statement and return result code (executed on the background thread):
|
||||
* 1 = Row available, 2 = Done, 3 = I/O needed
|
||||
*/
|
||||
stepAsync(): Promise<unknown>
|
||||
/** Get the current row data according to the presentation mode */
|
||||
row(): unknown
|
||||
/** Sets the presentation mode to raw. */
|
||||
@@ -128,3 +143,7 @@ export declare class Statement {
|
||||
/** Finalizes the statement. */
|
||||
finalize(): void
|
||||
}
|
||||
|
||||
export interface DatabaseOpts {
|
||||
tracing?: string
|
||||
}
|
||||
bindings/javascript/packages/native/index.js (new file, 513 lines)
@@ -0,0 +1,513 @@
|
||||
// prettier-ignore
|
||||
/* eslint-disable */
|
||||
// @ts-nocheck
|
||||
/* auto-generated by NAPI-RS */
|
||||
|
||||
import { createRequire } from 'node:module'
|
||||
const require = createRequire(import.meta.url)
|
||||
const __dirname = new URL('.', import.meta.url).pathname
|
||||
|
||||
const { readFileSync } = require('node:fs')
|
||||
let nativeBinding = null
|
||||
const loadErrors = []
|
||||
|
||||
const isMusl = () => {
|
||||
let musl = false
|
||||
if (process.platform === 'linux') {
|
||||
musl = isMuslFromFilesystem()
|
||||
if (musl === null) {
|
||||
musl = isMuslFromReport()
|
||||
}
|
||||
if (musl === null) {
|
||||
musl = isMuslFromChildProcess()
|
||||
}
|
||||
}
|
||||
return musl
|
||||
}
|
||||
|
||||
const isFileMusl = (f) => f.includes('libc.musl-') || f.includes('ld-musl-')
|
||||
|
||||
const isMuslFromFilesystem = () => {
|
||||
try {
|
||||
return readFileSync('/usr/bin/ldd', 'utf-8').includes('musl')
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
const isMuslFromReport = () => {
|
||||
let report = null
|
||||
if (typeof process.report?.getReport === 'function') {
|
||||
process.report.excludeNetwork = true
|
||||
report = process.report.getReport()
|
||||
}
|
||||
if (!report) {
|
||||
return null
|
||||
}
|
||||
if (report.header && report.header.glibcVersionRuntime) {
|
||||
return false
|
||||
}
|
||||
if (Array.isArray(report.sharedObjects)) {
|
||||
if (report.sharedObjects.some(isFileMusl)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const isMuslFromChildProcess = () => {
|
||||
try {
|
||||
return require('child_process').execSync('ldd --version', { encoding: 'utf8' }).includes('musl')
|
||||
} catch (e) {
|
||||
// If we reach this case, we don't know if the system is musl or not, so it is better to just fall back to false
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
function requireNative() {
|
||||
if (process.env.NAPI_RS_NATIVE_LIBRARY_PATH) {
|
||||
try {
|
||||
nativeBinding = require(process.env.NAPI_RS_NATIVE_LIBRARY_PATH);
|
||||
} catch (err) {
|
||||
loadErrors.push(err)
|
||||
}
|
||||
} else if (process.platform === 'android') {
|
||||
if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.android-arm64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-android-arm64')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-android-arm64/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm') {
|
||||
try {
|
||||
return require('./turso.android-arm-eabi.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-android-arm-eabi')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-android-arm-eabi/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on Android ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'win32') {
|
||||
if (process.arch === 'x64') {
|
||||
try {
|
||||
return require('./turso.win32-x64-msvc.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-win32-x64-msvc')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-win32-x64-msvc/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'ia32') {
|
||||
try {
|
||||
return require('./turso.win32-ia32-msvc.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-win32-ia32-msvc')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-win32-ia32-msvc/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.win32-arm64-msvc.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-win32-arm64-msvc')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-win32-arm64-msvc/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on Windows: ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'darwin') {
|
||||
try {
|
||||
return require('./turso.darwin-universal.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-darwin-universal')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-darwin-universal/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
if (process.arch === 'x64') {
|
||||
try {
|
||||
return require('./turso.darwin-x64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-darwin-x64')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-darwin-x64/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.darwin-arm64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-darwin-arm64')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-darwin-arm64/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on macOS: ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'freebsd') {
|
||||
if (process.arch === 'x64') {
|
||||
try {
|
||||
return require('./turso.freebsd-x64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-freebsd-x64')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-freebsd-x64/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.freebsd-arm64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-freebsd-arm64')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-freebsd-arm64/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on FreeBSD: ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'linux') {
|
||||
if (process.arch === 'x64') {
|
||||
if (isMusl()) {
|
||||
try {
|
||||
return require('./turso.linux-x64-musl.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-x64-musl')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-x64-musl/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
return require('./turso.linux-x64-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-x64-gnu')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-x64-gnu/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
}
|
||||
} else if (process.arch === 'arm64') {
|
||||
if (isMusl()) {
|
||||
try {
|
||||
return require('./turso.linux-arm64-musl.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-arm64-musl')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-arm64-musl/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
return require('./turso.linux-arm64-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-arm64-gnu')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-arm64-gnu/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
}
|
||||
} else if (process.arch === 'arm') {
|
||||
if (isMusl()) {
|
||||
try {
|
||||
return require('./turso.linux-arm-musleabihf.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-arm-musleabihf')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-arm-musleabihf/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
return require('./turso.linux-arm-gnueabihf.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-arm-gnueabihf')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-arm-gnueabihf/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
}
|
||||
} else if (process.arch === 'riscv64') {
|
||||
if (isMusl()) {
|
||||
try {
|
||||
return require('./turso.linux-riscv64-musl.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-riscv64-musl')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-riscv64-musl/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
return require('./turso.linux-riscv64-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-riscv64-gnu')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-riscv64-gnu/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
}
|
||||
} else if (process.arch === 'ppc64') {
|
||||
try {
|
||||
return require('./turso.linux-ppc64-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-ppc64-gnu')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-ppc64-gnu/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 's390x') {
|
||||
try {
|
||||
return require('./turso.linux-s390x-gnu.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-linux-s390x-gnu')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-linux-s390x-gnu/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on Linux: ${process.arch}`))
|
||||
}
|
||||
} else if (process.platform === 'openharmony') {
|
||||
if (process.arch === 'arm64') {
|
||||
try {
|
||||
return require('./turso.openharmony-arm64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-openharmony-arm64')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-openharmony-arm64/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'x64') {
|
||||
try {
|
||||
return require('./turso.openharmony-x64.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-openharmony-x64')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-openharmony-x64/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else if (process.arch === 'arm') {
|
||||
try {
|
||||
return require('./turso.openharmony-arm.node')
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
try {
|
||||
const binding = require('@tursodatabase/database-openharmony-arm')
|
||||
const bindingPackageVersion = require('@tursodatabase/database-openharmony-arm/package.json').version
|
||||
if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') {
|
||||
throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`)
|
||||
}
|
||||
return binding
|
||||
} catch (e) {
|
||||
loadErrors.push(e)
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported architecture on OpenHarmony: ${process.arch}`))
|
||||
}
|
||||
} else {
|
||||
loadErrors.push(new Error(`Unsupported OS: ${process.platform}, architecture: ${process.arch}`))
|
||||
}
|
||||
}
|
||||
|
||||
nativeBinding = requireNative()
|
||||
|
||||
if (!nativeBinding || process.env.NAPI_RS_FORCE_WASI) {
|
||||
try {
|
||||
nativeBinding = require('./turso.wasi.cjs')
|
||||
} catch (err) {
|
||||
if (process.env.NAPI_RS_FORCE_WASI) {
|
||||
loadErrors.push(err)
|
||||
}
|
||||
}
|
||||
if (!nativeBinding) {
|
||||
try {
|
||||
nativeBinding = require('@tursodatabase/database-wasm32-wasi')
|
||||
} catch (err) {
|
||||
if (process.env.NAPI_RS_FORCE_WASI) {
|
||||
loadErrors.push(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!nativeBinding) {
|
||||
if (loadErrors.length > 0) {
|
||||
throw new Error(
|
||||
`Cannot find native binding. ` +
|
||||
`npm has a bug related to optional dependencies (https://github.com/npm/cli/issues/4828). ` +
|
||||
'Please try `npm i` again after removing both package-lock.json and node_modules directory.',
|
||||
{ cause: loadErrors }
|
||||
)
|
||||
}
|
||||
throw new Error(`Failed to load native binding`)
|
||||
}
|
||||
|
||||
const { Database, Statement } = nativeBinding
|
||||
export { Database }
|
||||
export { Statement }
|
||||
bindings/javascript/packages/native/package.json (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
{
|
||||
"name": "@tursodatabase/database",
|
||||
"version": "0.1.5-pre.3",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/tursodatabase/turso"
|
||||
},
|
||||
"license": "MIT",
|
||||
"module": "./dist/promise.js",
|
||||
"main": "./dist/promise.js",
|
||||
"type": "module",
|
||||
"exports": {
|
||||
".": "./dist/promise.js",
|
||||
"./compat": "./dist/compat.js"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"dist/**",
|
||||
"README.md"
|
||||
],
|
||||
"packageManager": "yarn@4.9.2",
|
||||
"devDependencies": {
|
||||
"@napi-rs/cli": "^3.1.5",
|
||||
"@types/node": "^24.3.1",
|
||||
"typescript": "^5.9.2",
|
||||
"vitest": "^3.2.4"
|
||||
},
|
||||
"scripts": {
|
||||
"napi-build": "napi build --platform --release --esm --manifest-path ../../Cargo.toml --output-dir .",
|
||||
"napi-dirs": "napi create-npm-dirs",
|
||||
"napi-artifacts": "napi artifacts --output-dir .",
|
||||
"tsc-build": "npm exec tsc",
|
||||
"build": "npm run napi-build && npm run tsc-build",
|
||||
"test": "vitest --run",
|
||||
"prepublishOnly": "npm run napi-dirs && npm run napi-artifacts && napi prepublish -t npm"
|
||||
},
|
||||
"napi": {
|
||||
"binaryName": "turso",
|
||||
"targets": [
|
||||
"x86_64-unknown-linux-gnu",
|
||||
"x86_64-pc-windows-msvc",
|
||||
"universal-apple-darwin",
|
||||
"aarch64-unknown-linux-gnu"
|
||||
]
|
||||
},
|
||||
"dependencies": {
|
||||
"@tursodatabase/database-common": "^0.1.5-pre.3"
|
||||
},
|
||||
"imports": {
|
||||
"#index": "./index.js"
|
||||
}
|
||||
}
|
||||
bindings/javascript/packages/native/promise.test.ts (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
import { unlinkSync } from "node:fs";
|
||||
import { expect, test } from 'vitest'
|
||||
import { connect } from './promise.js'

test('in-memory db', async () => {
    const db = await connect(":memory:");
    await db.exec("CREATE TABLE t(x)");
    await db.exec("INSERT INTO t VALUES (1), (2), (3)");
    const stmt = db.prepare("SELECT * FROM t WHERE x % 2 = ?");
    const rows = await stmt.all([1]);
    expect(rows).toEqual([{ x: 1 }, { x: 3 }]);
})

test('on-disk db', async () => {
    const path = `test-${(Math.random() * 10000) | 0}.db`;
    try {
        const db1 = await connect(path);
        await db1.exec("CREATE TABLE t(x)");
        await db1.exec("INSERT INTO t VALUES (1), (2), (3)");
        const stmt1 = db1.prepare("SELECT * FROM t WHERE x % 2 = ?");
        expect(stmt1.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
        const rows1 = await stmt1.all([1]);
        expect(rows1).toEqual([{ x: 1 }, { x: 3 }]);
        db1.close();

        const db2 = await connect(path);
        const stmt2 = db2.prepare("SELECT * FROM t WHERE x % 2 = ?");
        expect(stmt2.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
        const rows2 = await stmt2.all([1]);
        expect(rows2).toEqual([{ x: 1 }, { x: 3 }]);
        db2.close();
    } finally {
        unlinkSync(path);
        unlinkSync(`${path}-wal`);
    }
})

test('attach', async () => {
    const path1 = `test-${(Math.random() * 10000) | 0}.db`;
    const path2 = `test-${(Math.random() * 10000) | 0}.db`;
    try {
        const db1 = await connect(path1);
        await db1.exec("CREATE TABLE t(x)");
        await db1.exec("INSERT INTO t VALUES (1), (2), (3)");
        const db2 = await connect(path2);
        await db2.exec("CREATE TABLE q(x)");
        await db2.exec("INSERT INTO q VALUES (4), (5), (6)");

        await db1.exec(`ATTACH '${path2}' as secondary`);

        const stmt = db1.prepare("SELECT * FROM t UNION ALL SELECT * FROM secondary.q");
        expect(stmt.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
        const rows = await stmt.all([1]);
        expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }, { x: 4 }, { x: 5 }, { x: 6 }]);
    } finally {
        unlinkSync(path1);
        unlinkSync(`${path1}-wal`);
        unlinkSync(path2);
        unlinkSync(`${path2}-wal`);
    }
})

test('blobs', async () => {
    const db = await connect(":memory:");
    const rows = await db.prepare("SELECT x'1020' as x").all();
    expect(rows).toEqual([{ x: Buffer.from([16, 32]) }])
})


test('example-1', async () => {
    const db = await connect(':memory:');
    await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)');

    const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
    await insert.run('Alice', 'alice@example.com');
    await insert.run('Bob', 'bob@example.com');

    const users = await db.prepare('SELECT * FROM users').all();
    expect(users).toEqual([
        { id: 1, name: 'Alice', email: 'alice@example.com' },
        { id: 2, name: 'Bob', email: 'bob@example.com' }
    ]);
})

test('example-2', async () => {
    const db = await connect(':memory:');
    await db.exec('CREATE TABLE users (name, email)');
    // Using transactions for atomic operations
    const transaction = db.transaction(async (users) => {
        const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
        for (const user of users) {
            await insert.run(user.name, user.email);
        }
    });

    // Execute transaction
    await transaction([
        { name: 'Alice', email: 'alice@example.com' },
        { name: 'Bob', email: 'bob@example.com' }
    ]);

    const rows = await db.prepare('SELECT * FROM users').all();
    expect(rows).toEqual([
        { name: 'Alice', email: 'alice@example.com' },
        { name: 'Bob', email: 'bob@example.com' }
    ]);
})
21
bindings/javascript/packages/native/promise.ts
Normal file
@@ -0,0 +1,21 @@
import { DatabasePromise, NativeDatabase, SqliteError, DatabaseOpts } from "@tursodatabase/database-common"
import { Database as NativeDB } from "#index";

class Database extends DatabasePromise {
    constructor(path: string, opts: DatabaseOpts = {}) {
        super(new NativeDB(path, { tracing: opts.tracing }) as unknown as NativeDatabase, opts)
    }
}

/**
 * Creates a new database connection asynchronously.
 *
 * @param {string} path - Path to the database file.
 * @param {Object} opts - Options for database behavior.
 * @returns {Promise<Database>} - A promise that resolves to a Database instance.
 */
async function connect(path: string, opts: any = {}): Promise<Database> {
    return new Database(path, opts);
}

export { connect, Database, SqliteError }
21
bindings/javascript/packages/native/tsconfig.json
Normal file
@@ -0,0 +1,21 @@
{
|
||||
"compilerOptions": {
|
||||
"skipLibCheck": true,
|
||||
"declaration": true,
|
||||
"declarationMap": true,
|
||||
"module": "nodenext",
|
||||
"target": "esnext",
|
||||
"outDir": "dist/",
|
||||
"lib": [
|
||||
"es2020"
|
||||
],
|
||||
"paths": {
|
||||
"#index": [
|
||||
"./index.js"
|
||||
]
|
||||
}
|
||||
},
|
||||
"include": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
28
bindings/javascript/perf/package-lock.json
generated
@@ -6,28 +6,34 @@
|
||||
"": {
|
||||
"name": "turso-perf",
|
||||
"dependencies": {
|
||||
"@tursodatabase/database": "..",
|
||||
"@tursodatabase/database": "../packages/native",
|
||||
"better-sqlite3": "^9.5.0",
|
||||
"mitata": "^0.1.11"
|
||||
}
|
||||
},
|
||||
"..": {
|
||||
"workspaces": [
|
||||
"packages/core",
|
||||
"packages/native",
|
||||
"packages/browser"
|
||||
]
|
||||
},
|
||||
"../packages/native": {
|
||||
"name": "@tursodatabase/database",
|
||||
"version": "0.1.4-pre.4",
|
||||
"version": "0.1.5-pre.3",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@napi-rs/cli": "^3.0.4",
|
||||
"@napi-rs/wasm-runtime": "^1.0.1",
|
||||
"ava": "^6.0.1",
|
||||
"better-sqlite3": "^11.9.1",
|
||||
"typescript": "^5.9.2"
|
||||
"dependencies": {
|
||||
"@tursodatabase/database-common": "^0.1.5-pre.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
"devDependencies": {
|
||||
"@napi-rs/cli": "^3.1.5",
|
||||
"@types/node": "^24.3.1",
|
||||
"typescript": "^5.9.2",
|
||||
"vitest": "^3.2.4"
|
||||
}
|
||||
},
|
||||
"node_modules/@tursodatabase/database": {
|
||||
"resolved": "..",
|
||||
"resolved": "../packages/native",
|
||||
"link": true
|
||||
},
|
||||
"node_modules/base64-js": {
|
||||
|
||||
@@ -2,9 +2,10 @@
|
||||
"name": "turso-perf",
|
||||
"type": "module",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"dependencies": {
|
||||
"better-sqlite3": "^9.5.0",
|
||||
"@tursodatabase/database": "..",
|
||||
"@tursodatabase/database": "../packages/native",
|
||||
"mitata": "^0.1.11"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { run, bench, group, baseline } from 'mitata';
|
||||
|
||||
import Database from '@tursodatabase/database';
|
||||
import { Database } from '@tursodatabase/database/compat';
|
||||
|
||||
const db = new Database(':memory:');
|
||||
|
||||
|
||||
254
bindings/javascript/src/browser.rs
Normal file
@@ -0,0 +1,254 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use napi::bindgen_prelude::*;
|
||||
use napi_derive::napi;
|
||||
use turso_core::{storage::database::DatabaseFile, Clock, File, Instant, IO};
|
||||
|
||||
use crate::{init_tracing, is_memory, Database, DatabaseOpts};
|
||||
|
||||
pub struct NoopTask;
|
||||
|
||||
impl Task for NoopTask {
|
||||
type Output = ();
|
||||
type JsValue = ();
|
||||
fn compute(&mut self) -> Result<Self::Output> {
|
||||
Ok(())
|
||||
}
|
||||
fn resolve(&mut self, _: Env, _: Self::Output) -> Result<Self::JsValue> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[napi]
|
||||
/// turso-db in the browser requires explicit thread pool initialization,
/// so we put a no-op task on the thread pool to force emnapi to allocate a web worker
|
||||
pub fn init_thread_pool() -> napi::Result<AsyncTask<NoopTask>> {
|
||||
Ok(AsyncTask::new(NoopTask))
|
||||
}
|
||||
|
||||
pub struct ConnectTask {
|
||||
path: String,
|
||||
is_memory: bool,
|
||||
io: Arc<dyn turso_core::IO>,
|
||||
}
|
||||
|
||||
pub struct ConnectResult {
|
||||
db: Arc<turso_core::Database>,
|
||||
conn: Arc<turso_core::Connection>,
|
||||
}
|
||||
|
||||
unsafe impl Send for ConnectResult {}
|
||||
|
||||
impl Task for ConnectTask {
|
||||
type Output = ConnectResult;
|
||||
type JsValue = Database;
|
||||
|
||||
fn compute(&mut self) -> Result<Self::Output> {
|
||||
let file = self
|
||||
.io
|
||||
.open_file(&self.path, turso_core::OpenFlags::Create, false)
|
||||
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to open file: {e}")))?;
|
||||
|
||||
let db_file = Arc::new(DatabaseFile::new(file));
|
||||
let db = turso_core::Database::open(self.io.clone(), &self.path, db_file, false, true)
|
||||
.map_err(|e| {
|
||||
Error::new(
|
||||
Status::GenericFailure,
|
||||
format!("Failed to open database: {e}"),
|
||||
)
|
||||
})?;
|
||||
|
||||
let conn = db
|
||||
.connect()
|
||||
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to connect: {e}")))?;
|
||||
|
||||
Ok(ConnectResult { db, conn })
|
||||
}
|
||||
|
||||
fn resolve(&mut self, _: Env, result: Self::Output) -> Result<Self::JsValue> {
|
||||
Ok(Database::create(
|
||||
Some(result.db),
|
||||
self.io.clone(),
|
||||
result.conn,
|
||||
self.is_memory,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[napi]
|
||||
// we offload connect to the web worker because:
// 1. the browser main thread does not support Atomics.wait operations
// 2. turso-db uses blocking IO [io.wait_for_completion(c)] in a few places during the initialization path
//
// so we run connect on a worker thread
pub fn connect(path: String, opts: Option<DatabaseOpts>) -> Result<AsyncTask<ConnectTask>> {
|
||||
if let Some(opts) = opts {
|
||||
init_tracing(opts.tracing);
|
||||
}
|
||||
let task = if is_memory(&path) {
|
||||
ConnectTask {
|
||||
io: Arc::new(turso_core::MemoryIO::new()),
|
||||
is_memory: true,
|
||||
path,
|
||||
}
|
||||
} else {
|
||||
let io = Arc::new(Opfs::new()?);
|
||||
ConnectTask {
|
||||
io,
|
||||
is_memory: false,
|
||||
path,
|
||||
}
|
||||
};
|
||||
Ok(AsyncTask::new(task))
|
||||
}
|
||||
#[napi]
|
||||
#[derive(Clone)]
|
||||
pub struct Opfs;
|
||||
|
||||
#[napi]
|
||||
#[derive(Clone)]
|
||||
struct OpfsFile {
|
||||
handle: i32,
|
||||
}
|
||||
|
||||
#[napi]
|
||||
impl Opfs {
|
||||
#[napi(constructor)]
|
||||
pub fn new() -> napi::Result<Self> {
|
||||
Ok(Self)
|
||||
}
|
||||
}
|
||||
|
||||
impl Clock for Opfs {
|
||||
fn now(&self) -> Instant {
|
||||
Instant { secs: 0, micros: 0 } // TODO
|
||||
}
|
||||
}
|
||||
|
||||
#[link(wasm_import_module = "env")]
|
||||
extern "C" {
|
||||
fn lookup_file(path: *const u8, path_len: usize) -> i32;
|
||||
fn read(handle: i32, buffer: *mut u8, buffer_len: usize, offset: i32) -> i32;
|
||||
fn write(handle: i32, buffer: *const u8, buffer_len: usize, offset: i32) -> i32;
|
||||
fn sync(handle: i32) -> i32;
|
||||
fn truncate(handle: i32, length: usize) -> i32;
|
||||
fn size(handle: i32) -> i32;
|
||||
fn is_web_worker() -> bool;
|
||||
}
|
||||
|
||||
fn is_web_worker_safe() -> bool {
|
||||
unsafe { is_web_worker() }
|
||||
}
|
||||
|
||||
impl IO for Opfs {
|
||||
fn open_file(
|
||||
&self,
|
||||
path: &str,
|
||||
_: turso_core::OpenFlags,
|
||||
_: bool,
|
||||
) -> turso_core::Result<std::sync::Arc<dyn turso_core::File>> {
|
||||
tracing::info!("open_file: {}", path);
|
||||
let result = unsafe { lookup_file(path.as_ptr(), path.len()) };
|
||||
if result >= 0 {
|
||||
Ok(Arc::new(OpfsFile { handle: result }))
|
||||
} else if result == -404 {
|
||||
Err(turso_core::LimboError::InternalError(
|
||||
"files must be created in advance for OPFS IO".to_string(),
|
||||
))
|
||||
} else {
|
||||
Err(turso_core::LimboError::InternalError(format!(
|
||||
"unexpected file lookup error: {result}"
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_file(&self, _: &str) -> turso_core::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl File for OpfsFile {
|
||||
fn lock_file(&self, _: bool) -> turso_core::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn unlock_file(&self) -> turso_core::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn pread(
|
||||
&self,
|
||||
pos: u64,
|
||||
c: turso_core::Completion,
|
||||
) -> turso_core::Result<turso_core::Completion> {
|
||||
assert!(
|
||||
is_web_worker_safe(),
|
||||
"opfs must be used only from web worker for now"
|
||||
);
|
||||
tracing::debug!("pread({}): pos={}", self.handle, pos);
|
||||
let handle = self.handle;
|
||||
let read_c = c.as_read();
|
||||
let buffer = read_c.buf_arc();
|
||||
let buffer = buffer.as_mut_slice();
|
||||
let result = unsafe { read(handle, buffer.as_mut_ptr(), buffer.len(), pos as i32) };
|
||||
c.complete(result as i32);
|
||||
Ok(c)
|
||||
}
|
||||
|
||||
fn pwrite(
|
||||
&self,
|
||||
pos: u64,
|
||||
buffer: Arc<turso_core::Buffer>,
|
||||
c: turso_core::Completion,
|
||||
) -> turso_core::Result<turso_core::Completion> {
|
||||
assert!(
|
||||
is_web_worker_safe(),
|
||||
"opfs must be used only from web worker for now"
|
||||
);
|
||||
tracing::debug!("pwrite({}): pos={}", self.handle, pos);
|
||||
let handle = self.handle;
|
||||
let buffer = buffer.as_slice();
|
||||
let result = unsafe { write(handle, buffer.as_ptr(), buffer.len(), pos as i32) };
|
||||
c.complete(result as i32);
|
||||
Ok(c)
|
||||
}
|
||||
|
||||
fn sync(&self, c: turso_core::Completion) -> turso_core::Result<turso_core::Completion> {
|
||||
assert!(
|
||||
is_web_worker_safe(),
|
||||
"opfs must be used only from web worker for now"
|
||||
);
|
||||
tracing::debug!("sync({})", self.handle);
|
||||
let handle = self.handle;
|
||||
let result = unsafe { sync(handle) };
|
||||
c.complete(result as i32);
|
||||
Ok(c)
|
||||
}
|
||||
|
||||
fn truncate(
|
||||
&self,
|
||||
len: u64,
|
||||
c: turso_core::Completion,
|
||||
) -> turso_core::Result<turso_core::Completion> {
|
||||
assert!(
|
||||
is_web_worker_safe(),
|
||||
"opfs must be used only from web worker for now"
|
||||
);
|
||||
tracing::debug!("truncate({}): len={}", self.handle, len);
|
||||
let handle = self.handle;
|
||||
let result = unsafe { truncate(handle, len as usize) };
|
||||
c.complete(result as i32);
|
||||
Ok(c)
|
||||
}
|
||||
|
||||
fn size(&self) -> turso_core::Result<u64> {
|
||||
assert!(
|
||||
is_web_worker_safe(),
|
||||
"size can be called only from web worker context"
|
||||
);
|
||||
tracing::debug!("size({})", self.handle);
|
||||
let handle = self.handle;
|
||||
let result = unsafe { size(handle) };
|
||||
Ok(result as u64)
|
||||
}
|
||||
}
|
||||
@@ -10,14 +10,20 @@
|
||||
//! - Iterating through query results
|
||||
//! - Managing the I/O event loop
|
||||
|
||||
#[cfg(feature = "browser")]
|
||||
pub mod browser;
|
||||
|
||||
use napi::bindgen_prelude::*;
|
||||
use napi::{Env, Task};
|
||||
use napi_derive::napi;
|
||||
use std::sync::OnceLock;
|
||||
use std::{
|
||||
cell::{Cell, RefCell},
|
||||
num::NonZeroUsize,
|
||||
sync::Arc,
|
||||
};
|
||||
use tracing_subscriber::filter::LevelFilter;
|
||||
use tracing_subscriber::fmt::format::FmtSpan;
|
||||
|
||||
/// Step result constants
|
||||
const STEP_ROW: u32 = 1;
|
||||
@@ -38,12 +44,107 @@ enum PresentationMode {
|
||||
pub struct Database {
|
||||
_db: Option<Arc<turso_core::Database>>,
|
||||
io: Arc<dyn turso_core::IO>,
|
||||
conn: Arc<turso_core::Connection>,
|
||||
conn: Option<Arc<turso_core::Connection>>,
|
||||
is_memory: bool,
|
||||
is_open: Cell<bool>,
|
||||
default_safe_integers: Cell<bool>,
|
||||
}
|
||||
|
||||
pub(crate) fn is_memory(path: &str) -> bool {
|
||||
path == ":memory:"
|
||||
}
|
||||
|
||||
static TRACING_INIT: OnceLock<()> = OnceLock::new();
|
||||
pub(crate) fn init_tracing(level_filter: Option<String>) {
|
||||
let Some(level_filter) = level_filter else {
|
||||
return;
|
||||
};
|
||||
let level_filter = match level_filter.as_ref() {
|
||||
"info" => LevelFilter::INFO,
|
||||
"debug" => LevelFilter::DEBUG,
|
||||
"trace" => LevelFilter::TRACE,
|
||||
_ => return,
|
||||
};
|
||||
TRACING_INIT.get_or_init(|| {
|
||||
tracing_subscriber::fmt()
|
||||
.with_ansi(false)
|
||||
.with_thread_ids(true)
|
||||
.with_span_events(FmtSpan::ACTIVE)
|
||||
.with_max_level(level_filter)
|
||||
.init();
|
||||
});
|
||||
}
|
||||
|
||||
pub enum DbTask {
|
||||
Batch {
|
||||
conn: Arc<turso_core::Connection>,
|
||||
sql: String,
|
||||
},
|
||||
Step {
|
||||
stmt: Arc<RefCell<Option<turso_core::Statement>>>,
|
||||
},
|
||||
}
|
||||
|
||||
unsafe impl Send for DbTask {}
|
||||
|
||||
impl Task for DbTask {
|
||||
type Output = u32;
|
||||
type JsValue = u32;
|
||||
|
||||
fn compute(&mut self) -> Result<Self::Output> {
|
||||
match self {
|
||||
DbTask::Batch { conn, sql } => {
|
||||
batch_sync(conn, sql)?;
|
||||
Ok(0)
|
||||
}
|
||||
DbTask::Step { stmt } => step_sync(stmt),
|
||||
}
|
||||
}
|
||||
|
||||
fn resolve(&mut self, _: Env, output: Self::Output) -> Result<Self::JsValue> {
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
#[napi(object)]
|
||||
pub struct DatabaseOpts {
|
||||
pub tracing: Option<String>,
|
||||
}
|
||||
|
||||
fn batch_sync(conn: &Arc<turso_core::Connection>, sql: &str) -> napi::Result<()> {
|
||||
conn.prepare_execute_batch(sql).map_err(|e| {
|
||||
Error::new(
|
||||
Status::GenericFailure,
|
||||
format!("Failed to execute batch: {e}"),
|
||||
)
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn step_sync(stmt: &Arc<RefCell<Option<turso_core::Statement>>>) -> napi::Result<u32> {
|
||||
let mut stmt_ref = stmt.borrow_mut();
|
||||
let stmt = stmt_ref
|
||||
.as_mut()
|
||||
.ok_or_else(|| Error::new(Status::GenericFailure, "Statement has been finalized"))?;
|
||||
|
||||
match stmt.step() {
|
||||
Ok(turso_core::StepResult::Row) => Ok(STEP_ROW),
|
||||
Ok(turso_core::StepResult::IO) => Ok(STEP_IO),
|
||||
Ok(turso_core::StepResult::Done) => Ok(STEP_DONE),
|
||||
Ok(turso_core::StepResult::Interrupt) => Err(Error::new(
|
||||
Status::GenericFailure,
|
||||
"Statement was interrupted",
|
||||
)),
|
||||
Ok(turso_core::StepResult::Busy) => {
|
||||
Err(Error::new(Status::GenericFailure, "Database is busy"))
|
||||
}
|
||||
Err(e) => Err(Error::new(
|
||||
Status::GenericFailure,
|
||||
format!("Step failed: {e}"),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
#[napi]
|
||||
impl Database {
|
||||
/// Creates a new database instance.
|
||||
@@ -51,9 +152,11 @@ impl Database {
|
||||
/// # Arguments
|
||||
/// * `path` - The path to the database file.
|
||||
#[napi(constructor)]
|
||||
pub fn new(path: String) -> Result<Self> {
|
||||
let is_memory = path == ":memory:";
|
||||
let io: Arc<dyn turso_core::IO> = if is_memory {
|
||||
pub fn new(path: String, opts: Option<DatabaseOpts>) -> Result<Self> {
|
||||
if let Some(opts) = opts {
|
||||
init_tracing(opts.tracing);
|
||||
}
|
||||
let io: Arc<dyn turso_core::IO> = if is_memory(&path) {
|
||||
Arc::new(turso_core::MemoryIO::new())
|
||||
} else {
|
||||
Arc::new(turso_core::PlatformIO::new().map_err(|e| {
|
||||
@@ -61,6 +164,11 @@ impl Database {
|
||||
})?)
|
||||
};
|
||||
|
||||
#[cfg(feature = "browser")]
|
||||
if !is_memory(&path) {
|
||||
return Err(Error::new(Status::GenericFailure, "sync constructor is not supported for FS-backed databases in the WASM. Use async connect(...) method instead".to_string()));
|
||||
}
|
||||
|
||||
let file = io
|
||||
.open_file(&path, turso_core::OpenFlags::Create, false)
|
||||
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to open file: {e}")))?;
|
||||
@@ -78,7 +186,7 @@ impl Database {
|
||||
.connect()
|
||||
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to connect: {e}")))?;
|
||||
|
||||
Ok(Self::create(Some(db), io, conn, is_memory))
|
||||
Ok(Self::create(Some(db), io, conn, is_memory(&path)))
|
||||
}
|
||||
|
||||
pub fn create(
|
||||
@@ -90,13 +198,23 @@ impl Database {
|
||||
Database {
|
||||
_db: db,
|
||||
io,
|
||||
conn,
|
||||
conn: Some(conn),
|
||||
is_memory,
|
||||
is_open: Cell::new(true),
|
||||
default_safe_integers: Cell::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
fn conn(&self) -> Result<Arc<turso_core::Connection>> {
|
||||
let Some(conn) = self.conn.as_ref() else {
|
||||
return Err(napi::Error::new(
|
||||
napi::Status::GenericFailure,
|
||||
"connection is not set",
|
||||
));
|
||||
};
|
||||
Ok(conn.clone())
|
||||
}
|
||||
|
||||
/// Returns whether the database is in memory-only mode.
|
||||
#[napi(getter)]
|
||||
pub fn memory(&self) -> bool {
|
||||
@@ -109,7 +227,7 @@ impl Database {
|
||||
self.is_open.get()
|
||||
}
|
||||
|
||||
/// Executes a batch of SQL statements.
|
||||
/// Executes a batch of SQL statements on main thread
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
@@ -117,14 +235,23 @@ impl Database {
|
||||
///
|
||||
/// # Returns
|
||||
#[napi]
|
||||
pub fn batch(&self, sql: String) -> Result<()> {
|
||||
self.conn.prepare_execute_batch(&sql).map_err(|e| {
|
||||
Error::new(
|
||||
Status::GenericFailure,
|
||||
format!("Failed to execute batch: {e}"),
|
||||
)
|
||||
})?;
|
||||
Ok(())
|
||||
pub fn batch_sync(&self, sql: String) -> Result<()> {
|
||||
batch_sync(&self.conn()?, &sql)
|
||||
}
|
||||
|
||||
/// Executes a batch of SQL statements outside of main thread
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `sql` - The SQL statements to execute.
|
||||
///
|
||||
/// # Returns
|
||||
#[napi]
|
||||
pub fn batch_async(&self, sql: String) -> Result<AsyncTask<DbTask>> {
|
||||
Ok(AsyncTask::new(DbTask::Batch {
|
||||
conn: self.conn()?.clone(),
|
||||
sql,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Prepares a statement for execution.
|
||||
@@ -139,14 +266,15 @@ impl Database {
|
||||
#[napi]
|
||||
pub fn prepare(&self, sql: String) -> Result<Statement> {
|
||||
let stmt = self
|
||||
.conn
|
||||
.conn()?
|
||||
.prepare(&sql)
|
||||
.map_err(|e| Error::new(Status::GenericFailure, format!("{e}")))?;
|
||||
let column_names: Vec<std::ffi::CString> = (0..stmt.num_columns())
|
||||
.map(|i| std::ffi::CString::new(stmt.get_column_name(i).to_string()).unwrap())
|
||||
.collect();
|
||||
Ok(Statement {
|
||||
stmt: RefCell::new(Some(stmt)),
|
||||
#[allow(clippy::arc_with_non_send_sync)]
|
||||
stmt: Arc::new(RefCell::new(Some(stmt))),
|
||||
column_names,
|
||||
mode: RefCell::new(PresentationMode::Expanded),
|
||||
safe_integers: Cell::new(self.default_safe_integers.get()),
|
||||
@@ -160,7 +288,7 @@ impl Database {
|
||||
/// The rowid of the last row inserted.
|
||||
#[napi]
|
||||
pub fn last_insert_rowid(&self) -> Result<i64> {
|
||||
Ok(self.conn.last_insert_rowid())
|
||||
Ok(self.conn()?.last_insert_rowid())
|
||||
}
|
||||
|
||||
/// Returns the number of changes made by the last statement.
|
||||
@@ -170,7 +298,7 @@ impl Database {
|
||||
/// The number of changes made by the last statement.
|
||||
#[napi]
|
||||
pub fn changes(&self) -> Result<i64> {
|
||||
Ok(self.conn.changes())
|
||||
Ok(self.conn()?.changes())
|
||||
}
|
||||
|
||||
/// Returns the total number of changes made by all statements.
|
||||
@@ -180,7 +308,7 @@ impl Database {
|
||||
/// The total number of changes made by all statements.
|
||||
#[napi]
|
||||
pub fn total_changes(&self) -> Result<i64> {
|
||||
Ok(self.conn.total_changes())
|
||||
Ok(self.conn()?.total_changes())
|
||||
}
|
||||
|
||||
/// Closes the database connection.
|
||||
@@ -189,9 +317,10 @@ impl Database {
|
||||
///
|
||||
/// `Ok(())` if the database is closed successfully.
|
||||
#[napi]
|
||||
pub fn close(&self) -> Result<()> {
|
||||
pub fn close(&mut self) -> Result<()> {
|
||||
self.is_open.set(false);
|
||||
// Database close is handled automatically when dropped
|
||||
let _ = self._db.take().unwrap();
|
||||
let _ = self.conn.take().unwrap();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -225,7 +354,7 @@ impl Database {
|
||||
/// A prepared statement.
|
||||
#[napi]
|
||||
pub struct Statement {
|
||||
stmt: RefCell<Option<turso_core::Statement>>,
|
||||
stmt: Arc<RefCell<Option<turso_core::Statement>>>,
|
||||
column_names: Vec<std::ffi::CString>,
|
||||
mode: RefCell<PresentationMode>,
|
||||
safe_integers: Cell<bool>,
|
||||
@@ -344,31 +473,20 @@ impl Statement {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Step the statement and return result code:
|
||||
/// Step the statement and return result code (executed on the main thread):
|
||||
/// 1 = Row available, 2 = Done, 3 = I/O needed
|
||||
#[napi]
|
||||
pub fn step(&self) -> Result<u32> {
|
||||
let mut stmt_ref = self.stmt.borrow_mut();
|
||||
let stmt = stmt_ref
|
||||
.as_mut()
|
||||
.ok_or_else(|| Error::new(Status::GenericFailure, "Statement has been finalized"))?;
|
||||
pub fn step_sync(&self) -> Result<u32> {
|
||||
step_sync(&self.stmt)
|
||||
}
|
||||
|
||||
match stmt.step() {
|
||||
Ok(turso_core::StepResult::Row) => Ok(STEP_ROW),
|
||||
Ok(turso_core::StepResult::Done) => Ok(STEP_DONE),
|
||||
Ok(turso_core::StepResult::IO) => Ok(STEP_IO),
|
||||
Ok(turso_core::StepResult::Interrupt) => Err(Error::new(
|
||||
Status::GenericFailure,
|
||||
"Statement was interrupted",
|
||||
)),
|
||||
Ok(turso_core::StepResult::Busy) => {
|
||||
Err(Error::new(Status::GenericFailure, "Database is busy"))
|
||||
}
|
||||
Err(e) => Err(Error::new(
|
||||
Status::GenericFailure,
|
||||
format!("Step failed: {e}"),
|
||||
)),
|
||||
}
|
||||
/// Step the statement and return result code (executed on the background thread):
|
||||
/// 1 = Row available, 2 = Done, 3 = I/O needed
|
||||
#[napi]
|
||||
pub fn step_async(&self) -> Result<AsyncTask<DbTask>> {
|
||||
Ok(AsyncTask::new(DbTask::Step {
|
||||
stmt: self.stmt.clone(),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Get the current row data according to the presentation mode
|
||||
@@ -543,8 +661,17 @@ fn to_js_value<'a>(
|
||||
turso_core::Value::Float(f) => ToNapiValue::into_unknown(*f, env),
|
||||
turso_core::Value::Text(s) => ToNapiValue::into_unknown(s.as_str(), env),
|
||||
turso_core::Value::Blob(b) => {
|
||||
let buffer = Buffer::from(b.as_slice());
|
||||
ToNapiValue::into_unknown(buffer, env)
|
||||
#[cfg(not(feature = "browser"))]
|
||||
{
|
||||
let buffer = Buffer::from(b.as_slice());
|
||||
ToNapiValue::into_unknown(buffer, env)
|
||||
}
|
||||
// emnapi does not support Buffer
|
||||
#[cfg(feature = "browser")]
|
||||
{
|
||||
let buffer = Uint8Array::from(b.as_slice());
|
||||
ToNapiValue::into_unknown(buffer, env)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,112 +0,0 @@
|
||||
/* eslint-disable */
|
||||
/* prettier-ignore */
|
||||
|
||||
/* auto-generated by NAPI-RS */
|
||||
|
||||
const __nodeFs = require('node:fs')
|
||||
const __nodePath = require('node:path')
|
||||
const { WASI: __nodeWASI } = require('node:wasi')
|
||||
const { Worker } = require('node:worker_threads')
|
||||
|
||||
const {
|
||||
createOnMessage: __wasmCreateOnMessageForFsProxy,
|
||||
getDefaultContext: __emnapiGetDefaultContext,
|
||||
instantiateNapiModuleSync: __emnapiInstantiateNapiModuleSync,
|
||||
} = require('@napi-rs/wasm-runtime')
|
||||
|
||||
const __rootDir = __nodePath.parse(process.cwd()).root
|
||||
|
||||
const __wasi = new __nodeWASI({
|
||||
version: 'preview1',
|
||||
env: process.env,
|
||||
preopens: {
|
||||
[__rootDir]: __rootDir,
|
||||
}
|
||||
})
|
||||
|
||||
const __emnapiContext = __emnapiGetDefaultContext()
|
||||
|
||||
const __sharedMemory = new WebAssembly.Memory({
|
||||
initial: 4000,
|
||||
maximum: 65536,
|
||||
shared: true,
|
||||
})
|
||||
|
||||
let __wasmFilePath = __nodePath.join(__dirname, 'turso.wasm32-wasi.wasm')
|
||||
const __wasmDebugFilePath = __nodePath.join(__dirname, 'turso.wasm32-wasi.debug.wasm')
|
||||
|
||||
if (__nodeFs.existsSync(__wasmDebugFilePath)) {
|
||||
__wasmFilePath = __wasmDebugFilePath
|
||||
} else if (!__nodeFs.existsSync(__wasmFilePath)) {
|
||||
try {
|
||||
__wasmFilePath = __nodePath.resolve('@tursodatabase/database-wasm32-wasi')
|
||||
} catch {
|
||||
throw new Error('Cannot find turso.wasm32-wasi.wasm file, and @tursodatabase/database-wasm32-wasi package is not installed.')
|
||||
}
|
||||
}
|
||||
|
||||
const { instance: __napiInstance, module: __wasiModule, napiModule: __napiModule } = __emnapiInstantiateNapiModuleSync(__nodeFs.readFileSync(__wasmFilePath), {
|
||||
context: __emnapiContext,
|
||||
asyncWorkPoolSize: (function() {
|
||||
const threadsSizeFromEnv = Number(process.env.NAPI_RS_ASYNC_WORK_POOL_SIZE ?? process.env.UV_THREADPOOL_SIZE)
|
||||
// NaN > 0 is false
|
||||
if (threadsSizeFromEnv > 0) {
|
||||
return threadsSizeFromEnv
|
||||
} else {
|
||||
return 4
|
||||
}
|
||||
})(),
|
||||
reuseWorker: true,
|
||||
wasi: __wasi,
|
||||
onCreateWorker() {
|
||||
const worker = new Worker(__nodePath.join(__dirname, 'wasi-worker.mjs'), {
|
||||
env: process.env,
|
||||
})
|
||||
worker.onmessage = ({ data }) => {
|
||||
__wasmCreateOnMessageForFsProxy(__nodeFs)(data)
|
||||
}
|
||||
|
||||
// The main thread of Node.js waits for all the active handles before exiting.
|
||||
// But Rust threads are never waited without `thread::join`.
|
||||
// So here we hack the code of Node.js to prevent the workers from being referenced (active).
|
||||
// According to https://github.com/nodejs/node/blob/19e0d472728c79d418b74bddff588bea70a403d0/lib/internal/worker.js#L415,
|
||||
// a worker is consist of two handles: kPublicPort and kHandle.
|
||||
{
|
||||
const kPublicPort = Object.getOwnPropertySymbols(worker).find(s =>
|
||||
s.toString().includes("kPublicPort")
|
||||
);
|
||||
if (kPublicPort) {
|
||||
worker[kPublicPort].ref = () => {};
|
||||
}
|
||||
|
||||
const kHandle = Object.getOwnPropertySymbols(worker).find(s =>
|
||||
s.toString().includes("kHandle")
|
||||
);
|
||||
if (kHandle) {
|
||||
worker[kHandle].ref = () => {};
|
||||
}
|
||||
|
||||
worker.unref();
|
||||
}
|
||||
return worker
|
||||
},
|
||||
overwriteImports(importObject) {
|
||||
importObject.env = {
|
||||
...importObject.env,
|
||||
...importObject.napi,
|
||||
...importObject.emnapi,
|
||||
memory: __sharedMemory,
|
||||
}
|
||||
return importObject
|
||||
},
|
||||
beforeInit({ instance }) {
|
||||
for (const name of Object.keys(instance.exports)) {
|
||||
if (name.startsWith('__napi_register__')) {
|
||||
instance.exports[name]()
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
module.exports = __napiModule.exports
|
||||
module.exports.Database = __napiModule.exports.Database
|
||||
module.exports.Statement = __napiModule.exports.Statement
|
||||
@@ -1,32 +0,0 @@
|
||||
import { instantiateNapiModuleSync, MessageHandler, WASI } from '@napi-rs/wasm-runtime'
|
||||
|
||||
const handler = new MessageHandler({
|
||||
onLoad({ wasmModule, wasmMemory }) {
|
||||
const wasi = new WASI({
|
||||
print: function () {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log.apply(console, arguments)
|
||||
},
|
||||
printErr: function() {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error.apply(console, arguments)
|
||||
},
|
||||
})
|
||||
return instantiateNapiModuleSync(wasmModule, {
|
||||
childThread: true,
|
||||
wasi,
|
||||
overwriteImports(importObject) {
|
||||
importObject.env = {
|
||||
...importObject.env,
|
||||
...importObject.napi,
|
||||
...importObject.emnapi,
|
||||
memory: wasmMemory,
|
||||
}
|
||||
},
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
globalThis.onmessage = function (e) {
|
||||
handler.handle(e)
|
||||
}
|
||||
@@ -1,63 +0,0 @@
|
||||
import fs from "node:fs";
|
||||
import { createRequire } from "node:module";
|
||||
import { parse } from "node:path";
|
||||
import { WASI } from "node:wasi";
|
||||
import { parentPort, Worker } from "node:worker_threads";
|
||||
|
||||
const require = createRequire(import.meta.url);
|
||||
|
||||
const { instantiateNapiModuleSync, MessageHandler, getDefaultContext } = require("@napi-rs/wasm-runtime");
|
||||
|
||||
if (parentPort) {
|
||||
parentPort.on("message", (data) => {
|
||||
globalThis.onmessage({ data });
|
||||
});
|
||||
}
|
||||
|
||||
Object.assign(globalThis, {
|
||||
self: globalThis,
|
||||
require,
|
||||
Worker,
|
||||
importScripts: function (f) {
|
||||
;(0, eval)(fs.readFileSync(f, "utf8") + "//# sourceURL=" + f);
|
||||
},
|
||||
postMessage: function (msg) {
|
||||
if (parentPort) {
|
||||
parentPort.postMessage(msg);
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
const emnapiContext = getDefaultContext();
|
||||
|
||||
const __rootDir = parse(process.cwd()).root;
|
||||
|
||||
const handler = new MessageHandler({
|
||||
onLoad({ wasmModule, wasmMemory }) {
|
||||
const wasi = new WASI({
|
||||
version: 'preview1',
|
||||
env: process.env,
|
||||
preopens: {
|
||||
[__rootDir]: __rootDir,
|
||||
},
|
||||
});
|
||||
|
||||
return instantiateNapiModuleSync(wasmModule, {
|
||||
childThread: true,
|
||||
wasi,
|
||||
context: emnapiContext,
|
||||
overwriteImports(importObject) {
|
||||
importObject.env = {
|
||||
...importObject.env,
|
||||
...importObject.napi,
|
||||
...importObject.emnapi,
|
||||
memory: wasmMemory
|
||||
};
|
||||
},
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
globalThis.onmessage = function (e) {
|
||||
handler.handle(e);
|
||||
};
|
||||
File diff suppressed because it is too large
@@ -82,6 +82,7 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
pub struct Builder {
|
||||
path: String,
|
||||
enable_mvcc: bool,
|
||||
vfs: Option<String>,
|
||||
}
|
||||
|
||||
impl Builder {
|
||||
@@ -90,6 +91,7 @@ impl Builder {
|
||||
Self {
|
||||
path: path.to_string(),
|
||||
enable_mvcc: false,
|
||||
vfs: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,25 +100,68 @@ impl Builder {
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_io(mut self, vfs: String) -> Self {
|
||||
self.vfs = Some(vfs);
|
||||
self
|
||||
}
|
||||
|
||||
/// Build the database.
|
||||
#[allow(unused_variables, clippy::arc_with_non_send_sync)]
|
||||
pub async fn build(self) -> Result<Database> {
|
||||
match self.path.as_str() {
|
||||
":memory:" => {
|
||||
let io: Arc<dyn turso_core::IO> = Arc::new(turso_core::MemoryIO::new());
|
||||
let db = turso_core::Database::open_file(
|
||||
io,
|
||||
self.path.as_str(),
|
||||
self.enable_mvcc,
|
||||
true,
|
||||
)?;
|
||||
Ok(Database { inner: db })
|
||||
let io = self.get_io()?;
|
||||
let db = turso_core::Database::open_file(io, self.path.as_str(), self.enable_mvcc, true)?;
|
||||
Ok(Database { inner: db })
|
||||
}
|
||||
|
||||
fn get_io(&self) -> Result<Arc<dyn turso_core::IO>> {
|
||||
let vfs_choice = self.vfs.as_deref().unwrap_or("");
|
||||
|
||||
if self.path == ":memory:" && vfs_choice.is_empty() {
|
||||
return Ok(Arc::new(turso_core::MemoryIO::new()));
|
||||
}
|
||||
|
||||
match vfs_choice {
|
||||
"memory" => Ok(Arc::new(turso_core::MemoryIO::new())),
|
||||
"syscall" => {
|
||||
#[cfg(target_family = "unix")]
|
||||
{
|
||||
Ok(Arc::new(
|
||||
turso_core::UnixIO::new()
|
||||
.map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
|
||||
))
|
||||
}
|
||||
#[cfg(not(target_family = "unix"))]
|
||||
{
|
||||
Ok(Arc::new(
|
||||
turso_core::PlatformIO::new()
|
||||
.map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
|
||||
))
|
||||
}
|
||||
}
|
||||
path => {
|
||||
let io: Arc<dyn turso_core::IO> = Arc::new(turso_core::PlatformIO::new()?);
|
||||
let db = turso_core::Database::open_file(io, path, self.enable_mvcc, true)?;
|
||||
Ok(Database { inner: db })
|
||||
#[cfg(target_os = "linux")]
|
||||
"io_uring" => Ok(Arc::new(
|
||||
turso_core::UringIO::new()
|
||||
.map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
|
||||
)),
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
"io_uring" => Err(Error::SqlExecutionFailure(
|
||||
"io_uring is only available on Linux targets".to_string(),
|
||||
)),
|
||||
"" => {
|
||||
// Default behavior: memory for ":memory:", platform IO for files
|
||||
if self.path == ":memory:" {
|
||||
Ok(Arc::new(turso_core::MemoryIO::new()))
|
||||
} else {
|
||||
Ok(Arc::new(
|
||||
turso_core::PlatformIO::new()
|
||||
.map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
|
||||
))
|
||||
}
|
||||
}
|
||||
_ => Ok(Arc::new(
|
||||
turso_core::PlatformIO::new()
|
||||
.map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
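The match above picks the IO backend from the `vfs` string: "memory", "syscall", "io_uring" (Linux only), the default platform IO for the empty string, and platform IO again for unrecognized names. A minimal usage sketch, assuming the surrounding Builder API from the existing Rust bindings (the `new_local` constructor name and the database path are illustrative, not confirmed by this diff):

// Sketch only: `new_local` is assumed from the existing bindings; "app.db" is illustrative.
let db = Builder::new_local("app.db")
    .with_io("io_uring".to_string()) // returns an error on non-Linux targets, per the match above
    .build()
    .await?;
let conn = db.connect()?;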
|
||||
|
||||
30
cli/app.rs
@@ -500,33 +500,7 @@ impl Limbo {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
if line.trim_start().starts_with("--") {
|
||||
if let Some(remaining) = line.split_once('\n') {
|
||||
let after_comment = remaining.1.trim();
|
||||
if !after_comment.is_empty() {
|
||||
if after_comment.ends_with(';') {
|
||||
self.run_query(after_comment);
|
||||
if self.opts.echo {
|
||||
let _ = self.writeln(after_comment);
|
||||
}
|
||||
let conn = self.conn.clone();
|
||||
let runner = conn.query_runner(after_comment.as_bytes());
|
||||
for output in runner {
|
||||
if let Err(e) = self.print_query_result(after_comment, output, None) {
|
||||
let _ = self.writeln(e.to_string());
|
||||
}
|
||||
}
|
||||
self.reset_input();
|
||||
return self.handle_input_line(after_comment);
|
||||
} else {
|
||||
self.set_multiline_prompt();
|
||||
let _ = self.reset_line(line);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.reset_line(line)?;
|
||||
if line.ends_with(';') {
|
||||
self.buffer_input(line);
|
||||
@@ -1400,7 +1374,7 @@ impl Limbo {
|
||||
// FIXME: we don't yet support PRAGMA foreign_keys=OFF internally,
|
||||
// so for now this hacky boolean that decides not to emit it when cloning
|
||||
if fk {
|
||||
writeln!(out, "PRAGMA foreign_keys=OFF")?;
|
||||
writeln!(out, "PRAGMA foreign_keys=OFF;")?;
|
||||
}
|
||||
writeln!(out, "BEGIN TRANSACTION;")?;
|
||||
// FIXME: At this point, SQLite executes the following:
|
||||
|
||||
File diff suppressed because it is too large
1618
core/incremental/cursor.rs
Normal file
File diff suppressed because it is too large
@@ -1,19 +1,86 @@
|
||||
// Simplified DBSP integration for incremental view maintenance
|
||||
// For now, we'll use a basic approach and can expand to full DBSP later
|
||||
|
||||
use std::collections::HashMap;
|
||||
use super::hashable_row::HashableRow;
|
||||
use crate::Value;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
|
||||
type DeltaEntry = (HashableRow, isize);
|
||||
/// A delta represents ordered changes to data
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct Delta {
|
||||
/// Ordered list of changes: (row, weight) where weight is +1 for insert, -1 for delete
|
||||
/// It is crucial that this is ordered. Imagine the case of an update, which becomes a delete +
|
||||
/// insert. If this is not ordered, it would be applied in arbitrary order and break the view.
|
||||
pub changes: Vec<DeltaEntry>,
|
||||
}
|
||||
|
||||
impl Delta {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
changes: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, row_key: i64, values: Vec<Value>) {
|
||||
let row = HashableRow::new(row_key, values);
|
||||
self.changes.push((row, 1));
|
||||
}
|
||||
|
||||
pub fn delete(&mut self, row_key: i64, values: Vec<Value>) {
|
||||
let row = HashableRow::new(row_key, values);
|
||||
self.changes.push((row, -1));
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.changes.is_empty()
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.changes.len()
|
||||
}
|
||||
|
||||
/// Merge another delta into this one
|
||||
/// This preserves the order of operations - no consolidation is done
|
||||
/// to maintain the full history of changes
|
||||
pub fn merge(&mut self, other: &Delta) {
|
||||
// Simply append all changes from other, preserving order
|
||||
self.changes.extend(other.changes.iter().cloned());
|
||||
}
|
||||
|
||||
/// Consolidate changes by combining entries with the same HashableRow
|
||||
pub fn consolidate(&mut self) {
|
||||
if self.changes.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Use a HashMap to accumulate weights
|
||||
let mut consolidated: HashMap<HashableRow, isize> = HashMap::new();
|
||||
|
||||
for (row, weight) in self.changes.drain(..) {
|
||||
*consolidated.entry(row).or_insert(0) += weight;
|
||||
}
|
||||
|
||||
// Convert back to vec, filtering out zero weights
|
||||
self.changes = consolidated
|
||||
.into_iter()
|
||||
.filter(|(_, weight)| *weight != 0)
|
||||
.collect();
|
||||
}
|
||||
}
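Since the point of keeping `changes` ordered is exactly the update-as-delete-plus-insert case described in the doc comment above, here is a small illustrative sketch (row keys and values are made up) of how a Delta accumulates changes and what `consolidate` drops:

// Illustrative only; uses just the Delta API defined above.
let mut delta = Delta::new();
// An UPDATE of row 1 arrives as an ordered retraction + assertion:
delta.delete(1, vec![Value::Integer(10)]);
delta.insert(1, vec![Value::Integer(11)]);
// An insert that is later deleted with identical values nets to zero weight:
delta.insert(2, vec![Value::Integer(20)]);
delta.delete(2, vec![Value::Integer(20)]);
delta.consolidate();
// Only row 1's two entries remain (old value with weight -1, new value with +1);
// row 2's +1 and -1 cancel out and are filtered away.
assert_eq!(delta.len(), 2);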
|
||||
|
||||
/// A simplified ZSet for incremental computation
|
||||
/// Each element has a weight: positive for additions, negative for deletions
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct SimpleZSet<T> {
|
||||
data: HashMap<T, isize>,
|
||||
data: BTreeMap<T, isize>,
|
||||
}
|
||||
|
||||
impl<T: std::hash::Hash + Eq + Clone> SimpleZSet<T> {
|
||||
#[allow(dead_code)]
|
||||
impl<T: std::hash::Hash + Eq + Ord + Clone> SimpleZSet<T> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
data: HashMap::new(),
|
||||
data: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,36 +112,121 @@ impl<T: std::hash::Hash + Eq + Clone> SimpleZSet<T> {
|
||||
self.insert(item.clone(), weight);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A simplified stream for incremental computation
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SimpleStream<T> {
|
||||
current: SimpleZSet<T>,
|
||||
}
|
||||
|
||||
impl<T: std::hash::Hash + Eq + Clone> SimpleStream<T> {
|
||||
pub fn from_zset(zset: SimpleZSet<T>) -> Self {
|
||||
Self { current: zset }
|
||||
/// Get the weight for a specific item (0 if not present)
|
||||
pub fn get(&self, item: &T) -> isize {
|
||||
self.data.get(item).copied().unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Apply a delta (change) to the stream
|
||||
pub fn apply_delta(&mut self, delta: &SimpleZSet<T>) {
|
||||
self.current.merge(delta);
|
||||
/// Get the first element (smallest key) in the Z-set
|
||||
pub fn first(&self) -> Option<(&T, isize)> {
|
||||
self.data.iter().next().map(|(k, &v)| (k, v))
|
||||
}
|
||||
|
||||
/// Get the current state as a vector of items (only positive weights)
|
||||
pub fn to_vec(&self) -> Vec<T> {
|
||||
self.current.to_vec()
|
||||
/// Get the last element (largest key) in the Z-set
|
||||
pub fn last(&self) -> Option<(&T, isize)> {
|
||||
self.data.iter().next_back().map(|(k, &v)| (k, v))
|
||||
}
|
||||
|
||||
/// Get a range of elements
|
||||
pub fn range<R>(&self, range: R) -> impl Iterator<Item = (&T, isize)> + '_
|
||||
where
|
||||
R: std::ops::RangeBounds<T>,
|
||||
{
|
||||
self.data.range(range).map(|(k, &v)| (k, v))
|
||||
}
|
||||
|
||||
/// Check if empty
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.data.is_empty()
|
||||
}
|
||||
|
||||
/// Get the number of elements
|
||||
pub fn len(&self) -> usize {
|
||||
self.data.len()
|
||||
}
|
||||
}
|
||||
|
||||
// Type aliases for convenience
|
||||
use super::hashable_row::HashableRow;
|
||||
|
||||
pub type RowKey = HashableRow;
|
||||
pub type RowKeyZSet = SimpleZSet<RowKey>;
|
||||
pub type RowKeyStream = SimpleStream<RowKey>;
|
||||
|
||||
impl RowKeyZSet {
|
||||
/// Create a Z-set from a Delta by consolidating all changes
|
||||
pub fn from_delta(delta: &Delta) -> Self {
|
||||
let mut zset = Self::new();
|
||||
|
||||
// Add all changes from the delta, consolidating as we go
|
||||
for (row, weight) in &delta.changes {
|
||||
zset.insert(row.clone(), *weight);
|
||||
}
|
||||
|
||||
zset
|
||||
}
|
||||
|
||||
/// Seek to find ALL entries for the best matching rowid
|
||||
/// For GT/GE: returns all entries for the smallest rowid that satisfies the condition
|
||||
/// For LT/LE: returns all entries for the largest rowid that satisfies the condition
|
||||
/// Returns empty vec if no match found
|
||||
pub fn seek(&self, target: i64, op: crate::types::SeekOp) -> Vec<(HashableRow, isize)> {
|
||||
use crate::types::SeekOp;
|
||||
|
||||
// First find the best matching rowid
|
||||
let best_rowid = match op {
|
||||
SeekOp::GT => {
|
||||
// Find smallest rowid > target
|
||||
self.data
|
||||
.iter()
|
||||
.filter(|(row, _)| row.rowid > target)
|
||||
.map(|(row, _)| row.rowid)
|
||||
.min()
|
||||
}
|
||||
SeekOp::GE { eq_only: false } => {
|
||||
// Find smallest rowid >= target
|
||||
self.data
|
||||
.iter()
|
||||
.filter(|(row, _)| row.rowid >= target)
|
||||
.map(|(row, _)| row.rowid)
|
||||
.min()
|
||||
}
|
||||
SeekOp::GE { eq_only: true } | SeekOp::LE { eq_only: true } => {
|
||||
// Need exact match
|
||||
if self.data.iter().any(|(row, _)| row.rowid == target) {
|
||||
Some(target)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
SeekOp::LT => {
|
||||
// Find largest rowid < target
|
||||
self.data
|
||||
.iter()
|
||||
.filter(|(row, _)| row.rowid < target)
|
||||
.map(|(row, _)| row.rowid)
|
||||
.max()
|
||||
}
|
||||
SeekOp::LE { eq_only: false } => {
|
||||
// Find largest rowid <= target
|
||||
self.data
|
||||
.iter()
|
||||
.filter(|(row, _)| row.rowid <= target)
|
||||
.map(|(row, _)| row.rowid)
|
||||
.max()
|
||||
}
|
||||
};
|
||||
|
||||
// Now get ALL entries with that rowid
|
||||
match best_rowid {
|
||||
Some(rowid) => self
|
||||
.data
|
||||
.iter()
|
||||
.filter(|(row, _)| row.rowid == rowid)
|
||||
.map(|(k, &v)| (k.clone(), v))
|
||||
.collect(),
|
||||
None => Vec::new(),
|
||||
}
|
||||
}
|
||||
}
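A hedged sketch of the seek semantics documented above, using only the types that appear in this file (rowids and values are made up); note that one rowid can map to several entries, which is why seek returns a Vec:

use crate::types::SeekOp;

let mut zset = RowKeyZSet::new();
zset.insert(HashableRow::new(1, vec![Value::Integer(1)]), 1);
zset.insert(HashableRow::new(5, vec![Value::Integer(50)]), 1);
zset.insert(HashableRow::new(5, vec![Value::Integer(51)]), 1); // same rowid, different values
zset.insert(HashableRow::new(9, vec![Value::Integer(9)]), 1);

// GE 4: the smallest rowid >= 4 is 5, so both rowid-5 entries come back.
assert_eq!(zset.seek(4, SeekOp::GE { eq_only: false }).len(), 2);
// GT 5: the smallest rowid > 5 is 9, a single entry.
assert_eq!(zset.seek(5, SeekOp::GT).len(), 1);
// LE 0: nothing at or below 0, so the result is empty.
assert!(zset.seek(0, SeekOp::LE { eq_only: false }).is_empty());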
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -78,3 +78,23 @@ impl Hash for HashableRow {
|
||||
self.cached_hash.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for HashableRow {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for HashableRow {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
// First compare by rowid, then by values if rowids are equal
|
||||
// This ensures Ord is consistent with Eq (which compares all fields)
|
||||
match self.rowid.cmp(&other.rowid) {
|
||||
std::cmp::Ordering::Equal => {
|
||||
// If rowids are equal, compare values to maintain consistency with Eq
|
||||
self.values.cmp(&other.values)
|
||||
}
|
||||
other => other,
|
||||
}
|
||||
}
|
||||
}
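A one-line illustration of why the Ord impl above falls back to comparing values: two rows that share a rowid but differ in values must not compare Equal, or a BTreeMap would treat them as the same key (the values below are made up):

let a = HashableRow::new(5, vec![Value::Integer(1)]);
let b = HashableRow::new(5, vec![Value::Integer(2)]);
assert_ne!(a, b);
assert!(a < b); // the tie on rowid is broken by the values comparison above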
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
pub mod compiler;
|
||||
pub mod cursor;
|
||||
pub mod dbsp;
|
||||
pub mod expr_compiler;
|
||||
pub mod hashable_row;
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,13 +1,16 @@
|
||||
use super::compiler::{DbspCircuit, DbspCompiler, DeltaSet};
|
||||
use super::dbsp::{RowKeyStream, RowKeyZSet};
|
||||
use super::operator::{ComputationTracker, Delta, FilterPredicate};
|
||||
use super::dbsp::Delta;
|
||||
use super::operator::{ComputationTracker, FilterPredicate};
|
||||
use crate::schema::{BTreeTable, Column, Schema};
|
||||
use crate::storage::btree::BTreeCursor;
|
||||
use crate::translate::logical::LogicalPlanBuilder;
|
||||
use crate::types::{IOCompletions, IOResult, Value};
|
||||
use crate::types::{IOResult, Value};
|
||||
use crate::util::extract_view_columns;
|
||||
use crate::{io_yield_one, Completion, LimboError, Result, Statement};
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use crate::{return_if_io, LimboError, Pager, Result, Statement};
|
||||
use std::cell::RefCell;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::rc::Rc;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use turso_parser::ast;
|
||||
use turso_parser::{
|
||||
@@ -23,18 +26,26 @@ pub enum PopulateState {
|
||||
Processing {
|
||||
stmt: Box<Statement>,
|
||||
rows_processed: usize,
|
||||
/// If we're in the middle of processing a row (merge_delta returned I/O)
|
||||
pending_row: Option<(i64, Vec<Value>)>, // (rowid, values)
|
||||
},
|
||||
/// Population complete
|
||||
Done,
|
||||
}
|
||||
|
||||
/// State machine for merge_delta to handle I/O operations
|
||||
impl fmt::Debug for PopulateState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
PopulateState::Start => write!(f, "Start"),
|
||||
PopulateState::Processing { rows_processed, .. } => f
|
||||
PopulateState::Processing {
|
||||
rows_processed,
|
||||
pending_row,
|
||||
..
|
||||
} => f
|
||||
.debug_struct("Processing")
|
||||
.field("rows_processed", rows_processed)
|
||||
.field("has_pending", &pending_row.is_some())
|
||||
.finish(),
|
||||
PopulateState::Done => write!(f, "Done"),
|
||||
}
|
||||
@@ -45,11 +56,95 @@ impl fmt::Debug for PopulateState {
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ViewTransactionState {
|
||||
// Per-connection delta for uncommitted changes (contains both weights and values)
|
||||
pub delta: Delta,
|
||||
// Using RefCell for interior mutability
|
||||
delta: RefCell<Delta>,
|
||||
}
|
||||
|
||||
/// Incremental view that maintains a stream of row keys using DBSP-style computation
|
||||
/// The actual row data is stored as transformed Values
|
||||
impl ViewTransactionState {
|
||||
/// Create a new transaction state
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
delta: RefCell::new(Delta::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a row into the delta
|
||||
pub fn insert(&self, key: i64, values: Vec<Value>) {
|
||||
self.delta.borrow_mut().insert(key, values);
|
||||
}
|
||||
|
||||
/// Delete a row from the delta
|
||||
pub fn delete(&self, key: i64, values: Vec<Value>) {
|
||||
self.delta.borrow_mut().delete(key, values);
|
||||
}
|
||||
|
||||
/// Clear all changes in the delta
|
||||
pub fn clear(&self) {
|
||||
self.delta.borrow_mut().changes.clear();
|
||||
}
|
||||
|
||||
/// Get a clone of the current delta
|
||||
pub fn get_delta(&self) -> Delta {
|
||||
self.delta.borrow().clone()
|
||||
}
|
||||
|
||||
/// Check if the delta is empty
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.delta.borrow().is_empty()
|
||||
}
|
||||
|
||||
/// Returns how many elements exist in the delta.
|
||||
pub fn len(&self) -> usize {
|
||||
self.delta.borrow().len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Container for all view transaction states within a connection
|
||||
/// Provides interior mutability for the map of view states
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct AllViewsTxState {
|
||||
states: Rc<RefCell<HashMap<String, Rc<ViewTransactionState>>>>,
|
||||
}
|
||||
|
||||
impl AllViewsTxState {
|
||||
/// Create a new container for view transaction states
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
states: Rc::new(RefCell::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get or create a transaction state for a view
|
||||
pub fn get_or_create(&self, view_name: &str) -> Rc<ViewTransactionState> {
|
||||
let mut states = self.states.borrow_mut();
|
||||
states
|
||||
.entry(view_name.to_string())
|
||||
.or_insert_with(|| Rc::new(ViewTransactionState::new()))
|
||||
.clone()
|
||||
}
|
||||
|
||||
/// Get a transaction state for a view if it exists
|
||||
pub fn get(&self, view_name: &str) -> Option<Rc<ViewTransactionState>> {
|
||||
self.states.borrow().get(view_name).cloned()
|
||||
}
|
||||
|
||||
/// Clear all transaction states
|
||||
pub fn clear(&self) {
|
||||
self.states.borrow_mut().clear();
|
||||
}
|
||||
|
||||
/// Check if there are no transaction states
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.states.borrow().is_empty()
|
||||
}
|
||||
|
||||
/// Get all view names that have transaction states
|
||||
pub fn get_view_names(&self) -> Vec<String> {
|
||||
self.states.borrow().keys().cloned().collect()
|
||||
}
|
||||
}
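A small usage sketch of the per-connection container defined above (the view name and values are illustrative); because of the interior mutability, callers only ever need a shared reference:

let tx_states = AllViewsTxState::new();
let view_state = tx_states.get_or_create("my_view");
view_state.insert(1, vec![Value::Integer(42)]);
view_state.delete(1, vec![Value::Integer(42)]);
assert_eq!(view_state.len(), 2); // both changes are kept, in order, until commit
assert_eq!(tx_states.get_view_names(), vec!["my_view".to_string()]);
tx_states.clear();
assert!(tx_states.is_empty());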
|
||||
|
||||
/// Incremental view that maintains its state through a DBSP circuit
|
||||
///
|
||||
/// This version keeps everything in-memory. This is acceptable for small views, since DBSP
|
||||
/// doesn't have to track the history of changes. Still for very large views (think of the result
|
||||
@@ -62,12 +157,7 @@ pub struct ViewTransactionState {
|
||||
/// Uses DBSP circuits for incremental computation.
|
||||
#[derive(Debug)]
|
||||
pub struct IncrementalView {
|
||||
// Stream of row keys for this view
|
||||
stream: RowKeyStream,
|
||||
name: String,
|
||||
// Store the actual row data as Values, keyed by row_key
|
||||
// Using BTreeMap for ordered iteration
|
||||
pub records: BTreeMap<i64, Vec<Value>>,
|
||||
// WHERE clause predicate for filtering (kept for compatibility)
|
||||
pub where_predicate: FilterPredicate,
|
||||
// The SELECT statement that defines how to transform input data
|
||||
@@ -75,8 +165,6 @@ pub struct IncrementalView {
|
||||
|
||||
// DBSP circuit that encapsulates the computation
|
||||
circuit: DbspCircuit,
|
||||
// Track whether circuit has been initialized with data
|
||||
circuit_initialized: bool,
|
||||
|
||||
// Tables referenced by this view (extracted from FROM clause and JOINs)
|
||||
base_table: Arc<BTreeTable>,
|
||||
@@ -88,6 +176,8 @@ pub struct IncrementalView {
|
||||
// We will use this one day to export rows_read, but for now, will just test that we're doing the expected amount of compute
|
||||
#[cfg_attr(not(test), allow(dead_code))]
|
||||
pub tracker: Arc<Mutex<ComputationTracker>>,
|
||||
// Root page of the btree storing the materialized state (0 for unmaterialized)
|
||||
root_page: usize,
|
||||
}
|
||||
|
||||
impl IncrementalView {
|
||||
@@ -110,6 +200,8 @@ impl IncrementalView {
|
||||
select: &ast::Select,
|
||||
schema: &Schema,
|
||||
_base_table: &Arc<BTreeTable>,
|
||||
main_data_root: usize,
|
||||
internal_state_root: usize,
|
||||
) -> Result<DbspCircuit> {
|
||||
// Build the logical plan from the SELECT statement
|
||||
let mut builder = LogicalPlanBuilder::new(schema);
|
||||
@@ -117,8 +209,8 @@ impl IncrementalView {
|
||||
let stmt = ast::Stmt::Select(select.clone());
|
||||
let logical_plan = builder.build_statement(&stmt)?;
|
||||
|
||||
// Compile the logical plan to a DBSP circuit
|
||||
let compiler = DbspCompiler::new();
|
||||
// Compile the logical plan to a DBSP circuit with the storage roots
|
||||
let compiler = DbspCompiler::new(main_data_root, internal_state_root);
|
||||
let circuit = compiler.compile(&logical_plan)?;
|
||||
|
||||
Ok(circuit)
|
||||
@@ -145,7 +237,37 @@ impl IncrementalView {
|
||||
false
|
||||
}
|
||||
|
||||
pub fn from_sql(sql: &str, schema: &Schema) -> Result<Self> {
|
||||
/// Validate a SELECT statement and extract the columns it would produce
|
||||
/// This is used during CREATE MATERIALIZED VIEW to validate the view before storing it
|
||||
pub fn validate_and_extract_columns(
|
||||
select: &ast::Select,
|
||||
schema: &Schema,
|
||||
) -> Result<Vec<crate::schema::Column>> {
|
||||
// For now, just extract columns from a simple select
|
||||
// This will need to be expanded to handle joins, aggregates, etc.
|
||||
|
||||
// Get the base table name
|
||||
let base_table_name = Self::extract_base_table(select).ok_or_else(|| {
|
||||
LimboError::ParseError("Cannot extract base table from SELECT".to_string())
|
||||
})?;
|
||||
|
||||
// Get the table from schema
|
||||
let table = schema
|
||||
.get_table(&base_table_name)
|
||||
.and_then(|t| t.btree())
|
||||
.ok_or_else(|| LimboError::ParseError(format!("Table {base_table_name} not found")))?;
|
||||
|
||||
// For now, return all columns from the base table
|
||||
// In the future, this should parse the select list and handle projections
|
||||
Ok(table.columns.clone())
|
||||
}
|
||||
|
||||
pub fn from_sql(
|
||||
sql: &str,
|
||||
schema: &Schema,
|
||||
main_data_root: usize,
|
||||
internal_state_root: usize,
|
||||
) -> Result<Self> {
|
||||
let mut parser = Parser::new(sql.as_bytes());
|
||||
let cmd = parser.next_cmd()?;
|
||||
let cmd = cmd.expect("View is an empty statement");
|
||||
@@ -155,7 +277,13 @@ impl IncrementalView {
|
||||
view_name,
|
||||
columns: _,
|
||||
select,
|
||||
}) => IncrementalView::from_stmt(view_name, select, schema),
|
||||
}) => IncrementalView::from_stmt(
|
||||
view_name,
|
||||
select,
|
||||
schema,
|
||||
main_data_root,
|
||||
internal_state_root,
|
||||
),
|
||||
_ => Err(LimboError::ParseError(format!(
|
||||
"View is not a CREATE MATERIALIZED VIEW statement: {sql}"
|
||||
))),
|
||||
@@ -166,6 +294,8 @@ impl IncrementalView {
|
||||
view_name: ast::QualifiedName,
|
||||
select: ast::Select,
|
||||
schema: &Schema,
|
||||
main_data_root: usize,
|
||||
internal_state_root: usize,
|
||||
) -> Result<Self> {
|
||||
let name = view_name.name.as_str().to_string();
|
||||
|
||||
@@ -203,9 +333,12 @@ impl IncrementalView {
|
||||
base_table,
|
||||
view_columns,
|
||||
schema,
|
||||
main_data_root,
|
||||
internal_state_root,
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
name: String,
|
||||
where_predicate: FilterPredicate,
|
||||
@@ -213,30 +346,31 @@ impl IncrementalView {
|
||||
base_table: Arc<BTreeTable>,
|
||||
columns: Vec<Column>,
|
||||
schema: &Schema,
|
||||
main_data_root: usize,
|
||||
internal_state_root: usize,
|
||||
) -> Result<Self> {
|
||||
let records = BTreeMap::new();
|
||||
|
||||
// Create the tracker that will be shared by all operators
|
||||
let tracker = Arc::new(Mutex::new(ComputationTracker::new()));
|
||||
|
||||
// Compile the SELECT statement into a DBSP circuit
|
||||
let circuit = Self::try_compile_circuit(&select_stmt, schema, &base_table)?;
|
||||
|
||||
// Circuit will be initialized when we first call merge_delta
|
||||
let circuit_initialized = false;
|
||||
let circuit = Self::try_compile_circuit(
|
||||
&select_stmt,
|
||||
schema,
|
||||
&base_table,
|
||||
main_data_root,
|
||||
internal_state_root,
|
||||
)?;
|
||||
|
||||
Ok(Self {
|
||||
stream: RowKeyStream::from_zset(RowKeyZSet::new()),
|
||||
name,
|
||||
records,
|
||||
where_predicate,
|
||||
select_stmt,
|
||||
circuit,
|
||||
circuit_initialized,
|
||||
base_table,
|
||||
columns,
|
||||
populate_state: PopulateState::Start,
|
||||
tracker,
|
||||
root_page: main_data_root,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -244,6 +378,29 @@ impl IncrementalView {
|
||||
&self.name
|
||||
}
|
||||
|
||||
pub fn base_table(&self) -> &Arc<BTreeTable> {
|
||||
&self.base_table
|
||||
}
|
||||
|
||||
/// Execute the circuit with uncommitted changes to get processed delta
|
||||
pub fn execute_with_uncommitted(
|
||||
&mut self,
|
||||
uncommitted: DeltaSet,
|
||||
pager: Rc<Pager>,
|
||||
execute_state: &mut crate::incremental::compiler::ExecuteState,
|
||||
) -> crate::Result<crate::types::IOResult<Delta>> {
|
||||
// Initialize execute_state with the input data
|
||||
*execute_state = crate::incremental::compiler::ExecuteState::Init {
|
||||
input_data: uncommitted,
|
||||
};
|
||||
self.circuit.execute(pager, execute_state)
|
||||
}
|
||||
|
||||
/// Get the root page for this materialized view's btree
|
||||
pub fn get_root_page(&self) -> usize {
|
||||
self.root_page
|
||||
}
|
||||
|
||||
/// Get all table names referenced by this view
|
||||
pub fn get_referenced_table_names(&self) -> Vec<String> {
|
||||
vec![self.base_table.name.clone()]
|
||||
@@ -348,132 +505,189 @@ impl IncrementalView {
|
||||
|
||||
/// Populate the view by scanning the source table using a state machine
|
||||
/// This can be called multiple times and will resume from where it left off
|
||||
/// This method is only for materialized views and will persist data to the btree
|
||||
pub fn populate_from_table(
|
||||
&mut self,
|
||||
conn: &std::sync::Arc<crate::Connection>,
|
||||
pager: &std::rc::Rc<crate::Pager>,
|
||||
_btree_cursor: &mut BTreeCursor,
|
||||
) -> crate::Result<IOResult<()>> {
|
||||
// If already populated, return immediately
|
||||
if matches!(self.populate_state, PopulateState::Done) {
|
||||
return Ok(IOResult::Done(()));
|
||||
}
|
||||
|
||||
const BATCH_SIZE: usize = 100; // Process 100 rows at a time before yielding
|
||||
// Assert that this is a materialized view with a root page
|
||||
assert!(
|
||||
self.root_page != 0,
|
||||
"populate_from_table should only be called for materialized views with root_page"
|
||||
);
|
||||
|
||||
loop {
|
||||
match &mut self.populate_state {
|
||||
PopulateState::Start => {
|
||||
// Generate the SQL query for populating the view
|
||||
// It is better to use a standard query than a cursor, for two reasons:
|
||||
// 1) Using a sql query will allow us to be much more efficient in cases where we only want
|
||||
// some rows, in particular for indexed filters
|
||||
// 2) There are two types of cursors: index and table. In some situations (like for example
|
||||
// if the table has an integer primary key), the key will be exclusively in the index
|
||||
// btree and not in the table btree. Using cursors would force us to be aware of this
|
||||
// distinction (and others), and ultimately lead to reimplementing the whole query
|
||||
// machinery (next step is which index is best to use, etc)
|
||||
let query = self.sql_for_populate()?;
|
||||
// To avoid borrow checker issues, we need to handle state transitions carefully
|
||||
let needs_start = matches!(self.populate_state, PopulateState::Start);
|
||||
|
||||
// Prepare the statement
|
||||
let stmt = conn.prepare(&query)?;
|
||||
if needs_start {
|
||||
// Generate the SQL query for populating the view
|
||||
// It is better to use a standard query than a cursor, for two reasons:
|
||||
// 1) Using a sql query will allow us to be much more efficient in cases where we only want
|
||||
// some rows, in particular for indexed filters
|
||||
// 2) There are two types of cursors: index and table. In some situations (like for example
|
||||
// if the table has an integer primary key), the key will be exclusively in the index
|
||||
// btree and not in the table btree. Using cursors would force us to be aware of this
|
||||
// distinction (and others), and ultimately lead to reimplementing the whole query
|
||||
// machinery (next step is which index is best to use, etc)
|
||||
let query = self.sql_for_populate()?;
|
||||
|
||||
self.populate_state = PopulateState::Processing {
|
||||
stmt: Box::new(stmt),
|
||||
rows_processed: 0,
|
||||
};
|
||||
// Continue to next state
|
||||
// Prepare the statement
|
||||
let stmt = conn.prepare(&query)?;
|
||||
|
||||
self.populate_state = PopulateState::Processing {
|
||||
stmt: Box::new(stmt),
|
||||
rows_processed: 0,
|
||||
pending_row: None,
|
||||
};
|
||||
// Continue to next state
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle Done state
|
||||
if matches!(self.populate_state, PopulateState::Done) {
|
||||
return Ok(IOResult::Done(()));
|
||||
}
|
||||
|
||||
// Handle Processing state - extract state to avoid borrow issues
|
||||
let (mut stmt, mut rows_processed, pending_row) =
|
||||
match std::mem::replace(&mut self.populate_state, PopulateState::Done) {
|
||||
PopulateState::Processing {
|
||||
stmt,
|
||||
rows_processed,
|
||||
pending_row,
|
||||
} => (stmt, rows_processed, pending_row),
|
||||
_ => unreachable!("We already handled Start and Done states"),
|
||||
};
|
||||
|
||||
// If we have a pending row from a previous I/O interruption, process it first
|
||||
if let Some((rowid, values)) = pending_row {
|
||||
// Create a single-row delta for the pending row
|
||||
let mut single_row_delta = Delta::new();
|
||||
single_row_delta.insert(rowid, values.clone());
|
||||
|
||||
// Process the pending row with the pager
|
||||
match self.merge_delta(&single_row_delta, pager.clone())? {
|
||||
IOResult::Done(_) => {
|
||||
// Row processed successfully, continue to next row
|
||||
rows_processed += 1;
|
||||
// Continue to fetch next row from statement
|
||||
}
|
||||
IOResult::IO(io) => {
|
||||
// Still not done, save state with pending row
|
||||
self.populate_state = PopulateState::Processing {
|
||||
stmt,
|
||||
rows_processed,
|
||||
pending_row: Some((rowid, values)), // Keep the pending row
|
||||
};
|
||||
return Ok(IOResult::IO(io));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PopulateState::Processing {
|
||||
stmt,
|
||||
rows_processed,
|
||||
} => {
|
||||
// Collect rows into a delta batch
|
||||
let mut batch_delta = Delta::new();
|
||||
let mut batch_count = 0;
|
||||
// Process rows one at a time - no batching
|
||||
loop {
|
||||
// This step() call resumes from where the statement left off
|
||||
match stmt.step()? {
|
||||
crate::vdbe::StepResult::Row => {
|
||||
// Get the row
|
||||
let row = stmt.row().unwrap();
|
||||
|
||||
loop {
|
||||
if batch_count >= BATCH_SIZE {
|
||||
// Process this batch through the standard pipeline
|
||||
self.merge_delta(&batch_delta);
|
||||
// Yield control after processing a batch
|
||||
// TODO: currently this inner statement is the one that is tracking completions
|
||||
// so as a stop gap we can just return a dummy completion here
|
||||
io_yield_one!(Completion::new_dummy());
|
||||
}
|
||||
// Extract values from the row
|
||||
let all_values: Vec<crate::types::Value> =
|
||||
row.get_values().cloned().collect();
|
||||
|
||||
// This step() call resumes from where the statement left off
|
||||
match stmt.step()? {
|
||||
crate::vdbe::StepResult::Row => {
|
||||
// Get the row
|
||||
let row = stmt.row().unwrap();
|
||||
|
||||
// Extract values from the row
|
||||
let all_values: Vec<crate::types::Value> =
|
||||
row.get_values().cloned().collect();
|
||||
|
||||
// Determine how to extract the rowid
|
||||
// If there's a rowid alias (INTEGER PRIMARY KEY), the rowid is one of the columns
|
||||
// Otherwise, it's the last value we explicitly selected
|
||||
let (rowid, values) = if let Some((idx, _)) =
|
||||
self.base_table.get_rowid_alias_column()
|
||||
{
|
||||
// The rowid is the value at the rowid alias column index
|
||||
let rowid = match all_values.get(idx) {
|
||||
Some(crate::types::Value::Integer(id)) => *id,
|
||||
_ => {
|
||||
// This shouldn't happen - rowid alias must be an integer
|
||||
*rows_processed += 1;
|
||||
batch_count += 1;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
// All values are table columns (no separate rowid was selected)
|
||||
(rowid, all_values)
|
||||
} else {
|
||||
// The last value is the explicitly selected rowid
|
||||
let rowid = match all_values.last() {
|
||||
Some(crate::types::Value::Integer(id)) => *id,
|
||||
_ => {
|
||||
// This shouldn't happen - rowid must be an integer
|
||||
*rows_processed += 1;
|
||||
batch_count += 1;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
// Get all values except the rowid
|
||||
let values = all_values[..all_values.len() - 1].to_vec();
|
||||
(rowid, values)
|
||||
// Determine how to extract the rowid
|
||||
// If there's a rowid alias (INTEGER PRIMARY KEY), the rowid is one of the columns
|
||||
// Otherwise, it's the last value we explicitly selected
|
||||
let (rowid, values) =
|
||||
if let Some((idx, _)) = self.base_table.get_rowid_alias_column() {
|
||||
// The rowid is the value at the rowid alias column index
|
||||
let rowid = match all_values.get(idx) {
|
||||
Some(crate::types::Value::Integer(id)) => *id,
|
||||
_ => {
|
||||
// This shouldn't happen - rowid alias must be an integer
|
||||
rows_processed += 1;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
// All values are table columns (no separate rowid was selected)
|
||||
(rowid, all_values)
|
||||
} else {
|
||||
// The last value is the explicitly selected rowid
|
||||
let rowid = match all_values.last() {
|
||||
Some(crate::types::Value::Integer(id)) => *id,
|
||||
_ => {
|
||||
// This shouldn't happen - rowid must be an integer
|
||||
rows_processed += 1;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
// Get all values except the rowid
|
||||
let values = all_values[..all_values.len() - 1].to_vec();
|
||||
(rowid, values)
|
||||
};
|
||||
|
||||
// Add to batch delta - let merge_delta handle filtering and aggregation
|
||||
batch_delta.insert(rowid, values);
|
||||
// Create a single-row delta and process it immediately
|
||||
let mut single_row_delta = Delta::new();
|
||||
single_row_delta.insert(rowid, values.clone());
|
||||
|
||||
*rows_processed += 1;
|
||||
batch_count += 1;
|
||||
// Process this single row through merge_delta with the pager
|
||||
match self.merge_delta(&single_row_delta, pager.clone())? {
|
||||
IOResult::Done(_) => {
|
||||
// Row processed successfully, continue to next row
|
||||
rows_processed += 1;
|
||||
}
|
||||
crate::vdbe::StepResult::Done => {
|
||||
// Process any remaining rows in the batch
|
||||
self.merge_delta(&batch_delta);
|
||||
// All rows processed, move to Done state
|
||||
self.populate_state = PopulateState::Done;
|
||||
return Ok(IOResult::Done(()));
|
||||
}
|
||||
crate::vdbe::StepResult::Interrupt | crate::vdbe::StepResult::Busy => {
|
||||
return Err(LimboError::Busy);
|
||||
}
|
||||
crate::vdbe::StepResult::IO => {
|
||||
// Process current batch before yielding
|
||||
self.merge_delta(&batch_delta);
|
||||
// The Statement needs to wait for IO
|
||||
io_yield_one!(Completion::new_dummy());
|
||||
IOResult::IO(io) => {
|
||||
// Save state and return I/O
|
||||
// We'll resume at the SAME row when called again (don't increment rows_processed)
|
||||
// The circuit still has unfinished work for this row
|
||||
self.populate_state = PopulateState::Processing {
|
||||
stmt,
|
||||
rows_processed, // Don't increment - row not done yet!
|
||||
pending_row: Some((rowid, values)), // Save the row for resumption
|
||||
};
|
||||
return Ok(IOResult::IO(io));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PopulateState::Done => {
|
||||
// Already populated
|
||||
return Ok(IOResult::Done(()));
|
||||
crate::vdbe::StepResult::Done => {
|
||||
// All rows processed, we're done
|
||||
self.populate_state = PopulateState::Done;
|
||||
return Ok(IOResult::Done(()));
|
||||
}
|
||||
|
||||
crate::vdbe::StepResult::Interrupt | crate::vdbe::StepResult::Busy => {
|
||||
// Save state before returning error
|
||||
self.populate_state = PopulateState::Processing {
|
||||
stmt,
|
||||
rows_processed,
|
||||
pending_row: None, // No pending row when interrupted between rows
|
||||
};
|
||||
return Err(LimboError::Busy);
|
||||
}
|
||||
|
||||
crate::vdbe::StepResult::IO => {
|
||||
// Statement needs I/O - save state and return
|
||||
self.populate_state = PopulateState::Processing {
|
||||
stmt,
|
||||
rows_processed,
|
||||
pending_row: None, // No pending row when interrupted between rows
|
||||
};
|
||||
// TODO: Get the actual I/O completion from the statement
|
||||
let completion = crate::io::Completion::new_dummy();
|
||||
return Ok(IOResult::IO(crate::types::IOCompletions::Single(
|
||||
completion,
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -555,95 +769,23 @@ impl IncrementalView {
|
||||
None
|
||||
}
|
||||
|
||||
/// Get the current records as an iterator - for cursor-based access
|
||||
pub fn iter(&self) -> impl Iterator<Item = (i64, Vec<Value>)> + '_ {
|
||||
self.stream.to_vec().into_iter().filter_map(move |row| {
|
||||
self.records
|
||||
.get(&row.rowid)
|
||||
.map(|values| (row.rowid, values.clone()))
|
||||
})
|
||||
}
|
||||
|
||||
/// Get current data merged with transaction state
|
||||
pub fn current_data(&self, tx_state: Option<&ViewTransactionState>) -> Vec<(i64, Vec<Value>)> {
|
||||
if let Some(tx_state) = tx_state {
|
||||
// Use circuit to process uncommitted changes
|
||||
let mut uncommitted = DeltaSet::new();
|
||||
uncommitted.insert(self.base_table.name.clone(), tx_state.delta.clone());
|
||||
|
||||
// Execute with uncommitted changes (won't affect circuit state)
|
||||
match self.circuit.execute(HashMap::new(), uncommitted) {
|
||||
Ok(processed_delta) => {
|
||||
// Merge processed delta with committed records
|
||||
let mut result_map: BTreeMap<i64, Vec<Value>> = self.records.clone();
|
||||
for (row, weight) in &processed_delta.changes {
|
||||
if *weight > 0 {
|
||||
result_map.insert(row.rowid, row.values.clone());
|
||||
} else if *weight < 0 {
|
||||
result_map.remove(&row.rowid);
|
||||
}
|
||||
}
|
||||
result_map.into_iter().collect()
|
||||
}
|
||||
Err(e) => {
|
||||
// Return error or panic - no fallback
|
||||
panic!("Failed to execute circuit with uncommitted data: {e:?}");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No transaction state: return committed records
|
||||
self.records.clone().into_iter().collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Merge a delta of changes into the view's current state
|
||||
pub fn merge_delta(&mut self, delta: &Delta) {
|
||||
pub fn merge_delta(
|
||||
&mut self,
|
||||
delta: &Delta,
|
||||
pager: std::rc::Rc<crate::Pager>,
|
||||
) -> crate::Result<IOResult<()>> {
|
||||
// Early return if delta is empty
|
||||
if delta.is_empty() {
|
||||
return;
|
||||
return Ok(IOResult::Done(()));
|
||||
}
|
||||
|
||||
// Use the circuit to process the delta
|
||||
// Use the circuit to process the delta and write to btree
|
||||
let mut input_data = HashMap::new();
|
||||
input_data.insert(self.base_table.name.clone(), delta.clone());
|
||||
|
||||
// If circuit hasn't been initialized yet, initialize it first
|
||||
// This happens during populate_from_table
|
||||
if !self.circuit_initialized {
|
||||
// Initialize the circuit with empty state
|
||||
self.circuit
|
||||
.initialize(HashMap::new())
|
||||
.expect("Failed to initialize circuit");
|
||||
self.circuit_initialized = true;
|
||||
}
|
||||
|
||||
// Execute the circuit to process the delta
|
||||
let current_delta = match self.circuit.execute(input_data.clone(), DeltaSet::empty()) {
|
||||
Ok(output) => {
|
||||
// Commit the changes to the circuit's internal state
|
||||
self.circuit
|
||||
.commit(input_data)
|
||||
.expect("Failed to commit to circuit");
|
||||
output
|
||||
}
|
||||
Err(e) => {
|
||||
panic!("Failed to execute circuit: {e:?}");
|
||||
}
|
||||
};
|
||||
|
||||
// Update records and stream with the processed delta
|
||||
let mut zset_delta = RowKeyZSet::new();
|
||||
|
||||
for (row, weight) in &current_delta.changes {
|
||||
if *weight > 0 {
|
||||
self.records.insert(row.rowid, row.values.clone());
|
||||
zset_delta.insert(row.clone(), 1);
|
||||
} else if *weight < 0 {
|
||||
self.records.remove(&row.rowid);
|
||||
zset_delta.insert(row.clone(), -1);
|
||||
}
|
||||
}
|
||||
|
||||
self.stream.apply_delta(&zset_delta);
|
||||
// The circuit now handles all btree I/O internally with the provided pager
|
||||
let _delta = return_if_io!(self.circuit.commit(input_data, pager));
|
||||
Ok(IOResult::Done(()))
|
||||
}
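A short usage sketch (assumed caller, not from the patch): because merge_delta is now I/O-aware, callers either loop on the IOResult or lean on the blocking helper that appears elsewhere in this diff.

    // assumed locals: view (IncrementalView), delta (Delta), pager (Rc<Pager>)
    pager.io.block(|| view.merge_delta(&delta, pager.clone()))?;

    // or, driven by hand:
    // loop {
    //     match view.merge_delta(&delta, pager.clone())? {
    //         IOResult::Done(()) => break,
    //         IOResult::IO(completions) => { /* wait for the completions, then retry */ }
    //     }
    // }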
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ use crate::{turso_assert, LimboError, Result};
|
||||
use parking_lot::Mutex;
|
||||
use rustix::fs::{self, FlockOperation, OFlags};
|
||||
use std::ptr::NonNull;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
io::ErrorKind,
|
||||
@@ -43,6 +44,10 @@ const MAX_WAIT: usize = 4;
|
||||
/// One memory arena for DB pages and another for WAL frames
|
||||
const ARENA_COUNT: usize = 2;
|
||||
|
||||
/// Arbitrary non-zero user_data for barrier operation when handling a partial writev
|
||||
/// writing a commit frame.
|
||||
const BARRIER_USER_DATA: u64 = 1;
|
||||
|
||||
pub struct UringIO {
|
||||
inner: Arc<Mutex<InnerUringIO>>,
|
||||
}
|
||||
@@ -56,6 +61,7 @@ struct WrappedIOUring {
|
||||
writev_states: HashMap<u64, WritevState>,
|
||||
overflow: VecDeque<io_uring::squeue::Entry>,
|
||||
iov_pool: IovecPool,
|
||||
pending_link: AtomicBool,
|
||||
}
|
||||
|
||||
struct InnerUringIO {
|
||||
@@ -122,6 +128,7 @@ impl UringIO {
|
||||
pending_ops: 0,
|
||||
writev_states: HashMap::new(),
|
||||
iov_pool: IovecPool::new(),
|
||||
pending_link: AtomicBool::new(false),
|
||||
},
|
||||
free_files: (0..FILES).collect(),
|
||||
free_arenas: [const { None }; ARENA_COUNT],
|
||||
@@ -153,6 +160,7 @@ macro_rules! with_fd {
|
||||
/// wrapper type to represent a possibly registered file descriptor,
|
||||
/// only used in WritevState, and piggy-backs on the available methods from
|
||||
/// `UringFile`, so we don't have to store the file on `WritevState`.
|
||||
#[derive(Clone)]
|
||||
enum Fd {
|
||||
Fixed(u32),
|
||||
RawFd(i32),
|
||||
@@ -194,10 +202,12 @@ struct WritevState {
|
||||
bufs: Vec<Arc<crate::Buffer>>,
|
||||
/// we keep the last iovec allocation alive until final CQE
|
||||
last_iov_allocation: Option<Box<[libc::iovec; MAX_IOVEC_ENTRIES]>>,
|
||||
had_partial: bool,
|
||||
linked_op: bool,
|
||||
}
|
||||
|
||||
impl WritevState {
|
||||
fn new(file: &UringFile, pos: u64, bufs: Vec<Arc<crate::Buffer>>) -> Self {
|
||||
fn new(file: &UringFile, pos: u64, linked: bool, bufs: Vec<Arc<crate::Buffer>>) -> Self {
|
||||
let file_id = file
|
||||
.id()
|
||||
.map(Fd::Fixed)
|
||||
@@ -212,6 +222,8 @@ impl WritevState {
|
||||
bufs,
|
||||
last_iov_allocation: None,
|
||||
total_len,
|
||||
had_partial: false,
|
||||
linked_op: linked,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -353,7 +365,7 @@ impl WrappedIOUring {
|
||||
}
|
||||
|
||||
/// Submit or resubmit a writev operation
|
||||
fn submit_writev(&mut self, key: u64, mut st: WritevState) {
|
||||
fn submit_writev(&mut self, key: u64, mut st: WritevState, continue_chain: bool) {
|
||||
st.free_last_iov(&mut self.iov_pool);
|
||||
let mut iov_allocation = self.iov_pool.acquire().unwrap_or_else(|| {
|
||||
// Fallback: allocate a new one if pool is exhausted
|
||||
@@ -391,7 +403,7 @@ impl WrappedIOUring {
|
||||
}
|
||||
// If we have coalesced everything into a single iovec, submit it as a single `pwrite`
|
||||
if iov_count == 1 {
|
||||
let entry = with_fd!(st.file_id, |fd| {
|
||||
let mut entry = with_fd!(st.file_id, |fd| {
|
||||
if let Some(id) = st.bufs[st.current_buffer_idx].fixed_id() {
|
||||
io_uring::opcode::WriteFixed::new(
|
||||
fd,
|
||||
@@ -413,6 +425,16 @@ impl WrappedIOUring {
|
||||
.user_data(key)
|
||||
}
|
||||
});
|
||||
|
||||
if st.linked_op && !st.had_partial {
|
||||
// Starting a new link chain
|
||||
entry = entry.flags(io_uring::squeue::Flags::IO_LINK);
|
||||
self.pending_link.store(true, Ordering::Release);
|
||||
} else if continue_chain && !st.had_partial {
|
||||
// Continue existing chain
|
||||
entry = entry.flags(io_uring::squeue::Flags::IO_LINK);
|
||||
}
|
||||
|
||||
self.submit_entry(&entry);
|
||||
return;
|
||||
}
|
||||
@@ -422,12 +444,15 @@ impl WrappedIOUring {
|
||||
let ptr = iov_allocation.as_ptr() as *mut libc::iovec;
|
||||
st.last_iov_allocation = Some(iov_allocation);
|
||||
|
||||
let entry = with_fd!(st.file_id, |fd| {
|
||||
let mut entry = with_fd!(st.file_id, |fd| {
|
||||
io_uring::opcode::Writev::new(fd, ptr, iov_count as u32)
|
||||
.offset(st.file_pos)
|
||||
.build()
|
||||
.user_data(key)
|
||||
});
|
||||
if st.linked_op {
|
||||
entry = entry.flags(io_uring::squeue::Flags::IO_LINK);
|
||||
}
|
||||
// track the current state in case we get a partial write
|
||||
self.writev_states.insert(key, st);
|
||||
self.submit_entry(&entry);
|
||||
@@ -452,6 +477,19 @@ impl WrappedIOUring {
|
||||
);
|
||||
// write complete, return iovec to pool
|
||||
state.free_last_iov(&mut self.iov_pool);
|
||||
if state.linked_op && state.had_partial {
|
||||
// if it was a linked operation, we need to submit an fsync after this writev
|
||||
// to ensure data is on disk
|
||||
self.ring.submit().expect("submit after writev");
|
||||
let file_id = state.file_id;
|
||||
let sync = with_fd!(file_id, |fd| {
|
||||
io_uring::opcode::Fsync::new(fd)
|
||||
.build()
|
||||
.user_data(BARRIER_USER_DATA)
|
||||
})
|
||||
.flags(io_uring::squeue::Flags::IO_DRAIN);
|
||||
self.submit_entry(&sync);
|
||||
}
|
||||
completion_from_key(user_data).complete(state.total_written as i32);
|
||||
}
|
||||
remaining => {
|
||||
@@ -461,8 +499,10 @@ impl WrappedIOUring {
|
||||
written,
|
||||
remaining
|
||||
);
|
||||
// partial write, submit next
|
||||
self.submit_writev(user_data, state);
|
||||
// make sure partial write is recorded, because fsync could happen after this
|
||||
// and we are not finished writing to disk
|
||||
state.had_partial = true;
|
||||
self.submit_writev(user_data, state, false);
|
||||
}
|
||||
}
|
||||
}
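For context, a minimal self-contained sketch of the io_uring linking primitive this code relies on, using the plain io_uring crate rather than the wrapper types above (fd and buf are assumed inputs): with IO_LINK, the fsync SQE is only started once the preceding write SQE succeeds.

    use io_uring::{opcode, squeue::Flags, types::Fd, IoUring};

    fn write_then_fsync(ring: &mut IoUring, fd: i32, buf: &[u8]) -> std::io::Result<()> {
        // the write carries IO_LINK, so the queued fsync waits for it to complete
        let write = opcode::Write::new(Fd(fd), buf.as_ptr(), buf.len() as u32)
            .offset(0)
            .build()
            .flags(Flags::IO_LINK)
            .user_data(1);
        let fsync = opcode::Fsync::new(Fd(fd)).build().user_data(2);
        unsafe {
            let mut sq = ring.submission();
            sq.push(&write).expect("submission queue full");
            sq.push(&fsync).expect("submission queue full");
        }
        ring.submit_and_wait(2)?;
        Ok(())
    }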
|
||||
@@ -530,6 +570,14 @@ impl IO for UringIO {
|
||||
// if we have ongoing writev state, handle it separately and don't call completion
|
||||
ring.handle_writev_completion(state, user_data, result);
|
||||
continue;
|
||||
} else if user_data == BARRIER_USER_DATA {
|
||||
// barrier operation, no completion to call
|
||||
if result < 0 {
|
||||
let err = std::io::Error::from_raw_os_error(result);
|
||||
tracing::error!("barrier operation failed: {}", err);
|
||||
return Err(err.into());
|
||||
}
|
||||
continue;
|
||||
}
|
||||
completion_from_key(user_data).complete(result)
|
||||
}
|
||||
@@ -680,7 +728,7 @@ impl File for UringFile {
|
||||
|
||||
fn pwrite(&self, pos: u64, buffer: Arc<crate::Buffer>, c: Completion) -> Result<Completion> {
|
||||
let mut io = self.io.lock();
|
||||
let write = {
|
||||
let mut write = {
|
||||
let ptr = buffer.as_ptr();
|
||||
let len = buffer.len();
|
||||
with_fd!(self, |fd| {
|
||||
@@ -708,6 +756,15 @@ impl File for UringFile {
|
||||
}
|
||||
})
|
||||
};
|
||||
if c.needs_link() {
|
||||
// Start a new link chain
|
||||
write = write.flags(io_uring::squeue::Flags::IO_LINK);
|
||||
io.ring.pending_link.store(true, Ordering::Release);
|
||||
} else if io.ring.pending_link.load(Ordering::Acquire) {
|
||||
// Continue existing link chain
|
||||
write = write.flags(io_uring::squeue::Flags::IO_LINK);
|
||||
}
|
||||
|
||||
io.ring.submit_entry(&write);
|
||||
Ok(c)
|
||||
}
|
||||
@@ -720,6 +777,8 @@ impl File for UringFile {
|
||||
.build()
|
||||
.user_data(get_key(c.clone()))
|
||||
});
|
||||
// sync always ends the chain of linked operations
|
||||
io.ring.pending_link.store(false, Ordering::Release);
|
||||
io.ring.submit_entry(&sync);
|
||||
Ok(c)
|
||||
}
|
||||
@@ -734,10 +793,14 @@ impl File for UringFile {
|
||||
if bufs.len().eq(&1) {
|
||||
return self.pwrite(pos, bufs[0].clone(), c.clone());
|
||||
}
|
||||
let linked = c.needs_link();
|
||||
tracing::trace!("pwritev(pos = {}, bufs.len() = {})", pos, bufs.len());
|
||||
// create state to track ongoing writev operation
|
||||
let state = WritevState::new(self, pos, bufs);
|
||||
self.io.lock().ring.submit_writev(get_key(c.clone()), state);
|
||||
let state = WritevState::new(self, pos, linked, bufs);
|
||||
let mut io = self.io.lock();
|
||||
let continue_chain = !linked && io.ring.pending_link.load(Ordering::Acquire);
|
||||
io.ring
|
||||
.submit_writev(get_key(c.clone()), state, continue_chain);
|
||||
Ok(c)
|
||||
}
|
||||
|
||||
@@ -746,12 +809,16 @@ impl File for UringFile {
|
||||
}
|
||||
|
||||
fn truncate(&self, len: u64, c: Completion) -> Result<Completion> {
|
||||
let truncate = with_fd!(self, |fd| {
|
||||
let mut truncate = with_fd!(self, |fd| {
|
||||
io_uring::opcode::Ftruncate::new(fd, len)
|
||||
.build()
|
||||
.user_data(get_key(c.clone()))
|
||||
});
|
||||
self.io.lock().ring.submit_entry(&truncate);
|
||||
let mut io = self.io.lock();
|
||||
if io.ring.pending_link.load(Ordering::Acquire) {
|
||||
truncate = truncate.flags(io_uring::squeue::Flags::IO_LINK);
|
||||
}
|
||||
io.ring.submit_entry(&truncate);
|
||||
Ok(c)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -135,6 +135,7 @@ struct CompletionInner {
|
||||
/// None means we completed successfully
|
||||
// Thread safe with OnceLock
|
||||
result: std::sync::OnceLock<Option<CompletionError>>,
|
||||
needs_link: bool,
|
||||
}
|
||||
|
||||
impl Debug for CompletionType {
|
||||
@@ -161,10 +162,34 @@ impl Completion {
|
||||
inner: Arc::new(CompletionInner {
|
||||
completion_type,
|
||||
result: OnceLock::new(),
|
||||
needs_link: false,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_linked(completion_type: CompletionType) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(CompletionInner {
|
||||
completion_type,
|
||||
result: OnceLock::new(),
|
||||
needs_link: true,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn needs_link(&self) -> bool {
|
||||
self.inner.needs_link
|
||||
}
|
||||
|
||||
pub fn new_write_linked<F>(complete: F) -> Self
|
||||
where
|
||||
F: Fn(Result<i32, CompletionError>) + 'static,
|
||||
{
|
||||
Self::new_linked(CompletionType::Write(WriteCompletion::new(Box::new(
|
||||
complete,
|
||||
))))
|
||||
}
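A small, hedged example of the new constructor: a linked write completion asks the backend to start a link chain at this write (for instance so a commit-frame fsync is ordered after it), while a plain Completion::new_write keeps needs_link() false.

    let c = Completion::new_write_linked(|res| match res {
        Ok(bytes) => tracing::trace!("linked write finished: {bytes} bytes"),
        Err(e) => tracing::error!("linked write failed: {e:?}"),
    });
    assert!(c.needs_link());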
|
||||
|
||||
pub fn new_write<F>(complete: F) -> Self
|
||||
where
|
||||
F: Fn(Result<i32, CompletionError>) + 'static,
|
||||
@@ -226,27 +251,31 @@ impl Completion {
|
||||
}
|
||||
|
||||
pub fn complete(&self, result: i32) {
|
||||
if self.inner.result.set(None).is_ok() {
|
||||
let result = Ok(result);
|
||||
match &self.inner.completion_type {
|
||||
CompletionType::Read(r) => r.callback(result),
|
||||
CompletionType::Write(w) => w.callback(result),
|
||||
CompletionType::Sync(s) => s.callback(result), // fix
|
||||
CompletionType::Truncate(t) => t.callback(result),
|
||||
};
|
||||
}
|
||||
let result = Ok(result);
|
||||
match &self.inner.completion_type {
|
||||
CompletionType::Read(r) => r.callback(result),
|
||||
CompletionType::Write(w) => w.callback(result),
|
||||
CompletionType::Sync(s) => s.callback(result), // fix
|
||||
CompletionType::Truncate(t) => t.callback(result),
|
||||
};
|
||||
self.inner
|
||||
.result
|
||||
.set(None)
|
||||
.expect("result must be set only once");
|
||||
}
|
||||
|
||||
pub fn error(&self, err: CompletionError) {
|
||||
if self.inner.result.set(Some(err)).is_ok() {
|
||||
let result = Err(err);
|
||||
match &self.inner.completion_type {
|
||||
CompletionType::Read(r) => r.callback(result),
|
||||
CompletionType::Write(w) => w.callback(result),
|
||||
CompletionType::Sync(s) => s.callback(result), // fix
|
||||
CompletionType::Truncate(t) => t.callback(result),
|
||||
};
|
||||
}
|
||||
let result = Err(err);
|
||||
match &self.inner.completion_type {
|
||||
CompletionType::Read(r) => r.callback(result),
|
||||
CompletionType::Write(w) => w.callback(result),
|
||||
CompletionType::Sync(s) => s.callback(result), // fix
|
||||
CompletionType::Truncate(t) => t.callback(result),
|
||||
};
|
||||
self.inner
|
||||
.result
|
||||
.set(Some(err))
|
||||
.expect("result must be set only once");
|
||||
}
|
||||
|
||||
pub fn abort(&self) {
|
||||
|
||||
@@ -841,6 +841,18 @@ impl JsonbHeader {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ArrayIteratorState {
|
||||
cursor: usize,
|
||||
end: usize,
|
||||
index: usize,
|
||||
}
|
||||
|
||||
pub struct ObjectIteratorState {
|
||||
cursor: usize,
|
||||
end: usize,
|
||||
index: usize,
|
||||
}
|
||||
|
||||
impl Jsonb {
|
||||
pub fn new(capacity: usize, data: Option<&[u8]>) -> Self {
|
||||
if let Some(data) = data {
|
||||
@@ -2872,6 +2884,94 @@ impl Jsonb {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn array_iterator(&self) -> Result<ArrayIteratorState> {
|
||||
let (hdr, off) = self.read_header(0)?;
|
||||
match hdr {
|
||||
JsonbHeader(ElementType::ARRAY, len) => Ok(ArrayIteratorState {
|
||||
cursor: off,
|
||||
end: off + len,
|
||||
index: 0,
|
||||
}),
|
||||
_ => bail_parse_error!("jsonb.array_iterator(): not an array"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn array_iterator_next(
|
||||
&self,
|
||||
st: &ArrayIteratorState,
|
||||
) -> Option<((usize, Jsonb), ArrayIteratorState)> {
|
||||
if st.cursor >= st.end {
|
||||
return None;
|
||||
}
|
||||
|
||||
let (JsonbHeader(_, payload_len), header_len) = self.read_header(st.cursor).ok()?;
|
||||
let start = st.cursor;
|
||||
let stop = start.checked_add(header_len + payload_len)?;
|
||||
|
||||
if stop > st.end || stop > self.data.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let elem = Jsonb::new(stop - start, Some(&self.data[start..stop]));
|
||||
let next = ArrayIteratorState {
|
||||
cursor: stop,
|
||||
end: st.end,
|
||||
index: st.index + 1,
|
||||
};
|
||||
|
||||
Some(((st.index, elem), next))
|
||||
}
|
||||
|
||||
pub fn object_iterator(&self) -> Result<ObjectIteratorState> {
|
||||
let (hdr, off) = self.read_header(0)?;
|
||||
match hdr {
|
||||
JsonbHeader(ElementType::OBJECT, len) => Ok(ObjectIteratorState {
|
||||
cursor: off,
|
||||
end: off + len,
|
||||
index: 0,
|
||||
}),
|
||||
_ => bail_parse_error!("jsonb.object_iterator(): not an object"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn object_iterator_next(
|
||||
&self,
|
||||
st: &ObjectIteratorState,
|
||||
) -> Option<((usize, Jsonb, Jsonb), ObjectIteratorState)> {
|
||||
if st.cursor >= st.end {
|
||||
return None;
|
||||
}
|
||||
|
||||
// key
|
||||
let (JsonbHeader(key_ty, key_len), key_hdr_len) = self.read_header(st.cursor).ok()?;
|
||||
if !key_ty.is_valid_key() {
|
||||
return None;
|
||||
}
|
||||
let key_start = st.cursor;
|
||||
let key_stop = key_start.checked_add(key_hdr_len + key_len)?;
|
||||
if key_stop > st.end || key_stop > self.data.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
// value
|
||||
let (JsonbHeader(_, val_len), val_hdr_len) = self.read_header(key_stop).ok()?;
|
||||
let val_start = key_stop;
|
||||
let val_stop = val_start.checked_add(val_hdr_len + val_len)?;
|
||||
if val_stop > st.end || val_stop > self.data.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let key = Jsonb::new(key_stop - key_start, Some(&self.data[key_start..key_stop]));
|
||||
let value = Jsonb::new(val_stop - val_start, Some(&self.data[val_start..val_stop]));
|
||||
let next = ObjectIteratorState {
|
||||
cursor: val_stop,
|
||||
end: st.end,
|
||||
index: st.index + 1,
|
||||
};
|
||||
|
||||
Some(((st.index, key, value), next))
|
||||
}
|
||||
}
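A hedged usage sketch of the stateless iterators added above (the walk helper is hypothetical): each call returns the element plus the state to pass into the next call, so the caller owns the cursor position.

    fn walk(doc: &Jsonb) -> crate::Result<()> {
        match doc.element_type()? {
            ElementType::ARRAY => {
                let mut st = doc.array_iterator()?;
                while let Some(((idx, elem), next)) = doc.array_iterator_next(&st) {
                    println!("[{idx}] = {}", elem.to_string());
                    st = next;
                }
            }
            ElementType::OBJECT => {
                let mut st = doc.object_iterator()?;
                while let Some(((_, key, value), next)) = doc.object_iterator_next(&st) {
                    println!("{} = {}", key.to_string(), value.to_string());
                    st = next;
                }
            }
            _ => println!("{}", doc.to_string()),
        }
        Ok(())
    }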
|
||||
|
||||
impl std::str::FromStr for Jsonb {
|
||||
|
||||
@@ -3,6 +3,7 @@ mod error;
pub(crate) mod jsonb;
mod ops;
pub(crate) mod path;
pub(crate) mod vtab;

use crate::json::error::Error as JsonError;
pub use crate::json::ops::{
|
||||
|
||||
436
core/json/vtab.rs
Normal file
@@ -0,0 +1,436 @@
|
||||
use std::{cell::RefCell, result::Result, sync::Arc};
|
||||
|
||||
use turso_ext::{ConstraintUsage, ResultCode};
|
||||
|
||||
use crate::{
|
||||
json::{
|
||||
convert_dbtype_to_jsonb,
|
||||
jsonb::{ArrayIteratorState, Jsonb, ObjectIteratorState},
|
||||
vtab::columns::Columns,
|
||||
Conv,
|
||||
},
|
||||
types::Text,
|
||||
vtab::{InternalVirtualTable, InternalVirtualTableCursor},
|
||||
Connection, LimboError, Value,
|
||||
};
|
||||
|
||||
use super::jsonb;
|
||||
|
||||
pub struct JsonEachVirtualTable;
|
||||
|
||||
const COL_KEY: usize = 0;
|
||||
const COL_VALUE: usize = 1;
|
||||
const COL_TYPE: usize = 2;
|
||||
const COL_ATOM: usize = 3;
|
||||
const COL_ID: usize = 4;
|
||||
const COL_PARENT: usize = 5;
|
||||
const COL_FULLKEY: usize = 6;
|
||||
const COL_PATH: usize = 7;
|
||||
const COL_JSON: usize = 8;
|
||||
const COL_ROOT: usize = 9;
|
||||
|
||||
impl InternalVirtualTable for JsonEachVirtualTable {
|
||||
fn name(&self) -> String {
|
||||
"json_each".to_owned()
|
||||
}
|
||||
|
||||
fn open(
|
||||
&self,
|
||||
_conn: Arc<Connection>,
|
||||
) -> crate::Result<std::sync::Arc<RefCell<(dyn InternalVirtualTableCursor + 'static)>>> {
|
||||
Ok(Arc::new(RefCell::new(JsonEachCursor::default())))
|
||||
}
|
||||
|
||||
fn best_index(
|
||||
&self,
|
||||
constraints: &[turso_ext::ConstraintInfo],
|
||||
_order_by: &[turso_ext::OrderByInfo],
|
||||
) -> Result<turso_ext::IndexInfo, ResultCode> {
|
||||
use turso_ext::ConstraintOp;
|
||||
|
||||
let mut usages = vec![
|
||||
ConstraintUsage {
|
||||
argv_index: None,
|
||||
omit: false
|
||||
};
|
||||
constraints.len()
|
||||
];
|
||||
let mut have_json = false;
|
||||
|
||||
for (i, c) in constraints.iter().enumerate() {
|
||||
if c.usable && c.op == ConstraintOp::Eq && c.column_index as usize == COL_JSON {
|
||||
usages[i] = ConstraintUsage {
|
||||
argv_index: Some(1),
|
||||
omit: true,
|
||||
};
|
||||
have_json = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(turso_ext::IndexInfo {
|
||||
idx_num: i32::from(have_json),
|
||||
idx_str: None,
|
||||
order_by_consumed: false,
|
||||
estimated_cost: if have_json { 10.0 } else { 1_000_000.0 },
|
||||
estimated_rows: if have_json { 100 } else { u32::MAX },
|
||||
constraint_usages: usages,
|
||||
})
|
||||
}
|
||||
|
||||
fn sql(&self) -> String {
|
||||
"CREATE TABLE json_each(
|
||||
key ANY, -- key for current element relative to its parent
|
||||
value ANY, -- value for the current element
|
||||
type TEXT, -- 'object','array','string','integer', etc.
|
||||
atom ANY, -- value for primitive types, null for array & object
|
||||
id INTEGER, -- integer ID for this element
|
||||
parent INTEGER, -- integer ID for the parent of this element
|
||||
fullkey TEXT, -- full path describing the current element
|
||||
path TEXT, -- path to the container of the current row
|
||||
json JSON HIDDEN, -- 1st input parameter: the raw JSON
|
||||
root TEXT HIDDEN -- 2nd input parameter: the PATH at which to start
|
||||
);"
|
||||
.to_owned()
|
||||
}
|
||||
}
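A hedged end-to-end sketch of consuming the virtual table (conn is an assumed open Connection; prepare/step/row are the statement APIs that appear elsewhere in this diff):

    let mut stmt = conn.prepare(
        r#"SELECT key, value, type, fullkey FROM json_each('{"a": 1, "b": [2, 3]}')"#,
    )?;
    // a real driver would also handle StepResult::IO / Busy / Interrupt
    while let crate::vdbe::StepResult::Row = stmt.step()? {
        let row = stmt.row().unwrap();
        let values: Vec<crate::types::Value> = row.get_values().cloned().collect();
        println!("{values:?}");
    }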
|
||||
|
||||
impl std::fmt::Debug for JsonEachVirtualTable {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("JsonEachVirtualTable").finish()
|
||||
}
|
||||
}
|
||||
|
||||
enum IteratorState {
|
||||
Array(ArrayIteratorState),
|
||||
Object(ObjectIteratorState),
|
||||
Primitive,
|
||||
None,
|
||||
}
|
||||
|
||||
pub struct JsonEachCursor {
|
||||
rowid: i64,
|
||||
no_more_rows: bool,
|
||||
json: Jsonb,
|
||||
iterator_state: IteratorState,
|
||||
columns: Columns,
|
||||
}
|
||||
|
||||
impl Default for JsonEachCursor {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
rowid: 0,
|
||||
no_more_rows: false,
|
||||
json: Jsonb::new(0, None),
|
||||
iterator_state: IteratorState::None,
|
||||
columns: Columns::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl InternalVirtualTableCursor for JsonEachCursor {
|
||||
fn filter(
|
||||
&mut self,
|
||||
args: &[Value],
|
||||
_idx_str: Option<String>,
|
||||
_idx_num: i32,
|
||||
) -> Result<bool, LimboError> {
|
||||
if args.is_empty() {
|
||||
return Ok(false);
|
||||
}
|
||||
if args.len() == 2 {
|
||||
return Err(LimboError::InvalidArgument(
|
||||
"2-arg json_each is not supported yet".to_owned(),
|
||||
));
|
||||
}
|
||||
if args.len() != 1 && args.len() != 2 {
|
||||
return Err(LimboError::InvalidArgument(
|
||||
"json_each accepts 1 or 2 arguments".to_owned(),
|
||||
));
|
||||
}
|
||||
|
||||
let db_value = &args[0];
|
||||
|
||||
let jsonb = convert_dbtype_to_jsonb(db_value, Conv::Strict)?;
|
||||
|
||||
let element_type = jsonb.element_type()?;
|
||||
self.json = jsonb;
|
||||
|
||||
match element_type {
|
||||
jsonb::ElementType::ARRAY => {
|
||||
let iter = self.json.array_iterator()?;
|
||||
self.iterator_state = IteratorState::Array(iter);
|
||||
}
|
||||
jsonb::ElementType::OBJECT => {
|
||||
let iter = self.json.object_iterator()?;
|
||||
self.iterator_state = IteratorState::Object(iter);
|
||||
}
|
||||
jsonb::ElementType::NULL
|
||||
| jsonb::ElementType::TRUE
|
||||
| jsonb::ElementType::FALSE
|
||||
| jsonb::ElementType::INT
|
||||
| jsonb::ElementType::INT5
|
||||
| jsonb::ElementType::FLOAT
|
||||
| jsonb::ElementType::FLOAT5
|
||||
| jsonb::ElementType::TEXT
|
||||
| jsonb::ElementType::TEXT5
|
||||
| jsonb::ElementType::TEXTJ
|
||||
| jsonb::ElementType::TEXTRAW => {
|
||||
self.iterator_state = IteratorState::Primitive;
|
||||
}
|
||||
jsonb::ElementType::RESERVED1
|
||||
| jsonb::ElementType::RESERVED2
|
||||
| jsonb::ElementType::RESERVED3 => {
|
||||
unreachable!("element type not supported: {element_type:?}");
|
||||
}
|
||||
};
|
||||
|
||||
self.next()
|
||||
}
|
||||
|
||||
fn next(&mut self) -> Result<bool, LimboError> {
|
||||
self.rowid += 1;
|
||||
if self.no_more_rows {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
match &self.iterator_state {
|
||||
IteratorState::Array(state) => {
|
||||
let Some(((idx, jsonb), new_state)) = self.json.array_iterator_next(state) else {
|
||||
self.no_more_rows = true;
|
||||
return Ok(false);
|
||||
};
|
||||
self.iterator_state = IteratorState::Array(new_state);
|
||||
self.columns = Columns::new(columns::Key::Integer(idx as i64), jsonb);
|
||||
}
|
||||
IteratorState::Object(state) => {
|
||||
let Some(((_idx, key, value), new_state)): Option<(
|
||||
(usize, Jsonb, Jsonb),
|
||||
ObjectIteratorState,
|
||||
)> = self.json.object_iterator_next(state) else {
|
||||
self.no_more_rows = true;
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
self.iterator_state = IteratorState::Object(new_state);
|
||||
let key = key.to_string();
|
||||
self.columns = Columns::new(columns::Key::String(key), value);
|
||||
}
|
||||
IteratorState::Primitive => {
|
||||
let json = std::mem::replace(&mut self.json, Jsonb::new(0, None));
|
||||
self.columns = Columns::new_from_primitive(json);
|
||||
self.no_more_rows = true;
|
||||
}
|
||||
IteratorState::None => unreachable!(),
|
||||
};
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn rowid(&self) -> i64 {
|
||||
self.rowid
|
||||
}
|
||||
|
||||
fn column(&self, idx: usize) -> Result<Value, LimboError> {
|
||||
Ok(match idx {
|
||||
COL_KEY => self.columns.key(),
|
||||
COL_VALUE => self.columns.value()?,
|
||||
COL_TYPE => self.columns.ttype(),
|
||||
COL_ATOM => self.columns.atom()?,
|
||||
COL_ID => Value::Integer(self.rowid),
|
||||
COL_PARENT => self.columns.parent(),
|
||||
COL_FULLKEY => self.columns.fullkey(),
|
||||
COL_PATH => self.columns.path(),
|
||||
COL_ROOT => Value::Text(Text::new("json, todo")),
|
||||
_ => Value::Null,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
mod columns {
|
||||
use crate::{
|
||||
json::{
|
||||
json_string_to_db_type,
|
||||
jsonb::{self, ElementType, Jsonb},
|
||||
OutputVariant,
|
||||
},
|
||||
types::Text,
|
||||
LimboError, Value,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) enum Key {
|
||||
Integer(i64),
|
||||
String(String),
|
||||
}
|
||||
|
||||
impl Key {
|
||||
fn empty() -> Self {
|
||||
Self::Integer(0)
|
||||
}
|
||||
|
||||
fn fullkey_representation(&self) -> Value {
|
||||
match self {
|
||||
Key::Integer(ref i) => Value::Text(Text::new(&format!("$[{i}]"))),
|
||||
Key::String(ref text) => {
|
||||
let mut needs_quoting: bool = false;
|
||||
|
||||
let mut text = (text[1..text.len() - 1]).to_owned();
|
||||
if text.contains('.') || text.contains(" ") || text.contains('"') {
|
||||
needs_quoting = true;
|
||||
}
|
||||
|
||||
if needs_quoting {
|
||||
text = format!("\"{text}\"");
|
||||
}
|
||||
let s = format!("$.{text}");
|
||||
|
||||
Value::Text(Text::new(&s))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn key_representation(&self) -> Value {
|
||||
match self {
|
||||
Key::Integer(ref i) => Value::Integer(*i),
|
||||
Key::String(ref s) => Value::Text(Text::new(
|
||||
&s[1..s.len() - 1].to_owned().replace("\\\"", "\""),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
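Illustrative expected mappings for the two representations above (assumed, for readability; quoting kicks in when the key contains '.', a space, or '"'):

    //   Key::Integer(2)          -> key 2        fullkey "$[2]"
    //   Key::String("\"a\"")     -> key "a"      fullkey "$.a"
    //   Key::String("\"a.b\"")   -> key "a.b"    fullkey "$.\"a.b\""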
|
||||
|
||||
pub(super) struct Columns {
|
||||
key: Key,
|
||||
value: Jsonb,
|
||||
is_primitive: bool,
|
||||
}
|
||||
|
||||
impl Default for Columns {
|
||||
fn default() -> Columns {
|
||||
Self {
|
||||
key: Key::empty(),
|
||||
value: Jsonb::new(0, None),
|
||||
is_primitive: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Columns {
|
||||
pub(super) fn new(key: Key, value: Jsonb) -> Self {
|
||||
Self {
|
||||
key,
|
||||
value,
|
||||
is_primitive: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn new_from_primitive(value: Jsonb) -> Self {
|
||||
Self {
|
||||
key: Key::empty(),
|
||||
value,
|
||||
is_primitive: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn atom(&self) -> Result<Value, LimboError> {
|
||||
Self::atom_from_value(&self.value)
|
||||
}
|
||||
|
||||
pub(super) fn value(&self) -> Result<Value, LimboError> {
|
||||
let element_type = self.value.element_type()?;
|
||||
Ok(match element_type {
|
||||
ElementType::ARRAY | ElementType::OBJECT => {
|
||||
json_string_to_db_type(self.value.clone(), element_type, OutputVariant::String)?
|
||||
}
|
||||
_ => Self::atom_from_value(&self.value)?,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn key(&self) -> Value {
|
||||
if self.is_primitive {
|
||||
return Value::Null;
|
||||
}
|
||||
self.key.key_representation()
|
||||
}
|
||||
|
||||
fn atom_from_value(value: &Jsonb) -> Result<Value, LimboError> {
|
||||
let element_type = value.element_type().expect("invalid value");
|
||||
let string: Result<Value, LimboError> = match element_type {
|
||||
jsonb::ElementType::NULL => Ok(Value::Null),
|
||||
jsonb::ElementType::TRUE => Ok(Value::Integer(1)),
|
||||
jsonb::ElementType::FALSE => Ok(Value::Integer(0)),
|
||||
jsonb::ElementType::INT | jsonb::ElementType::INT5 => Self::jsonb_to_integer(value),
|
||||
jsonb::ElementType::FLOAT | jsonb::ElementType::FLOAT5 => {
|
||||
Self::jsonb_to_float(value)
|
||||
}
|
||||
jsonb::ElementType::TEXT
|
||||
| jsonb::ElementType::TEXTJ
|
||||
| jsonb::ElementType::TEXT5
|
||||
| jsonb::ElementType::TEXTRAW => {
|
||||
let s = value.to_string();
|
||||
let s = (s[1..s.len() - 1]).to_string();
|
||||
Ok(Value::Text(Text::new(&s)))
|
||||
}
|
||||
jsonb::ElementType::ARRAY => Ok(Value::Null),
|
||||
jsonb::ElementType::OBJECT => Ok(Value::Null),
|
||||
jsonb::ElementType::RESERVED1 => Ok(Value::Null),
|
||||
jsonb::ElementType::RESERVED2 => Ok(Value::Null),
|
||||
jsonb::ElementType::RESERVED3 => Ok(Value::Null),
|
||||
};
|
||||
|
||||
string
|
||||
}
|
||||
|
||||
fn jsonb_to_integer(value: &Jsonb) -> Result<Value, LimboError> {
|
||||
let string = value.to_string();
|
||||
let int = string.parse::<i64>()?;
|
||||
|
||||
Ok(Value::Integer(int))
|
||||
}
|
||||
|
||||
fn jsonb_to_float(value: &Jsonb) -> Result<Value, LimboError> {
|
||||
let string = value.to_string();
|
||||
let float = string.parse::<f64>()?;
|
||||
|
||||
Ok(Value::Float(float))
|
||||
}
|
||||
|
||||
pub(super) fn fullkey(&self) -> Value {
|
||||
if self.is_primitive {
|
||||
return Value::Text(Text::new("$"));
|
||||
}
|
||||
self.key.fullkey_representation()
|
||||
}
|
||||
|
||||
pub(super) fn path(&self) -> Value {
|
||||
Value::Text(Text::new("$"))
|
||||
}
|
||||
|
||||
pub(super) fn parent(&self) -> Value {
|
||||
Value::Null
|
||||
}
|
||||
|
||||
pub(super) fn ttype(&self) -> Value {
|
||||
let element_type = self.value.element_type().expect("invalid value");
|
||||
let ttype = match element_type {
|
||||
jsonb::ElementType::NULL => "null",
|
||||
jsonb::ElementType::TRUE => "true",
|
||||
jsonb::ElementType::FALSE => "false",
|
||||
jsonb::ElementType::INT | jsonb::ElementType::INT5 => "integer",
|
||||
jsonb::ElementType::FLOAT | jsonb::ElementType::FLOAT5 => "real",
|
||||
jsonb::ElementType::TEXT
|
||||
| jsonb::ElementType::TEXTJ
|
||||
| jsonb::ElementType::TEXT5
|
||||
| jsonb::ElementType::TEXTRAW => "text",
|
||||
jsonb::ElementType::ARRAY => "array",
|
||||
jsonb::ElementType::OBJECT => "object",
|
||||
jsonb::ElementType::RESERVED1
|
||||
| jsonb::ElementType::RESERVED2
|
||||
| jsonb::ElementType::RESERVED3 => unreachable!(),
|
||||
};
|
||||
|
||||
Value::Text(Text::new(ttype))
|
||||
}
|
||||
}
|
||||
}
|
||||
202
core/lib.rs
@@ -32,7 +32,6 @@ mod uuid;
|
||||
mod vdbe;
|
||||
mod vector;
|
||||
mod vtab;
|
||||
mod vtab_view;
|
||||
|
||||
#[cfg(feature = "fuzz")]
|
||||
pub mod numeric;
|
||||
@@ -40,7 +39,7 @@ pub mod numeric;
|
||||
#[cfg(not(feature = "fuzz"))]
|
||||
mod numeric;
|
||||
|
||||
use crate::incremental::view::ViewTransactionState;
|
||||
use crate::incremental::view::AllViewsTxState;
|
||||
use crate::storage::encryption::CipherMode;
|
||||
use crate::translate::optimizer::optimize_plan;
|
||||
use crate::translate::pragma::TURSO_CDC_DEFAULT_TABLE_NAME;
|
||||
@@ -65,20 +64,23 @@ use parking_lot::RwLock;
|
||||
use schema::Schema;
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
cell::{Cell, RefCell, UnsafeCell},
|
||||
cell::{Cell, RefCell},
|
||||
collections::HashMap,
|
||||
fmt::{self, Display},
|
||||
io::Write,
|
||||
num::NonZero,
|
||||
ops::Deref,
|
||||
rc::Rc,
|
||||
sync::{atomic::AtomicUsize, Arc, LazyLock, Mutex, Weak},
|
||||
sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc, LazyLock, Mutex, Weak,
|
||||
},
|
||||
};
|
||||
#[cfg(feature = "fs")]
|
||||
use storage::database::DatabaseFile;
|
||||
pub use storage::database::IOContext;
|
||||
pub use storage::encryption::{EncryptionContext, EncryptionKey};
|
||||
use storage::page_cache::DumbLruPageCache;
|
||||
use storage::page_cache::PageCache;
|
||||
use storage::pager::{AtomicDbState, DbState};
|
||||
use storage::sqlite3_ondisk::PageSize;
|
||||
pub use storage::{
|
||||
@@ -185,8 +187,8 @@ pub struct Database {
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
// Shared structures of a Database are the parts that are common to multiple threads that might
|
||||
// create DB connections.
|
||||
_shared_page_cache: Arc<RwLock<DumbLruPageCache>>,
|
||||
maybe_shared_wal: RwLock<Option<Arc<UnsafeCell<WalFileShared>>>>,
|
||||
_shared_page_cache: Arc<RwLock<PageCache>>,
|
||||
shared_wal: Arc<RwLock<WalFileShared>>,
|
||||
db_state: Arc<AtomicDbState>,
|
||||
init_lock: Arc<Mutex<()>>,
|
||||
open_flags: OpenFlags,
|
||||
@@ -227,9 +229,9 @@ impl fmt::Debug for Database {
|
||||
};
|
||||
debug_struct.field("init_lock", &init_lock_status);
|
||||
|
||||
let wal_status = match self.maybe_shared_wal.try_read().as_deref() {
|
||||
Some(Some(_)) => "present",
|
||||
Some(None) => "none",
|
||||
let wal_status = match self.shared_wal.try_read() {
|
||||
Some(wal) if wal.enabled.load(Ordering::Relaxed) => "enabled",
|
||||
Some(_) => "disabled",
|
||||
None => "locked_for_write",
|
||||
};
|
||||
debug_struct.field("wal_state", &wal_status);
|
||||
@@ -365,7 +367,7 @@ impl Database {
|
||||
flags: OpenFlags,
|
||||
opts: DatabaseOpts,
|
||||
) -> Result<Arc<Database>> {
|
||||
let maybe_shared_wal = WalFileShared::open_shared_if_exists(&io, wal_path)?;
|
||||
let shared_wal = WalFileShared::open_shared_if_exists(&io, wal_path)?;
|
||||
|
||||
let mv_store = if opts.enable_mvcc {
|
||||
Some(Arc::new(MvStore::new(
|
||||
@@ -383,7 +385,7 @@ impl Database {
|
||||
DbState::Initialized
|
||||
};
|
||||
|
||||
let shared_page_cache = Arc::new(RwLock::new(DumbLruPageCache::default()));
|
||||
let shared_page_cache = Arc::new(RwLock::new(PageCache::default()));
|
||||
let syms = SymbolTable::new();
|
||||
let arena_size = if std::env::var("TESTING").is_ok_and(|v| v.eq_ignore_ascii_case("true")) {
|
||||
BufferPool::TEST_ARENA_SIZE
|
||||
@@ -397,7 +399,7 @@ impl Database {
|
||||
wal_path: wal_path.to_string(),
|
||||
schema: Mutex::new(Arc::new(Schema::new(opts.enable_indexes))),
|
||||
_shared_page_cache: shared_page_cache.clone(),
|
||||
maybe_shared_wal: RwLock::new(maybe_shared_wal),
|
||||
shared_wal,
|
||||
db_file,
|
||||
builtin_syms: syms.into(),
|
||||
io: io.clone(),
|
||||
@@ -438,13 +440,6 @@ impl Database {
|
||||
Ok(())
|
||||
})?;
|
||||
}
|
||||
// FIXME: the correct way to do this is to just materialize the view.
|
||||
// But this will allow us to keep going.
|
||||
let conn = db.connect()?;
|
||||
let pager = conn.pager.borrow().clone();
|
||||
pager
|
||||
.io
|
||||
.block(|| conn.schema.borrow().populate_materialized_views(&conn))?;
|
||||
Ok(db)
|
||||
}
|
||||
|
||||
@@ -486,7 +481,7 @@ impl Database {
|
||||
attached_databases: RefCell::new(DatabaseCatalog::new()),
|
||||
query_only: Cell::new(false),
|
||||
mv_tx_id: Cell::new(None),
|
||||
view_transaction_states: RefCell::new(HashMap::new()),
|
||||
view_transaction_states: AllViewsTxState::new(),
|
||||
metrics: RefCell::new(ConnectionMetrics::new()),
|
||||
is_nested_stmt: Cell::new(false),
|
||||
encryption_key: RefCell::new(None),
|
||||
@@ -534,10 +529,10 @@ impl Database {
|
||||
/// 2. PageSize::default(), i.e. 4096
|
||||
fn determine_actual_page_size(
|
||||
&self,
|
||||
maybe_shared_wal: Option<&WalFileShared>,
|
||||
shared_wal: &WalFileShared,
|
||||
requested_page_size: Option<usize>,
|
||||
) -> Result<PageSize> {
|
||||
if let Some(shared_wal) = maybe_shared_wal {
|
||||
if shared_wal.enabled.load(Ordering::Relaxed) {
|
||||
let size_in_wal = shared_wal.page_size();
|
||||
if size_in_wal != 0 {
|
||||
let Some(page_size) = PageSize::new(size_in_wal) else {
|
||||
@@ -560,13 +555,12 @@ impl Database {
|
||||
}
|
||||
|
||||
fn init_pager(&self, requested_page_size: Option<usize>) -> Result<Pager> {
|
||||
// Open existing WAL file if present
|
||||
let mut maybe_shared_wal = self.maybe_shared_wal.write();
|
||||
if let Some(shared_wal) = maybe_shared_wal.clone() {
|
||||
let page_size = self.determine_actual_page_size(
|
||||
Some(unsafe { &*shared_wal.get() }),
|
||||
requested_page_size,
|
||||
)?;
|
||||
// Check if WAL is enabled
|
||||
let shared_wal = self.shared_wal.read();
|
||||
if shared_wal.enabled.load(Ordering::Relaxed) {
|
||||
let page_size = self.determine_actual_page_size(&shared_wal, requested_page_size)?;
|
||||
drop(shared_wal);
|
||||
|
||||
let buffer_pool = self.buffer_pool.clone();
|
||||
if self.db_state.is_initialized() {
|
||||
buffer_pool.finalize_with_page_size(page_size.get() as usize)?;
|
||||
@@ -575,14 +569,14 @@ impl Database {
|
||||
let db_state = self.db_state.clone();
|
||||
let wal = Rc::new(RefCell::new(WalFile::new(
|
||||
self.io.clone(),
|
||||
shared_wal,
|
||||
self.shared_wal.clone(),
|
||||
buffer_pool.clone(),
|
||||
)));
|
||||
let pager = Pager::new(
|
||||
self.db_file.clone(),
|
||||
Some(wal),
|
||||
self.io.clone(),
|
||||
Arc::new(RwLock::new(DumbLruPageCache::default())),
|
||||
Arc::new(RwLock::new(PageCache::default())),
|
||||
buffer_pool.clone(),
|
||||
db_state,
|
||||
self.init_lock.clone(),
|
||||
@@ -590,9 +584,10 @@ impl Database {
|
||||
pager.page_size.set(Some(page_size));
|
||||
return Ok(pager);
|
||||
}
|
||||
let buffer_pool = self.buffer_pool.clone();
|
||||
let page_size = self.determine_actual_page_size(&shared_wal, requested_page_size)?;
|
||||
drop(shared_wal);
|
||||
|
||||
let page_size = self.determine_actual_page_size(None, requested_page_size)?;
|
||||
let buffer_pool = self.buffer_pool.clone();
|
||||
|
||||
if self.db_state.is_initialized() {
|
||||
buffer_pool.finalize_with_page_size(page_size.get() as usize)?;
|
||||
@@ -604,7 +599,7 @@ impl Database {
|
||||
self.db_file.clone(),
|
||||
None,
|
||||
self.io.clone(),
|
||||
Arc::new(RwLock::new(DumbLruPageCache::default())),
|
||||
Arc::new(RwLock::new(PageCache::default())),
|
||||
buffer_pool.clone(),
|
||||
db_state,
|
||||
Arc::new(Mutex::new(())),
|
||||
@@ -614,13 +609,16 @@ impl Database {
|
||||
let file = self
|
||||
.io
|
||||
.open_file(&self.wal_path, OpenFlags::Create, false)?;
|
||||
let real_shared_wal = WalFileShared::new_shared(file)?;
|
||||
// Modify Database::maybe_shared_wal to point to the new WAL file so that other connections
|
||||
// can open the existing WAL.
|
||||
*maybe_shared_wal = Some(real_shared_wal.clone());
|
||||
|
||||
// Enable WAL in the existing shared instance
|
||||
{
|
||||
let mut shared_wal = self.shared_wal.write();
|
||||
shared_wal.create(file)?;
|
||||
}
|
||||
|
||||
let wal = Rc::new(RefCell::new(WalFile::new(
|
||||
self.io.clone(),
|
||||
real_shared_wal,
|
||||
self.shared_wal.clone(),
|
||||
buffer_pool,
|
||||
)));
|
||||
pager.set_wal(wal);
|
||||
@@ -628,6 +626,38 @@ impl Database {
|
||||
Ok(pager)
|
||||
}
|
||||
|
||||
#[cfg(feature = "fs")]
|
||||
pub fn io_for_path(path: &str) -> Result<Arc<dyn IO>> {
|
||||
use crate::util::MEMORY_PATH;
|
||||
let io: Arc<dyn IO> = match path.trim() {
|
||||
MEMORY_PATH => Arc::new(MemoryIO::new()),
|
||||
_ => Arc::new(PlatformIO::new()?),
|
||||
};
|
||||
Ok(io)
|
||||
}
|
||||
|
||||
#[cfg(feature = "fs")]
|
||||
pub fn io_for_vfs<S: AsRef<str> + std::fmt::Display>(vfs: S) -> Result<Arc<dyn IO>> {
|
||||
let vfsmods = ext::add_builtin_vfs_extensions(None)?;
|
||||
let io: Arc<dyn IO> = match vfsmods
|
||||
.iter()
|
||||
.find(|v| v.0 == vfs.as_ref())
|
||||
.map(|v| v.1.clone())
|
||||
{
|
||||
Some(vfs) => vfs,
|
||||
None => match vfs.as_ref() {
|
||||
"memory" => Arc::new(MemoryIO::new()),
|
||||
"syscall" => Arc::new(SyscallIO::new()?),
|
||||
#[cfg(all(target_os = "linux", feature = "io_uring"))]
|
||||
"io_uring" => Arc::new(UringIO::new()?),
|
||||
other => {
|
||||
return Err(LimboError::InvalidArgument(format!("no such VFS: {other}")));
|
||||
}
|
||||
},
|
||||
};
|
||||
Ok(io)
|
||||
}
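A hedged sketch of how the two helpers above compose (vfs_name, path, flags and opts are assumed locals), mirroring what open_new does further down:

    let io: Arc<dyn IO> = match vfs_name {
        Some(name) => Database::io_for_vfs(name)?,
        None => Database::io_for_path(path)?,
    };
    let db = Database::open_file_with_flags(io.clone(), path, flags, opts)?;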
|
||||
|
||||
/// Open a new database file with optionally specifying a VFS without an existing database
|
||||
/// connection and symbol table to register extensions.
|
||||
#[cfg(feature = "fs")]
|
||||
@@ -641,40 +671,13 @@ impl Database {
|
||||
where
|
||||
S: AsRef<str> + std::fmt::Display,
|
||||
{
|
||||
use crate::util::MEMORY_PATH;
|
||||
let vfsmods = ext::add_builtin_vfs_extensions(None)?;
|
||||
match vfs {
|
||||
Some(vfs) => {
|
||||
let io: Arc<dyn IO> = match vfsmods
|
||||
.iter()
|
||||
.find(|v| v.0 == vfs.as_ref())
|
||||
.map(|v| v.1.clone())
|
||||
{
|
||||
Some(vfs) => vfs,
|
||||
None => match vfs.as_ref() {
|
||||
"memory" => Arc::new(MemoryIO::new()),
|
||||
"syscall" => Arc::new(SyscallIO::new()?),
|
||||
#[cfg(all(target_os = "linux", feature = "io_uring"))]
|
||||
"io_uring" => Arc::new(UringIO::new()?),
|
||||
other => {
|
||||
return Err(LimboError::InvalidArgument(format!(
|
||||
"no such VFS: {other}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
};
|
||||
let db = Self::open_file_with_flags(io.clone(), path, flags, opts)?;
|
||||
Ok((io, db))
|
||||
}
|
||||
None => {
|
||||
let io: Arc<dyn IO> = match path.trim() {
|
||||
MEMORY_PATH => Arc::new(MemoryIO::new()),
|
||||
_ => Arc::new(PlatformIO::new()?),
|
||||
};
|
||||
let db = Self::open_file_with_flags(io.clone(), path, flags, opts)?;
|
||||
Ok((io, db))
|
||||
}
|
||||
}
|
||||
let io = vfs
|
||||
.map(|vfs| Self::io_for_vfs(vfs))
|
||||
.or_else(|| Some(Self::io_for_path(path)))
|
||||
.transpose()?
|
||||
.unwrap();
|
||||
let db = Self::open_file_with_flags(io.clone(), path, flags, opts)?;
|
||||
Ok((io, db))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -920,7 +923,7 @@ pub struct Connection {
|
||||
|
||||
/// Per-connection view transaction states for uncommitted changes. This represents
|
||||
/// one entry per view that was touched in the transaction.
|
||||
view_transaction_states: RefCell<HashMap<String, ViewTransactionState>>,
|
||||
view_transaction_states: AllViewsTxState,
|
||||
/// Connection-level metrics aggregation
|
||||
pub metrics: RefCell<ConnectionMetrics>,
|
||||
/// Whether the connection is executing a statement initiated by another statement.
|
||||
@@ -1066,7 +1069,7 @@ impl Connection {
|
||||
|
||||
// Preserve existing views to avoid expensive repopulation.
|
||||
// TODO: We may not need to do this if we materialize our views.
|
||||
let existing_views = self.schema.borrow().materialized_views.clone();
|
||||
let existing_views = self.schema.borrow().incremental_views.clone();
|
||||
|
||||
// TODO: this is hack to avoid a cyclical problem with schema reprepare
|
||||
// The problem here is that we prepare a statement here, but when the statement tries
|
||||
@@ -1090,13 +1093,6 @@ impl Connection {
|
||||
self.with_schema_mut(|schema| {
|
||||
*schema = fresh;
|
||||
});
|
||||
|
||||
{
|
||||
let schema = self.schema.borrow();
|
||||
pager
|
||||
.io
|
||||
.block(|| schema.populate_materialized_views(self))?;
|
||||
}
|
||||
Result::Ok(())
|
||||
}
|
||||
|
||||
@@ -1315,12 +1311,17 @@ impl Connection {
|
||||
}
|
||||
|
||||
#[cfg(feature = "fs")]
|
||||
fn from_uri_attached(uri: &str, db_opts: DatabaseOpts) -> Result<Arc<Database>> {
|
||||
fn from_uri_attached(
|
||||
uri: &str,
|
||||
db_opts: DatabaseOpts,
|
||||
io: Arc<dyn IO>,
|
||||
) -> Result<Arc<Database>> {
|
||||
let mut opts = OpenOptions::parse(uri)?;
|
||||
// FIXME: for now, only support read only attach
|
||||
opts.mode = OpenMode::ReadOnly;
|
||||
let flags = opts.get_flags()?;
|
||||
let (_io, db) = Database::open_new(&opts.path, opts.vfs.as_ref(), flags, db_opts)?;
|
||||
let io = opts.vfs.map(Database::io_for_vfs).unwrap_or(Ok(io))?;
|
||||
let db = Database::open_file_with_flags(io.clone(), &opts.path, flags, db_opts)?;
|
||||
if let Some(modeof) = opts.modeof {
|
||||
let perms = std::fs::metadata(modeof)?;
|
||||
std::fs::set_permissions(&opts.path, perms.permissions())?;
|
||||
@@ -1678,7 +1679,11 @@ impl Connection {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
*self._db.maybe_shared_wal.write() = None;
|
||||
{
|
||||
let mut shared_wal = self._db.shared_wal.write();
|
||||
shared_wal.enabled.store(false, Ordering::Relaxed);
|
||||
shared_wal.file = None;
|
||||
}
|
||||
self.pager.borrow_mut().clear_page_cache();
|
||||
let pager = self._db.init_pager(Some(size.get() as usize))?;
|
||||
self.pager.replace(Rc::new(pager));
|
||||
@@ -1723,7 +1728,7 @@ impl Connection {
.expect("query must be parsed to statement");
let syms = self.syms.borrow();
self.with_schema_mut(|schema| {
let existing_views = schema.materialized_views.clone();
let existing_views = schema.incremental_views.clone();
if let Err(LimboError::ExtensionError(e)) =
parse_schema_rows(rows, schema, &syms, None, existing_views)
{
@@ -1874,7 +1879,7 @@ impl Connection {
.with_indexes(use_indexes)
.with_views(use_views)
.with_strict(use_strict);
let db = Self::from_uri_attached(path, db_opts)?;
let db = Self::from_uri_attached(path, db_opts, self._db.io.clone())?;
let pager = Rc::new(db.init_pager(None)?);

self.attached_databases
@@ -2055,16 +2060,16 @@ impl Connection {
self.syms.borrow().vtab_modules.keys().cloned().collect()
}

pub fn set_encryption_key(&self, key: EncryptionKey) {
pub fn set_encryption_key(&self, key: EncryptionKey) -> Result<()> {
tracing::trace!("setting encryption key for connection");
*self.encryption_key.borrow_mut() = Some(key.clone());
self.set_encryption_context();
self.set_encryption_context()
}

pub fn set_encryption_cipher(&self, cipher_mode: CipherMode) {
pub fn set_encryption_cipher(&self, cipher_mode: CipherMode) -> Result<()> {
tracing::trace!("setting encryption cipher for connection");
self.encryption_cipher_mode.replace(Some(cipher_mode));
self.set_encryption_context();
self.set_encryption_context()
}

pub fn get_encryption_cipher_mode(&self) -> Option<CipherMode> {
@@ -2072,17 +2077,22 @@ impl Connection {
}

// if both key and cipher are set, set encryption context on pager
fn set_encryption_context(&self) {
fn set_encryption_context(&self) -> Result<()> {
let key_ref = self.encryption_key.borrow();
let Some(key) = key_ref.as_ref() else {
return;
return Ok(());
};
let Some(cipher_mode) = self.encryption_cipher_mode.get() else {
return;
return Ok(());
};
tracing::trace!("setting encryption ctx for connection");
let pager = self.pager.borrow();
pager.set_encryption_context(cipher_mode, key);
if pager.is_encryption_ctx_set() {
return Err(LimboError::InvalidArgument(
"cannot reset encryption attributes if already set in the session".to_string(),
));
}
pager.set_encryption_context(cipher_mode, key)
}
}

@@ -271,6 +271,15 @@ pub struct CommitStateMachine<Clock: LogicalClock> {
_phantom: PhantomData<Clock>,
}

impl<Clock: LogicalClock> Debug for CommitStateMachine<Clock> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("CommitStateMachine")
.field("state", &self.state)
.field("is_finalized", &self.is_finalized)
.finish()
}
}

pub struct WriteRowStateMachine {
state: WriteRowState,
is_finalized: bool,

@@ -654,10 +654,10 @@ fn test_future_row() {
use crate::mvcc::cursor::MvccLazyCursor;
use crate::mvcc::database::{MvStore, Row, RowID};
use crate::types::Text;
use crate::MemoryIO;
use crate::RefValue;
use crate::Value;
use crate::{Database, StepResult};
use crate::{MemoryIO, Statement};

// Simple atomic clock implementation for testing

@@ -1243,3 +1243,76 @@ fn get_rows(conn: &Arc<Connection>, query: &str) -> Vec<Vec<Value>> {
}
rows
}

#[test]
#[ignore]
fn test_concurrent_writes() {
struct ConnectionState {
conn: Arc<Connection>,
inserts: Vec<i64>,
current_statement: Option<Statement>,
}
let db = MvccTestDbNoConn::new_with_random_db();
let mut connections = Vec::new();
{
let conn = db.connect();
conn.execute("CREATE TABLE test (x)").unwrap();
conn.close().unwrap();
}
for i in 0..2 {
let conn = db.connect();
let mut inserts = ((100 * i)..(100 * (i + 1))).collect::<Vec<_>>();
inserts.reverse();
connections.push(ConnectionState {
conn,
inserts,
current_statement: None,
});
}

loop {
let mut all_finished = true;
for conn in &mut connections {
if !conn.inserts.is_empty() && conn.current_statement.is_none() {
all_finished = false;
break;
}
}
for (conn_id, conn) in connections.iter_mut().enumerate() {
println!("connection {conn_id} inserts: {:?}", conn.inserts);
if conn.current_statement.is_none() && !conn.inserts.is_empty() {
let write = conn.inserts.pop().unwrap();
println!("inserting row {write} from connection {conn_id}");
conn.current_statement = Some(
conn.conn
.prepare(format!("INSERT INTO test (x) VALUES ({write})"))
.unwrap(),
);
}
if conn.current_statement.is_none() {
continue;
}
let stmt = conn.current_statement.as_mut().unwrap();
match stmt.step().unwrap() {
// These should be the only possible cases in write concurrency.
// No rows because insert doesn't return
// No interrupt because insert doesn't interrupt
// No busy because inserts in MVCC should support concurrent writes
StepResult::Done => {
conn.current_statement = None;
}
StepResult::IO => {
// let's skip doing I/O here, we want to perform I/O only after all the statements are stepped
}
_ => {
unreachable!()
}
}
}
db.get_db().io.run_once().unwrap();

if all_finished {
break;
}
}
}

@@ -352,6 +352,16 @@ const VERTICAL_TAB: char = '\u{b}';
#[derive(Debug, Clone, Copy)]
struct DoubleDouble(f64, f64);

impl DoubleDouble {
pub const E100: Self = DoubleDouble(1.0e+100, -1.590_289_110_975_991_8e83);
pub const E10: Self = DoubleDouble(1.0e+10, 0.0);
pub const E1: Self = DoubleDouble(1.0e+01, 0.0);

pub const NEG_E100: Self = DoubleDouble(1.0e-100, -1.999_189_980_260_288_3e-117);
pub const NEG_E10: Self = DoubleDouble(1.0e-10, -3.643_219_731_549_774e-27);
pub const NEG_E1: Self = DoubleDouble(1.0e-01, -5.551_115_123_125_783e-18);
}

impl From<u64> for DoubleDouble {
fn from(value: u64) -> Self {
let r = value as f64;
@@ -371,6 +381,16 @@ impl From<u64> for DoubleDouble {
}
}

impl From<DoubleDouble> for u64 {
fn from(value: DoubleDouble) -> Self {
if value.1 < 0.0 {
value.0 as u64 - value.1.abs() as u64
} else {
value.0 as u64 + value.1 as u64
}
}
}

impl From<DoubleDouble> for f64 {
fn from(DoubleDouble(a, aa): DoubleDouble) -> Self {
a + aa
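For context, here is a minimal standalone sketch of the double-double idea these constants rely on: a value is carried as an unevaluated sum (hi, lo), where lo captures rounding error that a single f64 would discard (E100 above pairs 1.0e+100 with that constant's error term for the same reason). The helper below is illustrative only and not part of the patch.

    // Knuth's TwoSum: `hi` is the rounded sum, `lo` the exact rounding error.
    fn two_sum(a: f64, b: f64) -> (f64, f64) {
        let hi = a + b;
        let v = hi - a;
        let lo = (a - (hi - v)) + (b - v);
        (hi, lo)
    }

    fn main() {
        let (hi, lo) = two_sum(1.0e16, 1.0);
        // 1.0e16 + 1.0 rounds back to 1.0e16 in f64; the pair keeps the lost 1.0 in `lo`.
        assert_eq!(hi, 1.0e16);
        assert_eq!(lo, 1.0);
    }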
@@ -489,6 +509,10 @@ pub fn str_to_f64(input: impl AsRef<str>) -> Option<StrToF64> {
}

if input.next_if(|ch| matches!(ch, '.')).is_some() {
if matches!(input.peek(), Some('e' | 'E')) {
return None;
}

if had_digits || input.peek().is_some_and(char::is_ascii_digit) {
is_fractional = true
}
@@ -539,28 +563,28 @@ pub fn str_to_f64(input: impl AsRef<str>) -> Option<StrToF64> {
if exponent > 0 {
while exponent >= 100 {
exponent -= 100;
result *= DoubleDouble(1.0e+100, -1.590_289_110_975_991_8e83);
result *= DoubleDouble::E100;
}
while exponent >= 10 {
exponent -= 10;
result *= DoubleDouble(1.0e+10, 0.0);
result *= DoubleDouble::E10;
}
while exponent >= 1 {
exponent -= 1;
result *= DoubleDouble(1.0e+01, 0.0);
result *= DoubleDouble::E1;
}
} else {
while exponent <= -100 {
exponent += 100;
result *= DoubleDouble(1.0e-100, -1.999_189_980_260_288_3e-117);
result *= DoubleDouble::NEG_E100;
}
while exponent <= -10 {
exponent += 10;
result *= DoubleDouble(1.0e-10, -3.643_219_731_549_774e-27);
result *= DoubleDouble::NEG_E10;
}
while exponent <= -1 {
exponent += 1;
result *= DoubleDouble(1.0e-01, -5.551_115_123_125_783e-18);
result *= DoubleDouble::NEG_E1;
}
}

@@ -573,3 +597,130 @@ pub fn str_to_f64(input: impl AsRef<str>) -> Option<StrToF64> {
StrToF64::Decimal(result)
})
}

pub fn format_float(v: f64) -> String {
if v.is_nan() {
return "".to_string();
}

if v.is_infinite() {
return if v.is_sign_negative() { "-Inf" } else { "Inf" }.to_string();
}

if v == 0.0 {
return "0.0".to_string();
}

let negative = v < 0.0;
let mut d = DoubleDouble(v.abs(), 0.0);
let mut exp = 0;

if d.0 > 9.223_372_036_854_775e18 {
while d.0 > 9.223_372_036_854_774e118 {
exp += 100;
d *= DoubleDouble::NEG_E100;
}
while d.0 > 9.223_372_036_854_774e28 {
exp += 10;
d *= DoubleDouble::NEG_E10;
}
while d.0 > 9.223_372_036_854_775e18 {
exp += 1;
d *= DoubleDouble::NEG_E1;
}
} else {
while d.0 < 9.223_372_036_854_775e-83 {
exp -= 100;
d *= DoubleDouble::E100;
}
while d.0 < 9.223_372_036_854_775e7 {
exp -= 10;
d *= DoubleDouble::E10;
}
while d.0 < 9.223_372_036_854_775e17 {
exp -= 1;
d *= DoubleDouble::E1;
}
}

let v = u64::from(d);

let mut digits = v.to_string().into_bytes();

let precision = 15;

let mut decimal_pos = digits.len() as i32 + exp;

'out: {
if digits.len() > precision {
let round_up = digits[precision] >= b'5';
digits.truncate(precision);

if round_up {
for i in (0..precision).rev() {
if digits[i] < b'9' {
digits[i] += 1;
break 'out;
}
digits[i] = b'0';
}

digits.insert(0, b'1');
decimal_pos += 1;
}
}
}

while digits.len() > 1 && digits[digits.len() - 1] == b'0' {
digits.pop();
}

let exp = decimal_pos - 1;

if (-4..=14).contains(&exp) {
format!(
"{}{}.{}{}",
if negative { "-" } else { Default::default() },
if decimal_pos > 0 {
let zeroes = (decimal_pos - digits.len() as i32).max(0) as usize;
let digits = digits
.get(0..(decimal_pos.min(digits.len() as i32) as usize))
.unwrap();
(unsafe { str::from_utf8_unchecked(digits) }).to_owned() + &"0".repeat(zeroes)
} else {
"0".to_string()
},
"0".repeat(decimal_pos.min(0).unsigned_abs() as usize),
digits
.get((decimal_pos.max(0) as usize)..)
.filter(|v| !v.is_empty())
.map(|v| unsafe { str::from_utf8_unchecked(v) })
.unwrap_or("0")
)
} else {
format!(
"{}{}.{}e{}{:0width$}",
if negative { "-" } else { "" },
digits.first().cloned().unwrap_or(b'0') as char,
digits
.get(1..)
.filter(|v| !v.is_empty())
.map(|v| unsafe { str::from_utf8_unchecked(v) })
.unwrap_or("0"),
if exp.is_positive() { "+" } else { "-" },
exp.abs(),
width = if exp > 100 { 3 } else { 2 }
)
}
}

#[test]
fn test_decode_float() {
assert_eq!(format_float(9.93e-322), "9.93071948140905e-322");
assert_eq!(format_float(9.93), "9.93");
assert_eq!(format_float(0.093), "0.093");
assert_eq!(format_float(-0.093), "-0.093");
assert_eq!(format_float(0.0), "0.0");
assert_eq!(format_float(4.94e-322), "4.94065645841247e-322");
assert_eq!(format_float(-20228007.0), "-20228007.0");
}

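The rounding step in format_float (truncate to 15 significant digits, then carry a round-up through trailing nines) is the part that is easiest to get wrong, so here is a tiny standalone sketch of the same round-and-carry idea on a plain digit string; it is illustrative only and not part of the patch.

    // Rounds `digits` (ASCII, most significant first) to `precision` significant digits.
    // Returns the rounded digits plus how far the decimal point shifts right after a carry.
    fn round_digits(mut digits: Vec<u8>, precision: usize) -> (Vec<u8>, i32) {
        let mut decimal_shift = 0;
        if digits.len() > precision {
            let round_up = digits[precision] >= b'5';
            digits.truncate(precision);
            if round_up {
                let mut carried = false;
                for i in (0..precision).rev() {
                    if digits[i] < b'9' {
                        digits[i] += 1;
                        carried = true;
                        break;
                    }
                    digits[i] = b'0';
                }
                if !carried {
                    // e.g. "999" rounds to "1000": prepend a 1 and shift the decimal point.
                    digits.insert(0, b'1');
                    decimal_shift = 1;
                }
            }
        }
        (digits, decimal_shift)
    }

    fn main() {
        let (d, shift) = round_digits(b"999999999999999".to_vec(), 14);
        assert_eq!((&d[..2], shift), (&b"10"[..], 1));
    }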
216 core/schema.rs
@@ -1,8 +1,4 @@
use crate::incremental::view::IncrementalView;
use crate::types::IOResult;

/// Type alias for the materialized views collection
pub type MaterializedViewsMap = HashMap<String, Arc<Mutex<IncrementalView>>>;

/// Simple view structure for non-materialized views
#[derive(Debug, Clone)]
@@ -23,12 +19,12 @@ use crate::translate::plan::SelectPlan;
use crate::util::{
module_args_from_sql, module_name_from_sql, type_from_name, IOExt, UnparsedFromSqlIndex,
};
use crate::{return_if_io, LimboError, MvCursor, Pager, RefValue, SymbolTable, VirtualTable};
use crate::{util::normalize_ident, Result};
use crate::{LimboError, MvCursor, Pager, RefValue, SymbolTable, VirtualTable};
use core::fmt;
use std::cell::RefCell;
use std::collections::hash_map::Entry;
use std::collections::{BTreeSet, HashMap};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::ops::Deref;
use std::rc::Rc;
use std::sync::Arc;
@@ -42,11 +38,25 @@ use turso_parser::{

const SCHEMA_TABLE_NAME: &str = "sqlite_schema";
const SCHEMA_TABLE_NAME_ALT: &str = "sqlite_master";
pub const DBSP_TABLE_PREFIX: &str = "__turso_internal_dbsp_state_";

/// Check if a table name refers to a system table that should be protected from direct writes
pub fn is_system_table(table_name: &str) -> bool {
let normalized = table_name.to_lowercase();
normalized == SCHEMA_TABLE_NAME || normalized == SCHEMA_TABLE_NAME_ALT
}

#[derive(Debug)]
pub struct Schema {
pub tables: HashMap<String, Arc<Table>>,
pub materialized_views: MaterializedViewsMap,

/// Track which tables are actually materialized views
pub materialized_view_names: HashSet<String>,
/// Store original SQL for materialized views (for .schema command)
pub materialized_view_sql: HashMap<String, String>,
/// The incremental view objects (DBSP circuits)
pub incremental_views: HashMap<String, Arc<Mutex<IncrementalView>>>,

pub views: ViewsMap,

/// table_name to list of indexes for the table
@@ -75,12 +85,16 @@ impl Schema {
Arc::new(Table::Virtual(Arc::new((*function).clone()))),
);
}
let materialized_views: MaterializedViewsMap = HashMap::new();
let materialized_view_names = HashSet::new();
let materialized_view_sql = HashMap::new();
let incremental_views = HashMap::new();
let views: ViewsMap = HashMap::new();
let table_to_materialized_views: HashMap<String, Vec<String>> = HashMap::new();
Self {
tables,
materialized_views,
materialized_view_names,
materialized_view_sql,
incremental_views,
views,
indexes,
has_indexes,
@@ -96,41 +110,51 @@ impl Schema {
.iter()
.any(|idx| idx.1.iter().any(|i| i.name == name))
}
pub fn add_materialized_view(&mut self, view: IncrementalView) {
pub fn add_materialized_view(&mut self, view: IncrementalView, table: Arc<Table>, sql: String) {
let name = normalize_ident(view.name());
self.materialized_views

// Add to tables (so it appears as a regular table)
self.tables.insert(name.clone(), table);

// Track that this is a materialized view
self.materialized_view_names.insert(name.clone());
self.materialized_view_sql.insert(name.clone(), sql);

// Store the incremental view (DBSP circuit)
self.incremental_views
.insert(name, Arc::new(Mutex::new(view)));
}

pub fn get_materialized_view(&self, name: &str) -> Option<Arc<Mutex<IncrementalView>>> {
let name = normalize_ident(name);
self.materialized_views.get(&name).cloned()
self.incremental_views.get(&name).cloned()
}

pub fn is_materialized_view(&self, name: &str) -> bool {
let name = normalize_ident(name);
self.materialized_view_names.contains(&name)
}

pub fn remove_view(&mut self, name: &str) -> Result<()> {
let name = normalize_ident(name);

// Check if we have both a regular view and a materialized view with the same name
// It should be impossible to have both
let has_regular_view = self.views.contains_key(&name);
let has_materialized_view = self.materialized_views.contains_key(&name);

assert!(
!(has_regular_view && has_materialized_view),
"Found both regular view and materialized view with name: {name}"
);

if has_regular_view {
if self.views.contains_key(&name) {
self.views.remove(&name);
Ok(())
} else if has_materialized_view {
} else if self.materialized_view_names.contains(&name) {
// Remove from tables
self.tables.remove(&name);

// Remove from materialized view tracking
self.materialized_view_names.remove(&name);
self.materialized_view_sql.remove(&name);
self.incremental_views.remove(&name);

// Remove from table_to_materialized_views dependencies
for views in self.table_to_materialized_views.values_mut() {
views.retain(|v| v != &name);
}

// Remove the materialized view itself
self.materialized_views.remove(&name);
Ok(())
} else {
Err(crate::LimboError::ParseError(format!(
@@ -159,30 +183,6 @@ impl Schema {
.unwrap_or_default()
}

/// Get all materialized views that depend on a given table, skip normalizing ident.
/// We are basically assuming we already normalized the ident.
pub fn get_dependent_materialized_views_unnormalized(
&self,
table_name: &str,
) -> Option<&Vec<String>> {
self.table_to_materialized_views.get(table_name)
}

/// Populate all materialized views by scanning their source tables
/// Returns IOResult to support async execution
pub fn populate_materialized_views(
&self,
conn: &Arc<crate::Connection>,
) -> Result<IOResult<()>> {
for view in self.materialized_views.values() {
let mut view = view
.lock()
.map_err(|_| LimboError::InternalError("Failed to lock view".to_string()))?;
return_if_io!(view.populate_from_table(conn));
}
Ok(IOResult::Done(()))
}

/// Add a regular (non-materialized) view
pub fn add_view(&mut self, view: View) {
let name = normalize_ident(&view.name);
@@ -218,6 +218,12 @@ impl Schema {
pub fn remove_table(&mut self, table_name: &str) {
let name = normalize_ident(table_name);
self.tables.remove(&name);

// If this was a materialized view, also clean up the metadata
if self.materialized_view_names.remove(&name) {
self.incremental_views.remove(&name);
self.materialized_view_sql.remove(&name);
}
}

pub fn get_btree_table(&self, name: &str) -> Option<Arc<BTreeTable>> {
@@ -291,8 +297,10 @@ impl Schema {
let mut automatic_indices: HashMap<String, Vec<(String, usize)>> =
HashMap::with_capacity(10);

// Collect materialized views for second pass to populate table_to_materialized_views mapping
let mut materialized_views_to_process: Vec<(String, Vec<String>)> = Vec::new();
// Store DBSP state table root pages: view_name -> dbsp_state_root_page
let mut dbsp_state_roots: HashMap<String, usize> = HashMap::new();
// Store materialized view info (SQL and root page) for later creation
let mut materialized_view_info: HashMap<String, (String, usize)> = HashMap::new();

if matches!(pager.begin_read_tx()?, LimboResult::Busy) {
return Err(LimboError::Busy);
@@ -351,6 +359,18 @@ impl Schema {
}

let table = BTreeTable::from_sql(sql, root_page as usize)?;

// Check if this is a DBSP state table
if table.name.starts_with(DBSP_TABLE_PREFIX) {
// Extract the view name from _dbsp_state_<viewname>
let view_name = table
.name
.strip_prefix(DBSP_TABLE_PREFIX)
.unwrap()
.to_string();
dbsp_state_roots.insert(view_name, root_page as usize);
}

self.add_btree_table(Arc::new(table));
}
"index" => {
@@ -412,6 +432,14 @@ impl Schema {
};
let name = name_text.as_str();

// Get the root page (column 3) to determine if this is a materialized view
// Regular views have rootpage = 0, materialized views have rootpage != 0
let root_page_value = record_cursor.get_value(&row, 3)?;
let RefValue::Integer(root_page_int) = root_page_value else {
return Err(LimboError::ConversionError("Expected integer value".into()));
};
let root_page = root_page_int as usize;

let sql_value = record_cursor.get_value(&row, 4)?;
let RefValue::Text(sql_text) = sql_value else {
return Err(LimboError::ConversionError("Expected text value".into()));
@@ -423,15 +451,12 @@ impl Schema {
if let Ok(Some(Cmd::Stmt(stmt))) = parser.next_cmd() {
match stmt {
Stmt::CreateMaterializedView { .. } => {
// Create IncrementalView for materialized views
if let Ok(incremental_view) = IncrementalView::from_sql(sql, self) {
let referenced_tables =
incremental_view.get_referenced_table_names();
let view_name = name.to_string();
self.add_materialized_view(incremental_view);
materialized_views_to_process
.push((view_name, referenced_tables));
}
// Store materialized view info for later creation
// We'll create the actual IncrementalView in a later pass
// when we have both the main root page and DBSP state root
let view_name = name.to_string();
materialized_view_info
.insert(view_name, (sql.to_string(), root_page));
}
Stmt::CreateView {
view_name: _,
@@ -475,14 +500,6 @@ impl Schema {

pager.end_read_tx()?;

// Second pass: populate table_to_materialized_views mapping
for (view_name, referenced_tables) in materialized_views_to_process {
// Register this view as dependent on each referenced table
for table_name in referenced_tables {
self.add_materialized_view_dependency(&table_name, &view_name);
}
}

for unparsed_sql_from_index in from_sql_indexes {
if !self.indexes_enabled() {
self.table_set_has_index(&unparsed_sql_from_index.table_name);
@@ -514,6 +531,39 @@ impl Schema {
}
}

// Third pass: Create materialized views now that we have both root pages
for (view_name, (sql, main_root)) in materialized_view_info {
// Look up the DBSP state root for this view - must exist for materialized views
let dbsp_state_root = dbsp_state_roots.get(&view_name).ok_or_else(|| {
LimboError::InternalError(format!(
"Materialized view {view_name} is missing its DBSP state table"
))
})?;

// Create the IncrementalView with both root pages
let incremental_view =
IncrementalView::from_sql(&sql, self, main_root, *dbsp_state_root)?;
let referenced_tables = incremental_view.get_referenced_table_names();

// Create a BTreeTable for the materialized view
let table = Arc::new(Table::BTree(Arc::new(BTreeTable {
name: view_name.clone(),
root_page: main_root,
columns: incremental_view.columns.clone(),
primary_key_columns: Vec::new(),
has_rowid: true,
is_strict: false,
unique_sets: None,
})));

self.add_materialized_view(incremental_view, table, sql);

// Register dependencies
for table_name in referenced_tables {
self.add_materialized_view_dependency(&table_name, &view_name);
}
}

Ok(())
}
}
@@ -559,15 +609,19 @@ impl Clone for Schema {
(name.clone(), indexes)
})
.collect();
let materialized_views = self
.materialized_views
let materialized_view_names = self.materialized_view_names.clone();
let materialized_view_sql = self.materialized_view_sql.clone();
let incremental_views = self
.incremental_views
.iter()
.map(|(name, view)| (name.clone(), view.clone()))
.collect();
let views = self.views.clone();
Self {
tables,
materialized_views,
materialized_view_names,
materialized_view_sql,
incremental_views,
views,
indexes,
has_indexes: self.has_indexes.clone(),
@@ -1268,16 +1322,14 @@ impl Affinity {
}
}

pub fn from_char(char: char) -> Result<Self> {
pub fn from_char(char: char) -> Self {
match char {
SQLITE_AFF_INTEGER => Ok(Affinity::Integer),
SQLITE_AFF_TEXT => Ok(Affinity::Text),
SQLITE_AFF_NONE => Ok(Affinity::Blob),
SQLITE_AFF_REAL => Ok(Affinity::Real),
SQLITE_AFF_NUMERIC => Ok(Affinity::Numeric),
_ => Err(LimboError::InternalError(format!(
"Invalid affinity character: {char}"
))),
SQLITE_AFF_INTEGER => Affinity::Integer,
SQLITE_AFF_TEXT => Affinity::Text,
SQLITE_AFF_NONE => Affinity::Blob,
SQLITE_AFF_REAL => Affinity::Real,
SQLITE_AFF_NUMERIC => Affinity::Numeric,
_ => Affinity::Blob,
}
}

@@ -1285,7 +1337,7 @@ impl Affinity {
self.aff_mask() as u8
}

pub fn from_char_code(code: u8) -> Result<Self, LimboError> {
pub fn from_char_code(code: u8) -> Self {
Self::from_char(code as char)
}

@@ -27,6 +27,7 @@ pub trait StateTransition {
fn is_finalized(&self) -> bool;
}

#[derive(Debug)]
pub struct StateMachine<State: StateTransition> {
state: State,
is_finalized: bool,

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -25,7 +25,7 @@ use std::sync::{Arc, Mutex};
use tracing::{instrument, trace, Level};

use super::btree::btree_init_page;
use super::page_cache::{CacheError, CacheResizeResult, DumbLruPageCache, PageCacheKey};
use super::page_cache::{CacheError, CacheResizeResult, PageCache, PageCacheKey};
use super::sqlite3_ondisk::begin_write_btree_page;
use super::wal::CheckpointMode;
use crate::storage::encryption::{CipherMode, EncryptionContext, EncryptionKey};
@@ -129,7 +129,7 @@ pub struct PageInner {
/// requests unpinning via [Page::unpin], the pin count will still be >0 if the outer
/// code path has not yet requested to unpin the page as well.
///
/// Note that [DumbLruPageCache::clear] evicts the pages even if pinned, so as long as
/// Note that [PageCache::clear] evicts the pages even if pinned, so as long as
/// we clear the page cache on errors, pins will not 'leak'.
pub pin_count: AtomicUsize,
/// The WAL frame number this page was loaded from (0 if loaded from main DB file)
@@ -464,7 +464,7 @@ pub struct Pager {
/// in-memory databases, ephemeral tables and ephemeral indexes do not have a WAL.
pub(crate) wal: Option<Rc<RefCell<dyn Wal>>>,
/// A page cache for the database.
page_cache: Arc<RwLock<DumbLruPageCache>>,
page_cache: Arc<RwLock<PageCache>>,
/// Buffer pool for temporary data storage.
pub buffer_pool: Arc<BufferPool>,
/// I/O interface for input/output operations.
@@ -564,7 +564,7 @@ impl Pager {
db_file: Arc<dyn DatabaseStorage>,
wal: Option<Rc<RefCell<dyn Wal>>>,
io: Arc<dyn crate::io::IO>,
page_cache: Arc<RwLock<DumbLruPageCache>>,
page_cache: Arc<RwLock<PageCache>>,
buffer_pool: Arc<BufferPool>,
db_state: Arc<AtomicDbState>,
init_lock: Arc<Mutex<()>>,
@@ -1012,9 +1012,10 @@ impl Pager {
// Give a chance for the allocation to happen elsewhere
_ => {}
}
} else {
// Give a chance for the allocation to happen elsewhere
io_yield_one!(Completion::new_dummy());
}
// Give a chance for the allocation to happen elsewhere
io_yield_one!(Completion::new_dummy());
}
Ok(IOResult::Done(()))
}
@@ -1125,9 +1126,19 @@ impl Pager {
let page_key = PageCacheKey::new(page_idx);
if let Some(page) = page_cache.get(&page_key)? {
tracing::trace!("read_page(page_idx = {}) = cached", page_idx);
turso_assert!(
page_idx == page.get().id,
"attempted to read page {page_idx} but got page {}",
page.get().id
);
return Ok((page.clone(), None));
}
let (page, c) = self.read_page_no_cache(page_idx, None, false)?;
turso_assert!(
page_idx == page.get().id,
"attempted to read page {page_idx} but got page {}",
page.get().id
);
self.cache_insert(page_idx, page.clone(), &mut page_cache)?;
Ok((page, Some(c)))
}
@@ -1153,7 +1164,7 @@ impl Pager {
&self,
page_idx: usize,
page: PageRef,
page_cache: &mut DumbLruPageCache,
page_cache: &mut PageCache,
) -> Result<()> {
let page_key = PageCacheKey::new(page_idx);
match page_cache.insert(page_key, page.clone()) {
@@ -1171,7 +1182,7 @@ impl Pager {
tracing::trace!("read_page(page_idx = {})", page_idx);
let mut page_cache = self.page_cache.write();
let page_key = PageCacheKey::new(page_idx);
Ok(page_cache.get(&page_key)?)
page_cache.get(&page_key)
}

/// Get a page from cache only if it matches the target frame
@@ -1981,7 +1992,7 @@ impl Pager {
trunk_page.get_contents().as_ptr().fill(0);
let page_key = PageCacheKey::new(trunk_page.get().id);
{
let mut page_cache = self.page_cache.write();
let page_cache = self.page_cache.read();
turso_assert!(
page_cache.contains_key(&page_key),
"page {} is not in cache",
@@ -2013,7 +2024,7 @@ impl Pager {
leaf_page.get_contents().as_ptr().fill(0);
let page_key = PageCacheKey::new(leaf_page.get().id);
{
let mut page_cache = self.page_cache.write();
let page_cache = self.page_cache.read();
turso_assert!(
page_cache.contains_key(&page_key),
"page {} is not in cache",
@@ -2090,13 +2101,11 @@ impl Pager {

// FIXME: use specific page key for writer instead of max frame, this will make readers not conflict
assert!(page.is_dirty());
cache
.insert_ignore_existing(page_key, page.clone())
.map_err(|e| {
LimboError::InternalError(format!(
"Failed to insert loaded page {id} into cache: {e:?}"
))
})?;
cache.upsert_page(page_key, page.clone()).map_err(|e| {
LimboError::InternalError(format!(
"Failed to insert loaded page {id} into cache: {e:?}"
))
})?;
page.set_loaded();
Ok(())
}
@@ -2165,16 +2174,27 @@ impl Pager {
Ok(IOResult::Done(f(header)))
}

pub fn set_encryption_context(&self, cipher_mode: CipherMode, key: &EncryptionKey) {
pub fn is_encryption_ctx_set(&self) -> bool {
self.io_ctx.borrow_mut().encryption_context().is_some()
}

pub fn set_encryption_context(
&self,
cipher_mode: CipherMode,
key: &EncryptionKey,
) -> Result<()> {
let page_size = self.page_size.get().unwrap().get() as usize;
let encryption_ctx = EncryptionContext::new(cipher_mode, key, page_size).unwrap();
let encryption_ctx = EncryptionContext::new(cipher_mode, key, page_size)?;
{
let mut io_ctx = self.io_ctx.borrow_mut();
io_ctx.set_encryption(encryption_ctx);
}
let Some(wal) = self.wal.as_ref() else { return };
let Some(wal) = self.wal.as_ref() else {
return Ok(());
};
wal.borrow_mut()
.set_io_context(self.io_ctx.borrow().clone())
.set_io_context(self.io_ctx.borrow().clone());
Ok(())
}
}

@@ -2395,14 +2415,14 @@ mod tests {

use parking_lot::RwLock;

use crate::storage::page_cache::{DumbLruPageCache, PageCacheKey};
use crate::storage::page_cache::{PageCache, PageCacheKey};

use super::Page;

#[test]
fn test_shared_cache() {
// ensure cache can be shared between threads
let cache = Arc::new(RwLock::new(DumbLruPageCache::new(10)));
let cache = Arc::new(RwLock::new(PageCache::new(10)));

let thread = {
let cache = cache.clone();
@@ -2435,7 +2455,7 @@ mod ptrmap_tests {
use crate::io::{MemoryIO, OpenFlags, IO};
use crate::storage::buffer_pool::BufferPool;
use crate::storage::database::{DatabaseFile, DatabaseStorage};
use crate::storage::page_cache::DumbLruPageCache;
use crate::storage::page_cache::PageCache;
use crate::storage::pager::Pager;
use crate::storage::sqlite3_ondisk::PageSize;
use crate::storage::wal::{WalFile, WalFileShared};
@@ -2464,7 +2484,7 @@ mod ptrmap_tests {
let pages = initial_db_pages + 10;
let sz = std::cmp::max(std::cmp::min(pages, 64), pages);
let buffer_pool = BufferPool::begin_init(&io, (sz * page_size) as usize);
let page_cache = Arc::new(RwLock::new(DumbLruPageCache::new(sz as usize)));
let page_cache = Arc::new(RwLock::new(PageCache::new(sz as usize)));

let wal = Rc::new(RefCell::new(WalFile::new(
io.clone(),

@@ -65,7 +65,8 @@ use crate::types::{RawSlice, RefValue, SerialType, SerialTypeKind, TextRef, Text
use crate::{
bail_corrupt_error, turso_assert, CompletionError, File, IOContext, Result, WalFileShared,
};
use std::cell::{Cell, UnsafeCell};
use parking_lot::RwLock;
use std::cell::Cell;
use std::collections::{BTreeMap, HashMap};
use std::mem::MaybeUninit;
use std::pin::Pin;
@@ -995,17 +996,14 @@ pub fn write_pages_vectored(
pager: &Pager,
batch: BTreeMap<usize, Arc<Buffer>>,
done_flag: Arc<AtomicBool>,
final_write: bool,
) -> Result<Vec<Completion>> {
if batch.is_empty() {
done_flag.store(true, Ordering::Relaxed);
return Ok(Vec::new());
}

// batch item array is already sorted by id, so we just need to find contiguous ranges of page_id's
// to submit as `writev`/write_pages calls.

let page_sz = pager.page_size.get().expect("page size is not set").get() as usize;

// Count expected number of runs to create the atomic counter we need to track each batch
let mut run_count = 0;
let mut prev_id = None;
@@ -1023,26 +1021,21 @@ pub fn write_pages_vectored(
// Create the atomic counters
let runs_left = Arc::new(AtomicUsize::new(run_count));
let done = done_flag.clone();
// we know how many runs, but we don't know how many buffers per run, so we can only give an
// estimate of the capacity
const EST_BUFF_CAPACITY: usize = 32;

// Iterate through the batch, submitting each run as soon as it ends
// We can reuse this across runs without reallocating
let mut run_bufs = Vec::with_capacity(EST_BUFF_CAPACITY);
let mut run_start_id: Option<usize> = None;

// Iterate through the batch
// Track which run we're on to identify the last one
let mut current_run = 0;
let mut iter = batch.iter().peekable();

let mut completions = Vec::new();

while let Some((id, item)) = iter.next() {
// Track the start of the run
if run_start_id.is_none() {
run_start_id = Some(*id);
}

// Add this page to the current run
run_bufs.push(item.clone());

// Check if this is the end of a run
@@ -1052,24 +1045,32 @@ pub fn write_pages_vectored(
};

if is_end_of_run {
current_run += 1;
let start_id = run_start_id.expect("should have a start id");
let runs_left_cl = runs_left.clone();
let done_cl = done.clone();

// This is the last chunk if it's the last run AND final_write is true
let is_last_chunk = current_run == run_count && final_write;

let total_sz = (page_sz * run_bufs.len()) as i32;
let c = Completion::new_write(move |res| {
let cmp = move |res| {
let Ok(res) = res else {
return;
};
// writev calls can sometimes return partial writes, but our `pwritev`
// implementation aggregates any partial writes and calls completion with total
turso_assert!(total_sz == res, "failed to write expected size");
if runs_left_cl.fetch_sub(1, Ordering::AcqRel) == 1 {
done_cl.store(true, Ordering::Release);
}
});
};

// Submit write operation for this run, decrementing the counter if we error
let c = if is_last_chunk {
Completion::new_write_linked(cmp)
} else {
Completion::new_write(cmp)
};

// Submit write operation for this run
let io_ctx = &pager.io_ctx.borrow();
match pager.db_file.write_pages(
start_id,
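The hunk above depends on the batch being a BTreeMap sorted by page id, so contiguous ids can be grouped into runs and each run submitted as one vectored write. A minimal standalone sketch of that grouping step, with illustrative names only (not part of the patch):

    use std::collections::BTreeMap;

    // Splits a sorted map of page ids into contiguous (start_id, len) runs.
    fn contiguous_runs<V>(batch: &BTreeMap<usize, V>) -> Vec<(usize, usize)> {
        let mut runs = Vec::new();
        let mut cur: Option<(usize, usize)> = None;
        for (&id, _) in batch {
            match cur {
                Some((start, len)) if id == start + len => cur = Some((start, len + 1)),
                Some(run) => {
                    runs.push(run);
                    cur = Some((id, 1));
                }
                None => cur = Some((id, 1)),
            }
        }
        if let Some(run) = cur {
            runs.push(run);
        }
        runs
    }

    fn main() {
        let batch: BTreeMap<usize, ()> =
            [3usize, 4, 5, 9, 10, 12].into_iter().map(|i| (i, ())).collect();
        // Pages 3-5 form one run, 9-10 another, 12 a run of its own.
        assert_eq!(contiguous_runs(&batch), vec![(3, 3), (9, 2), (12, 1)]);
    }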
@@ -1538,8 +1539,22 @@ pub fn read_varint(buf: &[u8]) -> Result<(u64, usize)> {
}
}
}
v = (v << 8) + buf[8] as u64;
Ok((v, 9))
match buf.get(8) {
Some(&c) => {
// Values requiring 9 bytes must have non-zero in the top 8 bits (value >= 1<<56).
// Since the final value is `(v<<8) + c`, the top 8 bits (v >> 48) must not be 0.
// If those are zero, this should be treated as corrupt.
// Perf? the comparison + branching happens only in parsing 9-byte varint which is rare.
if (v >> 48) == 0 {
bail_corrupt_error!("Invalid varint");
}
v = (v << 8) + c as u64;
Ok((v, 9))
}
None => {
bail_corrupt_error!("Invalid varint");
}
}
}

pub fn varint_len(value: u64) -> usize {
@@ -1608,7 +1623,7 @@ pub fn write_varint_to_vec(value: u64, payload: &mut Vec<u8>) {
}

/// We need to read the WAL file on open to reconstruct the WAL frame cache.
pub fn read_entire_wal_dumb(file: &Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFileShared>>> {
pub fn read_entire_wal_dumb(file: &Arc<dyn File>) -> Result<Arc<RwLock<WalFileShared>>> {
let size = file.size()?;
#[allow(clippy::arc_with_non_send_sync)]
let buf_for_pread = Arc::new(Buffer::new_temporary(size as usize));
@@ -1620,14 +1635,15 @@ pub fn read_entire_wal_dumb(file: &Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFi
l.unlock();
}
#[allow(clippy::arc_with_non_send_sync)]
let wal_file_shared_ret = Arc::new(UnsafeCell::new(WalFileShared {
let wal_file_shared_ret = Arc::new(RwLock::new(WalFileShared {
enabled: AtomicBool::new(true),
wal_header: header.clone(),
min_frame: AtomicU64::new(0),
max_frame: AtomicU64::new(0),
nbackfills: AtomicU64::new(0),
frame_cache: Arc::new(SpinLock::new(HashMap::new())),
last_checksum: (0, 0),
file: file.clone(),
file: Some(file.clone()),
read_locks,
write_lock: TursoRwLock::new(),
loaded: AtomicBool::new(false),
@@ -1708,7 +1724,7 @@ pub fn read_entire_wal_dumb(file: &Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFi
let mut current_offset = WAL_HEADER_SIZE;
let mut frame_idx = 1_u64;

let wfs_data = unsafe { &mut *wal_file_shared_for_completion.get() };
let mut wfs_data = wal_file_shared_for_completion.write();

if !checksum_header_failed {
while current_offset + WAL_FRAME_HEADER_SIZE + page_size <= buf_slice.len() {
@@ -2208,4 +2224,14 @@ mod tests {

assert_eq!(small_vec.get(8), None);
}

#[rstest]
#[case(&[])] // empty buffer
#[case(&[0x80])] // truncated 1-byte with continuation
#[case(&[0x80, 0x80])] // truncated 2-byte
#[case(&[0x81, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80])] // 9-byte truncated to 8
#[case(&[0x80; 9])] // bits set without end
fn test_read_varint_malformed_inputs(#[case] buf: &[u8]) {
assert!(read_varint(buf).is_err());
}
}

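For reference, the SQLite-style varint layout that read_varint parses: the first eight bytes each contribute 7 bits (high bit set means "more follows"), while a ninth byte, if present, contributes all 8 bits, which is why a valid 9-byte varint must already have non-zero high bits before the final shift. The decoder below is a minimal illustrative sketch (it returns None rather than a corruption error), not the crate's implementation:

    fn decode_varint(buf: &[u8]) -> Option<(u64, usize)> {
        let mut v: u64 = 0;
        for (i, &b) in buf.iter().enumerate().take(8) {
            v = (v << 7) + (b & 0x7f) as u64;
            if b & 0x80 == 0 {
                return Some((v, i + 1));
            }
        }
        // Ninth byte: all 8 bits are payload.
        let b = *buf.get(8)?;
        Some(((v << 8) + b as u64, 9))
    }

    fn main() {
        // 0x81 0x00 encodes 128: (1 << 7) + 0, consuming two bytes.
        assert_eq!(decode_varint(&[0x81, 0x00]), Some((128, 2)));
        // A single byte below 0x80 is itself.
        assert_eq!(decode_varint(&[0x7f]), Some((127, 1)));
    }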
@@ -2,11 +2,12 @@

use std::array;
use std::borrow::Cow;
use std::cell::{RefCell, UnsafeCell};
use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap, HashSet};
use strum::EnumString;
use tracing::{instrument, Level};

use parking_lot::RwLock;
use std::fmt::{Debug, Formatter};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::{cell::Cell, fmt, rc::Rc, sync::Arc};
@@ -465,6 +466,13 @@ impl OngoingCheckpoint {
self.state = CheckpointState::Start;
}

#[inline]
fn is_final_write(&self) -> bool {
self.current_page as usize >= self.pages_to_checkpoint.len()
&& self.inflight_reads.is_empty()
&& !self.pending_writes.is_empty()
}

#[inline]
/// Whether or not new reads should be issued during checkpoint processing.
fn should_issue_reads(&self) -> bool {
@@ -552,7 +560,7 @@ pub struct WalFile {

syncing: Rc<Cell<bool>>,

shared: Arc<UnsafeCell<WalFileShared>>,
shared: Arc<RwLock<WalFileShared>>,
ongoing_checkpoint: OngoingCheckpoint,
checkpoint_threshold: usize,
// min and max frames for this connection
@@ -654,6 +662,7 @@ impl fmt::Debug for WalFile {
/// that needs to be communicated between threads so this struct does the job.
#[allow(dead_code)]
pub struct WalFileShared {
pub enabled: AtomicBool,
pub wal_header: Arc<SpinLock<WalHeader>>,
pub min_frame: AtomicU64,
pub max_frame: AtomicU64,
@@ -666,7 +675,7 @@ pub struct WalFileShared {
// TODO: this will need refactoring because this is incredibly memory inefficient.
pub frame_cache: Arc<SpinLock<HashMap<u64, Vec<u64>>>>,
pub last_checksum: (u32, u32), // Checksum of the last frame in WAL, this is a cumulative checksum over all frames in the WAL
pub file: Arc<dyn File>,
pub file: Option<Arc<dyn File>>,

/// Read locks advertise the maximum WAL frame a reader may access.
/// Slot 0 is special, when it is held (shared) the reader bypasses the WAL and uses the main DB file.
@@ -688,6 +697,7 @@ pub struct WalFileShared {
impl fmt::Debug for WalFileShared {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WalFileShared")
.field("enabled", &self.enabled.load(Ordering::Relaxed))
.field("wal_header", &self.wal_header)
.field("min_frame", &self.min_frame)
.field("max_frame", &self.max_frame)
@@ -704,8 +714,8 @@ impl fmt::Debug for WalFileShared {
/// the case of errors. It is held by the WalFile while checkpoint is ongoing
/// then transferred to the CheckpointResult if necessary.
enum CheckpointLocks {
Writer { ptr: Arc<UnsafeCell<WalFileShared>> },
Read0 { ptr: Arc<UnsafeCell<WalFileShared>> },
Writer { ptr: Arc<RwLock<WalFileShared>> },
Read0 { ptr: Arc<RwLock<WalFileShared>> },
}

/// Database checkpointers take the following locks, in order:
@@ -716,62 +726,55 @@ enum CheckpointLocks {
/// Exclusive lock on read-mark slots 1-N again. These are immediately released after being taken (RESTART and TRUNCATE only).
/// All of the above use blocking locks.
impl CheckpointLocks {
fn new(ptr: Arc<UnsafeCell<WalFileShared>>, mode: CheckpointMode) -> Result<Self> {
let shared = &mut unsafe { &mut *ptr.get() };
if !shared.checkpoint_lock.write() {
tracing::trace!("CheckpointGuard::new: checkpoint lock failed, returning Busy");
// we hold the exclusive checkpoint lock no matter which mode for the duration
return Err(LimboError::Busy);
fn new(ptr: Arc<RwLock<WalFileShared>>, mode: CheckpointMode) -> Result<Self> {
let ptr_clone = ptr.clone();
{
let shared = ptr.write();
if !shared.checkpoint_lock.write() {
tracing::trace!("CheckpointGuard::new: checkpoint lock failed, returning Busy");
return Err(LimboError::Busy);
}
match mode {
CheckpointMode::Passive { .. } => {
if !shared.read_locks[0].write() {
shared.checkpoint_lock.unlock();
tracing::trace!("CheckpointGuard: read0 lock failed, returning Busy");
return Err(LimboError::Busy);
}
}
CheckpointMode::Full => {
if !shared.read_locks[0].write() {
shared.checkpoint_lock.unlock();
tracing::trace!("CheckpointGuard: read0 lock failed (Full), Busy");
return Err(LimboError::Busy);
}
if !shared.write_lock.write() {
shared.read_locks[0].unlock();
shared.checkpoint_lock.unlock();
tracing::trace!("CheckpointGuard: write lock failed (Full), Busy");
return Err(LimboError::Busy);
}
}
CheckpointMode::Restart | CheckpointMode::Truncate { .. } => {
if !shared.read_locks[0].write() {
shared.checkpoint_lock.unlock();
tracing::trace!("CheckpointGuard: read0 lock failed, returning Busy");
return Err(LimboError::Busy);
}
if !shared.write_lock.write() {
shared.checkpoint_lock.unlock();
shared.read_locks[0].unlock();
tracing::trace!("CheckpointGuard: write lock failed, returning Busy");
return Err(LimboError::Busy);
}
}
}
}

match mode {
// Passive mode is the only mode not requiring a write lock, as it doesn't block
// readers or writers. It acquires the checkpoint lock to ensure that no other
// concurrent checkpoint happens, and acquires the exclusive read lock 0
// to ensure that no readers read from a partially checkpointed db file.
CheckpointMode::Passive { .. } => {
let read0 = &mut shared.read_locks[0];
if !read0.write() {
shared.checkpoint_lock.unlock();
tracing::trace!("CheckpointGuard: read0 lock failed, returning Busy");
// for passive and full we need to hold the read0 lock
return Err(LimboError::Busy);
}
Ok(Self::Read0 { ptr })
}
CheckpointMode::Full => {
// Full blocks writers and holds read0 exclusively (readers may still use >0 slots)
let read0 = &mut shared.read_locks[0];
if !read0.write() {
shared.checkpoint_lock.unlock();
tracing::trace!("CheckpointGuard: read0 lock failed (Full), Busy");
return Err(LimboError::Busy);
}
if !shared.write_lock.write() {
read0.unlock();
shared.checkpoint_lock.unlock();
tracing::trace!("CheckpointGuard: write lock failed (Full), Busy");
return Err(LimboError::Busy);
}
Ok(Self::Writer { ptr })
}
CheckpointMode::Restart | CheckpointMode::Truncate { .. } => {
// like all modes, we must acquire an exclusive checkpoint lock and lock on read 0
// to prevent a reader from reading a partially checkpointed db file.
let read0 = &mut shared.read_locks[0];
if !read0.write() {
shared.checkpoint_lock.unlock();
tracing::trace!("CheckpointGuard: read0 lock failed, returning Busy");
return Err(LimboError::Busy);
}
// if we are resetting the log we must hold the write lock for the duration.
// ensures no writer can append frames while we reset the log.
if !shared.write_lock.write() {
shared.checkpoint_lock.unlock();
read0.unlock();
tracing::trace!("CheckpointGuard: write lock failed, returning Busy");
return Err(LimboError::Busy);
}
Ok(Self::Writer { ptr })
CheckpointMode::Passive { .. } => Ok(Self::Read0 { ptr: ptr_clone }),
CheckpointMode::Full | CheckpointMode::Restart | CheckpointMode::Truncate { .. } => {
Ok(Self::Writer { ptr: ptr_clone })
}
}
}
@@ -780,15 +783,17 @@ impl CheckpointLocks {
impl Drop for CheckpointLocks {
fn drop(&mut self) {
match self {
CheckpointLocks::Writer { ptr: shared } => unsafe {
(*shared.get()).write_lock.unlock();
(*shared.get()).read_locks[0].unlock();
(*shared.get()).checkpoint_lock.unlock();
},
CheckpointLocks::Read0 { ptr: shared } => unsafe {
(*shared.get()).read_locks[0].unlock();
(*shared.get()).checkpoint_lock.unlock();
},
CheckpointLocks::Writer { ptr: shared } => {
let guard = shared.write();
guard.write_lock.unlock();
guard.read_locks[0].unlock();
guard.checkpoint_lock.unlock();
}
CheckpointLocks::Read0 { ptr: shared } => {
let guard = shared.write();
guard.read_locks[0].unlock();
guard.checkpoint_lock.unlock();
}
}
}
}
@@ -853,7 +858,13 @@ impl Wal for WalFile {

// If none found or lagging, try to claim/update a slot
if best_idx == -1 || (best_mark as u64) < shared_max {
for (idx, lock) in self.get_shared().read_locks.iter_mut().enumerate().skip(1) {
for (idx, lock) in self
.get_shared_mut()
.read_locks
.iter_mut()
.enumerate()
.skip(1)
{
if !lock.write() {
continue; // busy slot
}
@@ -878,11 +889,12 @@ impl Wal for WalFile {
// TODO: we should retry here instead of always returning Busy
return Ok((LimboResult::Busy, db_changed));
}
let checkpoint_seq = shared.wal_header.lock().checkpoint_seq;
(
shared.max_frame.load(Ordering::Acquire),
shared.nbackfills.load(Ordering::Acquire),
shared.last_checksum,
shared.wal_header.lock().checkpoint_seq,
checkpoint_seq,
)
};

@@ -932,8 +944,7 @@ impl Wal for WalFile {
fn end_read_tx(&self) {
let slot = self.max_frame_read_lock_index.get();
if slot != NO_LOCK_HELD {
let rl = &mut self.get_shared().read_locks[slot];
rl.unlock();
self.get_shared_mut().read_locks[slot].unlock();
self.max_frame_read_lock_index.set(NO_LOCK_HELD);
tracing::debug!("end_read_tx(slot={slot})");
} else {
@@ -944,7 +955,7 @@ impl Wal for WalFile {
/// Begin a write transaction
#[instrument(skip_all, level = Level::DEBUG)]
fn begin_write_tx(&mut self) -> Result<LimboResult> {
let shared = self.get_shared();
let shared = self.get_shared_mut();
// sqlite/src/wal.c 3702
// Cannot start a write transaction without first holding a read
// transaction.
@@ -957,16 +968,14 @@ impl Wal for WalFile {
if !shared.write_lock.write() {
return Ok(LimboResult::Busy);
}
let (shared_max, nbackfills, last_checksum) = {
let shared = self.get_shared();
(
shared.max_frame.load(Ordering::Acquire),
shared.nbackfills.load(Ordering::Acquire),
shared.last_checksum,
)
};
let (shared_max, nbackfills, last_checksum) = (
shared.max_frame.load(Ordering::Acquire),
shared.nbackfills.load(Ordering::Acquire),
shared.last_checksum,
);
if self.max_frame == shared_max {
// Snapshot still valid; adopt counters
drop(shared);
self.last_checksum = last_checksum;
self.min_frame = nbackfills + 1;
return Ok(LimboResult::Ok);
@@ -1080,8 +1089,14 @@ impl Wal for WalFile {
finish_read_page(page.get().id, buf, cloned);
frame.set_wal_tag(frame_id, seq);
});
let shared = self.get_shared();
assert!(
shared.enabled.load(Ordering::Relaxed),
"WAL must be enabled"
);
let file = shared.file.as_ref().unwrap();
begin_read_wal_frame(
&self.get_shared().file,
file,
offset + WAL_FRAME_HEADER_SIZE as u64,
buffer_pool,
complete,
@@ -1138,8 +1153,13 @@ impl Wal for WalFile {
}
}
});
let c =
begin_read_wal_frame_raw(&self.buffer_pool, &self.get_shared().file, offset, complete)?;
let shared = self.get_shared();
assert!(
shared.enabled.load(Ordering::Relaxed),
"WAL must be enabled"
);
let file = shared.file.as_ref().unwrap();
let c = begin_read_wal_frame_raw(&self.buffer_pool, file, offset, complete)?;
Ok(c)
}

@@ -1194,8 +1214,14 @@ impl Wal for WalFile {
}
}
});
let shared = self.get_shared();
assert!(
shared.enabled.load(Ordering::Relaxed),
"WAL must be enabled"
);
let file = shared.file.as_ref().unwrap();
let c = begin_read_wal_frame(
&self.get_shared().file,
file,
offset + WAL_FRAME_HEADER_SIZE as u64,
buffer_pool,
complete,
@@ -1214,8 +1240,16 @@ impl Wal for WalFile {

// perform actual write
let offset = self.frame_offset(frame_id);
let shared = self.get_shared();
let header = shared.wal_header.clone();
let (header, file) = {
let shared = self.get_shared();
let header = shared.wal_header.clone();
assert!(
shared.enabled.load(Ordering::Relaxed),
"WAL must be enabled"
);
let file = shared.file.as_ref().unwrap().clone();
(header, file)
};
let header = header.lock();
let checksums = self.last_checksum;
let (checksums, frame_bytes) = prepare_wal_frame(
@@ -1228,7 +1262,7 @@ impl Wal for WalFile {
|
||||
page,
|
||||
);
|
||||
let c = Completion::new_write(|_| {});
|
||||
let c = shared.file.pwrite(offset, frame_bytes, c)?;
|
||||
let c = file.pwrite(offset, frame_bytes, c)?;
|
||||
self.io.wait_for_completion(c)?;
|
||||
self.complete_append_frame(page_id, frame_id, checksums);
|
||||
if db_size > 0 {
|
||||
@@ -1246,8 +1280,11 @@ impl Wal for WalFile {
|
||||
db_size: u32,
|
||||
) -> Result<Completion> {
|
||||
self.ensure_header_if_needed(page_size)?;
|
||||
let shared = self.get_shared();
|
||||
let shared_page_size = shared.wal_header.lock().page_size;
|
||||
let shared_page_size = {
|
||||
let shared = self.get_shared();
|
||||
let page_size = shared.wal_header.lock().page_size;
|
||||
page_size
|
||||
};
|
||||
turso_assert!(
|
||||
shared_page_size == page_size.get(),
|
||||
"page size mismatch - tried to change page size after WAL header was already initialized: shared.page_size={shared_page_size}, page_size={}",
|
||||
@@ -1307,7 +1344,12 @@ impl Wal for WalFile {
|
||||
page.set_wal_tag(frame_id, seq);
|
||||
}
|
||||
});
|
||||
let result = shared.file.pwrite(offset, frame_bytes.clone(), c)?;
|
||||
assert!(
|
||||
shared.enabled.load(Ordering::Relaxed),
|
||||
"WAL must be enabled"
|
||||
);
|
||||
let file = shared.file.as_ref().unwrap();
|
||||
let result = file.pwrite(offset, frame_bytes.clone(), c)?;
|
||||
(result, frame_checksums)
|
||||
};
|
||||
self.complete_append_frame(page_id as u64, frame_id, checksums);
|
||||
@@ -1344,7 +1386,12 @@ impl Wal for WalFile {
});
let shared = self.get_shared();
self.syncing.set(true);
let c = shared.file.sync(completion)?;
assert!(
shared.enabled.load(Ordering::Relaxed),
"WAL must be enabled"
);
let file = shared.file.as_ref().unwrap();
let c = file.sync(completion)?;
Ok(c)
}

@@ -1392,7 +1439,7 @@ impl Wal for WalFile {

#[instrument(skip_all, level = Level::DEBUG)]
fn finish_append_frames_commit(&mut self) -> Result<()> {
let shared = self.get_shared();
let mut shared = self.get_shared_mut();
shared.max_frame.store(self.max_frame, Ordering::Release);
tracing::trace!(self.max_frame, ?self.last_checksum);
shared.last_checksum = self.last_checksum;
@@ -1510,7 +1557,7 @@ impl Wal for WalFile {
|
||||
// single completion for the whole batch
|
||||
let total_len: i32 = iovecs.iter().map(|b| b.len() as i32).sum();
|
||||
let page_frame_for_cb = page_frame_and_checksum.clone();
|
||||
let c = Completion::new_write(move |res: Result<i32, CompletionError>| {
|
||||
let cmp = move |res: Result<i32, CompletionError>| {
|
||||
let Ok(bytes_written) = res else {
|
||||
return;
|
||||
};
|
||||
@@ -1523,9 +1570,21 @@ impl Wal for WalFile {
|
||||
page.clear_dirty();
|
||||
page.set_wal_tag(*fid, seq);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
let c = self.get_shared().file.pwritev(start_off, iovecs, c)?;
|
||||
let c = if db_size_on_commit.is_some() {
|
||||
Completion::new_write_linked(cmp)
|
||||
} else {
|
||||
Completion::new_write(cmp)
|
||||
};
|
||||
|
||||
let shared = self.get_shared();
|
||||
assert!(
|
||||
shared.enabled.load(Ordering::Relaxed),
|
||||
"WAL must be enabled"
|
||||
);
|
||||
let file = shared.file.as_ref().unwrap();
|
||||
let c = file.pwritev(start_off, iovecs, c)?;
|
||||
Ok(c)
|
||||
}
|
||||
|
||||
@@ -1542,16 +1601,23 @@ impl Wal for WalFile {
impl WalFile {
pub fn new(
io: Arc<dyn IO>,
shared: Arc<UnsafeCell<WalFileShared>>,
shared: Arc<RwLock<WalFileShared>>,
buffer_pool: Arc<BufferPool>,
) -> Self {
let header = unsafe { shared.get().as_mut().unwrap().wal_header.lock() };
let last_checksum = unsafe { (*shared.get()).last_checksum };
let (header, last_checksum, max_frame) = {
let shared_guard = shared.read();
let header = *shared_guard.wal_header.lock();
(
header,
shared_guard.last_checksum,
shared_guard.max_frame.load(Ordering::Acquire),
)
};
let now = io.now();
Self {
io,
// default to max frame in WAL, so that when we read schema we can read from WAL too if it's there.
max_frame: unsafe { (*shared.get()).max_frame.load(Ordering::Acquire) },
max_frame,
shared,
ongoing_checkpoint: OngoingCheckpoint {
time: now,
@@ -1572,7 +1638,7 @@ impl WalFile {
last_checksum,
prev_checkpoint: CheckpointResult::default(),
checkpoint_guard: None,
header: *header,
header,
io_ctx: RefCell::new(IOContext::default()),
}
}
@@ -1587,9 +1653,12 @@ impl WalFile {
WAL_HEADER_SIZE as u64 + page_offset
}

#[allow(clippy::mut_from_ref)]
fn get_shared(&self) -> &mut WalFileShared {
unsafe { self.shared.get().as_mut().unwrap() }
fn get_shared_mut(&self) -> parking_lot::RwLockWriteGuard<WalFileShared> {
self.shared.write()
}

fn get_shared(&self) -> parking_lot::RwLockReadGuard<WalFileShared> {
self.shared.read()
}

fn complete_append_frame(&mut self, page_id: u64, frame_id: u64, checksums: (u32, u32)) {
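This accessor change is the heart of the refactor: the shared WAL state moves from Arc<UnsafeCell<WalFileShared>> to Arc<RwLock<WalFileShared>>, so callers receive lock guards instead of raw &mut references obtained through unsafe code. A minimal sketch of the guard-based pattern, assuming parking_lot's RwLock and simplified types (not the crate's real structs):

use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use std::sync::Arc;

struct Shared {
    max_frame: u64,
}

struct Wal {
    shared: Arc<RwLock<Shared>>,
}

impl Wal {
    // Shared (read-only) access: many readers may hold this guard concurrently.
    fn get_shared(&self) -> RwLockReadGuard<'_, Shared> {
        self.shared.read()
    }

    // Exclusive access replaces the old `unsafe { self.shared.get().as_mut() }`.
    fn get_shared_mut(&self) -> RwLockWriteGuard<'_, Shared> {
        self.shared.write()
    }

    fn finish_commit(&self, new_max: u64) {
        let mut shared = self.get_shared_mut();
        shared.max_frame = new_max;
    }
}

fn main() {
    let wal = Wal {
        shared: Arc::new(RwLock::new(Shared { max_frame: 0 })),
    };
    wal.finish_commit(42);
    assert_eq!(wal.get_shared().max_frame, 42);
}

A practical consequence, visible in several hunks above, is that guards must be scoped tightly — for example the `let (header, file) = { … }` block — since holding a read guard while requesting the write guard on the same lock would deadlock.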
@@ -1623,41 +1692,48 @@ impl WalFile {
|
||||
}
|
||||
tracing::debug!("ensure_header_if_needed");
|
||||
self.last_checksum = {
|
||||
let shared = self.get_shared();
|
||||
let mut hdr = shared.wal_header.lock();
|
||||
hdr.magic = if cfg!(target_endian = "big") {
|
||||
WAL_MAGIC_BE
|
||||
} else {
|
||||
WAL_MAGIC_LE
|
||||
let mut shared = self.get_shared_mut();
|
||||
let checksum = {
|
||||
let mut hdr = shared.wal_header.lock();
|
||||
hdr.magic = if cfg!(target_endian = "big") {
|
||||
WAL_MAGIC_BE
|
||||
} else {
|
||||
WAL_MAGIC_LE
|
||||
};
|
||||
if hdr.page_size == 0 {
|
||||
hdr.page_size = page_size.get();
|
||||
}
|
||||
if hdr.salt_1 == 0 && hdr.salt_2 == 0 {
|
||||
hdr.salt_1 = self.io.generate_random_number() as u32;
|
||||
hdr.salt_2 = self.io.generate_random_number() as u32;
|
||||
}
|
||||
|
||||
// recompute header checksum
|
||||
let prefix = &hdr.as_bytes()[..WAL_HEADER_SIZE - 8];
|
||||
let use_native = (hdr.magic & 1) != 0;
|
||||
let (c1, c2) = checksum_wal(prefix, &hdr, (0, 0), use_native);
|
||||
hdr.checksum_1 = c1;
|
||||
hdr.checksum_2 = c2;
|
||||
(c1, c2)
|
||||
};
|
||||
if hdr.page_size == 0 {
|
||||
hdr.page_size = page_size.get();
|
||||
}
|
||||
if hdr.salt_1 == 0 && hdr.salt_2 == 0 {
|
||||
hdr.salt_1 = self.io.generate_random_number() as u32;
|
||||
hdr.salt_2 = self.io.generate_random_number() as u32;
|
||||
}
|
||||
|
||||
// recompute header checksum
|
||||
let prefix = &hdr.as_bytes()[..WAL_HEADER_SIZE - 8];
|
||||
let use_native = (hdr.magic & 1) != 0;
|
||||
let (c1, c2) = checksum_wal(prefix, &hdr, (0, 0), use_native);
|
||||
hdr.checksum_1 = c1;
|
||||
hdr.checksum_2 = c2;
|
||||
|
||||
shared.last_checksum = (c1, c2);
|
||||
(c1, c2)
|
||||
shared.last_checksum = checksum;
|
||||
checksum
|
||||
};
|
||||
|
||||
self.max_frame = 0;
|
||||
let shared = self.get_shared();
|
||||
assert!(
|
||||
shared.enabled.load(Ordering::Relaxed),
|
||||
"WAL must be enabled"
|
||||
);
|
||||
let file = shared.file.as_ref().unwrap();
|
||||
self.io
|
||||
.wait_for_completion(sqlite3_ondisk::begin_write_wal_header(
|
||||
&shared.file,
|
||||
file,
|
||||
&shared.wal_header.lock(),
|
||||
)?)?;
|
||||
self.io
|
||||
.wait_for_completion(shared.file.sync(Completion::new_sync(|_| {}))?)?;
|
||||
.wait_for_completion(file.sync(Completion::new_sync(|_| {}))?)?;
|
||||
shared.initialized.store(true, Ordering::Release);
|
||||
Ok(())
|
||||
}
|
||||
@@ -1821,7 +1897,10 @@ impl WalFile {
|
||||
let batch_map = self.ongoing_checkpoint.pending_writes.take();
|
||||
if !batch_map.is_empty() {
|
||||
let done_flag = self.ongoing_checkpoint.add_write();
|
||||
completions.extend(write_pages_vectored(pager, batch_map, done_flag)?);
|
||||
let is_final = self.ongoing_checkpoint.is_final_write();
|
||||
completions.extend(write_pages_vectored(
|
||||
pager, batch_map, done_flag, is_final,
|
||||
)?);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1970,7 +2049,7 @@ impl WalFile {
|
||||
/// We never modify slot values while a reader holds that slot's lock.
|
||||
/// TODO: implement proper BUSY handling behavior
|
||||
fn determine_max_safe_checkpoint_frame(&self) -> u64 {
|
||||
let shared = self.get_shared();
|
||||
let mut shared = self.get_shared_mut();
|
||||
let shared_max = shared.max_frame.load(Ordering::Acquire);
|
||||
let mut max_safe_frame = shared_max;
|
||||
|
||||
@@ -2010,7 +2089,7 @@ impl WalFile {
|
||||
tracing::info!("restart_log(mode={mode:?})");
|
||||
{
|
||||
// Block all readers
|
||||
let shared = self.get_shared();
|
||||
let mut shared = self.get_shared_mut();
|
||||
for idx in 1..shared.read_locks.len() {
|
||||
let lock = &mut shared.read_locks[idx];
|
||||
if !lock.write() {
|
||||
@@ -2028,7 +2107,7 @@ impl WalFile {
|
||||
|
||||
let unlock = |e: Option<&LimboError>| {
|
||||
// release all read locks we just acquired, the caller will take care of the others
|
||||
let shared = unsafe { self.shared.get().as_mut().unwrap() };
|
||||
let shared = self.shared.write();
|
||||
for idx in 1..shared.read_locks.len() {
|
||||
shared.read_locks[idx].unlock();
|
||||
}
|
||||
@@ -2040,14 +2119,16 @@ impl WalFile {
|
||||
}
|
||||
};
|
||||
// reinitialize in‑memory state
|
||||
self.get_shared()
|
||||
self.get_shared_mut()
|
||||
.restart_wal_header(&self.io, mode)
|
||||
.inspect_err(|e| {
|
||||
unlock(Some(e));
|
||||
})?;
|
||||
let (header, cksm) = {
|
||||
let shared = self.get_shared();
|
||||
(*shared.wal_header.lock(), shared.last_checksum)
|
||||
let header = *shared.wal_header.lock();
|
||||
let cksm = shared.last_checksum;
|
||||
(header, cksm)
|
||||
};
|
||||
self.last_checksum = cksm;
|
||||
self.header = header;
|
||||
@@ -2061,10 +2142,12 @@ impl WalFile {
|
||||
});
|
||||
let shared = self.get_shared();
|
||||
// for now at least, let's do all this IO synchronously
|
||||
let c = shared
|
||||
.file
|
||||
.truncate(0, c)
|
||||
.inspect_err(|e| unlock(Some(e)))?;
|
||||
assert!(
|
||||
shared.enabled.load(Ordering::Relaxed),
|
||||
"WAL must be enabled"
|
||||
);
|
||||
let file = shared.file.as_ref().unwrap();
|
||||
let c = file.truncate(0, c).inspect_err(|e| unlock(Some(e)))?;
|
||||
shared.initialized.store(false, Ordering::Release);
|
||||
self.io
|
||||
.wait_for_completion(c)
|
||||
@@ -2072,12 +2155,10 @@ impl WalFile {
|
||||
// fsync after truncation
|
||||
self.io
|
||||
.wait_for_completion(
|
||||
shared
|
||||
.file
|
||||
.sync(Completion::new_sync(|_| {
|
||||
tracing::trace!("WAL file synced after reset/truncation");
|
||||
}))
|
||||
.inspect_err(|e| unlock(Some(e)))?,
|
||||
file.sync(Completion::new_sync(|_| {
|
||||
tracing::trace!("WAL file synced after reset/truncation");
|
||||
}))
|
||||
.inspect_err(|e| unlock(Some(e)))?,
|
||||
)
|
||||
.inspect_err(|e| unlock(Some(e)))?;
|
||||
}
|
||||
@@ -2128,8 +2209,14 @@ impl WalFile {
|
||||
})
|
||||
};
|
||||
// schedule read of the page payload
|
||||
let shared = self.get_shared();
|
||||
assert!(
|
||||
shared.enabled.load(Ordering::Relaxed),
|
||||
"WAL must be enabled"
|
||||
);
|
||||
let file = shared.file.as_ref().unwrap();
|
||||
let c = begin_read_wal_frame(
|
||||
&self.get_shared().file,
|
||||
file,
|
||||
offset + WAL_FRAME_HEADER_SIZE as u64,
|
||||
self.buffer_pool.clone(),
|
||||
complete,
|
||||
@@ -2149,25 +2236,22 @@ impl WalFileShared {
pub fn open_shared_if_exists(
io: &Arc<dyn IO>,
path: &str,
) -> Result<Option<Arc<UnsafeCell<WalFileShared>>>> {
) -> Result<Arc<RwLock<WalFileShared>>> {
let file = io.open_file(path, crate::io::OpenFlags::Create, false)?;
if file.size()? > 0 {
let wal_file_shared = sqlite3_ondisk::read_entire_wal_dumb(&file)?;
// TODO: Return a completion instead.
let mut max_loops = 100_000;
while !unsafe { &*wal_file_shared.get() }
.loaded
.load(Ordering::Acquire)
{
while !wal_file_shared.read().loaded.load(Ordering::Acquire) {
io.run_once()?;
max_loops -= 1;
if max_loops == 0 {
panic!("WAL file not loaded");
}
}
Ok(Some(wal_file_shared))
Ok(wal_file_shared)
} else {
Ok(None)
WalFileShared::new_noop()
}
}

@@ -2175,7 +2259,42 @@ impl WalFileShared {
Ok(self.initialized.load(Ordering::Acquire))
}

pub fn new_shared(file: Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFileShared>>> {
pub fn new_noop() -> Result<Arc<RwLock<WalFileShared>>> {
let wal_header = WalHeader {
magic: 0,
file_format: 0,
page_size: 0,
checkpoint_seq: 0,
salt_1: 0,
salt_2: 0,
checksum_1: 0,
checksum_2: 0,
};
let read_locks = array::from_fn(|_| TursoRwLock::new());
for (i, lock) in read_locks.iter().enumerate() {
lock.write();
lock.set_value_exclusive(if i < 2 { 0 } else { READMARK_NOT_USED });
lock.unlock();
}
let shared = WalFileShared {
enabled: AtomicBool::new(false),
wal_header: Arc::new(SpinLock::new(wal_header)),
min_frame: AtomicU64::new(0),
max_frame: AtomicU64::new(0),
nbackfills: AtomicU64::new(0),
frame_cache: Arc::new(SpinLock::new(HashMap::new())),
last_checksum: (0, 0),
file: None,
read_locks,
write_lock: TursoRwLock::new(),
checkpoint_lock: TursoRwLock::new(),
loaded: AtomicBool::new(true),
initialized: AtomicBool::new(false),
};
Ok(Arc::new(RwLock::new(shared)))
}

pub fn new_shared(file: Arc<dyn File>) -> Result<Arc<RwLock<WalFileShared>>> {
let magic = if cfg!(target_endian = "big") {
WAL_MAGIC_BE
} else {
@@ -2201,20 +2320,50 @@ impl WalFileShared {
|
||||
lock.unlock();
|
||||
}
|
||||
let shared = WalFileShared {
|
||||
enabled: AtomicBool::new(true),
|
||||
wal_header: Arc::new(SpinLock::new(wal_header)),
|
||||
min_frame: AtomicU64::new(0),
|
||||
max_frame: AtomicU64::new(0),
|
||||
nbackfills: AtomicU64::new(0),
|
||||
frame_cache: Arc::new(SpinLock::new(HashMap::new())),
|
||||
last_checksum: (0, 0),
|
||||
file,
|
||||
file: Some(file),
|
||||
read_locks,
|
||||
write_lock: TursoRwLock::new(),
|
||||
checkpoint_lock: TursoRwLock::new(),
|
||||
loaded: AtomicBool::new(true),
|
||||
initialized: AtomicBool::new(false),
|
||||
};
|
||||
Ok(Arc::new(UnsafeCell::new(shared)))
|
||||
Ok(Arc::new(RwLock::new(shared)))
|
||||
}
|
||||
|
||||
pub fn create(&mut self, file: Arc<dyn File>) -> Result<()> {
|
||||
if self.enabled.load(Ordering::Relaxed) {
|
||||
return Err(LimboError::InternalError("WAL already enabled".to_string()));
|
||||
}
|
||||
|
||||
let magic = if cfg!(target_endian = "big") {
|
||||
WAL_MAGIC_BE
|
||||
} else {
|
||||
WAL_MAGIC_LE
|
||||
};
|
||||
|
||||
*self.wal_header.lock() = WalHeader {
|
||||
magic,
|
||||
file_format: 3007000,
|
||||
page_size: 0, // Signifies WAL header that is not persistent on disk yet.
|
||||
checkpoint_seq: 0,
|
||||
salt_1: 0,
|
||||
salt_2: 0,
|
||||
checksum_1: 0,
|
||||
checksum_2: 0,
|
||||
};
|
||||
|
||||
self.file = Some(file);
|
||||
self.enabled.store(true, Ordering::Relaxed);
|
||||
self.initialized.store(false, Ordering::Relaxed);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn page_size(&self) -> u32 {
|
||||
@@ -2280,10 +2429,11 @@ pub mod test {
|
||||
CheckpointMode, CheckpointResult, Completion, Connection, Database, LimboError, PlatformIO,
|
||||
StepResult, Wal, WalFile, WalFileShared, IO,
|
||||
};
|
||||
use parking_lot::RwLock;
|
||||
#[cfg(unix)]
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
use std::{
|
||||
cell::{Cell, UnsafeCell},
|
||||
cell::Cell,
|
||||
rc::Rc,
|
||||
sync::{atomic::Ordering, Arc},
|
||||
};
|
||||
@@ -2310,19 +2460,18 @@ pub mod test {
|
||||
conn.execute("create table test (id integer primary key, value text)")
|
||||
.unwrap();
|
||||
let _ = conn.execute("insert into test (value) values ('test1'), ('test2'), ('test3')");
|
||||
let wal = db.maybe_shared_wal.write();
|
||||
let wal_file = wal.as_ref().unwrap();
|
||||
let file = unsafe { &mut *wal_file.get() };
|
||||
let wal = db.shared_wal.write();
|
||||
let wal_file = wal.file.as_ref().unwrap().clone();
|
||||
let done = Rc::new(Cell::new(false));
|
||||
let _done = done.clone();
|
||||
let _ = file.file.truncate(
|
||||
let _ = wal_file.truncate(
|
||||
WAL_HEADER_SIZE as u64,
|
||||
Completion::new_trunc(move |_| {
|
||||
let done = _done.clone();
|
||||
done.set(true);
|
||||
}),
|
||||
);
|
||||
assert!(file.file.size().unwrap() == WAL_HEADER_SIZE as u64);
|
||||
assert!(wal_file.size().unwrap() == WAL_HEADER_SIZE as u64);
|
||||
assert!(done.get());
|
||||
}
|
||||
|
||||
@@ -2409,12 +2558,11 @@ pub mod test {
|
||||
pager.io.block(|| wal.checkpoint(pager, mode)).unwrap()
|
||||
}
|
||||
|
||||
fn wal_header_snapshot(shared: &Arc<UnsafeCell<WalFileShared>>) -> (u32, u32, u32, u32) {
|
||||
fn wal_header_snapshot(shared: &Arc<RwLock<WalFileShared>>) -> (u32, u32, u32, u32) {
|
||||
// (checkpoint_seq, salt1, salt2, page_size)
|
||||
unsafe {
|
||||
let hdr = (*shared.get()).wal_header.lock();
|
||||
(hdr.checkpoint_seq, hdr.salt_1, hdr.salt_2, hdr.page_size)
|
||||
}
|
||||
let shared_guard = shared.read();
|
||||
let hdr = shared_guard.wal_header.lock();
|
||||
(hdr.checkpoint_seq, hdr.salt_1, hdr.salt_2, hdr.page_size)
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -2437,10 +2585,10 @@ pub mod test {
|
||||
}
|
||||
|
||||
// Snapshot header & counters before the RESTART checkpoint.
|
||||
let wal_shared = db.maybe_shared_wal.read().as_ref().unwrap().clone();
|
||||
let wal_shared = db.shared_wal.clone();
|
||||
let (seq_before, salt1_before, salt2_before, _ps_before) = wal_header_snapshot(&wal_shared);
|
||||
let (mx_before, backfill_before) = unsafe {
|
||||
let s = &*wal_shared.get();
|
||||
let (mx_before, backfill_before) = {
|
||||
let s = wal_shared.read();
|
||||
(
|
||||
s.max_frame.load(Ordering::SeqCst),
|
||||
s.nbackfills.load(Ordering::SeqCst),
|
||||
@@ -2478,8 +2626,8 @@ pub mod test {
|
||||
);
|
||||
assert_ne!(salt2_after, salt2_before, "salt_2 is randomized");
|
||||
|
||||
let (mx_after, backfill_after) = unsafe {
|
||||
let s = &*wal_shared.get();
|
||||
let (mx_after, backfill_after) = {
|
||||
let s = wal_shared.read();
|
||||
(
|
||||
s.max_frame.load(Ordering::SeqCst),
|
||||
s.nbackfills.load(Ordering::SeqCst),
|
||||
@@ -2510,7 +2658,7 @@ pub mod test {
|
||||
.borrow_mut()
|
||||
.finish_append_frames_commit()
|
||||
.unwrap();
|
||||
let new_max = unsafe { (&*wal_shared.get()).max_frame.load(Ordering::SeqCst) };
|
||||
let new_max = wal_shared.read().max_frame.load(Ordering::SeqCst);
|
||||
assert_eq!(new_max, 1, "first append after RESTART starts at frame 1");
|
||||
|
||||
std::fs::remove_dir_all(path).unwrap();
|
||||
@@ -2557,11 +2705,7 @@ pub mod test {
|
||||
upper_bound_inclusive: None,
|
||||
},
|
||||
);
|
||||
let maxf = unsafe {
|
||||
(&*db.maybe_shared_wal.read().as_ref().unwrap().get())
|
||||
.max_frame
|
||||
.load(Ordering::SeqCst)
|
||||
};
|
||||
let maxf = db.shared_wal.read().max_frame.load(Ordering::SeqCst);
|
||||
(res, maxf)
|
||||
};
|
||||
assert_eq!(res1.num_attempted, max_before);
|
||||
@@ -2670,7 +2814,7 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_wal_read_marks_after_restart() {
|
||||
let (db, _path) = get_database();
|
||||
let wal_shared = db.maybe_shared_wal.read().as_ref().unwrap().clone();
|
||||
let wal_shared = db.shared_wal.clone();
|
||||
|
||||
let conn = db.connect().unwrap();
|
||||
conn.execute("create table test(id integer primary key, value text)")
|
||||
@@ -2685,8 +2829,8 @@ pub mod test {
|
||||
}
|
||||
|
||||
// Verify read marks after restart
|
||||
let read_marks_after: Vec<_> = unsafe {
|
||||
let s = &*wal_shared.get();
|
||||
let read_marks_after: Vec<_> = {
|
||||
let s = wal_shared.read();
|
||||
(0..5).map(|i| s.read_locks[i].get_value()).collect()
|
||||
};
|
||||
|
||||
@@ -2774,7 +2918,7 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_wal_checkpoint_updates_read_marks() {
|
||||
let (db, _path) = get_database();
|
||||
let wal_shared = db.maybe_shared_wal.read().as_ref().unwrap().clone();
|
||||
let wal_shared = db.shared_wal.clone();
|
||||
|
||||
let conn = db.connect().unwrap();
|
||||
conn.execute("create table test(id integer primary key, value text)")
|
||||
@@ -2782,7 +2926,7 @@ pub mod test {
|
||||
bulk_inserts(&conn, 10, 5);
|
||||
|
||||
// get max frame before checkpoint
|
||||
let max_frame_before = unsafe { (*wal_shared.get()).max_frame.load(Ordering::SeqCst) };
|
||||
let max_frame_before = wal_shared.read().max_frame.load(Ordering::SeqCst);
|
||||
|
||||
{
|
||||
let pager = conn.pager.borrow();
|
||||
@@ -2797,7 +2941,7 @@ pub mod test {
|
||||
}
|
||||
|
||||
// check that read mark 1 (default reader) was updated to max_frame
|
||||
let read_mark_1 = unsafe { (*wal_shared.get()).read_locks[1].get_value() };
|
||||
let read_mark_1 = wal_shared.read().read_locks[1].get_value();
|
||||
|
||||
assert_eq!(
|
||||
read_mark_1 as u64, max_frame_before,
|
||||
@@ -3269,8 +3413,8 @@ pub mod test {
|
||||
}
|
||||
|
||||
// Snapshot the current mxFrame before running FULL
|
||||
let wal_shared = db.maybe_shared_wal.read().as_ref().unwrap().clone();
|
||||
let mx_before = unsafe { (&*wal_shared.get()).max_frame.load(Ordering::SeqCst) };
|
||||
let wal_shared = db.shared_wal.clone();
|
||||
let mx_before = wal_shared.read().max_frame.load(Ordering::SeqCst);
|
||||
assert!(mx_before > 0, "expected frames in WAL before FULL");
|
||||
|
||||
// Run FULL checkpoint - must backfill *all* frames up to mx_before
|
||||
@@ -3320,11 +3464,7 @@ pub mod test {
|
||||
for c in completions {
|
||||
db.io.wait_for_completion(c).unwrap();
|
||||
}
|
||||
let mx_now = unsafe {
|
||||
(&*db.maybe_shared_wal.read().as_ref().unwrap().get())
|
||||
.max_frame
|
||||
.load(Ordering::SeqCst)
|
||||
};
|
||||
let mx_now = db.shared_wal.read().max_frame.load(Ordering::SeqCst);
|
||||
assert!(mx_now > r_snapshot);
|
||||
|
||||
// FULL must return Busy while a reader is stuck behind
|
||||
|
||||
@@ -115,6 +115,7 @@ pub fn handle_distinct(program: &mut ProgramBuilder, agg: &Aggregate, agg_arg_re
|
||||
count: num_regs,
|
||||
dest_reg: record_reg,
|
||||
index_name: Some(distinct_ctx.ephemeral_index_name.to_string()),
|
||||
affinity_str: None,
|
||||
});
|
||||
program.emit_insn(Insn::IdxInsert {
|
||||
cursor_id: distinct_ctx.cursor_id,
|
||||
|
||||
@@ -28,6 +28,12 @@ pub fn translate_alter_table(
|
||||
body: alter_table,
|
||||
} = alter;
|
||||
let table_name = table_name.name.as_str();
|
||||
|
||||
// Check if someone is trying to ALTER a system table
|
||||
if crate::schema::is_system_table(table_name) {
|
||||
crate::bail_parse_error!("table {} may not be modified", table_name);
|
||||
}
|
||||
|
||||
if schema.table_has_indexes(table_name) && !schema.indexes_enabled() {
|
||||
// Let's disable altering a table with indices altogether instead of checking column by
|
||||
// column to be extra safe.
|
||||
@@ -135,11 +141,18 @@ pub fn translate_alter_table(
|
||||
|
||||
let record = program.alloc_register();
|
||||
|
||||
let affinity_str = btree
|
||||
.columns
|
||||
.iter()
|
||||
.map(|col| col.affinity().aff_mask())
|
||||
.collect::<String>();
|
||||
|
||||
program.emit_insn(Insn::MakeRecord {
|
||||
start_reg: first_column,
|
||||
count: column_count,
|
||||
dest_reg: record,
|
||||
index_name: None,
|
||||
affinity_str: Some(affinity_str),
|
||||
});
|
||||
|
||||
program.emit_insn(Insn::Insert {
|
||||
@@ -295,6 +308,7 @@ pub fn translate_alter_table(
|
||||
count: sqlite_schema_column_len,
|
||||
dest_reg: record,
|
||||
index_name: None,
|
||||
affinity_str: None,
|
||||
});
|
||||
|
||||
program.emit_insn(Insn::Insert {
|
||||
@@ -436,6 +450,7 @@ pub fn translate_alter_table(
|
||||
count: sqlite_schema_column_len,
|
||||
dest_reg: record,
|
||||
index_name: None,
|
||||
affinity_str: None,
|
||||
});
|
||||
|
||||
program.emit_insn(Insn::Insert {
|
||||
|
||||
@@ -212,6 +212,7 @@ pub fn translate_analyze(
|
||||
count: 3,
|
||||
dest_reg: record_reg,
|
||||
index_name: None,
|
||||
affinity_str: None,
|
||||
});
|
||||
program.emit_insn(Insn::NewRowid {
|
||||
cursor: stat_cursor,
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
use crate::schema::{Index, IndexColumn, Schema};
|
||||
use crate::translate::emitter::{emit_query, LimitCtx, TranslateCtx};
|
||||
use crate::translate::expr::translate_expr;
|
||||
use crate::translate::plan::{Plan, QueryDestination, SelectPlan};
|
||||
use crate::translate::result_row::try_fold_expr_to_i64;
|
||||
use crate::vdbe::builder::{CursorType, ProgramBuilder};
|
||||
use crate::vdbe::insn::Insn;
|
||||
use crate::vdbe::BranchOffset;
|
||||
@@ -31,36 +33,55 @@ pub fn emit_program_for_compound_select(
|
||||
|
||||
let right_plan = right_most.clone();
|
||||
// Trivial exit on LIMIT 0
|
||||
if let Some(limit) = limit {
|
||||
if *limit == 0 {
|
||||
program.result_columns = right_plan.result_columns;
|
||||
program.table_references.extend(right_plan.table_references);
|
||||
return Ok(());
|
||||
}
|
||||
if matches!(limit.as_ref().and_then(try_fold_expr_to_i64), Some(v) if v == 0) {
|
||||
program.result_columns = right_plan.result_columns;
|
||||
program.table_references.extend(right_plan.table_references);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let right_most_ctx = TranslateCtx::new(
|
||||
program,
|
||||
schema,
|
||||
syms,
|
||||
right_most.table_references.joined_tables().len(),
|
||||
);
|
||||
|
||||
// Each subselect shares the same limit_ctx and offset, because the LIMIT, OFFSET applies to
|
||||
// the entire compound select, not just a single subselect.
|
||||
let limit_ctx = limit.map(|limit| {
|
||||
let limit_ctx = limit.as_ref().map(|limit| {
|
||||
let reg = program.alloc_register();
|
||||
program.emit_insn(Insn::Integer {
|
||||
value: limit as i64,
|
||||
dest: reg,
|
||||
});
|
||||
if let Some(val) = try_fold_expr_to_i64(limit) {
|
||||
program.emit_insn(Insn::Integer {
|
||||
value: val,
|
||||
dest: reg,
|
||||
});
|
||||
} else {
|
||||
program.add_comment(program.offset(), "OFFSET expr");
|
||||
_ = translate_expr(program, None, limit, reg, &right_most_ctx.resolver);
|
||||
program.emit_insn(Insn::MustBeInt { reg });
|
||||
}
|
||||
LimitCtx::new_shared(reg)
|
||||
});
|
||||
let offset_reg = offset.map(|offset| {
|
||||
let offset_reg = offset.as_ref().map(|offset_expr| {
|
||||
let reg = program.alloc_register();
|
||||
program.emit_insn(Insn::Integer {
|
||||
value: offset as i64,
|
||||
dest: reg,
|
||||
});
|
||||
|
||||
if let Some(val) = try_fold_expr_to_i64(offset_expr) {
|
||||
// Compile-time constant offset
|
||||
program.emit_insn(Insn::Integer {
|
||||
value: val,
|
||||
dest: reg,
|
||||
});
|
||||
} else {
|
||||
program.add_comment(program.offset(), "OFFSET expr");
|
||||
_ = translate_expr(program, None, offset_expr, reg, &right_most_ctx.resolver);
|
||||
program.emit_insn(Insn::MustBeInt { reg });
|
||||
}
|
||||
|
||||
let combined_reg = program.alloc_register();
|
||||
program.emit_insn(Insn::OffsetLimit {
|
||||
offset_reg: reg,
|
||||
combined_reg,
|
||||
limit_reg: limit_ctx.unwrap().reg_limit,
|
||||
limit_reg: limit_ctx.as_ref().unwrap().reg_limit,
|
||||
});
|
||||
|
||||
reg
|
||||
@@ -137,8 +158,8 @@ fn emit_compound_select(
|
||||
let compound_select = Plan::CompoundSelect {
|
||||
left,
|
||||
right_most: plan,
|
||||
limit,
|
||||
offset,
|
||||
limit: limit.clone(),
|
||||
offset: offset.clone(),
|
||||
order_by,
|
||||
};
|
||||
emit_compound_select(
|
||||
@@ -503,6 +524,7 @@ fn read_intersect_rows(
|
||||
count: column_count,
|
||||
dest_reg: row_content_reg,
|
||||
index_name: None,
|
||||
affinity_str: None,
|
||||
});
|
||||
program.emit_insn(Insn::IdxInsert {
|
||||
cursor_id: target_cursor_id,
|
||||
|
||||
@@ -23,6 +23,12 @@ pub fn translate_delete(
|
||||
connection: &Arc<crate::Connection>,
|
||||
) -> Result<ProgramBuilder> {
|
||||
let tbl_name = normalize_ident(tbl_name.name.as_str());
|
||||
|
||||
// Check if this is a system table that should be protected from direct writes
|
||||
if crate::schema::is_system_table(&tbl_name) {
|
||||
crate::bail_parse_error!("table {} may not be modified", tbl_name);
|
||||
}
|
||||
|
||||
if schema.table_has_indexes(&tbl_name) && !schema.indexes_enabled() {
|
||||
// Let's disable altering a table with indices altogether instead of checking column by
|
||||
// column to be extra safe.
|
||||
@@ -76,6 +82,12 @@ pub fn prepare_delete_plan(
|
||||
Some(table) => table,
|
||||
None => crate::bail_parse_error!("no such table: {}", tbl_name),
|
||||
};
|
||||
|
||||
// Check if this is a materialized view
|
||||
if schema.is_materialized_view(&tbl_name) {
|
||||
crate::bail_parse_error!("cannot modify materialized view {}", tbl_name);
|
||||
}
|
||||
|
||||
let table = if let Some(table) = table.virtual_table() {
|
||||
Table::Virtual(table.clone())
|
||||
} else if let Some(table) = table.btree() {
|
||||
@@ -107,7 +119,8 @@ pub fn prepare_delete_plan(
|
||||
)?;
|
||||
|
||||
// Parse the LIMIT/OFFSET clause
|
||||
let (resolved_limit, resolved_offset) = limit.map_or(Ok((None, None)), |l| parse_limit(&l))?;
|
||||
let (resolved_limit, resolved_offset) =
|
||||
limit.map_or(Ok((None, None)), |mut l| parse_limit(&mut l, connection))?;
|
||||
|
||||
let plan = DeletePlan {
|
||||
table_references,
|
||||
|
||||
@@ -217,7 +217,7 @@ impl fmt::Display for UpdatePlan {
|
||||
)?;
|
||||
}
|
||||
}
|
||||
if let Some(limit) = self.limit {
|
||||
if let Some(limit) = self.limit.as_ref() {
|
||||
writeln!(f, "LIMIT: {limit}")?;
|
||||
}
|
||||
if let Some(ret) = &self.returning {
|
||||
|
||||
@@ -26,6 +26,7 @@ use crate::schema::{BTreeTable, Column, Schema, Table};
|
||||
use crate::translate::compound_select::emit_program_for_compound_select;
|
||||
use crate::translate::expr::{emit_returning_results, ReturningValueRegisters};
|
||||
use crate::translate::plan::{DeletePlan, Plan, QueryDestination, Search};
|
||||
use crate::translate::result_row::try_fold_expr_to_i64;
|
||||
use crate::translate::values::emit_values;
|
||||
use crate::util::exprs_are_equivalent;
|
||||
use crate::vdbe::builder::{CursorKey, CursorType, ProgramBuilder};
|
||||
@@ -227,7 +228,7 @@ fn emit_program_for_select(
|
||||
);
|
||||
|
||||
// Trivial exit on LIMIT 0
|
||||
if let Some(limit) = plan.limit {
|
||||
if let Some(limit) = plan.limit.as_ref().and_then(try_fold_expr_to_i64) {
|
||||
if limit == 0 {
|
||||
program.result_columns = plan.result_columns;
|
||||
program.table_references.extend(plan.table_references);
|
||||
@@ -256,7 +257,7 @@ pub fn emit_query<'a>(
|
||||
// Emit subqueries first so the results can be read in the main query loop.
|
||||
emit_subqueries(program, t_ctx, &mut plan.table_references)?;
|
||||
|
||||
init_limit(program, t_ctx, plan.limit, plan.offset);
|
||||
init_limit(program, t_ctx, &plan.limit, &plan.offset);
|
||||
|
||||
// No rows will be read from source table loops if there is a constant false condition eg. WHERE 0
|
||||
// however an aggregation might still happen,
|
||||
@@ -404,13 +405,15 @@ fn emit_program_for_delete(
);

// exit early if LIMIT 0
if let Some(0) = plan.limit {
program.result_columns = plan.result_columns;
program.table_references.extend(plan.table_references);
return Ok(());
if let Some(limit) = plan.limit.as_ref().and_then(try_fold_expr_to_i64) {
if limit == 0 {
program.result_columns = plan.result_columns;
program.table_references.extend(plan.table_references);
return Ok(());
}
}

init_limit(program, &mut t_ctx, plan.limit, None);
init_limit(program, &mut t_ctx, &plan.limit, &None);

// No rows will be read from source table loops if there is a constant false condition eg. WHERE 0
let after_main_loop_label = program.allocate_label();
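This hunk (together with the matching UPDATE and compound-SELECT hunks) switches LIMIT/OFFSET from pre-computed integers to expressions: constants such as LIMIT 0 are still folded at compile time via try_fold_expr_to_i64, and anything non-constant is emitted with translate_expr followed by a MustBeInt check. A toy sketch of the folding half, using a simplified expression type rather than the crate's ast::Expr:

// Simplified stand-ins: not the crate's `ast::Expr` or `try_fold_expr_to_i64`.
enum Expr {
    Integer(i64),
    Add(Box<Expr>, Box<Expr>),
    Column(String), // unknown until runtime, so not foldable
}

// Fold an expression to a compile-time integer when every leaf is a constant.
fn try_fold(expr: &Expr) -> Option<i64> {
    match expr {
        Expr::Integer(v) => Some(*v),
        Expr::Add(a, b) => try_fold(a)?.checked_add(try_fold(b)?),
        Expr::Column(_) => None,
    }
}

fn main() {
    // `LIMIT 0` (even spelled as an arithmetic expression) short-circuits the query.
    let limit = Expr::Add(Box::new(Expr::Integer(-1)), Box::new(Expr::Integer(1)));
    assert_eq!(try_fold(&limit), Some(0));

    // A non-constant limit is left to runtime: translate the expression into a
    // register and follow it with a MustBeInt-style check.
    let dynamic = Expr::Column("n".into());
    assert_eq!(try_fold(&dynamic), None);
}

The fold is what keeps the LIMIT 0 early-exit above working after the plan's limit field changes from Option<isize> to an expression.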
@@ -660,13 +663,15 @@ fn emit_program_for_update(
|
||||
);
|
||||
|
||||
// Exit on LIMIT 0
|
||||
if let Some(0) = plan.limit {
|
||||
program.result_columns = plan.returning.unwrap_or_default();
|
||||
program.table_references.extend(plan.table_references);
|
||||
return Ok(());
|
||||
if let Some(limit) = plan.limit.as_ref().and_then(try_fold_expr_to_i64) {
|
||||
if limit == 0 {
|
||||
program.result_columns = plan.returning.unwrap_or_default();
|
||||
program.table_references.extend(plan.table_references);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
init_limit(program, &mut t_ctx, plan.limit, plan.offset);
|
||||
init_limit(program, &mut t_ctx, &plan.limit, &plan.offset);
|
||||
let after_main_loop_label = program.allocate_label();
|
||||
t_ctx.label_main_loop_end = Some(after_main_loop_label);
|
||||
if plan.contains_constant_false_condition {
|
||||
@@ -1033,6 +1038,7 @@ fn emit_update_insns(
|
||||
count: num_cols + 1,
|
||||
dest_reg: *record_reg,
|
||||
index_name: Some(index.name.clone()),
|
||||
affinity_str: None,
|
||||
});
|
||||
|
||||
if !index.unique {
|
||||
@@ -1133,11 +1139,19 @@ fn emit_update_insns(
|
||||
}
|
||||
|
||||
let record_reg = program.alloc_register();
|
||||
|
||||
let affinity_str = table_ref
|
||||
.columns()
|
||||
.iter()
|
||||
.map(|col| col.affinity().aff_mask())
|
||||
.collect::<String>();
|
||||
|
||||
program.emit_insn(Insn::MakeRecord {
|
||||
start_reg: start,
|
||||
count: table_ref.columns().len(),
|
||||
dest_reg: record_reg,
|
||||
index_name: None,
|
||||
affinity_str: Some(affinity_str),
|
||||
});
|
||||
|
||||
if has_user_provided_rowid {
|
||||
@@ -1277,6 +1291,7 @@ fn emit_update_insns(
|
||||
count: 2 * table_ref.columns().len(),
|
||||
dest_reg: record_reg,
|
||||
index_name: None,
|
||||
affinity_str: None,
|
||||
});
|
||||
Some(record_reg)
|
||||
} else {
|
||||
@@ -1393,11 +1408,18 @@ pub fn emit_cdc_patch_record(
|
||||
dst_reg: columns_reg + rowid_alias_position,
|
||||
extra_amount: 0,
|
||||
});
|
||||
let affinity_str = table
|
||||
.columns()
|
||||
.iter()
|
||||
.map(|col| col.affinity().aff_mask())
|
||||
.collect::<String>();
|
||||
|
||||
program.emit_insn(Insn::MakeRecord {
|
||||
start_reg: columns_reg,
|
||||
count: table.columns().len(),
|
||||
dest_reg: record_reg,
|
||||
index_name: None,
|
||||
affinity_str: Some(affinity_str),
|
||||
});
|
||||
record_reg
|
||||
} else {
|
||||
@@ -1423,11 +1445,17 @@ pub fn emit_cdc_full_record(
|
||||
program.emit_column_or_rowid(table_cursor_id, i, columns_reg + 1 + i);
|
||||
}
|
||||
}
|
||||
let affinity_str = columns
|
||||
.iter()
|
||||
.map(|col| col.affinity().aff_mask())
|
||||
.collect::<String>();
|
||||
|
||||
program.emit_insn(Insn::MakeRecord {
|
||||
start_reg: columns_reg + 1,
|
||||
count: columns.len(),
|
||||
dest_reg: columns_reg,
|
||||
index_name: None,
|
||||
affinity_str: Some(affinity_str),
|
||||
});
|
||||
columns_reg
|
||||
}
|
||||
@@ -1530,6 +1558,7 @@ pub fn emit_cdc_insns(
|
||||
count: 8,
|
||||
dest_reg: record_reg,
|
||||
index_name: None,
|
||||
affinity_str: None,
|
||||
});
|
||||
|
||||
program.emit_insn(Insn::Insert {
|
||||
@@ -1541,41 +1570,69 @@ pub fn emit_cdc_insns(
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Initialize the limit/offset counters and registers.
|
||||
/// In case of compound SELECTs, the limit counter is initialized only once,
|
||||
/// hence [LimitCtx::initialize_counter] being false in those cases.
|
||||
fn init_limit(
|
||||
program: &mut ProgramBuilder,
|
||||
t_ctx: &mut TranslateCtx,
|
||||
limit: Option<isize>,
|
||||
offset: Option<isize>,
|
||||
limit: &Option<Box<Expr>>,
|
||||
offset: &Option<Box<Expr>>,
|
||||
) {
|
||||
if t_ctx.limit_ctx.is_none() {
|
||||
t_ctx.limit_ctx = limit.map(|_| LimitCtx::new(program));
|
||||
if t_ctx.limit_ctx.is_none() && limit.is_some() {
|
||||
t_ctx.limit_ctx = Some(LimitCtx::new(program));
|
||||
}
|
||||
let Some(limit_ctx) = t_ctx.limit_ctx else {
|
||||
let Some(limit_ctx) = &t_ctx.limit_ctx else {
|
||||
return;
|
||||
};
|
||||
if limit_ctx.initialize_counter {
|
||||
program.emit_insn(Insn::Integer {
|
||||
value: limit.expect("limit must be Some if limit_ctx is Some") as i64,
|
||||
dest: limit_ctx.reg_limit,
|
||||
});
|
||||
if let Some(expr) = limit {
|
||||
if let Some(value) = try_fold_expr_to_i64(expr) {
|
||||
program.emit_insn(Insn::Integer {
|
||||
value,
|
||||
dest: limit_ctx.reg_limit,
|
||||
});
|
||||
} else {
|
||||
let r = limit_ctx.reg_limit;
|
||||
program.add_comment(program.offset(), "OFFSET expr");
|
||||
_ = translate_expr(program, None, expr, r, &t_ctx.resolver);
|
||||
program.emit_insn(Insn::MustBeInt { reg: r });
|
||||
}
|
||||
}
|
||||
}
|
||||
if t_ctx.reg_offset.is_none() && offset.is_some_and(|n| n.ne(&0)) {
|
||||
let reg = program.alloc_register();
|
||||
t_ctx.reg_offset = Some(reg);
|
||||
program.emit_insn(Insn::Integer {
|
||||
value: offset.unwrap() as i64,
|
||||
dest: reg,
|
||||
});
|
||||
let combined_reg = program.alloc_register();
|
||||
t_ctx.reg_limit_offset_sum = Some(combined_reg);
|
||||
program.emit_insn(Insn::OffsetLimit {
|
||||
limit_reg: t_ctx.limit_ctx.unwrap().reg_limit,
|
||||
offset_reg: reg,
|
||||
combined_reg,
|
||||
});
|
||||
|
||||
if t_ctx.reg_offset.is_none() {
|
||||
if let Some(expr) = offset {
|
||||
if let Some(value) = try_fold_expr_to_i64(expr) {
|
||||
if value != 0 {
|
||||
let reg = program.alloc_register();
|
||||
t_ctx.reg_offset = Some(reg);
|
||||
program.emit_insn(Insn::Integer { value, dest: reg });
|
||||
let combined_reg = program.alloc_register();
|
||||
t_ctx.reg_limit_offset_sum = Some(combined_reg);
|
||||
program.emit_insn(Insn::OffsetLimit {
|
||||
limit_reg: limit_ctx.reg_limit,
|
||||
offset_reg: reg,
|
||||
combined_reg,
|
||||
});
|
||||
}
|
||||
} else {
|
||||
let reg = program.alloc_register();
|
||||
t_ctx.reg_offset = Some(reg);
|
||||
let r = reg;
|
||||
|
||||
program.add_comment(program.offset(), "OFFSET expr");
|
||||
_ = translate_expr(program, None, expr, r, &t_ctx.resolver);
|
||||
program.emit_insn(Insn::MustBeInt { reg: r });
|
||||
|
||||
let combined_reg = program.alloc_register();
|
||||
t_ctx.reg_limit_offset_sum = Some(combined_reg);
|
||||
program.emit_insn(Insn::OffsetLimit {
|
||||
limit_reg: limit_ctx.reg_limit,
|
||||
offset_reg: reg,
|
||||
combined_reg,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -336,10 +336,19 @@ pub fn translate_condition_expr(
|
||||
resolver,
|
||||
)?;
|
||||
}
|
||||
ast::Expr::Binary(_, _, _) => {
|
||||
ast::Expr::Binary(e1, op, e2) => {
|
||||
let result_reg = program.alloc_register();
|
||||
translate_expr(program, Some(referenced_tables), expr, result_reg, resolver)?;
|
||||
emit_cond_jump(program, condition_metadata, result_reg);
|
||||
binary_expr_shared(
|
||||
program,
|
||||
Some(referenced_tables),
|
||||
e1,
|
||||
e2,
|
||||
op,
|
||||
result_reg,
|
||||
resolver,
|
||||
Some(condition_metadata),
|
||||
emit_binary_condition_insn,
|
||||
)?;
|
||||
}
|
||||
ast::Expr::Literal(_)
|
||||
| ast::Expr::Cast { .. }
|
||||
@@ -508,80 +517,18 @@ pub fn translate_expr(
|
||||
unreachable!("expression should have been rewritten in optmizer")
|
||||
}
|
||||
ast::Expr::Binary(e1, op, e2) => {
|
||||
// Check if both sides of the expression are equivalent and reuse the same register if so
|
||||
if exprs_are_equivalent(e1, e2) {
|
||||
let shared_reg = program.alloc_register();
|
||||
translate_expr(program, referenced_tables, e1, shared_reg, resolver)?;
|
||||
|
||||
emit_binary_insn(
|
||||
program,
|
||||
op,
|
||||
shared_reg,
|
||||
shared_reg,
|
||||
target_register,
|
||||
e1,
|
||||
e2,
|
||||
referenced_tables,
|
||||
)?;
|
||||
program.reset_collation();
|
||||
Ok(target_register)
|
||||
} else {
|
||||
let e1_reg = program.alloc_registers(2);
|
||||
let e2_reg = e1_reg + 1;
|
||||
|
||||
translate_expr(program, referenced_tables, e1, e1_reg, resolver)?;
|
||||
let left_collation_ctx = program.curr_collation_ctx();
|
||||
program.reset_collation();
|
||||
|
||||
translate_expr(program, referenced_tables, e2, e2_reg, resolver)?;
|
||||
let right_collation_ctx = program.curr_collation_ctx();
|
||||
program.reset_collation();
|
||||
|
||||
/*
|
||||
* The rules for determining which collating function to use for a binary comparison
|
||||
* operator (=, <, >, <=, >=, !=, IS, and IS NOT) are as follows:
|
||||
*
|
||||
* 1. If either operand has an explicit collating function assignment using the postfix COLLATE operator,
|
||||
* then the explicit collating function is used for comparison,
|
||||
* with precedence to the collating function of the left operand.
|
||||
*
|
||||
* 2. If either operand is a column, then the collating function of that column is used
|
||||
* with precedence to the left operand. For the purposes of the previous sentence,
|
||||
* a column name preceded by one or more unary "+" operators and/or CAST operators is still considered a column name.
|
||||
*
|
||||
* 3. Otherwise, the BINARY collating function is used for comparison.
|
||||
*/
|
||||
let collation_ctx = {
|
||||
match (left_collation_ctx, right_collation_ctx) {
|
||||
(Some((c_left, true)), _) => Some((c_left, true)),
|
||||
(_, Some((c_right, true))) => Some((c_right, true)),
|
||||
(Some((c_left, from_collate_left)), None) => {
|
||||
Some((c_left, from_collate_left))
|
||||
}
|
||||
(None, Some((c_right, from_collate_right))) => {
|
||||
Some((c_right, from_collate_right))
|
||||
}
|
||||
(Some((c_left, from_collate_left)), Some((_, false))) => {
|
||||
Some((c_left, from_collate_left))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
};
|
||||
program.set_collation(collation_ctx);
|
||||
|
||||
emit_binary_insn(
|
||||
program,
|
||||
op,
|
||||
e1_reg,
|
||||
e2_reg,
|
||||
target_register,
|
||||
e1,
|
||||
e2,
|
||||
referenced_tables,
|
||||
)?;
|
||||
program.reset_collation();
|
||||
Ok(target_register)
|
||||
}
|
||||
binary_expr_shared(
|
||||
program,
|
||||
referenced_tables,
|
||||
e1,
|
||||
e2,
|
||||
op,
|
||||
target_register,
|
||||
resolver,
|
||||
None,
|
||||
emit_binary_insn,
|
||||
)?;
|
||||
Ok(target_register)
|
||||
}
|
||||
ast::Expr::Case {
|
||||
base,
|
||||
@@ -2224,6 +2171,102 @@ pub fn translate_expr(
|
||||
Ok(target_register)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn binary_expr_shared(
|
||||
program: &mut ProgramBuilder,
|
||||
referenced_tables: Option<&TableReferences>,
|
||||
e1: &ast::Expr,
|
||||
e2: &ast::Expr,
|
||||
op: &ast::Operator,
|
||||
target_register: usize,
|
||||
resolver: &Resolver,
|
||||
condition_metadata: Option<ConditionMetadata>,
|
||||
emit_fn: impl Fn(
|
||||
&mut ProgramBuilder,
|
||||
&ast::Operator,
|
||||
usize, // left reg
|
||||
usize, // right reg
|
||||
usize, // target reg
|
||||
&ast::Expr, // left expr
|
||||
&ast::Expr, // right expr
|
||||
Option<&TableReferences>,
|
||||
Option<ConditionMetadata>,
|
||||
) -> Result<()>,
|
||||
) -> Result<usize> {
|
||||
// Check if both sides of the expression are equivalent and reuse the same register if so
|
||||
if exprs_are_equivalent(e1, e2) {
|
||||
let shared_reg = program.alloc_register();
|
||||
translate_expr(program, referenced_tables, e1, shared_reg, resolver)?;
|
||||
|
||||
emit_fn(
|
||||
program,
|
||||
op,
|
||||
shared_reg,
|
||||
shared_reg,
|
||||
target_register,
|
||||
e1,
|
||||
e2,
|
||||
referenced_tables,
|
||||
condition_metadata,
|
||||
)?;
|
||||
program.reset_collation();
|
||||
Ok(target_register)
|
||||
} else {
|
||||
let e1_reg = program.alloc_registers(2);
|
||||
let e2_reg = e1_reg + 1;
|
||||
|
||||
translate_expr(program, referenced_tables, e1, e1_reg, resolver)?;
|
||||
let left_collation_ctx = program.curr_collation_ctx();
|
||||
program.reset_collation();
|
||||
|
||||
translate_expr(program, referenced_tables, e2, e2_reg, resolver)?;
|
||||
let right_collation_ctx = program.curr_collation_ctx();
|
||||
program.reset_collation();
|
||||
|
||||
/*
|
||||
* The rules for determining which collating function to use for a binary comparison
|
||||
* operator (=, <, >, <=, >=, !=, IS, and IS NOT) are as follows:
|
||||
*
|
||||
* 1. If either operand has an explicit collating function assignment using the postfix COLLATE operator,
|
||||
* then the explicit collating function is used for comparison,
|
||||
* with precedence to the collating function of the left operand.
|
||||
*
|
||||
* 2. If either operand is a column, then the collating function of that column is used
|
||||
* with precedence to the left operand. For the purposes of the previous sentence,
|
||||
* a column name preceded by one or more unary "+" operators and/or CAST operators is still considered a column name.
|
||||
*
|
||||
* 3. Otherwise, the BINARY collating function is used for comparison.
|
||||
*/
|
||||
let collation_ctx = {
|
||||
match (left_collation_ctx, right_collation_ctx) {
|
||||
(Some((c_left, true)), _) => Some((c_left, true)),
|
||||
(_, Some((c_right, true))) => Some((c_right, true)),
|
||||
(Some((c_left, from_collate_left)), None) => Some((c_left, from_collate_left)),
|
||||
(None, Some((c_right, from_collate_right))) => Some((c_right, from_collate_right)),
|
||||
(Some((c_left, from_collate_left)), Some((_, false))) => {
|
||||
Some((c_left, from_collate_left))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
};
|
||||
program.set_collation(collation_ctx);
|
||||
|
||||
emit_fn(
|
||||
program,
|
||||
op,
|
||||
e1_reg,
|
||||
e2_reg,
|
||||
target_register,
|
||||
e1,
|
||||
e2,
|
||||
referenced_tables,
|
||||
condition_metadata,
|
||||
)?;
|
||||
program.reset_collation();
|
||||
Ok(target_register)
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn emit_binary_insn(
|
||||
program: &mut ProgramBuilder,
|
||||
@@ -2234,6 +2277,7 @@ fn emit_binary_insn(
|
||||
lhs_expr: &Expr,
|
||||
rhs_expr: &Expr,
|
||||
referenced_tables: Option<&TableReferences>,
|
||||
_: Option<ConditionMetadata>,
|
||||
) -> Result<()> {
|
||||
let mut affinity = Affinity::Blob;
|
||||
if op.is_comparison() {
|
||||
@@ -2481,6 +2525,277 @@ fn emit_binary_insn(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn emit_binary_condition_insn(
|
||||
program: &mut ProgramBuilder,
|
||||
op: &ast::Operator,
|
||||
lhs: usize,
|
||||
rhs: usize,
|
||||
target_register: usize,
|
||||
lhs_expr: &Expr,
|
||||
rhs_expr: &Expr,
|
||||
referenced_tables: Option<&TableReferences>,
|
||||
condition_metadata: Option<ConditionMetadata>,
|
||||
) -> Result<()> {
|
||||
let condition_metadata = condition_metadata
|
||||
.expect("condition metadata must be provided for emit_binary_insn_conditional");
|
||||
let mut affinity = Affinity::Blob;
|
||||
if op.is_comparison() {
|
||||
affinity = comparison_affinity(lhs_expr, rhs_expr, referenced_tables);
|
||||
}
|
||||
|
||||
let opposite_op = match op {
|
||||
ast::Operator::NotEquals => ast::Operator::Equals,
|
||||
ast::Operator::Equals => ast::Operator::NotEquals,
|
||||
ast::Operator::Less => ast::Operator::GreaterEquals,
|
||||
ast::Operator::LessEquals => ast::Operator::Greater,
|
||||
ast::Operator::Greater => ast::Operator::LessEquals,
|
||||
ast::Operator::GreaterEquals => ast::Operator::Less,
|
||||
ast::Operator::Is => ast::Operator::IsNot,
|
||||
ast::Operator::IsNot => ast::Operator::Is,
|
||||
other => *other,
|
||||
};
|
||||
|
||||
// For conditional jumps we need to use the opposite comparison operator
|
||||
// when we intend to jump if the condition is false. Jumping when the condition is false
|
||||
// is the common case, e.g.:
|
||||
// WHERE x=1 turns into "jump if x != 1".
|
||||
// However, in e.g. "WHERE x=1 OR y=2" we want to jump if the condition is true
|
||||
// when evaluating "x=1", because we are jumping over the "y=2" condition, and if the condition
|
||||
// is false we move on to the "y=2" condition without jumping.
|
||||
let op_to_use = if condition_metadata.jump_if_condition_is_true {
|
||||
*op
|
||||
} else {
|
||||
opposite_op
|
||||
};
|
||||
|
||||
// Similarly, we "jump if NULL" only when we intend to jump if the condition is false.
|
||||
let flags = if condition_metadata.jump_if_condition_is_true {
|
||||
CmpInsFlags::default().with_affinity(affinity)
|
||||
} else {
|
||||
CmpInsFlags::default()
|
||||
.with_affinity(affinity)
|
||||
.jump_if_null()
|
||||
};
|
||||
|
||||
let target_pc = if condition_metadata.jump_if_condition_is_true {
|
||||
condition_metadata.jump_target_when_true
|
||||
} else {
|
||||
condition_metadata.jump_target_when_false
|
||||
};
|
||||
|
||||
// For conditional jumps that don't have a clear "opposite op" (e.g. x+y), we check whether the result is nonzero/nonnull
|
||||
// (or zero/null) depending on the condition metadata.
|
||||
let eval_result = |program: &mut ProgramBuilder, result_reg: usize| {
|
||||
if condition_metadata.jump_if_condition_is_true {
|
||||
program.emit_insn(Insn::If {
|
||||
reg: result_reg,
|
||||
target_pc,
|
||||
jump_if_null: false,
|
||||
});
|
||||
} else {
|
||||
program.emit_insn(Insn::IfNot {
|
||||
reg: result_reg,
|
||||
target_pc,
|
||||
jump_if_null: true,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
match op_to_use {
|
||||
ast::Operator::NotEquals => {
|
||||
program.emit_insn(Insn::Ne {
|
||||
lhs,
|
||||
rhs,
|
||||
target_pc,
|
||||
flags,
|
||||
collation: program.curr_collation(),
|
||||
});
|
||||
}
|
||||
ast::Operator::Equals => {
|
||||
program.emit_insn(Insn::Eq {
|
||||
lhs,
|
||||
rhs,
|
||||
target_pc,
|
||||
flags,
|
||||
collation: program.curr_collation(),
|
||||
});
|
||||
}
|
||||
ast::Operator::Less => {
|
||||
program.emit_insn(Insn::Lt {
|
||||
lhs,
|
||||
rhs,
|
||||
target_pc,
|
||||
flags,
|
||||
collation: program.curr_collation(),
|
||||
});
|
||||
}
|
||||
ast::Operator::LessEquals => {
|
||||
program.emit_insn(Insn::Le {
|
||||
lhs,
|
||||
rhs,
|
||||
target_pc,
|
||||
flags,
|
||||
collation: program.curr_collation(),
|
||||
});
|
||||
}
|
||||
ast::Operator::Greater => {
|
||||
program.emit_insn(Insn::Gt {
|
||||
lhs,
|
||||
rhs,
|
||||
target_pc,
|
||||
flags,
|
||||
collation: program.curr_collation(),
|
||||
});
|
||||
}
|
||||
ast::Operator::GreaterEquals => {
|
||||
program.emit_insn(Insn::Ge {
|
||||
lhs,
|
||||
rhs,
|
||||
target_pc,
|
||||
flags,
|
||||
collation: program.curr_collation(),
|
||||
});
|
||||
}
|
||||
ast::Operator::Is => {
|
||||
program.emit_insn(Insn::Eq {
|
||||
lhs,
|
||||
rhs,
|
||||
target_pc,
|
||||
flags: flags.null_eq(),
|
||||
collation: program.curr_collation(),
|
||||
});
|
||||
}
|
||||
ast::Operator::IsNot => {
|
||||
program.emit_insn(Insn::Ne {
|
||||
lhs,
|
||||
rhs,
|
||||
target_pc,
|
||||
flags: flags.null_eq(),
|
||||
collation: program.curr_collation(),
|
||||
});
|
||||
}
|
||||
ast::Operator::Add => {
|
||||
program.emit_insn(Insn::Add {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::Subtract => {
|
||||
program.emit_insn(Insn::Subtract {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::Multiply => {
|
||||
program.emit_insn(Insn::Multiply {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::Divide => {
|
||||
program.emit_insn(Insn::Divide {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::Modulus => {
|
||||
program.emit_insn(Insn::Remainder {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::And => {
|
||||
program.emit_insn(Insn::And {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::Or => {
|
||||
program.emit_insn(Insn::Or {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::BitwiseAnd => {
|
||||
program.emit_insn(Insn::BitAnd {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::BitwiseOr => {
|
||||
program.emit_insn(Insn::BitOr {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::RightShift => {
|
||||
program.emit_insn(Insn::ShiftRight {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::LeftShift => {
|
||||
program.emit_insn(Insn::ShiftLeft {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
#[cfg(feature = "json")]
|
||||
op @ (ast::Operator::ArrowRight | ast::Operator::ArrowRightShift) => {
|
||||
let json_func = match op {
|
||||
ast::Operator::ArrowRight => JsonFunc::JsonArrowExtract,
|
||||
ast::Operator::ArrowRightShift => JsonFunc::JsonArrowShiftExtract,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
program.emit_insn(Insn::Function {
|
||||
constant_mask: 0,
|
||||
start_reg: lhs,
|
||||
dest: target_register,
|
||||
func: FuncCtx {
|
||||
func: Func::Json(json_func),
|
||||
arg_count: 2,
|
||||
},
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
ast::Operator::Concat => {
|
||||
program.emit_insn(Insn::Concat {
|
||||
lhs,
|
||||
rhs,
|
||||
dest: target_register,
|
||||
});
|
||||
eval_result(program, target_register);
|
||||
}
|
||||
other_unimplemented => todo!("{:?}", other_unimplemented),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// The base logic for translating LIKE and GLOB expressions.
|
||||
/// The logic for handling "NOT LIKE" is different depending on whether the expression
|
||||
/// is a conditional jump or not. This is why the caller handles the "NOT LIKE" behavior;
|
||||
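The emit_binary_condition_insn hunk above explains the inversion rule for conditional jumps: in the common case the program jumps when the WHERE condition fails (WHERE x = 1 compiles to "jump if x != 1"), whereas inside an OR the jump fires when the condition holds. A tiny self-contained sketch of that rule, with a simplified operator enum standing in for ast::Operator:

// Simplified comparison operators; the real code also handles IS / IS NOT and
// arithmetic operators that fall back to If/IfNot on the computed result.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Cmp { Eq, Ne, Lt, Le, Gt, Ge }

fn opposite(op: Cmp) -> Cmp {
    match op {
        Cmp::Eq => Cmp::Ne,
        Cmp::Ne => Cmp::Eq,
        Cmp::Lt => Cmp::Ge,
        Cmp::Le => Cmp::Gt,
        Cmp::Gt => Cmp::Le,
        Cmp::Ge => Cmp::Lt,
    }
}

// `WHERE x = 1` in an AND chain jumps past the row when `x != 1`;
// `x = 1 OR ...` jumps straight to the row body when `x = 1` holds.
fn op_to_emit(op: Cmp, jump_if_condition_is_true: bool) -> Cmp {
    if jump_if_condition_is_true { op } else { opposite(op) }
}

fn main() {
    assert_eq!(op_to_emit(Cmp::Eq, false), Cmp::Ne); // WHERE x = 1   -> jump if x != 1
    assert_eq!(op_to_emit(Cmp::Eq, true), Cmp::Eq);  // x = 1 OR y = 2 -> jump if x = 1
}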
@@ -3185,6 +3500,7 @@ pub fn translate_expr_for_returning(
|
||||
lhs,
|
||||
rhs,
|
||||
None, // No table references needed for RETURNING
|
||||
None, // No condition metadata needed for RETURNING
|
||||
)?;
|
||||
|
||||
Ok(target_register)
|
||||
|
||||
@@ -174,6 +174,7 @@ pub fn translate_create_index(
|
||||
count: columns.len() + 1,
|
||||
dest_reg: record_reg,
|
||||
index_name: Some(idx_name.clone()),
|
||||
affinity_str: None,
|
||||
});
|
||||
program.emit_insn(Insn::SorterInsert {
|
||||
cursor_id: sorter_cursor_id,
|
||||
|
||||
@@ -64,6 +64,7 @@ pub fn translate_insert(
|
||||
if with.is_some() {
|
||||
crate::bail_parse_error!("WITH clause is not supported");
|
||||
}
|
||||
|
||||
if on_conflict.is_some() {
|
||||
crate::bail_parse_error!("ON CONFLICT clause is not supported");
|
||||
}
|
||||
@@ -76,11 +77,22 @@ pub fn translate_insert(
|
||||
);
|
||||
}
|
||||
let table_name = &tbl_name.name;
|
||||
|
||||
// Check if this is a system table that should be protected from direct writes
|
||||
if crate::schema::is_system_table(table_name.as_str()) {
|
||||
crate::bail_parse_error!("table {} may not be modified", table_name);
|
||||
}
|
||||
|
||||
let table = match schema.get_table(table_name.as_str()) {
|
||||
Some(table) => table,
|
||||
None => crate::bail_parse_error!("no such table: {}", table_name),
|
||||
};
|
||||
|
||||
// Check if this is a materialized view
|
||||
if schema.is_materialized_view(table_name.as_str()) {
|
||||
crate::bail_parse_error!("cannot modify materialized view {}", table_name);
|
||||
}
|
||||
|
||||
let resolver = Resolver::new(schema, syms);
|
||||
|
||||
if let Some(virtual_table) = &table.virtual_table() {
|
||||
@@ -242,11 +254,35 @@ pub fn translate_insert(
|
||||
end_offset: yield_label,
|
||||
});
|
||||
let record_reg = program.alloc_register();
|
||||
|
||||
let affinity_str = if columns.is_empty() {
|
||||
btree_table
|
||||
.columns
|
||||
.iter()
|
||||
.filter(|col| !col.hidden)
|
||||
.map(|col| col.affinity().aff_mask())
|
||||
.collect::<String>()
|
||||
} else {
|
||||
columns
|
||||
.iter()
|
||||
.map(|col_name| {
|
||||
let column_name = normalize_ident(col_name.as_str());
|
||||
table
|
||||
.get_column_by_name(&column_name)
|
||||
.unwrap()
|
||||
.1
|
||||
.affinity()
|
||||
.aff_mask()
|
||||
})
|
||||
.collect::<String>()
|
||||
};
|
||||
|
||||
program.emit_insn(Insn::MakeRecord {
|
||||
start_reg: yield_reg + 1,
|
||||
count: result.num_result_cols,
|
||||
dest_reg: record_reg,
|
||||
index_name: None,
|
||||
affinity_str: Some(affinity_str),
|
||||
});
|
||||
|
||||
let rowid_reg = program.alloc_register();
|
||||
@@ -513,6 +549,7 @@ pub fn translate_insert(
|
||||
count: num_cols + 1,
|
||||
dest_reg: record_reg,
|
||||
index_name: Some(index.name.clone()),
|
||||
affinity_str: None,
|
||||
});
|
||||
|
||||
if index.unique {
|
||||
@@ -633,11 +670,18 @@ pub fn translate_insert(
|
||||
});
|
||||
}
|
||||
// Create and insert the record
|
||||
let affinity_str = insertion
|
||||
.col_mappings
|
||||
.iter()
|
||||
.map(|col_mapping| col_mapping.column.affinity().aff_mask())
|
||||
.collect::<String>();
|
||||
|
||||
program.emit_insn(Insn::MakeRecord {
|
||||
start_reg: insertion.first_col_register(),
|
||||
count: insertion.col_mappings.len(),
|
||||
dest_reg: insertion.record_register(),
|
||||
index_name: None,
|
||||
affinity_str: Some(affinity_str),
|
||||
});
|
||||
program.emit_insn(Insn::Insert {
|
||||
cursor: cursor_id,
|
||||
|
||||
@@ -196,7 +196,8 @@ pub fn init_loop(
|
||||
t_ctx.meta_left_joins[table_index] = Some(lj_metadata);
|
||||
}
|
||||
}
|
||||
let (table_cursor_id, index_cursor_id) = table.open_cursors(program, mode)?;
|
||||
let (table_cursor_id, index_cursor_id) =
|
||||
table.open_cursors(program, mode, t_ctx.resolver.schema)?;
|
||||
match &table.op {
|
||||
Operation::Scan(Scan::BTreeTable { index, .. }) => match (mode, &table.table) {
|
||||
(OperationMode::SELECT, Table::BTree(btree)) => {
|
||||
@@ -1428,6 +1429,7 @@ fn emit_autoindex(
|
||||
count: num_regs_to_reserve,
|
||||
dest_reg: record_reg,
|
||||
index_name: Some(index.name.clone()),
|
||||
affinity_str: None,
|
||||
});
|
||||
program.emit_insn(Insn::IdxInsert {
|
||||
cursor_id: index_cursor_id,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.