Mirror of https://github.com/aljazceru/turso.git (synced 2025-12-18 17:14:20 +01:00)
Merge branch 'main' of https://github.com/tursodatabase/limbo
2 .github/workflows/antithesis.yml vendored
@@ -13,7 +13,7 @@ env:
   ANTITHESIS_PASSWD: ${{ secrets.ANTITHESIS_PASSWD }}
   ANTITHESIS_DOCKER_HOST: us-central1-docker.pkg.dev
   ANTITHESIS_DOCKER_REPO: ${{ secrets.ANTITHESIS_DOCKER_REPO }}
-  ANTITHESIS_EMAIL: "penberg@turso.tech;pmuniz@turso.tech"
+  ANTITHESIS_EMAIL: ${{ secrets.ANTITHESIS_EMAIL }}
   ANTITHESIS_REGISTRY_KEY: ${{ secrets.ANTITHESIS_REGISTRY_KEY }}

 jobs:
4 .github/workflows/long_fuzz_tests_btree.yml vendored
@@ -28,10 +28,6 @@ jobs:
       run: cargo test -- --ignored fuzz_long
       env:
         RUST_BACKTRACE: 1
-    - name: Run ignored long tests with index
-      run: cargo test -- --ignored fuzz_long
-      env:
-        RUST_BACKTRACE: 1

   simple-stress-test:
     runs-on: blacksmith-4vcpu-ubuntu-2404
12 .github/workflows/rust.yml vendored
@@ -73,19 +73,12 @@ jobs:
       with:
         prefix-key: "v1-rust" # can be updated if we need to reset caches due to non-trivial change in the dependencies (for example, custom env var were set for single workspace project)
     - name: Install the project
-      run: ./scripts/run-sim --iterations 50
+      run: ./scripts/run-sim --maximum-tests 2000 loop -n 50 -s

   test-limbo:
     runs-on: blacksmith-4vcpu-ubuntu-2404
     timeout-minutes: 20
     steps:
-    - name: Install cargo-c
-      env:
-        LINK: https://github.com/lu-zero/cargo-c/releases/download/v0.10.7
-        CARGO_C_FILE: cargo-c-x86_64-unknown-linux-musl.tar.gz
-      run: |
-        curl -L $LINK/$CARGO_C_FILE | tar xz -C ~/.cargo/bin
-
     - uses: actions/checkout@v3

     - name: Install uv
@@ -96,9 +89,6 @@ jobs:
     - name: Set up Python
       run: uv python install

-    - name: Install the project
-      run: uv sync --all-extras --dev --all-packages
-
     - uses: "./.github/shared/install_sqlite"
     - name: Test
       run: make test
2 .github/workflows/rust_perf.yml vendored
@@ -88,7 +88,7 @@ jobs:
       nyrkio-public: true

     - name: Analyze SQLITE3 result with Nyrkiö
-      uses: nyrkio/github-action-benchmark@HEAD
+      uses: nyrkio/change-detection@HEAD
       with:
         name: clickbench/sqlite3
         tool: time
104 Cargo.lock generated
@@ -571,7 +571,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"

 [[package]]
 name = "core_tester"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "anyhow",
  "assert_cmd",
@@ -1879,14 +1879,14 @@ dependencies = [

 [[package]]
 name = "limbo-go"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "turso_core",
 ]

 [[package]]
 name = "limbo-wasm"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "console_error_panic_hook",
  "getrandom 0.2.15",
@@ -1899,7 +1899,7 @@ dependencies = [

 [[package]]
 name = "limbo_completion"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "mimalloc",
  "turso_ext",
@@ -1907,7 +1907,7 @@ dependencies = [

 [[package]]
 name = "limbo_crypto"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "blake3",
  "data-encoding",
@@ -1920,7 +1920,7 @@ dependencies = [

 [[package]]
 name = "limbo_csv"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "csv",
  "mimalloc",
@@ -1930,7 +1930,7 @@ dependencies = [

 [[package]]
 name = "limbo_ipaddr"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "ipnetwork",
  "mimalloc",
@@ -1939,7 +1939,7 @@ dependencies = [

 [[package]]
 name = "limbo_percentile"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "mimalloc",
  "turso_ext",
@@ -1947,7 +1947,7 @@ dependencies = [

 [[package]]
 name = "limbo_regexp"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "mimalloc",
  "regex",
@@ -1956,7 +1956,7 @@ dependencies = [

 [[package]]
 name = "limbo_sim"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "anarchist-readable-name-generator-lib",
  "anyhow",
@@ -1983,7 +1983,7 @@ dependencies = [

 [[package]]
 name = "limbo_sqlite3"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "env_logger 0.11.7",
  "libc",
@@ -1996,7 +1996,7 @@ dependencies = [

 [[package]]
 name = "limbo_sqlite_test_ext"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "cc",
 ]
@@ -2232,6 +2232,8 @@ dependencies = [
  "once_cell",
  "proc-macro2",
  "quote",
  "regex",
  "semver",
  "syn 2.0.100",
 ]

@@ -2474,45 +2476,6 @@ version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

-[[package]]
-name = "phf"
-version = "0.11.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078"
-dependencies = [
- "phf_shared",
-]
-
-[[package]]
-name = "phf_codegen"
-version = "0.11.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a"
-dependencies = [
- "phf_generator",
- "phf_shared",
-]
-
-[[package]]
-name = "phf_generator"
-version = "0.11.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d"
-dependencies = [
- "phf_shared",
- "rand 0.8.5",
-]
-
-[[package]]
-name = "phf_shared"
-version = "0.11.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5"
-dependencies = [
- "siphasher",
- "uncased",
-]
-
 [[package]]
 name = "pin-project-lite"
 version = "0.2.16"
@@ -2700,7 +2663,7 @@ dependencies = [

 [[package]]
 name = "py-turso"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "anyhow",
  "pyo3",
@@ -3295,12 +3258,6 @@ dependencies = [
  "libc",
 ]

-[[package]]
-name = "siphasher"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
-
 [[package]]
 name = "slab"
 version = "0.4.9"
@@ -3312,9 +3269,9 @@ dependencies = [

 [[package]]
 name = "smallvec"
-version = "1.14.0"
+version = "1.15.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd"
+checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"

 [[package]]
 name = "socket2"
@@ -3811,7 +3768,7 @@ dependencies = [

 [[package]]
 name = "turso"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "tempfile",
  "thiserror 2.0.12",
@@ -3821,7 +3778,7 @@ dependencies = [

 [[package]]
 name = "turso-java"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "jni",
  "thiserror 2.0.12",
@@ -3830,7 +3787,7 @@ dependencies = [

 [[package]]
 name = "turso_cli"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "anyhow",
  "cfg-if",
@@ -3861,7 +3818,7 @@ dependencies = [

 [[package]]
 name = "turso_core"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "antithesis_sdk",
  "bitflags 2.9.0",
@@ -3914,7 +3871,7 @@ dependencies = [

 [[package]]
 name = "turso_dart"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "flutter_rust_bridge",
  "turso_core",
@@ -3922,7 +3879,7 @@ dependencies = [

 [[package]]
 name = "turso_ext"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "chrono",
  "getrandom 0.3.2",
@@ -3931,7 +3888,7 @@ dependencies = [

 [[package]]
 name = "turso_ext_tests"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "env_logger 0.11.7",
  "lazy_static",
@@ -3942,7 +3899,7 @@ dependencies = [

 [[package]]
 name = "turso_macros"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3951,7 +3908,7 @@ dependencies = [

 [[package]]
 name = "turso_node"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "napi",
  "napi-build",
@@ -3961,7 +3918,7 @@ dependencies = [

 [[package]]
 name = "turso_sqlite3_parser"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "bitflags 2.9.0",
  "cc",
@@ -3971,18 +3928,15 @@ dependencies = [
  "log",
  "memchr",
  "miette",
- "phf",
- "phf_codegen",
- "phf_shared",
  "serde",
  "smallvec",
  "strum",
  "strum_macros",
  "uncased",
 ]

 [[package]]
 name = "turso_stress"
-version = "0.1.1"
+version = "0.1.2-pre.2"
 dependencies = [
  "anarchist-readable-name-generator-lib",
  "antithesis_sdk",
26 Cargo.toml
@@ -31,25 +31,25 @@ members = [
 exclude = ["perf/latency/limbo"]

 [workspace.package]
-version = "0.1.1"
+version = "0.1.2-pre.2"
 authors = ["the Limbo authors"]
 edition = "2021"
 license = "MIT"
 repository = "https://github.com/tursodatabase/turso"

 [workspace.dependencies]
-limbo_completion = { path = "extensions/completion", version = "0.1.1" }
-turso_core = { path = "core", version = "0.1.1" }
-limbo_crypto = { path = "extensions/crypto", version = "0.1.1" }
-limbo_csv = { path = "extensions/csv", version = "0.1.1" }
-turso_ext = { path = "extensions/core", version = "0.1.1" }
-turso_ext_tests = { path = "extensions/tests", version = "0.1.1" }
-limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.1" }
-turso_macros = { path = "macros", version = "0.1.1" }
-limbo_percentile = { path = "extensions/percentile", version = "0.1.1" }
-limbo_regexp = { path = "extensions/regexp", version = "0.1.1" }
-turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.1" }
-limbo_uuid = { path = "extensions/uuid", version = "0.1.1" }
+limbo_completion = { path = "extensions/completion", version = "0.1.2-pre.2" }
+turso_core = { path = "core", version = "0.1.2-pre.2" }
+limbo_crypto = { path = "extensions/crypto", version = "0.1.2-pre.2" }
+limbo_csv = { path = "extensions/csv", version = "0.1.2-pre.2" }
+turso_ext = { path = "extensions/core", version = "0.1.2-pre.2" }
+turso_ext_tests = { path = "extensions/tests", version = "0.1.2-pre.2" }
+limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.2-pre.2" }
+turso_macros = { path = "macros", version = "0.1.2-pre.2" }
+limbo_percentile = { path = "extensions/percentile", version = "0.1.2-pre.2" }
+limbo_regexp = { path = "extensions/regexp", version = "0.1.2-pre.2" }
+turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.2-pre.2" }
+limbo_uuid = { path = "extensions/uuid", version = "0.1.2-pre.2" }
 strum = { version = "0.26", features = ["derive"] }
 strum_macros = "0.26"
 serde = "1.0"
Dockerfile

@@ -12,6 +12,7 @@ WORKDIR /app
 FROM chef AS planner
 COPY ./Cargo.lock ./Cargo.lock
 COPY ./Cargo.toml ./Cargo.toml
 COPY ./bindings/dart ./bindings/dart/
 COPY ./bindings/go ./bindings/go/
 COPY ./bindings/java ./bindings/java/
 COPY ./bindings/javascript ./bindings/javascript/
@@ -56,6 +57,7 @@ COPY --from=planner /app/sqlite3 ./sqlite3/
 COPY --from=planner /app/tests ./tests/
 COPY --from=planner /app/stress ./stress/
+COPY --from=planner /app/bindings/rust ./bindings/rust/
 COPY --from=planner /app/bindings/dart ./bindings/dart/
 COPY --from=planner /app/bindings/go ./bindings/go/
 COPY --from=planner /app/bindings/javascript ./bindings/javascript/
 COPY --from=planner /app/bindings/java ./bindings/java/
@@ -84,7 +86,7 @@ RUN maturin build
 #

 FROM debian:bullseye-slim AS runtime
-RUN apt-get update && apt-get install -y bash curl xz-utils python3 sqlite3 bc binutils pip && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y bash curl xz-utils python3 procps sqlite3 bc binutils pip && rm -rf /var/lib/apt/lists/*
 RUN pip install antithesis

 WORKDIR /app
61 Makefile
@@ -3,9 +3,11 @@ CURRENT_RUST_VERSION := $(shell rustc -V | sed -E 's/rustc ([0-9]+\.[0-9]+\.[0-9
 CURRENT_RUST_TARGET := $(shell rustc -vV | grep host | cut -d ' ' -f 2)
 RUSTUP := $(shell command -v rustup 2> /dev/null)
 UNAME_S := $(shell uname -s)
+MINIMUM_TCL_VERSION := 8.6

 # Executable used to execute the compatibility tests.
 SQLITE_EXEC ?= scripts/limbo-sqlite3
+RUST_LOG := off

 all: check-rust-version check-wasm-target limbo limbo-wasm
 .PHONY: all
@@ -26,6 +28,17 @@ check-rust-version:
	fi
 .PHONY: check-rust-version

+check-tcl-version:
+	@printf '%s\n' \
+		'set need "$(MINIMUM_TCL_VERSION)"' \
+		'set have [info patchlevel]' \
+		'if {[package vcompare $$have $$need] < 0} {' \
+		'  puts stderr "tclsh $$have found — need $$need+"' \
+		'  exit 1' \
+		'}' \
+		| tclsh
+.PHONY: check-tcl-version
+
 check-wasm-target:
	@echo "Checking wasm32-wasi target..."
	@if ! rustup target list | grep -q "wasm32-wasi (installed)"; then \
@@ -51,27 +64,31 @@ uv-sync:
	uv sync --all-packages
 .PHONE: uv-sync

-test: limbo uv-sync test-compat test-vector test-sqlite3 test-shell test-extensions test-memory test-write test-update test-constraint test-collate
+uv-sync-test:
+	uv sync --all-extras --dev --package turso_test
+.PHONE: uv-sync
+
+test: limbo uv-sync-test test-compat test-vector test-sqlite3 test-shell test-memory test-write test-update test-constraint test-collate test-extensions
 .PHONY: test

-test-extensions: limbo uv-sync
-	uv run --project limbo_test test-extensions
+test-extensions: limbo uv-sync-test
+	RUST_LOG=$(RUST_LOG) uv run --project limbo_test test-extensions
 .PHONY: test-extensions

-test-shell: limbo uv-sync
-	SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-shell
+test-shell: limbo uv-sync-test
+	RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-shell
 .PHONY: test-shell

-test-compat:
-	SQLITE_EXEC=$(SQLITE_EXEC) ./testing/all.test
+test-compat: check-tcl-version
+	RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) ./testing/all.test
 .PHONY: test-compat

 test-vector:
-	SQLITE_EXEC=$(SQLITE_EXEC) ./testing/vector.test
+	RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) ./testing/vector.test
 .PHONY: test-vector

 test-time:
-	SQLITE_EXEC=$(SQLITE_EXEC) ./testing/time.test
+	RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) ./testing/time.test
 .PHONY: test-time

 reset-db:
@@ -85,48 +102,48 @@ test-sqlite3: reset-db
 .PHONY: test-sqlite3

 test-json:
-	SQLITE_EXEC=$(SQLITE_EXEC) ./testing/json.test
+	RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) ./testing/json.test
 .PHONY: test-json

-test-memory: limbo uv-sync
-	SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-memory
+test-memory: limbo uv-sync-test
+	RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-memory
 .PHONY: test-memory

-test-write: limbo uv-sync
+test-write: limbo uv-sync-test
	@if [ "$(SQLITE_EXEC)" != "scripts/limbo-sqlite3" ]; then \
-		SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-write; \
+		RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-write; \
	else \
		echo "Skipping test-write: SQLITE_EXEC does not have indexes scripts/limbo-sqlite3"; \
	fi
 .PHONY: test-write

-test-update: limbo uv-sync
+test-update: limbo uv-sync-test
	@if [ "$(SQLITE_EXEC)" != "scripts/limbo-sqlite3" ]; then \
-		SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-update; \
+		RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-update; \
	else \
		echo "Skipping test-update: SQLITE_EXEC does not have indexes scripts/limbo-sqlite3"; \
	fi
 .PHONY: test-update

-test-collate: limbo uv-sync
+test-collate: limbo uv-sync-test
	@if [ "$(SQLITE_EXEC)" != "scripts/limbo-sqlite3" ]; then \
-		SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-collate; \
+		RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-collate; \
	else \
		echo "Skipping test-collate: SQLITE_EXEC does not have indexes scripts/limbo-sqlite3"; \
	fi
 .PHONY: test-collate

-test-constraint: limbo uv-sync
+test-constraint: limbo uv-sync-test
	@if [ "$(SQLITE_EXEC)" != "scripts/limbo-sqlite3" ]; then \
-		SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-constraint; \
+		RUST_LOG=$(RUST_LOG) SQLITE_EXEC=$(SQLITE_EXEC) uv run --project limbo_test test-constraint; \
	else \
		echo "Skipping test-constraint: SQLITE_EXEC does not have indexes scripts/limbo-sqlite3"; \
	fi
 .PHONY: test-constraint

-bench-vfs: uv-sync
+bench-vfs: uv-sync-test
	cargo build --release
-	uv run --project limbo_test bench-vfs "$(SQL)" "$(N)"
+	RUST_LOG=$(RUST_LOG) uv run --project limbo_test bench-vfs "$(SQL)" "$(N)"

 clickbench:
	./perf/clickbench/benchmark.sh
README.md

@@ -64,7 +64,7 @@ Turso
 Enter ".help" for usage hints.
 Connected to a transient in-memory database.
 Use ".open FILENAME" to reopen on a persistent database
-turso> CREATE TABLE users (id INT PRIMARY KEY, username TEXT);
+turso> CREATE TABLE users (id INT, username TEXT);
 turso> INSERT INTO users VALUES (1, 'alice');
 turso> INSERT INTO users VALUES (2, 'bob');
 turso> SELECT * FROM users;
@@ -224,9 +224,11 @@ terms or conditions.

 Thanks to all the partners of Turso!

-<a href="https://antithesis.com/"><img src="assets/antithesis.jpg" width="400"></a>
-
 <a href="https://blacksmith.sh"><img src="assets/blacksmith.svg" width="400"></a>

+<a href="https://antithesis.com/"><img src="assets/antithesis.jpg" width="400"></a>
+<a href="https://nyrkio.com/"><img src="assets/turso-nyrkio.png" width="400"></a>
+
 ## Contributors
@@ -50,3 +50,5 @@ cur.execute(f"""
     INSERT INTO initial_state (num_accts, total)
     VALUES ({num_accts}, {total})
 """)
+
+con.commit()
@@ -83,4 +83,6 @@ for i in range(tbl_count):
         CREATE TABLE tbl_{i} ({cols_str})
     """)

+con.commit()
+
 print(f"DB Schemas\n------------\n{json.dumps(schemas, indent=2)}")
@@ -37,6 +37,13 @@ print(f"Attempt to delete {deletions} rows in tbl_{selected_tbl}...")
 for i in range(deletions):
     where_clause = f"col_{pk} = {generate_random_value(tbl_schema[f'col_{pk}']['data_type'])}"

-    cur.execute(f"""
-        DELETE FROM tbl_{selected_tbl} WHERE {where_clause}
-    """)
+    try:
+        cur.execute(f"""
+            DELETE FROM tbl_{selected_tbl} WHERE {where_clause}
+        """)
+    except turso.OperationalError:
+        con.rollback()
+        # Re-raise other operational errors
+        raise

 con.commit()
@@ -44,5 +44,8 @@ for i in range(insertions):
         # Ignore UNIQUE constraint violations
         pass
     else:
+        con.rollback()
+        # Re-raise other operational errors
         raise

 con.commit()
@@ -17,8 +17,7 @@ cur_init = con_init.cursor()

 tbl_len = cur_init.execute("SELECT count FROM tables").fetchone()[0]
 selected_tbl = get_random() % tbl_len
-tbl_schema = json.loads(cur_init.execute(
-    f"SELECT schema FROM schemas WHERE tbl = {selected_tbl}").fetchone()[0])
+tbl_schema = json.loads(cur_init.execute(f"SELECT schema FROM schemas WHERE tbl = {selected_tbl}").fetchone()[0])

 tbl_name = f"tbl_{selected_tbl}"
@@ -29,8 +28,7 @@ except Exception as e:
     exit(0)

 cur = con.cursor()
-cur.execute(
-    "SELECT sql FROM sqlite_schema WHERE type = 'table' AND name = '" + tbl_name + "'")
+cur.execute("SELECT sql FROM sqlite_schema WHERE type = 'table' AND name = '" + tbl_name + "'")

 result = cur.fetchone()
@@ -47,10 +45,8 @@ cur.execute("ALTER TABLE " + tbl_name + " RENAME TO " + tbl_name + "_old")
 con.rollback()

 cur = con.cursor()
-cur.execute(
-    "SELECT sql FROM sqlite_schema WHERE type = 'table' AND name = '" + tbl_name + "'")
+cur.execute("SELECT sql FROM sqlite_schema WHERE type = 'table' AND name = '" + tbl_name + "'")

 schema_after = cur.fetchone()[0]

-always(schema_before == schema_after,
-    "schema should be the same after rollback", {})
+always(schema_before == schema_after, "schema should be the same after rollback", {})
@@ -58,5 +58,8 @@ for i in range(updates):
         # Ignore UNIQUE constraint violations
         pass
     else:
+        con.rollback()
+        # Re-raise other operational errors
         raise

 con.commit()
BIN assets/turso-nyrkio.png (new file, 262 KiB; binary file not shown)
@@ -7,7 +7,7 @@ use turso_core::{LimboError, Statement, StepResult, Value};

 pub struct LimboRows<'conn> {
     stmt: Box<Statement>,
-    conn: &'conn mut LimboConn,
+    _conn: &'conn mut LimboConn,
     err: Option<LimboError>,
 }
@@ -15,7 +15,7 @@ impl<'conn> LimboRows<'conn> {
     pub fn new(stmt: Statement, conn: &'conn mut LimboConn) -> Self {
         LimboRows {
             stmt: Box::new(stmt),
-            conn,
+            _conn: conn,
             err: None,
         }
     }
@@ -55,8 +55,12 @@ pub extern "C" fn rows_next(ctx: *mut c_void) -> ResultCode {
         Ok(StepResult::Row) => ResultCode::Row,
         Ok(StepResult::Done) => ResultCode::Done,
         Ok(StepResult::IO) => {
-            let _ = ctx.conn.io.run_once();
-            ResultCode::Io
+            let res = ctx.stmt.run_once();
+            if res.is_err() {
+                ResultCode::Error
+            } else {
+                ResultCode::Io
+            }
         }
         Ok(StepResult::Busy) => ResultCode::Busy,
         Ok(StepResult::Interrupt) => ResultCode::Interrupt,
@@ -64,7 +64,10 @@ pub extern "C" fn stmt_execute(
             return ResultCode::Done;
         }
         Ok(StepResult::IO) => {
-            let _ = stmt.conn.io.run_once();
+            let res = statement.run_once();
+            if res.is_err() {
+                return ResultCode::Error;
+            }
         }
         Ok(StepResult::Busy) => {
             return ResultCode::Busy;
@@ -13,12 +13,12 @@ use turso_core::Connection;
 #[derive(Clone)]
 pub struct TursoConnection {
     pub(crate) conn: Arc<Connection>,
-    pub(crate) io: Arc<dyn turso_core::IO>,
+    pub(crate) _io: Arc<dyn turso_core::IO>,
 }

 impl TursoConnection {
     pub fn new(conn: Arc<Connection>, io: Arc<dyn turso_core::IO>) -> Self {
-        TursoConnection { conn, io }
+        TursoConnection { conn, _io: io }
     }

 #[allow(clippy::wrong_self_convention)]
@@ -76,7 +76,7 @@ pub extern "system" fn Java_tech_turso_core_TursoStatement_step<'local>(
         };
     }
     StepResult::IO => {
-        if let Err(e) = stmt.connection.io.run_once() {
+        if let Err(e) = stmt.stmt.run_once() {
             set_err_msg_and_throw_exception(&mut env, obj, TURSO_ETC, e.to_string());
             return to_turso_step_result(&mut env, STEP_RESULT_ID_ERROR, None);
         }
@@ -24,7 +24,9 @@ public final class JDBC4Connection implements Connection {
   }

   public TursoStatement prepare(String sql) throws SQLException {
-    return connection.prepare(sql);
+    final TursoStatement statement = connection.prepare(sql);
+    statement.initializeColumnMetadata();
+    return statement;
   }

   @Override
@@ -34,7 +34,6 @@ public final class JDBC4PreparedStatement extends JDBC4Statement implements Prep
     super(connection);
     this.sql = sql;
     this.statement = connection.prepare(sql);
-    this.statement.initializeColumnMetadata();
     this.resultSet = new JDBC4ResultSet(this.statement.getResultSet());
   }
@@ -319,10 +319,8 @@ public final class JDBC4ResultSet implements ResultSet, ResultSetMetaData {
   }

   @Override
-  @SkipNullableCheck
   public Object getObject(int columnIndex) throws SQLException {
-    // TODO
-    return null;
+    return resultSet.get(columnIndex);
   }

   @Override
@@ -1226,20 +1224,22 @@ public final class JDBC4ResultSet implements ResultSet, ResultSetMetaData {

   @Override
   public int getColumnDisplaySize(int column) throws SQLException {
-    // TODO
-    return 0;
+    return Integer.MAX_VALUE;
   }

   @Override
   public String getColumnLabel(int column) throws SQLException {
-    // TODO
-    return "";
+    // TODO: should consider "AS" keyword
+    return getColumnName(column);
   }

   @Override
   public String getColumnName(int column) throws SQLException {
-    // TODO
-    return "";
+    if (column > 0 && column <= resultSet.getColumnNames().length) {
+      return resultSet.getColumnNames()[column - 1];
+    }
+
+    throw new SQLException("Index out of bound: " + column);
   }

   @Override
@@ -18,6 +18,7 @@ public class JDBC4Statement implements Statement {
   private final JDBC4Connection connection;

   @Nullable protected TursoStatement statement = null;
+  protected long updateCount;

   // Because JDBC4Statement has different life cycle in compared to tursoStatement, let's use this
   // field to manage JDBC4Statement lifecycle
@@ -173,8 +174,10 @@ public class JDBC4Statement implements Statement {
               // TODO: if sql is a readOnly query, do we still need the locks?
               connectionLock.lock();
               statement = connection.prepare(sql);
+              final long previousChanges = statement.totalChanges();
               final boolean result = statement.execute();
               updateGeneratedKeys();
+              updateCount = statement.totalChanges() - previousChanges;

               return result;
             } finally {
@@ -186,19 +189,13 @@ public class JDBC4Statement implements Statement {
   @Override
   public ResultSet getResultSet() throws SQLException {
     requireNonNull(statement, "statement is null");
+    ensureOpen();
     return new JDBC4ResultSet(statement.getResultSet());
   }

   @Override
   public int getUpdateCount() throws SQLException {
-    // TODO
-    return 0;
-  }
-
-  @Override
-  public boolean getMoreResults() throws SQLException {
-    // TODO
-    return false;
+    return (int) updateCount;
   }

   @Override
@@ -254,9 +251,22 @@ public class JDBC4Statement implements Statement {
     return connection;
   }

+  @Override
+  public boolean getMoreResults() throws SQLException {
+    return getMoreResults(Statement.CLOSE_CURRENT_RESULT);
+  }
+
   @Override
   public boolean getMoreResults(int current) throws SQLException {
-    // TODO
+    requireNonNull(statement, "statement should not be null");
+
+    if (current != Statement.CLOSE_CURRENT_RESULT) {
+      throw new SQLException("Invalid argument");
+    }
+
+    statement.getResultSet().close();
+    updateCount = -1;
+
     return false;
   }
@@ -2,6 +2,7 @@ package tech.turso.jdbc4;

 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -55,6 +56,30 @@ class JDBC4StatementTest {
     assertTrue(stmt.execute("SELECT * FROM users;"));
   }

+  @Test
+  void execute_select() throws Exception {
+    stmt.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username TEXT);");
+    stmt.execute("INSERT INTO users VALUES (1, 'turso 1')");
+    stmt.execute("INSERT INTO users VALUES (2, 'turso 2')");
+    stmt.execute("INSERT INTO users VALUES (3, 'turso 3')");
+
+    ResultSet rs = stmt.executeQuery("SELECT * FROM users;");
+    rs.next();
+    int rowCount = 0;
+
+    do {
+      rowCount++;
+      int id = rs.getInt(1);
+      String username = rs.getString(2);
+
+      assertEquals(id, rowCount);
+      assertEquals(username, "turso " + rowCount);
+    } while (rs.next());
+
+    assertEquals(rowCount, 3);
+    assertFalse(rs.next());
+  }
+
   @Test
   void close_statement_test() throws Exception {
     stmt.close();
@@ -13,7 +13,7 @@ crate-type = ["cdylib"]
 [dependencies]
 turso_core = { workspace = true }
 napi = { version = "2.16.17", default-features = false, features = ["napi4"] }
-napi-derive = { version = "2.16.13", default-features = false }
+napi-derive = { version = "2.16.13", default-features = true }

 [build-dependencies]
 napi-build = "2.2.0"
@@ -1,44 +1,57 @@
 import test from "ava";
+import crypto from 'crypto';
 import fs from "node:fs";
 import { fileURLToPath } from "url";
 import path from "node:path"
+import DualTest from "./dual-test.mjs";

-import Database from "better-sqlite3";
+const inMemoryTest = new DualTest(":memory:");
+const foobarTest = new DualTest("foobar.db");

-test("Open in-memory database", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Open in-memory database", async (t) => {
+  const db = t.context.db;
   t.is(db.memory, true);
 });

-test("Property .name of in-memory database", async (t) => {
-  let name = ":memory:";
-  const db = new Database(name);
-  t.is(db.name, name);
+inMemoryTest.both("Property .name of in-memory database", async (t) => {
+  const db = t.context.db;
+  t.is(db.name, t.context.path);
 });

-test("Property .name of database", async (t) => {
-  let name = "foobar.db";
-  const db = new Database(name);
-  t.is(db.name, name);
+foobarTest.both("Property .name of database", async (t) => {
+  const db = t.context.db;
+  t.is(db.name, t.context.path);
 });

-test("Property .readonly of database if set", async (t) => {
-  const db = new Database("foobar.db", { readonly: true });
-  t.is(db.readonly, true);
-});
+new DualTest("foobar.db", { readonly: true })
+  .both("Property .readonly of database if set", async (t) => {
+    const db = t.context.db;
+    t.is(db.readonly, true);
+  });

-test("Property .readonly of database if not set", async (t) => {
-  const db = new Database("foobar.db");
+const genDatabaseFilename = () => {
+  return `test-${crypto.randomBytes(8).toString('hex')}.db`;
+};
+
+new DualTest().both("opening a read-only database fails if the file doesn't exist", async (t) => {
+  t.throws(() => t.context.connect(genDatabaseFilename(), { readonly: true }),
+    {
+      any: true,
+      code: 'SQLITE_CANTOPEN',
+    });
+})
+
+foobarTest.both("Property .readonly of database if not set", async (t) => {
+  const db = t.context.db;
   t.is(db.readonly, false);
 });

-test("Property .open of database", async (t) => {
-  const db = new Database("foobar.db");
+foobarTest.onlySqlitePasses("Property .open of database", async (t) => {
+  const db = t.context.db;
   t.is(db.open, true);
 });

-test("Statement.get() returns data", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Statement.get() returns data", async (t) => {
+  const db = t.context.db;
   const stmt = db.prepare("SELECT 1");
   const result = stmt.get();
   t.is(result["1"], 1);
@@ -46,22 +59,24 @@ test("Statement.get() returns data", async (t) => {
   t.is(result2["1"], 1);
 });

-test("Statement.get() returns undefined when no data", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Statement.get() returns undefined when no data", async (t) => {
+  const db = t.context.db;
   const stmt = db.prepare("SELECT 1 WHERE 1 = 2");
   const result = stmt.get();
   t.is(result, undefined);
 });

-test("Statement.run() returns correct result object", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.onlySqlitePasses("Statement.run() returns correct result object", async (t) => {
+  // run() isn't 100% compatible with better-sqlite3
+  // it should return a result object, not a row object
+  const db = t.context.db;
   db.prepare("CREATE TABLE users (name TEXT)").run();
   const rows = db.prepare("INSERT INTO users (name) VALUES (?)").run("Alice");
   t.deepEqual(rows, { changes: 1, lastInsertRowid: 1 });
 });

-test("Statment.iterate() should correctly return an iterable object", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Statment.iterate() should correctly return an iterable object", async (t) => {
+  const db = t.context.db;
   db.prepare(
     "CREATE TABLE users (name TEXT, age INTEGER, nationality TEXT)",
   ).run();
@@ -83,31 +98,45 @@ test("Statment.iterate() should correctly return an iterable object", async (t)
   }
 });

-test("Empty prepared statement should throw", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Empty prepared statement should throw", async (t) => {
+  const db = t.context.db;
   t.throws(
     () => {
       db.prepare("");
     },
-    { instanceOf: Error },
+    { any: true }
   );
 });

-test("Test pragma()", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.onlySqlitePasses("Empty prepared statement should throw the correct error", async (t) => {
+  // the previous test can be removed once this one passes in Turso
+  const db = t.context.db;
+  t.throws(
+    () => {
+      db.prepare("");
+    },
+    {
+      instanceOf: RangeError,
+      message: "The supplied SQL string contains no statements",
+    },
+  );
+});
+
+inMemoryTest.both("Test pragma()", async (t) => {
+  const db = t.context.db;
   t.deepEqual(typeof db.pragma("cache_size")[0].cache_size, "number");
   t.deepEqual(typeof db.pragma("cache_size", { simple: true }), "number");
 });

-test("pragma query", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("pragma query", async (t) => {
+  const db = t.context.db;
   let page_size = db.pragma("page_size");
   let expectedValue = [{ page_size: 4096 }];
   t.deepEqual(page_size, expectedValue);
 });

-test("pragma table_list", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("pragma table_list", async (t) => {
+  const db = t.context.db;
   let param = "sqlite_schema";
   let actual = db.pragma(`table_info(${param})`);
   let expectedValue = [
@@ -120,16 +149,16 @@ test("pragma table_list", async (t) => {
   t.deepEqual(actual, expectedValue);
 });

-test("simple pragma table_list", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("simple pragma table_list", async (t) => {
+  const db = t.context.db;
   let param = "sqlite_schema";
   let actual = db.pragma(`table_info(${param})`, { simple: true });
   let expectedValue = 0;
   t.deepEqual(actual, expectedValue);
 });

-test("Statement shouldn't bind twice with bind()", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Statement shouldn't bind twice with bind()", async (t) => {
+  const db = t.context.db;
   db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
   db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
   let stmt = db.prepare("SELECT * FROM users WHERE name = ?").bind("Alice");
@@ -141,14 +170,17 @@ test("Statement shouldn't bind twice with bind()", async (t) => {

   t.throws(
     () => {
-      db.bind("Bob");
+      stmt.bind("Bob");
     },
-    { instanceOf: Error },
+    {
+      instanceOf: TypeError,
+      message: 'The bind() method can only be invoked once per statement object',
+    },
   );
 });

-test("Test pluck(): Rows should only have the values of the first column", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Test pluck(): Rows should only have the values of the first column", async (t) => {
+  const db = t.context.db;
   db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
   db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
   db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Bob", 24);
@@ -161,8 +193,8 @@ test("Test pluck(): Rows should only have the values of the first column", async
   }
 });

-test("Test raw(): Rows should be returned as arrays", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Test raw(): Rows should be returned as arrays", async (t) => {
+  const db = t.context.db;
   db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
   db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
   db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Bob", 24);
@@ -194,7 +226,7 @@ test("Test raw(): Rows should be returned as arrays", async (t) => {
   t.deepEqual(rows[1], ["Bob", 24]);
 });

-test("Test expand(): Columns should be namespaced", async (t) => {
+inMemoryTest.onlySqlitePasses("Test expand(): Columns should be namespaced", async (t) => {
   const expandedResults = [
     {
       users: {
@@ -235,7 +267,7 @@ test("Test expand(): Columns should be namespaced", async (t) => {
   },
 ];

-  const [db] = await connect(":memory:");
+  const db = t.context.db;
   db.prepare("CREATE TABLE users (name TEXT, type TEXT)").run();
   db.prepare("CREATE TABLE addresses (userName TEXT, street TEXT, type TEXT)")
     .run();
@@ -270,8 +302,8 @@ test("Test expand(): Columns should be namespaced", async (t) => {
   t.deepEqual(allRows, regularResults);
 });

-test("Presentation modes should be mutually exclusive", async (t) => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Presentation modes should be mutually exclusive", async (t) => {
+  const db = t.context.db;
   db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
   db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
   db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Bob", 24);
@@ -310,22 +342,31 @@ test("Presentation modes should be mutually exclusive", async (t) => {
     t.truthy(name);
     t.assert(typeof name === "string");
   }
 });

+inMemoryTest.onlySqlitePasses("Presentation mode 'expand' should be mutually exclusive", async (t) => {
+  // this test can be appended to the previous one when 'expand' is implemented in Turso
+  const db = t.context.db;
+  db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
+  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
+  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Bob", 24);
+
+  let stmt = db.prepare("SELECT * FROM users").pluck().raw();
+
   // test expand()
   stmt = db.prepare("SELECT * FROM users").raw().pluck().expand();
-  rows = stmt.all();
+  const rows = stmt.all();
   t.true(Array.isArray(rows));
   t.is(rows.length, 2);
   t.deepEqual(rows[0], { users: { name: "Alice", age: 42 } });
   t.deepEqual(rows[1], { users: { name: "Bob", age: 24 } });
-});
+})

-test("Test exec(): Should correctly load multiple statements from file", async (t) => {
+inMemoryTest.both("Test exec(): Should correctly load multiple statements from file", async (t) => {
   const __filename = fileURLToPath(import.meta.url);
   const __dirname = path.dirname(__filename);

-  const [db] = await connect(":memory:");
+  const db = t.context.db;
   const file = fs.readFileSync(path.resolve(__dirname, "./artifacts/basic-test.sql"), "utf8");
   db.exec(file);
   let rows = db.prepare("SELECT * FROM users").iterate();
@@ -335,20 +376,17 @@ test("Test exec(): Should correctly load multiple statements from file", async (
   }
 });

-test("Test Statement.database gets the database object", async t => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Test Statement.database gets the database object", async t => {
+  const db = t.context.db;
   let stmt = db.prepare("SELECT 1");
   t.is(stmt.database, db);
 });

-test("Test Statement.source", async t => {
-  const [db] = await connect(":memory:");
+inMemoryTest.both("Test Statement.source", async t => {
+  const db = t.context.db;
   let sql = "CREATE TABLE t (id int)";
   let stmt = db.prepare(sql);
   t.is(stmt.source, sql);
 });
-
-const connect = async (path) => {
-  const db = new Database(path);
-  return [db];
-};
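dual-test.mjs itself is not part of this diff. As a rough sketch of the pattern the rewritten tests rely on (the class shape, the import paths, and the use of t.context are assumptions, not the repo's actual implementation), a DualTest helper can register each ava test once per driver and expose both() plus an onlySqlitePasses() escape hatch for known incompatibilities:

// Hypothetical sketch of a DualTest helper (not the actual dual-test.mjs).
import test from "ava";
import SqliteDatabase from "better-sqlite3";
import TursoDatabase from "../wrapper.js"; // import path assumed

export default class DualTest {
  constructor(path, options) {
    this.path = path;
    this.options = options;
  }

  // Register the same test body once per driver.
  both(name, body) {
    this.#register(name, body, SqliteDatabase, "sqlite");
    this.#register(name, body, TursoDatabase, "turso");
  }

  // Known incompatibility: run the body under better-sqlite3 only for now.
  onlySqlitePasses(name, body) {
    this.#register(name, body, SqliteDatabase, "sqlite");
  }

  #register(name, body, Driver, label) {
    test(`${label}: ${name}`, async (t) => {
      t.context.path = this.path;
      t.context.connect = (p, o) => new Driver(p, o);
      if (this.path !== undefined) {
        t.context.db = new Driver(this.path, this.options);
      }
      await body(t);
    });
  }
}

Registering the same body under two context objects keeps a single source of truth for assertions, while incompatible cases degrade to a SQLite-only check instead of being deleted.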
@@ -1,257 +0,0 @@
import test from "ava";
import fs from "node:fs";
import { fileURLToPath } from "url";
import path from "node:path";

import Database from "../wrapper.js";

test("Open in-memory database", async (t) => {
  const [db] = await connect(":memory:");
  t.is(db.memory, true);
});

test("Property .name of in-memory database", async (t) => {
  let name = ":memory:";
  const db = new Database(name);
  t.is(db.name, name);
});

test("Property .name of database", async (t) => {
  let name = "foobar.db";
  const db = new Database(name);
  t.is(db.name, name);
});

test("Statement.get() returns data", async (t) => {
  const [db] = await connect(":memory:");
  const stmt = db.prepare("SELECT 1");
  const result = stmt.get();
  t.is(result["1"], 1);
  const result2 = stmt.get();
  t.is(result2["1"], 1);
});

test("Statement.get() returns undefined when no data", async (t) => {
  const [db] = await connect(":memory:");
  const stmt = db.prepare("SELECT 1 WHERE 1 = 2");
  const result = stmt.get();
  t.is(result, undefined);
});

// run() isn't 100% compatible with better-sqlite3
// it should return a result object, not a row object
test("Statement.run() returns correct result object", async (t) => {
  const [db] = await connect(":memory:");
  db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
  let rows = db.prepare("SELECT * FROM users").all();
  t.deepEqual(rows, [{ name: "Alice", age: 42 }]);
});

test("Statment.iterate() should correctly return an iterable object", async (t) => {
  const [db] = await connect(":memory:");
  db.prepare(
    "CREATE TABLE users (name TEXT, age INTEGER, nationality TEXT)",
  ).run();
  db.prepare("INSERT INTO users (name, age, nationality) VALUES (?, ?, ?)").run(
    ["Alice", 42],
    "UK",
  );
  db.prepare("INSERT INTO users (name, age, nationality) VALUES (?, ?, ?)").run(
    "Bob",
    24,
    "USA",
  );

  let rows = db.prepare("SELECT * FROM users").iterate();
  for (const row of rows) {
    t.truthy(row.name);
    t.truthy(row.nationality);
    t.true(typeof row.age === "number");
  }
});

test("Empty prepared statement should throw", async (t) => {
  const [db] = await connect(":memory:");
  t.throws(
    () => {
      db.prepare("");
    },
    { instanceOf: Error },
  );
});

test("Test pragma()", async (t) => {
  const [db] = await connect(":memory:");
  t.true(typeof db.pragma("cache_size")[0].cache_size === "number");
  t.true(typeof db.pragma("cache_size", { simple: true }) === "number");
});

test("Statement shouldn't bind twice with bind()", async (t) => {
  const [db] = await connect(":memory:");
  db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
  let stmt = db.prepare("SELECT * FROM users WHERE name = ?").bind("Alice");

  for (const row of stmt.iterate()) {
    t.truthy(row.name);
    t.true(typeof row.age === "number");
  }

  t.throws(
    () => {
      db.bind("Bob");
    },
    { instanceOf: Error },
  );
});

test("Test pluck(): Rows should only have the values of the first column", async (t) => {
  const [db] = await connect(":memory:");
  db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Bob", 24);

  let stmt = db.prepare("SELECT * FROM users").pluck();

  for (const row of stmt.iterate()) {
    t.truthy(row);
    t.assert(typeof row === "string");
  }
});

test("Test raw(): Rows should be returned as arrays", async (t) => {
  const [db] = await connect(":memory:");
  db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Bob", 24);

  let stmt = db.prepare("SELECT * FROM users").raw();

  for (const row of stmt.iterate()) {
    t.true(Array.isArray(row));
    t.true(typeof row[0] === "string");
    t.true(typeof row[1] === "number");
  }

  stmt = db.prepare("SELECT * FROM users WHERE name = ?").raw();
  const row = stmt.get("Alice");
  t.true(Array.isArray(row));
  t.is(row.length, 2);
  t.is(row[0], "Alice");
  t.is(row[1], 42);

  const noRow = stmt.get("Charlie");
  t.is(noRow, undefined);

  stmt = db.prepare("SELECT * FROM users").raw();
  const rows = stmt.all();
  t.true(Array.isArray(rows));
  t.is(rows.length, 2);
  t.deepEqual(rows[0], ["Alice", 42]);
  t.deepEqual(rows[1], ["Bob", 24]);
});

test("Presentation modes should be mutually exclusive", async (t) => {
  const [db] = await connect(":memory:");
  db.prepare("CREATE TABLE users (name TEXT, age INTEGER)").run();
  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Alice", 42);
  db.prepare("INSERT INTO users (name, age) VALUES (?, ?)").run("Bob", 24);

  // test raw()
  let stmt = db.prepare("SELECT * FROM users").pluck().raw();

  for (const row of stmt.iterate()) {
    t.true(Array.isArray(row));
    t.true(typeof row[0] === "string");
    t.true(typeof row[1] === "number");
  }

  stmt = db.prepare("SELECT * FROM users WHERE name = ?").raw();
  const row = stmt.get("Alice");
  t.true(Array.isArray(row));
  t.is(row.length, 2);
  t.is(row[0], "Alice");
  t.is(row[1], 42);

  const noRow = stmt.get("Charlie");
  t.is(noRow, undefined);

  stmt = db.prepare("SELECT * FROM users").raw();
  const rows = stmt.all();
  t.true(Array.isArray(rows));
  t.is(rows.length, 2);
  t.deepEqual(rows[0], ["Alice", 42]);
  t.deepEqual(rows[1], ["Bob", 24]);

  // test pluck()
  stmt = db.prepare("SELECT * FROM users").raw().pluck();

  for (const name of stmt.iterate()) {
    t.truthy(name);
    t.assert(typeof name === "string");
  }
});

test("Test exec(): Should correctly load multiple statements from file", async (t) => {
  const __filename = fileURLToPath(import.meta.url);
  const __dirname = path.dirname(__filename);

  const [db] = await connect(":memory:");
  const file = fs.readFileSync(path.resolve(__dirname, "./artifacts/basic-test.sql"), "utf8");
  db.exec(file);
  let rows = db.prepare("SELECT * FROM users").iterate();
  for (const row of rows) {
    t.truthy(row.name);
    t.true(typeof row.age === "number");
  }
});

test("pragma query", async (t) => {
  const [db] = await connect(":memory:");
  let page_size = db.pragma("page_size");
  let expectedValue = [{ page_size: 4096 }];
  t.deepEqual(page_size, expectedValue);
});

test("pragma table_list", async (t) => {
  const [db] = await connect(":memory:");
  let param = "sqlite_schema";
  let actual = db.pragma(`table_info(${param})`);
  let expectedValue = [
    { cid: 0, name: "type", type: "TEXT", notnull: 0, dflt_value: null, pk: 0 },
    { cid: 1, name: "name", type: "TEXT", notnull: 0, dflt_value: null, pk: 0 },
    { cid: 2, name: "tbl_name", type: "TEXT", notnull: 0, dflt_value: null, pk: 0 },
    { cid: 3, name: "rootpage", type: "INT", notnull: 0, dflt_value: null, pk: 0 },
    { cid: 4, name: "sql", type: "TEXT", notnull: 0, dflt_value: null, pk: 0 },
  ];
  t.deepEqual(actual, expectedValue);
});

test("Test Statement.database gets the database object", async t => {
  const [db] = await connect(":memory:");
  let stmt = db.prepare("SELECT 1");
  t.is(stmt.database, db);
});

test("Test Statement.source", async t => {
  const [db] = await connect(":memory:");
  let sql = "CREATE TABLE t (id int)";
  let stmt = db.prepare(sql);
  t.is(stmt.source, sql);
});

test("simple pragma table_list", async (t) => {
  const [db] = await connect(":memory:");
  let param = "sqlite_schema";
  let actual = db.pragma(`table_info(${param})`, { simple: true });
  let expectedValue = 0;
  t.deepEqual(actual, expectedValue);
});

const connect = async (path) => {
  const db = new Database(path);
  return [db];
};
@@ -377,7 +377,7 @@ dualTest.both("Database.pragma()", async (t) => {
   t.deepEqual(db.pragma("cache_size"), [{ "cache_size": 2000 }]);
 });

-dualTest.onlySqlitePasses("errors", async (t) => {
+dualTest.both("errors", async (t) => {
   const db = t.context.db;

   const syntaxError = await t.throws(() => {
@@ -385,7 +385,7 @@ dualTest.onlySqlitePasses("errors", async (t) => {
   }, {
     any: true,
     instanceOf: t.context.errorType,
-    message: 'near "SYNTAX": syntax error',
+    message: /near "SYNTAX": syntax error/,
     code: 'SQLITE_ERROR'
   });
   const noTableError = await t.throws(() => {
@@ -393,7 +393,7 @@ dualTest.onlySqlitePasses("errors", async (t) => {
   }, {
     any: true,
     instanceOf: t.context.errorType,
-    message: "no such table: missing_table",
+    message: /(Parse error: Table missing_table not found|no such table: missing_table)/,
     code: 'SQLITE_ERROR'
   });
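The switch from exact strings to regular expressions is what lets one assertion pass under both engines, whose error messages differ. A self-contained sketch of the same pattern in plain ava (without the DualTest context; driver choice and table name are illustrative):

// Standalone sketch of the regex-based error assertion used above.
import test from "ava";
import Database from "better-sqlite3"; // or the Turso wrapper

test("missing table raises an error under either engine", (t) => {
  const db = new Database(":memory:");
  t.throws(() => db.exec("SELECT * FROM missing_table"), {
    message: /(Parse error: Table missing_table not found|no such table: missing_table)/,
  });
});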
40 bindings/javascript/index.d.ts vendored
@@ -3,41 +3,41 @@
|
||||
|
||||
/* auto-generated by NAPI-RS */
|
||||
|
||||
export interface Options {
|
||||
readonly: boolean
|
||||
fileMustExist: boolean
|
||||
timeout: number
|
||||
export interface OpenDatabaseOptions {
|
||||
readonly?: boolean
|
||||
fileMustExist?: boolean
|
||||
timeout?: number
|
||||
}
|
||||
export interface PragmaOptions {
|
||||
simple: boolean
|
||||
}
|
||||
export declare class Database {
|
||||
memory: boolean
|
||||
readonly: boolean
|
||||
inTransaction: boolean
|
||||
open: boolean
|
||||
name: string
|
||||
constructor(path: string, options?: Options | undefined | null)
|
||||
constructor(path: string, options?: OpenDatabaseOptions | undefined | null)
|
||||
prepare(sql: string): Statement
|
||||
transaction(): void
|
||||
pragma(): void
|
||||
pragma(pragmaName: string, options?: PragmaOptions | undefined | null): unknown
|
||||
backup(): void
|
||||
serialize(): void
|
||||
function(): void
|
||||
aggregate(): void
|
||||
table(): void
|
||||
loadExtension(): void
|
||||
loadExtension(path: string): void
|
||||
exec(sql: string): void
|
||||
close(): void
|
||||
}
|
||||
export declare class Statement {
|
||||
database: Database
|
||||
source: string
|
||||
reader: boolean
|
||||
readonly: boolean
|
||||
busy: boolean
|
||||
get(): unknown
|
||||
all(): NapiResult
|
||||
run(args: Array<unknown>): void
|
||||
static iterate(): void
|
||||
static pluck(): void
|
||||
get(args?: Array<unknown> | undefined | null): unknown
|
||||
run(args?: Array<unknown> | undefined | null): unknown
|
||||
iterate(args?: Array<unknown> | undefined | null): IteratorStatement
|
||||
all(args?: Array<unknown> | undefined | null): unknown
|
||||
pluck(pluck?: boolean | undefined | null): void
|
||||
static expand(): void
|
||||
static raw(): void
|
||||
raw(raw?: boolean | undefined | null): void
|
||||
static columns(): void
|
||||
static bind(): void
|
||||
bind(args?: Array<unknown> | undefined | null): Statement
|
||||
}
|
||||
export declare class IteratorStatement { }
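With these declarations, open options and statement arguments become optional. A rough usage sketch based on the updated typings (the package name is taken from the package.json further down; exact values are illustrative):

const { Database } = require('@tursodatabase/turso');

// options are now optional and partial (OpenDatabaseOptions)
const db = new Database(':memory:');
const snapshot = new Database('app.db', { readonly: true });

// pragma() takes a name plus optional PragmaOptions;
// { simple: true } asks for a single scalar instead of row objects
const cacheSize = db.pragma('cache_size', { simple: true });

// iterate() now returns an IteratorStatement, usable with for...of
for (const row of db.prepare('SELECT 1 AS one').iterate()) {
  console.log(row);
}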

@@ -5,325 +5,313 @@

/* auto-generated by NAPI-RS */

const { existsSync, readFileSync } = require('fs')
const { join } = require("path");
const { join } = require('path')

const { platform, arch } = process;
const { platform, arch } = process

let nativeBinding = null;
let localFileExisted = false;
let loadError = null;
let nativeBinding = null
let localFileExisted = false
let loadError = null

function isMusl() {
// For Node 10
if (!process.report || typeof process.report.getReport !== "function") {
if (!process.report || typeof process.report.getReport !== 'function') {
try {
const lddPath = require("child_process")
.execSync("which ldd")
.toString()
.trim();
return readFileSync(lddPath, "utf8").includes("musl");
const lddPath = require('child_process').execSync('which ldd').toString().trim()
return readFileSync(lddPath, 'utf8').includes('musl')
} catch (e) {
return true;
return true
}
} else {
const { glibcVersionRuntime } = process.report.getReport().header;
return !glibcVersionRuntime;
const { glibcVersionRuntime } = process.report.getReport().header
return !glibcVersionRuntime
}
}

switch (platform) {
case "android":
case 'android':
switch (arch) {
case "arm64":
localFileExisted = existsSync(
join(__dirname, "turso.android-arm64.node"),
);
case 'arm64':
localFileExisted = existsSync(join(__dirname, 'turso.android-arm64.node'))
try {
if (localFileExisted) {
nativeBinding = require("./turso.android-arm64.node");
nativeBinding = require('./turso.android-arm64.node')
} else {
nativeBinding = require("@tursodatabase/turso-android-arm64");
nativeBinding = require('@tursodatabase/turso-android-arm64')
}
} catch (e) {
loadError = e;
loadError = e
}
break;
case "arm":
localFileExisted = existsSync(
join(__dirname, "turso.android-arm-eabi.node"),
);
break
case 'arm':
localFileExisted = existsSync(join(__dirname, 'turso.android-arm-eabi.node'))
try {
if (localFileExisted) {
nativeBinding = require("./turso.android-arm-eabi.node");
nativeBinding = require('./turso.android-arm-eabi.node')
} else {
nativeBinding = require("@tursodatabase/turso-android-arm-eabi");
nativeBinding = require('@tursodatabase/turso-android-arm-eabi')
}
} catch (e) {
loadError = e;
loadError = e
}
break;
break
default:
throw new Error(`Unsupported architecture on Android ${arch}`);
throw new Error(`Unsupported architecture on Android ${arch}`)
}
break;
case "win32":
break
case 'win32':
switch (arch) {
case "x64":
case 'x64':
localFileExisted = existsSync(
join(__dirname, "turso.win32-x64-msvc.node"),
);
join(__dirname, 'turso.win32-x64-msvc.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.win32-x64-msvc.node");
nativeBinding = require('./turso.win32-x64-msvc.node')
} else {
nativeBinding = require("@tursodatabase/turso-win32-x64-msvc");
nativeBinding = require('@tursodatabase/turso-win32-x64-msvc')
}
} catch (e) {
loadError = e;
loadError = e
}
break;
case "ia32":
break
case 'ia32':
localFileExisted = existsSync(
join(__dirname, "turso.win32-ia32-msvc.node"),
);
join(__dirname, 'turso.win32-ia32-msvc.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.win32-ia32-msvc.node");
nativeBinding = require('./turso.win32-ia32-msvc.node')
} else {
nativeBinding = require("@tursodatabase/turso-win32-ia32-msvc");
nativeBinding = require('@tursodatabase/turso-win32-ia32-msvc')
}
} catch (e) {
loadError = e;
loadError = e
}
break;
case "arm64":
break
case 'arm64':
localFileExisted = existsSync(
join(__dirname, "turso.win32-arm64-msvc.node"),
);
join(__dirname, 'turso.win32-arm64-msvc.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.win32-arm64-msvc.node");
nativeBinding = require('./turso.win32-arm64-msvc.node')
} else {
nativeBinding = require("@tursodatabase/turso-win32-arm64-msvc");
nativeBinding = require('@tursodatabase/turso-win32-arm64-msvc')
}
} catch (e) {
loadError = e;
loadError = e
}
break;
break
default:
throw new Error(`Unsupported architecture on Windows: ${arch}`);
throw new Error(`Unsupported architecture on Windows: ${arch}`)
}
break;
case "darwin":
localFileExisted = existsSync(
join(__dirname, "turso.darwin-universal.node"),
);
break
case 'darwin':
localFileExisted = existsSync(join(__dirname, 'turso.darwin-universal.node'))
try {
if (localFileExisted) {
nativeBinding = require("./turso.darwin-universal.node");
nativeBinding = require('./turso.darwin-universal.node')
} else {
nativeBinding = require("@tursodatabase/turso-darwin-universal");
nativeBinding = require('@tursodatabase/turso-darwin-universal')
}
break;
break
} catch {}
switch (arch) {
case "x64":
localFileExisted = existsSync(
join(__dirname, "turso.darwin-x64.node"),
);
case 'x64':
localFileExisted = existsSync(join(__dirname, 'turso.darwin-x64.node'))
try {
if (localFileExisted) {
nativeBinding = require("./turso.darwin-x64.node");
nativeBinding = require('./turso.darwin-x64.node')
} else {
nativeBinding = require("@tursodatabase/turso-darwin-x64");
nativeBinding = require('@tursodatabase/turso-darwin-x64')
}
} catch (e) {
loadError = e;
loadError = e
}
break;
case "arm64":
break
case 'arm64':
localFileExisted = existsSync(
join(__dirname, "turso.darwin-arm64.node"),
);
join(__dirname, 'turso.darwin-arm64.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.darwin-arm64.node");
nativeBinding = require('./turso.darwin-arm64.node')
} else {
nativeBinding = require("@tursodatabase/turso-darwin-arm64");
nativeBinding = require('@tursodatabase/turso-darwin-arm64')
}
} catch (e) {
loadError = e;
loadError = e
}
break;
break
default:
throw new Error(`Unsupported architecture on macOS: ${arch}`);
throw new Error(`Unsupported architecture on macOS: ${arch}`)
}
break;
case "freebsd":
if (arch !== "x64") {
throw new Error(`Unsupported architecture on FreeBSD: ${arch}`);
break
case 'freebsd':
if (arch !== 'x64') {
throw new Error(`Unsupported architecture on FreeBSD: ${arch}`)
}
localFileExisted = existsSync(
join(__dirname, "turso.freebsd-x64.node"),
);
localFileExisted = existsSync(join(__dirname, 'turso.freebsd-x64.node'))
try {
if (localFileExisted) {
nativeBinding = require("./turso.freebsd-x64.node");
nativeBinding = require('./turso.freebsd-x64.node')
} else {
nativeBinding = require("@tursodatabase/turso-freebsd-x64");
nativeBinding = require('@tursodatabase/turso-freebsd-x64')
}
} catch (e) {
loadError = e;
loadError = e
}
break;
case "linux":
break
case 'linux':
switch (arch) {
case "x64":
case 'x64':
if (isMusl()) {
localFileExisted = existsSync(
join(__dirname, "turso.linux-x64-musl.node"),
);
join(__dirname, 'turso.linux-x64-musl.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.linux-x64-musl.node");
nativeBinding = require('./turso.linux-x64-musl.node')
} else {
nativeBinding = require("@tursodatabase/turso-linux-x64-musl");
nativeBinding = require('@tursodatabase/turso-linux-x64-musl')
}
} catch (e) {
loadError = e;
loadError = e
}
} else {
localFileExisted = existsSync(
join(__dirname, "turso.linux-x64-gnu.node"),
);
join(__dirname, 'turso.linux-x64-gnu.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.linux-x64-gnu.node");
nativeBinding = require('./turso.linux-x64-gnu.node')
} else {
nativeBinding = require("@tursodatabase/turso-linux-x64-gnu");
nativeBinding = require('@tursodatabase/turso-linux-x64-gnu')
}
} catch (e) {
loadError = e;
loadError = e
}
}
break;
case "arm64":
break
case 'arm64':
if (isMusl()) {
localFileExisted = existsSync(
join(__dirname, "turso.linux-arm64-musl.node"),
);
join(__dirname, 'turso.linux-arm64-musl.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.linux-arm64-musl.node");
nativeBinding = require('./turso.linux-arm64-musl.node')
} else {
nativeBinding = require("@tursodatabase/turso-linux-arm64-musl");
nativeBinding = require('@tursodatabase/turso-linux-arm64-musl')
}
} catch (e) {
loadError = e;
loadError = e
}
} else {
localFileExisted = existsSync(
join(__dirname, "turso.linux-arm64-gnu.node"),
);
join(__dirname, 'turso.linux-arm64-gnu.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.linux-arm64-gnu.node");
nativeBinding = require('./turso.linux-arm64-gnu.node')
} else {
nativeBinding = require("@tursodatabase/turso-linux-arm64-gnu");
nativeBinding = require('@tursodatabase/turso-linux-arm64-gnu')
}
} catch (e) {
loadError = e;
loadError = e
}
}
break;
case "arm":
break
case 'arm':
if (isMusl()) {
localFileExisted = existsSync(
join(__dirname, "turso.linux-arm-musleabihf.node"),
);
join(__dirname, 'turso.linux-arm-musleabihf.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.linux-arm-musleabihf.node");
nativeBinding = require('./turso.linux-arm-musleabihf.node')
} else {
nativeBinding = require("@tursodatabase/turso-linux-arm-musleabihf");
nativeBinding = require('@tursodatabase/turso-linux-arm-musleabihf')
}
} catch (e) {
loadError = e;
loadError = e
}
} else {
localFileExisted = existsSync(
join(__dirname, "turso.linux-arm-gnueabihf.node"),
);
join(__dirname, 'turso.linux-arm-gnueabihf.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.linux-arm-gnueabihf.node");
nativeBinding = require('./turso.linux-arm-gnueabihf.node')
} else {
nativeBinding = require("@tursodatabase/turso-linux-arm-gnueabihf");
nativeBinding = require('@tursodatabase/turso-linux-arm-gnueabihf')
}
} catch (e) {
loadError = e;
loadError = e
}
}
break;
case "riscv64":
break
case 'riscv64':
if (isMusl()) {
localFileExisted = existsSync(
join(__dirname, "turso.linux-riscv64-musl.node"),
);
join(__dirname, 'turso.linux-riscv64-musl.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.linux-riscv64-musl.node");
nativeBinding = require('./turso.linux-riscv64-musl.node')
} else {
nativeBinding = require("@tursodatabase/turso-linux-riscv64-musl");
nativeBinding = require('@tursodatabase/turso-linux-riscv64-musl')
}
} catch (e) {
loadError = e;
loadError = e
}
} else {
localFileExisted = existsSync(
join(__dirname, "turso.linux-riscv64-gnu.node"),
);
join(__dirname, 'turso.linux-riscv64-gnu.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.linux-riscv64-gnu.node");
nativeBinding = require('./turso.linux-riscv64-gnu.node')
} else {
nativeBinding = require("@tursodatabase/turso-linux-riscv64-gnu");
nativeBinding = require('@tursodatabase/turso-linux-riscv64-gnu')
}
} catch (e) {
loadError = e;
loadError = e
}
}
break;
case "s390x":
break
case 's390x':
localFileExisted = existsSync(
join(__dirname, "turso.linux-s390x-gnu.node"),
);
join(__dirname, 'turso.linux-s390x-gnu.node')
)
try {
if (localFileExisted) {
nativeBinding = require("./turso.linux-s390x-gnu.node");
nativeBinding = require('./turso.linux-s390x-gnu.node')
} else {
nativeBinding = require("@tursodatabase/turso-linux-s390x-gnu");
nativeBinding = require('@tursodatabase/turso-linux-s390x-gnu')
}
} catch (e) {
loadError = e;
loadError = e
}
break;
break
default:
throw new Error(`Unsupported architecture on Linux: ${arch}`);
throw new Error(`Unsupported architecture on Linux: ${arch}`)
}
break;
break
default:
throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`);
throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`)
}

if (!nativeBinding) {
if (loadError) {
throw loadError;
throw loadError
}
throw new Error(`Failed to load native binding`);
throw new Error(`Failed to load native binding`)
}

const { Database, Statement } = nativeBinding;
const { Database, Statement, IteratorStatement } = nativeBinding

module.exports.Database = Database;
module.exports.Statement = Statement;
module.exports.Database = Database
module.exports.Statement = Statement
module.exports.IteratorStatement = IteratorStatement

@@ -1,6 +1,6 @@
{
"name": "@tursodatabase/turso-darwin-universal",
"version": "0.1.1",
"version": "0.1.2-pre.2",
"repository": {
"type": "git",
"url": "https://github.com/tursodatabase/turso"

@@ -1,6 +1,6 @@
{
"name": "@tursodatabase/turso-linux-x64-gnu",
"version": "0.1.1",
"version": "0.1.2-pre.2",
"repository": {
"type": "git",
"url": "https://github.com/tursodatabase/turso"

@@ -1,6 +1,6 @@
{
"name": "@tursodatabase/turso-win32-x64-msvc",
"version": "0.1.1",
"version": "0.1.2-pre.2",
"repository": {
"type": "git",
"url": "https://github.com/tursodatabase/turso"

4 bindings/javascript/package-lock.json generated

@@ -1,12 +1,12 @@
{
"name": "@tursodatabase/turso",
"version": "0.1.1",
"version": "0.1.2-pre.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@tursodatabase/turso",
"version": "0.1.1",
"version": "0.1.2-pre.2",
"license": "MIT",
"devDependencies": {
"@napi-rs/cli": "^2.18.4",

@@ -1,6 +1,6 @@
{
"name": "@tursodatabase/turso",
"version": "0.1.1",
"version": "0.1.2-pre.2",
"repository": {
"type": "git",
"url": "https://github.com/tursodatabase/turso"

22 bindings/javascript/sqlite-error.js Normal file

@@ -0,0 +1,22 @@
'use strict';
const descriptor = { value: 'SqliteError', writable: true, enumerable: false, configurable: true };

function SqliteError(message, code, rawCode) {
if (new.target !== SqliteError) {
return new SqliteError(message, code);
}
if (typeof code !== 'string') {
throw new TypeError('Expected second argument to be a string');
}
Error.call(this, message);
descriptor.value = '' + message;
Object.defineProperty(this, 'message', descriptor);
Error.captureStackTrace(this, SqliteError);
this.code = code;
this.rawCode = rawCode
}
Object.setPrototypeOf(SqliteError, Error);
Object.setPrototypeOf(SqliteError.prototype, Error.prototype);
Object.defineProperty(SqliteError.prototype, 'name', descriptor);
module.exports = SqliteError;
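SqliteError mirrors better-sqlite3's error type: a constructor that subclasses Error, works with or without `new`, and carries the SQLite result code. A small usage sketch (the failing statement is illustrative):

const SqliteError = require('./sqlite-error.js');

try {
  db.exec('SELECT * FROM missing_table');
} catch (err) {
  if (err instanceof SqliteError) {
    console.error(`${err.name}: ${err.message} (code=${err.code})`);
  } else {
    throw err;
  }
}

// the constructor also works without `new`
const e = SqliteError('no such table: missing_table', 'SQLITE_ERROR');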

@@ -14,12 +14,18 @@ use turso_core::{LimboError, StepResult};
#[derive(Default)]
#[napi(object)]
pub struct OpenDatabaseOptions {
pub readonly: bool,
pub file_must_exist: bool,
pub timeout: u32,
pub readonly: Option<bool>,
pub file_must_exist: Option<bool>,
pub timeout: Option<u32>,
// verbose => Callback,
}

impl OpenDatabaseOptions {
fn readonly(&self) -> bool {
self.readonly.unwrap_or(false)
}
}

#[napi(object)]
pub struct PragmaOptions {
pub simple: bool,
@@ -41,7 +47,7 @@ pub struct Database {
pub name: String,
_db: Arc<turso_core::Database>,
conn: Arc<turso_core::Connection>,
io: Arc<dyn turso_core::IO>,
_io: Arc<dyn turso_core::IO>,
}

impl ObjectFinalize for Database {
@@ -55,34 +61,36 @@ impl ObjectFinalize for Database {
#[napi]
impl Database {
#[napi(constructor)]
pub fn new(path: String, options: Option<OpenDatabaseOptions>) -> napi::Result<Self> {
pub fn new(path: String, options: Option<OpenDatabaseOptions>) -> napi::Result<Self, String> {
let memory = path == ":memory:";
let io: Arc<dyn turso_core::IO> = if memory {
Arc::new(turso_core::MemoryIO::new())
} else {
Arc::new(turso_core::PlatformIO::new().map_err(into_napi_error)?)
Arc::new(turso_core::PlatformIO::new().map_err(into_napi_sqlite_error)?)
};
let opts = options.unwrap_or_default();
let flag = if opts.readonly {
let flag = if opts.readonly() {
turso_core::OpenFlags::ReadOnly
} else {
turso_core::OpenFlags::Create
};
let file = io.open_file(&path, flag, false).map_err(into_napi_error)?;
let file = io
.open_file(&path, flag, false)
.map_err(|err| into_napi_error_with_message("SQLITE_CANTOPEN".to_owned(), err))?;

let db_file = Arc::new(DatabaseFile::new(file));
let db = turso_core::Database::open(io.clone(), &path, db_file, false, false)
.map_err(into_napi_error)?;
let conn = db.connect().map_err(into_napi_error)?;
.map_err(into_napi_sqlite_error)?;
let conn = db.connect().map_err(into_napi_sqlite_error)?;

Ok(Self {
readonly: opts.readonly,
readonly: opts.readonly(),
memory,
_db: db,
conn,
open: true,
name: path,
io,
_io: io,
})
}

@@ -114,7 +122,7 @@ impl Database {
return Ok(env.get_undefined()?.into_unknown())
}
turso_core::StepResult::IO => {
self.io.run_once().map_err(into_napi_error)?;
stmt.run_once().map_err(into_napi_error)?;
continue;
}
step @ turso_core::StepResult::Interrupt
@@ -131,16 +139,6 @@ impl Database {
}
}

#[napi]
pub fn readonly(&self) -> bool {
self.readonly
}

#[napi]
pub fn open(&self) -> bool {
self.open
}

#[napi]
pub fn backup(&self) {
todo!()
@@ -176,7 +174,7 @@ impl Database {
}

#[napi]
pub fn exec(&self, sql: String) -> napi::Result<()> {
pub fn exec(&self, sql: String) -> napi::Result<(), String> {
let query_runner = self.conn.query_runner(sql.as_bytes());

// Since exec doesn't return any values, we can just iterate over the results
@@ -185,17 +183,17 @@ impl Database {
Ok(Some(mut stmt)) => loop {
match stmt.step() {
Ok(StepResult::Row) => continue,
Ok(StepResult::IO) => self.io.run_once().map_err(into_napi_error)?,
Ok(StepResult::IO) => stmt.run_once().map_err(into_napi_sqlite_error)?,
Ok(StepResult::Done) => break,
Ok(StepResult::Interrupt | StepResult::Busy) => {
return Err(napi::Error::new(
napi::Status::GenericFailure,
"SQLITE_ERROR".to_owned(),
"Statement execution interrupted or busy".to_string(),
));
}
Err(err) => {
return Err(napi::Error::new(
napi::Status::GenericFailure,
"SQLITE_ERROR".to_owned(),
format!("Error executing SQL: {}", err),
));
}
@@ -204,7 +202,7 @@ impl Database {
Ok(None) => continue,
Err(err) => {
return Err(napi::Error::new(
napi::Status::GenericFailure,
"SQLITE_ERROR".to_owned(),
format!("Error executing SQL: {}", err),
));
}
@@ -263,7 +261,7 @@ impl Statement {

#[napi]
pub fn get(&self, env: Env, args: Option<Vec<JsUnknown>>) -> napi::Result<JsUnknown> {
let mut stmt = self.check_and_bind(args)?;
let mut stmt = self.check_and_bind(env, args)?;

loop {
let step = stmt.step().map_err(into_napi_error)?;
@@ -308,7 +306,7 @@ impl Statement {
}
turso_core::StepResult::Done => return Ok(env.get_undefined()?.into_unknown()),
turso_core::StepResult::IO => {
self.database.io.run_once().map_err(into_napi_error)?;
stmt.run_once().map_err(into_napi_error)?;
continue;
}
turso_core::StepResult::Interrupt | turso_core::StepResult::Busy => {
@@ -324,7 +322,7 @@ impl Statement {
// TODO: Return Info object (https://github.com/WiseLibs/better-sqlite3/blob/master/docs/api.md#runbindparameters---object)
#[napi]
pub fn run(&self, env: Env, args: Option<Vec<JsUnknown>>) -> napi::Result<JsUnknown> {
let stmt = self.check_and_bind(args)?;
let stmt = self.check_and_bind(env, args)?;

self.internal_all(env, stmt)
}
@@ -335,10 +333,15 @@ impl Statement {
env: Env,
args: Option<Vec<JsUnknown>>,
) -> napi::Result<IteratorStatement> {
self.check_and_bind(args)?;
if let Some(some_args) = args.as_ref() {
if some_args.iter().len() != 0 {
self.check_and_bind(env, args)?;
}
}

Ok(IteratorStatement {
stmt: Rc::clone(&self.inner),
database: self.database.clone(),
_database: self.database.clone(),
env,
presentation_mode: self.presentation_mode.clone(),
})
@@ -346,7 +349,7 @@ impl Statement {

#[napi]
pub fn all(&self, env: Env, args: Option<Vec<JsUnknown>>) -> napi::Result<JsUnknown> {
let stmt = self.check_and_bind(args)?;
let stmt = self.check_and_bind(env, args)?;

self.internal_all(env, stmt)
}
@@ -401,7 +404,7 @@ impl Statement {
break;
}
turso_core::StepResult::IO => {
self.database.io.run_once().map_err(into_napi_error)?;
stmt.run_once().map_err(into_napi_error)?;
}
turso_core::StepResult::Interrupt | turso_core::StepResult::Busy => {
return Err(napi::Error::new(
@@ -444,8 +447,9 @@ impl Statement {
}

#[napi]
pub fn bind(&mut self, args: Option<Vec<JsUnknown>>) -> napi::Result<Self> {
self.check_and_bind(args)?;
pub fn bind(&mut self, env: Env, args: Option<Vec<JsUnknown>>) -> napi::Result<Self, String> {
self.check_and_bind(env, args)
.map_err(with_sqlite_error_message)?;
self.binded = true;

Ok(self.clone())
@@ -455,16 +459,22 @@ impl Statement {
/// and bind values to variables. The expected type for args is `Option<Vec<JsUnknown>>`
fn check_and_bind(
&self,
env: Env,
args: Option<Vec<JsUnknown>>,
) -> napi::Result<RefMut<'_, turso_core::Statement>> {
let mut stmt = self.inner.borrow_mut();
stmt.reset();
if let Some(args) = args {
if self.binded {
return Err(napi::Error::new(
napi::Status::InvalidArg,
"This statement already has bound parameters",
));
let err = napi::Error::new(
into_convertible_type_error_message("TypeError"),
"The bind() method can only be invoked once per statement object",
);
unsafe {
napi::JsTypeError::from(err).throw_into(env.raw());
}

return Err(napi::Error::from_status(napi::Status::PendingException));
}

for (i, elem) in args.into_iter().enumerate() {
@@ -480,7 +490,7 @@ impl Statement {
#[napi(iterator)]
pub struct IteratorStatement {
stmt: Rc<RefCell<turso_core::Statement>>,
database: Database,
_database: Database,
env: Env,
presentation_mode: PresentationMode,
}
@@ -528,7 +538,7 @@ impl Generator for IteratorStatement {
}
turso_core::StepResult::Done => return None,
turso_core::StepResult::IO => {
self.database.io.run_once().ok()?;
stmt.run_once().ok()?;
continue;
}
turso_core::StepResult::Interrupt | turso_core::StepResult::Busy => return None,
@@ -630,6 +640,29 @@ impl turso_core::DatabaseStorage for DatabaseFile {
}

#[inline]
pub fn into_napi_error(limbo_error: LimboError) -> napi::Error {
fn into_napi_error(limbo_error: LimboError) -> napi::Error {
napi::Error::new(napi::Status::GenericFailure, format!("{limbo_error}"))
}

#[inline]
fn into_napi_sqlite_error(limbo_error: LimboError) -> napi::Error<String> {
napi::Error::new(String::from("SQLITE_ERROR"), format!("{limbo_error}"))
}

#[inline]
fn into_napi_error_with_message(
error_code: String,
limbo_error: LimboError,
) -> napi::Error<String> {
napi::Error::new(error_code, format!("{limbo_error}"))
}

#[inline]
fn with_sqlite_error_message(err: napi::Error) -> napi::Error<String> {
napi::Error::new("SQLITE_ERROR".to_owned(), err.reason)
}

#[inline]
fn into_convertible_type_error_message(error_type: &str) -> String {
"[TURSO_CONVERT_TYPE]".to_owned() + error_type
}

@@ -2,6 +2,28 @@

const { Database: NativeDB } = require("./index.js");

const SqliteError = require("./sqlite-error.js");

const convertibleErrorTypes = { TypeError };
const CONVERTIBLE_ERROR_PREFIX = '[TURSO_CONVERT_TYPE]';

function convertError(err) {
if ((err.code ?? '').startsWith(CONVERTIBLE_ERROR_PREFIX)) {
return createErrorByName(err.code.substring(CONVERTIBLE_ERROR_PREFIX.length), err.message);
}

return new SqliteError(err.message, err.code, err.rawCode);
}

function createErrorByName(name, message) {
const ErrorConstructor = convertibleErrorTypes[name];
if (!ErrorConstructor) {
throw new Error(`unknown error type ${name} from Turso`);
}

return new ErrorConstructor(message);
}
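convertError is the bridge between the native layer and idiomatic JavaScript errors: codes tagged with the [TURSO_CONVERT_TYPE] prefix are rethrown as the named built-in (currently only TypeError), everything else becomes a SqliteError. A hedged illustration of both paths (the exact statements are illustrative):

// path 1: a tagged native error surfaces as a plain TypeError
const stmt = db.prepare('SELECT ?').bind(1);
try {
  stmt.bind(2); // bind() may only be invoked once per statement object
} catch (err) {
  console.log(err instanceof TypeError); // true (converted via the prefix)
}

// path 2: ordinary engine errors surface as SqliteError with a code
try {
  db.exec('NOT VALID SQL;');
} catch (err) {
  console.log(err.code); // e.g. 'SQLITE_ERROR'
}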

/**
* Database represents a connection that can prepare and execute SQL statements.
*/
@@ -145,7 +167,11 @@ class Database {
* @param {string} sql - The SQL statement string to execute.
*/
exec(sql) {
this.db.exec(sql);
try {
this.db.exec(sql);
} catch (err) {
throw convertError(err);
}
}

/**
@@ -264,8 +290,13 @@ class Statement {
* @returns this - Statement with bound parameters
*/
bind(...bindParameters) {
return this.stmt.bind(bindParameters.flat());
try {
return new Statement(this.stmt.bind(bindParameters.flat()), this.db);
} catch (err) {
throw convertError(err);
}
}
}

module.exports = Database;
module.exports.SqliteError = SqliteError;

@@ -93,17 +93,24 @@ impl Cursor {
Ok::<(), anyhow::Error>(())
})?;

if stmt_is_dml && self.conn.conn.get_auto_commit() {
self.conn.conn.execute("BEGIN").map_err(|e| {
PyErr::new::<OperationalError, _>(format!(
"Failed to start transaction after DDL: {:?}",
e
))
})?;
}

// For DDL and DML statements,
// we need to execute the statement immediately
if stmt_is_ddl || stmt_is_dml || stmt_is_tx {
let mut stmt = stmt.borrow_mut();
while let turso_core::StepResult::IO = stmt
.borrow_mut()
.step()
.map_err(|e| PyErr::new::<OperationalError, _>(format!("Step error: {:?}", e)))?
{
self.conn
.io
.run_once()
stmt.run_once()
.map_err(|e| PyErr::new::<OperationalError, _>(format!("IO error: {:?}", e)))?;
}
}
@@ -132,7 +139,7 @@ impl Cursor {
return Ok(Some(py_row));
}
turso_core::StepResult::IO => {
self.conn.io.run_once().map_err(|e| {
stmt.run_once().map_err(|e| {
PyErr::new::<OperationalError, _>(format!("IO error: {:?}", e))
})?;
}
@@ -168,7 +175,7 @@ impl Cursor {
results.push(py_row);
}
turso_core::StepResult::IO => {
self.conn.io.run_once().map_err(|e| {
stmt.run_once().map_err(|e| {
PyErr::new::<OperationalError, _>(format!("IO error: {:?}", e))
})?;
}
@@ -233,7 +240,7 @@ fn stmt_is_tx(sql: &str) -> bool {
#[derive(Clone)]
pub struct Connection {
conn: Arc<turso_core::Connection>,
io: Arc<dyn turso_core::IO>,
_io: Arc<dyn turso_core::IO>,
}

#[pymethods]
@@ -298,9 +305,11 @@ impl Connection {

impl Drop for Connection {
fn drop(&mut self) {
self.conn
.close()
.expect("Failed to drop (close) connection");
if Arc::strong_count(&self.conn) == 1 {
self.conn
.close()
.expect("Failed to drop (close) connection");
}
}
}

@@ -308,7 +317,7 @@ impl Drop for Connection {
#[pyfunction]
pub fn connect(path: &str) -> Result<Connection> {
match turso_core::Connection::from_uri(path, false, false) {
Ok((io, conn)) => Ok(Connection { conn, io }),
Ok((io, conn)) => Ok(Connection { conn, _io: io }),
Err(e) => Err(PyErr::new::<ProgrammingError, _>(format!(
"Failed to create connection: {:?}",
e

@@ -158,6 +158,25 @@ def test_commit(provider):
assert record

# Test case for: https://github.com/tursodatabase/turso/issues/2002
@pytest.mark.parametrize("provider", ["sqlite3", "turso"])
def test_first_rollback(provider, tmp_path):
db_file = tmp_path / "test_first_rollback.db"

conn = connect(provider, str(db_file))
cur = conn.cursor()
cur.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username TEXT)")
cur.execute("INSERT INTO users VALUES (1, 'alice')")
cur.execute("INSERT INTO users VALUES (2, 'bob')")

conn.rollback()

cur.execute("SELECT * FROM users")
users = cur.fetchall()

assert users == []
conn.close()

@pytest.mark.parametrize("provider", ["sqlite3", "turso"])
def test_with_statement(provider):
with connect(provider, "tests/database.db") as conn:

@@ -569,7 +569,7 @@ mod tests {
Ok(_) => panic!("Query succeeded after WAL deletion and DB reopen, but was expected to fail because the table definition should have been in the WAL."),
Err(Error::SqlExecutionFailure(msg)) => {
assert!(
msg.contains("test_large_persistence not found"),
msg.contains("no such table: test_large_persistence"),
"Expected 'test_large_persistence not found' error, but got: {}",
msg
);

4 bindings/wasm/package-lock.json generated

@@ -1,12 +1,12 @@
{
"name": "limbo-wasm",
"version": "0.1.1",
"version": "0.1.2-pre.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "limbo-wasm",
"version": "0.1.1",
"version": "0.1.2-pre.2",
"license": "MIT",
"devDependencies": {
"@playwright/test": "^1.49.1",

@@ -3,7 +3,7 @@
"collaborators": [
"the Limbo authors"
],
"version": "0.1.1",
"version": "0.1.2-pre.2",
"license": "MIT",
"repository": {
"type": "git",

143 cli/app.rs

@@ -1,6 +1,6 @@
use crate::{
commands::{
args::{EchoMode, TimerMode},
args::{EchoMode, HeadersMode, TimerMode},
import::ImportFile,
Command, CommandParser,
},
@@ -24,6 +24,7 @@ use std::{
},
time::{Duration, Instant},
};
use tracing::level_filters::LevelFilter;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
use turso_core::{Connection, Database, LimboError, OpenFlags, Statement, StepResult, Value};
@@ -95,7 +96,7 @@ macro_rules! query_internal {
$body(row)?;
}
StepResult::IO => {
$self.io.run_once()?;
rows.run_once()?;
}
StepResult::Interrupt => break,
StepResult::Done => break,
@@ -175,7 +176,6 @@ impl Limbo {
pub fn with_readline(mut self, mut rl: Editor<LimboHelper, DefaultHistory>) -> Self {
let h = LimboHelper::new(
self.conn.clone(),
self.io.clone(),
self.config.as_ref().map(|c| c.highlight.clone()),
);
rl.set_helper(Some(h));
@@ -644,8 +644,7 @@ impl Limbo {
let _ = self.show_info();
}
Command::Import(args) => {
let mut import_file =
ImportFile::new(self.conn.clone(), self.io.clone(), &mut self.writer);
let mut import_file = ImportFile::new(self.conn.clone(), &mut self.writer);
import_file.import(args)
}
Command::LoadExtension(args) => {
@@ -676,6 +675,12 @@ impl Limbo {
TimerMode::Off => false,
};
}
Command::Headers(headers_mode) => {
self.opts.headers = match headers_mode.mode {
HeadersMode::On => true,
HeadersMode::Off => false,
};
}
},
}
}
@@ -688,62 +693,83 @@ impl Limbo {
) -> anyhow::Result<()> {
match output {
Ok(Some(ref mut rows)) => match self.opts.output_mode {
OutputMode::List => loop {
if self.interrupt_count.load(Ordering::SeqCst) > 0 {
println!("Query interrupted.");
return Ok(());
}
OutputMode::List => {
let mut headers_printed = false;
loop {
if self.interrupt_count.load(Ordering::SeqCst) > 0 {
println!("Query interrupted.");
return Ok(());
}

let start = Instant::now();
let start = Instant::now();

match rows.step() {
Ok(StepResult::Row) => {
if let Some(ref mut stats) = statistics {
stats.execute_time_elapsed_samples.push(start.elapsed());
}
let row = rows.row().unwrap();
for (i, value) in row.get_values().enumerate() {
if i > 0 {
let _ = self.writer.write(b"|");
match rows.step() {
Ok(StepResult::Row) => {
if let Some(ref mut stats) = statistics {
stats.execute_time_elapsed_samples.push(start.elapsed());
}
if matches!(value, Value::Null) {
let _ = self.writer.write(self.opts.null_value.as_bytes())?;
} else {
let _ = self.writer.write(format!("{}", value).as_bytes())?;

// Print headers if enabled and not already printed
if self.opts.headers && !headers_printed {
for i in 0..rows.num_columns() {
if i > 0 {
let _ = self.writer.write(b"|");
}
let _ =
self.writer.write(rows.get_column_name(i).as_bytes());
}
let _ = self.writeln("");
headers_printed = true;
}

let row = rows.row().unwrap();
for (i, value) in row.get_values().enumerate() {
if i > 0 {
let _ = self.writer.write(b"|");
}
if matches!(value, Value::Null) {
let _ =
self.writer.write(self.opts.null_value.as_bytes())?;
} else {
let _ =
self.writer.write(format!("{}", value).as_bytes())?;
}
}
let _ = self.writeln("");
}
Ok(StepResult::IO) => {
let start = Instant::now();
rows.run_once()?;
if let Some(ref mut stats) = statistics {
stats.io_time_elapsed_samples.push(start.elapsed());
}
}
let _ = self.writeln("");
}
Ok(StepResult::IO) => {
let start = Instant::now();
self.io.run_once()?;
if let Some(ref mut stats) = statistics {
stats.io_time_elapsed_samples.push(start.elapsed());
Ok(StepResult::Interrupt) => break,
Ok(StepResult::Done) => {
if let Some(ref mut stats) = statistics {
stats.execute_time_elapsed_samples.push(start.elapsed());
}
break;
}
}
Ok(StepResult::Interrupt) => break,
Ok(StepResult::Done) => {
if let Some(ref mut stats) = statistics {
stats.execute_time_elapsed_samples.push(start.elapsed());
Ok(StepResult::Busy) => {
if let Some(ref mut stats) = statistics {
stats.execute_time_elapsed_samples.push(start.elapsed());
}
let _ = self.writeln("database is busy");
break;
}
break;
}
Ok(StepResult::Busy) => {
if let Some(ref mut stats) = statistics {
stats.execute_time_elapsed_samples.push(start.elapsed());
Err(err) => {
if let Some(ref mut stats) = statistics {
stats.execute_time_elapsed_samples.push(start.elapsed());
}
let report =
miette::Error::from(err).with_source_code(sql.to_owned());
let _ = self.write_fmt(format_args!("{:?}", report));
break;
}
let _ = self.writeln("database is busy");
break;
}
Err(err) => {
if let Some(ref mut stats) = statistics {
stats.execute_time_elapsed_samples.push(start.elapsed());
}
let _ = self.writeln(err.to_string());
break;
}
}
},
}
OutputMode::Pretty => {
if self.interrupt_count.load(Ordering::SeqCst) > 0 {
println!("Query interrupted.");
@@ -806,7 +832,7 @@ impl Limbo {
}
Ok(StepResult::IO) => {
let start = Instant::now();
self.io.run_once()?;
rows.run_once()?;
if let Some(ref mut stats) = statistics {
stats.io_time_elapsed_samples.push(start.elapsed());
}
@@ -881,7 +907,12 @@ impl Limbo {
.with_thread_ids(true)
.with_ansi(should_emit_ansi),
)
.with(EnvFilter::from_default_env().add_directive("rustyline=off".parse().unwrap()))
.with(
EnvFilter::builder()
.with_default_directive(LevelFilter::OFF.into())
.from_env_lossy()
.add_directive("rustyline=off".parse().unwrap()),
)
.try_init()
{
println!("Unable to setup tracing appender: {:?}", e);
@@ -913,7 +944,7 @@ impl Limbo {
}
}
StepResult::IO => {
self.io.run_once()?;
rows.run_once()?;
}
StepResult::Interrupt => break,
StepResult::Done => break,
@@ -969,7 +1000,7 @@ impl Limbo {
}
}
StepResult::IO => {
self.io.run_once()?;
rows.run_once()?;
}
StepResult::Interrupt => break,
StepResult::Done => break,
@@ -1020,7 +1051,7 @@ impl Limbo {
}
}
StepResult::IO => {
self.io.run_once()?;
rows.run_once()?;
}
StepResult::Interrupt => break,
StepResult::Done => break,

@@ -124,3 +124,14 @@ pub struct TimerArgs {
#[arg(value_enum)]
pub mode: TimerMode,
}

#[derive(Debug, Clone, Args)]
pub struct HeadersArgs {
pub mode: HeadersMode,
}

#[derive(ValueEnum, Copy, Clone, Debug, PartialEq, Eq)]
pub enum HeadersMode {
On,
Off,
}

@@ -21,17 +21,12 @@ pub struct ImportArgs {

pub struct ImportFile<'a> {
conn: Arc<Connection>,
io: Arc<dyn turso_core::IO>,
writer: &'a mut dyn Write,
}

impl<'a> ImportFile<'a> {
pub fn new(
conn: Arc<Connection>,
io: Arc<dyn turso_core::IO>,
writer: &'a mut dyn Write,
) -> Self {
Self { conn, io, writer }
pub fn new(conn: Arc<Connection>, writer: &'a mut dyn Write) -> Self {
Self { conn, writer }
}

pub fn import(&mut self, args: ImportArgs) {
@@ -79,7 +74,7 @@ impl<'a> ImportFile<'a> {
while let Ok(x) = rows.step() {
match x {
turso_core::StepResult::IO => {
self.io.run_once().unwrap();
rows.run_once().unwrap();
}
turso_core::StepResult::Done => break,
turso_core::StepResult::Interrupt => break,

@@ -2,8 +2,8 @@ pub mod args;
pub mod import;

use args::{
CwdArgs, EchoArgs, ExitArgs, IndexesArgs, LoadExtensionArgs, NullValueArgs, OpcodesArgs,
OpenArgs, OutputModeArgs, SchemaArgs, SetOutputArgs, TablesArgs, TimerArgs,
CwdArgs, EchoArgs, ExitArgs, HeadersArgs, IndexesArgs, LoadExtensionArgs, NullValueArgs,
OpcodesArgs, OpenArgs, OutputModeArgs, SchemaArgs, SetOutputArgs, TablesArgs, TimerArgs,
};
use clap::Parser;
use import::ImportArgs;
@@ -77,6 +77,9 @@ pub enum Command {
ListIndexes(IndexesArgs),
#[command(name = "timer", display_name = ".timer")]
Timer(TimerArgs),
/// Toggle column headers on/off in list mode
#[command(name = "headers", display_name = ".headers")]
Headers(HeadersArgs),
}

const _HELP_TEMPLATE: &str = "{before-help}{name}

@@ -40,11 +40,7 @@ pub struct LimboHelper {
}

impl LimboHelper {
pub fn new(
conn: Arc<Connection>,
io: Arc<dyn turso_core::IO>,
syntax_config: Option<HighlightConfig>,
) -> Self {
pub fn new(conn: Arc<Connection>, syntax_config: Option<HighlightConfig>) -> Self {
// Load only predefined syntax
let ps = from_uncompressed_data(include_bytes!(concat!(
env!("OUT_DIR"),
@@ -59,7 +55,7 @@ impl LimboHelper {
}
}
LimboHelper {
completer: SqlCompleter::new(conn, io),
completer: SqlCompleter::new(conn),
syntax_set: ps,
theme_set: ts,
syntax_config: syntax_config.unwrap_or_default(),
@@ -141,7 +137,6 @@ impl Highlighter for LimboHelper {

pub struct SqlCompleter<C: Parser + Send + Sync + 'static> {
conn: Arc<Connection>,
io: Arc<dyn turso_core::IO>,
// Has to be a ref cell as Rustyline takes immutable reference to self
// This problem would be solved with Reedline as it uses &mut self for completions
cmd: RefCell<clap::Command>,
@@ -149,10 +144,9 @@ pub struct SqlCompleter<C: Parser + Send + Sync + 'static> {
}

impl<C: Parser + Send + Sync + 'static> SqlCompleter<C> {
pub fn new(conn: Arc<Connection>, io: Arc<dyn turso_core::IO>) -> Self {
pub fn new(conn: Arc<Connection>) -> Self {
Self {
conn,
io,
cmd: C::command().into(),
_cmd_phantom: PhantomData,
}
@@ -228,7 +222,7 @@ impl<C: Parser + Send + Sync + 'static> SqlCompleter<C> {
candidates.push(pair);
}
StepResult::IO => {
try_result!(self.io.run_once(), (prefix_pos, candidates));
try_result!(rows.run_once(), (prefix_pos, candidates));
}
StepResult::Interrupt => break,
StepResult::Done => break,

14 cli/input.rs

@@ -83,6 +83,7 @@ pub struct Settings {
pub io: Io,
pub tracing_output: Option<String>,
pub timer: bool,
pub headers: bool,
}

impl From<Opts> for Settings {
@@ -107,6 +108,7 @@ impl From<Opts> for Settings {
},
tracing_output: opts.tracing_output,
timer: false,
headers: false,
}
}
}
@@ -115,7 +117,7 @@ impl std::fmt::Display for Settings {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Settings:\nOutput mode: {}\nDB: {}\nOutput: {}\nNull value: {}\nCWD: {}\nEcho: {}",
"Settings:\nOutput mode: {}\nDB: {}\nOutput: {}\nNull value: {}\nCWD: {}\nEcho: {}\nHeaders: {}",
self.output_mode,
self.db_file,
match self.is_stdout {
@@ -127,6 +129,10 @@ impl std::fmt::Display for Settings {
match self.echo {
true => "on",
false => "off",
},
match self.headers {
true => "on",
false => "off",
}
)
}
@@ -221,6 +227,12 @@ pub const AFTER_HELP_MSG: &str = r#"Usage Examples:
14. To show names of indexes:
.indexes ?TABLE?

15. To turn on column headers in list mode:
.headers on

16. To turn off column headers in list mode:
.headers off

Note:
- All SQL commands must end with a semicolon (;).
- Special commands start with a dot (.) and are not required to end with a semicolon."#;

@@ -1,7 +1,7 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use pprof::criterion::{Output, PProfProfiler};
use std::sync::Arc;
use turso_core::{Database, PlatformIO, IO};
use turso_core::{Database, PlatformIO};

fn rusqlite_open() -> rusqlite::Connection {
let sqlite_conn = rusqlite::Connection::open("../testing/testing.db").unwrap();
@@ -79,7 +79,6 @@ fn bench_execute_select_rows(criterion: &mut Criterion) {
let mut stmt = limbo_conn
.prepare(format!("SELECT * FROM users LIMIT {}", *i))
.unwrap();
let io = io.clone();
b.iter(|| {
loop {
match stmt.step().unwrap() {
@@ -87,7 +86,7 @@ fn bench_execute_select_rows(criterion: &mut Criterion) {
black_box(stmt.row());
}
turso_core::StepResult::IO => {
let _ = io.run_once();
stmt.run_once().unwrap();
}
turso_core::StepResult::Done => {
break;
@@ -141,7 +140,6 @@ fn bench_execute_select_1(criterion: &mut Criterion) {

group.bench_function("limbo_execute_select_1", |b| {
let mut stmt = limbo_conn.prepare("SELECT 1").unwrap();
let io = io.clone();
b.iter(|| {
loop {
match stmt.step().unwrap() {
@@ -149,7 +147,7 @@ fn bench_execute_select_1(criterion: &mut Criterion) {
black_box(stmt.row());
}
turso_core::StepResult::IO => {
let _ = io.run_once();
stmt.run_once().unwrap();
}
turso_core::StepResult::Done => {
break;
@@ -194,7 +192,6 @@ fn bench_execute_select_count(criterion: &mut Criterion) {

group.bench_function("limbo_execute_select_count", |b| {
let mut stmt = limbo_conn.prepare("SELECT count() FROM users").unwrap();
let io = io.clone();
b.iter(|| {
loop {
match stmt.step().unwrap() {
@@ -202,7 +199,7 @@ fn bench_execute_select_count(criterion: &mut Criterion) {
black_box(stmt.row());
}
turso_core::StepResult::IO => {
let _ = io.run_once();
stmt.run_once().unwrap();
}
turso_core::StepResult::Done => {
break;

@@ -4,7 +4,7 @@ use pprof::{
flamegraph::Options,
};
use std::sync::Arc;
use turso_core::{Database, PlatformIO, IO};
use turso_core::{Database, PlatformIO};

// Title: JSONB Function Benchmarking

@@ -447,13 +447,12 @@ fn bench(criterion: &mut Criterion) {

group.bench_function("Limbo", |b| {
let mut stmt = limbo_conn.prepare(&query).unwrap();
let io = io.clone();
b.iter(|| {
loop {
match stmt.step().unwrap() {
turso_core::StepResult::Row => {}
turso_core::StepResult::IO => {
let _ = io.run_once();
stmt.run_once().unwrap();
}
turso_core::StepResult::Done => {
break;
@@ -606,13 +605,12 @@ fn bench_sequential_jsonb(criterion: &mut Criterion) {

group.bench_function("Limbo - Sequential", |b| {
let mut stmt = limbo_conn.prepare(&query).unwrap();
let io = io.clone();
b.iter(|| {
loop {
match stmt.step().unwrap() {
turso_core::StepResult::Row => {}
turso_core::StepResult::IO => {
let _ = io.run_once();
stmt.run_once().unwrap();
}
turso_core::StepResult::Done => {
break;
@@ -899,13 +897,12 @@ fn bench_json_patch(criterion: &mut Criterion) {

group.bench_function("Limbo", |b| {
let mut stmt = limbo_conn.prepare(&query).unwrap();
let io = io.clone();
b.iter(|| {
loop {
match stmt.step().unwrap() {
turso_core::StepResult::Row => {}
turso_core::StepResult::IO => {
let _ = io.run_once();
stmt.run_once().unwrap();
}
turso_core::StepResult::Done => {
break;

@@ -2,7 +2,7 @@ use std::sync::Arc;

use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, SamplingMode};
use pprof::criterion::{Output, PProfProfiler};
use turso_core::{Database, PlatformIO, IO as _};
use turso_core::{Database, PlatformIO};

const TPC_H_PATH: &str = "../perf/tpc-h/TPC-H.db";

@@ -97,7 +97,7 @@ fn bench_tpc_h_queries(criterion: &mut Criterion) {
black_box(stmt.row());
}
turso_core::StepResult::IO => {
let _ = io.run_once();
stmt.run_once().unwrap();
}
turso_core::StepResult::Done => {
break;

@@ -65,7 +65,10 @@ pub unsafe extern "C" fn execute(
return ResultCode::OK;
}
Ok(StepResult::IO) => {
let _ = conn.pager.io.run_once();
let res = stmt.run_once();
if res.is_err() {
return ResultCode::Error;
}
continue;
}
Ok(StepResult::Interrupt) => return ResultCode::Interrupt,
@@ -154,7 +157,6 @@ pub unsafe extern "C" fn stmt_step(stmt: *mut Stmt) -> ResultCode {
tracing::error!("stmt_step: null connection or context");
return ResultCode::Error;
}
let conn: &Connection = unsafe { &*(stmt._conn as *const Connection) };
let stmt_ctx: &mut Statement = unsafe { &mut *(stmt._ctx as *mut Statement) };
while let Ok(res) = stmt_ctx.step() {
match res {
@@ -162,7 +164,10 @@ pub unsafe extern "C" fn stmt_step(stmt: *mut Stmt) -> ResultCode {
StepResult::Done => return ResultCode::EOF,
StepResult::IO => {
// always handle IO step result internally.
let _ = conn.pager.io.run_once();
let res = stmt_ctx.run_once();
if res.is_err() {
return ResultCode::Error;
}
continue;
}
StepResult::Interrupt => return ResultCode::Interrupt,

@@ -616,7 +616,8 @@ impl Func {
}
}
pub fn resolve_function(name: &str, arg_count: usize) -> Result<Self, LimboError> {
match name {
let normalized_name = crate::util::normalize_ident(name);
match normalized_name.as_str() {
"avg" => {
if arg_count != 1 {
crate::bail_parse_error!("wrong number of arguments to function {}()", name)

@@ -18,7 +18,7 @@ use std::{
io::{ErrorKind, Read, Seek, Write},
sync::Arc,
};
use tracing::{debug, trace};
use tracing::{debug, instrument, trace, Level};

struct OwnedCallbacks(UnsafeCell<Callbacks>);
// We assume locking at the IO level is done by the user.
@@ -219,6 +219,7 @@ impl IO for UnixIO {
Ok(unix_file)
}

#[instrument(err, skip_all, level = Level::INFO)]
fn run_once(&self) -> Result<()> {
if self.callbacks.is_empty() {
return Ok(());
@@ -333,6 +334,7 @@ impl File for UnixFile<'_> {
Ok(())
}

#[instrument(err, skip_all, level = Level::INFO)]
fn pread(&self, pos: usize, c: Completion) -> Result<Arc<Completion>> {
let file = self.file.borrow();
let result = {
@@ -366,6 +368,7 @@ impl File for UnixFile<'_> {
}
}

#[instrument(err, skip_all, level = Level::INFO)]
fn pwrite(
&self,
pos: usize,
@@ -401,6 +404,7 @@ impl File for UnixFile<'_> {
}
}

#[instrument(err, skip_all, level = Level::INFO)]
fn sync(&self, c: Completion) -> Result<Arc<Completion>> {
let file = self.file.borrow();
let result = fs::fsync(file.as_fd());
@@ -415,6 +419,7 @@ impl File for UnixFile<'_> {
}
}

#[instrument(err, skip_all, level = Level::INFO)]
fn size(&self) -> Result<u64> {
let file = self.file.borrow();
Ok(file.metadata()?.len())

127 core/lib.rs

@@ -43,6 +43,7 @@ static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

use crate::storage::{header_accessor, wal::DummyWAL};
use crate::translate::optimizer::optimize_plan;
use crate::translate::pragma::TURSO_CDC_DEFAULT_TABLE_NAME;
use crate::util::{OpenMode, OpenOptions};
use crate::vtab::VirtualTable;
use core::str;
@@ -97,7 +98,7 @@ pub type Result<T, E = LimboError> = std::result::Result<T, E>;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum TransactionState {
Write { change_schema: bool },
Write { schema_did_change: bool },
Read,
None,
}
@@ -217,7 +218,7 @@ impl Database {
if is_empty == 2 {
// parse schema
let conn = db.connect()?;
let schema_version = get_schema_version(&conn, &io)?;
let schema_version = get_schema_version(&conn)?;
schema.write().schema_version = schema_version;
let rows = conn.query("SELECT * FROM sqlite_schema")?;
let mut schema = schema
@@ -225,7 +226,7 @@ impl Database {
.expect("lock on schema should succeed first try");
let syms = conn.syms.borrow();
if let Err(LimboError::ExtensionError(e)) =
parse_schema_rows(rows, &mut schema, io, &syms, None)
parse_schema_rows(rows, &mut schema, &syms, None)
{
// this means that a vtab exists and we no longer have the module loaded. we print
// a warning to the user to load the module
@@ -278,6 +279,8 @@ impl Database {
cache_size: Cell::new(default_cache_size),
readonly: Cell::new(false),
wal_checkpoint_disabled: Cell::new(false),
capture_data_changes: RefCell::new(CaptureDataChangesMode::Off),
closed: Cell::new(false),
});
if let Err(e) = conn.register_builtins() {
return Err(LimboError::ExtensionError(e));
@@ -330,6 +333,8 @@ impl Database {
cache_size: Cell::new(default_cache_size),
readonly: Cell::new(false),
wal_checkpoint_disabled: Cell::new(false),
capture_data_changes: RefCell::new(CaptureDataChangesMode::Off),
closed: Cell::new(false),
});

if let Err(e) = conn.register_builtins() {
@@ -390,7 +395,7 @@ impl Database {
}
}

fn get_schema_version(conn: &Arc<Connection>, io: &Arc<dyn IO>) -> Result<u32> {
fn get_schema_version(conn: &Arc<Connection>) -> Result<u32> {
let mut rows = conn
.query("PRAGMA schema_version")?
.ok_or(LimboError::InternalError(
@@ -409,7 +414,7 @@ fn get_schema_version(conn: &Arc<Connection>, io: &Arc<dyn IO>) -> Result<u32> {
schema_version = Some(row.get::<i64>(0)? as u32);
}
StepResult::IO => {
io.run_once()?;
rows.run_once()?;
}
StepResult::Interrupt => {
return Err(LimboError::InternalError(
@@ -434,6 +439,39 @@ fn get_schema_version(conn: &Arc<Connection>, io: &Arc<dyn IO>) -> Result<u32> {
}
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub enum CaptureDataChangesMode {
Off,
RowidOnly { table: String },
}

impl CaptureDataChangesMode {
pub fn parse(value: &str) -> Result<CaptureDataChangesMode> {
let (mode, table) = value
.split_once(",")
.unwrap_or((value, TURSO_CDC_DEFAULT_TABLE_NAME));
match mode {
"off" => Ok(CaptureDataChangesMode::Off),
"rowid-only" => Ok(CaptureDataChangesMode::RowidOnly { table: table.to_string() }),
_ => Err(LimboError::InvalidArgument(
"unexpected pragma value: expected '<mode>' or '<mode>,<cdc-table-name>' parameter where mode is one of off|rowid-only".to_string(),
))
}
}
pub fn mode_name(&self) -> &str {
match self {
CaptureDataChangesMode::Off => "off",
CaptureDataChangesMode::RowidOnly { .. } => "rowid-only",
}
}
pub fn table(&self) -> Option<&str> {
match self {
CaptureDataChangesMode::Off => None,
CaptureDataChangesMode::RowidOnly { table } => Some(table.as_str()),
}
}
}
|
||||
|
||||
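
The pragma-value parser added above accepts either a bare mode or a mode plus a CDC table name. A small usage sketch (types and names as they appear in this diff):

    let mode = CaptureDataChangesMode::parse("rowid-only,my_cdc_table")?;
    assert_eq!(mode.mode_name(), "rowid-only");
    assert_eq!(mode.table(), Some("my_cdc_table"));
    // "rowid-only" alone falls back to TURSO_CDC_DEFAULT_TABLE_NAME, and "off" disables capture.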
pub struct Connection {
_db: Arc<Database>,
pager: Rc<Pager>,
@@ -450,11 +488,16 @@ pub struct Connection {
cache_size: Cell<i32>,
readonly: Cell<bool>,
wal_checkpoint_disabled: Cell<bool>,
capture_data_changes: RefCell<CaptureDataChangesMode>,
closed: Cell<bool>,
}

impl Connection {
#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
pub fn prepare(self: &Arc<Connection>, sql: impl AsRef<str>) -> Result<Statement> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
if sql.as_ref().is_empty() {
return Err(LimboError::InvalidArgument(
"The supplied SQL string contains no statements".to_string(),
@@ -494,8 +537,11 @@ impl Connection {
}
}

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
pub fn query(self: &Arc<Connection>, sql: impl AsRef<str>) -> Result<Option<Statement>> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
let sql = sql.as_ref();
tracing::trace!("Querying: {}", sql);
let mut parser = Parser::new(sql.as_bytes());
@@ -510,12 +556,15 @@ impl Connection {
}
}

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
pub(crate) fn run_cmd(
self: &Arc<Connection>,
cmd: Cmd,
input: &str,
) -> Result<Option<Statement>> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
let syms = self.syms.borrow();
match cmd {
Cmd::Stmt(ref stmt) | Cmd::Explain(ref stmt) => {
@@ -563,8 +612,11 @@ impl Connection {

/// Execute will run a query from start to finish, taking ownership of I/O, because it will run pending I/Os if it didn't finish.
/// TODO: make this api async
#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
pub fn execute(self: &Arc<Connection>, sql: impl AsRef<str>) -> Result<()> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
let sql = sql.as_ref();
let mut parser = Parser::new(sql.as_bytes());
while let Some(cmd) = parser.next()? {
@@ -610,7 +662,7 @@ impl Connection {
if matches!(res, StepResult::Done) {
break;
}
self._db.io.run_once()?;
self.run_once()?;
}
}
}
@@ -618,6 +670,20 @@ impl Connection {
Ok(())
}

fn run_once(&self) -> Result<()> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
let res = self._db.io.run_once();
if res.is_err() {
let state = self.transaction_state.get();
if let TransactionState::Write { schema_did_change } = state {
self.pager.rollback(schema_did_change, self)?
}
}
res
}
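
Every public entry point above now consults the new `closed` flag before doing any work, so a closed connection fails fast instead of touching the pager. A hedged usage sketch:

    let conn = db.connect()?;
    conn.close()?;
    assert!(conn.prepare("SELECT 1").is_err()); // InternalError("Connection closed")
    assert!(conn.close().is_ok());              // close() on an already-closed connection is a no-op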
#[cfg(feature = "fs")]
pub fn from_uri(
uri: &str,
@@ -676,6 +742,9 @@ impl Connection {
/// If the WAL size is over the checkpoint threshold, it will checkpoint the WAL to
/// the database file and then fsync the database file.
pub fn cacheflush(&self) -> Result<PagerCacheflushStatus> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
self.pager.cacheflush(self.wal_checkpoint_disabled.get())
}

@@ -685,12 +754,19 @@ impl Connection {
}

pub fn checkpoint(&self) -> Result<CheckpointResult> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
self.pager
.wal_checkpoint(self.wal_checkpoint_disabled.get())
}

/// Close a connection and checkpoint.
pub fn close(&self) -> Result<()> {
if self.closed.get() {
return Ok(());
}
self.closed.set(true);
self.pager
.checkpoint_shutdown(self.wal_checkpoint_disabled.get())
}
@@ -724,6 +800,13 @@ impl Connection {
self.cache_size.set(size);
}

pub fn get_capture_data_changes(&self) -> std::cell::Ref<'_, CaptureDataChangesMode> {
self.capture_data_changes.borrow()
}
pub fn set_capture_data_changes(&self, opts: CaptureDataChangesMode) {
self.capture_data_changes.replace(opts);
}

#[cfg(feature = "fs")]
pub fn open_new(&self, path: &str, vfs: &str) -> Result<(Arc<dyn IO>, Arc<Database>)> {
Database::open_with_vfs(&self._db, path, vfs)
@@ -751,12 +834,15 @@ impl Connection {
}

pub fn parse_schema_rows(self: &Arc<Connection>) -> Result<()> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
let rows = self.query("SELECT * FROM sqlite_schema")?;
let mut schema = self.schema.borrow_mut();
{
let syms = self.syms.borrow();
if let Err(LimboError::ExtensionError(e)) =
parse_schema_rows(rows, &mut schema, self.pager.io.clone(), &syms, None)
parse_schema_rows(rows, &mut schema, &syms, None)
{
// this means that a vtab exists and we no longer have the module loaded. we print
// a warning to the user to load the module
@@ -769,6 +855,9 @@ impl Connection {
// Clearly there is something to improve here, Vec<Vec<Value>> isn't a cup of tea
/// Query the current rows/values of `pragma_name`.
pub fn pragma_query(self: &Arc<Connection>, pragma_name: &str) -> Result<Vec<Vec<Value>>> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
let pragma = format!("PRAGMA {}", pragma_name);
let mut stmt = self.prepare(pragma)?;
let mut results = Vec::new();
@@ -797,6 +886,9 @@ impl Connection {
pragma_name: &str,
pragma_value: V,
) -> Result<Vec<Vec<Value>>> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
let pragma = format!("PRAGMA {} = {}", pragma_name, pragma_value);
let mut stmt = self.prepare(pragma)?;
let mut results = Vec::new();
@@ -827,6 +919,9 @@ impl Connection {
pragma_name: &str,
pragma_value: V,
) -> Result<Vec<Vec<Value>>> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
let pragma = format!("PRAGMA {}({})", pragma_name, pragma_value);
let mut stmt = self.prepare(pragma)?;
let mut results = Vec::new();
@@ -883,7 +978,15 @@ impl Statement {
}

pub fn run_once(&self) -> Result<()> {
self.pager.io.run_once()
let res = self.pager.io.run_once();
if res.is_err() {
let state = self.program.connection.transaction_state.get();
if let TransactionState::Write { schema_did_change } = state {
self.pager
.rollback(schema_did_change, &self.program.connection)?
}
}
res
}
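
A hedged usage sketch of the pragma helpers guarded above (pragma_query is named in this diff; the `=`-form helper's name is assumed here to be pragma_update):

    let _rows = conn.pragma_query("cache_size")?;        // runs: PRAGMA cache_size
    let _rows = conn.pragma_update("cache_size", 2000)?; // runs: PRAGMA cache_size = 2000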

pub fn num_columns(&self) -> usize {
@@ -7,21 +7,21 @@ use turso_sqlite3_parser::ast::PragmaName;
bitflags! {
// Flag names match those used in SQLite:
// https://github.com/sqlite/sqlite/blob/b3c1884b65400da85636458298bd77cbbfdfb401/tool/mkpragmatab.tcl#L22-L29
struct PragmaFlags: u8 {
const NeedSchema = 0x01;
const NoColumns = 0x02;
const NoColumns1 = 0x04;
const ReadOnly = 0x08;
const Result0 = 0x10;
const Result1 = 0x20;
const SchemaOpt = 0x40;
const SchemaReq = 0x80;
pub struct PragmaFlags: u8 {
const NeedSchema = 0x01; /* Force schema load before running */
const NoColumns = 0x02; /* OP_ResultRow called with zero columns */
const NoColumns1 = 0x04; /* zero columns if RHS argument is present */
const ReadOnly = 0x08; /* Read-only HEADER_VALUE */
const Result0 = 0x10; /* Acts as query when no argument */
const Result1 = 0x20; /* Acts as query when has one argument */
const SchemaOpt = 0x40; /* Schema restricts name search if present */
const SchemaReq = 0x80; /* Schema required - "main" is default */
}
}

struct Pragma {
flags: PragmaFlags,
columns: &'static [&'static str],
pub struct Pragma {
pub flags: PragmaFlags,
pub columns: &'static [&'static str],
}

impl Pragma {
@@ -30,7 +30,7 @@ impl Pragma {
}
}

fn pragma_for(pragma: PragmaName) -> Pragma {
pub fn pragma_for(pragma: PragmaName) -> Pragma {
use PragmaName::*;

match pragma {
@@ -77,6 +77,10 @@ fn pragma_for(pragma: PragmaName) -> Pragma {
PragmaFlags::NeedSchema | PragmaFlags::ReadOnly | PragmaFlags::Result0,
&["message"],
),
UnstableCaptureDataChangesConn => Pragma::new(
PragmaFlags::NeedSchema | PragmaFlags::Result0 | PragmaFlags::SchemaReq,
&["mode", "table"],
),
}
}
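
With PragmaFlags now public, callers can combine and test flags the usual bitflags way; a minimal sketch:

    let flags = PragmaFlags::NeedSchema | PragmaFlags::Result0 | PragmaFlags::SchemaReq;
    assert!(flags.contains(PragmaFlags::Result0));
    assert!(!flags.contains(PragmaFlags::ReadOnly));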
File diff suppressed because it is too large
@@ -2,6 +2,7 @@ use crate::error::LimboError;
use crate::io::CompletionType;
use crate::{io::Completion, Buffer, Result};
use std::{cell::RefCell, sync::Arc};
use tracing::{instrument, Level};

/// DatabaseStorage is an interface to a database file that consists of pages.
///
@@ -32,6 +33,7 @@ unsafe impl Sync for DatabaseFile {}

#[cfg(feature = "fs")]
impl DatabaseStorage for DatabaseFile {
#[instrument(skip_all, level = Level::INFO)]
fn read_page(&self, page_idx: usize, c: Completion) -> Result<()> {
let r = c.as_read();
let size = r.buf().len();
@@ -44,6 +46,7 @@ impl DatabaseStorage for DatabaseFile {
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
fn write_page(
&self,
page_idx: usize,
@@ -60,11 +63,13 @@ impl DatabaseStorage for DatabaseFile {
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
fn sync(&self, c: Completion) -> Result<()> {
let _ = self.file.sync(c)?;
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
fn size(&self) -> Result<u64> {
self.file.size()
}
@@ -85,6 +90,7 @@ unsafe impl Send for FileMemoryStorage {}
unsafe impl Sync for FileMemoryStorage {}

impl DatabaseStorage for FileMemoryStorage {
#[instrument(skip_all, level = Level::INFO)]
fn read_page(&self, page_idx: usize, c: Completion) -> Result<()> {
let r = match c.completion_type {
CompletionType::Read(ref r) => r,
@@ -100,6 +106,7 @@ impl DatabaseStorage for FileMemoryStorage {
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
fn write_page(
&self,
page_idx: usize,
@@ -115,11 +122,13 @@ impl DatabaseStorage for FileMemoryStorage {
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
fn sync(&self, c: Completion) -> Result<()> {
let _ = self.file.sync(c)?;
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
fn size(&self) -> Result<u64> {
self.file.size()
}
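
DatabaseStorage is callback-driven: read_page/write_page take a Completion and return immediately, and a run_once-style pump fires the callback when the operation finishes. A self-contained miniature of that shape (hypothetical names, not this crate's real types):

    struct Done(Box<dyn FnOnce(i32)>);

    struct MiniIo {
        pending: Vec<(Vec<u8>, Done)>,
    }

    impl MiniIo {
        fn read_page(&mut self, data: Vec<u8>, c: Done) {
            self.pending.push((data, c)); // submit, do not block
        }
        fn run_once(&mut self) {
            if let Some((data, c)) = self.pending.pop() {
                (c.0)(data.len() as i32); // "I/O finished": fire the completion
            }
        }
    }

    fn main() {
        let mut io = MiniIo { pending: Vec::new() };
        io.read_page(vec![0u8; 4096], Done(Box::new(|n| println!("read {n} bytes"))));
        io.run_once();
    }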
@@ -14,7 +14,7 @@ use std::collections::HashSet;
use std::rc::Rc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use tracing::{trace, Level};
use tracing::{instrument, trace, Level};

use super::btree::{btree_init_page, BTreePage};
use super::page_cache::{CacheError, CacheResizeResult, DumbLruPageCache, PageCacheKey};
@@ -471,6 +471,7 @@ impl Pager {

/// This method is used to allocate a new root page for a btree, both for tables and indexes
/// FIXME: handle no room in page cache
#[instrument(skip_all, level = Level::INFO)]
pub fn btree_create(&self, flags: &CreateBTreeFlags) -> Result<CursorResult<u32>> {
let page_type = match flags {
_ if flags.is_table() => PageType::TableLeaf,
@@ -479,7 +480,7 @@ impl Pager {
};
#[cfg(feature = "omit_autovacuum")]
{
let page = self.do_allocate_page(page_type, 0, BtreePageAllocMode::Any);
let page = self.do_allocate_page(page_type, 0, BtreePageAllocMode::Any)?;
let page_id = page.get().get().id;
Ok(CursorResult::Ok(page_id as u32))
}
@@ -490,7 +491,7 @@ impl Pager {
let auto_vacuum_mode = self.auto_vacuum_mode.borrow();
match *auto_vacuum_mode {
AutoVacuumMode::None => {
let page = self.do_allocate_page(page_type, 0, BtreePageAllocMode::Any);
let page = self.do_allocate_page(page_type, 0, BtreePageAllocMode::Any)?;
let page_id = page.get().get().id;
Ok(CursorResult::Ok(page_id as u32))
}
@@ -514,7 +515,7 @@ impl Pager {
page_type,
0,
BtreePageAllocMode::Exact(root_page_num),
);
)?;
let allocated_page_id = page.get().get().id as u32;
if allocated_page_id != root_page_num {
// TODO(Zaid): Handle swapping the allocated page with the desired root page
@@ -558,8 +559,8 @@ impl Pager {
page_type: PageType,
offset: usize,
_alloc_mode: BtreePageAllocMode,
) -> BTreePage {
let page = self.allocate_page().unwrap();
) -> Result<BTreePage> {
let page = self.allocate_page()?;
let page = Arc::new(BTreePageInner {
page: RefCell::new(page),
});
@@ -569,7 +570,7 @@ impl Pager {
page.get().get().id,
page.get().get_contents().page_type()
);
page
Ok(page)
}

/// The "usable size" of a database page is the page size specified by the 2-byte integer at offset 16
@@ -589,6 +590,7 @@ impl Pager {
}

#[inline(always)]
#[instrument(skip_all, level = Level::INFO)]
pub fn begin_read_tx(&self) -> Result<CursorResult<LimboResult>> {
// We allocate the first page lazily in the first transaction
match self.maybe_allocate_page1()? {
@@ -598,6 +600,7 @@ impl Pager {
Ok(CursorResult::Ok(self.wal.borrow_mut().begin_read_tx()?))
}

#[instrument(skip_all, level = Level::INFO)]
fn maybe_allocate_page1(&self) -> Result<CursorResult<()>> {
if self.is_empty.load(Ordering::SeqCst) < DB_STATE_INITIALIZED {
if let Ok(_lock) = self.init_lock.try_lock() {
@@ -621,6 +624,7 @@ impl Pager {
}

#[inline(always)]
#[instrument(skip_all, level = Level::INFO)]
pub fn begin_write_tx(&self) -> Result<CursorResult<LimboResult>> {
// TODO(Diego): The only reason we allocate page1 here is that OpenEphemeral needs a write transaction
// we should have a unique API to begin transactions, something like sqlite3BtreeBeginTrans
@@ -631,10 +635,11 @@ impl Pager {
Ok(CursorResult::Ok(self.wal.borrow_mut().begin_write_tx()?))
}

#[instrument(skip_all, level = Level::INFO)]
pub fn end_tx(
&self,
rollback: bool,
change_schema: bool,
schema_did_change: bool,
connection: &Connection,
wal_checkpoint_disabled: bool,
) -> Result<PagerCacheflushStatus> {
@@ -648,7 +653,7 @@ impl Pager {
match cacheflush_status {
PagerCacheflushStatus::IO => Ok(PagerCacheflushStatus::IO),
PagerCacheflushStatus::Done(_) => {
let maybe_schema_pair = if change_schema {
let maybe_schema_pair = if schema_did_change {
let schema = connection.schema.borrow().clone();
// Lock first before writing to the database schema in case someone tries to read the schema before it's updated
let db_schema = connection._db.schema.write();
@@ -666,13 +671,14 @@ impl Pager {
}
}

#[instrument(skip_all, level = Level::INFO)]
pub fn end_read_tx(&self) -> Result<()> {
self.wal.borrow().end_read_tx()?;
Ok(())
}

/// Reads a page from the database.
#[tracing::instrument(skip_all, level = Level::DEBUG)]
#[tracing::instrument(skip_all, level = Level::INFO)]
pub fn read_page(&self, page_idx: usize) -> Result<PageRef, LimboError> {
tracing::trace!("read_page(page_idx = {})", page_idx);
let mut page_cache = self.page_cache.write();
@@ -759,11 +765,12 @@ impl Pager {
/// In the base case, it will write the dirty pages to the WAL and then fsync the WAL.
/// If the WAL size is over the checkpoint threshold, it will checkpoint the WAL to
/// the database file and then fsync the database file.
#[instrument(skip_all, level = Level::INFO)]
pub fn cacheflush(&self, wal_checkpoint_disabled: bool) -> Result<PagerCacheflushStatus> {
let mut checkpoint_result = CheckpointResult::default();
loop {
let res = loop {
let state = self.flush_info.borrow().state;
trace!("cacheflush {:?}", state);
trace!(?state);
match state {
FlushState::Start => {
let db_size = header_accessor::get_database_size(self)?;
@@ -795,7 +802,6 @@ impl Pager {
let in_flight = *self.flush_info.borrow().in_flight_writes.borrow();
if in_flight == 0 {
self.flush_info.borrow_mut().state = FlushState::SyncWal;
self.wal.borrow_mut().finish_append_frames_commit()?;
} else {
return Ok(PagerCacheflushStatus::IO);
}
@@ -807,9 +813,7 @@ impl Pager {

if wal_checkpoint_disabled || !self.wal.borrow().should_checkpoint() {
self.flush_info.borrow_mut().state = FlushState::Start;
return Ok(PagerCacheflushStatus::Done(
PagerCacheflushResult::WalWritten,
));
break PagerCacheflushResult::WalWritten;
}
self.flush_info.borrow_mut().state = FlushState::Checkpoint;
}
@@ -831,16 +835,17 @@ impl Pager {
return Ok(PagerCacheflushStatus::IO);
} else {
self.flush_info.borrow_mut().state = FlushState::Start;
break;
break PagerCacheflushResult::Checkpointed(checkpoint_result);
}
}
}
}
Ok(PagerCacheflushStatus::Done(
PagerCacheflushResult::Checkpointed(checkpoint_result),
))
};
// We should only signal that we finished appending frames after WAL sync to avoid inconsistencies when sync fails
self.wal.borrow_mut().finish_append_frames_commit()?;
Ok(PagerCacheflushStatus::Done(res))
}

#[instrument(skip_all, level = Level::INFO)]
pub fn wal_get_frame(
&self,
frame_no: u32,
@@ -856,11 +861,12 @@ impl Pager {
)
}

#[instrument(skip_all, level = Level::INFO, target = "pager_checkpoint",)]
pub fn checkpoint(&self) -> Result<CheckpointStatus> {
let mut checkpoint_result = CheckpointResult::default();
loop {
let state = *self.checkpoint_state.borrow();
trace!("pager_checkpoint(state={:?})", state);
trace!(?state);
match state {
CheckpointState::Checkpoint => {
let in_flight = self.checkpoint_inflight.clone();
@@ -932,6 +938,7 @@ impl Pager {
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
pub fn wal_checkpoint(&self, wal_checkpoint_disabled: bool) -> Result<CheckpointResult> {
if wal_checkpoint_disabled {
return Ok(CheckpointResult {
@@ -947,7 +954,7 @@ impl Pager {
CheckpointMode::Passive,
) {
Ok(CheckpointStatus::IO) => {
let _ = self.io.run_once();
self.io.run_once()?;
}
Ok(CheckpointStatus::Done(res)) => {
checkpoint_result = res;
@@ -965,6 +972,7 @@ impl Pager {

// Providing a page is optional; if provided, it will be used to avoid reading the page from disk.
// This is implemented in accordance with SQLite's freepage2() function.
#[instrument(skip_all, level = Level::INFO)]
pub fn free_page(&self, page: Option<PageRef>, page_id: usize) -> Result<()> {
tracing::trace!("free_page(page_id={})", page_id);
const TRUNK_PAGE_HEADER_SIZE: usize = 8;
@@ -1036,6 +1044,7 @@ impl Pager {
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
pub fn allocate_page1(&self) -> Result<CursorResult<PageRef>> {
let state = self.allocate_page1_state.borrow().clone();
match state {
@@ -1111,6 +1120,7 @@ impl Pager {
*/
// FIXME: handle no room in page cache
#[allow(clippy::readonly_write_lock)]
#[instrument(skip_all, level = Level::INFO)]
pub fn allocate_page(&self) -> Result<PageRef> {
let old_db_size = header_accessor::get_database_size(self)?;
#[allow(unused_mut)]
@@ -1195,12 +1205,18 @@ impl Pager {
(page_size - reserved_space) as usize
}

pub fn rollback(&self, change_schema: bool, connection: &Connection) -> Result<(), LimboError> {
#[instrument(skip_all, level = Level::INFO)]
pub fn rollback(
&self,
schema_did_change: bool,
connection: &Connection,
) -> Result<(), LimboError> {
tracing::debug!(schema_did_change);
self.dirty_pages.borrow_mut().clear();
let mut cache = self.page_cache.write();
cache.unset_dirty_all_pages();
cache.clear().expect("failed to clear page cache");
if change_schema {
if schema_did_change {
let prev_schema = connection._db.schema.read().clone();
connection.schema.replace(prev_schema);
}
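
The cacheflush rewrite above replaces scattered `return Ok(...)` exits with `break value`, so finish_append_frames_commit() runs exactly once after the WAL sync succeeds. A self-contained miniature of the pattern (hypothetical types, not the real pager state machine):

    enum Flush { SyncWal, Done }

    fn drive(mut state: Flush) -> &'static str {
        let res = loop {
            match state {
                Flush::SyncWal => state = Flush::Done, // advance the state machine
                Flush::Done => break "WalWritten",     // the loop yields a value
            }
        };
        // post-loop work runs exactly once, mirroring finish_append_frames_commit()
        res
    }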
@@ -45,11 +45,18 @@

use tracing::{instrument, Level};

use super::pager::PageRef;
use super::wal::LimboRwLock;
use crate::error::LimboError;
use crate::fast_lock::SpinLock;
use crate::io::{
Buffer, Complete, Completion, CompletionType, ReadCompletion, SyncCompletion, WriteCompletion,
};
use crate::storage::btree::offset::{
BTREE_CELL_CONTENT_AREA, BTREE_CELL_COUNT, BTREE_FIRST_FREEBLOCK, BTREE_FRAGMENTED_BYTES_COUNT,
BTREE_PAGE_TYPE, BTREE_RIGHTMOST_PTR,
};
use crate::storage::btree::{payload_overflow_threshold_max, payload_overflow_threshold_min};
use crate::storage::buffer_pool::BufferPool;
use crate::storage::database::DatabaseStorage;
use crate::storage::pager::Pager;
@@ -65,9 +72,6 @@ use std::rc::Rc;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;

use super::pager::PageRef;
use super::wal::LimboRwLock;

/// The size of the database header in bytes.
pub const DATABASE_HEADER_SIZE: usize = 100;
// DEFAULT_CACHE_SIZE negative values mean that we store the amount of pages a XKiB of memory can hold.
@@ -88,6 +92,9 @@ pub const DEFAULT_PAGE_SIZE: u16 = 4096;

pub const DATABASE_HEADER_PAGE_ID: usize = 1;

/// The minimum size of a cell in bytes.
pub const MINIMUM_CELL_SIZE: usize = 4;

/// The database header.
/// The first 100 bytes of the database file comprise the database file header.
/// The database file header is divided into fields as shown by the table below.
@@ -357,6 +364,8 @@ pub struct OverflowCell {

#[derive(Debug)]
pub struct PageContent {
/// the position where page content starts. it's 100 for page 1 (the database file header is 100 bytes),
/// 0 for all other pages.
pub offset: usize,
pub buffer: Arc<RefCell<Buffer>>,
pub overflow_cells: Vec<OverflowCell>,
@@ -373,6 +382,7 @@ impl Clone for PageContent {
}
}

const CELL_POINTER_SIZE_BYTES: usize = 2;
impl PageContent {
pub fn new(offset: usize, buffer: Arc<RefCell<Buffer>>) -> Self {
Self {
@@ -383,7 +393,7 @@ impl PageContent {
}

pub fn page_type(&self) -> PageType {
self.read_u8(0).try_into().unwrap()
self.read_u8(BTREE_PAGE_TYPE).try_into().unwrap()
}

pub fn maybe_page_type(&self) -> Option<PageType> {
@@ -452,19 +462,14 @@ impl PageContent {
buf[self.offset + pos..self.offset + pos + 4].copy_from_slice(&value.to_be_bytes());
}

/// The second field of the b-tree page header is the offset of the first freeblock, or zero if there are no freeblocks on the page.
/// A freeblock is a structure used to identify unallocated space within a b-tree page.
/// Freeblocks are organized as a chain.
///
/// To be clear, freeblocks do not mean the regular unallocated free space to the left of the cell content area pointer, but instead
/// blocks of at least 4 bytes WITHIN the cell content area that are not in use due to e.g. deletions.
/// The offset of the first freeblock, or zero if there are no freeblocks on the page.
pub fn first_freeblock(&self) -> u16 {
self.read_u16(1)
self.read_u16(BTREE_FIRST_FREEBLOCK)
}

/// The number of cells on the page.
pub fn cell_count(&self) -> usize {
self.read_u16(3) as usize
self.read_u16(BTREE_CELL_COUNT) as usize
}

/// The size of the cell pointer array in bytes.
@@ -486,11 +491,13 @@ impl PageContent {
}

/// The start of the cell content area.
/// SQLite strives to place cells as far toward the end of the b-tree page as it can,
/// in order to leave space for future growth of the cell pointer array.
/// = the cell content area pointer moves leftward as cells are added to the page
pub fn cell_content_area(&self) -> u16 {
self.read_u16(5)
pub fn cell_content_area(&self) -> u32 {
let offset = self.read_u16(BTREE_CELL_CONTENT_AREA);
if offset == 0 {
MAX_PAGE_SIZE
} else {
offset as u32
}
}

/// The size of the page header in bytes.
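
The widening of cell_content_area from u16 to u32 encodes a quirk of the SQLite file format: the content-area offset is stored in 2 bytes, and a stored 0 means 65536 (the maximum 64 KiB page size), which cannot be represented in a u16. A minimal sketch, assuming MAX_PAGE_SIZE is 65536:

    fn content_area(stored: u16) -> u32 {
        // 0 is the on-disk encoding for 65536, per the SQLite file format
        if stored == 0 { 65_536 } else { stored as u32 }
    }

    assert_eq!(content_area(0), 65_536);
    assert_eq!(content_area(4_096), 4_096);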
@@ -504,16 +511,15 @@ impl PageContent {
}
}

/// The total number of bytes in all fragments is stored in the fifth field of the b-tree page header.
/// Fragments are isolated groups of 1, 2, or 3 unused bytes within the cell content area.
/// The total number of bytes in all fragments
pub fn num_frag_free_bytes(&self) -> u8 {
self.read_u8(7)
self.read_u8(BTREE_FRAGMENTED_BYTES_COUNT)
}

pub fn rightmost_pointer(&self) -> Option<u32> {
match self.page_type() {
PageType::IndexInterior => Some(self.read_u32(8)),
PageType::TableInterior => Some(self.read_u32(8)),
PageType::IndexInterior => Some(self.read_u32(BTREE_RIGHTMOST_PTR)),
PageType::TableInterior => Some(self.read_u32(BTREE_RIGHTMOST_PTR)),
PageType::IndexLeaf => None,
PageType::TableLeaf => None,
}
@@ -521,48 +527,35 @@ impl PageContent {

pub fn rightmost_pointer_raw(&self) -> Option<*mut u8> {
match self.page_type() {
PageType::IndexInterior | PageType::TableInterior => {
Some(unsafe { self.as_ptr().as_mut_ptr().add(self.offset + 8) })
}
PageType::IndexInterior | PageType::TableInterior => Some(unsafe {
self.as_ptr()
.as_mut_ptr()
.add(self.offset + BTREE_RIGHTMOST_PTR)
}),
PageType::IndexLeaf => None,
PageType::TableLeaf => None,
}
}

pub fn cell_get(
&self,
idx: usize,
payload_overflow_threshold_max: usize,
payload_overflow_threshold_min: usize,
usable_size: usize,
) -> Result<BTreeCell> {
pub fn cell_get(&self, idx: usize, usable_size: usize) -> Result<BTreeCell> {
tracing::trace!("cell_get(idx={})", idx);
let buf = self.as_ptr();

let ncells = self.cell_count();
// the page header is 12 bytes for interior pages, 8 bytes for leaf pages
// this is because the last 4 bytes in the interior page's header are used for the rightmost pointer.
let cell_pointer_array_start = self.header_size();
assert!(
idx < ncells,
"cell_get: idx out of bounds: idx={}, ncells={}",
idx,
ncells
);
let cell_pointer = cell_pointer_array_start + (idx * 2);
let cell_pointer_array_start = self.header_size();
let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
let cell_pointer = self.read_u16(cell_pointer) as usize;

// SAFETY: this buffer is valid as long as the page is alive. We could store the page in the cell and do some lifetime magic
// but that is extra memory for no reason at all. Just be careful like in the old times :).
let static_buf: &'static [u8] = unsafe { std::mem::transmute::<&[u8], &'static [u8]>(buf) };
read_btree_cell(
static_buf,
&self.page_type(),
cell_pointer,
payload_overflow_threshold_max,
payload_overflow_threshold_min,
usable_size,
)
read_btree_cell(static_buf, self, cell_pointer, usable_size)
}

/// Read the rowid of a table interior cell.
@@ -570,30 +563,31 @@ impl PageContent {
pub fn cell_table_interior_read_rowid(&self, idx: usize) -> Result<i64> {
debug_assert!(self.page_type() == PageType::TableInterior);
let buf = self.as_ptr();
const INTERIOR_PAGE_HEADER_SIZE_BYTES: usize = 12;
let cell_pointer_array_start = INTERIOR_PAGE_HEADER_SIZE_BYTES;
let cell_pointer = cell_pointer_array_start + (idx * 2);
let cell_pointer_array_start = self.header_size();
let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
let cell_pointer = self.read_u16(cell_pointer) as usize;
const LEFT_CHILD_PAGE_SIZE_BYTES: usize = 4;
let (rowid, _) = read_varint(&buf[cell_pointer + LEFT_CHILD_PAGE_SIZE_BYTES..])?;
Ok(rowid as i64)
}

/// Read the left child page of a table interior cell.
/// Read the left child page of a table interior cell or an index interior cell.
#[inline(always)]
pub fn cell_table_interior_read_left_child_page(&self, idx: usize) -> Result<u32> {
debug_assert!(self.page_type() == PageType::TableInterior);
pub fn cell_interior_read_left_child_page(&self, idx: usize) -> u32 {
debug_assert!(
self.page_type() == PageType::TableInterior
|| self.page_type() == PageType::IndexInterior
);
let buf = self.as_ptr();
const INTERIOR_PAGE_HEADER_SIZE_BYTES: usize = 12;
let cell_pointer_array_start = INTERIOR_PAGE_HEADER_SIZE_BYTES;
let cell_pointer = cell_pointer_array_start + (idx * 2);
let cell_pointer_array_start = self.header_size();
let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
let cell_pointer = self.read_u16(cell_pointer) as usize;
Ok(u32::from_be_bytes([
u32::from_be_bytes([
buf[cell_pointer],
buf[cell_pointer + 1],
buf[cell_pointer + 2],
buf[cell_pointer + 3],
]))
])
}

/// Read the rowid of a table leaf cell.
@@ -601,9 +595,8 @@ impl PageContent {
pub fn cell_table_leaf_read_rowid(&self, idx: usize) -> Result<i64> {
debug_assert!(self.page_type() == PageType::TableLeaf);
let buf = self.as_ptr();
const LEAF_PAGE_HEADER_SIZE_BYTES: usize = 8;
let cell_pointer_array_start = LEAF_PAGE_HEADER_SIZE_BYTES;
let cell_pointer = cell_pointer_array_start + (idx * 2);
let cell_pointer_array_start = self.header_size();
let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
let cell_pointer = self.read_u16(cell_pointer) as usize;
let mut pos = cell_pointer;
let (_, nr) = read_varint(&buf[pos..])?;
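
Several of the cell readers above lean on read_varint. For reference, a hedged sketch of SQLite's varint encoding (each of the first 8 bytes contributes 7 bits, the high bit meaning "more bytes follow"; a 9th byte, if present, contributes all 8 bits):

    fn decode_varint(buf: &[u8]) -> (u64, usize) {
        let mut v: u64 = 0;
        for (i, &b) in buf.iter().take(8).enumerate() {
            v = (v << 7) | (b & 0x7f) as u64;
            if b & 0x80 == 0 {
                return (v, i + 1); // value plus number of bytes consumed
            }
        }
        ((v << 8) | buf[8] as u64, 9)
    }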
@@ -623,21 +616,19 @@ impl PageContent {
(self.offset + header_size, self.cell_pointer_array_size())
}

/// Get region of a cell's payload
pub fn cell_get_raw_region(
&self,
idx: usize,
payload_overflow_threshold_max: usize,
payload_overflow_threshold_min: usize,
usable_size: usize,
) -> (usize, usize) {
/// Get the region (start and length) of a cell's payload
pub fn cell_get_raw_region(&self, idx: usize, usable_size: usize) -> (usize, usize) {
let buf = self.as_ptr();
let ncells = self.cell_count();
let (cell_pointer_array_start, _) = self.cell_pointer_array_offset_and_size();
assert!(idx < ncells, "cell_get: idx out of bounds");
let cell_pointer = cell_pointer_array_start + (idx * 2); // pointers are 2 bytes each
let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
let cell_pointer = self.read_u16_no_offset(cell_pointer) as usize;
let start = cell_pointer;
let payload_overflow_threshold_max =
payload_overflow_threshold_max(self.page_type(), usable_size as u16);
let payload_overflow_threshold_min =
payload_overflow_threshold_min(self.page_type(), usable_size as u16);
let len = match self.page_type() {
PageType::IndexInterior => {
let (len_payload, n_payload) = read_varint(&buf[cell_pointer + 4..]).unwrap();
@@ -668,7 +659,11 @@ impl PageContent {
if overflows {
to_read + n_payload
} else {
len_payload as usize + n_payload
let mut size = len_payload as usize + n_payload;
if size < MINIMUM_CELL_SIZE {
size = MINIMUM_CELL_SIZE;
}
size
}
}
PageType::TableLeaf => {
@@ -683,7 +678,11 @@ impl PageContent {
if overflows {
to_read + n_payload + n_rowid
} else {
len_payload as usize + n_payload + n_rowid
let mut size = len_payload as usize + n_payload + n_rowid;
if size < MINIMUM_CELL_SIZE {
size = MINIMUM_CELL_SIZE;
}
size
}
}
};
@@ -727,6 +726,7 @@ impl PageContent {
}
}

#[instrument(skip_all, level = Level::INFO)]
pub fn begin_read_page(
db_file: Arc<dyn DatabaseStorage>,
buffer_pool: Arc<BufferPool>,
@@ -773,6 +773,7 @@ pub fn finish_read_page(
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
pub fn begin_write_btree_page(
pager: &Pager,
page: &PageRef,
@@ -791,13 +792,14 @@ pub fn begin_write_btree_page(
};

*write_counter.borrow_mut() += 1;
let clone_counter = write_counter.clone();
let write_complete = {
let buf_copy = buffer.clone();
Box::new(move |bytes_written: i32| {
tracing::trace!("finish_write_btree_page");
let buf_copy = buf_copy.clone();
let buf_len = buf_copy.borrow().len();
*write_counter.borrow_mut() -= 1;
*clone_counter.borrow_mut() -= 1;

page_finish.clear_dirty();
if bytes_written < buf_len as i32 {
@@ -806,10 +808,15 @@ pub fn begin_write_btree_page(
})
};
let c = Completion::new(CompletionType::Write(WriteCompletion::new(write_complete)));
page_source.write_page(page_id, buffer.clone(), c)?;
Ok(())
let res = page_source.write_page(page_id, buffer.clone(), c);
if res.is_err() {
// Avoid an infinite loop if the page write fails
*write_counter.borrow_mut() -= 1;
}
res
}

#[instrument(skip_all, level = Level::INFO)]
pub fn begin_sync(db_file: Arc<dyn DatabaseStorage>, syncing: Rc<RefCell<bool>>) -> Result<()> {
assert!(!*syncing.borrow());
*syncing.borrow_mut() = true;
@@ -834,15 +841,15 @@ pub enum BTreeCell {

#[derive(Debug, Clone)]
pub struct TableInteriorCell {
pub _left_child_page: u32,
pub _rowid: i64,
pub left_child_page: u32,
pub rowid: i64,
}

#[derive(Debug, Clone)]
pub struct TableLeafCell {
pub _rowid: i64,
pub rowid: i64,
/// Payload of cell, if it overflows it won't include overflowed payload.
pub _payload: &'static [u8],
pub payload: &'static [u8],
/// This is the complete payload size including overflow pages.
pub payload_size: u64,
pub first_overflow_page: Option<u32>,
@@ -860,21 +867,22 @@ pub struct IndexInteriorCell {
#[derive(Debug, Clone)]
pub struct IndexLeafCell {
pub payload: &'static [u8],
pub first_overflow_page: Option<u32>,
/// This is the complete payload size including overflow pages.
pub payload_size: u64,
pub first_overflow_page: Option<u32>,
}

/// read_btree_cell constructs a BTreeCell, which is basically a wrapper around a pointer to the payload of a cell.
/// The buffer input "page" is static because we want the cell to point to the data in the page in case it has any payload.
pub fn read_btree_cell(
page: &'static [u8],
page_type: &PageType,
page_content: &PageContent,
pos: usize,
max_local: usize,
min_local: usize,
usable_size: usize,
) -> Result<BTreeCell> {
let page_type = page_content.page_type();
let max_local = payload_overflow_threshold_max(page_type, usable_size as u16);
let min_local = payload_overflow_threshold_min(page_type, usable_size as u16);
match page_type {
PageType::IndexInterior => {
let mut pos = pos;
@@ -904,8 +912,8 @@ pub fn read_btree_cell(
pos += 4;
let (rowid, _) = read_varint(&page[pos..])?;
Ok(BTreeCell::TableInteriorCell(TableInteriorCell {
_left_child_page: left_child_page,
_rowid: rowid as i64,
left_child_page,
rowid: rowid as i64,
}))
}
PageType::IndexLeaf => {
@@ -939,8 +947,8 @@ pub fn read_btree_cell(
let (payload, first_overflow_page) =
read_payload(&page[pos..pos + to_read], payload_size as usize);
Ok(BTreeCell::TableLeafCell(TableLeafCell {
_rowid: rowid as i64,
_payload: payload,
rowid: rowid as i64,
payload,
first_overflow_page,
payload_size,
}))
@@ -1312,6 +1320,7 @@ pub fn read_entire_wal_dumb(file: &Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFi
],
write_lock: LimboRwLock::new(),
loaded: AtomicBool::new(false),
checkpoint_lock: LimboRwLock::new(),
}));
let wal_file_shared_for_completion = wal_file_shared_ret.clone();

@@ -1336,6 +1345,7 @@ pub fn read_entire_wal_dumb(file: &Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFi
u32::from_be_bytes([buf_slice[24], buf_slice[25], buf_slice[26], buf_slice[27]]);
header_locked.checksum_2 =
u32::from_be_bytes([buf_slice[28], buf_slice[29], buf_slice[30], buf_slice[31]]);
tracing::debug!("read_entire_wal_dumb(header={:?})", *header_locked);

// Read frames into frame_cache and pages_in_frames
if buf_slice.len() < WAL_HEADER_SIZE {
@@ -1418,6 +1428,13 @@ pub fn read_entire_wal_dumb(file: &Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFi
use_native_endian_checksum,
);

tracing::debug!(
"read_entire_wal_dumb(frame_h_checksum=({}, {}), calculated_frame_checksum=({}, {}))",
frame_h_checksum_1,
frame_h_checksum_2,
calculated_frame_checksum.0,
calculated_frame_checksum.1
);
if calculated_frame_checksum != (frame_h_checksum_1, frame_h_checksum_2) {
panic!(
"WAL frame checksum mismatch. Expected ({}, {}), Got ({}, {})",
@@ -1444,13 +1461,13 @@ pub fn read_entire_wal_dumb(file: &Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFi
let is_commit_record = frame_h_db_size > 0;
if is_commit_record {
wfs_data.max_frame.store(frame_idx, Ordering::SeqCst);
wfs_data.last_checksum = cumulative_checksum;
}

frame_idx += 1;
current_offset += WAL_FRAME_HEADER_SIZE + page_size;
}

wfs_data.last_checksum = cumulative_checksum;
wfs_data.loaded.store(true, Ordering::SeqCst);
});
let c = Completion::new(CompletionType::Read(ReadCompletion::new(
@@ -1481,7 +1498,7 @@ pub fn begin_read_wal_frame(
Ok(c)
}

#[instrument(skip(io, page, write_counter, wal_header, checksums), level = Level::TRACE)]
#[instrument(err, skip(io, page, write_counter, wal_header, checksums), level = Level::INFO)]
#[allow(clippy::too_many_arguments)]
pub fn begin_write_wal_frame(
io: &Arc<dyn File>,
@@ -1540,6 +1557,11 @@ pub fn begin_write_wal_frame(
);
header.checksum_1 = final_checksum.0;
header.checksum_2 = final_checksum.1;
tracing::trace!(
"begin_write_wal_frame(checksum=({}, {}))",
header.checksum_1,
header.checksum_2
);

buf[16..20].copy_from_slice(&header.checksum_1.to_be_bytes());
buf[20..24].copy_from_slice(&header.checksum_2.to_be_bytes());
@@ -1548,13 +1570,14 @@ pub fn begin_write_wal_frame(
(Arc::new(RefCell::new(buffer)), final_checksum)
};

let clone_counter = write_counter.clone();
*write_counter.borrow_mut() += 1;
let write_complete = {
let buf_copy = buffer.clone();
Box::new(move |bytes_written: i32| {
let buf_copy = buf_copy.clone();
let buf_len = buf_copy.borrow().len();
*write_counter.borrow_mut() -= 1;
*clone_counter.borrow_mut() -= 1;

page_finish.clear_dirty();
if bytes_written < buf_len as i32 {
@@ -1564,12 +1587,18 @@ pub fn begin_write_wal_frame(
};
#[allow(clippy::arc_with_non_send_sync)]
let c = Completion::new(CompletionType::Write(WriteCompletion::new(write_complete)));
io.pwrite(offset, buffer.clone(), c)?;
let res = io.pwrite(offset, buffer.clone(), c);
if res.is_err() {
// If we do not reduce the counter here on error, we incur an infinite loop when cacheflushing
*write_counter.borrow_mut() -= 1;
}
res?;
tracing::trace!("Frame written and synced");
Ok(checksums)
}

pub fn begin_write_wal_header(io: &Arc<dyn File>, header: &WalHeader) -> Result<()> {
tracing::trace!("begin_write_wal_header");
let buffer = {
let drop_fn = Rc::new(|_buf| {});
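
The frame verification above compares stored frame checksums against checksum_wal's running value. A hedged sketch of the checksum itself, per the documented SQLite WAL format (words are taken big- or little-endian depending on the WAL magic; big-endian shown here):

    fn wal_checksum(mut s: (u32, u32), data: &[u8]) -> (u32, u32) {
        // data length must be a multiple of 8; the checksum runs over pairs of u32 words
        for chunk in data.chunks_exact(8) {
            let x0 = u32::from_be_bytes(chunk[0..4].try_into().unwrap());
            let x1 = u32::from_be_bytes(chunk[4..8].try_into().unwrap());
            s.0 = s.0.wrapping_add(x0).wrapping_add(s.1);
            s.1 = s.1.wrapping_add(x1).wrapping_add(s.0);
        }
        s
    }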
@@ -23,7 +23,7 @@ use crate::storage::sqlite3_ondisk::{
begin_read_wal_frame, begin_write_wal_frame, finish_read_page, WAL_FRAME_HEADER_SIZE,
WAL_HEADER_SIZE,
};
use crate::{Buffer, Result};
use crate::{Buffer, LimboError, Result};
use crate::{Completion, Page};

use self::sqlite3_ondisk::{checksum_wal, PageContent, WAL_MAGIC_BE, WAL_MAGIC_LE};
@@ -479,6 +479,7 @@ pub struct WalFileShared {
/// There is only one write allowed in WAL mode. This lock takes care of ensuring only
/// one is in use.
pub write_lock: LimboRwLock,
pub checkpoint_lock: LimboRwLock,
pub loaded: AtomicBool,
}

@@ -499,6 +500,7 @@ impl fmt::Debug for WalFileShared {

impl Wal for WalFile {
/// Begin a read transaction.
#[instrument(skip_all, level = Level::INFO)]
fn begin_read_tx(&mut self) -> Result<LimboResult> {
let max_frame_in_wal = self.get_shared().max_frame.load(Ordering::SeqCst);

@@ -564,6 +566,7 @@ impl Wal for WalFile {

/// End a read transaction.
#[inline(always)]
#[instrument(skip_all, level = Level::INFO)]
fn end_read_tx(&self) -> Result<LimboResult> {
tracing::debug!("end_read_tx(lock={})", self.max_frame_read_lock_index);
let read_lock = &mut self.get_shared().read_locks[self.max_frame_read_lock_index];
@@ -572,6 +575,7 @@ impl Wal for WalFile {
}

/// Begin a write transaction
#[instrument(skip_all, level = Level::INFO)]
fn begin_write_tx(&mut self) -> Result<LimboResult> {
let busy = !self.get_shared().write_lock.write();
tracing::debug!("begin_write_transaction(busy={})", busy);
@@ -582,6 +586,7 @@ impl Wal for WalFile {
}

/// End a write transaction
#[instrument(skip_all, level = Level::INFO)]
fn end_write_tx(&self) -> Result<LimboResult> {
tracing::debug!("end_write_txn");
self.get_shared().write_lock.unlock();
@@ -589,6 +594,7 @@ impl Wal for WalFile {
}

/// Find the latest frame containing a page.
#[instrument(skip_all, level = Level::INFO)]
fn find_frame(&self, page_id: u64) -> Result<Option<u64>> {
let shared = self.get_shared();
let frames = shared.frame_cache.lock();
@@ -606,6 +612,7 @@ impl Wal for WalFile {
}

/// Read a frame from the WAL.
#[instrument(skip_all, level = Level::INFO)]
fn read_frame(&self, frame_id: u64, page: PageRef, buffer_pool: Arc<BufferPool>) -> Result<()> {
tracing::debug!("read_frame({})", frame_id);
let offset = self.frame_offset(frame_id);
@@ -624,6 +631,7 @@ impl Wal for WalFile {
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
fn read_frame_raw(
&self,
frame_id: u64,
@@ -650,6 +658,7 @@ impl Wal for WalFile {
}

/// Write a frame to the WAL.
#[instrument(skip_all, level = Level::INFO)]
fn append_frame(
&mut self,
page: PageRef,
@@ -660,12 +669,7 @@ impl Wal for WalFile {
let max_frame = self.max_frame;
let frame_id = if max_frame == 0 { 1 } else { max_frame + 1 };
let offset = self.frame_offset(frame_id);
tracing::debug!(
"append_frame(frame={}, offset={}, page_id={})",
frame_id,
offset,
page_id
);
tracing::debug!(frame_id, offset, page_id);
let checksums = {
let shared = self.get_shared();
let header = shared.wal_header.clone();
@@ -699,13 +703,14 @@ impl Wal for WalFile {
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
fn should_checkpoint(&self) -> bool {
let shared = self.get_shared();
let frame_id = shared.max_frame.load(Ordering::SeqCst) as usize;
frame_id >= self.checkpoint_threshold
}

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
fn checkpoint(
&mut self,
pager: &Pager,
@@ -724,6 +729,10 @@ impl Wal for WalFile {
// TODO(pere): check what frames are safe to checkpoint between many readers!
self.ongoing_checkpoint.min_frame = self.min_frame;
let shared = self.get_shared();
let busy = !shared.checkpoint_lock.write();
if busy {
return Err(LimboError::Busy);
}
let mut max_safe_frame = shared.max_frame.load(Ordering::SeqCst);
for (read_lock_idx, read_lock) in shared.read_locks.iter_mut().enumerate() {
let this_mark = read_lock.value.load(Ordering::SeqCst);
@@ -747,8 +756,8 @@ impl Wal for WalFile {
self.ongoing_checkpoint.state = CheckpointState::ReadFrame;
tracing::trace!(
"checkpoint_start(min_frame={}, max_frame={})",
self.ongoing_checkpoint.min_frame,
self.ongoing_checkpoint.max_frame,
self.ongoing_checkpoint.min_frame
);
}
CheckpointState::ReadFrame => {
@@ -831,6 +840,7 @@ impl Wal for WalFile {
return Ok(CheckpointStatus::IO);
}
let shared = self.get_shared();
shared.checkpoint_lock.unlock();

// Record two num pages fields to return as checkpoint result to caller.
// Ref: pnLog, pnCkpt on https://www.sqlite.org/c3ref/wal_checkpoint_v2.html
@@ -869,7 +879,7 @@ impl Wal for WalFile {
}
}

#[instrument(skip_all, level = Level::DEBUG)]
#[instrument(err, skip_all, level = Level::INFO)]
fn sync(&mut self) -> Result<WalFsyncStatus> {
match self.sync_state.get() {
SyncState::NotSyncing => {
@@ -911,6 +921,7 @@ impl Wal for WalFile {
self.min_frame
}

#[instrument(err, skip_all, level = Level::INFO)]
fn rollback(&mut self) -> Result<()> {
// TODO(pere): have to remove things from frame_cache because they are no longer valid.
// TODO(pere): clear page cache in pager.
@@ -918,7 +929,7 @@ impl Wal for WalFile {
// TODO(pere): implement proper hashmap, this sucks :).
let shared = self.get_shared();
let max_frame = shared.max_frame.load(Ordering::SeqCst);
tracing::trace!("rollback(to_max_frame={})", max_frame);
tracing::debug!(to_max_frame = max_frame);
let mut frame_cache = shared.frame_cache.lock();
for (_, frames) in frame_cache.iter_mut() {
let mut last_valid_frame = frames.len();
@@ -936,14 +947,11 @@ impl Wal for WalFile {
Ok(())
}

#[instrument(skip_all, level = Level::INFO)]
fn finish_append_frames_commit(&mut self) -> Result<()> {
let shared = self.get_shared();
shared.max_frame.store(self.max_frame, Ordering::SeqCst);
tracing::trace!(
"finish_append_frames_commit(max_frame={}, last_checksum={:?})",
self.max_frame,
self.last_checksum
);
tracing::trace!(self.max_frame, ?self.last_checksum);
shared.last_checksum = self.last_checksum;
Ok(())
}
@@ -969,6 +977,7 @@ impl WalFile {
}

let header = unsafe { shared.get().as_mut().unwrap().wal_header.lock() };
let last_checksum = unsafe { (*shared.get()).last_checksum };
Self {
io,
// default to max frame in WAL, so that when we read schema we can read from WAL too if it's there.
@@ -987,7 +996,7 @@ impl WalFile {
sync_state: Cell::new(SyncState::NotSyncing),
min_frame: 0,
max_frame_read_lock_index: 0,
last_checksum: (0, 0),
last_checksum,
start_pages_in_frames: 0,
header: *header,
}
@@ -1075,6 +1084,7 @@ impl WalFileShared {
let checksum = header.lock();
(checksum.checksum_1, checksum.checksum_2)
};
tracing::debug!("new_shared(header={:?})", header);
let shared = WalFileShared {
wal_header: header,
min_frame: AtomicU64::new(0),
@@ -1094,6 +1104,7 @@ impl WalFileShared {
nreads: AtomicU32::new(0),
value: AtomicU32::new(READMARK_NOT_USED),
},
checkpoint_lock: LimboRwLock::new(),
loaded: AtomicBool::new(true),
};
Ok(Arc::new(UnsafeCell::new(shared)))
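
A recurring change in this file is moving log calls from preformatted strings to tracing's structured fields, which downstream collectors can filter on. A minimal before/after sketch:

    fn log_rollback(max_frame: u64) {
        // before: one opaque, preformatted message
        tracing::trace!("rollback(to_max_frame={})", max_frame);
        // after: a structured, queryable key/value field
        tracing::debug!(to_max_frame = max_frame);
    }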
@@ -11,7 +11,7 @@ use turso_sqlite3_parser::ast::{CompoundOperator, SortOrder};

use tracing::Level;

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
pub fn emit_program_for_compound_select(
program: &mut ProgramBuilder,
plan: Plan,
@@ -150,9 +150,9 @@ fn emit_compound_select(
CompoundOperator::Union => {
let mut new_dedupe_index = false;
let dedupe_index = match right_most.query_destination {
QueryDestination::EphemeralIndex { cursor_id, index } => {
(cursor_id, index.clone())
}
QueryDestination::EphemeralIndex {
cursor_id, index, ..
} => (cursor_id, index.clone()),
_ => {
new_dedupe_index = true;
create_dedupe_index(program, &right_most, schema)?
@@ -161,6 +161,7 @@ fn emit_compound_select(
plan.query_destination = QueryDestination::EphemeralIndex {
cursor_id: dedupe_index.0,
index: dedupe_index.1.clone(),
is_delete: false,
};
let compound_select = Plan::CompoundSelect {
left,
@@ -182,20 +183,18 @@ fn emit_compound_select(
right_most.query_destination = QueryDestination::EphemeralIndex {
cursor_id: dedupe_index.0,
index: dedupe_index.1.clone(),
is_delete: false,
};
emit_query(program, &mut right_most, &mut right_most_ctx)?;

if new_dedupe_index {
let label_jump_over_dedupe = program.allocate_label();
read_deduplicated_union_rows(
read_deduplicated_union_or_except_rows(
program,
dedupe_index.0,
dedupe_index.1.as_ref(),
limit_ctx,
label_jump_over_dedupe,
yield_reg,
);
program.preassign_label_to_next_insn(label_jump_over_dedupe);
}
}
CompoundOperator::Intersect => {
@@ -211,6 +210,7 @@ fn emit_compound_select(
plan.query_destination = QueryDestination::EphemeralIndex {
cursor_id: left_cursor_id,
index: left_index.clone(),
is_delete: false,
};
let compound_select = Plan::CompoundSelect {
left,
@@ -234,6 +234,7 @@ fn emit_compound_select(
right_most.query_destination = QueryDestination::EphemeralIndex {
cursor_id: right_cursor_id,
index: right_index,
is_delete: false,
};
emit_query(program, &mut right_most, &mut right_most_ctx)?;
read_intersect_rows(
@@ -246,8 +247,49 @@ fn emit_compound_select(
yield_reg,
);
}
_ => {
crate::bail_parse_error!("unimplemented compound select operator: {:?}", operator);
CompoundOperator::Except => {
let mut new_index = false;
let (cursor_id, index) = match right_most.query_destination {
QueryDestination::EphemeralIndex {
cursor_id, index, ..
} => (cursor_id, index),
_ => {
new_index = true;
create_dedupe_index(program, &right_most, schema)?
}
};
plan.query_destination = QueryDestination::EphemeralIndex {
cursor_id,
index: index.clone(),
is_delete: false,
};
let compound_select = Plan::CompoundSelect {
left,
right_most: plan,
limit,
offset,
order_by,
};
emit_compound_select(
program,
compound_select,
schema,
syms,
None,
yield_reg,
reg_result_cols_start,
)?;
right_most.query_destination = QueryDestination::EphemeralIndex {
cursor_id,
index: index.clone(),
is_delete: true,
};
emit_query(program, &mut right_most, &mut right_most_ctx)?;
if new_index {
read_deduplicated_union_or_except_rows(
program, cursor_id, &index, limit_ctx, yield_reg,
);
}
}
},
None => {
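For context on the new Except arm: the left arms fill a deduplicating ephemeral index, then the right-most arm runs again with is_delete: true so its rows are removed from that index, and whatever survives is the result. A sketch of the semantics only (an ordered set standing in for the ephemeral index, not the emitted bytecode):

use std::collections::BTreeSet;

// Models SELECT x FROM a EXCEPT SELECT x FROM b.
fn except<T: Ord + Clone>(left: &[T], right: &[T]) -> Vec<T> {
    let mut index: BTreeSet<T> = left.iter().cloned().collect(); // dedupe + sort
    for row in right {
        index.remove(row); // the is_delete: true pass
    }
    index.into_iter().collect() // read_deduplicated_union_or_except_rows
}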
@@ -302,15 +344,16 @@ fn create_dedupe_index(
Ok((cursor_id, dedupe_index.clone()))
}

/// Emits the bytecode for reading deduplicated rows from the ephemeral index created for UNION operators.
fn read_deduplicated_union_rows(
/// Emits the bytecode for reading deduplicated rows from the ephemeral index created for
/// UNION or EXCEPT operators.
fn read_deduplicated_union_or_except_rows(
program: &mut ProgramBuilder,
dedupe_cursor_id: usize,
dedupe_index: &Index,
limit_ctx: Option<LimitCtx>,
label_limit_reached: BranchOffset,
yield_reg: Option<usize>,
) {
let label_close = program.allocate_label();
let label_dedupe_next = program.allocate_label();
let label_dedupe_loop_start = program.allocate_label();
let dedupe_cols_start_reg = program.alloc_registers(dedupe_index.columns.len());
@@ -348,7 +391,7 @@ fn read_deduplicated_union_rows(
if let Some(limit_ctx) = limit_ctx {
program.emit_insn(Insn::DecrJumpZero {
reg: limit_ctx.reg_limit,
target_pc: label_limit_reached,
target_pc: label_close,
})
}
program.preassign_label_to_next_insn(label_dedupe_next);
@@ -356,6 +399,7 @@ fn read_deduplicated_union_rows(
cursor_id: dedupe_cursor_id,
pc_if_next: label_dedupe_loop_start,
});
program.preassign_label_to_next_insn(label_close);
program.emit_insn(Insn::Close {
cursor_id: dedupe_cursor_id,
});

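The read loop above follows a common VDBE shape: rewind the dedupe index, yield each row, decrement the LIMIT counter, and jump to the close label when it reaches zero. A rough, runnable model of that control flow (names illustrative; the real code emits Insn values through ProgramBuilder):

fn read_rows<T>(sorted_unique_rows: &[T], mut limit: Option<usize>, mut emit: impl FnMut(&T)) {
    for row in sorted_unique_rows {          // Rewind + Next loop
        emit(row);                           // column reads + ResultRow/Yield
        if let Some(n) = limit.as_mut() {    // DecrJumpZero on reg_limit
            *n = n.saturating_sub(1);
            if *n == 0 {
                break;                       // jump to label_close
            }
        }
    }
    // label_close: Insn::Close on the dedupe cursor
}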
@@ -31,7 +31,7 @@ use crate::vdbe::builder::{CursorKey, CursorType, ProgramBuilder};
use crate::vdbe::insn::{CmpInsFlags, IdxInsertFlags, InsertFlags, RegisterOrLiteral};
use crate::vdbe::CursorID;
use crate::vdbe::{insn::Insn, BranchOffset};
use crate::{Result, SymbolTable};
use crate::{bail_parse_error, Result, SymbolTable};

pub struct Resolver<'a> {
pub schema: &'a Schema,
@@ -149,6 +149,8 @@ pub struct TranslateCtx<'a> {
/// - First: all `GROUP BY` expressions, in the order they appear in the `GROUP BY` clause.
/// - Then: remaining non-aggregate expressions that are not part of `GROUP BY`.
pub non_aggregate_expressions: Vec<(&'a Expr, bool)>,
/// Cursor id for turso_cdc table (if capture_data_changes=on is set and query can modify the data)
pub cdc_cursor_id: Option<usize>,
}

impl<'a> TranslateCtx<'a> {
@@ -175,6 +177,7 @@ impl<'a> TranslateCtx<'a> {
result_columns_to_skip_in_orderby_sorter: None,
resolver: Resolver::new(schema, syms),
non_aggregate_expressions: Vec::new(),
cdc_cursor_id: None,
}
}
}
@@ -198,7 +201,7 @@ pub enum TransactionMode {

/// Main entry point for emitting bytecode for a SQL query
/// Takes a query plan and generates the corresponding bytecode program
#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
pub fn emit_program(
program: &mut ProgramBuilder,
plan: Plan,
@@ -216,7 +219,7 @@ pub fn emit_program(
}
}

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
fn emit_program_for_select(
program: &mut ProgramBuilder,
mut plan: SelectPlan,
@@ -255,7 +258,7 @@ fn emit_program_for_select(
Ok(())
}

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
pub fn emit_query<'a>(
program: &mut ProgramBuilder,
plan: &'a mut SelectPlan,
@@ -395,7 +398,7 @@ pub fn emit_query<'a>(
Ok(t_ctx.reg_result_cols_start.unwrap())
}

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
fn emit_program_for_delete(
program: &mut ProgramBuilder,
plan: DeletePlan,
@@ -562,10 +565,27 @@ fn emit_delete_insns(
start_reg,
num_regs,
cursor_id: index_cursor_id,
raise_error_if_no_matching_entry: true,
});
}
}

if let Some(turso_cdc_cursor_id) = t_ctx.cdc_cursor_id {
let rowid_reg = program.alloc_register();
program.emit_insn(Insn::RowId {
cursor_id: main_table_cursor_id,
dest: rowid_reg,
});
emit_cdc_insns(
program,
&t_ctx.resolver,
OperationMode::DELETE,
turso_cdc_cursor_id,
rowid_reg,
table_reference.table.get_name(),
)?;
}

program.emit_insn(Insn::Delete {
cursor_id: main_table_cursor_id,
});
@@ -580,7 +600,7 @@ fn emit_delete_insns(
Ok(())
}

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
fn emit_program_for_update(
program: &mut ProgramBuilder,
mut plan: UpdatePlan,
@@ -699,7 +719,7 @@ fn emit_program_for_update(
Ok(())
}

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
fn emit_update_insns(
plan: &UpdatePlan,
t_ctx: &TranslateCtx,
@@ -1064,6 +1084,7 @@ fn emit_update_insns(
start_reg,
num_regs,
cursor_id: idx_cursor_id,
raise_error_if_no_matching_entry: true,
});

// Insert new index key (filled further above with values from set_clauses)
@@ -1076,6 +1097,53 @@ fn emit_update_insns(
});
}

if let Some(cdc_cursor_id) = t_ctx.cdc_cursor_id {
let rowid_reg = program.alloc_register();
if has_user_provided_rowid {
program.emit_insn(Insn::RowId {
cursor_id,
dest: rowid_reg,
});
emit_cdc_insns(
program,
&t_ctx.resolver,
OperationMode::DELETE,
cdc_cursor_id,
rowid_reg,
table_ref.table.get_name(),
)?;
program.emit_insn(Insn::Copy {
src_reg: rowid_set_clause_reg.expect(
"rowid_set_clause_reg must be set because has_user_provided_rowid is true",
),
dst_reg: rowid_reg,
amount: 1,
});
emit_cdc_insns(
program,
&t_ctx.resolver,
OperationMode::INSERT,
cdc_cursor_id,
rowid_reg,
table_ref.table.get_name(),
)?;
} else {
program.emit_insn(Insn::Copy {
src_reg: rowid_set_clause_reg.unwrap_or(beg),
dst_reg: rowid_reg,
amount: 1,
});
emit_cdc_insns(
program,
&t_ctx.resolver,
OperationMode::UPDATE,
cdc_cursor_id,
rowid_reg,
table_ref.table.get_name(),
)?;
}
}

// If we are updating the rowid, we cannot rely on overwrite on the
// Insert instruction to update the cell. We need to first delete the current cell
// and later insert the updated record
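A concrete case of the comment above, sketched with a BTreeMap standing in for the table btree (illustrative only; the real code emits Delete and Insert instructions):

use std::collections::BTreeMap;

// Moving a row to a new rowid cannot be an in-place overwrite: the cell lives
// at a position keyed by the old rowid, so the old cell is deleted first and
// the record is re-inserted under the new key.
// e.g. UPDATE t SET rowid = 5 WHERE rowid = 1;
fn move_row(table: &mut BTreeMap<i64, Vec<u8>>, old: i64, new: i64) {
    if let Some(record) = table.remove(&old) {   // Insn::Delete on the old cell
        table.insert(new, record);               // Insn::Insert under the new rowid
    }
}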
@@ -1115,6 +1183,79 @@ fn emit_update_insns(
Ok(())
}

pub fn emit_cdc_insns(
program: &mut ProgramBuilder,
resolver: &Resolver,
operation_mode: OperationMode,
cdc_cursor_id: usize,
rowid_reg: usize,
table_name: &str,
) -> Result<()> {
// (operation_id INTEGER PRIMARY KEY AUTOINCREMENT, operation_time INTEGER, operation_type INTEGER, table_name TEXT, id)
let turso_cdc_registers = program.alloc_registers(5);
program.emit_insn(Insn::Null {
dest: turso_cdc_registers,
dest_end: None,
});
program.mark_last_insn_constant();

let Some(unixepoch_fn) = resolver.resolve_function("unixepoch", 0) else {
bail_parse_error!("no function {}", "unixepoch");
};
let unixepoch_fn_ctx = crate::function::FuncCtx {
func: unixepoch_fn,
arg_count: 0,
};

program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: 0,
dest: turso_cdc_registers + 1,
func: unixepoch_fn_ctx,
});

let operation_type = match operation_mode {
OperationMode::INSERT => 1,
OperationMode::UPDATE | OperationMode::SELECT => 0,
OperationMode::DELETE => -1,
};
program.emit_int(operation_type, turso_cdc_registers + 2);
program.mark_last_insn_constant();

program.emit_string8(table_name.to_string(), turso_cdc_registers + 3);
program.mark_last_insn_constant();

program.emit_insn(Insn::Copy {
src_reg: rowid_reg,
dst_reg: turso_cdc_registers + 4,
amount: 0,
});

let rowid_reg = program.alloc_register();
program.emit_insn(Insn::NewRowid {
cursor: cdc_cursor_id,
rowid_reg,
prev_largest_reg: 0, // todo(sivukhin): properly set this from the sqlite_sequence table once AUTOINCREMENT is properly implemented in Turso
});

let record_reg = program.alloc_register();
program.emit_insn(Insn::MakeRecord {
start_reg: turso_cdc_registers,
count: 5,
dest_reg: record_reg,
index_name: None,
});

program.emit_insn(Insn::Insert {
cursor: cdc_cursor_id,
key_reg: rowid_reg,
record_reg,
flag: InsertFlags::new(),
table_name: "".to_string(),
});
Ok(())
}

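So every mutated row produces exactly one CDC record shaped like the comment at the top of emit_cdc_insns. A sketch of that row, with illustrative values (the struct is a stand-in, not a repo type):

// One CDC row per mutated row, as assembled in the 5 registers above:
//   operation_id   -> NULL at emit time; NewRowid assigns it on insert
//   operation_time -> unixepoch() when the statement runs
//   operation_type -> 1 = INSERT, 0 = UPDATE, -1 = DELETE
//   table_name     -> name of the mutated table
//   id             -> rowid of the mutated row
struct CdcRow {
    operation_id: i64,
    operation_time: i64,
    operation_type: i64,
    table_name: String,
    id: i64,
}

// e.g. a DELETE of rowid 42 from table "users" might yield:
fn example() -> CdcRow {
    CdcRow {
        operation_id: 1,               // assigned by NewRowid
        operation_time: 1_700_000_000, // unixepoch()
        operation_type: -1,            // DELETE
        table_name: "users".into(),
        id: 42,
    }
}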
/// Initialize the limit/offset counters and registers.
/// In case of compound SELECTs, the limit counter is initialized only once,
/// hence [LimitCtx::initialize_counter] being false in those cases.

@@ -9,7 +9,7 @@ use crate::function::JsonFunc;
use crate::function::{Func, FuncCtx, MathFuncArity, ScalarFunc, VectorFunc};
use crate::functions::datetime;
use crate::schema::{Affinity, Table, Type};
use crate::util::{exprs_are_equivalent, normalize_ident, parse_numeric_literal};
use crate::util::{exprs_are_equivalent, parse_numeric_literal};
use crate::vdbe::builder::CursorKey;
use crate::vdbe::{
builder::ProgramBuilder,
@@ -27,7 +27,7 @@ pub struct ConditionMetadata {
pub jump_target_when_false: BranchOffset,
}

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
fn emit_cond_jump(program: &mut ProgramBuilder, cond_meta: ConditionMetadata, reg: usize) {
if cond_meta.jump_if_condition_is_true {
program.emit_insn(Insn::If {
@@ -131,7 +131,7 @@ macro_rules! expect_arguments_even {
}};
}

#[instrument(skip(program, referenced_tables, expr, resolver), level = Level::TRACE)]
#[instrument(skip(program, referenced_tables, expr, resolver), level = Level::INFO)]
pub fn translate_condition_expr(
program: &mut ProgramBuilder,
referenced_tables: &TableReferences,
@@ -680,8 +680,7 @@ pub fn translate_expr(
order_by: _,
} => {
let args_count = if let Some(args) = args { args.len() } else { 0 };
let func_name = normalize_ident(name.0.as_str());
let func_type = resolver.resolve_function(&func_name, args_count);
let func_type = resolver.resolve_function(&name.0, args_count);

if func_type.is_none() {
crate::bail_parse_error!("unknown function {}", name.0);
@@ -694,7 +693,7 @@ pub fn translate_expr(

match &func_ctx.func {
Func::Agg(_) => {
crate::bail_parse_error!("aggregation function in non-aggregation context")
crate::bail_parse_error!("misuse of aggregate function {}()", name.0)
}
Func::External(_) => {
let regs = program.alloc_registers(args_count);

@@ -6,6 +6,7 @@ use turso_sqlite3_parser::ast::{

use crate::error::{SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PRIMARYKEY};
use crate::schema::{IndexColumn, Table};
use crate::translate::emitter::{emit_cdc_insns, OperationMode};
use crate::util::normalize_ident;
use crate::vdbe::builder::ProgramBuilderOpts;
use crate::vdbe::insn::{IdxInsertFlags, InsertFlags, RegisterOrLiteral};
@@ -116,6 +117,26 @@ pub fn translate_insert(
let halt_label = program.allocate_label();
let loop_start_label = program.allocate_label();

let cdc_table = program.capture_data_changes_mode().table();
let cdc_table = if let Some(cdc_table) = cdc_table {
if table.get_name() != cdc_table {
let Some(turso_cdc_table) = schema.get_table(cdc_table) else {
crate::bail_parse_error!("no such table: {}", cdc_table);
};
let Some(cdc_btree) = turso_cdc_table.btree().clone() else {
crate::bail_parse_error!("no such table: {}", cdc_table);
};
Some((
program.alloc_cursor_id(CursorType::BTreeTable(cdc_btree.clone())),
cdc_btree,
))
} else {
None
}
} else {
None
};

let mut yield_reg_opt = None;
let mut temp_table_ctx = None;
let (num_values, cursor_id) = match body {
@@ -328,6 +349,15 @@ pub fn translate_insert(
&resolver,
)?;
}
// Open turso_cdc table btree for writing if necessary
if let Some((cdc_cursor_id, cdc_btree)) = &cdc_table {
program.emit_insn(Insn::OpenWrite {
cursor_id: *cdc_cursor_id,
root_page: cdc_btree.root_page.into(),
name: cdc_btree.name.clone(),
});
}

// Open all the index btrees for writing
for idx_cursor in idx_cursors.iter() {
program.emit_insn(Insn::OpenWrite {
@@ -414,6 +444,18 @@ pub fn translate_insert(
_ => (),
}

// Write record to the turso_cdc table if necessary
if let Some((cdc_cursor_id, _)) = &cdc_table {
emit_cdc_insns(
&mut program,
&resolver,
OperationMode::INSERT,
*cdc_cursor_id,
rowid_reg,
&table_name.0,
)?;
}

let index_col_mappings = resolve_indicies_for_insert(schema, table.as_ref(), &column_mappings)?;
for index_col_mapping in index_col_mappings {
// find which cursor we opened earlier for this index

@@ -117,6 +117,33 @@ pub fn init_loop(
t_ctx.meta_left_joins.len() == tables.joined_tables().len(),
"meta_left_joins length does not match tables length"
);

let cdc_table = program.capture_data_changes_mode().table();
if cdc_table.is_some()
&& matches!(
mode,
OperationMode::INSERT | OperationMode::UPDATE | OperationMode::DELETE
)
{
assert!(tables.joined_tables().len() == 1);
let cdc_table_name = cdc_table.unwrap();
if tables.joined_tables()[0].table.get_name() != cdc_table_name {
let Some(cdc_table) = t_ctx.resolver.schema.get_table(cdc_table_name) else {
crate::bail_parse_error!("no such table: {}", cdc_table_name);
};
let Some(cdc_btree) = cdc_table.btree().clone() else {
crate::bail_parse_error!("no such table: {}", cdc_table_name);
};
let cdc_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(cdc_btree.clone()));
program.emit_insn(Insn::OpenWrite {
cursor_id: cdc_cursor_id,
root_page: cdc_btree.root_page.into(),
name: cdc_btree.name.clone(),
});
t_ctx.cdc_cursor_id = Some(cdc_cursor_id);
}
}

// Initialize ephemeral indexes for distinct aggregates
for (i, agg) in aggregates
.iter_mut()

@@ -53,7 +53,7 @@ use transaction::{translate_tx_begin, translate_tx_commit};
use turso_sqlite3_parser::ast::{self, Delete, Insert};
use update::translate_update;

#[instrument(skip_all, level = Level::TRACE)]
#[instrument(skip_all, level = Level::INFO)]
#[allow(clippy::too_many_arguments)]
pub fn translate(
schema: &Schema,
@@ -75,6 +75,7 @@ pub fn translate(

let mut program = ProgramBuilder::new(
query_mode,
connection.get_capture_data_changes().clone(),
// These options will be extended within each translate program
ProgramBuilderOpts {
num_cursors: 1,

@@ -324,6 +324,8 @@ pub enum QueryDestination {
cursor_id: CursorID,
/// The index that will be used to store the results.
index: Arc<Index>,
/// Whether this is a delete operation that will remove the index entries
is_delete: bool,
},
/// The results of the query are stored in an ephemeral table,
/// later used by the parent query.

@@ -51,8 +51,7 @@ pub fn resolve_aggregates(
} else {
0
};
match Func::resolve_function(normalize_ident(name.0.as_str()).as_str(), args_count)
{
match Func::resolve_function(&name.0, args_count) {
Ok(Func::Agg(f)) => {
let distinctness = Distinctness::from_ast(distinctness.as_ref());
if !schema.indexes_enabled() && distinctness.is_distinct() {
@@ -84,9 +83,7 @@ pub fn resolve_aggregates(
}
}
Expr::FunctionCallStar { name, .. } => {
if let Ok(Func::Agg(f)) =
Func::resolve_function(normalize_ident(name.0.as_str()).as_str(), 0)
{
if let Ok(Func::Agg(f)) = Func::resolve_function(&name.0, 0) {
aggs.push(Aggregate {
func: f,
args: vec![],
@@ -208,7 +205,7 @@ pub fn bind_column_references(
let matching_tbl = referenced_tables
.find_table_and_internal_id_by_identifier(&normalized_table_name);
if matching_tbl.is_none() {
crate::bail_parse_error!("Table {} not found", normalized_table_name);
crate::bail_parse_error!("no such table: {}", normalized_table_name);
}
let (tbl_id, tbl) = matching_tbl.unwrap();
let normalized_id = normalize_ident(id.0.as_str());
@@ -320,7 +317,7 @@ fn parse_from_clause_table(
}
}

crate::bail_parse_error!("Table {} not found", normalized_qualified_name);
crate::bail_parse_error!("no such table: {}", normalized_qualified_name);
}
ast::SelectTable::Select(subselect, maybe_alias) => {
let Plan::Select(subplan) = prepare_select_plan(

@@ -3,17 +3,19 @@

use std::rc::Rc;
use std::sync::Arc;
use turso_sqlite3_parser::ast::PragmaName;
use turso_sqlite3_parser::ast::{self, Expr};
use turso_sqlite3_parser::ast::{self, ColumnDefinition, Expr};
use turso_sqlite3_parser::ast::{PragmaName, QualifiedName};

use crate::pragma::pragma_for;
use crate::schema::Schema;
use crate::storage::pager::AutoVacuumMode;
use crate::storage::sqlite3_ondisk::MIN_PAGE_CACHE_SIZE;
use crate::storage::wal::CheckpointMode;
use crate::util::{normalize_ident, parse_signed_number};
use crate::translate::schema::translate_create_table;
use crate::util::{normalize_ident, parse_signed_number, parse_string};
use crate::vdbe::builder::{ProgramBuilder, ProgramBuilderOpts};
use crate::vdbe::insn::{Cookie, Insn};
use crate::{bail_parse_error, storage, LimboError, Value};
use crate::{bail_parse_error, storage, CaptureDataChangesMode, LimboError, Value};
use std::str::FromStr;
use strum::IntoEnumIterator;

@@ -57,17 +59,15 @@ pub fn translate_pragma(
Err(_) => bail_parse_error!("Not a valid pragma name"),
};

match body {
None => {
query_pragma(pragma, schema, None, pager, connection, &mut program)?;
}
let mut program = match body {
None => query_pragma(pragma, schema, None, pager, connection, program)?,
Some(ast::PragmaBody::Equals(value) | ast::PragmaBody::Call(value)) => match pragma {
PragmaName::TableInfo => {
query_pragma(pragma, schema, Some(value), pager, connection, &mut program)?;
query_pragma(pragma, schema, Some(value), pager, connection, program)?
}
_ => {
write = true;
update_pragma(pragma, schema, value, pager, connection, &mut program)?;
update_pragma(pragma, schema, value, pager, connection, program)?
}
},
};
@@ -85,8 +85,8 @@ fn update_pragma(
value: ast::Expr,
pager: Rc<Pager>,
connection: Arc<crate::Connection>,
program: &mut ProgramBuilder,
) -> crate::Result<()> {
mut program: ProgramBuilder,
) -> crate::Result<ProgramBuilder> {
match pragma {
PragmaName::CacheSize => {
let cache_size = match parse_signed_number(&value)? {
@@ -95,42 +95,33 @@ fn update_pragma(
_ => bail_parse_error!("Invalid value for cache size pragma"),
};
update_cache_size(cache_size, pager, connection)?;
Ok(())
}
PragmaName::JournalMode => {
query_pragma(
PragmaName::JournalMode,
schema,
None,
pager,
connection,
program,
)?;
Ok(())
}
PragmaName::LegacyFileFormat => Ok(()),
PragmaName::WalCheckpoint => {
query_pragma(
PragmaName::WalCheckpoint,
schema,
Some(value),
pager,
connection,
program,
)?;
Ok(())
}
PragmaName::PageCount => {
query_pragma(
PragmaName::PageCount,
schema,
None,
pager,
connection,
program,
)?;
Ok(())
Ok(program)
}
PragmaName::JournalMode => query_pragma(
PragmaName::JournalMode,
schema,
None,
pager,
connection,
program,
),
PragmaName::LegacyFileFormat => Ok(program),
PragmaName::WalCheckpoint => query_pragma(
PragmaName::WalCheckpoint,
schema,
Some(value),
pager,
connection,
program,
),
PragmaName::PageCount => query_pragma(
PragmaName::PageCount,
schema,
None,
pager,
connection,
program,
),
PragmaName::UserVersion => {
let data = parse_signed_number(&value)?;
let version_value = match data {
@@ -145,7 +136,7 @@ fn update_pragma(
value: version_value,
p5: 1,
});
Ok(())
Ok(program)
}
PragmaName::SchemaVersion => {
// TODO: Implement updating schema_version
@@ -214,9 +205,33 @@ fn update_pragma(
value: auto_vacuum_mode - 1,
p5: 0,
});
Ok(())
Ok(program)
}
PragmaName::IntegrityCheck => unreachable!("integrity_check cannot be set"),
PragmaName::UnstableCaptureDataChangesConn => {
let value = parse_string(&value)?;
// todo(sivukhin): ideally, we should consistently update the capture_data_changes connection flag only after successful execution of the schema change statement
// but for now, let's keep it as is...
let opts = CaptureDataChangesMode::parse(&value)?;
if let Some(table) = &opts.table() {
// make sure the table is created
program = translate_create_table(
QualifiedName::single(ast::Name(table.to_string())),
false,
ast::CreateTableBody::columns_and_constraints_from_definition(
turso_cdc_table_columns(),
None,
ast::TableOptions::NONE,
)
.unwrap(),
true,
schema,
program,
)?;
}
connection.set_capture_data_changes(opts);
Ok(program)
}
}
}

@@ -226,8 +241,8 @@ fn query_pragma(
value: Option<ast::Expr>,
pager: Rc<Pager>,
connection: Arc<crate::Connection>,
program: &mut ProgramBuilder,
) -> crate::Result<()> {
mut program: ProgramBuilder,
) -> crate::Result<ProgramBuilder> {
let register = program.alloc_register();
match pragma {
PragmaName::CacheSize => {
@@ -365,11 +380,25 @@ fn query_pragma(
program.emit_result_row(register, 1);
}
PragmaName::IntegrityCheck => {
translate_integrity_check(schema, program)?;
translate_integrity_check(schema, &mut program)?;
}
PragmaName::UnstableCaptureDataChangesConn => {
let pragma = pragma_for(pragma);
let second_column = program.alloc_register();
let opts = connection.get_capture_data_changes();
program.emit_string8(opts.mode_name().to_string(), register);
if let Some(table) = &opts.table() {
program.emit_string8(table.to_string(), second_column);
} else {
program.emit_null(second_column, None);
}
program.emit_result_row(register, 2);
program.add_pragma_result_column(pragma.columns[0].to_string());
program.add_pragma_result_column(pragma.columns[1].to_string());
}
}

Ok(())
Ok(program)
}

fn update_auto_vacuum_mode(
@@ -435,3 +464,53 @@ fn update_cache_size(

Ok(())
}

pub const TURSO_CDC_DEFAULT_TABLE_NAME: &str = "turso_cdc";
fn turso_cdc_table_columns() -> Vec<ColumnDefinition> {
vec![
ast::ColumnDefinition {
col_name: ast::Name("operation_id".to_string()),
col_type: Some(ast::Type {
name: "INTEGER".to_string(),
size: None,
}),
constraints: vec![ast::NamedColumnConstraint {
name: None,
constraint: ast::ColumnConstraint::PrimaryKey {
order: None,
conflict_clause: None,
auto_increment: true,
},
}],
},
ast::ColumnDefinition {
col_name: ast::Name("operation_time".to_string()),
col_type: Some(ast::Type {
name: "INTEGER".to_string(),
size: None,
}),
constraints: vec![],
},
ast::ColumnDefinition {
col_name: ast::Name("operation_type".to_string()),
col_type: Some(ast::Type {
name: "INTEGER".to_string(),
size: None,
}),
constraints: vec![],
},
ast::ColumnDefinition {
col_name: ast::Name("table_name".to_string()),
col_type: Some(ast::Type {
name: "TEXT".to_string(),
size: None,
}),
constraints: vec![],
},
ast::ColumnDefinition {
col_name: ast::Name("id".to_string()),
col_type: None,
constraints: vec![],
},
]
}

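The column list above corresponds to this SQL shape; the constant below is derived directly from those definitions for reference and is not itself part of the repo:

// SQL equivalent of turso_cdc_table_columns().
const TURSO_CDC_TABLE_SQL: &str = "\
CREATE TABLE turso_cdc (
    operation_id INTEGER PRIMARY KEY AUTOINCREMENT,
    operation_time INTEGER,
    operation_type INTEGER,
    table_name TEXT,
    id
)";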
@@ -85,21 +85,31 @@ pub fn emit_result_row_and_limit(
QueryDestination::EphemeralIndex {
cursor_id: index_cursor_id,
index: dedupe_index,
is_delete,
} => {
let record_reg = program.alloc_register();
program.emit_insn(Insn::MakeRecord {
start_reg: result_columns_start_reg,
count: plan.result_columns.len(),
dest_reg: record_reg,
index_name: Some(dedupe_index.name.clone()),
});
program.emit_insn(Insn::IdxInsert {
cursor_id: *index_cursor_id,
record_reg,
unpacked_start: None,
unpacked_count: None,
flags: IdxInsertFlags::new(),
});
if *is_delete {
program.emit_insn(Insn::IdxDelete {
start_reg: result_columns_start_reg,
num_regs: plan.result_columns.len(),
cursor_id: *index_cursor_id,
raise_error_if_no_matching_entry: false,
});
} else {
let record_reg = program.alloc_register();
program.emit_insn(Insn::MakeRecord {
start_reg: result_columns_start_reg,
count: plan.result_columns.len(),
dest_reg: record_reg,
index_name: Some(dedupe_index.name.clone()),
});
program.emit_insn(Insn::IdxInsert {
cursor_id: *index_cursor_id,
record_reg,
unpacked_start: None,
unpacked_count: None,
flags: IdxInsertFlags::new().no_op_duplicate(),
});
}
}
QueryDestination::EphemeralTable {
cursor_id: table_cursor_id,

@@ -124,15 +124,6 @@ pub fn prepare_select_plan(

let mut left = Vec::with_capacity(compounds.len());
for CompoundSelect { select, operator } in compounds {
// TODO: add support for EXCEPT
if operator != ast::CompoundOperator::UnionAll
&& operator != ast::CompoundOperator::Union
&& operator != ast::CompoundOperator::Intersect
{
crate::bail_parse_error!(
"only UNION ALL, UNION and INTERSECT are supported for compound SELECTs"
);
}
left.push((last, operator));
last = prepare_one_select_plan(
schema,
@@ -215,6 +206,14 @@ fn prepare_one_select_plan(

let mut table_references = TableReferences::new(vec![], outer_query_refs.to_vec());

if from.is_none() {
for column in &columns {
if matches!(column, ResultColumn::Star) {
crate::bail_parse_error!("no tables specified");
}
}
}

// Parse the FROM clause into a vec of TableReferences. Fold all the join conditions expressions into the WHERE clause.
parse_from(
schema,
@@ -298,7 +297,7 @@ fn prepare_one_select_plan(
.find(|t| t.identifier == name_normalized);

if referenced_table.is_none() {
crate::bail_parse_error!("Table {} not found", name.0);
crate::bail_parse_error!("no such table: {}", name.0);
}
let table = referenced_table.unwrap();
let num_columns = table.columns().len();
@@ -349,10 +348,7 @@ fn prepare_one_select_plan(
if distinctness.is_distinct() && args_count != 1 {
crate::bail_parse_error!("DISTINCT aggregate functions must have exactly one argument");
}
match Func::resolve_function(
normalize_ident(name.0.as_str()).as_str(),
args_count,
) {
match Func::resolve_function(&name.0, args_count) {
Ok(Func::Agg(f)) => {
let agg_args = match (args, &f) {
(None, crate::function::AggFunc::Count0) => {
@@ -451,11 +447,8 @@ fn prepare_one_select_plan(
ast::Expr::FunctionCallStar {
name,
filter_over: _,
} => {
if let Ok(Func::Agg(f)) = Func::resolve_function(
normalize_ident(name.0.as_str()).as_str(),
0,
) {
} => match Func::resolve_function(&name.0, 0) {
Ok(Func::Agg(f)) => {
let agg = Aggregate {
func: f,
args: vec![ast::Expr::Literal(ast::Literal::Numeric(
@@ -473,13 +466,25 @@ fn prepare_one_select_plan(
expr: expr.clone(),
contains_aggregates: true,
});
} else {
}
Ok(_) => {
crate::bail_parse_error!(
"Invalid aggregate function: {}",
name.0
);
}
}
Err(e) => match e {
crate::LimboError::ParseError(e) => {
crate::bail_parse_error!("{}", e);
}
_ => {
crate::bail_parse_error!(
"Invalid aggregate function: {}",
name.0
);
}
},
},
expr => {
let contains_aggregates =
resolve_aggregates(schema, expr, &mut aggregate_expressions)?;

@@ -82,6 +82,7 @@ pub fn emit_subquery(
reg_limit_offset_sum: None,
resolver: Resolver::new(t_ctx.resolver.schema, t_ctx.resolver.symbol_table),
non_aggregate_expressions: Vec::new(),
cdc_cursor_id: None,
};
let subquery_body_end_label = program.allocate_label();
program.emit_insn(Insn::InitCoroutine {

@@ -141,7 +141,13 @@ where
D: serde::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
s.parse().map_err(serde::de::Error::custom)
match crate::numeric::str_to_f64(s) {
Some(result) => Ok(match result {
crate::numeric::StrToF64::Fractional(non_nan) => non_nan.into(),
crate::numeric::StrToF64::Decimal(non_nan) => non_nan.into(),
}),
None => Err(serde::de::Error::custom("")),
}
}

#[derive(Debug, Clone)]
@@ -231,6 +237,20 @@ impl Value {
}
}

pub fn as_blob(&self) -> &Vec<u8> {
match self {
Value::Blob(b) => b,
_ => panic!("as_blob must be called only for Value::Blob"),
}
}

pub fn as_blob_mut(&mut self) -> &mut Vec<u8> {
match self {
Value::Blob(b) => b,
_ => panic!("as_blob must be called only for Value::Blob"),
}
}

pub fn from_text(text: &str) -> Self {
Value::Text(Text::new(text))
}
@@ -738,7 +758,9 @@ pub struct ImmutableRecord {
// We have to be super careful with this buffer: values point into the payload, so reallocations
// must happen in a controlled manner. If we reallocate while values still point into it, they will point to undefined data.
// We don't use Pin here because it would make it impossible to reuse the buffer if we need to push a new record in the same struct.
payload: Vec<u8>,
//
// payload is the Vec<u8> but in order to use Register which holds ImmutableRecord as a Value - we store Vec<u8> as Value::Blob
payload: Value,
pub values: Vec<RefValue>,
recreating: bool,
}
@@ -828,7 +850,7 @@ impl<'a> AppendWriter<'a> {
impl ImmutableRecord {
pub fn new(payload_capacity: usize, value_capacity: usize) -> Self {
Self {
payload: Vec::with_capacity(payload_capacity),
payload: Value::Blob(Vec::with_capacity(payload_capacity)),
values: Vec::with_capacity(value_capacity),
recreating: false,
}
@@ -977,7 +999,7 @@ impl ImmutableRecord {

writer.assert_finish_capacity();
Self {
payload: buf,
payload: Value::Blob(buf),
values,
recreating: false,
}
@@ -985,7 +1007,7 @@ impl ImmutableRecord {

pub fn start_serialization(&mut self, payload: &[u8]) {
self.recreating = true;
self.payload.extend_from_slice(payload);
self.payload.as_blob_mut().extend_from_slice(payload);
}
pub fn end_serialization(&mut self) {
assert!(self.recreating);
@@ -998,15 +1020,19 @@ impl ImmutableRecord {
}

pub fn invalidate(&mut self) {
self.payload.clear();
self.payload.as_blob_mut().clear();
self.values.clear();
}

pub fn is_invalidated(&self) -> bool {
self.payload.is_empty()
self.payload.as_blob().is_empty()
}

pub fn get_payload(&self) -> &[u8] {
self.payload.as_blob()
}

pub fn as_blob_value(&self) -> &Value {
&self.payload
}
}
@@ -1042,20 +1068,20 @@ impl Clone for ImmutableRecord {
RefValue::Float(f) => RefValue::Float(*f),
RefValue::Text(text_ref) => {
// let's update pointer
let ptr_start = self.payload.as_ptr() as usize;
let ptr_start = self.payload.as_blob().as_ptr() as usize;
let ptr_end = text_ref.value.data as usize;
let len = ptr_end - ptr_start;
let new_ptr = unsafe { new_payload.as_ptr().add(len) };
let new_ptr = unsafe { new_payload.as_blob().as_ptr().add(len) };
RefValue::Text(TextRef {
value: RawSlice::new(new_ptr, text_ref.value.len),
subtype: text_ref.subtype.clone(),
})
}
RefValue::Blob(raw_slice) => {
let ptr_start = self.payload.as_ptr() as usize;
let ptr_start = self.payload.as_blob().as_ptr() as usize;
let ptr_end = raw_slice.data as usize;
let len = ptr_end - ptr_start;
let new_ptr = unsafe { new_payload.as_ptr().add(len) };
let new_ptr = unsafe { new_payload.as_blob().as_ptr().add(len) };
RefValue::Blob(RawSlice::new(new_ptr, raw_slice.len))
}
};

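The Clone impl above rebases each RefValue pointer onto the freshly copied payload by recomputing its byte offset from the old base. A minimal model of that arithmetic (an illustrative helper, not a repo function):

// Each RefValue stores a raw pointer into the record's payload. Cloning
// copies the payload, so every pointer must be rebased:
//   offset  = old_ptr - old_base
//   new_ptr = new_base + offset
fn rebase(old_base: *const u8, old_ptr: *const u8, new_base: *const u8) -> *const u8 {
    let offset = old_ptr as usize - old_base as usize;
    // Safety (in the real code): offset lies within the cloned payload,
    // because old_ptr was derived from old_base's allocation.
    unsafe { new_base.add(offset) }
}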
59
core/util.rs
@@ -3,7 +3,7 @@ use crate::{
schema::{self, Column, Schema, Type},
translate::{collate::CollationSeq, expr::walk_expr, plan::JoinOrderMember},
types::{Value, ValueType},
LimboError, OpenFlags, Result, Statement, StepResult, SymbolTable, IO,
LimboError, OpenFlags, Result, Statement, StepResult, SymbolTable,
};
use std::{rc::Rc, sync::Arc};
use turso_sqlite3_parser::ast::{
@@ -51,7 +51,6 @@ struct UnparsedFromSqlIndex {
pub fn parse_schema_rows(
rows: Option<Statement>,
schema: &mut Schema,
io: Arc<dyn IO>,
syms: &SymbolTable,
mv_tx_id: Option<u64>,
) -> Result<()> {
@@ -130,7 +129,7 @@ pub fn parse_schema_rows(
StepResult::IO => {
// TODO: How do we ensure that the I/O we submitted to
// read the schema is actually complete?
io.run_once()?;
rows.run_once()?;
}
StepResult::Interrupt => break,
StepResult::Done => break,
@@ -1044,6 +1043,41 @@ pub fn parse_signed_number(expr: &Expr) -> Result<Value> {
}
}

pub fn parse_string(expr: &Expr) -> Result<String> {
match expr {
Expr::Name(ast::Name(s)) if s.len() >= 2 && s.starts_with("'") && s.ends_with("'") => {
Ok(s[1..s.len() - 1].to_string())
}
_ => Err(LimboError::InvalidArgument(format!(
"string parameter expected, got {:?} instead",
expr
))),
}
}

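Per the matching rule above, only a single-quoted name parses, and it parses to its inner text. A hypothetical test sketching the behavior (not part of the patch):

#[test]
fn parse_string_accepts_only_quoted_names() {
    let quoted = Expr::Name(ast::Name("'turso_cdc'".to_string()));
    assert_eq!(parse_string(&quoted).unwrap(), "turso_cdc");

    // Unquoted identifiers are rejected with InvalidArgument.
    let unquoted = Expr::Name(ast::Name("turso_cdc".to_string()));
    assert!(parse_string(&unquoted).is_err());
}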
#[allow(unused)]
pub fn parse_pragma_bool(expr: &Expr) -> Result<bool> {
const TRUE_VALUES: &[&str] = &["yes", "true", "on"];
const FALSE_VALUES: &[&str] = &["no", "false", "off"];
if let Ok(number) = parse_signed_number(expr) {
if let Value::Integer(x @ (0 | 1)) = number {
return Ok(x != 0);
}
} else if let Expr::Name(name) = expr {
let ident = normalize_ident(&name.0);
if TRUE_VALUES.contains(&ident.as_str()) {
return Ok(true);
}
if FALSE_VALUES.contains(&ident.as_str()) {
return Ok(false);
}
}
Err(LimboError::InvalidArgument(
"boolean pragma value must be either 0|1 integer or yes|true|on|no|false|off token"
.to_string(),
))
}

// for TVF's we need these at planning time so we cannot emit translate_expr
pub fn vtable_args(args: &[ast::Expr]) -> Vec<turso_ext::Value> {
let mut vtable_args = Vec::new();
@@ -1076,7 +1110,7 @@ pub fn vtable_args(args: &[ast::Expr]) -> Vec<turso_ext::Value> {
#[cfg(test)]
pub mod tests {
use super::*;
use turso_sqlite3_parser::ast::{self, Expr, Id, Literal, Operator::*, Type};
use turso_sqlite3_parser::ast::{self, Expr, Id, Literal, Name, Operator::*, Type};

#[test]
fn test_normalize_ident() {
@@ -2031,4 +2065,21 @@ pub mod tests {
Value::Float(-9.223_372_036_854_776e18)
);
}

#[test]
fn test_parse_pragma_bool() {
assert!(parse_pragma_bool(&Expr::Literal(Literal::Numeric("1".into()))).unwrap(),);
assert!(parse_pragma_bool(&Expr::Name(Name("true".into()))).unwrap(),);
assert!(parse_pragma_bool(&Expr::Name(Name("on".into()))).unwrap(),);
assert!(parse_pragma_bool(&Expr::Name(Name("yes".into()))).unwrap(),);

assert!(!parse_pragma_bool(&Expr::Literal(Literal::Numeric("0".into()))).unwrap(),);
assert!(!parse_pragma_bool(&Expr::Name(Name("false".into()))).unwrap(),);
assert!(!parse_pragma_bool(&Expr::Name(Name("off".into()))).unwrap(),);
assert!(!parse_pragma_bool(&Expr::Name(Name("no".into()))).unwrap(),);

assert!(parse_pragma_bool(&Expr::Name(Name("nono".into()))).is_err());
assert!(parse_pragma_bool(&Expr::Name(Name("10".into()))).is_err());
assert!(parse_pragma_bool(&Expr::Name(Name("-1".into()))).is_err());
}
}

@@ -12,7 +12,7 @@ use crate::{
emitter::TransactionMode,
plan::{ResultSetColumn, TableReferences},
},
Connection, Value, VirtualTable,
CaptureDataChangesMode, Connection, Value, VirtualTable,
};

#[derive(Default)]
@@ -110,6 +110,7 @@ pub struct ProgramBuilder {
nested_level: usize,
init_label: BranchOffset,
start_offset: BranchOffset,
capture_data_changes_mode: CaptureDataChangesMode,
}

#[derive(Debug, Clone)]
@@ -149,7 +150,11 @@ pub struct ProgramBuilderOpts {
}

impl ProgramBuilder {
pub fn new(query_mode: QueryMode, opts: ProgramBuilderOpts) -> Self {
pub fn new(
query_mode: QueryMode,
capture_data_changes_mode: CaptureDataChangesMode,
opts: ProgramBuilderOpts,
) -> Self {
Self {
table_reference_counter: TableRefIdCounter::new(),
next_free_register: 1,
@@ -172,9 +177,14 @@ impl ProgramBuilder {
// These labels will be filled when `prologue()` is called
init_label: BranchOffset::Placeholder,
start_offset: BranchOffset::Placeholder,
capture_data_changes_mode,
}
}

pub fn capture_data_changes_mode(&self) -> &CaptureDataChangesMode {
&self.capture_data_changes_mode
}

pub fn extend(&mut self, opts: &ProgramBuilderOpts) {
self.insns.reserve(opts.approx_num_insns);
self.cursor_ref.reserve(opts.num_cursors);
@@ -291,7 +301,7 @@ impl ProgramBuilder {
});
}

#[instrument(skip(self), level = Level::TRACE)]
#[instrument(skip(self), level = Level::INFO)]
pub fn emit_insn(&mut self, insn: Insn) {
let function = insn.to_function();
// This seemingly empty trace here is needed so that a function span is emitted with it

@@ -1699,22 +1699,22 @@ pub fn op_transaction(
} else {
let current_state = conn.transaction_state.get();
let (new_transaction_state, updated) = match (current_state, write) {
(TransactionState::Write { change_schema }, true) => {
(TransactionState::Write { change_schema }, false)
(TransactionState::Write { schema_did_change }, true) => {
(TransactionState::Write { schema_did_change }, false)
}
(TransactionState::Write { change_schema }, false) => {
(TransactionState::Write { change_schema }, false)
(TransactionState::Write { schema_did_change }, false) => {
(TransactionState::Write { schema_did_change }, false)
}
(TransactionState::Read, true) => (
TransactionState::Write {
change_schema: false,
schema_did_change: false,
},
true,
),
(TransactionState::Read, false) => (TransactionState::Read, false),
(TransactionState::None, true) => (
TransactionState::Write {
change_schema: false,
schema_did_change: false,
},
true,
),
@@ -1766,9 +1766,9 @@ pub fn op_auto_commit(
super::StepResult::Busy => Ok(InsnFunctionStepResult::Busy),
};
}
let change_schema =
if let TransactionState::Write { change_schema } = conn.transaction_state.get() {
change_schema
let schema_did_change =
if let TransactionState::Write { schema_did_change } = conn.transaction_state.get() {
schema_did_change
} else {
false
};
@@ -1776,7 +1776,7 @@ pub fn op_auto_commit(
if *auto_commit != conn.auto_commit.get() {
if *rollback {
// TODO(pere): add rollback I/O logic once we implement rollback journal
pager.rollback(change_schema, &conn)?;
pager.rollback(schema_did_change, &conn)?;
conn.auto_commit.replace(true);
} else {
conn.auto_commit.replace(*auto_commit);
@@ -3454,6 +3454,7 @@ pub fn op_function(
let pattern = &state.registers[*start_reg];
let text = &state.registers[*start_reg + 1];
let result = match (pattern.get_owned_value(), text.get_owned_value()) {
(Value::Null, _) | (_, Value::Null) => Value::Null,
(Value::Text(pattern), Value::Text(text)) => {
let cache = if *constant_mask > 0 {
Some(&mut state.regex_cache.glob)
@@ -3462,8 +3463,16 @@ pub fn op_function(
};
Value::Integer(exec_glob(cache, pattern.as_str(), text.as_str()) as i64)
}
_ => {
unreachable!("Like on non-text registers");
// Convert any other value types to text for GLOB comparison
(pattern_val, text_val) => {
let pattern_str = pattern_val.to_string();
let text_str = text_val.to_string();
let cache = if *constant_mask > 0 {
Some(&mut state.regex_cache.glob)
} else {
None
};
Value::Integer(exec_glob(cache, &pattern_str, &text_str) as i64)
}
};
state.registers[*dest] = Register::Value(result);
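With the fallback arm above, non-text and non-NULL operands are stringified before matching, which lines up with SQLite's text coercion for GLOB. A small self-contained model of the observable behavior (glob_as_text is an illustrative stand-in for exec_glob, handling only the trailing-'*' case):

fn glob_as_text(pattern: &dyn std::fmt::Display, text: &dyn std::fmt::Display) -> bool {
    let (p, t) = (pattern.to_string(), text.to_string());
    match p.strip_suffix('*') {
        Some(prefix) => t.starts_with(prefix),
        None => p == t,
    }
}

fn main() {
    assert!(glob_as_text(&"1*", &123));  // SELECT 123 GLOB '1*'  -> 1
    assert!(!glob_as_text(&4, &"abc"));  // SELECT 'abc' GLOB 4   -> 0
    // NULL operands never reach this path: the first match arm returns NULL.
}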
@@ -4240,6 +4249,14 @@ pub fn op_yield(
Ok(InsnFunctionStepResult::Step)
}

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum OpInsertState {
Insert,
/// Updating last_insert_rowid may return IO, so we need a separate state for it so that we don't
/// start inserting the same row multiple times.
UpdateLastRowid,
}

pub fn op_insert(
program: &Program,
state: &mut ProgramState,
@@ -4248,7 +4265,7 @@ pub fn op_insert(
mv_store: Option<&Rc<MvStore>>,
) -> Result<InsnFunctionStepResult> {
let Insn::Insert {
cursor,
cursor: cursor_id,
key_reg,
record_reg,
flag,
@@ -4257,9 +4274,27 @@ pub fn op_insert(
else {
unreachable!("unexpected Insn {:?}", insn)
};

if state.op_insert_state == OpInsertState::UpdateLastRowid {
let maybe_rowid = {
let mut cursor = state.get_cursor(*cursor_id);
let cursor = cursor.as_btree_mut();
return_if_io!(cursor.rowid())
};
if let Some(rowid) = maybe_rowid {
program.connection.update_last_rowid(rowid);

let prev_changes = program.n_change.get();
program.n_change.set(prev_changes + 1);
}
state.op_insert_state = OpInsertState::Insert;
state.pc += 1;
return Ok(InsnFunctionStepResult::Step);
}

{
let mut cursor = state.get_cursor(*cursor);
let cursor = cursor.as_btree_mut();
let mut cursor_ref = state.get_cursor(*cursor_id);
let cursor = cursor_ref.as_btree_mut();

let key = match &state.registers[*key_reg].get_owned_value() {
Value::Integer(i) => *i,
@@ -4279,18 +4314,19 @@ pub fn op_insert(
};

return_if_io!(cursor.insert(&BTreeKey::new_table_rowid(key, Some(record.as_ref())), true));
// Only update last_insert_rowid for regular table inserts, not schema modifications
if cursor.root_page() != 1 {
if let Some(rowid) = return_if_io!(cursor.rowid()) {
program.connection.update_last_rowid(rowid);

let prev_changes = program.n_change.get();
program.n_change.set(prev_changes + 1);
}
}
}

state.pc += 1;
// Only update last_insert_rowid for regular table inserts, not schema modifications
let root_page = {
let mut cursor = state.get_cursor(*cursor_id);
let cursor = cursor.as_btree_mut();
cursor.root_page()
};
if root_page != 1 {
state.op_insert_state = OpInsertState::UpdateLastRowid;
} else {
state.pc += 1;
}
Ok(InsnFunctionStepResult::Step)
}

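op_insert is now a small resumable state machine: any step that can return IO leaves the current state recorded, so re-entry resumes where it left off instead of repeating the insert. A generic sketch of the pattern, with made-up names (Io, Step) rather than the engine's types:

enum Io<T> { Ready(T), Pending }

#[derive(PartialEq)]
enum Step { Insert, UpdateLastRowid }

// Each call makes progress and returns Pending while I/O is outstanding;
// the stored Step guarantees the insert itself runs exactly once.
fn step_insert(
    state: &mut Step,
    do_insert: &mut dyn FnMut() -> Io<()>,
    read_rowid: &mut dyn FnMut() -> Io<i64>,
) -> Io<Option<i64>> {
    loop {
        match state {
            Step::Insert => match do_insert() {
                Io::Pending => return Io::Pending, // resume here later
                Io::Ready(()) => *state = Step::UpdateLastRowid,
            },
            Step::UpdateLastRowid => match read_rowid() {
                Io::Pending => return Io::Pending, // insert is not repeated
                Io::Ready(rowid) => {
                    *state = Step::Insert; // reset for the next row
                    return Io::Ready(Some(rowid));
                }
            },
        }
    }
}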
@@ -4353,6 +4389,7 @@ pub fn op_idx_delete(
|
||||
cursor_id,
|
||||
start_reg,
|
||||
num_regs,
|
||||
raise_error_if_no_matching_entry,
|
||||
} = insn
|
||||
else {
|
||||
unreachable!("unexpected Insn {:?}", insn)
|
||||
@@ -4368,7 +4405,7 @@ pub fn op_idx_delete(
|
||||
);
|
||||
match &state.op_idx_delete_state {
|
||||
Some(OpIdxDeleteState::Seeking(record)) => {
|
||||
{
|
||||
let found = {
|
||||
let mut cursor = state.get_cursor(*cursor_id);
|
||||
let cursor = cursor.as_btree_mut();
|
||||
let found = return_if_io!(
|
||||
@@ -4380,6 +4417,21 @@ pub fn op_idx_delete(
|
||||
cursor.root_page(),
|
||||
record
|
||||
);
|
||||
found
|
||||
};
|
||||
|
||||
if !found {
|
||||
// If P5 is not zero, then raise an SQLITE_CORRUPT_INDEX error if no matching index entry is found
|
||||
// Also, do not raise this (self-correcting and non-critical) error if in writable_schema mode.
|
||||
if *raise_error_if_no_matching_entry {
|
||||
return Err(LimboError::Corrupt(format!(
|
||||
"IdxDelete: no matching index entry found for record {:?}",
|
||||
record
|
||||
)));
|
||||
}
|
||||
state.pc += 1;
|
||||
state.op_idx_delete_state = None;
|
||||
return Ok(InsnFunctionStepResult::Step);
|
||||
}
|
||||
state.op_idx_delete_state = Some(OpIdxDeleteState::Verifying);
|
||||
}
|
||||
@@ -4390,12 +4442,7 @@ pub fn op_idx_delete(
|
||||
return_if_io!(cursor.rowid())
|
||||
};
|
||||
|
||||
if rowid.is_none() {
|
||||
// If P5 is not zero, then raise an SQLITE_CORRUPT_INDEX error if no matching
|
||||
// index entry is found. This happens when running an UPDATE or DELETE statement and the
|
||||
// index entry to be updated or deleted is not found. For some uses of IdxDelete
|
||||
// (example: the EXCEPT operator) it does not matter that no matching entry is found.
|
||||
// For those cases, P5 is zero. Also, do not raise this (self-correcting and non-critical) error if in writable_schema mode.
|
||||
if rowid.is_none() && *raise_error_if_no_matching_entry {
|
||||
return Err(LimboError::Corrupt(format!(
|
||||
"IdxDelete: no matching index entry found for record {:?}",
|
||||
make_record(&state.registers, start_reg, num_regs)
|
||||
@@ -4423,6 +4470,17 @@ pub fn op_idx_delete(
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Copy, Clone)]
|
||||
pub enum OpIdxInsertState {
|
||||
/// Optional seek step done before an unique constraint check.
|
||||
SeekIfUnique,
|
||||
/// Optional unique constraint check done before an insert.
|
||||
UniqueConstraintCheck,
|
||||
/// Main insert step. This is always performed. Usually the state machine just
|
||||
/// skips to this step unless the insertion is made into a unique index.
|
||||
Insert { moved_before: bool },
|
||||
}
|
||||
|
||||
pub fn op_idx_insert(
|
||||
program: &Program,
|
||||
state: &mut ProgramState,
|
||||
@@ -4430,69 +4488,118 @@ pub fn op_idx_insert(
|
    pager: &Rc<Pager>,
    mv_store: Option<&Rc<MvStore>>,
) -> Result<InsnFunctionStepResult> {
    if let Insn::IdxInsert {
    let Insn::IdxInsert {
        cursor_id,
        record_reg,
        flags,
        ..
    } = *insn
    {
        let (_, cursor_type) = program.cursor_ref.get(cursor_id).unwrap();
        let CursorType::BTreeIndex(index_meta) = cursor_type else {
            panic!("IdxInsert: not a BTree index cursor");
        };
        {
            let mut cursor = state.get_cursor(cursor_id);
            let cursor = cursor.as_btree_mut();
            let record = match &state.registers[record_reg] {
                Register::Record(ref r) => r,
                o => {
                    return Err(LimboError::InternalError(format!(
                        "expected record, got {:?}",
                        o
                    )));
                }
            };
            // To make this reentrant in case of `moved_before` = false, we need to check if the previous cursor.insert started
            // a write/balancing operation. If it did, it means we already moved to the place we wanted.
            let moved_before = if cursor.is_write_in_progress() {
                true
            } else if index_meta.unique {
                // check for uniqueness violation
                match cursor.key_exists_in_index(record)? {
                    CursorResult::Ok(true) => {
                        return Err(LimboError::Constraint(
                            "UNIQUE constraint failed: duplicate key".into(),
                        ))
                    }
                    CursorResult::IO => return Ok(InsnFunctionStepResult::IO),
                    CursorResult::Ok(false) => {}
                };
                // The uniqueness check already moved us to the correct place in the index.
                // The uniqueness check uses SeekOp::GE, which means a non-matching entry
                // will now be positioned at the insertion point where there currently is
                // a) nothing, or
                // b) the first entry greater than the key we are inserting.
                // In both cases, we can insert the new entry without moving again.
                //
                // This is re-entrant, because once we call cursor.insert() with moved_before=true,
                // we will immediately set BTreeCursor::state to CursorState::Write(WriteInfo::new())
                // in BTreeCursor::insert_into_page; thus, if this function is called again,
                // moved_before will again be true due to cursor.is_write_in_progress() returning true.
                true
            } else {
                flags.has(IdxInsertFlags::USE_SEEK)
            };
    else {
        unreachable!("unexpected Insn {:?}", insn)
    };

            // Start insertion of the row. This might trigger a balance procedure which will take care of moving to different pages,
            // therefore we don't want to seek again if that happens, meaning we don't want to return on IO without moving to the following opcode,
            // because that could trigger a movement to a child page after a root balance, which would leave the current page as the root page.
            return_if_io!(cursor.insert(&BTreeKey::new_index_key(record), moved_before));
    let record_to_insert = match &state.registers[record_reg] {
        Register::Record(ref r) => r,
        o => {
            return Err(LimboError::InternalError(format!(
                "expected record, got {:?}",
                o
            )));
        }
    };

    match state.op_idx_insert_state {
        OpIdxInsertState::SeekIfUnique => {
            let (_, cursor_type) = program.cursor_ref.get(cursor_id).unwrap();
            let CursorType::BTreeIndex(index_meta) = cursor_type else {
                panic!("IdxInsert: not a BTreeIndex cursor");
            };
            if !index_meta.unique {
                state.op_idx_insert_state = OpIdxInsertState::Insert {
                    moved_before: false,
                };
                return Ok(InsnFunctionStepResult::Step);
            }
            {
                let mut cursor = state.get_cursor(cursor_id);
                let cursor = cursor.as_btree_mut();

                return_if_io!(cursor.seek(
                    SeekKey::IndexKey(record_to_insert),
                    SeekOp::GE { eq_only: true }
                ));
            }
            state.op_idx_insert_state = OpIdxInsertState::UniqueConstraintCheck;
            Ok(InsnFunctionStepResult::Step)
        }
        OpIdxInsertState::UniqueConstraintCheck => {
            let ignore_conflict = 'i: {
                let mut cursor = state.get_cursor(cursor_id);
                let cursor = cursor.as_btree_mut();
                let record_opt = return_if_io!(cursor.record());
                let Some(record) = record_opt.as_ref() else {
                    // Cursor not pointing at a record — table is empty or past last
                    break 'i false;
                };
                // Cursor is pointing at a record; if the index has a rowid, exclude it from the comparison since it's a pointer to the table row;
                // UNIQUE indexes disallow duplicates like (a=1,b=2,rowid=1) and (a=1,b=2,rowid=2).
                let existing_key = if cursor.has_rowid() {
                    &record.get_values()[..record.count().saturating_sub(1)]
                } else {
                    record.get_values()
                };
                let inserted_key_vals = &record_to_insert.get_values();
                if existing_key.len() != inserted_key_vals.len() {
                    break 'i false;
                }

                let conflict = compare_immutable(
                    existing_key,
                    inserted_key_vals,
                    cursor.key_sort_order(),
                    &cursor.collations,
                ) == std::cmp::Ordering::Equal;
                if conflict {
                    if flags.has(IdxInsertFlags::NO_OP_DUPLICATE) {
                        break 'i true;
                    }
                    return Err(LimboError::Constraint(
                        "UNIQUE constraint failed: duplicate key".into(),
                    ));
                }

                false
            };
            state.op_idx_insert_state = if ignore_conflict {
                state.pc += 1;
                OpIdxInsertState::SeekIfUnique
            } else {
                OpIdxInsertState::Insert { moved_before: true }
            };
            Ok(InsnFunctionStepResult::Step)
        }
        OpIdxInsertState::Insert { moved_before } => {
            {
                let mut cursor = state.get_cursor(cursor_id);
                let cursor = cursor.as_btree_mut();
                // To make this reentrant in case of `moved_before` = false, we need to check if the previous cursor.insert started
                // a write/balancing operation. If it did, it means we already moved to the place we wanted.
                let moved_before = moved_before
                    || cursor.is_write_in_progress()
                    || flags.has(IdxInsertFlags::USE_SEEK);
                // Start insertion of the row. This might trigger a balance procedure which will take care of moving to different pages,
                // therefore we don't want to seek again if that happens, meaning we don't want to return on IO without moving to the following opcode,
                // because that could trigger a movement to a child page after a root balance, which would leave the current page as the root page.
                return_if_io!(
                    cursor.insert(&BTreeKey::new_index_key(record_to_insert), moved_before)
                );
            }
            state.op_idx_insert_state = OpIdxInsertState::SeekIfUnique;
            state.pc += 1;
            // TODO: flag optimizations, update n_change if OPFLAG_NCHANGE
            Ok(InsnFunctionStepResult::Step)
        }
        // TODO: flag optimizations, update n_change if OPFLAG_NCHANGE
        state.pc += 1;
    }
    Ok(InsnFunctionStepResult::Step)
}

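Reviewer note: the new OpIdxInsertState machine replaces the old single-pass body so that each await point (the seek, the uniqueness probe, the insert itself) can return on pending IO and be re-entered without redoing earlier work. A minimal sketch of that resumable-opcode pattern follows; every type here is a simplified stand-in, not Limbo's real API:

// Sketch only: models the "return on IO, re-enter at the same sub-state" shape.
enum StepResult { Io, Step }

#[derive(Clone, Copy)]
enum OpIdxInsertState {
    SeekIfUnique,
    UniqueConstraintCheck,
    Insert { moved_before: bool },
}

struct Vm {
    state: OpIdxInsertState,
    pending_io: u8, // pretend each sub-operation needs one IO round-trip
}

impl Vm {
    // Each call advances at most one sub-state; on pending IO it returns
    // without advancing, so the caller can simply call step() again later.
    fn step(&mut self) -> StepResult {
        match self.state {
            OpIdxInsertState::SeekIfUnique => {
                if self.wait_io() { return StepResult::Io; }
                self.state = OpIdxInsertState::UniqueConstraintCheck;
            }
            OpIdxInsertState::UniqueConstraintCheck => {
                if self.wait_io() { return StepResult::Io; }
                // the seek already positioned the cursor at the insertion point
                self.state = OpIdxInsertState::Insert { moved_before: true };
            }
            OpIdxInsertState::Insert { moved_before } => {
                let _ = moved_before; // the real code skips the extra seek here
                self.state = OpIdxInsertState::SeekIfUnique; // reset for next use
            }
        }
        StepResult::Step
    }

    fn wait_io(&mut self) -> bool {
        if self.pending_io > 0 { self.pending_io -= 1; true } else { false }
    }
}

fn main() {
    let mut vm = Vm { state: OpIdxInsertState::SeekIfUnique, pending_io: 2 };
    // Drive to completion; an Io result just means "call step() again later".
    for _ in 0..6 { let _ = vm.step(); }
}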
pub fn op_new_rowid(
@@ -4969,7 +5076,6 @@ pub fn op_parse_schema(
    parse_schema_rows(
        Some(stmt),
        &mut new_schema,
        conn.pager.io.clone(),
        &conn.syms.borrow(),
        state.mv_tx_id,
    )?;
@@ -4984,7 +5090,6 @@ pub fn op_parse_schema(
    parse_schema_rows(
        Some(stmt),
        &mut new_schema,
        conn.pager.io.clone(),
        &conn.syms.borrow(),
        state.mv_tx_id,
    )?;
@@ -5056,8 +5161,8 @@ pub fn op_set_cookie(
    Cookie::SchemaVersion => {
        // we update transaction state to indicate that the schema has changed
        match program.connection.transaction_state.get() {
            TransactionState::Write { change_schema } => {
                program.connection.transaction_state.set(TransactionState::Write { change_schema: true });
            TransactionState::Write { schema_did_change } => {
                program.connection.transaction_state.set(TransactionState::Write { schema_did_change: true });
            },
            TransactionState::Read => unreachable!("invalid transaction state for SetCookie: TransactionState::Read, should be write"),
            TransactionState::None => unreachable!("invalid transaction state for SetCookie: TransactionState::None, should be write"),

@@ -1120,13 +1120,14 @@ pub fn insn_to_str(
    cursor_id,
    start_reg,
    num_regs,
    raise_error_if_no_matching_entry,
} => (
    "IdxDelete",
    *cursor_id as i32,
    *start_reg as i32,
    *num_regs as i32,
    Value::build_text(""),
    0,
    *raise_error_if_no_matching_entry as u16,
    "".to_string(),
),
Insn::NewRowid {

@@ -63,6 +63,7 @@ impl IdxInsertFlags {
    pub const APPEND: u8 = 0x01; // Hint: insert likely at the end
    pub const NCHANGE: u8 = 0x02; // Increment the change counter
    pub const USE_SEEK: u8 = 0x04; // Skip seek if last one was same key
    pub const NO_OP_DUPLICATE: u8 = 0x08; // Do not error on duplicate key
    pub fn new() -> Self {
        IdxInsertFlags(0)
    }
@@ -93,6 +94,14 @@ impl IdxInsertFlags {
        }
        self
    }
    /// If this is set, we will not error on duplicate key.
    /// This is a bit of a hack we use to make ephemeral indexes for UNION work --
    /// instead we should allow overwriting index interior cells, which we currently don't;
    /// this should (and will) be fixed in a future PR.
    pub fn no_op_duplicate(mut self) -> Self {
        self.0 |= IdxInsertFlags::NO_OP_DUPLICATE;
        self
    }
}

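Reviewer note: the flag set is a plain u8 bitmask behind a newtype, composed with builder-style methods. A self-contained sketch of the same scheme (re-declared here so it runs standalone; `has` is assumed to test the bit, matching the `flags.has(IdxInsertFlags::USE_SEEK)` call sites above):

#[derive(Clone, Copy, Debug, Default)]
struct IdxInsertFlags(u8);

impl IdxInsertFlags {
    const APPEND: u8 = 0x01;          // hint: insert likely at the end
    const NCHANGE: u8 = 0x02;         // increment the change counter
    const USE_SEEK: u8 = 0x04;        // skip seek if last one was same key
    const NO_OP_DUPLICATE: u8 = 0x08; // do not error on duplicate key

    fn new() -> Self { IdxInsertFlags(0) }
    fn no_op_duplicate(mut self) -> Self {
        self.0 |= Self::NO_OP_DUPLICATE; // set the bit, keep the rest
        self
    }
    fn has(&self, flag: u8) -> bool { self.0 & flag != 0 }
}

fn main() {
    let flags = IdxInsertFlags::new().no_op_duplicate();
    assert!(flags.has(IdxInsertFlags::NO_OP_DUPLICATE));
    assert!(!flags.has(IdxInsertFlags::USE_SEEK)); // unset bits stay off
}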
#[derive(Clone, Copy, Debug, Default)]
@@ -733,10 +742,15 @@ pub enum Insn {
    cursor_id: CursorID,
},

/// If P5 is not zero, then raise an SQLITE_CORRUPT_INDEX error if no matching index entry
/// is found. This happens when running an UPDATE or DELETE statement and the index entry to
/// be updated or deleted is not found. For some uses of IdxDelete (example: the EXCEPT operator)
/// it does not matter that no matching entry is found. For those cases, P5 is zero.
IdxDelete {
    start_reg: usize,
    num_regs: usize,
    cursor_id: CursorID,
    raise_error_if_no_matching_entry: bool, // P5
},

NewRowid {

@@ -29,6 +29,8 @@ use crate::{
    function::{AggFunc, FuncCtx},
    storage::{pager::PagerCacheflushStatus, sqlite3_ondisk::SmallVec},
    translate::plan::TableReferences,
    vdbe::execute::OpIdxInsertState,
    vdbe::execute::OpInsertState,
};

use crate::{
@@ -250,6 +252,8 @@ pub struct ProgramState {
    op_idx_delete_state: Option<OpIdxDeleteState>,
    op_integrity_check_state: OpIntegrityCheckState,
    op_open_ephemeral_state: OpOpenEphemeralState,
    op_idx_insert_state: OpIdxInsertState,
    op_insert_state: OpInsertState,
}

impl ProgramState {
@@ -276,6 +280,8 @@ impl ProgramState {
    op_idx_delete_state: None,
    op_integrity_check_state: OpIntegrityCheckState::Start,
    op_open_ephemeral_state: OpOpenEphemeralState::Start,
    op_idx_insert_state: OpIdxInsertState::SeekIfUnique,
    op_insert_state: OpInsertState::Insert,
    }
}

@@ -334,7 +340,11 @@ impl Register {
    pub fn get_owned_value(&self) -> &Value {
        match self {
            Register::Value(v) => v,
            _ => unreachable!(),
            Register::Record(r) => {
                assert!(!r.is_invalidated());
                r.as_blob_value()
            }
            _ => panic!("register holds unexpected value: {:?}", self),
        }
    }
}
@@ -368,6 +378,7 @@ pub struct Program {
}

impl Program {
    #[instrument(skip_all, level = Level::INFO)]
    pub fn step(
        &self,
        state: &mut ProgramState,
@@ -375,6 +386,14 @@ impl Program {
        pager: Rc<Pager>,
    ) -> Result<StepResult> {
        loop {
            if self.connection.closed.get() {
                // Connection is closed for whatever reason, rollback the transaction.
                let state = self.connection.transaction_state.get();
                if let TransactionState::Write { schema_did_change } = state {
                    pager.rollback(schema_did_change, &self.connection)?
                }
                return Err(LimboError::InternalError("Connection closed".to_string()));
            }
            if state.is_interrupted() {
                return Ok(StepResult::Interrupt);
            }
@@ -382,8 +401,14 @@ impl Program {
            let _ = state.result_row.take();
            let (insn, insn_function) = &self.insns[state.pc as usize];
            trace_insn(self, state.pc as InsnReference, insn);
            let res = insn_function(self, state, insn, &pager, mv_store.as_ref())?;
            match res {
            let res = insn_function(self, state, insn, &pager, mv_store.as_ref());
            if res.is_err() {
                let state = self.connection.transaction_state.get();
                if let TransactionState::Write { schema_did_change } = state {
                    pager.rollback(schema_did_change, &self.connection)?
                }
            }
            match res? {
                InsnFunctionStepResult::Step => {}
                InsnFunctionStepResult::Done => return Ok(StepResult::Done),
                InsnFunctionStepResult::IO => return Ok(StepResult::IO),
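Reviewer note: the important change on the error path is that step() now inspects the opcode result before `?`-propagating it, rolling back any in-flight write transaction first. A reduced sketch of that control flow; the Pager and TransactionState types here are stand-ins, not the real turso_core ones:

// Stand-in types for illustration only.
#[derive(Clone, Copy, Debug)]
enum TransactionState { None, Read, Write { schema_did_change: bool } }

struct Pager;
impl Pager {
    fn rollback(&self, schema_did_change: bool) -> Result<(), String> {
        println!("rolling back (schema_did_change={schema_did_change})");
        Ok(())
    }
}

fn step_once(
    pager: &Pager,
    tx: TransactionState,
    insn_result: Result<(), String>,
) -> Result<(), String> {
    // Key point: check is_err() *before* propagating, so a failed opcode
    // never leaves a half-done write transaction behind.
    if insn_result.is_err() {
        if let TransactionState::Write { schema_did_change } = tx {
            pager.rollback(schema_did_change)?;
        }
    }
    insn_result // the equivalent of `match res? { ... }`
}

fn main() {
    let pager = Pager;
    let tx = TransactionState::Write { schema_did_change: false };
    let err = step_once(&pager, tx, Err("constraint violation".into()));
    assert!(err.is_err()); // the error still propagates, but after cleanup
}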
@@ -394,7 +419,7 @@ impl Program {
        }
    }

    #[instrument(skip_all, level = Level::TRACE)]
    #[instrument(skip_all, level = Level::INFO)]
    pub fn commit_txn(
        &self,
        pager: Rc<Pager>,
@@ -422,7 +447,8 @@ impl Program {
            program_state.commit_state
        );
        if program_state.commit_state == CommitState::Committing {
            let TransactionState::Write { change_schema } = connection.transaction_state.get()
            let TransactionState::Write { schema_did_change } =
                connection.transaction_state.get()
            else {
                unreachable!("invalid state for write commit step")
            };
@@ -431,18 +457,18 @@ impl Program {
                &mut program_state.commit_state,
                &connection,
                rollback,
                change_schema,
                schema_did_change,
            )
        } else if auto_commit {
            let current_state = connection.transaction_state.get();
            tracing::trace!("Auto-commit state: {:?}", current_state);
            match current_state {
                TransactionState::Write { change_schema } => self.step_end_write_txn(
                TransactionState::Write { schema_did_change } => self.step_end_write_txn(
                    &pager,
                    &mut program_state.commit_state,
                    &connection,
                    rollback,
                    change_schema,
                    schema_did_change,
                ),
                TransactionState::Read => {
                    connection.transaction_state.replace(TransactionState::None);
@@ -460,26 +486,32 @@ impl Program {
        }
    }

    #[instrument(skip(self, pager, connection), level = Level::TRACE)]
    #[instrument(skip(self, pager, connection), level = Level::INFO)]
    fn step_end_write_txn(
        &self,
        pager: &Rc<Pager>,
        commit_state: &mut CommitState,
        connection: &Connection,
        rollback: bool,
        change_schema: bool,
        schema_did_change: bool,
    ) -> Result<StepResult> {
        let cacheflush_status = pager.end_tx(
            rollback,
            change_schema,
            schema_did_change,
            connection,
            connection.wal_checkpoint_disabled.get(),
        )?;
        match cacheflush_status {
            PagerCacheflushStatus::Done(_) => {
            PagerCacheflushStatus::Done(status) => {
                if self.change_cnt_on {
                    self.connection.set_changes(self.n_change.get());
                }
                if matches!(
                    status,
                    crate::storage::pager::PagerCacheflushResult::Rollback
                ) {
                    pager.rollback(schema_did_change, connection)?;
                }
                connection.transaction_state.replace(TransactionState::None);
                *commit_state = CommitState::Ready;
            }
@@ -553,7 +585,7 @@ fn make_record(registers: &[Register], start_reg: &usize, count: &usize) -> Immu
    ImmutableRecord::from_registers(regs, regs.len())
}

#[instrument(skip(program), level = Level::TRACE)]
#[instrument(skip(program), level = Level::INFO)]
fn trace_insn(program: &Program, addr: InsnReference, insn: &Insn) {
    if !tracing::enabled!(tracing::Level::TRACE) {
        return;
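Reviewer note: trace_insn keeps its early tracing::enabled! check, so the potentially expensive instruction formatting is skipped entirely unless TRACE is active; only the #[instrument] level changed. The guard pattern in isolation, assuming the tracing crate as a dependency:

use tracing::Level;

fn trace_expensive(value: &impl std::fmt::Debug) {
    // Bail out before doing any formatting work if TRACE is disabled.
    if !tracing::enabled!(Level::TRACE) {
        return;
    }
    tracing::trace!("value = {:?}", value);
}

fn main() {
    // With no subscriber installed, the guard makes this call near-free.
    trace_expensive(&vec![1, 2, 3]);
}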
@@ -194,7 +194,7 @@ fn do_fuzz(expr: Expr) -> Result<Corpus, Box<dyn Error>> {
    loop {
        use turso_core::StepResult;
        match stmt.step()? {
            StepResult::IO => io.run_once()?,
            StepResult::IO => stmt.run_once()?,
            StepResult::Row => {
                let row = stmt.row().unwrap();
                assert_eq!(row.len(), 1, "expr: {:?}", expr);
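Reviewer note: the fuzzer fix swaps io.run_once() for stmt.run_once(), letting the statement drive its own pending IO. The resulting loop shape, sketched with hypothetical stand-in types so it runs standalone (the real Stmt and StepResult live in turso_core):

// Hypothetical stand-ins mirroring the loop shape in the fuzz test.
enum StepResult { Io, Row(i64), Done }

struct Stmt { rows: Vec<i64>, io_budget: u8 }

impl Stmt {
    fn step(&mut self) -> StepResult {
        if self.io_budget > 0 { return StepResult::Io; }
        match self.rows.pop() {
            Some(v) => StepResult::Row(v),
            None => StepResult::Done,
        }
    }
    // In the diff this replaces io.run_once(): the statement itself
    // services whatever IO it is blocked on.
    fn run_once(&mut self) { self.io_budget = self.io_budget.saturating_sub(1); }
}

fn main() {
    let mut stmt = Stmt { rows: vec![42], io_budget: 2 };
    loop {
        match stmt.step() {
            StepResult::Io => stmt.run_once(),
            StepResult::Row(v) => assert_eq!(v, 42),
            StepResult::Done => break,
        }
    }
}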
@@ -45,4 +45,3 @@ def main() -> None:

if __name__ == "__main__":
    main()

@@ -52,4 +52,3 @@ def main() -> None:

if __name__ == "__main__":
    main()

@@ -3,7 +3,7 @@
curl --fail -u "$ANTITHESIS_USER:$ANTITHESIS_PASSWD" \
    -X POST https://$ANTITHESIS_TENANT.antithesis.com/api/v1/launch/limbo \
    -d "{\"params\": { \"antithesis.description\":\"basic_test on main\",
        \"custom.duration\":\"4\",
        \"custom.duration\":\"8\",
        \"antithesis.config_image\":\"$ANTITHESIS_DOCKER_REPO/limbo-config:antithesis-latest\",
        \"antithesis.images\":\"$ANTITHESIS_DOCKER_REPO/limbo-workload:antithesis-latest\",
        \"antithesis.report.recipients\":\"$ANTITHESIS_EMAIL\"

111 scripts/gen-changelog.py Executable file
@@ -0,0 +1,111 @@
#!/usr/bin/env python3
import re
import subprocess
import sys
from collections import defaultdict


def get_git_merges(prev_version):
    """Get merge commits since the previous version tag."""
    try:
        command = f"git log {prev_version}..HEAD | grep 'Merge '"
        result = subprocess.run(command, shell=True, check=True, text=True, capture_output=True)

        merge_lines = []
        for line in result.stdout.strip().split("\n"):
            if not line.strip() or "Merge:" in line:
                continue

            # Extract the commit message and author
            match = re.search(r"Merge '([^']+)' from ([^(]+)", line)
            if match:
                message = match.group(1).strip()
                author = match.group(2).strip()
                merge_lines.append((message, author))

        return merge_lines
    except subprocess.CalledProcessError as e:
        print(f"Error: Failed to get git merge logs: {e}")
        return []


def categorize_commits(merge_lines):
    """Categorize commits into Added, Updated, Fixed."""
    categories = defaultdict(list)

    for message, author in merge_lines:
        # Format the line for our output
        formatted_line = f"* {message} ({author})"

        # Categorize based on keywords in the commit message
        message_lower = message.lower()
        if re.search(r"add|new|implement|support|initial|introduce", message_lower):
            categories["Added"].append(formatted_line)
        elif re.search(r"fix|bug|issue|error|crash|resolve|typo", message_lower):
            categories["Fixed"].append(formatted_line)
        else:
            categories["Updated"].append(formatted_line)

    return categories


def format_changelog(categories):
    """Format the categorized commits into a changelog."""
    changelog = "## Unreleased\n"

    for category in ["Added", "Updated", "Fixed"]:
        changelog += f"### {category}\n"

        if not categories[category]:
            changelog += "\n"
            continue

        for commit_message in categories[category]:
            changelog += f"{commit_message}\n"

        changelog += "\n"

    return changelog


def main():
    if len(sys.argv) != 2:
        print("Usage: python gen-changelog.py <previous_version_tag>")
        print("Example: python gen-changelog.py v0.0.17")
        sys.exit(1)

    prev_version = sys.argv[1]

    # Get merge commits since previous version
    merge_lines = get_git_merges(prev_version)

    if not merge_lines:
        print(f"No merge commits found since {prev_version}")
        return

    # Categorize commits
    categories = categorize_commits(merge_lines)

    # Format changelog
    changelog = format_changelog(categories)

    # Output changelog
    print(changelog)

    # Optionally write to file
    write_to_file = input("Write to CHANGELOG.md? (y/n): ")
    if write_to_file.lower() == "y":
        try:
            with open("CHANGELOG.md", "r") as f:
                content = f.read()
            with open("CHANGELOG.md", "w") as f:
                f.write(changelog + content)
            print("Changelog written to CHANGELOG.md")
        except FileNotFoundError:
            with open("CHANGELOG.md", "w") as f:
                f.write(changelog)
            print("Created new CHANGELOG.md file")


if __name__ == "__main__":
    main()
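Usage note: per the script's own usage text, a typical invocation is `python scripts/gen-changelog.py v0.0.17`; it prints the generated changelog grouped under Added/Updated/Fixed and then offers to prepend it to CHANGELOG.md.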
@@ -1,8 +1,17 @@
#!/bin/bash

# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Go to the project root (one level up from scripts/)
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
TURSODB="$PROJECT_ROOT/target/debug/tursodb"

# Add experimental features for testing
EXPERIMENTAL_FLAGS=""

# if RUST_LOG is non-empty, enable tracing output
if [ -n "$RUST_LOG" ]; then
    target/debug/tursodb -m list -t testing/test.log "$@"
    "$TURSODB" -m list -q $EXPERIMENTAL_FLAGS -t testing/test.log "$@"
else
    target/debug/tursodb -m list "$@"
    "$TURSODB" -m list -q $EXPERIMENTAL_FLAGS "$@"
fi

@@ -1,8 +1,17 @@
#!/bin/bash

# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Go to the project root (one level up from scripts/)
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
TURSODB="$PROJECT_ROOT/target/debug/tursodb"

# Add experimental features for testing
EXPERIMENTAL_FLAGS="--experimental-indexes"

# if RUST_LOG is non-empty, enable tracing output
if [ -n "$RUST_LOG" ]; then
    target/debug/tursodb --experimental-indexes -m list -t testing/test.log "$@"
    "$TURSODB" -m list -q $EXPERIMENTAL_FLAGS -t testing/test.log "$@"
else
    target/debug/tursodb --experimental-indexes -m list "$@"
    "$TURSODB" -m list -q $EXPERIMENTAL_FLAGS "$@"
fi

@@ -2,28 +2,8 @@

set -e

iterations=""
while [[ $# -gt 0 ]]; do
    case $1 in
        --iterations)
            iterations="$2"
            shift 2
            ;;
        *)
            echo "Unknown option: $1"
            echo "Usage: $0 [--max-iterations N]"
            exit 1
            ;;
    esac
done

if [[ -n "$iterations" ]]; then
    echo "Running limbo_sim for $iterations iterations..."
    for ((i=1; i<=iterations; i++)); do
        echo "Iteration $i of $iterations"
        cargo run -p limbo_sim
    done
    echo "Completed $iterations iterations"
if [[ -n "$@" ]]; then
    cargo run -p limbo_sim -- "$@"
else
    echo "Running limbo_sim in infinite loop..."
    while true; do

@@ -2,6 +2,7 @@

import { spawn } from "bun";
import { GithubClient } from "./github";
import { SlackClient } from "./slack";
import { extractFailureInfo } from "./logParse";
import { randomSeed } from "./random";

@@ -12,12 +13,14 @@ const PER_RUN_TIMEOUT_SECONDS = Number.isInteger(Number(process.env.PER_RUN_TIME
const LOG_TO_STDOUT = process.env.LOG_TO_STDOUT === "true";

const github = new GithubClient();
const slack = new SlackClient();

process.env.RUST_BACKTRACE = "1";

console.log("Starting limbo_sim in a loop...");
console.log(`Git hash: ${github.GIT_HASH}`);
console.log(`GitHub issues enabled: ${github.mode === 'real'}`);
console.log(`Slack notifications enabled: ${slack.mode === 'real'}`);
console.log(`Time limit: ${TIME_LIMIT_MINUTES} minutes`);
console.log(`Log simulator output to stdout: ${LOG_TO_STDOUT}`);
console.log(`Sleep between runs: ${SLEEP_BETWEEN_RUNS_SECONDS} seconds`);
@@ -69,7 +72,7 @@ const timeouter = (seconds: number, runNumber: number) => {
  return timeouterPromise;
}

const run = async (seed: string, bin: string, args: string[]) => {
const run = async (seed: string, bin: string, args: string[]): Promise<boolean> => {
  const proc = spawn([`/app/${bin}`, ...args], {
    stdout: LOG_TO_STDOUT ? "inherit" : "pipe",
    stderr: LOG_TO_STDOUT ? "inherit" : "pipe",
@@ -77,6 +80,7 @@ const run = async (seed: string, bin: string, args: string[]) => {
  });

  const timeout = timeouter(PER_RUN_TIMEOUT_SECONDS, runNumber);
  let issuePosted = false;

  try {
    const exitCode = await Promise.race([proc.exited, timeout]);
@@ -102,6 +106,7 @@ const run = async (seed: string, bin: string, args: string[]) => {
          command: args.join(" "),
          stackTrace: failureInfo,
        });
        issuePosted = true;
      } else {
        await github.postGitHubIssue({
          type: "assertion",
@@ -109,6 +114,7 @@ const run = async (seed: string, bin: string, args: string[]) => {
          command: args.join(" "),
          failureInfo,
        });
        issuePosted = true;
      }
    } catch (err2) {
      console.error(`Error extracting simulator seed and stack trace: ${err2}`);
@@ -134,6 +140,7 @@ const run = async (seed: string, bin: string, args: string[]) => {
        command: args.join(" "),
        output: lastLines,
      });
      issuePosted = true;
    } else {
      throw err;
    }
@@ -141,12 +148,16 @@ const run = async (seed: string, bin: string, args: string[]) => {
    // @ts-ignore
    timeout.clear();
  }

  return issuePosted;
}

// Main execution loop
const startTime = new Date();
const limboSimArgs = process.argv.slice(2);
let runNumber = 0;
let totalIssuesPosted = 0;

while (new Date().getTime() - startTime.getTime() < TIME_LIMIT_MINUTES * 60 * 1000) {
  const timestamp = new Date().toISOString();
  const args = [...limboSimArgs];
@@ -160,13 +171,29 @@ while (new Date().getTime() - startTime.getTime() < TIME_LIMIT_MINUTES * 60 * 10
  args.push(...loop);

  console.log(`[${timestamp}]: Running "limbo_sim ${args.join(" ")}" - (seed ${seed}, run number ${runNumber})`);
  await run(seed, "limbo_sim", args);
  const issuePosted = await run(seed, "limbo_sim", args);

  if (issuePosted) {
    totalIssuesPosted++;
  }

  runNumber++;

  SLEEP_BETWEEN_RUNS_SECONDS > 0 && (await sleep(SLEEP_BETWEEN_RUNS_SECONDS));
}

// Post summary to Slack after the run completes
const endTime = new Date();
const timeElapsed = Math.floor((endTime.getTime() - startTime.getTime()) / 1000);
console.log(`\nRun completed! Total runs: ${runNumber}, Issues posted: ${totalIssuesPosted}, Time elapsed: ${timeElapsed}s`);

await slack.postRunSummary({
  totalRuns: runNumber,
  issuesPosted: totalIssuesPosted,
  timeElapsed,
  gitHash: github.GIT_HASH,
});

async function sleep(sec: number) {
  return new Promise(resolve => setTimeout(resolve, sec * 1000));
}

154 simulator-docker-runner/slack.ts Normal file
@@ -0,0 +1,154 @@
export class SlackClient {
  private botToken: string;
  private channel: string;
  mode: 'real' | 'dry-run';

  constructor() {
    this.botToken = process.env.SLACK_BOT_TOKEN || "";
    this.channel = process.env.SLACK_CHANNEL || "#simulator-results-fake";
    this.mode = this.botToken ? 'real' : 'dry-run';

    if (this.mode === 'real') {
      if (this.channel === "#simulator-results-fake") {
        throw new Error("SLACK_CHANNEL must be set to a real channel when running in real mode");
      }
    } else {
      if (this.channel !== "#simulator-results-fake") {
        throw new Error("SLACK_CHANNEL must be set to #simulator-results-fake when running in dry-run mode");
      }
    }
  }

  async postRunSummary(stats: {
    totalRuns: number;
    issuesPosted: number;
    timeElapsed: number;
    gitHash: string;
  }): Promise<void> {
    const blocks = this.createSummaryBlocks(stats);
    const fallbackText = this.createFallbackText(stats);

    if (this.mode === 'dry-run') {
      console.log(`Dry-run mode: Would post to Slack channel ${this.channel}`);
      console.log(`Fallback text: ${fallbackText}`);
      console.log(`Blocks: ${JSON.stringify(blocks, null, 2)}`);
      return;
    }

    try {
      const response = await fetch('https://slack.com/api/chat.postMessage', {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${this.botToken}`,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          channel: this.channel,
          text: fallbackText,
          blocks: blocks,
        }),
      });

      const result = await response.json();

      if (!result.ok) {
        console.error(`Failed to post to Slack: ${result.error}`);
        return;
      }

      console.log(`Successfully posted summary to Slack channel ${this.channel}`);
    } catch (error) {
      console.error(`Error posting to Slack: ${error}`);
    }
  }

  private createFallbackText(stats: {
    totalRuns: number;
    issuesPosted: number;
    timeElapsed: number;
    gitHash: string;
  }): string {
    const { totalRuns, issuesPosted, timeElapsed, gitHash } = stats;
    const hours = Math.floor(timeElapsed / 3600);
    const minutes = Math.floor((timeElapsed % 3600) / 60);
    const seconds = Math.floor(timeElapsed % 60);
    const timeString = `${hours}h ${minutes}m ${seconds}s`;
    const gitShortHash = gitHash.substring(0, 7);

    return `🤖 Turso Simulator Run Complete - ${totalRuns} runs, ${issuesPosted} issues posted, ${timeString} elapsed (${gitShortHash})`;
  }

  private createSummaryBlocks(stats: {
    totalRuns: number;
    issuesPosted: number;
    timeElapsed: number;
    gitHash: string;
  }): any[] {
    const { totalRuns, issuesPosted, timeElapsed, gitHash } = stats;
    const hours = Math.floor(timeElapsed / 3600);
    const minutes = Math.floor((timeElapsed % 3600) / 60);
    const seconds = Math.floor(timeElapsed % 60);
    const timeString = `${hours}h ${minutes}m ${seconds}s`;

    const statusEmoji = issuesPosted > 0 ? "🔴" : "✅";
    const statusText = issuesPosted > 0 ? `${issuesPosted} issues found` : "No issues found";
    const gitShortHash = gitHash.substring(0, 7);

    return [
      {
        "type": "header",
        "text": {
          "type": "plain_text",
          "text": "🤖 Turso Simulator Run Complete"
        }
      },
      {
        "type": "section",
        "text": {
          "type": "mrkdwn",
          "text": `${statusEmoji} *${statusText}*`
        }
      },
      {
        "type": "divider"
      },
      {
        "type": "section",
        "fields": [
          {
            "type": "mrkdwn",
            "text": `*Total runs:*\n${totalRuns}`
          },
          {
            "type": "mrkdwn",
            "text": `*Issues posted:*\n${issuesPosted}`
          },
          {
            "type": "mrkdwn",
            "text": `*Time elapsed:*\n${timeString}`
          },
          {
            "type": "mrkdwn",
            "text": `*Git hash:*\n\`${gitShortHash}\``
          },
          {
            "type": "mrkdwn",
            "text": `*See open issues:*\n<https://github.com/tursodatabase/turso/issues?q=is%3Aissue%20state%3Aopen%20simulator%20author%3Aapp%2Fturso-github-handyman|Open issues>`
          }
        ]
      },
      {
        "type": "divider"
      },
      {
        "type": "context",
        "elements": [
          {
            "type": "mrkdwn",
            "text": `Full git hash: \`${gitHash}\` | Timestamp: ${new Date().toISOString()}`
          }
        ]
      }
    ];
  }
}
@@ -8,8 +8,6 @@ use std::{

use serde::{Deserialize, Serialize};

use tracing;

use turso_core::{Connection, Result, StepResult, IO};

use crate::{
@@ -258,20 +256,26 @@ pub(crate) struct InteractionStats {
    pub(crate) create_count: usize,
    pub(crate) create_index_count: usize,
    pub(crate) drop_count: usize,
    pub(crate) begin_count: usize,
    pub(crate) commit_count: usize,
    pub(crate) rollback_count: usize,
}

impl Display for InteractionStats {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Read: {}, Write: {}, Delete: {}, Update: {}, Create: {}, CreateIndex: {}, Drop: {}",
            "Read: {}, Write: {}, Delete: {}, Update: {}, Create: {}, CreateIndex: {}, Drop: {}, Begin: {}, Commit: {}, Rollback: {}",
            self.read_count,
            self.write_count,
            self.delete_count,
            self.update_count,
            self.create_count,
            self.create_index_count,
            self.drop_count
            self.drop_count,
            self.begin_count,
            self.commit_count,
            self.rollback_count,
        )
    }
}
@@ -301,7 +305,7 @@ impl Display for Interaction {
    }
}

type AssertionFunc = dyn Fn(&Vec<ResultSet>, &SimulatorEnv) -> Result<bool>;
type AssertionFunc = dyn Fn(&Vec<ResultSet>, &mut SimulatorEnv) -> Result<bool>;

enum AssertionAST {
    Pick(),
@@ -349,6 +353,9 @@ impl InteractionPlan {
    create_count: 0,
    create_index_count: 0,
    drop_count: 0,
    begin_count: 0,
    commit_count: 0,
    rollback_count: 0,
};

fn query_stat(q: &Query, stats: &mut InteractionStats) {
@@ -360,9 +367,11 @@ impl InteractionPlan {
    Query::Drop(_) => stats.drop_count += 1,
    Query::Update(_) => stats.update_count += 1,
    Query::CreateIndex(_) => stats.create_index_count += 1,
    Query::Begin(_) => stats.begin_count += 1,
    Query::Commit(_) => stats.commit_count += 1,
    Query::Rollback(_) => stats.rollback_count += 1,
    }
}

for interactions in &self.plan {
    match interactions {
        Interactions::Property(property) => {
@@ -458,7 +467,7 @@ impl Interaction {
            out.push(r);
        }
        StepResult::IO => {
            io.run_once().unwrap();
            rows.run_once().unwrap();
        }
        StepResult::Interrupt => {}
        StepResult::Done => {
@@ -477,7 +486,7 @@ impl Interaction {
    pub(crate) fn execute_assertion(
        &self,
        stack: &Vec<ResultSet>,
        env: &SimulatorEnv,
        env: &mut SimulatorEnv,
    ) -> Result<()> {
        match self {
            Self::Assertion(assertion) => {
@@ -502,7 +511,7 @@ impl Interaction {
    pub(crate) fn execute_assumption(
        &self,
        stack: &Vec<ResultSet>,
        env: &SimulatorEnv,
        env: &mut SimulatorEnv,
    ) -> Result<()> {
        match self {
            Self::Assumption(assumption) => {
@@ -682,6 +691,7 @@ fn reopen_database(env: &mut SimulatorEnv) {
    env.connections.clear();

    // Clear all open files
    // TODO: for correct reporting of faults we should get all the recorded numbers and transfer to the new file
    env.io.files.borrow_mut().clear();

    // 2. Re-open database
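Reviewer note: widening AssertionFunc from &SimulatorEnv to &mut SimulatorEnv is what lets assertion closures shadow queries into the environment, as the FaultyQuery assumption in the next file does. A minimal sketch of the closure type and one mutating assertion; the types here are simplified stand-ins for the simulator's real ones:

// Simplified stand-ins for the simulator's assertion plumbing.
type ResultSet = Result<Vec<i64>, String>;

struct SimulatorEnv { tables: Vec<String> }

// Mirrors: dyn Fn(&Vec<ResultSet>, &mut SimulatorEnv) -> Result<bool>
type AssertionFunc =
    dyn Fn(&Vec<ResultSet>, &mut SimulatorEnv) -> Result<bool, String>;

fn main() {
    let assertion: Box<AssertionFunc> =
        Box::new(|stack: &Vec<ResultSet>, env: &mut SimulatorEnv| {
            // A mutating assertion can update the shadow model as a
            // side effect of checking the last result set.
            if stack.last().map_or(false, |r| r.is_ok()) {
                env.tables.push("t_shadowed".to_string());
            }
            Ok(true)
        });
    let mut env = SimulatorEnv { tables: vec![] };
    let stack = vec![Ok(vec![1])];
    assert!(assertion(&stack, &mut env).unwrap());
    assert_eq!(env.tables.len(), 1);
}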
@@ -10,10 +10,13 @@ use crate::{
    CompoundOperator, CompoundSelect, Distinctness, ResultColumn, SelectBody,
    SelectInner,
},
select::{Distinctness, ResultColumn},
transaction::{Begin, Commit, Rollback},
update::Update,
Create, Delete, Drop, Insert, Query, Select,
},
table::SimValue,
FAULT_ERROR_MSG,
},
runner::env::SimulatorEnv,
};
@@ -52,6 +55,8 @@ pub(crate) enum Property {
    queries: Vec<Query>,
    /// The select query
    select: Select,
    /// Interactive query information if any
    interactive: Option<InteractiveQueryInfo>,
},
/// Double Create Failure is a property in which creating
/// the same table twice leads to an error.
@@ -167,6 +172,12 @@ pub(crate) enum Property {
    },
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InteractiveQueryInfo {
    start_with_immediate: bool,
    end_with_commit: bool,
}

impl Property {
    pub(crate) fn name(&self) -> &str {
        match self {
@@ -192,6 +203,7 @@ impl Property {
    row_index,
    queries,
    select,
    interactive,
} => {
    let (table, values) = if let Insert::Values { table, values } = insert {
        (table, values)
@@ -214,7 +226,7 @@ impl Property {
    message: format!("table {} exists", insert.table()),
    func: Box::new({
        let table_name = table.clone();
        move |_: &Vec<ResultSet>, env: &SimulatorEnv| {
        move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
            Ok(env.tables.iter().any(|t| t.name == table_name))
        }
    }),
@@ -222,14 +234,26 @@ impl Property {

let assertion = Interaction::Assertion(Assertion {
    message: format!(
        "row [{:?}] not found in table {}",
        "row [{:?}] not found in table {}, interactive={} commit={}, rollback={}",
        row.iter().map(|v| v.to_string()).collect::<Vec<String>>(),
        insert.table(),
        interactive.is_some(),
        interactive
            .as_ref()
            .map(|i| i.end_with_commit)
            .unwrap_or(false),
        interactive
            .as_ref()
            .map(|i| !i.end_with_commit)
            .unwrap_or(false),
    ),
    func: Box::new(move |stack: &Vec<ResultSet>, _: &SimulatorEnv| {
    func: Box::new(move |stack: &Vec<ResultSet>, _| {
        let rows = stack.last().unwrap();
        match rows {
            Ok(rows) => Ok(rows.iter().any(|r| r == &row)),
            Ok(rows) => {
                let found = rows.iter().any(|r| r == &row);
                Ok(found)
            }
            Err(err) => Err(LimboError::InternalError(err.to_string())),
        }
    }),
@@ -250,7 +274,7 @@ impl Property {
let assumption = Interaction::Assumption(Assertion {
    message: "Double-Create-Failure should not be called on an existing table"
        .to_string(),
    func: Box::new(move |_: &Vec<ResultSet>, env: &SimulatorEnv| {
    func: Box::new(move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
        Ok(!env.tables.iter().any(|t| t.name == table_name))
    }),
});
@@ -308,7 +332,7 @@ impl Property {

let assertion = Interaction::Assertion(Assertion {
    message: "select query should respect the limit clause".to_string(),
    func: Box::new(move |stack: &Vec<ResultSet>, _: &SimulatorEnv| {
    func: Box::new(move |stack: &Vec<ResultSet>, _| {
        let last = stack.last().unwrap();
        match last {
            Ok(rows) => Ok(limit >= rows.len()),
@@ -332,7 +356,7 @@ impl Property {
    message: format!("table {table} exists"),
    func: Box::new({
        let table = table.clone();
        move |_: &Vec<ResultSet>, env: &SimulatorEnv| {
        move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
            Ok(env.tables.iter().any(|t| t.name == table))
        }
    }),
@@ -377,7 +401,7 @@ impl Property {
    message: format!("table {table} exists"),
    func: Box::new({
        let table = table.clone();
        move |_: &Vec<ResultSet>, env: &SimulatorEnv| {
        move |_, env: &mut SimulatorEnv| {
            Ok(env.tables.iter().any(|t| t.name == table))
        }
    }),
@@ -419,7 +443,7 @@ impl Property {
    message: format!("table {table} exists"),
    func: Box::new({
        let table = table.clone();
        move |_: &Vec<ResultSet>, env: &SimulatorEnv| {
        move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
            Ok(env.tables.iter().any(|t| t.name == table))
        }
    }),
@@ -439,7 +463,7 @@ impl Property {

let assertion = Interaction::Assertion(Assertion {
    message: "select queries should return the same amount of results".to_string(),
    func: Box::new(move |stack: &Vec<ResultSet>, _: &SimulatorEnv| {
    func: Box::new(move |stack: &Vec<ResultSet>, _| {
        let select_star = stack.last().unwrap();
        let select_predicate = stack.get(stack.len() - 2).unwrap();
        match (select_predicate, select_star) {
@@ -487,7 +511,35 @@ impl Property {
}
Property::FaultyQuery { query, tables } => {
    let checks = assert_all_table_values(tables);
    let first = std::iter::once(Interaction::FaultyQuery(query.clone()));
    let query_clone = query.clone();
    let assumption = Assertion {
        // A fault may not occur, as we first signal that we want a fault injected,
        // and the fault only triggers when IO is next called. It may happen that a fault is injected
        // but no IO happens right after it.
        message: "fault occurred".to_string(),
        func: Box::new(move |stack, env| {
            let last = stack.last().unwrap();
            match last {
                Ok(_) => {
                    query_clone.shadow(env);
                    Ok(true)
                }
                Err(err) => {
                    let msg = format!("{}", err);
                    if msg.contains(FAULT_ERROR_MSG) {
                        Ok(true)
                    } else {
                        Err(LimboError::InternalError(msg))
                    }
                }
            }
        }),
    };
    let first = [
        Interaction::FaultyQuery(query.clone()),
        Interaction::Assumption(assumption),
    ]
    .into_iter();
    Vec::from_iter(first.chain(checks))
}
Property::WhereTrueFalseNull { select, predicate } => {
@@ -673,9 +725,11 @@ fn assert_all_table_values(tables: &[String]) -> impl Iterator<Item = Interactio
    ),
    func: Box::new({
        let table = table.clone();
        move |stack: &Vec<ResultSet>, env: &SimulatorEnv| {
        move |stack: &Vec<ResultSet>, env: &mut SimulatorEnv| {
            let table = env.tables.iter().find(|t| t.name == table).ok_or_else(|| {
                LimboError::InternalError(format!("table {table} should exist"))
                LimboError::InternalError(format!(
                    "table {table} should exist in simulator env",
                ))
            })?;
            let last = stack.last().unwrap();
            match last {
@@ -760,12 +814,26 @@ fn property_insert_values_select<R: rand::Rng>(
    values: rows,
};

// Choose if we want queries to be executed in an interactive transaction
let interactive = if rng.gen_bool(0.5) {
    Some(InteractiveQueryInfo {
        start_with_immediate: rng.gen_bool(0.5),
        end_with_commit: rng.gen_bool(0.5),
    })
} else {
    None
};
// Create random queries respecting the constraints
let mut queries = Vec::new();
// - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort)
// - [x] The inserted row will not be deleted.
// - [x] The inserted row will not be updated.
// - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented)
if let Some(ref interactive) = interactive {
    queries.push(Query::Begin(Begin {
        immediate: interactive.start_with_immediate,
    }));
}
for _ in 0..rng.gen_range(0..3) {
    let query = Query::arbitrary_from(rng, (env, remaining));
    match &query {
@@ -799,6 +867,13 @@ fn property_insert_values_select<R: rand::Rng>(
    }
    queries.push(query);
}
if let Some(ref interactive) = interactive {
    queries.push(if interactive.end_with_commit {
        Query::Commit(Commit)
    } else {
        Query::Rollback(Rollback)
    });
}

// Select the row
let select_query = Select::simple(
@@ -811,6 +886,7 @@ fn property_insert_values_select<R: rand::Rng>(
    row_index,
    queries,
    select: select_query,
    interactive,
}
}

@@ -1,2 +1,4 @@
pub mod query;
pub mod table;

pub(crate) const FAULT_ERROR_MSG: &str = "Injected fault";

@@ -13,6 +13,11 @@ use update::Update;
use crate::{
    generation::Shadow,
    model::table::{SimValue, Table},
    model::{
        query::transaction::{Begin, Commit, Rollback},
        table::SimValue,
    },
    runner::env::SimulatorEnv,
};

pub mod create;
@@ -22,6 +27,7 @@ pub mod drop;
pub mod insert;
pub mod predicate;
pub mod select;
pub mod transaction;
pub mod update;

// This type represents the potential queries on the database.
@@ -34,6 +40,9 @@ pub(crate) enum Query {
    Update(Update),
    Drop(Drop),
    CreateIndex(CreateIndex),
    Begin(Begin),
    Commit(Commit),
    Rollback(Rollback),
}

impl Query {
@@ -49,6 +58,7 @@ impl Query {
    Query::CreateIndex(CreateIndex { table_name, .. }) => {
        HashSet::from_iter([table_name.clone()])
    }
    Query::Begin(_) | Query::Commit(_) | Query::Rollback(_) => HashSet::new(),
    }
}
pub(crate) fn uses(&self) -> Vec<String> {
@@ -61,6 +71,7 @@ impl Query {
    | Query::Update(Update { table, .. })
    | Query::Drop(Drop { table, .. }) => vec![table.clone()],
    Query::CreateIndex(CreateIndex { table_name, .. }) => vec![table_name.clone()],
    Query::Begin(..) | Query::Commit(..) | Query::Rollback(..) => vec![],
    }
}
}
@@ -77,6 +88,9 @@ impl Shadow for Query {
    Query::Update(update) => update.shadow(env),
    Query::Drop(drop) => drop.shadow(env),
    Query::CreateIndex(create_index) => Ok(create_index.shadow(env)),
    Query::Begin(begin) => begin.shadow(env),
    Query::Commit(commit) => commit.shadow(env),
    Query::Rollback(rollback) => rollback.shadow(env),
    }
}
}
@@ -91,6 +105,9 @@ impl Display for Query {
    Self::Update(update) => write!(f, "{update}"),
    Self::Drop(drop) => write!(f, "{drop}"),
    Self::CreateIndex(create_index) => write!(f, "{create_index}"),
    Self::Begin(begin) => write!(f, "{begin}"),
    Self::Commit(commit) => write!(f, "{commit}"),
    Self::Rollback(rollback) => write!(f, "{rollback}"),
    }
}
}

57 simulator/model/query/transaction.rs Normal file
@@ -0,0 +1,57 @@
use std::fmt::Display;

use serde::{Deserialize, Serialize};

use crate::{model::table::SimValue, runner::env::SimulatorEnv};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct Begin {
    pub(crate) immediate: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct Commit;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct Rollback;

impl Begin {
    pub(crate) fn shadow(&self, env: &mut SimulatorEnv) -> Vec<Vec<SimValue>> {
        env.tables_snapshot = Some(env.tables.clone());
        vec![]
    }
}

impl Commit {
    pub(crate) fn shadow(&self, env: &mut SimulatorEnv) -> Vec<Vec<SimValue>> {
        env.tables_snapshot = None;
        vec![]
    }
}

impl Rollback {
    pub(crate) fn shadow(&self, env: &mut SimulatorEnv) -> Vec<Vec<SimValue>> {
        if let Some(tables) = env.tables_snapshot.take() {
            env.tables = tables;
        }
        vec![]
    }
}

impl Display for Begin {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "BEGIN {}", if self.immediate { "IMMEDIATE" } else { "" })
    }
}

impl Display for Commit {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "COMMIT")
    }
}

impl Display for Rollback {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "ROLLBACK")
    }
}
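Reviewer note: the shadow model here is plain snapshot-and-restore: Begin clones the table state, Commit discards the snapshot, Rollback reinstates it. The whole lifecycle in one runnable sketch; the Table stand-in is hypothetical (the real code snapshots Vec<Table>):

// Stand-in table model for illustration.
type Table = Vec<i64>;

struct Env {
    tables: Vec<Table>,
    tables_snapshot: Option<Vec<Table>>,
}

impl Env {
    fn begin(&mut self) { self.tables_snapshot = Some(self.tables.clone()); }
    fn commit(&mut self) { self.tables_snapshot = None; }
    fn rollback(&mut self) {
        // Restore only if a snapshot exists, as in Rollback::shadow above.
        if let Some(tables) = self.tables_snapshot.take() {
            self.tables = tables;
        }
    }
}

fn main() {
    let mut env = Env { tables: vec![vec![1]], tables_snapshot: None };
    env.begin();
    env.tables[0].push(2);              // uncommitted write
    env.rollback();
    assert_eq!(env.tables[0], vec![1]); // the write was undone
}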
@@ -58,7 +58,7 @@ pub struct SimulatorCLI {
    pub disable_delete: bool,
    #[clap(long, help = "disable CREATE Statement", default_value_t = false)]
    pub disable_create: bool,
    #[clap(long, help = "disable CREATE INDEX Statement", default_value_t = false)]
    #[clap(long, help = "disable CREATE INDEX Statement", default_value_t = true)]
    pub disable_create_index: bool,
    #[clap(long, help = "disable DROP Statement", default_value_t = false)]
    pub disable_drop: bool,
@@ -100,7 +100,7 @@ pub struct SimulatorCLI {
    pub disable_union_all_preserves_cardinality: bool,
    #[clap(long, help = "disable FsyncNoWait Property", default_value_t = true)]
    pub disable_fsync_no_wait: bool,
    #[clap(long, help = "disable FaultyQuery Property", default_value_t = true)]
    #[clap(long, help = "disable FaultyQuery Property", default_value_t = false)]
    pub disable_faulty_query: bool,
    #[clap(long, help = "disable Reopen-Database fault", default_value_t = false)]
    pub disable_reopen_database: bool,
@@ -110,6 +110,10 @@ pub struct SimulatorCLI {
        default_value_t = 0
    )]
    pub latency_probability: usize,
    #[clap(long, help = "Enable experimental MVCC feature")]
    pub experimental_mvcc: bool,
    #[clap(long, help = "Enable experimental indexing feature")]
    pub experimental_indexes: bool,
}

#[derive(Parser, Debug, Clone, Serialize, Deserialize, PartialEq, PartialOrd, Eq, Ord)]

@@ -111,6 +111,18 @@ fn execute_query_rusqlite(
    connection.execute(create_index.to_string().as_str(), ())?;
    Ok(vec![])
}
Query::Begin(begin) => {
    connection.execute(begin.to_string().as_str(), ())?;
    Ok(vec![])
}
Query::Commit(commit) => {
    connection.execute(commit.to_string().as_str(), ())?;
    Ok(vec![])
}
Query::Rollback(rollback) => {
    connection.execute(rollback.to_string().as_str(), ())?;
    Ok(vec![])
}
}
}

@@ -37,6 +37,7 @@ pub(crate) struct SimulatorEnv {
    pub(crate) paths: Paths,
    pub(crate) type_: SimulationType,
    pub(crate) phase: SimulationPhase,
    pub tables_snapshot: Option<Vec<Table>>,
}

impl UnwindSafe for SimulatorEnv {}
@@ -55,6 +56,7 @@ impl SimulatorEnv {
    paths: self.paths.clone(),
    type_: self.type_,
    phase: self.phase,
    tables_snapshot: None,
    }
}

@@ -207,6 +209,8 @@ impl SimulatorEnv {
    max_time_simulation: cli_opts.maximum_time,
    disable_reopen_database: cli_opts.disable_reopen_database,
    latency_probability: cli_opts.latency_probability,
    experimental_mvcc: cli_opts.experimental_mvcc,
    experimental_indexes: cli_opts.experimental_indexes,
};

let io =
@@ -224,7 +228,12 @@ impl SimulatorEnv {
    std::fs::remove_file(&wal_path).unwrap();
}

let db = match Database::open_file(io.clone(), db_path.to_str().unwrap(), false, true) {
let db = match Database::open_file(
    io.clone(),
    db_path.to_str().unwrap(),
    opts.experimental_mvcc,
    opts.experimental_indexes,
) {
    Ok(db) => db,
    Err(e) => {
        panic!("error opening simulator test file {db_path:?}: {e:?}");
@@ -245,6 +254,7 @@ impl SimulatorEnv {
    db,
    type_: simulation_type,
    phase: SimulationPhase::Test,
    tables_snapshot: None,
}
}

@@ -362,6 +372,8 @@ pub(crate) struct SimulatorOpts {
    pub(crate) page_size: usize,
    pub(crate) max_time_simulation: usize,
    pub(crate) latency_probability: usize,
    pub(crate) experimental_mvcc: bool,
    pub(crate) experimental_indexes: bool,
}

#[derive(Debug, Clone)]

Some files were not shown because too many files have changed in this diff.