diff --git a/.github/workflows/napi-sync.yml b/.github/workflows/napi-sync.yml index edce61b96..7f854cb75 100644 --- a/.github/workflows/napi-sync.yml +++ b/.github/workflows/napi-sync.yml @@ -78,7 +78,7 @@ jobs: .cargo-cache target/ key: ${{ matrix.settings.target }}-cargo-${{ matrix.settings.host }} - - uses: goto-bus-stop/setup-zig@v2 + - uses: mlugg/setup-zig@v2 if: ${{ matrix.settings.target == 'armv7-unknown-linux-gnueabihf' || matrix.settings.target == 'armv7-unknown-linux-musleabihf' }} with: version: 0.13.0 @@ -175,11 +175,13 @@ jobs: if git log -1 --pretty=%B | grep "^Turso [0-9]\+\.[0-9]\+\.[0-9]\+$"; then echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc - npm publish --access public + make publish-native + make publish-browser elif git log -1 --pretty=%B | grep "^Turso [0-9]\+\.[0-9]\+\.[0-9]\+"; then echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc - npm publish --tag next --access public + make publish-native-next + make publish-browser-next else echo "Not a release, skipping publish" fi diff --git a/.github/workflows/napi.yml b/.github/workflows/napi.yml index 1095f99d5..c53237552 100644 --- a/.github/workflows/napi.yml +++ b/.github/workflows/napi.yml @@ -19,6 +19,10 @@ defaults: run: working-directory: bindings/javascript +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + jobs: build: timeout-minutes: 20 @@ -27,20 +31,18 @@ jobs: matrix: settings: - host: windows-latest - build: | - yarn build --target x86_64-pc-windows-msvc - yarn test target: x86_64-pc-windows-msvc + build: yarn workspace @tursodatabase/database napi-build --target x86_64-pc-windows-msvc - host: ubuntu-latest target: x86_64-unknown-linux-gnu docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-debian - build: yarn build --target x86_64-unknown-linux-gnu + build: yarn workspace @tursodatabase/database napi-build --target x86_64-unknown-linux-gnu - host: macos-latest target: aarch64-apple-darwin - build: yarn build --target aarch64-apple-darwin + build: yarn workspace @tursodatabase/database napi-build --target aarch64-apple-darwin - host: blacksmith-2vcpu-ubuntu-2404-arm target: aarch64-unknown-linux-gnu - build: yarn build --target aarch64-unknown-linux-gnu + build: yarn workspace @tursodatabase/database napi-build --target aarch64-unknown-linux-gnu - host: ubuntu-latest target: wasm32-wasip1-threads setup: | @@ -52,7 +54,7 @@ jobs: export CMAKE_BUILD_PARALLEL_LEVEL=$(nproc) export TARGET_CXXFLAGS="--target=wasm32-wasi-threads --sysroot=$(pwd)/wasi-sdk-25.0-x86_64-linux/share/wasi-sysroot -pthread -mllvm -wasm-enable-sjlj -lsetjmp" export TARGET_CFLAGS="$TARGET_CXXFLAGS" - yarn build --target wasm32-wasip1-threads + yarn workspace @tursodatabase/database-browser build name: stable - ${{ matrix.settings.target }} - node@20 runs-on: ${{ matrix.settings.host }} steps: @@ -78,7 +80,7 @@ jobs: .cargo-cache target/ key: ${{ matrix.settings.target }}-cargo-${{ matrix.settings.host }} - - uses: goto-bus-stop/setup-zig@v2 + - uses: mlugg/setup-zig@v2 if: ${{ matrix.settings.target == 'armv7-unknown-linux-gnueabihf' || matrix.settings.target == 'armv7-unknown-linux-musleabihf' }} with: version: 0.13.0 @@ -88,6 +90,8 @@ jobs: shell: bash - name: Install dependencies run: yarn install + - name: Build common + run: yarn workspace @tursodatabase/database-common build - name: Setup node x86 uses: actions/setup-node@v4 if: matrix.settings.target == 'x86_64-pc-windows-msvc' @@ -110,8 +114,8 @@ jobs: with: name: 
bindings-${{ matrix.settings.target }} path: | - bindings/javascript/${{ env.APP_NAME }}.*.node - bindings/javascript/${{ env.APP_NAME }}.*.wasm + bindings/javascript/packages/native/${{ env.APP_NAME }}.*.node + bindings/javascript/packages/browser/${{ env.APP_NAME }}.*.wasm if-no-files-found: error test-linux-x64-gnu-binding: name: Test bindings on Linux-x64-gnu - node@${{ matrix.node }} needs: - build strategy: fail-fast: false matrix: node: - '20' - '22' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Setup node uses: actions/setup-node@v4 with: node-version: ${{ matrix.node }} - name: Install dependencies run: yarn install - - name: Download artifacts + - name: Build common + run: yarn workspace @tursodatabase/database-common build + - name: Download all artifacts uses: actions/download-artifact@v4 with: - name: bindings-x86_64-unknown-linux-gnu - path: bindings/javascript + path: bindings/javascript/packages + merge-multiple: true - name: List packages run: ls -R . shell: bash - name: Test bindings - run: docker run --rm -v $(pwd):/build -w /build node:${{ matrix.node }}-slim yarn test + run: docker run --rm -v $(pwd):/build -w /build node:${{ matrix.node }}-slim yarn workspace @tursodatabase/database test publish: name: Publish runs-on: ubuntu-latest - if: startsWith(github.ref, 'refs/tags/v') permissions: contents: read id-token: write @@ -156,33 +161,35 @@ uses: useblacksmith/setup-node@v5 with: node-version: 20 - - name: Install dependencies - run: yarn install - - name: create npm dirs - run: yarn napi create-npm-dirs - name: Download all artifacts uses: actions/download-artifact@v4 with: - path: bindings/javascript/artifacts - - name: Move artifacts - run: yarn artifacts - - name: List packages - run: ls -R ./npm - shell: bash + path: bindings/javascript/packages + merge-multiple: true + - name: Install dependencies + run: yarn install + - name: Build TypeScript + run: yarn tsc-build - name: Publish + if: "startsWith(github.ref, 'refs/tags/v')" run: | npm config set provenance true if git log -1 --pretty=%B | grep "^Turso [0-9]\+\.[0-9]\+\.[0-9]\+$"; then echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc - npm publish --access public + npm publish --workspaces --access public elif git log -1 --pretty=%B | grep "^Turso [0-9]\+\.[0-9]\+\.[0-9]\+"; then echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc - npm publish --tag next --access public + npm publish --workspaces --access public --tag next else - echo "Not a release, skipping publish" + echo "git log structure is unexpected, skipping publish" + npm publish --workspaces --dry-run fi env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} NPM_TOKEN: ${{ secrets.NPM_TOKEN }} + - name: Publish (dry-run) + if: "!startsWith(github.ref, 'refs/tags/v')" + run: | + npm publish --workspaces --dry-run \ No newline at end of file diff --git a/.gitignore b/.gitignore index 896698406..b851e8025 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ *.so *.ipynb +*.o # Python .mypy_cache/ @@ -41,3 +42,5 @@ simulator.log **/*.txt profile.json.gz simulator-output/ + +&1 diff --git a/COMPAT.md b/COMPAT.md index 879b022f8..ebbb3f53b 100644 --- a/COMPAT.md +++ b/COMPAT.md @@ -393,7 +393,7 @@ Modifiers: | jsonb_group_array(value) | Yes | | | json_group_object(label,value) | Yes | | | jsonb_group_object(name,value) | Yes | | -| json_each(json) | | | +| json_each(json) | Yes | | | json_each(json,path) | | | | json_tree(json) | | | | json_tree(json,path) | | | diff --git a/Cargo.lock b/Cargo.lock index 93f8cd35f..17c0a084d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -667,7 +667,7 @@ checksum = 
"773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_tester" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "anyhow", "assert_cmd", @@ -1800,9 +1800,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -1822,7 +1822,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.10.0", + "indexmap 2.11.0", "is-terminal", "itoa", "log", @@ -2126,7 +2126,7 @@ dependencies = [ [[package]] name = "limbo_completion" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "mimalloc", "turso_ext", @@ -2134,7 +2134,7 @@ dependencies = [ [[package]] name = "limbo_crypto" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "blake3", "data-encoding", @@ -2147,7 +2147,7 @@ dependencies = [ [[package]] name = "limbo_csv" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "csv", "mimalloc", @@ -2157,7 +2157,7 @@ dependencies = [ [[package]] name = "limbo_ipaddr" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "ipnetwork", "mimalloc", @@ -2166,7 +2166,7 @@ dependencies = [ [[package]] name = "limbo_percentile" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "mimalloc", "turso_ext", @@ -2174,7 +2174,7 @@ dependencies = [ [[package]] name = "limbo_regexp" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "mimalloc", "regex", @@ -2183,7 +2183,7 @@ dependencies = [ [[package]] name = "limbo_sim" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "anyhow", "chrono", @@ -2192,7 +2192,7 @@ dependencies = [ "env_logger 0.10.2", "garde", "hex", - "indexmap 2.10.0", + "indexmap 2.11.0", "itertools 0.14.0", "json5", "log", @@ -2216,7 +2216,7 @@ dependencies = [ [[package]] name = "limbo_sqlite_test_ext" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "cc", ] @@ -2774,7 +2774,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eac26e981c03a6e53e0aee43c113e3202f5581d5360dae7bd2c70e800dd0451d" dependencies = [ "base64", - "indexmap 2.10.0", + "indexmap 2.11.0", "quick-xml 0.32.0", "serde", "time", @@ -2971,7 +2971,7 @@ dependencies = [ [[package]] name = "py-turso" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "anyhow", "pyo3", @@ -3666,12 +3666,13 @@ checksum = "d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d" [[package]] name = "sql_generation" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "anarchist-readable-name-generator-lib 0.2.0", "anyhow", "garde", "hex", + "indexmap 2.11.0", "itertools 0.14.0", "rand 0.9.2", "rand_chacha 0.9.0", @@ -4064,7 +4065,7 @@ version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" dependencies = [ - "indexmap 2.10.0", + "indexmap 2.11.0", "serde", "serde_spanned", "toml_datetime", @@ -4086,7 +4087,7 @@ version = "0.22.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" dependencies 
= [ - "indexmap 2.10.0", + "indexmap 2.11.0", "serde", "serde_spanned", "toml_datetime", @@ -4175,7 +4176,7 @@ dependencies = [ [[package]] name = "turso" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", @@ -4187,7 +4188,7 @@ dependencies = [ [[package]] name = "turso-java" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "jni", "thiserror 2.0.12", @@ -4196,7 +4197,7 @@ dependencies = [ [[package]] name = "turso_cli" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "anyhow", "cfg-if", @@ -4229,7 +4230,7 @@ dependencies = [ [[package]] name = "turso_core" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "aegis", "aes", @@ -4288,7 +4289,7 @@ dependencies = [ [[package]] name = "turso_dart" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "flutter_rust_bridge", "turso_core", @@ -4296,7 +4297,7 @@ dependencies = [ [[package]] name = "turso_ext" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "chrono", "getrandom 0.3.2", @@ -4305,7 +4306,7 @@ dependencies = [ [[package]] name = "turso_ext_tests" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "env_logger 0.11.7", "lazy_static", @@ -4316,7 +4317,7 @@ dependencies = [ [[package]] name = "turso_macros" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "proc-macro2", "quote", @@ -4325,18 +4326,19 @@ dependencies = [ [[package]] name = "turso_node" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "napi", "napi-build", "napi-derive", + "tracing", "tracing-subscriber", "turso_core", ] [[package]] name = "turso_parser" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "bitflags 2.9.0", "criterion", @@ -4352,7 +4354,7 @@ dependencies = [ [[package]] name = "turso_sqlite3" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "env_logger 0.11.7", "libc", @@ -4365,13 +4367,13 @@ dependencies = [ [[package]] name = "turso_sqlite3_parser" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "bitflags 2.9.0", "cc", "env_logger 0.11.7", "fallible-iterator", - "indexmap 2.10.0", + "indexmap 2.11.0", "log", "memchr", "miette", @@ -4383,7 +4385,7 @@ dependencies = [ [[package]] name = "turso_stress" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "anarchist-readable-name-generator-lib 0.1.2", "antithesis_sdk", @@ -4399,7 +4401,7 @@ dependencies = [ [[package]] name = "turso_sync_engine" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "base64", "bytes", @@ -4425,7 +4427,7 @@ dependencies = [ [[package]] name = "turso_sync_js" -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" dependencies = [ "genawaiter", "http", diff --git a/Cargo.toml b/Cargo.toml index 61027dd19..e74c036b2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,29 +33,29 @@ members = [ exclude = ["perf/latency/limbo"] [workspace.package] -version = "0.1.5-pre.2" +version = "0.1.5-pre.4" authors = ["the Limbo authors"] edition = "2021" license = "MIT" repository = "https://github.com/tursodatabase/turso" [workspace.dependencies] -turso = { path = "bindings/rust", version = "0.1.5-pre.2" } -turso_node = { path = "bindings/javascript", version = "0.1.5-pre.2" } -limbo_completion = { path = "extensions/completion", version = "0.1.5-pre.2" } -turso_core = { path = "core", version = "0.1.5-pre.2" } -turso_sync_engine = { path = "sync/engine", version = "0.1.5-pre.2" } -limbo_crypto = { path = "extensions/crypto", version = 
"0.1.5-pre.2" } -limbo_csv = { path = "extensions/csv", version = "0.1.5-pre.2" } -turso_ext = { path = "extensions/core", version = "0.1.5-pre.2" } -turso_ext_tests = { path = "extensions/tests", version = "0.1.5-pre.2" } -limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.5-pre.2" } -turso_macros = { path = "macros", version = "0.1.5-pre.2" } -limbo_percentile = { path = "extensions/percentile", version = "0.1.5-pre.2" } -limbo_regexp = { path = "extensions/regexp", version = "0.1.5-pre.2" } -turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.5-pre.2" } -limbo_uuid = { path = "extensions/uuid", version = "0.1.5-pre.2" } -turso_parser = { path = "parser", version = "0.1.5-pre.2" } +turso = { path = "bindings/rust", version = "0.1.5-pre.4" } +turso_node = { path = "bindings/javascript", version = "0.1.5-pre.4" } +limbo_completion = { path = "extensions/completion", version = "0.1.5-pre.4" } +turso_core = { path = "core", version = "0.1.5-pre.4" } +turso_sync_engine = { path = "sync/engine", version = "0.1.5-pre.4" } +limbo_crypto = { path = "extensions/crypto", version = "0.1.5-pre.4" } +limbo_csv = { path = "extensions/csv", version = "0.1.5-pre.4" } +turso_ext = { path = "extensions/core", version = "0.1.5-pre.4" } +turso_ext_tests = { path = "extensions/tests", version = "0.1.5-pre.4" } +limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.5-pre.4" } +turso_macros = { path = "macros", version = "0.1.5-pre.4" } +limbo_percentile = { path = "extensions/percentile", version = "0.1.5-pre.4" } +limbo_regexp = { path = "extensions/regexp", version = "0.1.5-pre.4" } +turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.5-pre.4" } +limbo_uuid = { path = "extensions/uuid", version = "0.1.5-pre.4" } +turso_parser = { path = "parser", version = "0.1.5-pre.4" } sql_generation = { path = "sql_generation" } strum = { version = "0.26", features = ["derive"] } strum_macros = "0.26" diff --git a/Dockerfile.antithesis b/Dockerfile.antithesis index 3c4a1b2f9..05184d727 100644 --- a/Dockerfile.antithesis +++ b/Dockerfile.antithesis @@ -110,6 +110,7 @@ WORKDIR /app COPY ./antithesis-tests/bank-test/*.py /opt/antithesis/test/v1/bank-test/ COPY ./antithesis-tests/stress-composer/*.py /opt/antithesis/test/v1/stress-composer/ COPY ./antithesis-tests/stress /opt/antithesis/test/v1/stress +COPY ./antithesis-tests/stress-io_uring /opt/antithesis/test/v1/stress-io_uring COPY ./antithesis-tests/stress-unreliable /opt/antithesis/test/v1/stress-unreliable RUN chmod 777 -R /opt/antithesis/test/v1 diff --git a/antithesis-tests/stress-composer/first_setup.py b/antithesis-tests/stress-composer/first_setup.py index 74f2a9eb2..cc3b6d790 100755 --- a/antithesis-tests/stress-composer/first_setup.py +++ b/antithesis-tests/stress-composer/first_setup.py @@ -35,7 +35,7 @@ cur_init.execute("CREATE TABLE schemas (schema TEXT, tbl INT)") cur_init.execute("CREATE TABLE indexes (idx_name TEXT, tbl_name TEXT, idx_type TEXT, cols TEXT)") try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Error connecting to database: {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_alter_table.py b/antithesis-tests/stress-composer/parallel_driver_alter_table.py index 82afd351b..be0551e41 100755 --- a/antithesis-tests/stress-composer/parallel_driver_alter_table.py +++ b/antithesis-tests/stress-composer/parallel_driver_alter_table.py @@ -29,7 +29,7 @@ print(f"Selected 
table: tbl_{selected_tbl} with {tbl_schema['colCount']} columns # Connect to the main database try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_create_index.py b/antithesis-tests/stress-composer/parallel_driver_create_index.py index 6016eab3c..e384dcb2d 100755 --- a/antithesis-tests/stress-composer/parallel_driver_create_index.py +++ b/antithesis-tests/stress-composer/parallel_driver_create_index.py @@ -29,7 +29,7 @@ tbl_schema = json.loads(schema_json) # Connect to the main database try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_create_table.py b/antithesis-tests/stress-composer/parallel_driver_create_table.py index 9988413ed..5d786d2da 100755 --- a/antithesis-tests/stress-composer/parallel_driver_create_table.py +++ b/antithesis-tests/stress-composer/parallel_driver_create_table.py @@ -18,7 +18,7 @@ cur_init = con_init.cursor() # Connect to the main database try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_delete.py b/antithesis-tests/stress-composer/parallel_driver_delete.py index d9f6f61a7..226a823c5 100755 --- a/antithesis-tests/stress-composer/parallel_driver_delete.py +++ b/antithesis-tests/stress-composer/parallel_driver_delete.py @@ -32,7 +32,7 @@ pk = tbl_schema["pk"] cols = [f"col_{col}" for col in range(tbl_schema["colCount"]) if col != pk] try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_drop_index.py b/antithesis-tests/stress-composer/parallel_driver_drop_index.py index 17e42a1e3..2033b4e5e 100755 --- a/antithesis-tests/stress-composer/parallel_driver_drop_index.py +++ b/antithesis-tests/stress-composer/parallel_driver_drop_index.py @@ -15,7 +15,7 @@ cur_init = con_init.cursor() # Connect to the main database try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_drop_table.py b/antithesis-tests/stress-composer/parallel_driver_drop_table.py index 78c02a11c..d065d7442 100755 --- a/antithesis-tests/stress-composer/parallel_driver_drop_table.py +++ b/antithesis-tests/stress-composer/parallel_driver_drop_table.py @@ -24,7 +24,7 @@ selected_idx = get_random() % len(existing_schemas) selected_tbl = existing_schemas[selected_idx][0] try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... 
{e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_insert.py b/antithesis-tests/stress-composer/parallel_driver_insert.py index 8a50e1460..ddf7c96cc 100755 --- a/antithesis-tests/stress-composer/parallel_driver_insert.py +++ b/antithesis-tests/stress-composer/parallel_driver_insert.py @@ -28,7 +28,7 @@ tbl_schema = json.loads(schema_json) cols = ", ".join([f"col_{col}" for col in range(tbl_schema["colCount"])]) try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_integritycheck.py b/antithesis-tests/stress-composer/parallel_driver_integritycheck.py index dde4f0f2b..5b85388a4 100755 --- a/antithesis-tests/stress-composer/parallel_driver_integritycheck.py +++ b/antithesis-tests/stress-composer/parallel_driver_integritycheck.py @@ -28,7 +28,7 @@ tbl_schema = json.loads(schema_json) cols = ", ".join([f"col_{col}" for col in range(tbl_schema["colCount"])]) try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_rollback.py b/antithesis-tests/stress-composer/parallel_driver_rollback.py index 6045a7779..9a9509641 100755 --- a/antithesis-tests/stress-composer/parallel_driver_rollback.py +++ b/antithesis-tests/stress-composer/parallel_driver_rollback.py @@ -28,7 +28,7 @@ tbl_schema = json.loads(schema_json) cols = ", ".join([f"col_{col}" for col in range(tbl_schema["colCount"])]) try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_schema_rollback.py b/antithesis-tests/stress-composer/parallel_driver_schema_rollback.py index 431a1b6a4..62ad68e96 100755 --- a/antithesis-tests/stress-composer/parallel_driver_schema_rollback.py +++ b/antithesis-tests/stress-composer/parallel_driver_schema_rollback.py @@ -29,7 +29,7 @@ tbl_schema = json.loads(schema_json) tbl_name = f"tbl_{selected_tbl}" try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... {e}") exit(0) diff --git a/antithesis-tests/stress-composer/parallel_driver_update.py b/antithesis-tests/stress-composer/parallel_driver_update.py index 3231b3844..609be596b 100755 --- a/antithesis-tests/stress-composer/parallel_driver_update.py +++ b/antithesis-tests/stress-composer/parallel_driver_update.py @@ -32,7 +32,7 @@ pk = tbl_schema["pk"] cols = [f"col_{col}" for col in range(tbl_schema["colCount"]) if col != pk] # print(cols) try: - con = turso.connect("stress_composer.db", experimental_indexes=True) + con = turso.connect("stress_composer.db") except Exception as e: print(f"Failed to open stress_composer.db. Exiting... 
{e}") exit(0) diff --git a/antithesis-tests/stress-io_uring/singleton_driver_stress.sh b/antithesis-tests/stress-io_uring/singleton_driver_stress.sh new file mode 100755 index 000000000..87e0d09a0 --- /dev/null +++ b/antithesis-tests/stress-io_uring/singleton_driver_stress.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +/bin/turso_stress --silent --nr-threads 2 --nr-iterations 10000 --vfs io_uring diff --git a/bindings/javascript/.gitignore b/bindings/javascript/.gitignore index b2a0e5629..43964d947 100644 --- a/bindings/javascript/.gitignore +++ b/bindings/javascript/.gitignore @@ -196,3 +196,5 @@ Cargo.lock *.node *.wasm + +npm diff --git a/bindings/javascript/.npmignore b/bindings/javascript/.npmignore index ec144db2a..4c24ea24a 100644 --- a/bindings/javascript/.npmignore +++ b/bindings/javascript/.npmignore @@ -11,3 +11,5 @@ yarn.lock .yarn __test__ renovate.json +examples +perf diff --git a/bindings/javascript/Cargo.toml b/bindings/javascript/Cargo.toml index 077f07fb3..a3b2384fe 100644 --- a/bindings/javascript/Cargo.toml +++ b/bindings/javascript/Cargo.toml @@ -15,9 +15,11 @@ turso_core = { workspace = true } napi = { version = "3.1.3", default-features = false, features = ["napi6"] } napi-derive = { version = "3.1.1", default-features = true } tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } +tracing.workspace = true [features] encryption = ["turso_core/encryption"] +browser = [] [build-dependencies] napi-build = "2.2.3" diff --git a/bindings/javascript/Makefile b/bindings/javascript/Makefile new file mode 100644 index 000000000..9bca855b1 --- /dev/null +++ b/bindings/javascript/Makefile @@ -0,0 +1,20 @@ +pack-native: + npm publish --dry-run && npm pack +pack-browser: + cp package.json package.native.json + cp package.browser.json package.json + npm publish --dry-run && npm pack; cp package.native.json package.json + +publish-native: + npm publish --access public +publish-browser: + cp package.json package.native.json + cp package.browser.json package.json + npm publish --access public; cp package.native.json package.json + +publish-native-next: + npm publish --tag next --access public +publish-browser-next: + cp package.json package.native.json + cp package.browser.json package.json + npm publish --tag next --access public; cp package.native.json package.json diff --git a/bindings/javascript/browser.js b/bindings/javascript/browser.js deleted file mode 100644 index 1959855f1..000000000 --- a/bindings/javascript/browser.js +++ /dev/null @@ -1 +0,0 @@ -export * from '@tursodatabase/database-wasm32-wasi' diff --git a/bindings/javascript/index.js b/bindings/javascript/index.js deleted file mode 100644 index 6bff52d98..000000000 --- a/bindings/javascript/index.js +++ /dev/null @@ -1,398 +0,0 @@ -// prettier-ignore -/* eslint-disable */ -// @ts-nocheck -/* auto-generated by NAPI-RS */ - -import { createRequire } from 'node:module' -const require = createRequire(import.meta.url) -const __dirname = new URL('.', import.meta.url).pathname - -const { readFileSync } = require('node:fs') -let nativeBinding = null -const loadErrors = [] - -const isMusl = () => { - let musl = false - if (process.platform === 'linux') { - musl = isMuslFromFilesystem() - if (musl === null) { - musl = isMuslFromReport() - } - if (musl === null) { - musl = isMuslFromChildProcess() - } - } - return musl -} - -const isFileMusl = (f) => f.includes('libc.musl-') || f.includes('ld-musl-') - -const isMuslFromFilesystem = () => { - try { - return readFileSync('/usr/bin/ldd', 'utf-8').includes('musl') - } 
catch { - return null - } -} - -const isMuslFromReport = () => { - let report = null - if (typeof process.report?.getReport === 'function') { - process.report.excludeNetwork = true - report = process.report.getReport() - } - if (!report) { - return null - } - if (report.header && report.header.glibcVersionRuntime) { - return false - } - if (Array.isArray(report.sharedObjects)) { - if (report.sharedObjects.some(isFileMusl)) { - return true - } - } - return false -} - -const isMuslFromChildProcess = () => { - try { - return require('child_process').execSync('ldd --version', { encoding: 'utf8' }).includes('musl') - } catch (e) { - // If we reach this case, we don't know if the system is musl or not, so is better to just fallback to false - return false - } -} - -function requireNative() { - if (process.env.NAPI_RS_NATIVE_LIBRARY_PATH) { - try { - nativeBinding = require(process.env.NAPI_RS_NATIVE_LIBRARY_PATH); - } catch (err) { - loadErrors.push(err) - } - } else if (process.platform === 'android') { - if (process.arch === 'arm64') { - try { - return require('./turso.android-arm64.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-android-arm64') - } catch (e) { - loadErrors.push(e) - } - } else if (process.arch === 'arm') { - try { - return require('./turso.android-arm-eabi.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-android-arm-eabi') - } catch (e) { - loadErrors.push(e) - } - } else { - loadErrors.push(new Error(`Unsupported architecture on Android ${process.arch}`)) - } - } else if (process.platform === 'win32') { - if (process.arch === 'x64') { - try { - return require('./turso.win32-x64-msvc.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-win32-x64-msvc') - } catch (e) { - loadErrors.push(e) - } - } else if (process.arch === 'ia32') { - try { - return require('./turso.win32-ia32-msvc.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-win32-ia32-msvc') - } catch (e) { - loadErrors.push(e) - } - } else if (process.arch === 'arm64') { - try { - return require('./turso.win32-arm64-msvc.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-win32-arm64-msvc') - } catch (e) { - loadErrors.push(e) - } - } else { - loadErrors.push(new Error(`Unsupported architecture on Windows: ${process.arch}`)) - } - } else if (process.platform === 'darwin') { - try { - return require('./turso.darwin-universal.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-darwin-universal') - } catch (e) { - loadErrors.push(e) - } - if (process.arch === 'x64') { - try { - return require('./turso.darwin-x64.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-darwin-x64') - } catch (e) { - loadErrors.push(e) - } - } else if (process.arch === 'arm64') { - try { - return require('./turso.darwin-arm64.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-darwin-arm64') - } catch (e) { - loadErrors.push(e) - } - } else { - loadErrors.push(new Error(`Unsupported architecture on macOS: ${process.arch}`)) - } - } else if (process.platform === 'freebsd') { - if (process.arch === 'x64') { - try { - return require('./turso.freebsd-x64.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-freebsd-x64') - } catch (e) { 
- loadErrors.push(e) - } - } else if (process.arch === 'arm64') { - try { - return require('./turso.freebsd-arm64.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-freebsd-arm64') - } catch (e) { - loadErrors.push(e) - } - } else { - loadErrors.push(new Error(`Unsupported architecture on FreeBSD: ${process.arch}`)) - } - } else if (process.platform === 'linux') { - if (process.arch === 'x64') { - if (isMusl()) { - try { - return require('./turso.linux-x64-musl.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-x64-musl') - } catch (e) { - loadErrors.push(e) - } - } else { - try { - return require('./turso.linux-x64-gnu.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-x64-gnu') - } catch (e) { - loadErrors.push(e) - } - } - } else if (process.arch === 'arm64') { - if (isMusl()) { - try { - return require('./turso.linux-arm64-musl.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-arm64-musl') - } catch (e) { - loadErrors.push(e) - } - } else { - try { - return require('./turso.linux-arm64-gnu.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-arm64-gnu') - } catch (e) { - loadErrors.push(e) - } - } - } else if (process.arch === 'arm') { - if (isMusl()) { - try { - return require('./turso.linux-arm-musleabihf.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-arm-musleabihf') - } catch (e) { - loadErrors.push(e) - } - } else { - try { - return require('./turso.linux-arm-gnueabihf.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-arm-gnueabihf') - } catch (e) { - loadErrors.push(e) - } - } - } else if (process.arch === 'riscv64') { - if (isMusl()) { - try { - return require('./turso.linux-riscv64-musl.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-riscv64-musl') - } catch (e) { - loadErrors.push(e) - } - } else { - try { - return require('./turso.linux-riscv64-gnu.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-riscv64-gnu') - } catch (e) { - loadErrors.push(e) - } - } - } else if (process.arch === 'ppc64') { - try { - return require('./turso.linux-ppc64-gnu.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-ppc64-gnu') - } catch (e) { - loadErrors.push(e) - } - } else if (process.arch === 's390x') { - try { - return require('./turso.linux-s390x-gnu.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-s390x-gnu') - } catch (e) { - loadErrors.push(e) - } - } else { - loadErrors.push(new Error(`Unsupported architecture on Linux: ${process.arch}`)) - } - } else if (process.platform === 'openharmony') { - if (process.arch === 'arm64') { - try { - return require('./turso.linux-arm64-ohos.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-arm64-ohos') - } catch (e) { - loadErrors.push(e) - } - } else if (process.arch === 'x64') { - try { - return require('./turso.linux-x64-ohos.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-x64-ohos') - } catch (e) { - loadErrors.push(e) - } - } else if (process.arch === 'arm') { - 
try { - return require('./turso.linux-arm-ohos.node') - } catch (e) { - loadErrors.push(e) - } - try { - return require('@tursodatabase/database-linux-arm-ohos') - } catch (e) { - loadErrors.push(e) - } - } else { - loadErrors.push(new Error(`Unsupported architecture on OpenHarmony: ${process.arch}`)) - } - } else { - loadErrors.push(new Error(`Unsupported OS: ${process.platform}, architecture: ${process.arch}`)) - } -} - -nativeBinding = requireNative() - -if (!nativeBinding || process.env.NAPI_RS_FORCE_WASI) { - try { - nativeBinding = require('./turso.wasi.cjs') - } catch (err) { - if (process.env.NAPI_RS_FORCE_WASI) { - loadErrors.push(err) - } - } - if (!nativeBinding) { - try { - nativeBinding = require('@tursodatabase/database-wasm32-wasi') - } catch (err) { - if (process.env.NAPI_RS_FORCE_WASI) { - loadErrors.push(err) - } - } - } -} - -if (!nativeBinding) { - if (loadErrors.length > 0) { - throw new Error( - `Cannot find native binding. ` + - `npm has a bug related to optional dependencies (https://github.com/npm/cli/issues/4828). ` + - 'Please try `npm i` again after removing both package-lock.json and node_modules directory.', - { cause: loadErrors } - ) - } - throw new Error(`Failed to load native binding`) -} - -const { Database, Statement } = nativeBinding -export { Database } -export { Statement } diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json index 4a17ef3f0..55292c90c 100644 --- a/bindings/javascript/package-lock.json +++ b/bindings/javascript/package-lock.json @@ -1,29 +1,61 @@ { - "name": "@tursodatabase/database", - "version": "0.1.5-pre.2", + "name": "javascript", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "@tursodatabase/database", - "version": "0.1.5-pre.2", + "workspaces": [ + "packages/common", + "packages/native", + "packages/browser" + ] + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, "license": "MIT", - "devDependencies": { - "@napi-rs/cli": "^3.0.4", - "@napi-rs/wasm-runtime": "^1.0.1", - "ava": "^6.0.1", - "better-sqlite3": "^11.9.1", - "typescript": "^5.9.2" + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" }, "engines": { - "node": ">= 10" + "node": ">=6.9.0" + } + }, + "node_modules/@babel/code-frame/node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz", + "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" } }, 
"node_modules/@emnapi/core": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.4.5.tgz", "integrity": "sha512-XsLw1dEOpkSX/WucdqUhPWP7hDxSvZiY+fsUC14h+FtQ2Ifni4znbBt8punRX+Uj2JG/uDb8nEHVKvrVlvdZ5Q==", - "dev": true, "license": "MIT", "dependencies": { "@emnapi/wasi-threads": "1.0.4", @@ -34,7 +66,6 @@ "version": "1.4.5", "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.4.5.tgz", "integrity": "sha512-++LApOtY0pEEz1zrd9vy1/zXVaVJJ/EbAF3u0fXIzPJEDtnITsBGbbK0EkM72amhl/R5b+5xx0Y/QhcVOpuulg==", - "dev": true, "license": "MIT", "dependencies": { "tslib": "^2.4.0" @@ -44,12 +75,28 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.0.4.tgz", "integrity": "sha512-PJR+bOmMOPH8AtcTGAyYNiuJ3/Fcoj2XN/gBEWzDIKh254XO+mM9XoXHk5GNEhodxeMznbg7BlRojVbKN+gC6g==", - "dev": true, "license": "MIT", "dependencies": { "tslib": "^2.4.0" } }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.9.tgz", + "integrity": "sha512-BlB7bIcLT3G26urh5Dmse7fiLmLXnRlopw4s8DalgZ8ef79Jj4aUcYbk90g8iCa2467HX8SAIidbL7gsqXHdRw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@inquirer/checkbox": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/@inquirer/checkbox/-/checkbox-4.2.0.tgz", @@ -125,16 +172,6 @@ } } }, - "node_modules/@inquirer/core/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/@inquirer/core/node_modules/ansi-styles": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", @@ -454,88 +491,17 @@ } } }, - "node_modules/@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "dev": true, - "license": "ISC", - "dependencies": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@isaacs/cliui/node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", "dev": true, "license": "MIT" }, - "node_modules/@isaacs/cliui/node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "license": "MIT", - "dependencies": { - "eastasianwidth": 
"^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@isaacs/fs-minipass": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", - "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", - "dev": true, - "license": "ISC", - "dependencies": { - "minipass": "^7.0.4" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/@mapbox/node-pre-gyp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-2.0.0.tgz", - "integrity": "sha512-llMXd39jtP0HpQLVI37Bf1m2ADlEb35GYSh1SDSLsBhR+5iCxiNGlT31yqbNtVHygHAtMy6dWFERpU2JgufhPg==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "consola": "^3.2.3", - "detect-libc": "^2.0.0", - "https-proxy-agent": "^7.0.5", - "node-fetch": "^2.6.7", - "nopt": "^8.0.0", - "semver": "^7.5.3", - "tar": "^7.4.0" - }, - "bin": { - "node-pre-gyp": "bin/node-pre-gyp" - }, - "engines": { - "node": ">=18" - } - }, "node_modules/@napi-rs/cli": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@napi-rs/cli/-/cli-3.0.4.tgz", - "integrity": "sha512-ilbCI69DVDQcIUSUB504LM1+Nhvo0jKycWAzzPJ22YwUoWrru/w0+V5sfjPINgkshQ4Ykv+oZOJXk9Kg1ZBUvg==", + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/@napi-rs/cli/-/cli-3.1.5.tgz", + "integrity": "sha512-Wn6ZPw27qJiEWglGjkaAa70AHuLtyPya6FvjINYJ5U20uvbRhoB0Ta2+bFTAFfUb9R+wvuFvog9JQdy65OmFAQ==", "dev": true, "license": "MIT", "dependencies": { @@ -547,9 +513,9 @@ "colorette": "^2.0.20", "debug": "^4.4.0", "emnapi": "^1.4.0", + "es-toolkit": "^1.39.8", "find-up": "^7.0.0", "js-yaml": "^4.1.0", - "lodash-es": "^4.17.21", "semver": "^7.7.1", "typanion": "^3.14.0" }, @@ -675,108 +641,6 @@ "@napi-rs/lzma-win32-x64-msvc": "1.4.4" } }, - "node_modules/@napi-rs/lzma-android-arm-eabi": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-android-arm-eabi/-/lzma-android-arm-eabi-1.4.4.tgz", - "integrity": "sha512-smZtN41ebtYw+vxn1q3IXhns1hUzFNUcgHxknZKFQSKaybYZ4KxMiiBIw5UqJ9rw1dkaHqokcC1YeAfu8vfG2A==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-android-arm64": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-android-arm64/-/lzma-android-arm64-1.4.4.tgz", - "integrity": "sha512-s+h9bM3Z31FL0IPfWF4kBCebWxJBtpFvje6ikzmeUg1/jjWAP81IJC5j75zz5TEWt+Zf3Bip0uVlQhCZmqlpKA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-darwin-arm64": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-darwin-arm64/-/lzma-darwin-arm64-1.4.4.tgz", - "integrity": "sha512-aF5wxA0SFlRalxeyz7TpmFuztHlG9D0qew+1gz0tiRs4gituT3CCsR0PSBZ2LbalTY/7RqmYP4ssLQus+p8tqg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-darwin-x64": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-darwin-x64/-/lzma-darwin-x64-1.4.4.tgz", - "integrity": "sha512-80gD9kvXPPBz6V4C7SXcPo0o7ySlneDVRpebAHN1DubIEwhdrMFuqmtaATwT5MTraZSrQ4CHF275MQuwiHtlGw==", - 
"cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-freebsd-x64": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-freebsd-x64/-/lzma-freebsd-x64-1.4.4.tgz", - "integrity": "sha512-wd+jwYQRIzkGtUvInYLWSrqRtDatIvwNm/w9k43f+oABBsnP4veJkyKGGm4SQQa35Ki8IXVzYdGTa4eSTi+Org==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-linux-arm-gnueabihf": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-linux-arm-gnueabihf/-/lzma-linux-arm-gnueabihf-1.4.4.tgz", - "integrity": "sha512-KiMgBugjFQfgeZTebuBVHL8ta/nZ2cfzd0Jge0e0y/WX/p7ZkVyCox/TTu9EU2H9OeBAFKTRmIDoqhHlBbkqyA==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, "node_modules/@napi-rs/lzma-linux-arm64-gnu": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/@napi-rs/lzma-linux-arm64-gnu/-/lzma-linux-arm64-gnu-1.4.4.tgz", @@ -811,91 +675,6 @@ "node": ">= 10" } }, - "node_modules/@napi-rs/lzma-linux-ppc64-gnu": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-linux-ppc64-gnu/-/lzma-linux-ppc64-gnu-1.4.4.tgz", - "integrity": "sha512-QzNVcCdq6j4LYOtLUDEyE9wg8tY8kmbQ6TZrqjYQUD2nebTW24lmzFhdeI3xzUzVN5rRt4js1UnL1cPCT5HrSw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-linux-riscv64-gnu": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-linux-riscv64-gnu/-/lzma-linux-riscv64-gnu-1.4.4.tgz", - "integrity": "sha512-7jpyKpBX0LpklkmGBzz1cQJ/QRN+E6h1xSZVeN6KCtLBrCd6LCX3owZMRzSYmdpI6Zr30DrWo0HOUZiKMzgzBg==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-linux-s390x-gnu": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-linux-s390x-gnu/-/lzma-linux-s390x-gnu-1.4.4.tgz", - "integrity": "sha512-ngUxVZIytn2UHY92RnijtT11VhWO32mfa1LFX03GWMWdQl50bV/IqcZR0WYRWlBCd7DZrOf16AY2IR/lwovE7A==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-linux-x64-gnu": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-linux-x64-gnu/-/lzma-linux-x64-gnu-1.4.4.tgz", - "integrity": "sha512-mUGH8hpWJU4FXhn61cD7sHTUEBiWU5JYOhh6ErCIZ0BOoBH/0kYPptfqvJA6G9EfVIcfbtYKxJYYtFC5sbf+eA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-linux-x64-musl": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-linux-x64-musl/-/lzma-linux-x64-musl-1.4.4.tgz", - "integrity": "sha512-ysM4mYSfWGO2h8YZVn0GH7zMZX42hU0h7IomC4/oBJmAk5BIlOGnRB8XQmyz1A7neSi6aByjAlZmW4CrZlI9Uw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, "node_modules/@napi-rs/lzma-wasm32-wasi": { "version": "1.4.4", "resolved": 
"https://registry.npmjs.org/@napi-rs/lzma-wasm32-wasi/-/lzma-wasm32-wasi-1.4.4.tgz", @@ -913,57 +692,6 @@ "node": ">=14.0.0" } }, - "node_modules/@napi-rs/lzma-win32-arm64-msvc": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-win32-arm64-msvc/-/lzma-win32-arm64-msvc-1.4.4.tgz", - "integrity": "sha512-GqoJu7iL7OTqkBQGLps7rXQHZ5sdcZF7tOY06rlYO03ZNkUCjhNpmkuUsPXVnGstqgoGwzMNW6TcSsO/YWotEw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-win32-ia32-msvc": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-win32-ia32-msvc/-/lzma-win32-ia32-msvc-1.4.4.tgz", - "integrity": "sha512-cnExNqWKl0JkLcKlFVuqUrTuQsYP8nstWGT3fz7mPhgqHFOgGmd1l9tDFhqgul7Kt0QTddZRbKl6jlkV7DjSQw==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/lzma-win32-x64-msvc": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/@napi-rs/lzma-win32-x64-msvc/-/lzma-win32-x64-msvc-1.4.4.tgz", - "integrity": "sha512-15SoQgMgktF73ZnLQPkzCwtxyQ+4VuD8n5Puis1H48QRjUNnXXpqTGFyWdLPdd14vcxbndgcYvJtSjOXTfNHiw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, "node_modules/@napi-rs/tar": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/@napi-rs/tar/-/tar-1.0.0.tgz", @@ -992,108 +720,6 @@ "@napi-rs/tar-win32-x64-msvc": "1.0.0" } }, - "node_modules/@napi-rs/tar-android-arm-eabi": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-android-arm-eabi/-/tar-android-arm-eabi-1.0.0.tgz", - "integrity": "sha512-oEntU16IkWykPJnSwv/VIICzIt2SwEsz45z2Ab+EXOas10EB+pu0z31AiSNI5pr1CaJcadbf1JGMI9aOtbAuRQ==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-android-arm64": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-android-arm64/-/tar-android-arm64-1.0.0.tgz", - "integrity": "sha512-b2X7nQ/wH2VGzzl4KhVOR/gHqxIuqrUjMY8VKJYxAGdCrmUPRfc47kersiu6DG706kSv9T+BxeeUQvwqnXZRXQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-darwin-arm64": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-darwin-arm64/-/tar-darwin-arm64-1.0.0.tgz", - "integrity": "sha512-m1Ug1452/DOUbJGSuJuHRTUCBQOXY0arGqXCHuSiaQhBQQjgBhlbHWCv291gV8CytFYd5lvSyiG2gFUU26Qd7A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-darwin-x64": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-darwin-x64/-/tar-darwin-x64-1.0.0.tgz", - "integrity": "sha512-1RiC53g1y4pxX7P2L9sbZcqsw6dfXvGnTNwXHDjg4ATZncZa7uoPUWa7aHAGcQm8ZBO4P0ICt2SHOepstDWWTg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-freebsd-x64": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-freebsd-x64/-/tar-freebsd-x64-1.0.0.tgz", - "integrity": 
"sha512-uLaYn+eO3ZY2ojbohdlRFcuqYP+j2alovtuLdFvCzzsArg4DSnmcJvEQ+I4l99lfyThYB1c8GA64oxSOfmn/UA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-linux-arm-gnueabihf": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-linux-arm-gnueabihf/-/tar-linux-arm-gnueabihf-1.0.0.tgz", - "integrity": "sha512-PhGIaT45i1Fj5iY6NiWYTLPUOHb7rXiwnqKhco+IXOeIclaGcEVoAbhrLiLGQrfv9viLdyhzAxECoOr+zKnApw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, "node_modules/@napi-rs/tar-linux-arm64-gnu": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/@napi-rs/tar-linux-arm64-gnu/-/tar-linux-arm64-gnu-1.0.0.tgz", @@ -1128,74 +754,6 @@ "node": ">= 10" } }, - "node_modules/@napi-rs/tar-linux-ppc64-gnu": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-linux-ppc64-gnu/-/tar-linux-ppc64-gnu-1.0.0.tgz", - "integrity": "sha512-IbB4I8RFcvKI/zGsboUQPmlKoXfXgNOMiJw7Cbe7T1OBeYzDy6n/yEUEaG4zIbocxqjRVsF4ElrW1V/0Ihlqzg==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-linux-s390x-gnu": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-linux-s390x-gnu/-/tar-linux-s390x-gnu-1.0.0.tgz", - "integrity": "sha512-Tl4HSo07u3TLsNQ4KEVfYKdHVNfF/k0o5KQlyGnePiO34Kb+NfaqSKMspjSkrmXKEc0PjB+u9af3BZdTUwml4Q==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-linux-x64-gnu": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-linux-x64-gnu/-/tar-linux-x64-gnu-1.0.0.tgz", - "integrity": "sha512-Xe57Yz4MKSeG6HGECiIHuBKFwAuqs2fzwblTdMd1CoSgaaUc/K/dKTDWZwPtjC0Hh5pM86K0WZuwggbsjmFGNg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-linux-x64-musl": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-linux-x64-musl/-/tar-linux-x64-musl-1.0.0.tgz", - "integrity": "sha512-VA4RXspXyelNAtaFEf2ZLnTYXRILVlH20OGV0oqzuUcQzpwEwK2cJbYtYHK+yCYpxrNbEGsAwN+12LYJMW+NlA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, "node_modules/@napi-rs/tar-wasm32-wasi": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/@napi-rs/tar-wasm32-wasi/-/tar-wasm32-wasi-1.0.0.tgz", @@ -1213,62 +771,10 @@ "node": ">=14.0.0" } }, - "node_modules/@napi-rs/tar-win32-arm64-msvc": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-win32-arm64-msvc/-/tar-win32-arm64-msvc-1.0.0.tgz", - "integrity": "sha512-VdUjZK8jh6mvGRiurK3ms6Yt2hbBbtYjzKCn78Mnme2KGC585Kx1jXl7HShvreCgqh3r0162OSygoE7d/I0Jlw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-win32-ia32-msvc": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-win32-ia32-msvc/-/tar-win32-ia32-msvc-1.0.0.tgz", - "integrity": 
"sha512-8d/4iRXROPXLoe+4FEqXkpgP2KD9A45VUf76WfT6nXZwzQuoh+9WCJNRPVs5vfXV1SMnG9Z32WNc2ivCq0+HZw==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/tar-win32-x64-msvc": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/tar-win32-x64-msvc/-/tar-win32-x64-msvc-1.0.0.tgz", - "integrity": "sha512-HHtL1g0niVa4xDvyfi9wQtCTDDKkhDlaOb3bmayTqWs29mk+pcVHBST3OdXaaViSaduqdG9meosU5sOj5iKQAQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, "node_modules/@napi-rs/wasm-runtime": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.0.1.tgz", - "integrity": "sha512-KVlQ/jgywZpixGCKMNwxStmmbYEMyokZpCf2YuIChhfJA2uqfAKNEM8INz7zzTo55iEXfBhIIs3VqYyqzDLj8g==", - "dev": true, + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.0.3.tgz", + "integrity": "sha512-rZxtMsLwjdXkMUGC3WwsPwLNVqVqnTJT6MNIB6e+5fhMcSCPP0AOsNWuMQ5mdCq6HNjs/ZeWAEchpqeprqBD2Q==", "license": "MIT", "dependencies": { "@emnapi/core": "^1.4.5", @@ -1301,91 +807,6 @@ "@napi-rs/wasm-tools-win32-x64-msvc": "1.0.0" } }, - "node_modules/@napi-rs/wasm-tools-android-arm-eabi": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-android-arm-eabi/-/wasm-tools-android-arm-eabi-1.0.0.tgz", - "integrity": "sha512-Ks0hplmrYatIjSi8XeTObCi0x13AOQD41IQXpBjrz+UK71gDkbxyLWO7B/ckuels3mC1DW3OCQCv+q0lPnaG/A==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/wasm-tools-android-arm64": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-android-arm64/-/wasm-tools-android-arm64-1.0.0.tgz", - "integrity": "sha512-Ppu1/YGLSC/ohkOA8R5YfDh1dCuCHWJObu/BTorAY55YDXIiWy400CoungbYwoRT53K+ixNrg8/zRHnpuqwkRg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/wasm-tools-darwin-arm64": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-darwin-arm64/-/wasm-tools-darwin-arm64-1.0.0.tgz", - "integrity": "sha512-EUU7NvmmKASMLecu7hUHhv9XN2Thf8j+2/zCCMuFuAAlY+eZiOVfrajbZ/RE8CZ4oyfkb0bWFg/CQcmcXAatTw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/wasm-tools-darwin-x64": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-darwin-x64/-/wasm-tools-darwin-x64-1.0.0.tgz", - "integrity": "sha512-hlX21sqy0AEnmn2abarmCXV3fpyIQN+fKqeHNuawti9ZpaJCL6gZCtUGqpUxURjXNjXSI8rywInJE2YmeVQSJQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/wasm-tools-freebsd-x64": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-freebsd-x64/-/wasm-tools-freebsd-x64-1.0.0.tgz", - "integrity": "sha512-T9SOSfIgrdEGQzzquKMOfD3PF6TxG5hL2o5voZtLUALA0yjO+GnpFyv8tAcxKYd7ngWzzK5Uwk7e1z9PcsQZMg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { 
- "node": ">= 10" - } - }, "node_modules/@napi-rs/wasm-tools-linux-arm64-gnu": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-linux-arm64-gnu/-/wasm-tools-linux-arm64-gnu-1.0.0.tgz", @@ -1420,40 +841,6 @@ "node": ">= 10" } }, - "node_modules/@napi-rs/wasm-tools-linux-x64-gnu": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-linux-x64-gnu/-/wasm-tools-linux-x64-gnu-1.0.0.tgz", - "integrity": "sha512-wpRkiy0QBM/zpaGAn5I1HfddQul0vGrdlindT2UHtOYK1zvam524M6LJXBtmhBkXS5a4F2HZiZXns8Wuc7dq4w==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/wasm-tools-linux-x64-musl": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-linux-x64-musl/-/wasm-tools-linux-x64-musl-1.0.0.tgz", - "integrity": "sha512-Ua94ruWB18uKyIz/nj+by2ZxfBbFzbqiiD564ocBHGbrUffpR6Us74uVwxO7rImc/WvCfJqap9ezqmaTvmK7SA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10" - } - }, "node_modules/@napi-rs/wasm-tools-wasm32-wasi": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-wasm32-wasi/-/wasm-tools-wasm32-wasi-1.0.0.tgz", @@ -1471,95 +858,6 @@ "node": ">=14.0.0" } }, - "node_modules/@napi-rs/wasm-tools-win32-arm64-msvc": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-win32-arm64-msvc/-/wasm-tools-win32-arm64-msvc-1.0.0.tgz", - "integrity": "sha512-1kv+DM7z6c9OLcjMtO1/kfdxS5hwXtW1OLIHBU41dtKz5jD3quapYrCjB7AVEZh/JVM765UaLOl31huVucJjRw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/wasm-tools-win32-ia32-msvc": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-win32-ia32-msvc/-/wasm-tools-win32-ia32-msvc-1.0.0.tgz", - "integrity": "sha512-OwcyXtU2Zi3YVHYjmomM3u7jRNPY1j+IPehqCVEqd60jOTOXRZNPGoAvOC7Lw6HX/RGzOJnIcJZbVfKrz5WN1g==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@napi-rs/wasm-tools-win32-x64-msvc": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-tools-win32-x64-msvc/-/wasm-tools-win32-x64-msvc-1.0.0.tgz", - "integrity": "sha512-xat6gnp/G/WCe6U6HKzawotz9zpqsM5a+Dx+S0MPX4AKP7+oztC2/6tkp8KtOPT2bMRMekNntXadHKk0XqW61Q==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.walk": { 
- "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, - "engines": { - "node": ">= 8" - } - }, "node_modules/@octokit/auth-token": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-6.0.0.tgz", @@ -1726,63 +1024,120 @@ "@octokit/openapi-types": "^25.1.0" } }, - "node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "node_modules/@polka/url": { + "version": "1.0.0-next.29", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.29.tgz", + "integrity": "sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.50.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.50.1.tgz", + "integrity": "sha512-2AbMhFFkTo6Ptna1zO7kAXXDLi7H9fGTbVaIq2AAYO7yzcAsuTNWPHhb2aTA6GPiP+JXh85Y8CiS54iZoj4opw==", + "cpu": [ + "arm64" + ], "dev": true, "license": "MIT", "optional": true, - "engines": { - "node": ">=14" - } + "os": [ + "linux" + ] }, - "node_modules/@rollup/pluginutils": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.2.0.tgz", - "integrity": "sha512-qWJ2ZTbmumwiLFomfzTyt5Kng4hwPi9rwCYN4SHb6eaRU1KNO4ccxINHr/VhH4GgPlt1XfSTLX2LBTme8ne4Zw==", + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.50.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.50.1.tgz", + "integrity": "sha512-Cgef+5aZwuvesQNw9eX7g19FfKX5/pQRIyhoXLCiBOrWopjo7ycfB292TX9MDcDijiuIJlx1IzJz3IoCPfqs9w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@testing-library/dom": { + "version": "10.4.1", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz", + "integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==", "dev": true, "license": "MIT", "dependencies": { - "@types/estree": "^1.0.0", - "estree-walker": "^2.0.2", - "picomatch": "^4.0.2" + "@babel/code-frame": "^7.10.4", + "@babel/runtime": "^7.12.5", + "@types/aria-query": "^5.0.1", + "aria-query": "5.3.0", + "dom-accessibility-api": "^0.5.9", + "lz-string": "^1.5.0", + "picocolors": "1.1.1", + "pretty-format": "^27.0.2" }, "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" - }, - "peerDependenciesMeta": { - "rollup": { - "optional": true - } + "node": ">=18" } }, - "node_modules/@sindresorhus/merge-streams": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz", - "integrity": "sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==", + "node_modules/@testing-library/user-event": { + "version": "14.6.1", + "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.6.1.tgz", + "integrity": 
"sha512-vq7fv0rnt+QTXgPxr5Hjc210p6YKq2kmdziLgnsZGgLJ9e6VAShx1pACLuRjd/AS/sr7phAR58OIIpf0LlmQNw==", "dev": true, "license": "MIT", "engines": { - "node": ">=18" + "node": ">=12", + "npm": ">=6" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "peerDependencies": { + "@testing-library/dom": ">=7.21.4" } }, + "node_modules/@tursodatabase/database": { + "resolved": "packages/native", + "link": true + }, + "node_modules/@tursodatabase/database-browser": { + "resolved": "packages/browser", + "link": true + }, + "node_modules/@tursodatabase/database-common": { + "resolved": "packages/common", + "link": true + }, "node_modules/@tybys/wasm-util": { "version": "0.10.0", "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.0.tgz", "integrity": "sha512-VyyPYFlOMNylG45GoAe0xDoLwWuowvf92F9kySqzYh8vmYm7D2u4iUJKa1tOUpS70Ku13ASrOkS4ScXFsTaCNQ==", - "dev": true, "license": "MIT", "dependencies": { "tslib": "^2.4.0" } }, + "node_modules/@types/aria-query": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", + "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/chai": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", + "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/estree": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", @@ -1790,87 +1145,175 @@ "dev": true, "license": "MIT" }, - "node_modules/@vercel/nft": { - "version": "0.29.4", - "resolved": "https://registry.npmjs.org/@vercel/nft/-/nft-0.29.4.tgz", - "integrity": "sha512-6lLqMNX3TuycBPABycx7A9F1bHQR7kiQln6abjFbPrf5C/05qHM9M5E4PeTE59c7z8g6vHnx1Ioihb2AQl7BTA==", + "node_modules/@types/node": { + "version": "24.3.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.3.1.tgz", + "integrity": "sha512-3vXmQDXy+woz+gnrTvuvNrPzekOi+Ds0ReMxw0LzBiK3a+1k0kQn9f2NWk+lgD4rJehFUmYy2gMhJ2ZI+7YP9g==", "dev": true, "license": "MIT", "dependencies": { - "@mapbox/node-pre-gyp": "^2.0.0", - "@rollup/pluginutils": "^5.1.3", - "acorn": "^8.6.0", - "acorn-import-attributes": "^1.9.5", - "async-sema": "^3.1.1", - "bindings": "^1.4.0", - "estree-walker": "2.0.2", - "glob": "^10.4.5", - "graceful-fs": "^4.2.9", - "node-gyp-build": "^4.2.2", - "picomatch": "^4.0.2", - "resolve-from": "^5.0.0" - }, - "bin": { - "nft": "out/cli.js" - }, - "engines": { - "node": ">=18" + "undici-types": "~7.10.0" } }, - "node_modules/abbrev": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-3.0.1.tgz", - "integrity": "sha512-AO2ac6pjRB3SJmGJo+v5/aK6Omggp6fsLrs6wN9bd35ulu4cCwaAU9+7ZhXjeqHVkaHThLuzH0nZr0YpCDhygg==", - "dev": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/acorn": { - "version": "8.15.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", - "integrity": 
"sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "node_modules/@vitest/browser": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/browser/-/browser-3.2.4.tgz", + "integrity": "sha512-tJxiPrWmzH8a+w9nLKlQMzAKX/7VjFs50MWgcAj7p9XQ7AQ9/35fByFYptgPELyLw+0aixTnC4pUWV+APcZ/kw==", "dev": true, "license": "MIT", - "bin": { - "acorn": "bin/acorn" + "dependencies": { + "@testing-library/dom": "^10.4.0", + "@testing-library/user-event": "^14.6.1", + "@vitest/mocker": "3.2.4", + "@vitest/utils": "3.2.4", + "magic-string": "^0.30.17", + "sirv": "^3.0.1", + "tinyrainbow": "^2.0.0", + "ws": "^8.18.2" + }, + "funding": { + "url": "https://opencollective.com/vitest" }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-import-attributes": { - "version": "1.9.5", - "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", - "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", - "dev": true, - "license": "MIT", "peerDependencies": { - "acorn": "^8" + "playwright": "*", + "vitest": "3.2.4", + "webdriverio": "^7.0.0 || ^8.0.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "playwright": { + "optional": true + }, + "safaridriver": { + "optional": true + }, + "webdriverio": { + "optional": true + } } }, - "node_modules/acorn-walk": { - "version": "8.3.4", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", - "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", "dev": true, "license": "MIT", "dependencies": { - "acorn": "^8.11.0" + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" }, - "engines": { - "node": ">=0.4.0" + "funding": { + "url": "https://opencollective.com/vitest" } }, - "node_modules/agent-base": { - "version": "7.1.4", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", - "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "node_modules/@vitest/mocker": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz", + "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==", "dev": true, "license": "MIT", - "engines": { - "node": ">= 14" + "dependencies": { + "@vitest/spy": "3.2.4", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.17" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/mocker/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + 
"resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz", + "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "3.2.4", + "pathe": "^2.0.3", + "strip-literal": "^3.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz", + "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "magic-string": "^0.30.17", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" } }, "node_modules/ansi-escapes": { @@ -1903,172 +1346,48 @@ } }, "node_modules/ansi-regex": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", - "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, "license": "MIT", "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" + "node": ">=8" } }, "node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", "engines": { - "node": ">=12" + "node": ">=10" }, "funding": { "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - 
"integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", "dev": true, - "license": "MIT", + "license": "Apache-2.0", "dependencies": { - "sprintf-js": "~1.0.2" + "dequal": "^2.0.3" } }, - "node_modules/array-find-index": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", - "integrity": "sha512-M1HQyIXcBGtVywBt8WVdim+lrNaK7VHp99Qt5pSNziXznKHViIBbXWtfRTpEFpF/c4FdfxNAsCCwPp5phBYJtw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/arrgv": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/arrgv/-/arrgv-1.0.2.tgz", - "integrity": "sha512-a4eg4yhp7mmruZDQFqVMlxNRFGi/i1r87pt8SDHy0/I8PqSXoUTlWZRdAZo0VXgvEARcujbtTk8kiZRi1uDGRw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/arrify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-3.0.0.tgz", - "integrity": "sha512-tLkvA81vQG/XqE2mjDkGQHoOINtMHtysSnemrmoGe6PydDPMRbVugqyk4A6V/WDWEfm3l+0d8anA9r8cv/5Jaw==", + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", "dev": true, "license": "MIT", "engines": { "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/async-sema": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/async-sema/-/async-sema-3.1.1.tgz", - "integrity": "sha512-tLRNUXati5MFePdAk8dw7Qt7DpxPB60ofAgn8WRhW6a2rcimZnYBP9oxHiv0OHy+Wz7kPMG+t4LGdt31+4EmGg==", - "dev": true, - "license": "MIT" - }, - "node_modules/ava": { - "version": "6.4.1", - "resolved": "https://registry.npmjs.org/ava/-/ava-6.4.1.tgz", - "integrity": "sha512-vxmPbi1gZx9zhAjHBgw81w/iEDKcrokeRk/fqDTyA2DQygZ0o+dUGRHFOtX8RA5N0heGJTTsIk7+xYxitDb61Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vercel/nft": "^0.29.4", - "acorn": "^8.15.0", - "acorn-walk": "^8.3.4", - "ansi-styles": "^6.2.1", - "arrgv": "^1.0.2", - "arrify": "^3.0.0", - "callsites": "^4.2.0", - "cbor": "^10.0.9", - "chalk": "^5.4.1", - "chunkd": "^2.0.1", - "ci-info": "^4.3.0", - "ci-parallel-vars": "^1.0.1", - "cli-truncate": "^4.0.0", - "code-excerpt": "^4.0.0", - "common-path-prefix": "^3.0.0", - "concordance": "^5.0.4", - "currently-unhandled": "^0.4.1", - "debug": "^4.4.1", - "emittery": "^1.2.0", - "figures": "^6.1.0", - "globby": "^14.1.0", - "ignore-by-default": "^2.1.0", - "indent-string": "^5.0.0", - "is-plain-object": "^5.0.0", - "is-promise": "^4.0.0", - "matcher": "^5.0.0", - "memoize": "^10.1.0", - "ms": "^2.1.3", - "p-map": "^7.0.3", - "package-config": "^5.0.0", - "picomatch": "^4.0.2", - "plur": "^5.1.0", - "pretty-ms": "^9.2.0", - "resolve-cwd": "^3.0.0", - "stack-utils": "^2.0.6", - "strip-ansi": "^7.1.0", - "supertap": "^3.0.1", - "temp-dir": "^3.0.0", - "write-file-atomic": "^6.0.0", - "yargs": "^17.7.2" - }, - "bin": { - "ava": "entrypoints/cli.mjs" - }, - "engines": { - "node": "^18.18 || ^20.8 || ^22 || ^23 || >=24" - }, - "peerDependencies": { - "@ava/typescript": "*" - }, - "peerDependenciesMeta": { 
- "@ava/typescript": { - "optional": true - } - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, "node_modules/before-after-hook": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-4.0.0.tgz", @@ -2076,132 +1395,31 @@ "dev": true, "license": "Apache-2.0" }, - "node_modules/better-sqlite3": { - "version": "11.10.0", - "resolved": "https://registry.npmjs.org/better-sqlite3/-/better-sqlite3-11.10.0.tgz", - "integrity": "sha512-EwhOpyXiOEL/lKzHz9AW1msWFNzGc/z+LzeB3/jnFJpxu+th2yqvzsSWas1v9jgs9+xiXJcD5A8CJxAG2TaghQ==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "dependencies": { - "bindings": "^1.5.0", - "prebuild-install": "^7.1.1" - } - }, - "node_modules/bindings": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", - "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", "dev": true, "license": "MIT", - "dependencies": { - "file-uri-to-path": "1.0.0" - } - }, - "node_modules/bl": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", - "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", - "dev": true, - "license": "MIT", - "dependencies": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, - "node_modules/blueimp-md5": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/blueimp-md5/-/blueimp-md5-2.19.0.tgz", - "integrity": "sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==", - "dev": true, - "license": "MIT" - }, - "node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/braces": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", - "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "dev": true, - "license": "MIT", - "dependencies": { - "fill-range": "^7.1.1" - }, "engines": { "node": ">=8" } }, - "node_modules/buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": 
"sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "node_modules/callsites": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-4.2.0.tgz", - "integrity": "sha512-kfzR4zzQtAE9PC7CzZsjl3aBNbXWuXiSeOCdLcPpBfGW8YuCqQHcRPFDbr/BPVmd3EEPVpuFzLyuT/cUhPr4OQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cbor": { - "version": "10.0.9", - "resolved": "https://registry.npmjs.org/cbor/-/cbor-10.0.9.tgz", - "integrity": "sha512-KEWYehb/vJkRmigctVQLsz73Us2RNnITo/wOwQV5AtZpLGH1r2PPlsNHdsX460YuHZCyhLklbYzAOuJfOeg34Q==", + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", "dev": true, "license": "MIT", "dependencies": { - "nofilter": "^3.0.2" + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" }, "engines": { - "node": ">=20" - } - }, - "node_modules/chalk": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz", - "integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.17.0 || ^14.13 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" + "node": ">=18" } }, "node_modules/chardet": { @@ -2211,61 +1429,14 @@ "dev": true, "license": "MIT" }, - "node_modules/chownr": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", - "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/chunkd": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/chunkd/-/chunkd-2.0.1.tgz", - "integrity": "sha512-7d58XsFmOq0j6el67Ug9mHf9ELUXsQXYJBkyxhH/k+6Ke0qXRnv0kbemx+Twc6fRJ07C49lcbdgm9FL1Ei/6SQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/ci-info": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.3.0.tgz", - "integrity": "sha512-l+2bNRMiQgcfILUi33labAZYIWlH1kWDp+ecNo5iisRKrbm0xcRyCww71/YU0Fkw0mAFpz9bJayXPjey6vkmaQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/sibiraj-s" - } - ], - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/ci-parallel-vars": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ci-parallel-vars/-/ci-parallel-vars-1.0.1.tgz", - "integrity": "sha512-uvzpYrpmidaoxvIQHM+rKSrigjOe9feHYbw4uOI2gdfe1C3xIlxO+kVXq83WQWNniTf8bAxVpy+cQeFQsMERKg==", - "dev": true, - "license": "MIT" - }, - "node_modules/cli-truncate": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-4.0.0.tgz", - "integrity": 
"sha512-nPdaFdQ0h/GEigbPClz11D0v/ZJEwxmeVZGeMo3Z5StPtUTkA9o1lD6QwoirYiSDzbcwn2XcjwmCp68W1IS4TA==", + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", "dev": true, "license": "MIT", - "dependencies": { - "slice-ansi": "^5.0.0", - "string-width": "^7.0.0" - }, "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">= 16" } }, "node_modules/cli-width": { @@ -2294,123 +1465,6 @@ "typanion": "*" } }, - "node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/cliui/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/cliui/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/cliui/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": 
"https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/code-excerpt": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/code-excerpt/-/code-excerpt-4.0.0.tgz", - "integrity": "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA==", - "dev": true, - "license": "MIT", - "dependencies": { - "convert-to-spaces": "^2.0.1" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - } - }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -2438,94 +1492,6 @@ "dev": true, "license": "MIT" }, - "node_modules/common-path-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", - "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==", - "dev": true, - "license": "ISC" - }, - "node_modules/concordance": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/concordance/-/concordance-5.0.4.tgz", - "integrity": "sha512-OAcsnTEYu1ARJqWVGwf4zh4JDfHZEaSNlNccFmt8YjB2l/n19/PF2viLINHc57vO4FKIAFl2FWASIGZZWZ2Kxw==", - "dev": true, - "license": "ISC", - "dependencies": { - "date-time": "^3.1.0", - "esutils": "^2.0.3", - "fast-diff": "^1.2.0", - "js-string-escape": "^1.0.1", - "lodash": "^4.17.15", - "md5-hex": "^3.0.1", - "semver": "^7.3.2", - "well-known-symbols": "^2.0.0" - }, - "engines": { - "node": ">=10.18.0 <11 || >=12.14.0 <13 || >=14" - } - }, - "node_modules/consola": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", - "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^14.18.0 || >=16.10.0" - } - }, - "node_modules/convert-to-spaces": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/convert-to-spaces/-/convert-to-spaces-2.0.1.tgz", - "integrity": "sha512-rcQ1bsQO9799wq24uE5AM2tAILy4gXGIK/njFWcVQkGNZ96edlpY+A7bjwvzjYvLDyzmG1MmMLZhpcsb+klNMQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - } - }, - "node_modules/cross-spawn": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/currently-unhandled": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", - "integrity": "sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==", - "dev": true, - "license": "MIT", - "dependencies": { - "array-find-index": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/date-time": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/date-time/-/date-time-3.1.0.tgz", - "integrity": "sha512-uqCUKXE5q1PNBXjPqvwhwJf9SwMoAHBgWJ6DcrnS5o+W2JOiIILl0JEdVD8SGujrNS02GGxgwAg2PN2zONgtjg==", - "dev": true, - "license": "MIT", - "dependencies": { - "time-zone": "^1.0.0" - }, - "engines": { - "node": ">=6" - } - }, "node_modules/debug": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", @@ -2544,62 +1510,33 @@ } } }, - "node_modules/decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "mimic-response": "^3.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", "dev": true, "license": "MIT", "engines": { - "node": ">=4.0.0" + "node": ">=6" } }, - "node_modules/detect-libc": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz", - "integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==", + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", "dev": true, - "license": "Apache-2.0", + "license": "MIT", "engines": { - "node": ">=8" + "node": ">=6" } }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "node_modules/dom-accessibility-api": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", + "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", "dev": true, "license": "MIT" }, - "node_modules/emittery": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/emittery/-/emittery-1.2.0.tgz", - "integrity": "sha512-KxdRyyFcS85pH3dnU8Y5yFUm2YJdaHwcBZWrfG8o89ZY9a13/f9itbN+YG3ELbBo9Pg5zvIozstmuV8bX13q6g==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sindresorhus/emittery?sponsor=1" - } - }, "node_modules/emnapi": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/emnapi/-/emnapi-1.4.5.tgz", @@ -2615,85 +1552,74 @@ } } }, - "node_modules/emoji-regex": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", - "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": 
"https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", "dev": true, "license": "MIT" }, - "node_modules/end-of-stream": { - "version": "1.4.5", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", - "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", + "node_modules/es-toolkit": { + "version": "1.39.10", + "resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.39.10.tgz", + "integrity": "sha512-E0iGnTtbDhkeczB0T+mxmoVlT4YNweEKBLq7oaU4p11mecdsZpNWOglI4895Vh4usbQ+LsJiuLuI2L0Vdmfm2w==", "dev": true, "license": "MIT", - "dependencies": { - "once": "^1.4.0" - } + "workspaces": [ + "docs", + "benchmarks" + ] }, - "node_modules/escalade": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "node_modules/esbuild": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.9.tgz", + "integrity": "sha512-CRbODhYyQx3qp7ZEwzxOk4JBqmD/seJrzPa/cGjY1VtIn5E09Oi9/dB4JwctnfZ8Q8iT7rioVv5k/FNT/uf54g==", "dev": true, + "hasInstallScript": true, "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true, - "license": "BSD-2-Clause", "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" + "esbuild": "bin/esbuild" }, "engines": { - "node": ">=4" + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.9", + "@esbuild/android-arm": "0.25.9", + "@esbuild/android-arm64": "0.25.9", + "@esbuild/android-x64": "0.25.9", + "@esbuild/darwin-arm64": "0.25.9", + "@esbuild/darwin-x64": "0.25.9", + "@esbuild/freebsd-arm64": "0.25.9", + "@esbuild/freebsd-x64": "0.25.9", + "@esbuild/linux-arm": "0.25.9", + "@esbuild/linux-arm64": "0.25.9", + "@esbuild/linux-ia32": "0.25.9", + "@esbuild/linux-loong64": "0.25.9", + "@esbuild/linux-mips64el": "0.25.9", + "@esbuild/linux-ppc64": "0.25.9", + "@esbuild/linux-riscv64": "0.25.9", + "@esbuild/linux-s390x": "0.25.9", + "@esbuild/linux-x64": "0.25.9", + "@esbuild/netbsd-arm64": "0.25.9", + "@esbuild/netbsd-x64": "0.25.9", + "@esbuild/openbsd-arm64": "0.25.9", + "@esbuild/openbsd-x64": "0.25.9", + "@esbuild/openharmony-arm64": "0.25.9", + "@esbuild/sunos-x64": "0.25.9", + "@esbuild/win32-arm64": "0.25.9", + "@esbuild/win32-ia32": "0.25.9", + "@esbuild/win32-x64": "0.25.9" } }, - "node_modules/estree-walker": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", - "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "node_modules/expect-type": { + 
"version": "1.2.2", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", + "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", "dev": true, - "license": "MIT" - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true, - "license": "BSD-2-Clause", + "license": "Apache-2.0", "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/expand-template": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", - "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", - "dev": true, - "license": "(MIT OR WTFPL)", - "engines": { - "node": ">=6" + "node": ">=12.0.0" } }, "node_modules/external-editor": { @@ -2728,74 +1654,22 @@ ], "license": "MIT" }, - "node_modules/fast-diff": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/fast-diff/-/fast-diff-1.3.0.tgz", - "integrity": "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==", - "dev": true, - "license": "Apache-2.0" - }, - "node_modules/fast-glob": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", - "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", "dev": true, "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.8" - }, "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/fastq": { - "version": "1.19.1", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", - "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "reusify": "^1.0.4" - } - }, - "node_modules/figures": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-6.1.0.tgz", - "integrity": "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-unicode-supported": "^2.0.0" + "node": ">=12.0.0" }, - "engines": { - "node": ">=18" + "peerDependencies": { + "picomatch": "^3 || ^4" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/file-uri-to-path": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", - "dev": true, - "license": "MIT" - }, - "node_modules/fill-range": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", - "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "dev": true, - "license": "MIT", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" + 
"peerDependenciesMeta": { + "picomatch": { + "optional": true + } } }, "node_modules/find-up": { @@ -2816,19 +1690,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/find-up-simple": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/find-up-simple/-/find-up-simple-1.0.1.tgz", - "integrity": "sha512-afd4O7zpqHeRyg4PfDQsXmlDe2PfdHtJt6Akt8jOWaApLOZk5JXs6VMR29lz03pRe9mpykrRCYIYxaJYcfpncQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/find-up/node_modules/unicorn-magic": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", @@ -2842,136 +1703,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/foreground-child": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", - "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", - "dev": true, - "license": "ISC", - "dependencies": { - "cross-spawn": "^7.0.6", - "signal-exit": "^4.0.1" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/fs-constants": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", - "dev": true, - "license": "MIT" - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true, - "license": "ISC", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-east-asian-width": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.3.0.tgz", - "integrity": "sha512-vpeMIQKxczTD/0s2CdEWHcb0eeJe6TFjxb+J5xgX7hScxqrGuyjmv4c1D4A/gelKfyox0gJJwIHF+fLjeaM8kQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/github-from-package": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", - "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", - "dev": true, - "license": "MIT" - }, - "node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "license": "ISC", - "dependencies": { - 
"is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/globby": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-14.1.0.tgz", - "integrity": "sha512-0Ia46fDOaT7k4og1PDW4YbodWWr3scS2vAr2lTbsplOt2WkKp0vQbkI9wKis/T5LV/dqPjO3bpS/z6GTJB82LA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@sindresorhus/merge-streams": "^2.1.0", - "fast-glob": "^3.3.3", - "ignore": "^7.0.3", - "path-type": "^6.0.0", - "slash": "^5.1.0", - "unicorn-magic": "^0.3.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/https-proxy-agent": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", - "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", - "dev": true, - "license": "MIT", - "dependencies": { - "agent-base": "^7.1.2", - "debug": "4" - }, - "engines": { - "node": ">= 14" - } - }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -2985,230 +1716,13 @@ "node": ">=0.10.0" } }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "BSD-3-Clause" - }, - "node_modules/ignore": { - "version": "7.0.5", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", - "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/ignore-by-default": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-2.1.0.tgz", - "integrity": "sha512-yiWd4GVmJp0Q6ghmM2B/V3oZGRmjrKLXvHR3TE1nfoXsmoggllfZUQe74EN0fJdPFZu2NIvNdrMMLm3OsV7Ohw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=10 <11 || >=12 <13 || >=14" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/indent-string": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-5.0.0.tgz", - "integrity": "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": 
"https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true, - "license": "ISC" - }, - "node_modules/irregular-plurals": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/irregular-plurals/-/irregular-plurals-3.5.0.tgz", - "integrity": "sha512-1ANGLZ+Nkv1ptFb2pa8oG8Lem4krflKuX/gINiHJHjJUKaJHk/SXk5x6K3J+39/p0h1RQ2saROclJJ+QLvETCQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", - "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/is-plain-object": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", - "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-promise": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", - "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==", + "node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", "dev": true, "license": "MIT" }, - "node_modules/is-unicode-supported": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-2.1.0.tgz", - "integrity": "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - 
"node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true, - "license": "ISC" - }, - "node_modules/jackspeak": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", - "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, - "node_modules/js-string-escape": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/js-string-escape/-/js-string-escape-1.0.1.tgz", - "integrity": "sha512-Smw4xcfIQ5LVjAOuJCvN/zIodzA/BBSsluuoSykP+lUvScIi4U6RJLfwHet5cxFnCswUjISV8oAXaqaJDY3chg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, - "license": "MIT", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/load-json-file": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-7.0.1.tgz", - "integrity": "sha512-Gnxj3ev3mB5TkVBGad0JM6dmLiQL+o0t23JPBZ9sd+yvSLk05mFoqKBw5N8gbbkU4TNXyqCgIrl/VM17OgUIgQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/locate-path": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", @@ -3225,206 +1739,42 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", "dev": true, "license": "MIT" }, - "node_modules/lodash-es": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", - "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", - "dev": true, - "license": "MIT" - }, - "node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/matcher": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/matcher/-/matcher-5.0.0.tgz", - "integrity": "sha512-s2EMBOWtXFc8dgqvoAzKJXxNHibcdJMV0gwqKUaw9E2JBJuGUK7DrNKrA6g/i+v72TT16+6sVm5mS3thaMLQUw==", - "dev": true, - "license": "MIT", - "dependencies": { - "escape-string-regexp": "^5.0.0" - }, - "engines": { - "node": "^12.20.0 || 
^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/md5-hex": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/md5-hex/-/md5-hex-3.0.1.tgz", - "integrity": "sha512-BUiRtTtV39LIJwinWBjqVsU9xhdnz7/i889V859IBFpuqGAj6LuOvHv5XLbgZ2R7ptJoJaEcxkv88/h25T7Ciw==", - "dev": true, - "license": "MIT", - "dependencies": { - "blueimp-md5": "^2.10.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/memoize": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/memoize/-/memoize-10.1.0.tgz", - "integrity": "sha512-MMbFhJzh4Jlg/poq1si90XRlTZRDHVqdlz2mPyGJ6kqMpyHUyVpDd5gpFAvVehW64+RA1eKE9Yt8aSLY7w2Kgg==", - "dev": true, - "license": "MIT", - "dependencies": { - "mimic-function": "^5.0.1" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sindresorhus/memoize?sponsor=1" - } - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/micromatch": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", - "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "dev": true, - "license": "MIT", - "dependencies": { - "braces": "^3.0.3", - "picomatch": "^2.3.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/micromatch/node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/mimic-function": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/mimic-function/-/mimic-function-5.0.1.tgz", - "integrity": "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/mimic-response": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true, - 
"license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/minizlib": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", - "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", - "dev": true, - "license": "MIT", - "dependencies": { - "minipass": "^7.1.2" - }, - "engines": { - "node": ">= 18" - } - }, - "node_modules/mkdirp": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", - "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", + "node_modules/lz-string": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz", + "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", "dev": true, "license": "MIT", "bin": { - "mkdirp": "dist/cjs/src/bin.js" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "lz-string": "bin/bin.js" } }, - "node_modules/mkdirp-classic": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", - "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "node_modules/magic-string": { + "version": "0.30.18", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.18.tgz", + "integrity": "sha512-yi8swmWbO17qHhwIBNeeZxTceJMeBvWJaId6dyvTSOwTipqeHhMhOrz6513r1sOKnpvQ7zkhlG8tPrpilwTxHQ==", "dev": true, - "license": "MIT" + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/mrmime": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.1.tgz", + "integrity": "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } }, "node_modules/ms": { "version": "2.1.3", @@ -3443,93 +1793,23 @@ "node": "^18.17.0 || >=20.5.0" } }, - "node_modules/napi-build-utils": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-2.0.0.tgz", - "integrity": "sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA==", + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", "dev": true, - "license": "MIT" - }, - "node_modules/node-abi": { - "version": "3.75.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.75.0.tgz", - "integrity": "sha512-OhYaY5sDsIka7H7AtijtI9jwGYLyl29eQn/W623DiN/MIv5sUqc4g7BIDThX+gb7di9f6xK02nkp8sdfFWZLTg==", - "dev": true, - "license": "MIT", - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": 
"sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "dev": true, - "license": "MIT", - "dependencies": { - "whatwg-url": "^5.0.0" - }, - "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" } - } - }, - "node_modules/node-gyp-build": { - "version": "4.8.4", - "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.8.4.tgz", - "integrity": "sha512-LA4ZjwlnUblHVgq0oBF3Jl/6h/Nvs5fzBLwdEF4nuxnFdsfajde4WfxtJr3CaiH+F6ewcIB/q4jQ4UzPyid+CQ==", - "dev": true, + ], "license": "MIT", "bin": { - "node-gyp-build": "bin.js", - "node-gyp-build-optional": "optional.js", - "node-gyp-build-test": "build-test.js" - } - }, - "node_modules/nofilter": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/nofilter/-/nofilter-3.1.0.tgz", - "integrity": "sha512-l2NNj07e9afPnhAhvgVrCD/oy2Ai1yfLpuo3EpiO1jFTsB4sFz6oIfAfSZyQzVpkZQ9xS8ZS5g1jCBgq4Hwo0g==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.19" - } - }, - "node_modules/nopt": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-8.1.0.tgz", - "integrity": "sha512-ieGu42u/Qsa4TFktmaKEwM6MQH0pOWnaB3htzh0JRtx84+Mebc0cbZYN5bC+6WTZ4+77xrL9Pn5m7CV6VIkV7A==", - "dev": true, - "license": "ISC", - "dependencies": { - "abbrev": "^3.0.0" - }, - "bin": { - "nopt": "bin/nopt.js" + "nanoid": "bin/nanoid.cjs" }, "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dev": true, - "license": "ISC", - "dependencies": { - "wrappy": "1" + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, "node_modules/os-tmpdir": { @@ -3574,56 +1854,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/p-map": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.3.tgz", - "integrity": "sha512-VkndIv2fIB99swvQoA65bm+fsmt6UNdGeIB0oxBs+WhAhdh08QA04JXpI7rbB9r08/nkbysKoya9rtDERYOYMA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/package-config": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/package-config/-/package-config-5.0.0.tgz", - "integrity": "sha512-GYTTew2slBcYdvRHqjhwaaydVMvn/qrGC323+nKclYioNSLTDUM/lGgtGTgyHVtYcozb+XkE8CNhwcraOmZ9Mg==", - "dev": true, - "license": "MIT", - "dependencies": { - "find-up-simple": "^1.0.0", - "load-json-file": "^7.0.1" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/package-json-from-dist": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", - "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", - "dev": true, - "license": "BlueOak-1.0.0" - }, - "node_modules/parse-ms": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-ms/-/parse-ms-4.0.0.tgz", - "integrity": "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==", - "dev": true, - "license": "MIT", - 
"engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/path-exists": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", @@ -3634,45 +1864,29 @@ "node": "^12.20.0 || ^14.13.1 || >=16.0.0" } }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", "dev": true, "license": "MIT", "engines": { - "node": ">=8" + "node": ">= 14.16" } }, - "node_modules/path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/path-type": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-6.0.0.tgz", - "integrity": "sha512-Vj7sf++t5pBD637NSfkxpHSMfWaeig5+DKWLhcqIYx6mWQz5hdJTGDVMQiJcw1ZYkhs7AazKDGpRVji1LJCZUQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } + "license": "ISC" }, "node_modules/picomatch": { "version": "4.0.3", @@ -3687,217 +1901,130 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, - "node_modules/plur": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/plur/-/plur-5.1.0.tgz", - "integrity": "sha512-VP/72JeXqak2KiOzjgKtQen5y3IZHn+9GOuLDafPv0eXa47xq0At93XahYBs26MsifCQ4enGKwbjBTKgb9QJXg==", + "node_modules/playwright": { + "version": "1.55.0", + "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.55.0.tgz", + "integrity": "sha512-sdCWStblvV1YU909Xqx0DhOjPZE4/5lJsIS84IfN9dAZfcl/CIZ5O8l3o0j7hPMjDvqoTF8ZUcc+i/GL5erstA==", "dev": true, - "license": "MIT", + "license": "Apache-2.0", "dependencies": { - "irregular-plurals": "^3.3.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/prebuild-install": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.3.tgz", - "integrity": "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==", - "dev": true, - "license": "MIT", - "dependencies": { - "detect-libc": "^2.0.0", - "expand-template": "^2.0.3", - 
"github-from-package": "0.0.0", - "minimist": "^1.2.3", - "mkdirp-classic": "^0.5.3", - "napi-build-utils": "^2.0.0", - "node-abi": "^3.3.0", - "pump": "^3.0.0", - "rc": "^1.2.7", - "simple-get": "^4.0.0", - "tar-fs": "^2.0.0", - "tunnel-agent": "^0.6.0" + "playwright-core": "1.55.0" }, "bin": { - "prebuild-install": "bin.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/pretty-ms": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/pretty-ms/-/pretty-ms-9.2.0.tgz", - "integrity": "sha512-4yf0QO/sllf/1zbZWYnvWw3NxCQwLXKzIj0G849LSufP15BXKM0rbD2Z3wVnkMfjdn/CB0Dpp444gYAACdsplg==", - "dev": true, - "license": "MIT", - "dependencies": { - "parse-ms": "^4.0.0" + "playwright": "cli.js" }, "engines": { "node": ">=18" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "optionalDependencies": { + "fsevents": "2.3.2" } }, - "node_modules/pump": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz", - "integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==", + "node_modules/playwright-core": { + "version": "1.55.0", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.55.0.tgz", + "integrity": "sha512-GvZs4vU3U5ro2nZpeiwyb0zuFaqb9sUiAJuyrWpcGouD8y9/HLgGbNRjIph7zU9D3hnPaisMl9zG9CgFi/biIg==", "dev": true, - "license": "MIT", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" + "license": "Apache-2.0", + "bin": { + "playwright-core": "cli.js" + }, + "engines": { + "node": ">=18" } }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", "dev": true, "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, { "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" + "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, "license": "MIT" }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": 
"https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "node_modules/rollup": { + "version": "4.50.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.50.1.tgz", + "integrity": "sha512-78E9voJHwnXQMiQdiqswVLZwJIzdBKJ1GdI5Zx6XwoFKUIk09/sSrr+05QFzvYb8q6Y9pPV45zzDuYa3907TZA==", "dev": true, - "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "license": "MIT", "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" + "@types/estree": "1.0.8" }, "bin": { - "rc": "cli.js" - } - }, - "node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "dev": true, - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" + "rollup": "dist/bin/rollup" }, "engines": { - "node": ">= 6" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/resolve-cwd": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", - "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", - "dev": true, - "license": "MIT", - "dependencies": { - "resolve-from": "^5.0.0" + "node": ">=18.0.0", + "npm": ">=8.0.0" }, - "engines": { - "node": ">=8" + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.50.1", + "@rollup/rollup-android-arm64": "4.50.1", + "@rollup/rollup-darwin-arm64": "4.50.1", + "@rollup/rollup-darwin-x64": "4.50.1", + "@rollup/rollup-freebsd-arm64": "4.50.1", + "@rollup/rollup-freebsd-x64": "4.50.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.50.1", + "@rollup/rollup-linux-arm-musleabihf": "4.50.1", + "@rollup/rollup-linux-arm64-gnu": "4.50.1", + "@rollup/rollup-linux-arm64-musl": "4.50.1", + "@rollup/rollup-linux-loongarch64-gnu": "4.50.1", + "@rollup/rollup-linux-ppc64-gnu": "4.50.1", + "@rollup/rollup-linux-riscv64-gnu": "4.50.1", + "@rollup/rollup-linux-riscv64-musl": "4.50.1", + "@rollup/rollup-linux-s390x-gnu": "4.50.1", + "@rollup/rollup-linux-x64-gnu": "4.50.1", + "@rollup/rollup-linux-x64-musl": "4.50.1", + "@rollup/rollup-openharmony-arm64": "4.50.1", + "@rollup/rollup-win32-arm64-msvc": "4.50.1", + "@rollup/rollup-win32-ia32-msvc": "4.50.1", + "@rollup/rollup-win32-x64-msvc": "4.50.1", + "fsevents": "~2.3.2" } }, - "node_modules/resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/reusify": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", - "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", - "dev": true, - "license": "MIT", - "engines": { - "iojs": ">=1.0.0", - 
"node": ">=0.10.0" - } - }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", @@ -3918,44 +2045,12 @@ "node": ">=10" } }, - "node_modules/serialize-error": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-7.0.1.tgz", - "integrity": "sha512-8I8TjW5KMOKsZQTvoxjuSIa7foAwPWGOts+6o7sgjz41/qMD9VQHEDxi6PBvK2l0MXUmqZyNpUK+T2tQaaElvw==", - "dev": true, - "license": "MIT", - "dependencies": { - "type-fest": "^0.13.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/shebang-command": { + "node_modules/siginfo": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", "dev": true, - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } + "license": "ISC" }, "node_modules/signal-exit": { "version": "4.1.0", @@ -3970,336 +2065,117 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/simple-concat": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", - "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/simple-get": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", - "integrity": 
"sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "decompress-response": "^6.0.0", - "once": "^1.3.1", - "simple-concat": "^1.0.0" - } - }, - "node_modules/slash": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-5.1.0.tgz", - "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/slice-ansi": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-5.0.0.tgz", - "integrity": "sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==", + "node_modules/sirv": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-3.0.2.tgz", + "integrity": "sha512-2wcC/oGxHis/BoHkkPwldgiPSYcpZK3JU28WoMVv55yHJgcZ8rlXvuG9iZggz+sU1d4bRgIGASwyWqjxu3FM0g==", "dev": true, "license": "MIT", "dependencies": { - "ansi-styles": "^6.0.0", - "is-fullwidth-code-point": "^4.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/slice-ansi?sponsor=1" - } - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", - "dev": true, - "license": "BSD-3-Clause" - }, - "node_modules/stack-utils": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", - "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "escape-string-regexp": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/stack-utils/node_modules/escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "dev": true, - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/string-width": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": "^7.1.0" + "@polka/url": "^1.0.0-next.24", + "mrmime": "^2.0.0", + "totalist": "^3.0.0" }, "engines": { "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" } }, 
- "node_modules/string-width-cjs": { - "name": "string-width", - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/string-width-cjs/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/strip-ansi-cjs": { - "name": "strip-ansi", - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": 
"sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", - "dev": true, - "license": "MIT", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, - "node_modules/supertap": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/supertap/-/supertap-3.0.1.tgz", - "integrity": "sha512-u1ZpIBCawJnO+0QePsEiOknOfCRq0yERxiAchT0i4li0WHNUJbf0evXXSXOcCAR4M8iMDoajXYmstm/qO81Isw==", + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", "dev": true, - "license": "MIT", - "dependencies": { - "indent-string": "^5.0.0", - "js-yaml": "^3.14.1", - "serialize-error": "^7.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - } + "license": "MIT" }, - "node_modules/tar": { - "version": "7.4.3", - "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", - "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "node_modules/std-env": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.9.0.tgz", + "integrity": "sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==", "dev": true, - "license": "ISC", - "dependencies": { - "@isaacs/fs-minipass": "^4.0.0", - "chownr": "^3.0.0", - "minipass": "^7.1.2", - "minizlib": "^3.0.1", - "mkdirp": "^3.0.1", - "yallist": "^5.0.0" - }, - "engines": { - "node": ">=18" - } + "license": "MIT" }, - "node_modules/tar-fs": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.3.tgz", - "integrity": "sha512-090nwYJDmlhwFwEW3QQl+vaNnxsO2yVsd45eTKRBzSzu+hlb1w2K9inVq5b0ngXuLVqQ4ApvsUHHnu/zQNkWAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "chownr": "^1.1.1", - "mkdirp-classic": "^0.5.2", - "pump": "^3.0.0", - "tar-stream": "^2.1.4" - } - }, - "node_modules/tar-fs/node_modules/chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", - "dev": true, - "license": "ISC" - }, - "node_modules/tar-stream": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", - "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "bl": "^4.0.3", - "end-of-stream": "^1.4.1", - "fs-constants": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.1.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/temp-dir": { + "node_modules/strip-literal": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-3.0.0.tgz", - "integrity": "sha512-nHc6S/bwIilKHNRgK/3jlhDoIHcp45YgyiwcAk46Tr0LfEqGBVpmiAyuiuxeVE44m3mXnEeVhaipLOEWmH+Njw==", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.0.0.tgz", + "integrity": "sha512-TcccoMhJOM3OebGhSBEmp3UZ2SfDMZUEBdRA/9ynfLi8yYajyWX3JiXArcJt4Umh4vISpspkQIY8ZZoCqjbviA==", "dev": true, "license": "MIT", - "engines": { - "node": ">=14.16" + "dependencies": { + "js-tokens": "^9.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" } }, - "node_modules/time-zone": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/time-zone/-/time-zone-1.0.0.tgz", - "integrity": "sha512-TIsDdtKo6+XrPtiTm1ssmMngN1sAhyKnTO2kunQWqNPWIVvCm15Wmw4SWInwTVgJ5u/Tr04+8Ei9TNcw4x4ONA==", + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", "dev": true, "license": "MIT", "engines": { - "node": ">=4" + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.3.tgz", + "integrity": "sha512-t2T/WLB2WRgZ9EpE4jgPJ9w+i66UZfDc8wHh0xrwiRNN+UwH98GIJkTeZqX9rg0i0ptwzqW+uYeIF0T4F8LR7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" } }, "node_modules/tmp": { @@ -4315,46 +2191,22 @@ "node": ">=0.6.0" } }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", "dev": true, "license": "MIT", - "dependencies": { - "is-number": "^7.0.0" - }, "engines": { - "node": ">=8.0" + "node": ">=6" } }, - "node_modules/tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", - "dev": true, - "license": "MIT" - }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", - "dev": true, "license": "0BSD" }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - 
"integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, "node_modules/typanion": { "version": "3.14.0", "resolved": "https://registry.npmjs.org/typanion/-/typanion-3.14.0.tgz", @@ -4365,19 +2217,6 @@ "website" ] }, - "node_modules/type-fest": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.13.1.tgz", - "integrity": "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==", - "dev": true, - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/typescript": { "version": "5.9.2", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.2.tgz", @@ -4392,18 +2231,12 @@ "node": ">=14.17" } }, - "node_modules/unicorn-magic": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", - "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", + "node_modules/undici-types": { + "version": "7.10.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.10.0.tgz", + "integrity": "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag==", "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } + "license": "MIT" }, "node_modules/universal-user-agent": { "version": "7.0.3", @@ -4412,313 +2245,214 @@ "dev": true, "license": "ISC" }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "dev": true, - "license": "MIT" - }, - "node_modules/webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", - "dev": true, - "license": "BSD-2-Clause" - }, - "node_modules/well-known-symbols": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/well-known-symbols/-/well-known-symbols-2.0.0.tgz", - "integrity": "sha512-ZMjC3ho+KXo0BfJb7JgtQ5IBuvnShdlACNkKkdsqBmYw3bPAaJfPeYUo6tLUaT5tG/Gkh7xkpBhKRQ9e7pyg9Q==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=6" - } - }, - "node_modules/whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "node_modules/vite": { + "version": "7.1.5", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.1.5.tgz", + "integrity": "sha512-4cKBO9wR75r0BeIWWWId9XK9Lj6La5X846Zw9dFfzMRw38IlTk2iCcUt6hsyiDRcPidc55ZParFYDXi0nXOeLQ==", "dev": true, "license": "MIT", "dependencies": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - 
"license": "ISC", - "dependencies": { - "isexe": "^2.0.0" + "esbuild": "^0.25.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" }, "bin": { - "node-which": "bin/node-which" + "vite": "bin/vite.js" }, "engines": { - "node": ">= 8" - } - }, - "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" + "node": "^20.19.0 || >=22.12.0" }, "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } } }, - "node_modules/wrap-ansi-cjs": { - "name": "wrap-ansi", - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "node_modules/vite-node": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.4.tgz", + "integrity": "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==", "dev": true, "license": "MIT", "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" + "cac": "^6.7.14", + "debug": "^4.4.1", + "es-module-lexer": "^1.7.0", + "pathe": "^2.0.3", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "bin": { + "vite-node": "vite-node.mjs" }, "engines": { - "node": ">=10" + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" }, "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + "url": "https://opencollective.com/vitest" } }, - "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "node_modules/vitest": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz", + "integrity": 
"sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", "dev": true, "license": "MIT", "dependencies": { - "color-convert": "^2.0.1" + "@types/chai": "^5.2.2", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/pretty-format": "^3.2.4", + "@vitest/runner": "3.2.4", + "@vitest/snapshot": "3.2.4", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "debug": "^4.4.1", + "expect-type": "^1.2.1", + "magic-string": "^0.30.17", + "pathe": "^2.0.3", + "picomatch": "^4.0.2", + "std-env": "^3.9.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.14", + "tinypool": "^1.1.1", + "tinyrainbow": "^2.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", + "vite-node": "3.2.4", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" }, "engines": { - "node": ">=8" + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" }, "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/debug": "^4.1.12", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@vitest/browser": "3.2.4", + "@vitest/ui": "3.2.4", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/debug": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } } }, - "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/wrap-ansi-cjs/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", "dev": true, "license": "MIT", "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" }, "engines": { "node": ">=8" } }, - "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/wrap-ansi/node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true, - "license": "MIT" - }, - "node_modules/wrap-ansi/node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "license": "MIT", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/write-file-atomic": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-6.0.0.tgz", - "integrity": "sha512-GmqrO8WJ1NuzJ2DrziEI2o57jKAVIQNf8a18W3nCYU3H7PNWqCCVTeH6/NQE93CIllIgQS98rrmVkYgTX9fFJQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "imurmurhash": "^0.1.4", - "signal-exit": "^4.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/yallist": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", - "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "dev": true, - "license": "MIT", - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", "dev": true, "license": "MIT", "engines": { - 
"node": ">=8" - } - }, - "node_modules/yargs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/yargs/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" + "node": ">=10.0.0" }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" }, - "engines": { - "node": ">=8" + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } } }, "node_modules/yocto-queue": { @@ -4746,6 +2480,53 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "packages/browser": { + "name": "@tursodatabase/database-browser", + "version": "0.1.5-pre.3", + "license": "MIT", + "dependencies": { + "@napi-rs/wasm-runtime": "^1.0.3", + "@tursodatabase/database-common": "^0.1.5-pre.3" + }, + "devDependencies": { + "@napi-rs/cli": "^3.1.5", + "@vitest/browser": "^3.2.4", + "playwright": "^1.55.0", + "typescript": "^5.9.2", + "vitest": "^3.2.4" + } + }, + "packages/common": { + "name": "@tursodatabase/database-common", + "version": "0.1.5-pre.3", + "license": "MIT", + "devDependencies": { + "typescript": "^5.9.2" + } + }, + "packages/core": { + "name": "@tursodatabase/database-core", + "version": "0.1.5-pre.3", + "extraneous": true, + "license": "MIT", + "devDependencies": { + "typescript": "^5.9.2" + } + }, + "packages/native": { + "name": "@tursodatabase/database", + "version": "0.1.5-pre.3", + "license": "MIT", + "dependencies": { + "@tursodatabase/database-common": "^0.1.5-pre.3" + }, + "devDependencies": { + "@napi-rs/cli": "^3.1.5", + "@types/node": "^24.3.1", + "typescript": "^5.9.2", + "vitest": "^3.2.4" + } } } } \ No newline at end of file diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json index 833ded332..dfa82ece4 100644 --- a/bindings/javascript/package.json +++ b/bindings/javascript/package.json @@ -1,64 +1,13 @@ { - "name": "@tursodatabase/database", - "version": "0.1.5-pre.2", - "repository": { - "type": "git", - "url": "https://github.com/tursodatabase/turso" - }, - "description": "The Turso database library", - "module": "./dist/promise.js", - "main": "./dist/promise.js", - "type": "module", - "exports": { - ".": "./dist/promise.js", - "./compat": 
"./dist/compat.js" - }, - "files": [ - "browser.js", - "index.js", - "index.d.ts", - "dist/**" - ], - "types": "index.d.ts", - "napi": { - "binaryName": "turso", - "targets": [ - "x86_64-unknown-linux-gnu", - "x86_64-pc-windows-msvc", - "universal-apple-darwin", - "aarch64-unknown-linux-gnu", - "wasm32-wasip1-threads" - ] - }, - "license": "MIT", - "devDependencies": { - "@napi-rs/cli": "^3.0.4", - "@napi-rs/wasm-runtime": "^1.0.1", - "ava": "^6.0.1", - "better-sqlite3": "^11.9.1", - "typescript": "^5.9.2" - }, - "ava": { - "timeout": "3m" - }, - "engines": { - "node": ">= 10" - }, "scripts": { - "artifacts": "napi artifacts", - "build": "npm exec tsc && napi build --platform --release --esm", - "build:debug": "npm exec tsc && napi build --platform", - "prepublishOnly": "npm exec tsc && napi prepublish -t npm", - "test": "true", - "universal": "napi universalize", - "version": "napi version" + "build": "npm run build --workspaces", + "tsc-build": "npm run tsc-build --workspaces", + "test": "npm run test --workspaces" }, - "packageManager": "yarn@4.9.2", - "imports": { - "#entry-point": { - "types": "./index.d.ts", - "browser": "./browser.js", - "node": "./index.js" - } - } + "workspaces": [ + "packages/common", + "packages/native", + "packages/browser" + ], + "version": "0.1.5-pre.4" } \ No newline at end of file diff --git a/bindings/javascript/packages/browser/README.md b/bindings/javascript/packages/browser/README.md new file mode 100644 index 000000000..e443f495e --- /dev/null +++ b/bindings/javascript/packages/browser/README.md @@ -0,0 +1,124 @@ +
+# Turso Database for JavaScript in Browser
+
+[npm](https://www.npmjs.com/package/@tursodatabase/database-browser)
+
+[Chat with other users of Turso on Discord](https://tur.so/discord)
+
+---
+
+## About
+
+This package is the Turso embedded database library for JavaScript in the browser.
+
+> **⚠️ Warning:** This software is ALPHA; use it only for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now.
+
+## Features
+
+- **SQLite compatible:** SQLite query language and file format support ([status](https://github.com/tursodatabase/turso/blob/main/COMPAT.md)).
+- **In-process**: No network overhead, runs directly in the browser; file-based databases are persisted to the Origin Private File System (OPFS)
+- **TypeScript support**: Full TypeScript definitions included
+
+## Installation
+
+```bash
+npm install @tursodatabase/database-browser
+```
+
+## Getting Started
+
+### In-Memory Database
+
+```javascript
+import { connect } from '@tursodatabase/database-browser';
+
+// Create an in-memory database
+const db = await connect(':memory:');
+
+// Create a table
+await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)');
+
+// Insert data
+const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+await insert.run('Alice', 'alice@example.com');
+await insert.run('Bob', 'bob@example.com');
+
+// Query data
+const users = await db.prepare('SELECT * FROM users').all();
+console.log(users);
+// Output: [
+//   { id: 1, name: 'Alice', email: 'alice@example.com' },
+//   { id: 2, name: 'Bob', email: 'bob@example.com' }
+// ]
+```
+
+### File-Based Database
+
+```javascript
+import { connect } from '@tursodatabase/database-browser';
+
+// Create or open a database file
+const db = await connect('my-database.db');
+
+// Create a table
+await db.exec(`
+  CREATE TABLE IF NOT EXISTS posts (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    title TEXT NOT NULL,
+    content TEXT,
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
+  )
+`);
+
+// Insert a post
+const insertPost = db.prepare('INSERT INTO posts (title, content) VALUES (?, ?)');
+const result = await insertPost.run('Hello World', 'This is my first blog post!');
+
+console.log(`Inserted post with ID: ${result.lastInsertRowid}`);
+```
+
+### Transactions
+
+```javascript
+import { connect } from '@tursodatabase/database-browser';
+
+const db = await connect('transactions.db');
+
+// The transaction below inserts into this table
+await db.exec('CREATE TABLE IF NOT EXISTS users (name TEXT, email TEXT)');
+
+// Using transactions for atomic operations
+const transaction = db.transaction(async (users) => {
+  const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+  for (const user of users) {
+    await insert.run(user.name, user.email);
+  }
+});
+
+// Execute transaction
+await transaction([
+  { name: 'Alice', email: 'alice@example.com' },
+  { name: 'Bob', email: 'bob@example.com' }
+]);
+```
+
+## API Reference
+
+For complete API documentation, see [JavaScript API Reference](../../../../docs/javascript-api-reference.md).
+
+## Related Packages
+
+* The [@tursodatabase/serverless](https://www.npmjs.com/package/@tursodatabase/serverless) package provides a serverless driver with the same API.
+* The [@tursodatabase/sync](https://www.npmjs.com/package/@tursodatabase/sync) package provides bidirectional sync between a local Turso database and Turso Cloud.
+
+## License
+
+This project is licensed under the [MIT license](../../LICENSE.md).
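+## Closing the Database
+
+In the browser build, a file-backed database holds OPFS sync access handles inside a worker thread, and `close()` releases them asynchronously. A minimal sketch (the file name is only an example):
+
+```javascript
+import { connect } from '@tursodatabase/database-browser';
+
+const db = await connect('my-database.db');
+await db.exec('CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v TEXT)');
+
+// close() asks the worker to release the OPFS handles for the database
+// file and its WAL companion before closing the connection itself.
+await db.close();
+```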
+ +## Support + +- [GitHub Issues](https://github.com/tursodatabase/turso/issues) +- [Documentation](https://docs.turso.tech) +- [Discord Community](https://tur.so/discord) diff --git a/bindings/javascript/turso.wasi-browser.js b/bindings/javascript/packages/browser/index.js similarity index 60% rename from bindings/javascript/turso.wasi-browser.js rename to bindings/javascript/packages/browser/index.js index b17db8b4d..be8564969 100644 --- a/bindings/javascript/turso.wasi-browser.js +++ b/bindings/javascript/packages/browser/index.js @@ -1,7 +1,7 @@ import { createOnMessage as __wasmCreateOnMessageForFsProxy, getDefaultContext as __emnapiGetDefaultContext, - instantiateNapiModuleSync as __emnapiInstantiateNapiModuleSync, + instantiateNapiModule as __emnapiInstantiateNapiModule, WASI as __WASI, } from '@napi-rs/wasm-runtime' @@ -23,19 +23,25 @@ const __sharedMemory = new WebAssembly.Memory({ const __wasmFile = await fetch(__wasmUrl).then((res) => res.arrayBuffer()) +export let MainWorker = null; + +function panic(name) { + throw new Error(`method ${name} must be invoked only from the main thread`); +} + const { instance: __napiInstance, module: __wasiModule, napiModule: __napiModule, -} = __emnapiInstantiateNapiModuleSync(__wasmFile, { +} = await __emnapiInstantiateNapiModule(__wasmFile, { context: __emnapiContext, - asyncWorkPoolSize: 4, + asyncWorkPoolSize: 1, wasi: __wasi, onCreateWorker() { - const worker = new Worker(new URL('./wasi-worker-browser.mjs', import.meta.url), { + const worker = new Worker(new URL('./worker.mjs', import.meta.url), { type: 'module', }) - + MainWorker = worker; return worker }, overwriteImports(importObject) { @@ -44,6 +50,13 @@ const { ...importObject.napi, ...importObject.emnapi, memory: __sharedMemory, + is_web_worker: () => false, + lookup_file: () => panic("lookup_file"), + read: () => panic("read"), + write: () => panic("write"), + sync: () => panic("sync"), + truncate: () => panic("truncate"), + size: () => panic("size"), } return importObject }, @@ -57,4 +70,8 @@ const { }) export default __napiModule.exports export const Database = __napiModule.exports.Database +export const Opfs = __napiModule.exports.Opfs +export const OpfsFile = __napiModule.exports.OpfsFile export const Statement = __napiModule.exports.Statement +export const connect = __napiModule.exports.connect +export const initThreadPool = __napiModule.exports.initThreadPool diff --git a/bindings/javascript/packages/browser/package.json b/bindings/javascript/packages/browser/package.json new file mode 100644 index 000000000..31883d384 --- /dev/null +++ b/bindings/javascript/packages/browser/package.json @@ -0,0 +1,44 @@ +{ + "name": "@tursodatabase/database-browser", + "version": "0.1.5-pre.3", + "repository": { + "type": "git", + "url": "https://github.com/tursodatabase/turso" + }, + "license": "MIT", + "main": "dist/promise.js", + "packageManager": "yarn@4.9.2", + "files": [ + "index.js", + "worker.mjs", + "turso.wasm32-wasi.wasm", + "dist/**", + "README.md" + ], + "devDependencies": { + "@napi-rs/cli": "^3.1.5", + "@vitest/browser": "^3.2.4", + "playwright": "^1.55.0", + "typescript": "^5.9.2", + "vitest": "^3.2.4" + }, + "scripts": { + "napi-build": "napi build --features browser --release --platform --target wasm32-wasip1-threads --no-js --manifest-path ../../Cargo.toml --output-dir . 
&& rm index.d.ts turso.wasi* wasi* browser.js", + "tsc-build": "npm exec tsc", + "build": "npm run napi-build && npm run tsc-build", + "test": "CI=1 vitest --browser=chromium --run && CI=1 vitest --browser=firefox --run" + }, + "napi": { + "binaryName": "turso", + "targets": [ + "wasm32-wasip1-threads" + ] + }, + "imports": { + "#index": "./index.js" + }, + "dependencies": { + "@napi-rs/wasm-runtime": "^1.0.3", + "@tursodatabase/database-common": "^0.1.5-pre.3" + } +} diff --git a/bindings/javascript/packages/browser/promise.test.ts b/bindings/javascript/packages/browser/promise.test.ts new file mode 100644 index 000000000..87bd130be --- /dev/null +++ b/bindings/javascript/packages/browser/promise.test.ts @@ -0,0 +1,95 @@ +import { expect, test, afterEach } from 'vitest' +import { connect } from './promise.js' + +test('in-memory db', async () => { + const db = await connect(":memory:"); + await db.exec("CREATE TABLE t(x)"); + await db.exec("INSERT INTO t VALUES (1), (2), (3)"); + const stmt = db.prepare("SELECT * FROM t WHERE x % 2 = ?"); + const rows = await stmt.all([1]); + expect(rows).toEqual([{ x: 1 }, { x: 3 }]); +}) + +test('on-disk db', async () => { + const path = `test-${(Math.random() * 10000) | 0}.db`; + const db1 = await connect(path); + await db1.exec("CREATE TABLE t(x)"); + await db1.exec("INSERT INTO t VALUES (1), (2), (3)"); + const stmt1 = db1.prepare("SELECT * FROM t WHERE x % 2 = ?"); + expect(stmt1.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); + const rows1 = await stmt1.all([1]); + expect(rows1).toEqual([{ x: 1 }, { x: 3 }]); + await db1.close(); + stmt1.close(); + + const db2 = await connect(path); + const stmt2 = db2.prepare("SELECT * FROM t WHERE x % 2 = ?"); + expect(stmt2.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); + const rows2 = await stmt2.all([1]); + expect(rows2).toEqual([{ x: 1 }, { x: 3 }]); + db2.close(); +}) + +test('attach', async () => { + const path1 = `test-${(Math.random() * 10000) | 0}.db`; + const path2 = `test-${(Math.random() * 10000) | 0}.db`; + const db1 = await connect(path1); + await db1.exec("CREATE TABLE t(x)"); + await db1.exec("INSERT INTO t VALUES (1), (2), (3)"); + const db2 = await connect(path2); + await db2.exec("CREATE TABLE q(x)"); + await db2.exec("INSERT INTO q VALUES (4), (5), (6)"); + + await db1.exec(`ATTACH '${path2}' as secondary`); + + const stmt = db1.prepare("SELECT * FROM t UNION ALL SELECT * FROM secondary.q"); + expect(stmt.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); + const rows = await stmt.all([1]); + expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }, { x: 4 }, { x: 5 }, { x: 6 }]); +}) + +test('blobs', async () => { + const db = await connect(":memory:"); + const rows = await db.prepare("SELECT x'1020' as x").all(); + expect(rows).toEqual([{ x: new Uint8Array([16, 32]) }]) +}) + + +test('example-1', async () => { + const db = await connect(':memory:'); + await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)'); + + const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)'); + await insert.run('Alice', 'alice@example.com'); + await insert.run('Bob', 'bob@example.com'); + + const users = await db.prepare('SELECT * FROM users').all(); + expect(users).toEqual([ + { id: 1, name: 'Alice', email: 'alice@example.com' }, + { id: 2, name: 'Bob', email: 'bob@example.com' } + ]); +}) + +test('example-2', async () => { + const db = await 
connect(':memory:'); + await db.exec('CREATE TABLE users (name, email)'); + // Using transactions for atomic operations + const transaction = db.transaction(async (users) => { + const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)'); + for (const user of users) { + await insert.run(user.name, user.email); + } + }); + + // Execute transaction + await transaction([ + { name: 'Alice', email: 'alice@example.com' }, + { name: 'Bob', email: 'bob@example.com' } + ]); + + const rows = await db.prepare('SELECT * FROM users').all(); + expect(rows).toEqual([ + { name: 'Alice', email: 'alice@example.com' }, + { name: 'Bob', email: 'bob@example.com' } + ]); +}) \ No newline at end of file diff --git a/bindings/javascript/packages/browser/promise.ts b/bindings/javascript/packages/browser/promise.ts new file mode 100644 index 000000000..8f713f958 --- /dev/null +++ b/bindings/javascript/packages/browser/promise.ts @@ -0,0 +1,78 @@ +import { DatabasePromise, NativeDatabase, DatabaseOpts, SqliteError } from "@tursodatabase/database-common" +import { connect as nativeConnect, initThreadPool, MainWorker } from "#index"; + +let workerRequestId = 0; +class Database extends DatabasePromise { + files: string[]; + constructor(db: NativeDatabase, files: string[], opts: DatabaseOpts = {}) { + super(db, opts) + this.files = files; + } + async close() { + let currentId = workerRequestId; + workerRequestId += this.files.length; + + let tasks = []; + for (const file of this.files) { + (MainWorker as any).postMessage({ __turso__: "unregister", path: file, id: currentId }); + tasks.push(waitFor(currentId)); + currentId += 1; + } + await Promise.all(tasks); + this.db.close(); + } +} + +function waitFor(id: number): Promise { + let waitResolve, waitReject; + const callback = msg => { + if (msg.data.id == id) { + if (msg.data.error != null) { + waitReject(msg.data.error) + } else { + waitResolve() + } + cleanup(); + } + }; + const cleanup = () => (MainWorker as any).removeEventListener("message", callback); + + (MainWorker as any).addEventListener("message", callback); + const result = new Promise((resolve, reject) => { + waitResolve = resolve; + waitReject = reject; + }); + return result; +} + +/** + * Creates a new database connection asynchronously. + * + * @param {string} path - Path to the database file. + * @param {Object} opts - Options for database behavior. + * @returns {Promise} - A promise that resolves to a Database instance. 
+ */ +async function connect(path: string, opts: DatabaseOpts = {}): Promise { + if (path == ":memory:") { + const db = await nativeConnect(path, { tracing: opts.tracing }); + return new Database(db, [], opts); + } + await initThreadPool(); + if (MainWorker == null) { + throw new Error("panic: MainWorker is not set"); + } + + let currentId = workerRequestId; + workerRequestId += 2; + + let dbHandlePromise = waitFor(currentId); + let walHandlePromise = waitFor(currentId + 1); + (MainWorker as any).postMessage({ __turso__: "register", path: `${path}`, id: currentId }); + (MainWorker as any).postMessage({ __turso__: "register", path: `${path}-wal`, id: currentId + 1 }); + await Promise.all([dbHandlePromise, walHandlePromise]); + const db = await nativeConnect(path, { tracing: opts.tracing }); + const files = [path, `${path}-wal`]; + return new Database(db, files, opts); +} + +export { connect, Database, SqliteError } diff --git a/bindings/javascript/packages/browser/tsconfig.json b/bindings/javascript/packages/browser/tsconfig.json new file mode 100644 index 000000000..b46abc167 --- /dev/null +++ b/bindings/javascript/packages/browser/tsconfig.json @@ -0,0 +1,21 @@ +{ + "compilerOptions": { + "skipLibCheck": true, + "declaration": true, + "declarationMap": true, + "module": "nodenext", + "target": "esnext", + "outDir": "dist/", + "lib": [ + "es2020" + ], + "paths": { + "#index": [ + "./index.js" + ] + } + }, + "include": [ + "*" + ] +} \ No newline at end of file diff --git a/bindings/javascript/packages/browser/vitest.config.ts b/bindings/javascript/packages/browser/vitest.config.ts new file mode 100644 index 000000000..deeaec485 --- /dev/null +++ b/bindings/javascript/packages/browser/vitest.config.ts @@ -0,0 +1,23 @@ +import { defineConfig } from 'vitest/config' + +export default defineConfig({ + define: { + 'process.env.NODE_DEBUG_NATIVE': 'false', + }, + server: { + headers: { + "Cross-Origin-Embedder-Policy": "require-corp", + "Cross-Origin-Opener-Policy": "same-origin" + }, + }, + test: { + browser: { + enabled: true, + provider: 'playwright', + instances: [ + { browser: 'chromium' }, + { browser: 'firefox' } + ], + }, + }, +}) diff --git a/bindings/javascript/packages/browser/worker.mjs b/bindings/javascript/packages/browser/worker.mjs new file mode 100644 index 000000000..9c29d4390 --- /dev/null +++ b/bindings/javascript/packages/browser/worker.mjs @@ -0,0 +1,160 @@ +import { instantiateNapiModuleSync, MessageHandler, WASI } from '@napi-rs/wasm-runtime' + +var fileByPath = new Map(); +var fileByHandle = new Map(); +let fileHandles = 0; +var memory = null; + +function getUint8ArrayFromWasm(ptr, len) { + ptr = ptr >>> 0; + return new Uint8Array(memory.buffer).subarray(ptr, ptr + len); +} + + +async function registerFile(path) { + if (fileByPath.has(path)) { + return; + } + const opfsRoot = await navigator.storage.getDirectory(); + const opfsHandle = await opfsRoot.getFileHandle(path, { create: true }); + const opfsSync = await opfsHandle.createSyncAccessHandle(); + fileHandles += 1; + fileByPath.set(path, { handle: fileHandles, sync: opfsSync }); + fileByHandle.set(fileHandles, opfsSync); +} + +async function unregisterFile(path) { + const file = fileByPath.get(path); + if (file == null) { + return; + } + fileByPath.delete(path); + fileByHandle.delete(file.handle); + file.sync.close(); +} + +function lookup_file(pathPtr, pathLen) { + try { + const buffer = getUint8ArrayFromWasm(pathPtr, pathLen); + const notShared = new Uint8Array(buffer.length); + notShared.set(buffer); + const 
decoder = new TextDecoder('utf-8'); + const path = decoder.decode(notShared); + const file = fileByPath.get(path); + if (file == null) { + return -404; + } + return file.handle; + } catch (e) { + console.error('lookupFile', pathPtr, pathLen, e); + return -1; + } +} +function read(handle, bufferPtr, bufferLen, offset) { + try { + const buffer = getUint8ArrayFromWasm(bufferPtr, bufferLen); + const file = fileByHandle.get(Number(handle)); + const result = file.read(buffer, { at: Number(offset) }); + return result; + } catch (e) { + console.error('read', handle, bufferPtr, bufferLen, offset, e); + return -1; + } +} +function write(handle, bufferPtr, bufferLen, offset) { + try { + const buffer = getUint8ArrayFromWasm(bufferPtr, bufferLen); + const file = fileByHandle.get(Number(handle)); + const result = file.write(buffer, { at: Number(offset) }); + return result; + } catch (e) { + console.error('write', handle, bufferPtr, bufferLen, offset, e); + return -1; + } +} +function sync(handle) { + try { + const file = fileByHandle.get(Number(handle)); + file.flush(); + return 0; + } catch (e) { + console.error('sync', handle, e); + return -1; + } +} +function truncate(handle, size) { + try { + const file = fileByHandle.get(Number(handle)); + const result = file.truncate(size); + return result; + } catch (e) { + console.error('truncate', handle, size, e); + return -1; + } +} +function size(handle) { + try { + const file = fileByHandle.get(Number(handle)); + const size = file.getSize() + return size; + } catch (e) { + console.error('size', handle, e); + return -1; + } +} + +const handler = new MessageHandler({ + onLoad({ wasmModule, wasmMemory }) { + memory = wasmMemory; + const wasi = new WASI({ + print: function () { + // eslint-disable-next-line no-console + console.log.apply(console, arguments) + }, + printErr: function () { + // eslint-disable-next-line no-console + console.error.apply(console, arguments) + }, + }) + return instantiateNapiModuleSync(wasmModule, { + childThread: true, + wasi, + overwriteImports(importObject) { + importObject.env = { + ...importObject.env, + ...importObject.napi, + ...importObject.emnapi, + memory: wasmMemory, + is_web_worker: () => true, + lookup_file: lookup_file, + read: read, + write: write, + sync: sync, + truncate: truncate, + size: size, + } + }, + }) + }, +}) + +globalThis.onmessage = async function (e) { + if (e.data.__turso__ == 'register') { + try { + await registerFile(e.data.path) + self.postMessage({ id: e.data.id }) + } catch (error) { + self.postMessage({ id: e.data.id, error: error }); + } + return; + } else if (e.data.__turso__ == 'unregister') { + try { + await unregisterFile(e.data.path) + self.postMessage({ id: e.data.id }) + } catch (error) { + self.postMessage({ id: e.data.id, error: error }); + } + return; + } + handler.handle(e) +} diff --git a/bindings/javascript/packages/common/README.md b/bindings/javascript/packages/common/README.md new file mode 100644 index 000000000..179123f7f --- /dev/null +++ b/bindings/javascript/packages/common/README.md @@ -0,0 +1,8 @@ +## About + +This package is the Turso embedded database common JS library which is shared between final builds for Node and Browser. + +Do not use this package directly - instead you must use `@tursodatabase/database` or `@tursodatabase/database-browser`. + +> **⚠️ Warning:** This software is ALPHA, only use for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now. 
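+
+For orientation, this is roughly how the final packages build on this one: a simplified sketch based on the native wrapper in this repository (`#index` resolves to the platform-specific binding):
+
+```typescript
+import { DatabaseCompat, NativeDatabase, DatabaseOpts, SqliteError } from "@tursodatabase/database-common";
+import { Database as NativeDB } from "#index"; // platform-specific NAPI binding
+
+// The common layer drives an abstract NativeDatabase; each final package
+// supplies the concrete handle (a NAPI module on Node, a WASM instance in the browser).
+class Database extends DatabaseCompat {
+  constructor(path: string, opts: DatabaseOpts = {}) {
+    super(new NativeDB(path, { tracing: opts.tracing }) as unknown as NativeDatabase, opts);
+  }
+}
+
+export { Database, SqliteError };
+```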
+ diff --git a/bindings/javascript/bind.ts b/bindings/javascript/packages/common/bind.ts similarity index 100% rename from bindings/javascript/bind.ts rename to bindings/javascript/packages/common/bind.ts diff --git a/bindings/javascript/compat.ts b/bindings/javascript/packages/common/compat.ts similarity index 94% rename from bindings/javascript/compat.ts rename to bindings/javascript/packages/common/compat.ts index 3b99f0772..d7bd493bb 100644 --- a/bindings/javascript/compat.ts +++ b/bindings/javascript/packages/common/compat.ts @@ -1,12 +1,6 @@ -import { Database as NativeDB, Statement as NativeStatement } from "#entry-point"; import { bindParams } from "./bind.js"; - import { SqliteError } from "./sqlite-error.js"; - -// Step result constants -const STEP_ROW = 1; -const STEP_DONE = 2; -const STEP_IO = 3; +import { NativeDatabase, NativeStatement, STEP_IO, STEP_ROW, STEP_DONE } from "./types.js"; const convertibleErrorTypes = { TypeError }; const CONVERTIBLE_ERROR_PREFIX = "[TURSO_CONVERT_TYPE]"; @@ -35,7 +29,7 @@ function createErrorByName(name, message) { * Database represents a connection that can prepare and execute SQL statements. */ class Database { - db: NativeDB; + db: NativeDatabase; memory: boolean; open: boolean; private _inTransaction: boolean = false; @@ -50,15 +44,14 @@ class Database { * @param {boolean} [opts.fileMustExist=false] - If true, throws if database file does not exist. * @param {number} [opts.timeout=0] - Timeout duration in milliseconds for database operations. Defaults to 0 (no timeout). */ - constructor(path: string, opts: any = {}) { + constructor(db: NativeDatabase, opts: any = {}) { opts.readonly = opts.readonly === undefined ? false : opts.readonly; opts.fileMustExist = opts.fileMustExist === undefined ? false : opts.fileMustExist; opts.timeout = opts.timeout === undefined ? 
0 : opts.timeout; - this.db = new NativeDB(path); + this.db = db; this.memory = this.db.memory; - const db = this.db; Object.defineProperties(this, { inTransaction: { @@ -66,7 +59,7 @@ class Database { }, name: { get() { - return path; + return db.path; }, }, readonly: { @@ -199,7 +192,7 @@ class Database { } try { - this.db.batch(sql); + this.db.batchSync(sql); } catch (err) { throw convertError(err); } @@ -301,7 +294,7 @@ class Statement { this.stmt.reset(); bindParams(this.stmt, bindParameters); for (; ;) { - const stepResult = this.stmt.step(); + const stepResult = this.stmt.stepSync(); if (stepResult === STEP_IO) { this.db.db.ioLoopSync(); continue; @@ -330,7 +323,7 @@ class Statement { this.stmt.reset(); bindParams(this.stmt, bindParameters); for (; ;) { - const stepResult = this.stmt.step(); + const stepResult = this.stmt.stepSync(); if (stepResult === STEP_IO) { this.db.db.ioLoopSync(); continue; @@ -354,7 +347,7 @@ class Statement { bindParams(this.stmt, bindParameters); while (true) { - const stepResult = this.stmt.step(); + const stepResult = this.stmt.stepSync(); if (stepResult === STEP_IO) { this.db.db.ioLoopSync(); continue; @@ -378,7 +371,7 @@ class Statement { bindParams(this.stmt, bindParameters); const rows: any[] = []; for (; ;) { - const stepResult = this.stmt.step(); + const stepResult = this.stmt.stepSync(); if (stepResult === STEP_IO) { this.db.db.ioLoopSync(); continue; @@ -417,4 +410,4 @@ class Statement { } } -export { Database, SqliteError } \ No newline at end of file +export { Database, Statement } \ No newline at end of file diff --git a/bindings/javascript/packages/common/index.ts b/bindings/javascript/packages/common/index.ts new file mode 100644 index 000000000..35e092d03 --- /dev/null +++ b/bindings/javascript/packages/common/index.ts @@ -0,0 +1,6 @@ +import { NativeDatabase, NativeStatement, DatabaseOpts } from "./types.js"; +import { Database as DatabaseCompat, Statement as StatementCompat } from "./compat.js"; +import { Database as DatabasePromise, Statement as StatementPromise } from "./promise.js"; +import { SqliteError } from "./sqlite-error.js"; + +export { DatabaseCompat, StatementCompat, DatabasePromise, StatementPromise, NativeDatabase, NativeStatement, SqliteError, DatabaseOpts } diff --git a/bindings/javascript/packages/common/package.json b/bindings/javascript/packages/common/package.json new file mode 100644 index 000000000..00cf6ff0e --- /dev/null +++ b/bindings/javascript/packages/common/package.json @@ -0,0 +1,25 @@ +{ + "name": "@tursodatabase/database-common", + "version": "0.1.5-pre.3", + "repository": { + "type": "git", + "url": "https://github.com/tursodatabase/turso" + }, + "type": "module", + "license": "MIT", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "packageManager": "yarn@4.9.2", + "files": [ + "dist/**", + "README.md" + ], + "devDependencies": { + "typescript": "^5.9.2" + }, + "scripts": { + "tsc-build": "npm exec tsc", + "build": "npm run tsc-build", + "test": "echo 'no tests'" + } +} diff --git a/bindings/javascript/promise.ts b/bindings/javascript/packages/common/promise.ts similarity index 88% rename from bindings/javascript/promise.ts rename to bindings/javascript/packages/common/promise.ts index 04df99d9f..e81795833 100644 --- a/bindings/javascript/promise.ts +++ b/bindings/javascript/packages/common/promise.ts @@ -1,12 +1,6 @@ -import { Database as NativeDB, Statement as NativeStatement } from "#entry-point"; import { bindParams } from "./bind.js"; - import { SqliteError } from "./sqlite-error.js"; - -// 
Step result constants -const STEP_ROW = 1; -const STEP_DONE = 2; -const STEP_IO = 3; +import { NativeDatabase, NativeStatement, STEP_IO, STEP_ROW, STEP_DONE, DatabaseOpts } from "./types.js"; const convertibleErrorTypes = { TypeError }; const CONVERTIBLE_ERROR_PREFIX = "[TURSO_CONVERT_TYPE]"; @@ -35,7 +29,7 @@ function createErrorByName(name, message) { * Database represents a connection that can prepare and execute SQL statements. */ class Database { - db: NativeDB; + db: NativeDatabase; memory: boolean; open: boolean; private _inTransaction: boolean = false; @@ -49,19 +43,18 @@ class Database { * @param {boolean} [opts.fileMustExist=false] - If true, throws if database file does not exist. * @param {number} [opts.timeout=0] - Timeout duration in milliseconds for database operations. Defaults to 0 (no timeout). */ - constructor(path: string, opts: any = {}) { + constructor(db: NativeDatabase, opts: DatabaseOpts = {}) { opts.readonly = opts.readonly === undefined ? false : opts.readonly; opts.fileMustExist = opts.fileMustExist === undefined ? false : opts.fileMustExist; opts.timeout = opts.timeout === undefined ? 0 : opts.timeout; - const db = new NativeDB(path); - this.initialize(db, opts.path, opts.readonly); + this.initialize(db, opts.name, opts.readonly); } static create() { return Object.create(this.prototype); } - initialize(db: NativeDB, name, readonly) { + initialize(db: NativeDatabase, name, readonly) { this.db = db; this.memory = db.memory; Object.defineProperties(this, { @@ -112,22 +105,22 @@ class Database { * * @param {function} fn - The function to wrap in a transaction. */ - transaction(fn) { + transaction(fn: (...any) => Promise) { if (typeof fn !== "function") throw new TypeError("Expected first argument to be a function"); const db = this; const wrapTxn = (mode) => { - return (...bindParameters) => { - db.exec("BEGIN " + mode); + return async (...bindParameters) => { + await db.exec("BEGIN " + mode); db._inTransaction = true; try { - const result = fn(...bindParameters); - db.exec("COMMIT"); + const result = await fn(...bindParameters); + await db.exec("COMMIT"); db._inTransaction = false; return result; } catch (err) { - db.exec("ROLLBACK"); + await db.exec("ROLLBACK"); db._inTransaction = false; throw err; } @@ -147,7 +140,7 @@ class Database { return properties.default.value; } - pragma(source, options) { + async pragma(source, options) { if (options == null) options = {}; if (typeof source !== "string") @@ -158,8 +151,8 @@ class Database { const pragma = `PRAGMA ${source}`; - const stmt = this.prepare(pragma); - const results = stmt.all(); + const stmt = await this.prepare(pragma); + const results = await stmt.all(); return results; } @@ -197,13 +190,13 @@ class Database { * * @param {string} sql - The SQL statement string to execute. */ - exec(sql) { + async exec(sql) { if (!this.open) { throw new TypeError("The database connection is not open"); } try { - this.db.batch(sql); + await this.db.batchAsync(sql); } catch (err) { throw convertError(err); } @@ -228,7 +221,7 @@ class Database { /** * Closes the database connection. 
*/ - close() { + async close() { this.db.close(); } } @@ -305,7 +298,7 @@ class Statement { bindParams(this.stmt, bindParameters); while (true) { - const stepResult = this.stmt.step(); + const stepResult = await this.stmt.stepAsync(); if (stepResult === STEP_IO) { await this.db.db.ioLoopAsync(); continue; @@ -335,7 +328,7 @@ class Statement { bindParams(this.stmt, bindParameters); while (true) { - const stepResult = this.stmt.step(); + const stepResult = await this.stmt.stepAsync(); if (stepResult === STEP_IO) { await this.db.db.ioLoopAsync(); continue; @@ -359,7 +352,7 @@ class Statement { bindParams(this.stmt, bindParameters); while (true) { - const stepResult = this.stmt.step(); + const stepResult = await this.stmt.stepAsync(); if (stepResult === STEP_IO) { await this.db.db.ioLoopAsync(); continue; @@ -384,7 +377,7 @@ class Statement { const rows: any[] = []; while (true) { - const stepResult = this.stmt.step(); + const stepResult = await this.stmt.stepAsync(); if (stepResult === STEP_IO) { await this.db.db.ioLoopAsync(); continue; @@ -421,17 +414,9 @@ class Statement { throw convertError(err); } } -} -/** - * Creates a new database connection asynchronously. - * - * @param {string} path - Path to the database file. - * @param {Object} opts - Options for database behavior. - * @returns {Promise} - A promise that resolves to a Database instance. - */ -async function connect(path: string, opts: any = {}): Promise { - return new Database(path, opts); + close() { + this.stmt.finalize(); + } } - -export { Database, SqliteError, connect } \ No newline at end of file +export { Database, Statement } \ No newline at end of file diff --git a/bindings/javascript/sqlite-error.ts b/bindings/javascript/packages/common/sqlite-error.ts similarity index 100% rename from bindings/javascript/sqlite-error.ts rename to bindings/javascript/packages/common/sqlite-error.ts diff --git a/bindings/javascript/tsconfig.json b/bindings/javascript/packages/common/tsconfig.json similarity index 71% rename from bindings/javascript/tsconfig.json rename to bindings/javascript/packages/common/tsconfig.json index 4722ef092..bf9c13271 100644 --- a/bindings/javascript/tsconfig.json +++ b/bindings/javascript/packages/common/tsconfig.json @@ -1,17 +1,14 @@ { "compilerOptions": { "skipLibCheck": true, + "declaration": true, + "declarationMap": true, "module": "esnext", "target": "esnext", "outDir": "dist/", "lib": [ "es2020" ], - "paths": { - "#entry-point": [ - "./index.js" - ] - } }, "include": [ "*" diff --git a/bindings/javascript/packages/common/types.ts b/bindings/javascript/packages/common/types.ts new file mode 100644 index 000000000..2b843bb9f --- /dev/null +++ b/bindings/javascript/packages/common/types.ts @@ -0,0 +1,46 @@ +export interface DatabaseOpts { + readonly?: boolean, + fileMustExist?: boolean, + timeout?: number + name?: string + tracing?: 'info' | 'debug' | 'trace' +} + +export interface NativeDatabase { + memory: boolean, + path: string, + new(path: string): NativeDatabase; + batchSync(sql: string); + batchAsync(sql: string): Promise; + + ioLoopSync(); + ioLoopAsync(): Promise; + + prepare(sql: string): NativeStatement; + + pluck(pluckMode: boolean); + defaultSafeIntegers(toggle: boolean); + totalChanges(): number; + changes(): number; + lastInsertRowid(): number; + close(); +} + + +// Step result constants +export const STEP_ROW = 1; +export const STEP_DONE = 2; +export const STEP_IO = 3; + +export interface NativeStatement { + stepAsync(): Promise; + stepSync(): number; + + pluck(pluckMode: boolean); + 
safeIntegers(toggle: boolean); + raw(toggle: boolean); + columns(): string[]; + row(): any; + reset(); + finalize(); +} \ No newline at end of file diff --git a/bindings/javascript/packages/native/README.md b/bindings/javascript/packages/native/README.md new file mode 100644 index 000000000..d5444435c --- /dev/null +++ b/bindings/javascript/packages/native/README.md @@ -0,0 +1,125 @@ +
+# Turso Database for JavaScript in Node
+
+[npm](https://www.npmjs.com/package/@tursodatabase/database)
+
+[Chat with other users of Turso on Discord](https://tur.so/discord)
+
+---
+
+## About
+
+This package is the Turso embedded database library for JavaScript on Node.js.
+
+> **⚠️ Warning:** This software is ALPHA; use it only for development, testing, and experimentation. We are working to make it production ready, but do not use it for critical data right now.
+
+## Features
+
+- **SQLite compatible:** SQLite query language and file format support ([status](https://github.com/tursodatabase/turso/blob/main/COMPAT.md)).
+- **In-process**: No network overhead, runs directly in your Node.js process
+- **TypeScript support**: Full TypeScript definitions included
+- **Cross-platform**: Supports Linux (x86_64 and arm64), macOS, and Windows; browser support ships in the separate `@tursodatabase/database-browser` package
+
+## Installation
+
+```bash
+npm install @tursodatabase/database
+```
+
+## Getting Started
+
+### In-Memory Database
+
+```javascript
+import { connect } from '@tursodatabase/database';
+
+// Create an in-memory database
+const db = await connect(':memory:');
+
+// Create a table
+await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)');
+
+// Insert data
+const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+await insert.run('Alice', 'alice@example.com');
+await insert.run('Bob', 'bob@example.com');
+
+// Query data
+const users = await db.prepare('SELECT * FROM users').all();
+console.log(users);
+// Output: [
+//   { id: 1, name: 'Alice', email: 'alice@example.com' },
+//   { id: 2, name: 'Bob', email: 'bob@example.com' }
+// ]
+```
+
+### File-Based Database
+
+```javascript
+import { connect } from '@tursodatabase/database';
+
+// Create or open a database file
+const db = await connect('my-database.db');
+
+// Create a table
+await db.exec(`
+  CREATE TABLE IF NOT EXISTS posts (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    title TEXT NOT NULL,
+    content TEXT,
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
+  )
+`);
+
+// Insert a post
+const insertPost = db.prepare('INSERT INTO posts (title, content) VALUES (?, ?)');
+const result = await insertPost.run('Hello World', 'This is my first blog post!');
+
+console.log(`Inserted post with ID: ${result.lastInsertRowid}`);
+```
+
+### Transactions
+
+```javascript
+import { connect } from '@tursodatabase/database';
+
+const db = await connect('transactions.db');
+
+// The transaction below inserts into this table
+await db.exec('CREATE TABLE IF NOT EXISTS users (name TEXT, email TEXT)');
+
+// Using transactions for atomic operations
+const transaction = db.transaction(async (users) => {
+  const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+  for (const user of users) {
+    await insert.run(user.name, user.email);
+  }
+});
+
+// Execute transaction
+await transaction([
+  { name: 'Alice', email: 'alice@example.com' },
+  { name: 'Bob', email: 'bob@example.com' }
+]);
+```
+
+## API Reference
+
+For complete API documentation, see [JavaScript API Reference](../../../../docs/javascript-api-reference.md).
+
+## Related Packages
+
+* The [@tursodatabase/serverless](https://www.npmjs.com/package/@tursodatabase/serverless) package provides a serverless driver with the same API.
+* The [@tursodatabase/sync](https://www.npmjs.com/package/@tursodatabase/sync) package provides bidirectional sync between a local Turso database and Turso Cloud.
+
+## License
+
+This project is licensed under the [MIT license](../../LICENSE.md).
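+## Synchronous API
+
+Besides the promise-based API shown above, the package exposes a synchronous API under the `./compat` entry point. A short sketch adapted from the package's own tests:
+
+```javascript
+import { Database } from '@tursodatabase/database/compat';
+
+const db = new Database(':memory:');
+db.exec('CREATE TABLE t(x)');
+db.exec('INSERT INTO t VALUES (1), (2), (3)');
+
+// Statements execute synchronously; no await is needed
+const rows = db.prepare('SELECT * FROM t WHERE x % 2 = ?').all([1]);
+console.log(rows); // [ { x: 1 }, { x: 3 } ]
+```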
+ +## Support + +- [GitHub Issues](https://github.com/tursodatabase/turso/issues) +- [Documentation](https://docs.turso.tech) +- [Discord Community](https://tur.so/discord) diff --git a/bindings/javascript/packages/native/compat.test.ts b/bindings/javascript/packages/native/compat.test.ts new file mode 100644 index 000000000..c64d4fc79 --- /dev/null +++ b/bindings/javascript/packages/native/compat.test.ts @@ -0,0 +1,67 @@ +import { unlinkSync } from "node:fs"; +import { expect, test } from 'vitest' +import { Database } from './compat.js' + +test('in-memory db', () => { + const db = new Database(":memory:"); + db.exec("CREATE TABLE t(x)"); + db.exec("INSERT INTO t VALUES (1), (2), (3)"); + const stmt = db.prepare("SELECT * FROM t WHERE x % 2 = ?"); + const rows = stmt.all([1]); + expect(rows).toEqual([{ x: 1 }, { x: 3 }]); +}) + +test('on-disk db', () => { + const path = `test-${(Math.random() * 10000) | 0}.db`; + try { + const db1 = new Database(path); + db1.exec("CREATE TABLE t(x)"); + db1.exec("INSERT INTO t VALUES (1), (2), (3)"); + const stmt1 = db1.prepare("SELECT * FROM t WHERE x % 2 = ?"); + expect(stmt1.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); + const rows1 = stmt1.all([1]); + expect(rows1).toEqual([{ x: 1 }, { x: 3 }]); + db1.close(); + + const db2 = new Database(path); + const stmt2 = db2.prepare("SELECT * FROM t WHERE x % 2 = ?"); + expect(stmt2.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); + const rows2 = stmt2.all([1]); + expect(rows2).toEqual([{ x: 1 }, { x: 3 }]); + db2.close(); + } finally { + unlinkSync(path); + unlinkSync(`${path}-wal`); + } +}) + +test('attach', () => { + const path1 = `test-${(Math.random() * 10000) | 0}.db`; + const path2 = `test-${(Math.random() * 10000) | 0}.db`; + try { + const db1 = new Database(path1); + db1.exec("CREATE TABLE t(x)"); + db1.exec("INSERT INTO t VALUES (1), (2), (3)"); + const db2 = new Database(path2); + db2.exec("CREATE TABLE q(x)"); + db2.exec("INSERT INTO q VALUES (4), (5), (6)"); + + db1.exec(`ATTACH '${path2}' as secondary`); + + const stmt = db1.prepare("SELECT * FROM t UNION ALL SELECT * FROM secondary.q"); + expect(stmt.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); + const rows = stmt.all([1]); + expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }, { x: 4 }, { x: 5 }, { x: 6 }]); + } finally { + unlinkSync(path1); + unlinkSync(`${path1}-wal`); + unlinkSync(path2); + unlinkSync(`${path2}-wal`); + } +}) + +test('blobs', () => { + const db = new Database(":memory:"); + const rows = db.prepare("SELECT x'1020' as x").all(); + expect(rows).toEqual([{ x: Buffer.from([16, 32]) }]) +}) \ No newline at end of file diff --git a/bindings/javascript/packages/native/compat.ts b/bindings/javascript/packages/native/compat.ts new file mode 100644 index 000000000..105d69e85 --- /dev/null +++ b/bindings/javascript/packages/native/compat.ts @@ -0,0 +1,10 @@ +import { DatabaseCompat, NativeDatabase, SqliteError, DatabaseOpts } from "@tursodatabase/database-common" +import { Database as NativeDB } from "#index"; + +class Database extends DatabaseCompat { + constructor(path: string, opts: DatabaseOpts = {}) { + super(new NativeDB(path, { tracing: opts.tracing }) as unknown as NativeDatabase, opts) + } +} + +export { Database, SqliteError } diff --git a/bindings/javascript/index.d.ts b/bindings/javascript/packages/native/index.d.ts similarity index 81% rename from bindings/javascript/index.d.ts rename to 
bindings/javascript/packages/native/index.d.ts index 14f852afa..1c510cfdc 100644 --- a/bindings/javascript/index.d.ts +++ b/bindings/javascript/packages/native/index.d.ts @@ -8,13 +8,13 @@ export declare class Database { * # Arguments * * `path` - The path to the database file. */ - constructor(path: string) + constructor(path: string, opts?: DatabaseOpts | undefined | null) /** Returns whether the database is in memory-only mode. */ get memory(): boolean /** Returns whether the database connection is open. */ get open(): boolean /** - * Executes a batch of SQL statements. + * Executes a batch of SQL statements on main thread * * # Arguments * @@ -22,7 +22,17 @@ export declare class Database { * * # Returns */ - batch(sql: string): void + batchSync(sql: string): void + /** + * Executes a batch of SQL statements outside of main thread + * + * # Arguments + * + * * `sql` - The SQL statements to execute. + * + * # Returns + */ + batchAsync(sql: string): Promise /** * Prepares a statement for execution. * @@ -105,10 +115,15 @@ export declare class Statement { */ bindAt(index: number, value: unknown): void /** - * Step the statement and return result code: + * Step the statement and return result code (executed on the main thread): * 1 = Row available, 2 = Done, 3 = I/O needed */ - step(): number + stepSync(): number + /** + * Step the statement and return result code (executed on the background thread): + * 1 = Row available, 2 = Done, 3 = I/O needed + */ + stepAsync(): Promise /** Get the current row data according to the presentation mode */ row(): unknown /** Sets the presentation mode to raw. */ @@ -128,3 +143,7 @@ export declare class Statement { /** Finalizes the statement. */ finalize(): void } + +export interface DatabaseOpts { + tracing?: string +} diff --git a/bindings/javascript/packages/native/index.js b/bindings/javascript/packages/native/index.js new file mode 100644 index 000000000..d69167a1a --- /dev/null +++ b/bindings/javascript/packages/native/index.js @@ -0,0 +1,513 @@ +// prettier-ignore +/* eslint-disable */ +// @ts-nocheck +/* auto-generated by NAPI-RS */ + +import { createRequire } from 'node:module' +const require = createRequire(import.meta.url) +const __dirname = new URL('.', import.meta.url).pathname + +const { readFileSync } = require('node:fs') +let nativeBinding = null +const loadErrors = [] + +const isMusl = () => { + let musl = false + if (process.platform === 'linux') { + musl = isMuslFromFilesystem() + if (musl === null) { + musl = isMuslFromReport() + } + if (musl === null) { + musl = isMuslFromChildProcess() + } + } + return musl +} + +const isFileMusl = (f) => f.includes('libc.musl-') || f.includes('ld-musl-') + +const isMuslFromFilesystem = () => { + try { + return readFileSync('/usr/bin/ldd', 'utf-8').includes('musl') + } catch { + return null + } +} + +const isMuslFromReport = () => { + let report = null + if (typeof process.report?.getReport === 'function') { + process.report.excludeNetwork = true + report = process.report.getReport() + } + if (!report) { + return null + } + if (report.header && report.header.glibcVersionRuntime) { + return false + } + if (Array.isArray(report.sharedObjects)) { + if (report.sharedObjects.some(isFileMusl)) { + return true + } + } + return false +} + +const isMuslFromChildProcess = () => { + try { + return require('child_process').execSync('ldd --version', { encoding: 'utf8' }).includes('musl') + } catch (e) { + // If we reach this case, we don't know if the system is musl or not, so is better to just fallback to false + 
return false + } +} + +function requireNative() { + if (process.env.NAPI_RS_NATIVE_LIBRARY_PATH) { + try { + nativeBinding = require(process.env.NAPI_RS_NATIVE_LIBRARY_PATH); + } catch (err) { + loadErrors.push(err) + } + } else if (process.platform === 'android') { + if (process.arch === 'arm64') { + try { + return require('./turso.android-arm64.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-android-arm64') + const bindingPackageVersion = require('@tursodatabase/database-android-arm64/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else if (process.arch === 'arm') { + try { + return require('./turso.android-arm-eabi.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-android-arm-eabi') + const bindingPackageVersion = require('@tursodatabase/database-android-arm-eabi/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + loadErrors.push(new Error(`Unsupported architecture on Android ${process.arch}`)) + } + } else if (process.platform === 'win32') { + if (process.arch === 'x64') { + try { + return require('./turso.win32-x64-msvc.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-win32-x64-msvc') + const bindingPackageVersion = require('@tursodatabase/database-win32-x64-msvc/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else if (process.arch === 'ia32') { + try { + return require('./turso.win32-ia32-msvc.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-win32-ia32-msvc') + const bindingPackageVersion = require('@tursodatabase/database-win32-ia32-msvc/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. 
You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else if (process.arch === 'arm64') { + try { + return require('./turso.win32-arm64-msvc.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-win32-arm64-msvc') + const bindingPackageVersion = require('@tursodatabase/database-win32-arm64-msvc/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + loadErrors.push(new Error(`Unsupported architecture on Windows: ${process.arch}`)) + } + } else if (process.platform === 'darwin') { + try { + return require('./turso.darwin-universal.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-darwin-universal') + const bindingPackageVersion = require('@tursodatabase/database-darwin-universal/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + if (process.arch === 'x64') { + try { + return require('./turso.darwin-x64.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-darwin-x64') + const bindingPackageVersion = require('@tursodatabase/database-darwin-x64/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else if (process.arch === 'arm64') { + try { + return require('./turso.darwin-arm64.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-darwin-arm64') + const bindingPackageVersion = require('@tursodatabase/database-darwin-arm64/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. 
You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + loadErrors.push(new Error(`Unsupported architecture on macOS: ${process.arch}`)) + } + } else if (process.platform === 'freebsd') { + if (process.arch === 'x64') { + try { + return require('./turso.freebsd-x64.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-freebsd-x64') + const bindingPackageVersion = require('@tursodatabase/database-freebsd-x64/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else if (process.arch === 'arm64') { + try { + return require('./turso.freebsd-arm64.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-freebsd-arm64') + const bindingPackageVersion = require('@tursodatabase/database-freebsd-arm64/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + loadErrors.push(new Error(`Unsupported architecture on FreeBSD: ${process.arch}`)) + } + } else if (process.platform === 'linux') { + if (process.arch === 'x64') { + if (isMusl()) { + try { + return require('./turso.linux-x64-musl.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-x64-musl') + const bindingPackageVersion = require('@tursodatabase/database-linux-x64-musl/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + try { + return require('./turso.linux-x64-gnu.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-x64-gnu') + const bindingPackageVersion = require('@tursodatabase/database-linux-x64-gnu/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. 
You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } + } else if (process.arch === 'arm64') { + if (isMusl()) { + try { + return require('./turso.linux-arm64-musl.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-arm64-musl') + const bindingPackageVersion = require('@tursodatabase/database-linux-arm64-musl/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + try { + return require('./turso.linux-arm64-gnu.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-arm64-gnu') + const bindingPackageVersion = require('@tursodatabase/database-linux-arm64-gnu/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } + } else if (process.arch === 'arm') { + if (isMusl()) { + try { + return require('./turso.linux-arm-musleabihf.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-arm-musleabihf') + const bindingPackageVersion = require('@tursodatabase/database-linux-arm-musleabihf/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + try { + return require('./turso.linux-arm-gnueabihf.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-arm-gnueabihf') + const bindingPackageVersion = require('@tursodatabase/database-linux-arm-gnueabihf/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } + } else if (process.arch === 'riscv64') { + if (isMusl()) { + try { + return require('./turso.linux-riscv64-musl.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-riscv64-musl') + const bindingPackageVersion = require('@tursodatabase/database-linux-riscv64-musl/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. 
You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + try { + return require('./turso.linux-riscv64-gnu.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-riscv64-gnu') + const bindingPackageVersion = require('@tursodatabase/database-linux-riscv64-gnu/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } + } else if (process.arch === 'ppc64') { + try { + return require('./turso.linux-ppc64-gnu.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-ppc64-gnu') + const bindingPackageVersion = require('@tursodatabase/database-linux-ppc64-gnu/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else if (process.arch === 's390x') { + try { + return require('./turso.linux-s390x-gnu.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-linux-s390x-gnu') + const bindingPackageVersion = require('@tursodatabase/database-linux-s390x-gnu/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + loadErrors.push(new Error(`Unsupported architecture on Linux: ${process.arch}`)) + } + } else if (process.platform === 'openharmony') { + if (process.arch === 'arm64') { + try { + return require('./turso.openharmony-arm64.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-openharmony-arm64') + const bindingPackageVersion = require('@tursodatabase/database-openharmony-arm64/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. 
You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else if (process.arch === 'x64') { + try { + return require('./turso.openharmony-x64.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-openharmony-x64') + const bindingPackageVersion = require('@tursodatabase/database-openharmony-x64/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else if (process.arch === 'arm') { + try { + return require('./turso.openharmony-arm.node') + } catch (e) { + loadErrors.push(e) + } + try { + const binding = require('@tursodatabase/database-openharmony-arm') + const bindingPackageVersion = require('@tursodatabase/database-openharmony-arm/package.json').version + if (bindingPackageVersion !== '0.1.5-pre.3' && process.env.NAPI_RS_ENFORCE_VERSION_CHECK && process.env.NAPI_RS_ENFORCE_VERSION_CHECK !== '0') { + throw new Error(`Native binding package version mismatch, expected 0.1.5-pre.3 but got ${bindingPackageVersion}. You can reinstall dependencies to fix this issue.`) + } + return binding + } catch (e) { + loadErrors.push(e) + } + } else { + loadErrors.push(new Error(`Unsupported architecture on OpenHarmony: ${process.arch}`)) + } + } else { + loadErrors.push(new Error(`Unsupported OS: ${process.platform}, architecture: ${process.arch}`)) + } +} + +nativeBinding = requireNative() + +if (!nativeBinding || process.env.NAPI_RS_FORCE_WASI) { + try { + nativeBinding = require('./turso.wasi.cjs') + } catch (err) { + if (process.env.NAPI_RS_FORCE_WASI) { + loadErrors.push(err) + } + } + if (!nativeBinding) { + try { + nativeBinding = require('@tursodatabase/database-wasm32-wasi') + } catch (err) { + if (process.env.NAPI_RS_FORCE_WASI) { + loadErrors.push(err) + } + } + } +} + +if (!nativeBinding) { + if (loadErrors.length > 0) { + throw new Error( + `Cannot find native binding. ` + + `npm has a bug related to optional dependencies (https://github.com/npm/cli/issues/4828). 
` + + 'Please try `npm i` again after removing both package-lock.json and node_modules directory.', + { cause: loadErrors } + ) + } + throw new Error(`Failed to load native binding`) +} + +const { Database, Statement } = nativeBinding +export { Database } +export { Statement } diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json new file mode 100644 index 000000000..666262050 --- /dev/null +++ b/bindings/javascript/packages/native/package.json @@ -0,0 +1,52 @@ +{ + "name": "@tursodatabase/database", + "version": "0.1.5-pre.3", + "repository": { + "type": "git", + "url": "https://github.com/tursodatabase/turso" + }, + "license": "MIT", + "module": "./dist/promise.js", + "main": "./dist/promise.js", + "type": "module", + "exports": { + ".": "./dist/promise.js", + "./compat": "./dist/compat.js" + }, + "files": [ + "index.js", + "dist/**", + "README.md" + ], + "packageManager": "yarn@4.9.2", + "devDependencies": { + "@napi-rs/cli": "^3.1.5", + "@types/node": "^24.3.1", + "typescript": "^5.9.2", + "vitest": "^3.2.4" + }, + "scripts": { + "napi-build": "napi build --platform --release --esm --manifest-path ../../Cargo.toml --output-dir .", + "napi-dirs": "napi create-npm-dirs", + "napi-artifacts": "napi artifacts --output-dir .", + "tsc-build": "npm exec tsc", + "build": "npm run napi-build && npm run tsc-build", + "test": "vitest --run", + "prepublishOnly": "npm run napi-dirs && npm run napi-artifacts && napi prepublish -t npm" + }, + "napi": { + "binaryName": "turso", + "targets": [ + "x86_64-unknown-linux-gnu", + "x86_64-pc-windows-msvc", + "universal-apple-darwin", + "aarch64-unknown-linux-gnu" + ] + }, + "dependencies": { + "@tursodatabase/database-common": "^0.1.5-pre.3" + }, + "imports": { + "#index": "./index.js" + } +} diff --git a/bindings/javascript/packages/native/promise.test.ts b/bindings/javascript/packages/native/promise.test.ts new file mode 100644 index 000000000..d75e3728e --- /dev/null +++ b/bindings/javascript/packages/native/promise.test.ts @@ -0,0 +1,107 @@ +import { unlinkSync } from "node:fs"; +import { expect, test } from 'vitest' +import { connect } from './promise.js' + +test('in-memory db', async () => { + const db = await connect(":memory:"); + await db.exec("CREATE TABLE t(x)"); + await db.exec("INSERT INTO t VALUES (1), (2), (3)"); + const stmt = db.prepare("SELECT * FROM t WHERE x % 2 = ?"); + const rows = await stmt.all([1]); + expect(rows).toEqual([{ x: 1 }, { x: 3 }]); +}) + +test('on-disk db', async () => { + const path = `test-${(Math.random() * 10000) | 0}.db`; + try { + const db1 = await connect(path); + await db1.exec("CREATE TABLE t(x)"); + await db1.exec("INSERT INTO t VALUES (1), (2), (3)"); + const stmt1 = db1.prepare("SELECT * FROM t WHERE x % 2 = ?"); + expect(stmt1.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); + const rows1 = await stmt1.all([1]); + expect(rows1).toEqual([{ x: 1 }, { x: 3 }]); + db1.close(); + + const db2 = await connect(path); + const stmt2 = db2.prepare("SELECT * FROM t WHERE x % 2 = ?"); + expect(stmt2.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); + const rows2 = await stmt2.all([1]); + expect(rows2).toEqual([{ x: 1 }, { x: 3 }]); + db2.close(); + } finally { + unlinkSync(path); + unlinkSync(`${path}-wal`); + } +}) + +test('attach', async () => { + const path1 = `test-${(Math.random() * 10000) | 0}.db`; + const path2 = `test-${(Math.random() * 10000) | 0}.db`; + try { + const db1 
= await connect(path1);
+    await db1.exec("CREATE TABLE t(x)");
+    await db1.exec("INSERT INTO t VALUES (1), (2), (3)");
+    const db2 = await connect(path2);
+    await db2.exec("CREATE TABLE q(x)");
+    await db2.exec("INSERT INTO q VALUES (4), (5), (6)");
+
+    await db1.exec(`ATTACH '${path2}' as secondary`);
+
+    const stmt = db1.prepare("SELECT * FROM t UNION ALL SELECT * FROM secondary.q");
+    expect(stmt.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]);
+    const rows = await stmt.all([1]);
+    expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }, { x: 4 }, { x: 5 }, { x: 6 }]);
+  } finally {
+    unlinkSync(path1);
+    unlinkSync(`${path1}-wal`);
+    unlinkSync(path2);
+    unlinkSync(`${path2}-wal`);
+  }
+})
+
+test('blobs', async () => {
+  const db = await connect(":memory:");
+  const rows = await db.prepare("SELECT x'1020' as x").all();
+  expect(rows).toEqual([{ x: Buffer.from([16, 32]) }])
+})
+
+
+test('example-1', async () => {
+  const db = await connect(':memory:');
+  await db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)');
+
+  const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+  await insert.run('Alice', 'alice@example.com');
+  await insert.run('Bob', 'bob@example.com');
+
+  const users = await db.prepare('SELECT * FROM users').all();
+  expect(users).toEqual([
+    { id: 1, name: 'Alice', email: 'alice@example.com' },
+    { id: 2, name: 'Bob', email: 'bob@example.com' }
+  ]);
+})
+
+test('example-2', async () => {
+  const db = await connect(':memory:');
+  await db.exec('CREATE TABLE users (name, email)');
+  // Using transactions for atomic operations
+  const transaction = db.transaction(async (users) => {
+    const insert = db.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+    for (const user of users) {
+      await insert.run(user.name, user.email);
+    }
+  });
+
+  // Execute transaction
+  await transaction([
+    { name: 'Alice', email: 'alice@example.com' },
+    { name: 'Bob', email: 'bob@example.com' }
+  ]);
+
+  const rows = await db.prepare('SELECT * FROM users').all();
+  expect(rows).toEqual([
+    { name: 'Alice', email: 'alice@example.com' },
+    { name: 'Bob', email: 'bob@example.com' }
+  ]);
+})
\ No newline at end of file
diff --git a/bindings/javascript/packages/native/promise.ts b/bindings/javascript/packages/native/promise.ts
new file mode 100644
index 000000000..0131381c0
--- /dev/null
+++ b/bindings/javascript/packages/native/promise.ts
@@ -0,0 +1,21 @@
+import { DatabasePromise, NativeDatabase, SqliteError, DatabaseOpts } from "@tursodatabase/database-common"
+import { Database as NativeDB } from "#index";
+
+class Database extends DatabasePromise {
+    constructor(path: string, opts: DatabaseOpts = {}) {
+        super(new NativeDB(path, { tracing: opts.tracing }) as unknown as NativeDatabase, opts)
+    }
+}
+
+/**
+ * Creates a new database connection asynchronously.
+ *
+ * @param {string} path - Path to the database file.
+ * @param {Object} opts - Options for database behavior.
+ * @returns {Promise<Database>} - A promise that resolves to a Database instance.
+ */
+async function connect(path: string, opts: any = {}): Promise<Database> {
+    return new Database(path, opts);
+}
+
+export { connect, Database, SqliteError }
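For orientation, here is roughly how the two entry points defined above are meant to be consumed. This is a sketch, not code from this PR: it assumes the `dist/` layout from the package.json above, and the synchronous `./compat` surface is only partially visible in this diff (the perf script further down imports it).

```ts
// Promise API (package root, ./dist/promise.js): all database work is awaited.
import { connect } from '@tursodatabase/database';

const db = await connect(':memory:', { tracing: 'info' }); // tracing: 'info' | 'debug' | 'trace'
await db.exec('CREATE TABLE t(x)');
const rows = await db.prepare('SELECT * FROM t WHERE x % 2 = ?').all([1]);

// Compat API (./compat, ./dist/compat.js): synchronous construction,
// as used by perf/perf-turso.js later in this diff.
import { Database } from '@tursodatabase/database/compat';
const syncDb = new Database(':memory:');
```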
diff --git a/bindings/javascript/packages/native/tsconfig.json b/bindings/javascript/packages/native/tsconfig.json
new file mode 100644
index 000000000..b46abc167
--- /dev/null
+++ b/bindings/javascript/packages/native/tsconfig.json
@@ -0,0 +1,21 @@
+{
+  "compilerOptions": {
+    "skipLibCheck": true,
+    "declaration": true,
+    "declarationMap": true,
+    "module": "nodenext",
+    "target": "esnext",
+    "outDir": "dist/",
+    "lib": [
+      "es2020"
+    ],
+    "paths": {
+      "#index": [
+        "./index.js"
+      ]
+    }
+  },
+  "include": [
+    "*"
+  ]
+}
\ No newline at end of file
diff --git a/bindings/javascript/perf/package-lock.json b/bindings/javascript/perf/package-lock.json
index 8d882350b..bf737b714 100644
--- a/bindings/javascript/perf/package-lock.json
+++ b/bindings/javascript/perf/package-lock.json
@@ -6,28 +6,34 @@
     "": {
       "name": "turso-perf",
       "dependencies": {
-        "@tursodatabase/database": "..",
+        "@tursodatabase/database": "../packages/native",
         "better-sqlite3": "^9.5.0",
         "mitata": "^0.1.11"
       }
     },
     "..": {
+      "workspaces": [
+        "packages/core",
+        "packages/native",
+        "packages/browser"
+      ]
+    },
+    "../packages/native": {
       "name": "@tursodatabase/database",
-      "version": "0.1.4-pre.4",
+      "version": "0.1.5-pre.3",
       "license": "MIT",
-      "devDependencies": {
-        "@napi-rs/cli": "^3.0.4",
-        "@napi-rs/wasm-runtime": "^1.0.1",
-        "ava": "^6.0.1",
-        "better-sqlite3": "^11.9.1",
-        "typescript": "^5.9.2"
+      "dependencies": {
+        "@tursodatabase/database-common": "^0.1.5-pre.3"
       },
-      "engines": {
-        "node": ">= 10"
+      "devDependencies": {
+        "@napi-rs/cli": "^3.1.5",
+        "@types/node": "^24.3.1",
+        "typescript": "^5.9.2",
+        "vitest": "^3.2.4"
       }
     },
     "node_modules/@tursodatabase/database": {
-      "resolved": "..",
+      "resolved": "../packages/native",
       "link": true
     },
     "node_modules/base64-js": {
diff --git a/bindings/javascript/perf/package.json b/bindings/javascript/perf/package.json
index 83210e7f5..93e3d789e 100644
--- a/bindings/javascript/perf/package.json
+++ b/bindings/javascript/perf/package.json
@@ -2,9 +2,9 @@
   "name": "turso-perf",
   "type": "module",
   "private": true,
   "dependencies": {
     "better-sqlite3": "^9.5.0",
-    "@tursodatabase/database": "..",
+    "@tursodatabase/database": "../packages/native",
     "mitata": "^0.1.11"
   }
 }
diff --git a/bindings/javascript/perf/perf-turso.js b/bindings/javascript/perf/perf-turso.js
index 24c2fad72..092730265 100644
--- a/bindings/javascript/perf/perf-turso.js
+++ b/bindings/javascript/perf/perf-turso.js
@@ -1,6 +1,6 @@
 import { run, bench, group, baseline } from 'mitata';
 
-import Database from '@tursodatabase/database';
+import { Database } from '@tursodatabase/database/compat';
 
 const db = new Database(':memory:');
diff --git a/bindings/javascript/src/browser.rs b/bindings/javascript/src/browser.rs
new file mode 100644
index 000000000..f9c6bffa9
--- /dev/null
+++ b/bindings/javascript/src/browser.rs
@@ -0,0 +1,254 @@
+use std::sync::Arc;
+
+use napi::bindgen_prelude::*;
+use napi_derive::napi;
+use turso_core::{storage::database::DatabaseFile, Clock, File, Instant, IO};
+
+use crate::{init_tracing, is_memory, Database, DatabaseOpts};
+
+pub struct NoopTask;
+
+impl Task for NoopTask {
+    type Output = ();
+    type JsValue = ();
+    fn compute(&mut self) -> Result<Self::Output> {
+        Ok(())
+    }
+    fn resolve(&mut self, _: Env, _: Self::Output) -> Result<Self::JsValue> {
+        Ok(())
+    }
+}
+
+#[napi]
+/// turso-db in the browser requires explicit thread pool initialization,
+/// so we just put a no-op task on the thread pool to force emnapi to allocate a web worker
+pub fn init_thread_pool() -> napi::Result<AsyncTask<NoopTask>> {
+    Ok(AsyncTask::new(NoopTask))
+}
+
+pub struct ConnectTask {
+    path: String,
+    is_memory: bool,
+    io: Arc<dyn IO>,
+}
+
+pub struct ConnectResult {
+    db: Arc<turso_core::Database>,
+    conn: Arc<turso_core::Connection>,
+}
+
+unsafe impl Send for ConnectResult {}
+
+impl Task for ConnectTask {
+    type Output = ConnectResult;
+    type JsValue = Database;
+
+    fn compute(&mut self) -> Result<Self::Output> {
+        let file = self
+            .io
+            .open_file(&self.path, turso_core::OpenFlags::Create, false)
+            .map_err(|e| Error::new(Status::GenericFailure, format!("Failed to open file: {e}")))?;
+
+        let db_file = Arc::new(DatabaseFile::new(file));
+        let db = turso_core::Database::open(self.io.clone(), &self.path, db_file, false, true)
+            .map_err(|e| {
+                Error::new(
+                    Status::GenericFailure,
+                    format!("Failed to open database: {e}"),
+                )
+            })?;
+
+        let conn = db
+            .connect()
+            .map_err(|e| Error::new(Status::GenericFailure, format!("Failed to connect: {e}")))?;
+
+        Ok(ConnectResult { db, conn })
+    }
+
+    fn resolve(&mut self, _: Env, result: Self::Output) -> Result<Self::JsValue> {
+        Ok(Database::create(
+            Some(result.db),
+            self.io.clone(),
+            result.conn,
+            self.is_memory,
+        ))
+    }
+}
+
+#[napi]
+// we offload connect to the web worker because:
+// 1. the browser main thread does not support Atomics.wait operations
+// 2. turso-db uses blocking IO [io.wait_for_completion(c)] in a few places during the initialization path
+//
+// so, we offload connect to the worker thread
+pub fn connect(path: String, opts: Option<DatabaseOpts>) -> Result<AsyncTask<ConnectTask>> {
+    if let Some(opts) = opts {
+        init_tracing(opts.tracing);
+    }
+    let task = if is_memory(&path) {
+        ConnectTask {
+            io: Arc::new(turso_core::MemoryIO::new()),
+            is_memory: true,
+            path,
+        }
+    } else {
+        let io = Arc::new(Opfs::new()?);
+        ConnectTask {
+            io,
+            is_memory: false,
+            path,
+        }
+    };
+    Ok(AsyncTask::new(task))
+}
+
+#[napi]
+#[derive(Clone)]
+pub struct Opfs;
+
+#[napi]
+#[derive(Clone)]
+struct OpfsFile {
+    handle: i32,
+}
+
+#[napi]
+impl Opfs {
+    #[napi(constructor)]
+    pub fn new() -> napi::Result<Self> {
+        Ok(Self)
+    }
+}
+
+impl Clock for Opfs {
+    fn now(&self) -> Instant {
+        Instant { secs: 0, micros: 0 } // TODO
+    }
+}
+
+#[link(wasm_import_module = "env")]
+extern "C" {
+    fn lookup_file(path: *const u8, path_len: usize) -> i32;
+    fn read(handle: i32, buffer: *mut u8, buffer_len: usize, offset: i32) -> i32;
+    fn write(handle: i32, buffer: *const u8, buffer_len: usize, offset: i32) -> i32;
+    fn sync(handle: i32) -> i32;
+    fn truncate(handle: i32, length: usize) -> i32;
+    fn size(handle: i32) -> i32;
+    fn is_web_worker() -> bool;
+}
+
+fn is_web_worker_safe() -> bool {
+    unsafe { is_web_worker() }
+}
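The `extern "C"` block above is the whole contract between the wasm module and the JS host: the browser package has to inject these functions into the wasm `env` import object, via the same `overwriteImports` hook that the deleted `turso.wasi.cjs` below uses. A hypothetical sketch of those shims over OPFS sync access handles follows; `memory`, `importObject`, `handleByPath`, and `decodePath` are assumed helpers, not names from this PR:

```ts
declare const memory: WebAssembly.Memory;                      // the shared wasm memory
declare const importObject: { env: Record<string, unknown> };  // passed to overwriteImports
declare function decodePath(m: WebAssembly.Memory, ptr: number, len: number): string;

// Assumed registry of pre-created OPFS sync access handles, keyed by handle id.
const files: FileSystemSyncAccessHandle[] = [];
const handleByPath = new Map<string, number>();

importObject.env = {
  ...importObject.env,
  lookup_file: (pathPtr: number, pathLen: number): number =>
    handleByPath.get(decodePath(memory, pathPtr, pathLen)) ?? -404, // -404 => "create files in advance"
  read: (handle: number, bufPtr: number, bufLen: number, offset: number): number =>
    files[handle].read(new Uint8Array(memory.buffer, bufPtr, bufLen), { at: offset }),
  write: (handle: number, bufPtr: number, bufLen: number, offset: number): number =>
    files[handle].write(new Uint8Array(memory.buffer, bufPtr, bufLen), { at: offset }),
  sync: (handle: number): number => { files[handle].flush(); return 0; },
  truncate: (handle: number, length: number): number => { files[handle].truncate(length); return 0; },
  size: (handle: number): number => files[handle].getSize(),
  // the asserts in the File impl below rely on this: OPFS sync handles only work in workers
  is_web_worker: (): number => (typeof importScripts === 'function' ? 1 : 0),
};
```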
+impl IO for Opfs {
+    fn open_file(
+        &self,
+        path: &str,
+        _: turso_core::OpenFlags,
+        _: bool,
+    ) -> turso_core::Result<Arc<dyn File>> {
+        tracing::info!("open_file: {}", path);
+        let result = unsafe { lookup_file(path.as_ptr(), path.len()) };
+        if result >= 0 {
+            Ok(Arc::new(OpfsFile { handle: result }))
+        } else if result == -404 {
+            Err(turso_core::LimboError::InternalError(
+                "files must be created in advance for OPFS IO".to_string(),
+            ))
+        } else {
+            Err(turso_core::LimboError::InternalError(format!(
+                "unexpected file lookup error: {result}"
+            )))
+        }
+    }
+
+    fn remove_file(&self, _: &str) -> turso_core::Result<()> {
+        Ok(())
+    }
+}
+
+impl File for OpfsFile {
+    fn lock_file(&self, _: bool) -> turso_core::Result<()> {
+        Ok(())
+    }
+
+    fn unlock_file(&self) -> turso_core::Result<()> {
+        Ok(())
+    }
+
+    fn pread(
+        &self,
+        pos: u64,
+        c: turso_core::Completion,
+    ) -> turso_core::Result<turso_core::Completion> {
+        assert!(
+            is_web_worker_safe(),
+            "opfs must be used only from web worker for now"
+        );
+        tracing::debug!("pread({}): pos={}", self.handle, pos);
+        let handle = self.handle;
+        let read_c = c.as_read();
+        let buffer = read_c.buf_arc();
+        let buffer = buffer.as_mut_slice();
+        let result = unsafe { read(handle, buffer.as_mut_ptr(), buffer.len(), pos as i32) };
+        c.complete(result as i32);
+        Ok(c)
+    }
+
+    fn pwrite(
+        &self,
+        pos: u64,
+        buffer: Arc<turso_core::Buffer>,
+        c: turso_core::Completion,
+    ) -> turso_core::Result<turso_core::Completion> {
+        assert!(
+            is_web_worker_safe(),
+            "opfs must be used only from web worker for now"
+        );
+        tracing::debug!("pwrite({}): pos={}", self.handle, pos);
+        let handle = self.handle;
+        let buffer = buffer.as_slice();
+        let result = unsafe { write(handle, buffer.as_ptr(), buffer.len(), pos as i32) };
+        c.complete(result as i32);
+        Ok(c)
+    }
+
+    fn sync(&self, c: turso_core::Completion) -> turso_core::Result<turso_core::Completion> {
+        assert!(
+            is_web_worker_safe(),
+            "opfs must be used only from web worker for now"
+        );
+        tracing::debug!("sync({})", self.handle);
+        let handle = self.handle;
+        let result = unsafe { sync(handle) };
+        c.complete(result as i32);
+        Ok(c)
+    }
+
+    fn truncate(
+        &self,
+        len: u64,
+        c: turso_core::Completion,
+    ) -> turso_core::Result<turso_core::Completion> {
+        assert!(
+            is_web_worker_safe(),
+            "opfs must be used only from web worker for now"
+        );
+        tracing::debug!("truncate({}): len={}", self.handle, len);
+        let handle = self.handle;
+        let result = unsafe { truncate(handle, len as usize) };
+        c.complete(result as i32);
+        Ok(c)
+    }
+
+    fn size(&self) -> turso_core::Result<u64> {
+        assert!(
+            is_web_worker_safe(),
+            "size can be called only from web worker context"
+        );
+        tracing::debug!("size({})", self.handle);
+        let handle = self.handle;
+        let result = unsafe { size(handle) };
+        Ok(result as u64)
+    }
+}
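With the whole browser module in view, the intended call sequence from a web worker looks roughly like the sketch below. The export names follow the napi-rs camelCase convention for `init_thread_pool`/`connect`; the exact re-exports of `@tursodatabase/database-browser` are not shown in this diff, and the `-wal` sibling file is assumed only because the Node tests above clean one up:

```ts
import { initThreadPool, connect } from '@tursodatabase/database-browser';

// Force emnapi to spawn its worker before anything can block on it.
await initThreadPool();

// OPFS files must be created in advance: lookup_file returns -404 otherwise.
const root = await navigator.storage.getDirectory();
await root.getFileHandle('app.db', { create: true });
await root.getFileHandle('app.db-wal', { create: true }); // assumed WAL sibling

const db = await connect('app.db');
await db.exec('CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v TEXT)');
```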
diff --git a/bindings/javascript/src/lib.rs b/bindings/javascript/src/lib.rs
index 3b0d8a466..928b475ef 100644
--- a/bindings/javascript/src/lib.rs
+++ b/bindings/javascript/src/lib.rs
@@ -10,14 +10,20 @@
 //! - Iterating through query results
 //! - Managing the I/O event loop
 
+#[cfg(feature = "browser")]
+pub mod browser;
+
 use napi::bindgen_prelude::*;
 use napi::{Env, Task};
 use napi_derive::napi;
+use std::sync::OnceLock;
 use std::{
     cell::{Cell, RefCell},
     num::NonZeroUsize,
     sync::Arc,
 };
+use tracing_subscriber::filter::LevelFilter;
+use tracing_subscriber::fmt::format::FmtSpan;
 
 /// Step result constants
 const STEP_ROW: u32 = 1;
@@ -38,12 +44,107 @@ enum PresentationMode {
 pub struct Database {
     _db: Option<Arc<turso_core::Database>>,
     io: Arc<dyn turso_core::IO>,
-    conn: Arc<turso_core::Connection>,
+    conn: Option<Arc<turso_core::Connection>>,
     is_memory: bool,
     is_open: Cell<bool>,
     default_safe_integers: Cell<bool>,
 }
 
+pub(crate) fn is_memory(path: &str) -> bool {
+    path == ":memory:"
+}
+
+static TRACING_INIT: OnceLock<()> = OnceLock::new();
+pub(crate) fn init_tracing(level_filter: Option<String>) {
+    let Some(level_filter) = level_filter else {
+        return;
+    };
+    let level_filter = match level_filter.as_ref() {
+        "info" => LevelFilter::INFO,
+        "debug" => LevelFilter::DEBUG,
+        "trace" => LevelFilter::TRACE,
+        _ => return,
+    };
+    TRACING_INIT.get_or_init(|| {
+        tracing_subscriber::fmt()
+            .with_ansi(false)
+            .with_thread_ids(true)
+            .with_span_events(FmtSpan::ACTIVE)
+            .with_max_level(level_filter)
+            .init();
+    });
+}
+
+pub enum DbTask {
+    Batch {
+        conn: Arc<turso_core::Connection>,
+        sql: String,
+    },
+    Step {
+        stmt: Arc<RefCell<Option<turso_core::Statement>>>,
+    },
+}
+
+unsafe impl Send for DbTask {}
+
+impl Task for DbTask {
+    type Output = u32;
+    type JsValue = u32;
+
+    fn compute(&mut self) -> Result<Self::Output> {
+        match self {
+            DbTask::Batch { conn, sql } => {
+                batch_sync(conn, sql)?;
+                Ok(0)
+            }
+            DbTask::Step { stmt } => step_sync(stmt),
+        }
+    }
+
+    fn resolve(&mut self, _: Env, output: Self::Output) -> Result<Self::JsValue> {
+        Ok(output)
+    }
+}
+
+#[napi(object)]
+pub struct DatabaseOpts {
+    pub tracing: Option<String>,
+}
+
+fn batch_sync(conn: &Arc<turso_core::Connection>, sql: &str) -> napi::Result<()> {
+    conn.prepare_execute_batch(sql).map_err(|e| {
+        Error::new(
+            Status::GenericFailure,
+            format!("Failed to execute batch: {e}"),
+        )
+    })?;
+    Ok(())
+}
+
+fn step_sync(stmt: &Arc<RefCell<Option<turso_core::Statement>>>) -> napi::Result<u32> {
+    let mut stmt_ref = stmt.borrow_mut();
+    let stmt = stmt_ref
+        .as_mut()
+        .ok_or_else(|| Error::new(Status::GenericFailure, "Statement has been finalized"))?;
+
+    match stmt.step() {
+        Ok(turso_core::StepResult::Row) => Ok(STEP_ROW),
+        Ok(turso_core::StepResult::IO) => Ok(STEP_IO),
+        Ok(turso_core::StepResult::Done) => Ok(STEP_DONE),
+        Ok(turso_core::StepResult::Interrupt) => Err(Error::new(
+            Status::GenericFailure,
+            "Statement was interrupted",
+        )),
+        Ok(turso_core::StepResult::Busy) => {
+            Err(Error::new(Status::GenericFailure, "Database is busy"))
+        }
+        Err(e) => Err(Error::new(
+            Status::GenericFailure,
+            format!("Step failed: {e}"),
+        )),
+    }
+}
+
 #[napi]
 impl Database {
     /// Creates a new database instance.
@@ -51,9 +152,11 @@
     /// # Arguments
     /// * `path` - The path to the database file.
     #[napi(constructor)]
-    pub fn new(path: String) -> Result<Self> {
-        let is_memory = path == ":memory:";
-        let io: Arc<dyn turso_core::IO> = if is_memory {
+    pub fn new(path: String, opts: Option<DatabaseOpts>) -> Result<Self> {
+        if let Some(opts) = opts {
+            init_tracing(opts.tracing);
+        }
+        let io: Arc<dyn turso_core::IO> = if is_memory(&path) {
             Arc::new(turso_core::MemoryIO::new())
         } else {
             Arc::new(turso_core::PlatformIO::new().map_err(|e| {
@@ -61,6 +164,11 @@
             })?)
         };
 
+        #[cfg(feature = "browser")]
+        if !is_memory(&path) {
+            return Err(Error::new(Status::GenericFailure, "sync constructor is not supported for FS-backed databases in WASM. Use the async connect(...) method instead".to_string()));
+        }
+
         let file = io
             .open_file(&path, turso_core::OpenFlags::Create, false)
             .map_err(|e| Error::new(Status::GenericFailure, format!("Failed to open file: {e}")))?;
@@ -78,7 +186,7 @@
             .connect()
             .map_err(|e| Error::new(Status::GenericFailure, format!("Failed to connect: {e}")))?;
 
-        Ok(Self::create(Some(db), io, conn, is_memory))
+        Ok(Self::create(Some(db), io, conn, is_memory(&path)))
     }
 
     pub fn create(
@@ -90,13 +198,23 @@
     ) -> Self {
         Database {
             _db: db,
             io,
-            conn,
+            conn: Some(conn),
             is_memory,
             is_open: Cell::new(true),
             default_safe_integers: Cell::new(false),
         }
     }
 
+    fn conn(&self) -> Result<Arc<turso_core::Connection>> {
+        let Some(conn) = self.conn.as_ref() else {
+            return Err(napi::Error::new(
+                napi::Status::GenericFailure,
+                "connection is not set",
+            ));
+        };
+        Ok(conn.clone())
+    }
+
     /// Returns whether the database is in memory-only mode.
     #[napi(getter)]
     pub fn memory(&self) -> bool {
@@ -109,7 +227,7 @@
         self.is_open.get()
     }
 
-    /// Executes a batch of SQL statements.
+    /// Executes a batch of SQL statements on the main thread.
     ///
     /// # Arguments
     ///
@@ -117,14 +235,23 @@
     ///
     /// # Returns
     #[napi]
-    pub fn batch(&self, sql: String) -> Result<()> {
-        self.conn.prepare_execute_batch(&sql).map_err(|e| {
-            Error::new(
-                Status::GenericFailure,
-                format!("Failed to execute batch: {e}"),
-            )
-        })?;
-        Ok(())
+    pub fn batch_sync(&self, sql: String) -> Result<()> {
+        batch_sync(&self.conn()?, &sql)
+    }
+
+    /// Executes a batch of SQL statements outside of the main thread.
+    ///
+    /// # Arguments
+    ///
+    /// * `sql` - The SQL statements to execute.
+    ///
+    /// # Returns
+    #[napi]
+    pub fn batch_async(&self, sql: String) -> Result<AsyncTask<DbTask>> {
+        Ok(AsyncTask::new(DbTask::Batch {
+            conn: self.conn()?.clone(),
+            sql,
+        }))
     }
 
     /// Prepares a statement for execution.
@@ -139,14 +266,15 @@
     #[napi]
     pub fn prepare(&self, sql: String) -> Result<Statement> {
         let stmt = self
-            .conn
+            .conn()?
             .prepare(&sql)
             .map_err(|e| Error::new(Status::GenericFailure, format!("{e}")))?;
         let column_names: Vec<std::ffi::CString> = (0..stmt.num_columns())
             .map(|i| std::ffi::CString::new(stmt.get_column_name(i).to_string()).unwrap())
             .collect();
         Ok(Statement {
-            stmt: RefCell::new(Some(stmt)),
+            #[allow(clippy::arc_with_non_send_sync)]
+            stmt: Arc::new(RefCell::new(Some(stmt))),
             column_names,
             mode: RefCell::new(PresentationMode::Expanded),
             safe_integers: Cell::new(self.default_safe_integers.get()),
@@ -160,7 +288,7 @@
     /// The rowid of the last row inserted.
     #[napi]
     pub fn last_insert_rowid(&self) -> Result<i64> {
-        Ok(self.conn.last_insert_rowid())
+        Ok(self.conn()?.last_insert_rowid())
     }
 
     /// Returns the number of changes made by the last statement.
@@ -170,7 +298,7 @@
     /// The number of changes made by the last statement.
     #[napi]
     pub fn changes(&self) -> Result<i64> {
-        Ok(self.conn.changes())
+        Ok(self.conn()?.changes())
     }
 
     /// Returns the total number of changes made by all statements.
@@ -180,7 +308,7 @@
     /// The total number of changes made by all statements.
     #[napi]
    pub fn total_changes(&self) -> Result<i64> {
-        Ok(self.conn.total_changes())
+        Ok(self.conn()?.total_changes())
     }
 
     /// Closes the database connection.
     ///
     /// # Returns
     ///
     /// `Ok(())` if the database is closed successfully.
     #[napi]
-    pub fn close(&self) -> Result<()> {
+    pub fn close(&mut self) -> Result<()> {
         self.is_open.set(false);
-        // Database close is handled automatically when dropped
+        let _ = self._db.take().unwrap();
+        let _ = self.conn.take().unwrap();
         Ok(())
     }
 
@@ -225,7 +354,7 @@
 /// A prepared statement.
 #[napi]
 pub struct Statement {
-    stmt: RefCell<Option<turso_core::Statement>>,
+    stmt: Arc<RefCell<Option<turso_core::Statement>>>,
     column_names: Vec<std::ffi::CString>,
     mode: RefCell<PresentationMode>,
     safe_integers: Cell<bool>,
@@ -344,31 +473,20 @@
         Ok(())
     }
 
-    /// Step the statement and return result code:
+    /// Step the statement and return result code (executed on the main thread):
     /// 1 = Row available, 2 = Done, 3 = I/O needed
     #[napi]
-    pub fn step(&self) -> Result<u32> {
-        let mut stmt_ref = self.stmt.borrow_mut();
-        let stmt = stmt_ref
-            .as_mut()
-            .ok_or_else(|| Error::new(Status::GenericFailure, "Statement has been finalized"))?;
+    pub fn step_sync(&self) -> Result<u32> {
+        step_sync(&self.stmt)
+    }
 
-        match stmt.step() {
-            Ok(turso_core::StepResult::Row) => Ok(STEP_ROW),
-            Ok(turso_core::StepResult::Done) => Ok(STEP_DONE),
-            Ok(turso_core::StepResult::IO) => Ok(STEP_IO),
-            Ok(turso_core::StepResult::Interrupt) => Err(Error::new(
-                Status::GenericFailure,
-                "Statement was interrupted",
-            )),
-            Ok(turso_core::StepResult::Busy) => {
-                Err(Error::new(Status::GenericFailure, "Database is busy"))
-            }
-            Err(e) => Err(Error::new(
-                Status::GenericFailure,
-                format!("Step failed: {e}"),
-            )),
-        }
+    /// Step the statement and return result code (executed on a background thread):
+    /// 1 = Row available, 2 = Done, 3 = I/O needed
+    #[napi]
+    pub fn step_async(&self) -> Result<AsyncTask<DbTask>> {
+        Ok(AsyncTask::new(DbTask::Step {
+            stmt: self.stmt.clone(),
+        }))
    }
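The `step_sync`/`step_async` pair is the low-level protocol the JS wrappers drive; the actual loop lives in `@tursodatabase/database-common` and is not part of this diff. A hypothetical driver for the result codes, with `row()` and `runIo()` standing in for the wrapper's row accessor and I/O pump:

```ts
type NativeStatement = { stepAsync(): Promise<number>; row(): unknown }; // assumed shape

const STEP_ROW = 1, STEP_DONE = 2, STEP_IO = 3;

async function all(stmt: NativeStatement, runIo: () => Promise<void>): Promise<unknown[]> {
  const rows: unknown[] = [];
  for (;;) {
    const rc = await stmt.stepAsync();           // stepSync() is the main-thread variant
    if (rc === STEP_ROW) rows.push(stmt.row());  // hypothetical row accessor
    else if (rc === STEP_IO) await runIo();      // hypothetical I/O pump
    else return rows;                            // STEP_DONE
  }
}
```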
 
     /// Get the current row data according to the presentation mode
@@ -543,8 +661,17 @@ fn to_js_value<'a>(
         turso_core::Value::Float(f) => ToNapiValue::into_unknown(*f, env),
         turso_core::Value::Text(s) => ToNapiValue::into_unknown(s.as_str(), env),
         turso_core::Value::Blob(b) => {
-            let buffer = Buffer::from(b.as_slice());
-            ToNapiValue::into_unknown(buffer, env)
+            #[cfg(not(feature = "browser"))]
+            {
+                let buffer = Buffer::from(b.as_slice());
+                ToNapiValue::into_unknown(buffer, env)
+            }
+            // emnapi does not support Buffer
+            #[cfg(feature = "browser")]
+            {
+                let buffer = Uint8Array::from(b.as_slice());
+                ToNapiValue::into_unknown(buffer, env)
+            }
         }
     }
 }
diff --git a/bindings/javascript/turso.wasi.cjs b/bindings/javascript/turso.wasi.cjs
deleted file mode 100644
index 9aa0078af..000000000
--- a/bindings/javascript/turso.wasi.cjs
+++ /dev/null
@@ -1,112 +0,0 @@
-/* eslint-disable */
-/* prettier-ignore */
-
-/* auto-generated by NAPI-RS */
-
-const __nodeFs = require('node:fs')
-const __nodePath = require('node:path')
-const { WASI: __nodeWASI } = require('node:wasi')
-const { Worker } = require('node:worker_threads')
-
-const {
-  createOnMessage: __wasmCreateOnMessageForFsProxy,
-  getDefaultContext: __emnapiGetDefaultContext,
-  instantiateNapiModuleSync: __emnapiInstantiateNapiModuleSync,
-} = require('@napi-rs/wasm-runtime')
-
-const __rootDir = __nodePath.parse(process.cwd()).root
-
-const __wasi = new __nodeWASI({
-  version: 'preview1',
-  env: process.env,
-  preopens: {
-    [__rootDir]: __rootDir,
-  }
-})
-
-const __emnapiContext = __emnapiGetDefaultContext()
-
-const __sharedMemory = new WebAssembly.Memory({
-  initial: 4000,
-  maximum: 65536,
-  shared: true,
-})
-
-let __wasmFilePath = __nodePath.join(__dirname, 'turso.wasm32-wasi.wasm')
-const __wasmDebugFilePath = __nodePath.join(__dirname, 'turso.wasm32-wasi.debug.wasm')
-
-if 
(__nodeFs.existsSync(__wasmDebugFilePath)) { - __wasmFilePath = __wasmDebugFilePath -} else if (!__nodeFs.existsSync(__wasmFilePath)) { - try { - __wasmFilePath = __nodePath.resolve('@tursodatabase/database-wasm32-wasi') - } catch { - throw new Error('Cannot find turso.wasm32-wasi.wasm file, and @tursodatabase/database-wasm32-wasi package is not installed.') - } -} - -const { instance: __napiInstance, module: __wasiModule, napiModule: __napiModule } = __emnapiInstantiateNapiModuleSync(__nodeFs.readFileSync(__wasmFilePath), { - context: __emnapiContext, - asyncWorkPoolSize: (function() { - const threadsSizeFromEnv = Number(process.env.NAPI_RS_ASYNC_WORK_POOL_SIZE ?? process.env.UV_THREADPOOL_SIZE) - // NaN > 0 is false - if (threadsSizeFromEnv > 0) { - return threadsSizeFromEnv - } else { - return 4 - } - })(), - reuseWorker: true, - wasi: __wasi, - onCreateWorker() { - const worker = new Worker(__nodePath.join(__dirname, 'wasi-worker.mjs'), { - env: process.env, - }) - worker.onmessage = ({ data }) => { - __wasmCreateOnMessageForFsProxy(__nodeFs)(data) - } - - // The main thread of Node.js waits for all the active handles before exiting. - // But Rust threads are never waited without `thread::join`. - // So here we hack the code of Node.js to prevent the workers from being referenced (active). - // According to https://github.com/nodejs/node/blob/19e0d472728c79d418b74bddff588bea70a403d0/lib/internal/worker.js#L415, - // a worker is consist of two handles: kPublicPort and kHandle. - { - const kPublicPort = Object.getOwnPropertySymbols(worker).find(s => - s.toString().includes("kPublicPort") - ); - if (kPublicPort) { - worker[kPublicPort].ref = () => {}; - } - - const kHandle = Object.getOwnPropertySymbols(worker).find(s => - s.toString().includes("kHandle") - ); - if (kHandle) { - worker[kHandle].ref = () => {}; - } - - worker.unref(); - } - return worker - }, - overwriteImports(importObject) { - importObject.env = { - ...importObject.env, - ...importObject.napi, - ...importObject.emnapi, - memory: __sharedMemory, - } - return importObject - }, - beforeInit({ instance }) { - for (const name of Object.keys(instance.exports)) { - if (name.startsWith('__napi_register__')) { - instance.exports[name]() - } - } - }, -}) -module.exports = __napiModule.exports -module.exports.Database = __napiModule.exports.Database -module.exports.Statement = __napiModule.exports.Statement diff --git a/bindings/javascript/wasi-worker-browser.mjs b/bindings/javascript/wasi-worker-browser.mjs deleted file mode 100644 index 8b1b17221..000000000 --- a/bindings/javascript/wasi-worker-browser.mjs +++ /dev/null @@ -1,32 +0,0 @@ -import { instantiateNapiModuleSync, MessageHandler, WASI } from '@napi-rs/wasm-runtime' - -const handler = new MessageHandler({ - onLoad({ wasmModule, wasmMemory }) { - const wasi = new WASI({ - print: function () { - // eslint-disable-next-line no-console - console.log.apply(console, arguments) - }, - printErr: function() { - // eslint-disable-next-line no-console - console.error.apply(console, arguments) - }, - }) - return instantiateNapiModuleSync(wasmModule, { - childThread: true, - wasi, - overwriteImports(importObject) { - importObject.env = { - ...importObject.env, - ...importObject.napi, - ...importObject.emnapi, - memory: wasmMemory, - } - }, - }) - }, -}) - -globalThis.onmessage = function (e) { - handler.handle(e) -} diff --git a/bindings/javascript/wasi-worker.mjs b/bindings/javascript/wasi-worker.mjs deleted file mode 100644 index 84b448fcc..000000000 --- 
a/bindings/javascript/wasi-worker.mjs +++ /dev/null @@ -1,63 +0,0 @@ -import fs from "node:fs"; -import { createRequire } from "node:module"; -import { parse } from "node:path"; -import { WASI } from "node:wasi"; -import { parentPort, Worker } from "node:worker_threads"; - -const require = createRequire(import.meta.url); - -const { instantiateNapiModuleSync, MessageHandler, getDefaultContext } = require("@napi-rs/wasm-runtime"); - -if (parentPort) { - parentPort.on("message", (data) => { - globalThis.onmessage({ data }); - }); -} - -Object.assign(globalThis, { - self: globalThis, - require, - Worker, - importScripts: function (f) { - ;(0, eval)(fs.readFileSync(f, "utf8") + "//# sourceURL=" + f); - }, - postMessage: function (msg) { - if (parentPort) { - parentPort.postMessage(msg); - } - }, -}); - -const emnapiContext = getDefaultContext(); - -const __rootDir = parse(process.cwd()).root; - -const handler = new MessageHandler({ - onLoad({ wasmModule, wasmMemory }) { - const wasi = new WASI({ - version: 'preview1', - env: process.env, - preopens: { - [__rootDir]: __rootDir, - }, - }); - - return instantiateNapiModuleSync(wasmModule, { - childThread: true, - wasi, - context: emnapiContext, - overwriteImports(importObject) { - importObject.env = { - ...importObject.env, - ...importObject.napi, - ...importObject.emnapi, - memory: wasmMemory - }; - }, - }); - }, -}); - -globalThis.onmessage = function (e) { - handler.handle(e); -}; diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock index a0bb18897..e1925d556 100644 --- a/bindings/javascript/yarn.lock +++ b/bindings/javascript/yarn.lock @@ -5,6 +5,31 @@ __metadata: version: 8 cacheKey: 10c0 +"@babel/code-frame@npm:^7.10.4": + version: 7.27.1 + resolution: "@babel/code-frame@npm:7.27.1" + dependencies: + "@babel/helper-validator-identifier": "npm:^7.27.1" + js-tokens: "npm:^4.0.0" + picocolors: "npm:^1.1.1" + checksum: 10c0/5dd9a18baa5fce4741ba729acc3a3272c49c25cb8736c4b18e113099520e7ef7b545a4096a26d600e4416157e63e87d66db46aa3fbf0a5f2286da2705c12da00 + languageName: node + linkType: hard + +"@babel/helper-validator-identifier@npm:^7.27.1": + version: 7.27.1 + resolution: "@babel/helper-validator-identifier@npm:7.27.1" + checksum: 10c0/c558f11c4871d526498e49d07a84752d1800bf72ac0d3dad100309a2eaba24efbf56ea59af5137ff15e3a00280ebe588560534b0e894a4750f8b1411d8f78b84 + languageName: node + linkType: hard + +"@babel/runtime@npm:^7.12.5": + version: 7.28.4 + resolution: "@babel/runtime@npm:7.28.4" + checksum: 10c0/792ce7af9750fb9b93879cc9d1db175701c4689da890e6ced242ea0207c9da411ccf16dc04e689cc01158b28d7898c40d75598f4559109f761c12ce01e959bf7 + languageName: node + linkType: hard + "@emnapi/core@npm:^1.4.5": version: 1.4.5 resolution: "@emnapi/core@npm:1.4.5" @@ -33,6 +58,188 @@ __metadata: languageName: node linkType: hard +"@esbuild/aix-ppc64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/aix-ppc64@npm:0.25.9" + conditions: os=aix & cpu=ppc64 + languageName: node + linkType: hard + +"@esbuild/android-arm64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/android-arm64@npm:0.25.9" + conditions: os=android & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/android-arm@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/android-arm@npm:0.25.9" + conditions: os=android & cpu=arm + languageName: node + linkType: hard + +"@esbuild/android-x64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/android-x64@npm:0.25.9" + conditions: os=android & cpu=x64 + languageName: node + linkType: hard + 
+"@esbuild/darwin-arm64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/darwin-arm64@npm:0.25.9" + conditions: os=darwin & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/darwin-x64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/darwin-x64@npm:0.25.9" + conditions: os=darwin & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/freebsd-arm64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/freebsd-arm64@npm:0.25.9" + conditions: os=freebsd & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/freebsd-x64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/freebsd-x64@npm:0.25.9" + conditions: os=freebsd & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/linux-arm64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/linux-arm64@npm:0.25.9" + conditions: os=linux & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/linux-arm@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/linux-arm@npm:0.25.9" + conditions: os=linux & cpu=arm + languageName: node + linkType: hard + +"@esbuild/linux-ia32@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/linux-ia32@npm:0.25.9" + conditions: os=linux & cpu=ia32 + languageName: node + linkType: hard + +"@esbuild/linux-loong64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/linux-loong64@npm:0.25.9" + conditions: os=linux & cpu=loong64 + languageName: node + linkType: hard + +"@esbuild/linux-mips64el@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/linux-mips64el@npm:0.25.9" + conditions: os=linux & cpu=mips64el + languageName: node + linkType: hard + +"@esbuild/linux-ppc64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/linux-ppc64@npm:0.25.9" + conditions: os=linux & cpu=ppc64 + languageName: node + linkType: hard + +"@esbuild/linux-riscv64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/linux-riscv64@npm:0.25.9" + conditions: os=linux & cpu=riscv64 + languageName: node + linkType: hard + +"@esbuild/linux-s390x@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/linux-s390x@npm:0.25.9" + conditions: os=linux & cpu=s390x + languageName: node + linkType: hard + +"@esbuild/linux-x64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/linux-x64@npm:0.25.9" + conditions: os=linux & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/netbsd-arm64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/netbsd-arm64@npm:0.25.9" + conditions: os=netbsd & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/netbsd-x64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/netbsd-x64@npm:0.25.9" + conditions: os=netbsd & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/openbsd-arm64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/openbsd-arm64@npm:0.25.9" + conditions: os=openbsd & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/openbsd-x64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/openbsd-x64@npm:0.25.9" + conditions: os=openbsd & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/openharmony-arm64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/openharmony-arm64@npm:0.25.9" + conditions: os=openharmony & cpu=arm64 + languageName: node + linkType: hard + +"@esbuild/sunos-x64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/sunos-x64@npm:0.25.9" + conditions: os=sunos & cpu=x64 + languageName: node + linkType: hard + +"@esbuild/win32-arm64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/win32-arm64@npm:0.25.9" + conditions: os=win32 & cpu=arm64 + languageName: node + 
linkType: hard + +"@esbuild/win32-ia32@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/win32-ia32@npm:0.25.9" + conditions: os=win32 & cpu=ia32 + languageName: node + linkType: hard + +"@esbuild/win32-x64@npm:0.25.9": + version: 0.25.9 + resolution: "@esbuild/win32-x64@npm:0.25.9" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + "@inquirer/checkbox@npm:^4.2.0": version: 4.2.0 resolution: "@inquirer/checkbox@npm:4.2.0" @@ -281,26 +488,16 @@ __metadata: languageName: node linkType: hard -"@mapbox/node-pre-gyp@npm:^2.0.0": - version: 2.0.0 - resolution: "@mapbox/node-pre-gyp@npm:2.0.0" - dependencies: - consola: "npm:^3.2.3" - detect-libc: "npm:^2.0.0" - https-proxy-agent: "npm:^7.0.5" - node-fetch: "npm:^2.6.7" - nopt: "npm:^8.0.0" - semver: "npm:^7.5.3" - tar: "npm:^7.4.0" - bin: - node-pre-gyp: bin/node-pre-gyp - checksum: 10c0/7d874c7f6f5560a87be7207f28d9a4e53b750085a82167608fd573aab8073645e95b3608f69e244df0e1d24e90a66525aeae708aba82ca73ff668ed0ab6abda6 +"@jridgewell/sourcemap-codec@npm:^1.5.5": + version: 1.5.5 + resolution: "@jridgewell/sourcemap-codec@npm:1.5.5" + checksum: 10c0/f9e538f302b63c0ebc06eecb1dd9918dd4289ed36147a0ddce35d6ea4d7ebbda243cda7b2213b6a5e1d8087a298d5cf630fb2bd39329cdecb82017023f6081a0 languageName: node linkType: hard -"@napi-rs/cli@npm:^3.0.4": - version: 3.0.4 - resolution: "@napi-rs/cli@npm:3.0.4" +"@napi-rs/cli@npm:^3.1.5": + version: 3.1.5 + resolution: "@napi-rs/cli@npm:3.1.5" dependencies: "@inquirer/prompts": "npm:^7.4.0" "@napi-rs/cross-toolchain": "npm:^1.0.0" @@ -310,9 +507,9 @@ __metadata: colorette: "npm:^2.0.20" debug: "npm:^4.4.0" emnapi: "npm:^1.4.0" + es-toolkit: "npm:^1.39.8" find-up: "npm:^7.0.0" js-yaml: "npm:^4.1.0" - lodash-es: "npm:^4.17.21" semver: "npm:^7.7.1" typanion: "npm:^3.14.0" peerDependencies: @@ -324,9 +521,9 @@ __metadata: emnapi: optional: true bin: - napi: ./dist/cli.js - napi-raw: ./cli.mjs - checksum: 10c0/0473827231926ad6d4ffa11288fd489d1777e3586c435e5824aafe40a5e4379067726458ce8acbc2ac83ea4626fe52a3c16b0561c24cb077d2bae090153a6eb0 + napi: dist/cli.js + napi-raw: cli.mjs + checksum: 10c0/fe28bcc40f81eb4c368b4f23156f1057583de21a41400b78821829fa1aa95db8268a33fa824741c864af28a464530f05712df135a10013c6b0e4ceef4c31f324 languageName: node linkType: hard @@ -713,14 +910,14 @@ __metadata: languageName: node linkType: hard -"@napi-rs/wasm-runtime@npm:^1.0.1": - version: 1.0.1 - resolution: "@napi-rs/wasm-runtime@npm:1.0.1" +"@napi-rs/wasm-runtime@npm:^1.0.1, @napi-rs/wasm-runtime@npm:^1.0.3": + version: 1.0.3 + resolution: "@napi-rs/wasm-runtime@npm:1.0.3" dependencies: "@emnapi/core": "npm:^1.4.5" "@emnapi/runtime": "npm:^1.4.5" "@tybys/wasm-util": "npm:^0.10.0" - checksum: 10c0/3244105b75637d8d39e76782921fe46e48105bcd390db01a10dc7b596ee99af0f06b7f2b841d7632e756bd3220a5d595b9d426a5453da1ccc895900b894d098f + checksum: 10c0/7918d82477e75931b6e35bb003464382eb93e526362f81a98bf8610407a67b10f4d041931015ad48072c89db547deb7e471dfb91f4ab11ac63a24d8580297f75 languageName: node linkType: hard @@ -865,33 +1062,6 @@ __metadata: languageName: node linkType: hard -"@nodelib/fs.scandir@npm:2.1.5": - version: 2.1.5 - resolution: "@nodelib/fs.scandir@npm:2.1.5" - dependencies: - "@nodelib/fs.stat": "npm:2.0.5" - run-parallel: "npm:^1.1.9" - checksum: 10c0/732c3b6d1b1e967440e65f284bd06e5821fedf10a1bea9ed2bb75956ea1f30e08c44d3def9d6a230666574edbaf136f8cfd319c14fd1f87c66e6a44449afb2eb - languageName: node - linkType: hard - -"@nodelib/fs.stat@npm:2.0.5, @nodelib/fs.stat@npm:^2.0.2": - version: 2.0.5 - resolution: 
"@nodelib/fs.stat@npm:2.0.5" - checksum: 10c0/88dafe5e3e29a388b07264680dc996c17f4bda48d163a9d4f5c1112979f0ce8ec72aa7116122c350b4e7976bc5566dc3ddb579be1ceaacc727872eb4ed93926d - languageName: node - linkType: hard - -"@nodelib/fs.walk@npm:^1.2.3": - version: 1.2.8 - resolution: "@nodelib/fs.walk@npm:1.2.8" - dependencies: - "@nodelib/fs.scandir": "npm:2.1.5" - fastq: "npm:^1.6.0" - checksum: 10c0/db9de047c3bb9b51f9335a7bb46f4fcfb6829fb628318c12115fbaf7d369bfce71c15b103d1fc3b464812d936220ee9bc1c8f762d032c9f6be9acc99249095b1 - languageName: node - linkType: hard - "@npmcli/agent@npm:^3.0.0": version: 3.0.0 resolution: "@npmcli/agent@npm:3.0.0" @@ -1045,38 +1215,216 @@ __metadata: languageName: node linkType: hard -"@rollup/pluginutils@npm:^5.1.3": - version: 5.2.0 - resolution: "@rollup/pluginutils@npm:5.2.0" +"@polka/url@npm:^1.0.0-next.24": + version: 1.0.0-next.29 + resolution: "@polka/url@npm:1.0.0-next.29" + checksum: 10c0/0d58e081844095cb029d3c19a659bfefd09d5d51a2f791bc61eba7ea826f13d6ee204a8a448c2f5a855c17df07b37517373ff916dd05801063c0568ae9937684 + languageName: node + linkType: hard + +"@rollup/rollup-android-arm-eabi@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-android-arm-eabi@npm:4.50.1" + conditions: os=android & cpu=arm + languageName: node + linkType: hard + +"@rollup/rollup-android-arm64@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-android-arm64@npm:4.50.1" + conditions: os=android & cpu=arm64 + languageName: node + linkType: hard + +"@rollup/rollup-darwin-arm64@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-darwin-arm64@npm:4.50.1" + conditions: os=darwin & cpu=arm64 + languageName: node + linkType: hard + +"@rollup/rollup-darwin-x64@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-darwin-x64@npm:4.50.1" + conditions: os=darwin & cpu=x64 + languageName: node + linkType: hard + +"@rollup/rollup-freebsd-arm64@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-freebsd-arm64@npm:4.50.1" + conditions: os=freebsd & cpu=arm64 + languageName: node + linkType: hard + +"@rollup/rollup-freebsd-x64@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-freebsd-x64@npm:4.50.1" + conditions: os=freebsd & cpu=x64 + languageName: node + linkType: hard + +"@rollup/rollup-linux-arm-gnueabihf@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-arm-gnueabihf@npm:4.50.1" + conditions: os=linux & cpu=arm & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-arm-musleabihf@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-arm-musleabihf@npm:4.50.1" + conditions: os=linux & cpu=arm & libc=musl + languageName: node + linkType: hard + +"@rollup/rollup-linux-arm64-gnu@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-arm64-gnu@npm:4.50.1" + conditions: os=linux & cpu=arm64 & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-arm64-musl@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-arm64-musl@npm:4.50.1" + conditions: os=linux & cpu=arm64 & libc=musl + languageName: node + linkType: hard + +"@rollup/rollup-linux-loongarch64-gnu@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-loongarch64-gnu@npm:4.50.1" + conditions: os=linux & cpu=loong64 & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-ppc64-gnu@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-ppc64-gnu@npm:4.50.1" + conditions: os=linux & cpu=ppc64 & libc=glibc + languageName: node + linkType: hard + 
+"@rollup/rollup-linux-riscv64-gnu@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-riscv64-gnu@npm:4.50.1" + conditions: os=linux & cpu=riscv64 & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-riscv64-musl@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-riscv64-musl@npm:4.50.1" + conditions: os=linux & cpu=riscv64 & libc=musl + languageName: node + linkType: hard + +"@rollup/rollup-linux-s390x-gnu@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-s390x-gnu@npm:4.50.1" + conditions: os=linux & cpu=s390x & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-x64-gnu@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-x64-gnu@npm:4.50.1" + conditions: os=linux & cpu=x64 & libc=glibc + languageName: node + linkType: hard + +"@rollup/rollup-linux-x64-musl@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-linux-x64-musl@npm:4.50.1" + conditions: os=linux & cpu=x64 & libc=musl + languageName: node + linkType: hard + +"@rollup/rollup-openharmony-arm64@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-openharmony-arm64@npm:4.50.1" + conditions: os=openharmony & cpu=arm64 + languageName: node + linkType: hard + +"@rollup/rollup-win32-arm64-msvc@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-win32-arm64-msvc@npm:4.50.1" + conditions: os=win32 & cpu=arm64 + languageName: node + linkType: hard + +"@rollup/rollup-win32-ia32-msvc@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-win32-ia32-msvc@npm:4.50.1" + conditions: os=win32 & cpu=ia32 + languageName: node + linkType: hard + +"@rollup/rollup-win32-x64-msvc@npm:4.50.1": + version: 4.50.1 + resolution: "@rollup/rollup-win32-x64-msvc@npm:4.50.1" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + +"@testing-library/dom@npm:^10.4.0": + version: 10.4.1 + resolution: "@testing-library/dom@npm:10.4.1" dependencies: - "@types/estree": "npm:^1.0.0" - estree-walker: "npm:^2.0.2" - picomatch: "npm:^4.0.2" + "@babel/code-frame": "npm:^7.10.4" + "@babel/runtime": "npm:^7.12.5" + "@types/aria-query": "npm:^5.0.1" + aria-query: "npm:5.3.0" + dom-accessibility-api: "npm:^0.5.9" + lz-string: "npm:^1.5.0" + picocolors: "npm:1.1.1" + pretty-format: "npm:^27.0.2" + checksum: 10c0/19ce048012d395ad0468b0dbcc4d0911f6f9e39464d7a8464a587b29707eed5482000dad728f5acc4ed314d2f4d54f34982999a114d2404f36d048278db815b1 + languageName: node + linkType: hard + +"@testing-library/user-event@npm:^14.6.1": + version: 14.6.1 + resolution: "@testing-library/user-event@npm:14.6.1" peerDependencies: - rollup: ^1.20.0||^2.0.0||^3.0.0||^4.0.0 - peerDependenciesMeta: - rollup: - optional: true - checksum: 10c0/794890d512751451bcc06aa112366ef47ea8f9125dac49b1abf72ff8b079518b09359de9c60a013b33266541634e765ae61839c749fae0edb59a463418665c55 + "@testing-library/dom": ">=7.21.4" + checksum: 10c0/75fea130a52bf320d35d46ed54f3eec77e71a56911b8b69a3fe29497b0b9947b2dc80d30f04054ad4ce7f577856ae3e5397ea7dff0ef14944d3909784c7a93fe languageName: node linkType: hard -"@sindresorhus/merge-streams@npm:^2.1.0": - version: 2.3.0 - resolution: "@sindresorhus/merge-streams@npm:2.3.0" - checksum: 10c0/69ee906f3125fb2c6bb6ec5cdd84e8827d93b49b3892bce8b62267116cc7e197b5cccf20c160a1d32c26014ecd14470a72a5e3ee37a58f1d6dadc0db1ccf3894 - languageName: node - linkType: hard - -"@tursodatabase/database@workspace:.": +"@tursodatabase/database-browser@workspace:packages/browser": version: 0.0.0-use.local - resolution: 
"@tursodatabase/database@workspace:." + resolution: "@tursodatabase/database-browser@workspace:packages/browser" dependencies: - "@napi-rs/cli": "npm:^3.0.4" - "@napi-rs/wasm-runtime": "npm:^1.0.1" - ava: "npm:^6.0.1" - better-sqlite3: "npm:^11.9.1" + "@napi-rs/cli": "npm:^3.1.5" + "@napi-rs/wasm-runtime": "npm:^1.0.3" + "@tursodatabase/database-common": "npm:^0.1.5-pre.3" + "@vitest/browser": "npm:^3.2.4" + playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" + vitest: "npm:^3.2.4" + languageName: unknown + linkType: soft + +"@tursodatabase/database-common@npm:^0.1.5-pre.3, @tursodatabase/database-common@workspace:packages/common": + version: 0.0.0-use.local + resolution: "@tursodatabase/database-common@workspace:packages/common" + dependencies: + typescript: "npm:^5.9.2" + languageName: unknown + linkType: soft + +"@tursodatabase/database@workspace:packages/native": + version: 0.0.0-use.local + resolution: "@tursodatabase/database@workspace:packages/native" + dependencies: + "@napi-rs/cli": "npm:^3.1.5" + "@tursodatabase/database-common": "npm:^0.1.5-pre.3" + "@types/node": "npm:^24.3.1" + typescript: "npm:^5.9.2" + vitest: "npm:^3.2.4" languageName: unknown linkType: soft @@ -1089,32 +1437,152 @@ __metadata: languageName: node linkType: hard -"@types/estree@npm:^1.0.0": +"@types/aria-query@npm:^5.0.1": + version: 5.0.4 + resolution: "@types/aria-query@npm:5.0.4" + checksum: 10c0/dc667bc6a3acc7bba2bccf8c23d56cb1f2f4defaa704cfef595437107efaa972d3b3db9ec1d66bc2711bfc35086821edd32c302bffab36f2e79b97f312069f08 + languageName: node + linkType: hard + +"@types/chai@npm:^5.2.2": + version: 5.2.2 + resolution: "@types/chai@npm:5.2.2" + dependencies: + "@types/deep-eql": "npm:*" + checksum: 10c0/49282bf0e8246800ebb36f17256f97bd3a8c4fb31f92ad3c0eaa7623518d7e87f1eaad4ad206960fcaf7175854bdff4cb167e4fe96811e0081b4ada83dd533ec + languageName: node + linkType: hard + +"@types/deep-eql@npm:*": + version: 4.0.2 + resolution: "@types/deep-eql@npm:4.0.2" + checksum: 10c0/bf3f811843117900d7084b9d0c852da9a044d12eb40e6de73b552598a6843c21291a8a381b0532644574beecd5e3491c5ff3a0365ab86b15d59862c025384844 + languageName: node + linkType: hard + +"@types/estree@npm:1.0.8, @types/estree@npm:^1.0.0": version: 1.0.8 resolution: "@types/estree@npm:1.0.8" checksum: 10c0/39d34d1afaa338ab9763f37ad6066e3f349444f9052b9676a7cc0252ef9485a41c6d81c9c4e0d26e9077993354edf25efc853f3224dd4b447175ef62bdcc86a5 languageName: node linkType: hard -"@vercel/nft@npm:^0.29.4": - version: 0.29.4 - resolution: "@vercel/nft@npm:0.29.4" +"@types/node@npm:^24.3.1": + version: 24.3.1 + resolution: "@types/node@npm:24.3.1" dependencies: - "@mapbox/node-pre-gyp": "npm:^2.0.0" - "@rollup/pluginutils": "npm:^5.1.3" - acorn: "npm:^8.6.0" - acorn-import-attributes: "npm:^1.9.5" - async-sema: "npm:^3.1.1" - bindings: "npm:^1.4.0" - estree-walker: "npm:2.0.2" - glob: "npm:^10.4.5" - graceful-fs: "npm:^4.2.9" - node-gyp-build: "npm:^4.2.2" - picomatch: "npm:^4.0.2" - resolve-from: "npm:^5.0.0" - bin: - nft: out/cli.js - checksum: 10c0/84ba32c685f9d7c2c849b1e8c963d3b7eb09d122e666143ed97c3776f5b04a4745605e1d29fd81383f72b1d1c0d7d58e39f06dc92f021b5de079dfa4e8523574 + undici-types: "npm:~7.10.0" + checksum: 10c0/99b86fc32294fcd61136ca1f771026443a1e370e9f284f75e243b29299dd878e18c193deba1ce29a374932db4e30eb80826e1049b9aad02d36f5c30b94b6f928 + languageName: node + linkType: hard + +"@vitest/browser@npm:^3.2.4": + version: 3.2.4 + resolution: "@vitest/browser@npm:3.2.4" + dependencies: + "@testing-library/dom": "npm:^10.4.0" + "@testing-library/user-event": 
"npm:^14.6.1" + "@vitest/mocker": "npm:3.2.4" + "@vitest/utils": "npm:3.2.4" + magic-string: "npm:^0.30.17" + sirv: "npm:^3.0.1" + tinyrainbow: "npm:^2.0.0" + ws: "npm:^8.18.2" + peerDependencies: + playwright: "*" + vitest: 3.2.4 + webdriverio: ^7.0.0 || ^8.0.0 || ^9.0.0 + peerDependenciesMeta: + playwright: + optional: true + safaridriver: + optional: true + webdriverio: + optional: true + checksum: 10c0/0db39daad675aad187eff27d5a7f17a9f533d7abc7476ee1a0b83a9c62a7227b24395f4814e034ecb2ebe39f1a2dec0a8c6a7f79b8d5680c3ac79e408727d742 + languageName: node + linkType: hard + +"@vitest/expect@npm:3.2.4": + version: 3.2.4 + resolution: "@vitest/expect@npm:3.2.4" + dependencies: + "@types/chai": "npm:^5.2.2" + "@vitest/spy": "npm:3.2.4" + "@vitest/utils": "npm:3.2.4" + chai: "npm:^5.2.0" + tinyrainbow: "npm:^2.0.0" + checksum: 10c0/7586104e3fd31dbe1e6ecaafb9a70131e4197dce2940f727b6a84131eee3decac7b10f9c7c72fa5edbdb68b6f854353bd4c0fa84779e274207fb7379563b10db + languageName: node + linkType: hard + +"@vitest/mocker@npm:3.2.4": + version: 3.2.4 + resolution: "@vitest/mocker@npm:3.2.4" + dependencies: + "@vitest/spy": "npm:3.2.4" + estree-walker: "npm:^3.0.3" + magic-string: "npm:^0.30.17" + peerDependencies: + msw: ^2.4.9 + vite: ^5.0.0 || ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + checksum: 10c0/f7a4aea19bbbf8f15905847ee9143b6298b2c110f8b64789224cb0ffdc2e96f9802876aa2ca83f1ec1b6e1ff45e822abb34f0054c24d57b29ab18add06536ccd + languageName: node + linkType: hard + +"@vitest/pretty-format@npm:3.2.4, @vitest/pretty-format@npm:^3.2.4": + version: 3.2.4 + resolution: "@vitest/pretty-format@npm:3.2.4" + dependencies: + tinyrainbow: "npm:^2.0.0" + checksum: 10c0/5ad7d4278e067390d7d633e307fee8103958806a419ca380aec0e33fae71b44a64415f7a9b4bc11635d3c13d4a9186111c581d3cef9c65cc317e68f077456887 + languageName: node + linkType: hard + +"@vitest/runner@npm:3.2.4": + version: 3.2.4 + resolution: "@vitest/runner@npm:3.2.4" + dependencies: + "@vitest/utils": "npm:3.2.4" + pathe: "npm:^2.0.3" + strip-literal: "npm:^3.0.0" + checksum: 10c0/e8be51666c72b3668ae3ea348b0196656a4a5adb836cb5e270720885d9517421815b0d6c98bfdf1795ed02b994b7bfb2b21566ee356a40021f5bf4f6ed4e418a + languageName: node + linkType: hard + +"@vitest/snapshot@npm:3.2.4": + version: 3.2.4 + resolution: "@vitest/snapshot@npm:3.2.4" + dependencies: + "@vitest/pretty-format": "npm:3.2.4" + magic-string: "npm:^0.30.17" + pathe: "npm:^2.0.3" + checksum: 10c0/f8301a3d7d1559fd3d59ed51176dd52e1ed5c2d23aa6d8d6aa18787ef46e295056bc726a021698d8454c16ed825ecba163362f42fa90258bb4a98cfd2c9424fc + languageName: node + linkType: hard + +"@vitest/spy@npm:3.2.4": + version: 3.2.4 + resolution: "@vitest/spy@npm:3.2.4" + dependencies: + tinyspy: "npm:^4.0.3" + checksum: 10c0/6ebf0b4697dc238476d6b6a60c76ba9eb1dd8167a307e30f08f64149612fd50227682b876420e4c2e09a76334e73f72e3ebf0e350714dc22474258292e202024 + languageName: node + linkType: hard + +"@vitest/utils@npm:3.2.4": + version: 3.2.4 + resolution: "@vitest/utils@npm:3.2.4" + dependencies: + "@vitest/pretty-format": "npm:3.2.4" + loupe: "npm:^3.1.4" + tinyrainbow: "npm:^2.0.0" + checksum: 10c0/024a9b8c8bcc12cf40183c246c244b52ecff861c6deb3477cbf487ac8781ad44c68a9c5fd69f8c1361878e55b97c10d99d511f2597f1f7244b5e5101d028ba64 languageName: node linkType: hard @@ -1125,33 +1593,6 @@ __metadata: languageName: node linkType: hard -"acorn-import-attributes@npm:^1.9.5": - version: 1.9.5 - resolution: "acorn-import-attributes@npm:1.9.5" - peerDependencies: - acorn: ^8 - checksum: 
10c0/5926eaaead2326d5a86f322ff1b617b0f698aa61dc719a5baa0e9d955c9885cc71febac3fb5bacff71bbf2c4f9c12db2056883c68c53eb962c048b952e1e013d - languageName: node - linkType: hard - -"acorn-walk@npm:^8.3.4": - version: 8.3.4 - resolution: "acorn-walk@npm:8.3.4" - dependencies: - acorn: "npm:^8.11.0" - checksum: 10c0/76537ac5fb2c37a64560feaf3342023dadc086c46da57da363e64c6148dc21b57d49ace26f949e225063acb6fb441eabffd89f7a3066de5ad37ab3e328927c62 - languageName: node - linkType: hard - -"acorn@npm:^8.11.0, acorn@npm:^8.15.0, acorn@npm:^8.6.0": - version: 8.15.0 - resolution: "acorn@npm:8.15.0" - bin: - acorn: bin/acorn - checksum: 10c0/dec73ff59b7d6628a01eebaece7f2bdb8bb62b9b5926dcad0f8931f2b8b79c2be21f6c68ac095592adb5adb15831a3635d9343e6a91d028bbe85d564875ec3ec - languageName: node - linkType: hard - "agent-base@npm:^7.1.0, agent-base@npm:^7.1.2": version: 7.1.4 resolution: "agent-base@npm:7.1.4" @@ -1176,9 +1617,9 @@ __metadata: linkType: hard "ansi-regex@npm:^6.0.1": - version: 6.1.0 - resolution: "ansi-regex@npm:6.1.0" - checksum: 10c0/a91daeddd54746338478eef88af3439a7edf30f8e23196e2d6ed182da9add559c601266dbef01c2efa46a958ad6f1f8b176799657616c702b5b02e799e7fd8dc + version: 6.2.2 + resolution: "ansi-regex@npm:6.2.2" + checksum: 10c0/05d4acb1d2f59ab2cf4b794339c7b168890d44dda4bf0ce01152a8da0213aca207802f930442ce8cd22d7a92f44907664aac6508904e75e038fa944d2601b30f languageName: node linkType: hard @@ -1191,19 +1632,17 @@ __metadata: languageName: node linkType: hard -"ansi-styles@npm:^6.0.0, ansi-styles@npm:^6.1.0, ansi-styles@npm:^6.2.1": - version: 6.2.1 - resolution: "ansi-styles@npm:6.2.1" - checksum: 10c0/5d1ec38c123984bcedd996eac680d548f31828bd679a66db2bdf11844634dde55fec3efa9c6bb1d89056a5e79c1ac540c4c784d592ea1d25028a92227d2f2d5c +"ansi-styles@npm:^5.0.0": + version: 5.2.0 + resolution: "ansi-styles@npm:5.2.0" + checksum: 10c0/9c4ca80eb3c2fb7b33841c210d2f20807f40865d27008d7c3f707b7f95cab7d67462a565e2388ac3285b71cb3d9bb2173de8da37c57692a362885ec34d6e27df languageName: node linkType: hard -"argparse@npm:^1.0.7": - version: 1.0.10 - resolution: "argparse@npm:1.0.10" - dependencies: - sprintf-js: "npm:~1.0.2" - checksum: 10c0/b2972c5c23c63df66bca144dbc65d180efa74f25f8fd9b7d9a0a6c88ae839db32df3d54770dcb6460cf840d232b60695d1a6b1053f599d84e73f7437087712de +"ansi-styles@npm:^6.1.0": + version: 6.2.3 + resolution: "ansi-styles@npm:6.2.3" + checksum: 10c0/23b8a4ce14e18fb854693b95351e286b771d23d8844057ed2e7d083cd3e708376c3323707ec6a24365f7d7eda3ca00327fe04092e29e551499ec4c8b7bfac868 languageName: node linkType: hard @@ -1214,86 +1653,19 @@ __metadata: languageName: node linkType: hard -"array-find-index@npm:^1.0.1": - version: 1.0.2 - resolution: "array-find-index@npm:1.0.2" - checksum: 10c0/86b9485c74ddd324feab807e10a6de3f9c1683856267236fac4bb4d4667ada6463e106db3f6c540ae6b720e0442b590ec701d13676df4c6af30ebf4da09b4f57 - languageName: node - linkType: hard - -"arrgv@npm:^1.0.2": - version: 1.0.2 - resolution: "arrgv@npm:1.0.2" - checksum: 10c0/7e6e782e6b749923ac7cbc4048ef6fe0844c4a59bfc8932fcd4c44566ba25eed46501f94dd7cf3c7297da88f3f599ca056bfb77d0c2484aebc92f04239f69124 - languageName: node - linkType: hard - -"arrify@npm:^3.0.0": - version: 3.0.0 - resolution: "arrify@npm:3.0.0" - checksum: 10c0/2e26601b8486f29780f1f70f7ac05a226755814c2a3ab42e196748f650af1dc310cd575a11dd4b9841c70fd7460b2dd2b8fe6fb7a3375878e2660706efafa58e - languageName: node - linkType: hard - -"async-sema@npm:^3.1.1": - version: 3.1.1 - resolution: "async-sema@npm:3.1.1" - checksum: 
10c0/a16da9f7f2dbdd00a969bf264b7ad331b59df3eac2b38f529b881c5cc8662594e68ed096d927ec2aabdc13454379cdc6d677bcdb0a3d2db338fb4be17957832b - languageName: node - linkType: hard - -"ava@npm:^6.0.1": - version: 6.4.1 - resolution: "ava@npm:6.4.1" +"aria-query@npm:5.3.0": + version: 5.3.0 + resolution: "aria-query@npm:5.3.0" dependencies: - "@vercel/nft": "npm:^0.29.4" - acorn: "npm:^8.15.0" - acorn-walk: "npm:^8.3.4" - ansi-styles: "npm:^6.2.1" - arrgv: "npm:^1.0.2" - arrify: "npm:^3.0.0" - callsites: "npm:^4.2.0" - cbor: "npm:^10.0.9" - chalk: "npm:^5.4.1" - chunkd: "npm:^2.0.1" - ci-info: "npm:^4.3.0" - ci-parallel-vars: "npm:^1.0.1" - cli-truncate: "npm:^4.0.0" - code-excerpt: "npm:^4.0.0" - common-path-prefix: "npm:^3.0.0" - concordance: "npm:^5.0.4" - currently-unhandled: "npm:^0.4.1" - debug: "npm:^4.4.1" - emittery: "npm:^1.2.0" - figures: "npm:^6.1.0" - globby: "npm:^14.1.0" - ignore-by-default: "npm:^2.1.0" - indent-string: "npm:^5.0.0" - is-plain-object: "npm:^5.0.0" - is-promise: "npm:^4.0.0" - matcher: "npm:^5.0.0" - memoize: "npm:^10.1.0" - ms: "npm:^2.1.3" - p-map: "npm:^7.0.3" - package-config: "npm:^5.0.0" - picomatch: "npm:^4.0.2" - plur: "npm:^5.1.0" - pretty-ms: "npm:^9.2.0" - resolve-cwd: "npm:^3.0.0" - stack-utils: "npm:^2.0.6" - strip-ansi: "npm:^7.1.0" - supertap: "npm:^3.0.1" - temp-dir: "npm:^3.0.0" - write-file-atomic: "npm:^6.0.0" - yargs: "npm:^17.7.2" - peerDependencies: - "@ava/typescript": "*" - peerDependenciesMeta: - "@ava/typescript": - optional: true - bin: - ava: entrypoints/cli.mjs - checksum: 10c0/21972df1031ef46533ea1b7daa132a5fc66841c8a221b6901163d12d2a1cac39bfd8a6d3459da7eb9344fa90fc02f237f2fe2aac8785d04bf5894fa43625be28 + dequal: "npm:^2.0.3" + checksum: 10c0/2bff0d4eba5852a9dd578ecf47eaef0e82cc52569b48469b0aac2db5145db0b17b7a58d9e01237706d1e14b7a1b0ac9b78e9c97027ad97679dd8f91b85da1469 + languageName: node + linkType: hard + +"assertion-error@npm:^2.0.1": + version: 2.0.1 + resolution: "assertion-error@npm:2.0.1" + checksum: 10c0/bbbcb117ac6480138f8c93cf7f535614282dea9dc828f540cdece85e3c665e8f78958b96afac52f29ff883c72638e6a87d469ecc9fe5bc902df03ed24a55dba8 languageName: node linkType: hard @@ -1304,13 +1676,6 @@ __metadata: languageName: node linkType: hard -"base64-js@npm:^1.3.1": - version: 1.5.1 - resolution: "base64-js@npm:1.5.1" - checksum: 10c0/f23823513b63173a001030fae4f2dabe283b99a9d324ade3ad3d148e218134676f1ee8568c877cd79ec1c53158dcf2d2ba527a97c606618928ba99dd930102bf - languageName: node - linkType: hard - "before-after-hook@npm:^4.0.0": version: 4.0.0 resolution: "before-after-hook@npm:4.0.0" @@ -1318,44 +1683,6 @@ __metadata: languageName: node linkType: hard -"better-sqlite3@npm:^11.9.1": - version: 11.10.0 - resolution: "better-sqlite3@npm:11.10.0" - dependencies: - bindings: "npm:^1.5.0" - node-gyp: "npm:latest" - prebuild-install: "npm:^7.1.1" - checksum: 10c0/1fffbf9e5fc9d24847a3ecf09491bceab1c294b46ba41df1c449dc20b6f5c5d9d94ff24becd0b1632ee282bd21278b7fea53a5a6215bb99209ded0ae05eda3b0 - languageName: node - linkType: hard - -"bindings@npm:^1.4.0, bindings@npm:^1.5.0": - version: 1.5.0 - resolution: "bindings@npm:1.5.0" - dependencies: - file-uri-to-path: "npm:1.0.0" - checksum: 10c0/3dab2491b4bb24124252a91e656803eac24292473e56554e35bbfe3cc1875332cfa77600c3bac7564049dc95075bf6fcc63a4609920ff2d64d0fe405fcf0d4ba - languageName: node - linkType: hard - -"bl@npm:^4.0.3": - version: 4.1.0 - resolution: "bl@npm:4.1.0" - dependencies: - buffer: "npm:^5.5.0" - inherits: "npm:^2.0.4" - readable-stream: "npm:^3.4.0" - checksum: 
10c0/02847e1d2cb089c9dc6958add42e3cdeaf07d13f575973963335ac0fdece563a50ac770ac4c8fa06492d2dd276f6cc3b7f08c7cd9c7a7ad0f8d388b2a28def5f - languageName: node - linkType: hard - -"blueimp-md5@npm:^2.10.0": - version: 2.19.0 - resolution: "blueimp-md5@npm:2.19.0" - checksum: 10c0/85d04343537dd99a288c62450341dcce7380d3454c81f8e5a971ddd80307d6f9ef51b5b92ad7d48aaaa92fd6d3a1f6b2f4fada068faae646887f7bfabc17a346 - languageName: node - linkType: hard - "brace-expansion@npm:^2.0.1": version: 2.0.2 resolution: "brace-expansion@npm:2.0.2" @@ -1365,22 +1692,10 @@ __metadata: languageName: node linkType: hard -"braces@npm:^3.0.3": - version: 3.0.3 - resolution: "braces@npm:3.0.3" - dependencies: - fill-range: "npm:^7.1.1" - checksum: 10c0/7c6dfd30c338d2997ba77500539227b9d1f85e388a5f43220865201e407e076783d0881f2d297b9f80951b4c957fcf0b51c1d2d24227631643c3f7c284b0aa04 - languageName: node - linkType: hard - -"buffer@npm:^5.5.0": - version: 5.7.1 - resolution: "buffer@npm:5.7.1" - dependencies: - base64-js: "npm:^1.3.1" - ieee754: "npm:^1.1.13" - checksum: 10c0/27cac81cff434ed2876058d72e7c4789d11ff1120ef32c9de48f59eab58179b66710c488987d295ae89a228f835fc66d088652dffeb8e3ba8659f80eb091d55e +"cac@npm:^6.7.14": + version: 6.7.14 + resolution: "cac@npm:6.7.14" + checksum: 10c0/4ee06aaa7bab8981f0d54e5f5f9d4adcd64058e9697563ce336d8a3878ed018ee18ebe5359b2430eceae87e0758e62ea2019c3f52ae6e211b1bd2e133856cd10 languageName: node linkType: hard @@ -1404,26 +1719,16 @@ __metadata: languageName: node linkType: hard -"callsites@npm:^4.2.0": - version: 4.2.0 - resolution: "callsites@npm:4.2.0" - checksum: 10c0/8f7e269ec09fc0946bb22d838a8bc7932e1909ab4a833b964749f4d0e8bdeaa1f253287c4f911f61781f09620b6925ccd19a5ea4897489c4e59442c660c312a3 - languageName: node - linkType: hard - -"cbor@npm:^10.0.9": - version: 10.0.9 - resolution: "cbor@npm:10.0.9" +"chai@npm:^5.2.0": + version: 5.3.3 + resolution: "chai@npm:5.3.3" dependencies: - nofilter: "npm:^3.0.2" - checksum: 10c0/49b59036c340ab0c6f4fa39aaf37ed6cb2bec6d54ec27b45a03f5df0fcd5767594b0abb5cbf44d69bdd8593d6a2e131c3e7017c511bacf05f5aa4ff2af82d07d - languageName: node - linkType: hard - -"chalk@npm:^5.4.1": - version: 5.4.1 - resolution: "chalk@npm:5.4.1" - checksum: 10c0/b23e88132c702f4855ca6d25cb5538b1114343e41472d5263ee8a37cccfccd9c4216d111e1097c6a27830407a1dc81fecdf2a56f2c63033d4dbbd88c10b0dcef + assertion-error: "npm:^2.0.1" + check-error: "npm:^2.1.1" + deep-eql: "npm:^5.0.1" + loupe: "npm:^3.1.0" + pathval: "npm:^2.0.0" + checksum: 10c0/b360fd4d38861622e5010c2f709736988b05c7f31042305fa3f4e9911f6adb80ccfb4e302068bf8ed10e835c2e2520cba0f5edc13d878b886987e5aa62483f53 languageName: node linkType: hard @@ -1434,10 +1739,10 @@ __metadata: languageName: node linkType: hard -"chownr@npm:^1.1.1": - version: 1.1.4 - resolution: "chownr@npm:1.1.4" - checksum: 10c0/ed57952a84cc0c802af900cf7136de643d3aba2eecb59d29344bc2f3f9bf703a301b9d84cdc71f82c3ffc9ccde831b0d92f5b45f91727d6c9da62f23aef9d9db +"check-error@npm:^2.1.1": + version: 2.1.1 + resolution: "check-error@npm:2.1.1" + checksum: 10c0/979f13eccab306cf1785fa10941a590b4e7ea9916ea2a4f8c87f0316fc3eab07eabefb6e587424ef0f88cbcd3805791f172ea739863ca3d7ce2afc54641c7f0e languageName: node linkType: hard @@ -1448,37 +1753,6 @@ __metadata: languageName: node linkType: hard -"chunkd@npm:^2.0.1": - version: 2.0.1 - resolution: "chunkd@npm:2.0.1" - checksum: 10c0/4e0c5aac6048ecedfa4cd0a5f6c4f010c70a7b7645aeca7bfeb47cb0733c3463054f0ced3f2667b2e0e67edd75d68a8e05481b01115ba3f8a952a93026254504 - languageName: node - linkType: hard - 
-"ci-info@npm:^4.3.0": - version: 4.3.0 - resolution: "ci-info@npm:4.3.0" - checksum: 10c0/60d3dfe95d75c01454ec1cfd5108617dd598a28a2a3e148bd7e1523c1c208b5f5a3007cafcbe293e6fd0a5a310cc32217c5dc54743eeabc0a2bec80072fc055c - languageName: node - linkType: hard - -"ci-parallel-vars@npm:^1.0.1": - version: 1.0.1 - resolution: "ci-parallel-vars@npm:1.0.1" - checksum: 10c0/80952f699cbbc146092b077b4f3e28d085620eb4e6be37f069b4dbb3db0ee70e8eec3beef4ebe70ff60631e9fc743b9d0869678489f167442cac08b260e5ac08 - languageName: node - linkType: hard - -"cli-truncate@npm:^4.0.0": - version: 4.0.0 - resolution: "cli-truncate@npm:4.0.0" - dependencies: - slice-ansi: "npm:^5.0.0" - string-width: "npm:^7.0.0" - checksum: 10c0/d7f0b73e3d9b88cb496e6c086df7410b541b56a43d18ade6a573c9c18bd001b1c3fba1ad578f741a4218fdc794d042385f8ac02c25e1c295a2d8b9f3cb86eb4c - languageName: node - linkType: hard - "cli-width@npm:^4.1.0": version: 4.1.0 resolution: "cli-width@npm:4.1.0" @@ -1497,26 +1771,6 @@ __metadata: languageName: node linkType: hard -"cliui@npm:^8.0.1": - version: 8.0.1 - resolution: "cliui@npm:8.0.1" - dependencies: - string-width: "npm:^4.2.0" - strip-ansi: "npm:^6.0.1" - wrap-ansi: "npm:^7.0.0" - checksum: 10c0/4bda0f09c340cbb6dfdc1ed508b3ca080f12992c18d68c6be4d9cf51756033d5266e61ec57529e610dacbf4da1c634423b0c1b11037709cc6b09045cbd815df5 - languageName: node - linkType: hard - -"code-excerpt@npm:^4.0.0": - version: 4.0.0 - resolution: "code-excerpt@npm:4.0.0" - dependencies: - convert-to-spaces: "npm:^2.0.1" - checksum: 10c0/b6c5a06e039cecd2ab6a0e10ee0831de8362107d1f298ca3558b5f9004cb8e0260b02dd6c07f57b9a0e346c76864d2873311ee1989809fdeb05bd5fbbadde773 - languageName: node - linkType: hard - "color-convert@npm:^2.0.1": version: 2.0.1 resolution: "color-convert@npm:2.0.1" @@ -1540,43 +1794,6 @@ __metadata: languageName: node linkType: hard -"common-path-prefix@npm:^3.0.0": - version: 3.0.0 - resolution: "common-path-prefix@npm:3.0.0" - checksum: 10c0/c4a74294e1b1570f4a8ab435285d185a03976c323caa16359053e749db4fde44e3e6586c29cd051100335e11895767cbbd27ea389108e327d62f38daf4548fdb - languageName: node - linkType: hard - -"concordance@npm:^5.0.4": - version: 5.0.4 - resolution: "concordance@npm:5.0.4" - dependencies: - date-time: "npm:^3.1.0" - esutils: "npm:^2.0.3" - fast-diff: "npm:^1.2.0" - js-string-escape: "npm:^1.0.1" - lodash: "npm:^4.17.15" - md5-hex: "npm:^3.0.1" - semver: "npm:^7.3.2" - well-known-symbols: "npm:^2.0.0" - checksum: 10c0/59b440f330df3a7c9aa148ba588b3e99aed86acab225b4f01ffcea34ace4cf11f817e31153254e8f38ed48508998dad40b9106951a743c334d751f7ab21afb8a - languageName: node - linkType: hard - -"consola@npm:^3.2.3": - version: 3.4.2 - resolution: "consola@npm:3.4.2" - checksum: 10c0/7cebe57ecf646ba74b300bcce23bff43034ed6fbec9f7e39c27cee1dc00df8a21cd336b466ad32e304ea70fba04ec9e890c200270de9a526ce021ba8a7e4c11a - languageName: node - linkType: hard - -"convert-to-spaces@npm:^2.0.1": - version: 2.0.1 - resolution: "convert-to-spaces@npm:2.0.1" - checksum: 10c0/d90aa0e3b6a27f9d5265a8d32def3c5c855b3e823a9db1f26d772f8146d6b91020a2fdfd905ce8048a73fad3aaf836fef8188c67602c374405e2ae8396c4ac46 - languageName: node - linkType: hard - "cross-spawn@npm:^7.0.6": version: 7.0.6 resolution: "cross-spawn@npm:7.0.6" @@ -1588,24 +1805,6 @@ __metadata: languageName: node linkType: hard -"currently-unhandled@npm:^0.4.1": - version: 0.4.1 - resolution: "currently-unhandled@npm:0.4.1" - dependencies: - array-find-index: "npm:^1.0.1" - checksum: 
10c0/32d197689ec32f035910202c1abb0dc6424dce01d7b51779c685119b380d98535c110ffff67a262fc7e367612a7dfd30d3d3055f9a6634b5a9dd1302de7ef11c - languageName: node - linkType: hard - -"date-time@npm:^3.1.0": - version: 3.1.0 - resolution: "date-time@npm:3.1.0" - dependencies: - time-zone: "npm:^1.0.0" - checksum: 10c0/aa3e2e930d74b0b9e90f69de7a16d3376e30f21f1f4ce9a2311d8fec32d760e776efea752dafad0ce188187265235229013036202be053fc2d7979813bfb6ded - languageName: node - linkType: hard - "debug@npm:4, debug@npm:^4.3.4, debug@npm:^4.4.0, debug@npm:^4.4.1": version: 4.4.1 resolution: "debug@npm:4.4.1" @@ -1618,26 +1817,24 @@ __metadata: languageName: node linkType: hard -"decompress-response@npm:^6.0.0": - version: 6.0.0 - resolution: "decompress-response@npm:6.0.0" - dependencies: - mimic-response: "npm:^3.1.0" - checksum: 10c0/bd89d23141b96d80577e70c54fb226b2f40e74a6817652b80a116d7befb8758261ad073a8895648a29cc0a5947021ab66705cb542fa9c143c82022b27c5b175e +"deep-eql@npm:^5.0.1": + version: 5.0.2 + resolution: "deep-eql@npm:5.0.2" + checksum: 10c0/7102cf3b7bb719c6b9c0db2e19bf0aa9318d141581befe8c7ce8ccd39af9eaa4346e5e05adef7f9bd7015da0f13a3a25dcfe306ef79dc8668aedbecb658dd247 languageName: node linkType: hard -"deep-extend@npm:^0.6.0": - version: 0.6.0 - resolution: "deep-extend@npm:0.6.0" - checksum: 10c0/1c6b0abcdb901e13a44c7d699116d3d4279fdb261983122a3783e7273844d5f2537dc2e1c454a23fcf645917f93fbf8d07101c1d03c015a87faa662755212566 +"dequal@npm:^2.0.3": + version: 2.0.3 + resolution: "dequal@npm:2.0.3" + checksum: 10c0/f98860cdf58b64991ae10205137c0e97d384c3a4edc7f807603887b7c4b850af1224a33d88012009f150861cbee4fa2d322c4cc04b9313bee312e47f6ecaa888 languageName: node linkType: hard -"detect-libc@npm:^2.0.0": - version: 2.0.4 - resolution: "detect-libc@npm:2.0.4" - checksum: 10c0/c15541f836eba4b1f521e4eecc28eefefdbc10a94d3b8cb4c507689f332cc111babb95deda66f2de050b22122113189986d5190be97d51b5a2b23b938415e67c +"dom-accessibility-api@npm:^0.5.9": + version: 0.5.16 + resolution: "dom-accessibility-api@npm:0.5.16" + checksum: 10c0/b2c2eda4fae568977cdac27a9f0c001edf4f95a6a6191dfa611e3721db2478d1badc01db5bb4fa8a848aeee13e442a6c2a4386d65ec65a1436f24715a2f8d053 languageName: node linkType: hard @@ -1648,13 +1845,6 @@ __metadata: languageName: node linkType: hard -"emittery@npm:^1.2.0": - version: 1.2.0 - resolution: "emittery@npm:1.2.0" - checksum: 10c0/3b16d67b2cbbc19d44fa124684039956dc94c376cefa8c7b29f4c934d9d370e6819f642cddaa343b83b1fc03fda554a1498e12f5861caf9d6f6394ff4b6e808a - languageName: node - linkType: hard - "emnapi@npm:^1.4.0": version: 1.4.5 resolution: "emnapi@npm:1.4.5" @@ -1667,13 +1857,6 @@ __metadata: languageName: node linkType: hard -"emoji-regex@npm:^10.3.0": - version: 10.4.0 - resolution: "emoji-regex@npm:10.4.0" - checksum: 10c0/a3fcedfc58bfcce21a05a5f36a529d81e88d602100145fcca3dc6f795e3c8acc4fc18fe773fbf9b6d6e9371205edb3afa2668ec3473fa2aa7fd47d2a9d46482d - languageName: node - linkType: hard - "emoji-regex@npm:^8.0.0": version: 8.0.0 resolution: "emoji-regex@npm:8.0.0" @@ -1697,15 +1880,6 @@ __metadata: languageName: node linkType: hard -"end-of-stream@npm:^1.1.0, end-of-stream@npm:^1.4.1": - version: 1.4.5 - resolution: "end-of-stream@npm:1.4.5" - dependencies: - once: "npm:^1.4.0" - checksum: 10c0/b0701c92a10b89afb1cb45bf54a5292c6f008d744eb4382fa559d54775ff31617d1d7bc3ef617575f552e24fad2c7c1a1835948c66b3f3a4be0a6c1f35c883d8 - languageName: node - linkType: hard - "env-paths@npm:^2.2.0": version: 2.2.1 resolution: "env-paths@npm:2.2.1" @@ -1720,55 +1894,127 @@ __metadata: languageName: node 
linkType: hard -"escalade@npm:^3.1.1": - version: 3.2.0 - resolution: "escalade@npm:3.2.0" - checksum: 10c0/ced4dd3a78e15897ed3be74e635110bbf3b08877b0a41be50dcb325ee0e0b5f65fc2d50e9845194d7c4633f327e2e1c6cce00a71b617c5673df0374201d67f65 +"es-module-lexer@npm:^1.7.0": + version: 1.7.0 + resolution: "es-module-lexer@npm:1.7.0" + checksum: 10c0/4c935affcbfeba7fb4533e1da10fa8568043df1e3574b869385980de9e2d475ddc36769891936dbb07036edb3c3786a8b78ccf44964cd130dedc1f2c984b6c7b languageName: node linkType: hard -"escape-string-regexp@npm:^2.0.0": - version: 2.0.0 - resolution: "escape-string-regexp@npm:2.0.0" - checksum: 10c0/2530479fe8db57eace5e8646c9c2a9c80fa279614986d16dcc6bcaceb63ae77f05a851ba6c43756d816c61d7f4534baf56e3c705e3e0d884818a46808811c507 +"es-toolkit@npm:^1.39.8": + version: 1.39.10 + resolution: "es-toolkit@npm:1.39.10" + dependenciesMeta: + "@trivago/prettier-plugin-sort-imports@4.3.0": + unplugged: true + prettier-plugin-sort-re-exports@0.0.1: + unplugged: true + checksum: 10c0/244dd6be25bc8c7af9f085f5b9aae08169eca760fc7d4735020f8f711b6a572e0bf205400326fa85a7924e20747d315756dba1b3a5f0d2887231374ec3651a98 languageName: node linkType: hard -"escape-string-regexp@npm:^5.0.0": - version: 5.0.0 - resolution: "escape-string-regexp@npm:5.0.0" - checksum: 10c0/6366f474c6f37a802800a435232395e04e9885919873e382b157ab7e8f0feb8fed71497f84a6f6a81a49aab41815522f5839112bd38026d203aea0c91622df95 - languageName: node - linkType: hard - -"esprima@npm:^4.0.0": - version: 4.0.1 - resolution: "esprima@npm:4.0.1" +"esbuild@npm:^0.25.0": + version: 0.25.9 + resolution: "esbuild@npm:0.25.9" + dependencies: + "@esbuild/aix-ppc64": "npm:0.25.9" + "@esbuild/android-arm": "npm:0.25.9" + "@esbuild/android-arm64": "npm:0.25.9" + "@esbuild/android-x64": "npm:0.25.9" + "@esbuild/darwin-arm64": "npm:0.25.9" + "@esbuild/darwin-x64": "npm:0.25.9" + "@esbuild/freebsd-arm64": "npm:0.25.9" + "@esbuild/freebsd-x64": "npm:0.25.9" + "@esbuild/linux-arm": "npm:0.25.9" + "@esbuild/linux-arm64": "npm:0.25.9" + "@esbuild/linux-ia32": "npm:0.25.9" + "@esbuild/linux-loong64": "npm:0.25.9" + "@esbuild/linux-mips64el": "npm:0.25.9" + "@esbuild/linux-ppc64": "npm:0.25.9" + "@esbuild/linux-riscv64": "npm:0.25.9" + "@esbuild/linux-s390x": "npm:0.25.9" + "@esbuild/linux-x64": "npm:0.25.9" + "@esbuild/netbsd-arm64": "npm:0.25.9" + "@esbuild/netbsd-x64": "npm:0.25.9" + "@esbuild/openbsd-arm64": "npm:0.25.9" + "@esbuild/openbsd-x64": "npm:0.25.9" + "@esbuild/openharmony-arm64": "npm:0.25.9" + "@esbuild/sunos-x64": "npm:0.25.9" + "@esbuild/win32-arm64": "npm:0.25.9" + "@esbuild/win32-ia32": "npm:0.25.9" + "@esbuild/win32-x64": "npm:0.25.9" + dependenciesMeta: + "@esbuild/aix-ppc64": + optional: true + "@esbuild/android-arm": + optional: true + "@esbuild/android-arm64": + optional: true + "@esbuild/android-x64": + optional: true + "@esbuild/darwin-arm64": + optional: true + "@esbuild/darwin-x64": + optional: true + "@esbuild/freebsd-arm64": + optional: true + "@esbuild/freebsd-x64": + optional: true + "@esbuild/linux-arm": + optional: true + "@esbuild/linux-arm64": + optional: true + "@esbuild/linux-ia32": + optional: true + "@esbuild/linux-loong64": + optional: true + "@esbuild/linux-mips64el": + optional: true + "@esbuild/linux-ppc64": + optional: true + "@esbuild/linux-riscv64": + optional: true + "@esbuild/linux-s390x": + optional: true + "@esbuild/linux-x64": + optional: true + "@esbuild/netbsd-arm64": + optional: true + "@esbuild/netbsd-x64": + optional: true + "@esbuild/openbsd-arm64": + optional: true + "@esbuild/openbsd-x64": + 
optional: true + "@esbuild/openharmony-arm64": + optional: true + "@esbuild/sunos-x64": + optional: true + "@esbuild/win32-arm64": + optional: true + "@esbuild/win32-ia32": + optional: true + "@esbuild/win32-x64": + optional: true bin: - esparse: ./bin/esparse.js - esvalidate: ./bin/esvalidate.js - checksum: 10c0/ad4bab9ead0808cf56501750fd9d3fb276f6b105f987707d059005d57e182d18a7c9ec7f3a01794ebddcca676773e42ca48a32d67a250c9d35e009ca613caba3 + esbuild: bin/esbuild + checksum: 10c0/aaa1284c75fcf45c82f9a1a117fe8dc5c45628e3386bda7d64916ae27730910b51c5aec7dd45a6ba19256be30ba2935e64a8f011a3f0539833071e06bf76d5b3 languageName: node linkType: hard -"estree-walker@npm:2.0.2, estree-walker@npm:^2.0.2": - version: 2.0.2 - resolution: "estree-walker@npm:2.0.2" - checksum: 10c0/53a6c54e2019b8c914dc395890153ffdc2322781acf4bd7d1a32d7aedc1710807bdcd866ac133903d5629ec601fbb50abe8c2e5553c7f5a0afdd9b6af6c945af +"estree-walker@npm:^3.0.3": + version: 3.0.3 + resolution: "estree-walker@npm:3.0.3" + dependencies: + "@types/estree": "npm:^1.0.0" + checksum: 10c0/c12e3c2b2642d2bcae7d5aa495c60fa2f299160946535763969a1c83fc74518ffa9c2cd3a8b69ac56aea547df6a8aac25f729a342992ef0bbac5f1c73e78995d languageName: node linkType: hard -"esutils@npm:^2.0.3": - version: 2.0.3 - resolution: "esutils@npm:2.0.3" - checksum: 10c0/9a2fe69a41bfdade834ba7c42de4723c97ec776e40656919c62cbd13607c45e127a003f05f724a1ea55e5029a4cf2de444b13009f2af71271e42d93a637137c7 - languageName: node - linkType: hard - -"expand-template@npm:^2.0.3": - version: 2.0.3 - resolution: "expand-template@npm:2.0.3" - checksum: 10c0/1c9e7afe9acadf9d373301d27f6a47b34e89b3391b1ef38b7471d381812537ef2457e620ae7f819d2642ce9c43b189b3583813ec395e2938319abe356a9b2f51 +"expect-type@npm:^1.2.1": + version: 1.2.2 + resolution: "expect-type@npm:1.2.2" + checksum: 10c0/6019019566063bbc7a690d9281d920b1a91284a4a093c2d55d71ffade5ac890cf37a51e1da4602546c4b56569d2ad2fc175a2ccee77d1ae06cb3af91ef84f44b languageName: node linkType: hard @@ -1797,76 +2043,15 @@ __metadata: languageName: node linkType: hard -"fast-diff@npm:^1.2.0": - version: 1.3.0 - resolution: "fast-diff@npm:1.3.0" - checksum: 10c0/5c19af237edb5d5effda008c891a18a585f74bf12953be57923f17a3a4d0979565fc64dbc73b9e20926b9d895f5b690c618cbb969af0cf022e3222471220ad29 - languageName: node - linkType: hard - -"fast-glob@npm:^3.3.3": - version: 3.3.3 - resolution: "fast-glob@npm:3.3.3" - dependencies: - "@nodelib/fs.stat": "npm:^2.0.2" - "@nodelib/fs.walk": "npm:^1.2.3" - glob-parent: "npm:^5.1.2" - merge2: "npm:^1.3.0" - micromatch: "npm:^4.0.8" - checksum: 10c0/f6aaa141d0d3384cf73cbcdfc52f475ed293f6d5b65bfc5def368b09163a9f7e5ec2b3014d80f733c405f58e470ee0cc451c2937685045cddcdeaa24199c43fe - languageName: node - linkType: hard - -"fastq@npm:^1.6.0": - version: 1.19.1 - resolution: "fastq@npm:1.19.1" - dependencies: - reusify: "npm:^1.0.4" - checksum: 10c0/ebc6e50ac7048daaeb8e64522a1ea7a26e92b3cee5cd1c7f2316cdca81ba543aa40a136b53891446ea5c3a67ec215fbaca87ad405f102dd97012f62916905630 - languageName: node - linkType: hard - -"fdir@npm:^6.4.4": - version: 6.4.6 - resolution: "fdir@npm:6.4.6" +"fdir@npm:^6.5.0": + version: 6.5.0 + resolution: "fdir@npm:6.5.0" peerDependencies: picomatch: ^3 || ^4 peerDependenciesMeta: picomatch: optional: true - checksum: 10c0/45b559cff889934ebb8bc498351e5acba40750ada7e7d6bde197768d2fa67c149be8ae7f8ff34d03f4e1eb20f2764116e56440aaa2f6689e9a4aa7ef06acafe9 - languageName: node - linkType: hard - -"figures@npm:^6.1.0": - version: 6.1.0 - resolution: "figures@npm:6.1.0" - dependencies: - 
is-unicode-supported: "npm:^2.0.0" - checksum: 10c0/9159df4264d62ef447a3931537de92f5012210cf5135c35c010df50a2169377581378149abfe1eb238bd6acbba1c0d547b1f18e0af6eee49e30363cedaffcfe4 - languageName: node - linkType: hard - -"file-uri-to-path@npm:1.0.0": - version: 1.0.0 - resolution: "file-uri-to-path@npm:1.0.0" - checksum: 10c0/3b545e3a341d322d368e880e1c204ef55f1d45cdea65f7efc6c6ce9e0c4d22d802d5629320eb779d006fe59624ac17b0e848d83cc5af7cd101f206cb704f5519 - languageName: node - linkType: hard - -"fill-range@npm:^7.1.1": - version: 7.1.1 - resolution: "fill-range@npm:7.1.1" - dependencies: - to-regex-range: "npm:^5.0.1" - checksum: 10c0/b75b691bbe065472f38824f694c2f7449d7f5004aa950426a2c28f0306c60db9b880c0b0e4ed819997ffb882d1da02cfcfc819bddc94d71627f5269682edf018 - languageName: node - linkType: hard - -"find-up-simple@npm:^1.0.0": - version: 1.0.1 - resolution: "find-up-simple@npm:1.0.1" - checksum: 10c0/ad34de157b7db925d50ff78302fefb28e309f3bc947c93ffca0f9b0bccf9cf1a2dc57d805d5c94ec9fc60f4838f5dbdfd2a48ecd77c23015fa44c6dd5f60bc40 + checksum: 10c0/e345083c4306b3aed6cb8ec551e26c36bab5c511e99ea4576a16750ddc8d3240e63826cc624f5ae17ad4dc82e68a253213b60d556c11bfad064b7607847ed07f languageName: node linkType: hard @@ -1891,13 +2076,6 @@ __metadata: languageName: node linkType: hard -"fs-constants@npm:^1.0.0": - version: 1.0.0 - resolution: "fs-constants@npm:1.0.0" - checksum: 10c0/a0cde99085f0872f4d244e83e03a46aa387b74f5a5af750896c6b05e9077fac00e9932fdf5aef84f2f16634cd473c63037d7a512576da7d5c2b9163d1909f3a8 - languageName: node - linkType: hard - "fs-minipass@npm:^3.0.0": version: 3.0.3 resolution: "fs-minipass@npm:3.0.3" @@ -1907,37 +2085,45 @@ __metadata: languageName: node linkType: hard -"get-caller-file@npm:^2.0.5": - version: 2.0.5 - resolution: "get-caller-file@npm:2.0.5" - checksum: 10c0/c6c7b60271931fa752aeb92f2b47e355eac1af3a2673f47c9589e8f8a41adc74d45551c1bc57b5e66a80609f10ffb72b6f575e4370d61cc3f7f3aaff01757cde - languageName: node - linkType: hard - -"get-east-asian-width@npm:^1.0.0": - version: 1.3.0 - resolution: "get-east-asian-width@npm:1.3.0" - checksum: 10c0/1a049ba697e0f9a4d5514c4623781c5246982bdb61082da6b5ae6c33d838e52ce6726407df285cdbb27ec1908b333cf2820989bd3e986e37bb20979437fdf34b - languageName: node - linkType: hard - -"github-from-package@npm:0.0.0": - version: 0.0.0 - resolution: "github-from-package@npm:0.0.0" - checksum: 10c0/737ee3f52d0a27e26332cde85b533c21fcdc0b09fb716c3f8e522cfaa9c600d4a631dec9fcde179ec9d47cca89017b7848ed4d6ae6b6b78f936c06825b1fcc12 - languageName: node - linkType: hard - -"glob-parent@npm:^5.1.2": - version: 5.1.2 - resolution: "glob-parent@npm:5.1.2" +"fsevents@npm:2.3.2": + version: 2.3.2 + resolution: "fsevents@npm:2.3.2" dependencies: - is-glob: "npm:^4.0.1" - checksum: 10c0/cab87638e2112bee3f839ef5f6e0765057163d39c66be8ec1602f3823da4692297ad4e972de876ea17c44d652978638d2fd583c6713d0eb6591706825020c9ee + node-gyp: "npm:latest" + checksum: 10c0/be78a3efa3e181cda3cf7a4637cb527bcebb0bd0ea0440105a3bb45b86f9245b307dc10a2507e8f4498a7d4ec349d1910f4d73e4d4495b16103106e07eee735b + conditions: os=darwin languageName: node linkType: hard -"glob@npm:^10.2.2, glob@npm:^10.4.5": +"fsevents@npm:~2.3.2, fsevents@npm:~2.3.3": + version: 2.3.3 + resolution: "fsevents@npm:2.3.3" + dependencies: + node-gyp: "npm:latest" + checksum: 10c0/a1f0c44595123ed717febbc478aa952e47adfc28e2092be66b8ab1635147254ca6cfe1df792a8997f22716d4cbafc73309899ff7bfac2ac3ad8cf2e4ecc3ec60 + conditions: os=darwin + languageName: node + linkType: hard + 
+"fsevents@patch:fsevents@npm%3A2.3.2#optional!builtin": + version: 2.3.2 + resolution: "fsevents@patch:fsevents@npm%3A2.3.2#optional!builtin::version=2.3.2&hash=df0bf1" + dependencies: + node-gyp: "npm:latest" + conditions: os=darwin + languageName: node + linkType: hard + +"fsevents@patch:fsevents@npm%3A~2.3.2#optional!builtin, fsevents@patch:fsevents@npm%3A~2.3.3#optional!builtin": + version: 2.3.3 + resolution: "fsevents@patch:fsevents@npm%3A2.3.3#optional!builtin::version=2.3.3&hash=df0bf1" + dependencies: + node-gyp: "npm:latest" + conditions: os=darwin + languageName: node + linkType: hard + +"glob@npm:^10.2.2": version: 10.4.5 resolution: "glob@npm:10.4.5" dependencies: @@ -1953,21 +2139,7 @@ __metadata: languageName: node linkType: hard -"globby@npm:^14.1.0": - version: 14.1.0 - resolution: "globby@npm:14.1.0" - dependencies: - "@sindresorhus/merge-streams": "npm:^2.1.0" - fast-glob: "npm:^3.3.3" - ignore: "npm:^7.0.3" - path-type: "npm:^6.0.0" - slash: "npm:^5.1.0" - unicorn-magic: "npm:^0.3.0" - checksum: 10c0/527a1063c5958255969620c6fa4444a2b2e9278caddd571d46dfbfa307cb15977afb746e84d682ba5b6c94fc081e8997f80ff05dd235441ba1cb16f86153e58e - languageName: node - linkType: hard - -"graceful-fs@npm:^4.2.6, graceful-fs@npm:^4.2.9": +"graceful-fs@npm:^4.2.6": version: 4.2.11 resolution: "graceful-fs@npm:4.2.11" checksum: 10c0/386d011a553e02bc594ac2ca0bd6d9e4c22d7fa8cfbfc448a6d148c59ea881b092db9dbe3547ae4b88e55f1b01f7c4a2ecc53b310c042793e63aa44cf6c257f2 @@ -1991,7 +2163,7 @@ __metadata: languageName: node linkType: hard -"https-proxy-agent@npm:^7.0.1, https-proxy-agent@npm:^7.0.5": +"https-proxy-agent@npm:^7.0.1": version: 7.0.6 resolution: "https-proxy-agent@npm:7.0.6" dependencies: @@ -2019,27 +2191,6 @@ __metadata: languageName: node linkType: hard -"ieee754@npm:^1.1.13": - version: 1.2.1 - resolution: "ieee754@npm:1.2.1" - checksum: 10c0/b0782ef5e0935b9f12883a2e2aa37baa75da6e66ce6515c168697b42160807d9330de9a32ec1ed73149aea02e0d822e572bca6f1e22bdcbd2149e13b050b17bb - languageName: node - linkType: hard - -"ignore-by-default@npm:^2.1.0": - version: 2.1.0 - resolution: "ignore-by-default@npm:2.1.0" - checksum: 10c0/3a6040dac25ed9da39dee73bf1634fdd1e15b0eb7cf52a6bdec81c310565782d8811c104ce40acb3d690d61c5fc38a91c78e6baee830a8a2232424dbc6b66981 - languageName: node - linkType: hard - -"ignore@npm:^7.0.3": - version: 7.0.5 - resolution: "ignore@npm:7.0.5" - checksum: 10c0/ae00db89fe873064a093b8999fe4cc284b13ef2a178636211842cceb650b9c3e390d3339191acb145d81ed5379d2074840cf0c33a20bdbd6f32821f79eb4ad5d - languageName: node - linkType: hard - "imurmurhash@npm:^0.1.4": version: 0.1.4 resolution: "imurmurhash@npm:0.1.4" @@ -2047,48 +2198,10 @@ __metadata: languageName: node linkType: hard -"indent-string@npm:^5.0.0": - version: 5.0.0 - resolution: "indent-string@npm:5.0.0" - checksum: 10c0/8ee77b57d92e71745e133f6f444d6fa3ed503ad0e1bcd7e80c8da08b42375c07117128d670589725ed07b1978065803fa86318c309ba45415b7fe13e7f170220 - languageName: node - linkType: hard - -"inherits@npm:^2.0.3, inherits@npm:^2.0.4": - version: 2.0.4 - resolution: "inherits@npm:2.0.4" - checksum: 10c0/4e531f648b29039fb7426fb94075e6545faa1eb9fe83c29f0b6d9e7263aceb4289d2d4557db0d428188eeb449cc7c5e77b0a0b2c4e248ff2a65933a0dee49ef2 - languageName: node - linkType: hard - -"ini@npm:~1.3.0": - version: 1.3.8 - resolution: "ini@npm:1.3.8" - checksum: 10c0/ec93838d2328b619532e4f1ff05df7909760b6f66d9c9e2ded11e5c1897d6f2f9980c54dd638f88654b00919ce31e827040631eab0a3969e4d1abefa0719516a - languageName: node - linkType: hard - 
-"ip-address@npm:^9.0.5": - version: 9.0.5 - resolution: "ip-address@npm:9.0.5" - dependencies: - jsbn: "npm:1.1.0" - sprintf-js: "npm:^1.1.3" - checksum: 10c0/331cd07fafcb3b24100613e4b53e1a2b4feab11e671e655d46dc09ee233da5011284d09ca40c4ecbdfe1d0004f462958675c224a804259f2f78d2465a87824bc - languageName: node - linkType: hard - -"irregular-plurals@npm:^3.3.0": - version: 3.5.0 - resolution: "irregular-plurals@npm:3.5.0" - checksum: 10c0/7c033bbe7325e5a6e0a26949cc6863b6ce273403d4cd5b93bd99b33fecb6605b0884097c4259c23ed0c52c2133bf7d1cdcdd7a0630e8c325161fe269b3447918 - languageName: node - linkType: hard - -"is-extglob@npm:^2.1.1": - version: 2.1.1 - resolution: "is-extglob@npm:2.1.1" - checksum: 10c0/5487da35691fbc339700bbb2730430b07777a3c21b9ebaecb3072512dfd7b4ba78ac2381a87e8d78d20ea08affb3f1971b4af629173a6bf435ff8a4c47747912 +"ip-address@npm:^10.0.1": + version: 10.0.1 + resolution: "ip-address@npm:10.0.1" + checksum: 10c0/1634d79dae18394004775cb6d699dc46b7c23df6d2083164025a2b15240c1164fccde53d0e08bd5ee4fc53913d033ab6b5e395a809ad4b956a940c446e948843 languageName: node linkType: hard @@ -2099,50 +2212,6 @@ __metadata: languageName: node linkType: hard -"is-fullwidth-code-point@npm:^4.0.0": - version: 4.0.0 - resolution: "is-fullwidth-code-point@npm:4.0.0" - checksum: 10c0/df2a717e813567db0f659c306d61f2f804d480752526886954a2a3e2246c7745fd07a52b5fecf2b68caf0a6c79dcdace6166fdf29cc76ed9975cc334f0a018b8 - languageName: node - linkType: hard - -"is-glob@npm:^4.0.1": - version: 4.0.3 - resolution: "is-glob@npm:4.0.3" - dependencies: - is-extglob: "npm:^2.1.1" - checksum: 10c0/17fb4014e22be3bbecea9b2e3a76e9e34ff645466be702f1693e8f1ee1adac84710d0be0bd9f967d6354036fd51ab7c2741d954d6e91dae6bb69714de92c197a - languageName: node - linkType: hard - -"is-number@npm:^7.0.0": - version: 7.0.0 - resolution: "is-number@npm:7.0.0" - checksum: 10c0/b4686d0d3053146095ccd45346461bc8e53b80aeb7671cc52a4de02dbbf7dc0d1d2a986e2fe4ae206984b4d34ef37e8b795ebc4f4295c978373e6575e295d811 - languageName: node - linkType: hard - -"is-plain-object@npm:^5.0.0": - version: 5.0.0 - resolution: "is-plain-object@npm:5.0.0" - checksum: 10c0/893e42bad832aae3511c71fd61c0bf61aa3a6d853061c62a307261842727d0d25f761ce9379f7ba7226d6179db2a3157efa918e7fe26360f3bf0842d9f28942c - languageName: node - linkType: hard - -"is-promise@npm:^4.0.0": - version: 4.0.0 - resolution: "is-promise@npm:4.0.0" - checksum: 10c0/ebd5c672d73db781ab33ccb155fb9969d6028e37414d609b115cc534654c91ccd061821d5b987eefaa97cf4c62f0b909bb2f04db88306de26e91bfe8ddc01503 - languageName: node - linkType: hard - -"is-unicode-supported@npm:^2.0.0": - version: 2.1.0 - resolution: "is-unicode-supported@npm:2.1.0" - checksum: 10c0/a0f53e9a7c1fdbcf2d2ef6e40d4736fdffff1c9f8944c75e15425118ff3610172c87bf7bc6c34d3903b04be59790bb2212ddbe21ee65b5a97030fc50370545a5 - languageName: node - linkType: hard - "isexe@npm:^2.0.0": version: 2.0.0 resolution: "isexe@npm:2.0.0" @@ -2170,22 +2239,17 @@ __metadata: languageName: node linkType: hard -"js-string-escape@npm:^1.0.1": - version: 1.0.1 - resolution: "js-string-escape@npm:1.0.1" - checksum: 10c0/2c33b9ff1ba6b84681c51ca0997e7d5a1639813c95d5b61cb7ad47e55cc28fa4a0b1935c3d218710d8e6bcee5d0cd8c44755231e3a4e45fc604534d9595a3628 +"js-tokens@npm:^4.0.0": + version: 4.0.0 + resolution: "js-tokens@npm:4.0.0" + checksum: 10c0/e248708d377aa058eacf2037b07ded847790e6de892bbad3dac0abba2e759cb9f121b00099a65195616badcb6eca8d14d975cb3e89eb1cfda644756402c8aeed languageName: node linkType: hard -"js-yaml@npm:^3.14.1": - version: 3.14.1 - resolution: 
"js-yaml@npm:3.14.1" - dependencies: - argparse: "npm:^1.0.7" - esprima: "npm:^4.0.0" - bin: - js-yaml: bin/js-yaml.js - checksum: 10c0/6746baaaeac312c4db8e75fa22331d9a04cccb7792d126ed8ce6a0bbcfef0cedaddd0c5098fade53db067c09fe00aa1c957674b4765610a8b06a5a189e46433b +"js-tokens@npm:^9.0.1": + version: 9.0.1 + resolution: "js-tokens@npm:9.0.1" + checksum: 10c0/68dcab8f233dde211a6b5fd98079783cbcd04b53617c1250e3553ee16ab3e6134f5e65478e41d82f6d351a052a63d71024553933808570f04dbf828d7921e80e languageName: node linkType: hard @@ -2200,20 +2264,6 @@ __metadata: languageName: node linkType: hard -"jsbn@npm:1.1.0": - version: 1.1.0 - resolution: "jsbn@npm:1.1.0" - checksum: 10c0/4f907fb78d7b712e11dea8c165fe0921f81a657d3443dde75359ed52eb2b5d33ce6773d97985a089f09a65edd80b11cb75c767b57ba47391fee4c969f7215c96 - languageName: node - linkType: hard - -"load-json-file@npm:^7.0.1": - version: 7.0.1 - resolution: "load-json-file@npm:7.0.1" - checksum: 10c0/7117459608a0b6329c7f78e6e1f541b3162dd901c29dd5af721fec8b270177d2e3d7999c971f344fff04daac368d052732e2c7146014bc84d15e0b636975e19a - languageName: node - linkType: hard - "locate-path@npm:^7.2.0": version: 7.2.0 resolution: "locate-path@npm:7.2.0" @@ -2223,17 +2273,10 @@ __metadata: languageName: node linkType: hard -"lodash-es@npm:^4.17.21": - version: 4.17.21 - resolution: "lodash-es@npm:4.17.21" - checksum: 10c0/fb407355f7e6cd523a9383e76e6b455321f0f153a6c9625e21a8827d10c54c2a2341bd2ae8d034358b60e07325e1330c14c224ff582d04612a46a4f0479ff2f2 - languageName: node - linkType: hard - -"lodash@npm:^4.17.15": - version: 4.17.21 - resolution: "lodash@npm:4.17.21" - checksum: 10c0/d8cbea072bb08655bb4c989da418994b073a608dffa608b09ac04b43a791b12aeae7cd7ad919aa4c925f33b48490b5cfe6c1f71d827956071dae2e7bb3a6b74c +"loupe@npm:^3.1.0, loupe@npm:^3.1.4": + version: 3.2.1 + resolution: "loupe@npm:3.2.1" + checksum: 10c0/910c872cba291309664c2d094368d31a68907b6f5913e989d301b5c25f30e97d76d77f23ab3bf3b46d0f601ff0b6af8810c10c31b91d2c6b2f132809ca2cc705 languageName: node linkType: hard @@ -2244,6 +2287,24 @@ __metadata: languageName: node linkType: hard +"lz-string@npm:^1.5.0": + version: 1.5.0 + resolution: "lz-string@npm:1.5.0" + bin: + lz-string: bin/bin.js + checksum: 10c0/36128e4de34791838abe979b19927c26e67201ca5acf00880377af7d765b38d1c60847e01c5ec61b1a260c48029084ab3893a3925fd6e48a04011364b089991b + languageName: node + linkType: hard + +"magic-string@npm:^0.30.17": + version: 0.30.18 + resolution: "magic-string@npm:0.30.18" + dependencies: + "@jridgewell/sourcemap-codec": "npm:^1.5.5" + checksum: 10c0/80fba01e13ce1f5c474a0498a5aa462fa158eb56567310747089a0033e432d83a2021ee2c109ac116010cd9dcf90a5231d89fbe3858165f73c00a50a74dbefcd + languageName: node + linkType: hard + "make-fetch-happen@npm:^14.0.3": version: 14.0.3 resolution: "make-fetch-happen@npm:14.0.3" @@ -2263,64 +2324,6 @@ __metadata: languageName: node linkType: hard -"matcher@npm:^5.0.0": - version: 5.0.0 - resolution: "matcher@npm:5.0.0" - dependencies: - escape-string-regexp: "npm:^5.0.0" - checksum: 10c0/eda5471fc9d5b7264d63c81727824adc3585ddb5cfdc5fce5a9b7c86f946ff181610735d330b1c37a84811df872d1290bf4e9401d2be2a414204343701144b18 - languageName: node - linkType: hard - -"md5-hex@npm:^3.0.1": - version: 3.0.1 - resolution: "md5-hex@npm:3.0.1" - dependencies: - blueimp-md5: "npm:^2.10.0" - checksum: 10c0/ee2b4d8da16b527b3a3fe4d7a96720f43afd07b46a82d49421208b5a126235fb75cfb30b80d4029514772c8844273f940bddfbf4155c787f968f3be4060d01e4 - languageName: node - linkType: hard - -"memoize@npm:^10.1.0": - version: 10.1.0 - 
resolution: "memoize@npm:10.1.0" - dependencies: - mimic-function: "npm:^5.0.1" - checksum: 10c0/6cf71f673b89778b05cd1131f573ba858627daa8fec60f2197328386acf7ab184a89e52527abbd5a605b5ccf5ee12dc0cb96efb651d9a30dcfcc89e9baacc84d - languageName: node - linkType: hard - -"merge2@npm:^1.3.0": - version: 1.4.1 - resolution: "merge2@npm:1.4.1" - checksum: 10c0/254a8a4605b58f450308fc474c82ac9a094848081bf4c06778200207820e5193726dc563a0d2c16468810516a5c97d9d3ea0ca6585d23c58ccfff2403e8dbbeb - languageName: node - linkType: hard - -"micromatch@npm:^4.0.8": - version: 4.0.8 - resolution: "micromatch@npm:4.0.8" - dependencies: - braces: "npm:^3.0.3" - picomatch: "npm:^2.3.1" - checksum: 10c0/166fa6eb926b9553f32ef81f5f531d27b4ce7da60e5baf8c021d043b27a388fb95e46a8038d5045877881e673f8134122b59624d5cecbd16eb50a42e7a6b5ca8 - languageName: node - linkType: hard - -"mimic-function@npm:^5.0.1": - version: 5.0.1 - resolution: "mimic-function@npm:5.0.1" - checksum: 10c0/f3d9464dd1816ecf6bdf2aec6ba32c0728022039d992f178237d8e289b48764fee4131319e72eedd4f7f094e22ded0af836c3187a7edc4595d28dd74368fd81d - languageName: node - linkType: hard - -"mimic-response@npm:^3.1.0": - version: 3.1.0 - resolution: "mimic-response@npm:3.1.0" - checksum: 10c0/0d6f07ce6e03e9e4445bee655202153bdb8a98d67ee8dc965ac140900d7a2688343e6b4c9a72cfc9ef2f7944dfd76eef4ab2482eb7b293a68b84916bac735362 - languageName: node - linkType: hard - "minimatch@npm:^9.0.4": version: 9.0.5 resolution: "minimatch@npm:9.0.5" @@ -2330,13 +2333,6 @@ __metadata: languageName: node linkType: hard -"minimist@npm:^1.2.0, minimist@npm:^1.2.3": - version: 1.2.8 - resolution: "minimist@npm:1.2.8" - checksum: 10c0/19d3fcdca050087b84c2029841a093691a91259a47def2f18222f41e7645a0b7c44ef4b40e88a1e58a40c84d2ef0ee6047c55594d298146d0eb3f6b737c20ce6 - languageName: node - linkType: hard - "minipass-collect@npm:^2.0.1": version: 2.0.1 resolution: "minipass-collect@npm:2.0.1" @@ -2413,13 +2409,6 @@ __metadata: languageName: node linkType: hard -"mkdirp-classic@npm:^0.5.2, mkdirp-classic@npm:^0.5.3": - version: 0.5.3 - resolution: "mkdirp-classic@npm:0.5.3" - checksum: 10c0/95371d831d196960ddc3833cc6907e6b8f67ac5501a6582f47dfae5eb0f092e9f8ce88e0d83afcae95d6e2b61a01741ba03714eeafb6f7a6e9dcc158ac85b168 - languageName: node - linkType: hard - "mkdirp@npm:^3.0.1": version: 3.0.1 resolution: "mkdirp@npm:3.0.1" @@ -2429,6 +2418,13 @@ __metadata: languageName: node linkType: hard +"mrmime@npm:^2.0.0": + version: 2.0.1 + resolution: "mrmime@npm:2.0.1" + checksum: 10c0/af05afd95af202fdd620422f976ad67dc18e6ee29beb03dd1ce950ea6ef664de378e44197246df4c7cdd73d47f2e7143a6e26e473084b9e4aa2095c0ad1e1761 + languageName: node + linkType: hard + "ms@npm:^2.1.3": version: 2.1.3 resolution: "ms@npm:2.1.3" @@ -2443,10 +2439,12 @@ __metadata: languageName: node linkType: hard -"napi-build-utils@npm:^2.0.0": - version: 2.0.0 - resolution: "napi-build-utils@npm:2.0.0" - checksum: 10c0/5833aaeb5cc5c173da47a102efa4680a95842c13e0d9cc70428bd3ee8d96bb2172f8860d2811799b5daa5cbeda779933601492a2028a6a5351c6d0fcf6de83db +"nanoid@npm:^3.3.11": + version: 3.3.11 + resolution: "nanoid@npm:3.3.11" + bin: + nanoid: bin/nanoid.cjs + checksum: 10c0/40e7f70b3d15f725ca072dfc4f74e81fcf1fbb02e491cf58ac0c79093adc9b0a73b152bcde57df4b79cd097e13023d7504acb38404a4da7bc1cd8e887b82fe0b languageName: node linkType: hard @@ -2457,43 +2455,9 @@ __metadata: languageName: node linkType: hard -"node-abi@npm:^3.3.0": - version: 3.75.0 - resolution: "node-abi@npm:3.75.0" - dependencies: - semver: "npm:^7.3.5" - checksum: 
10c0/c43a2409407df3737848fd96202b0a49e15039994aecce963969e9ef7342a8fc544aba94e0bfd8155fb9de5f5fe9a4b6ccad8bf509e7c46caf096fc4491d63f2 - languageName: node - linkType: hard - -"node-fetch@npm:^2.6.7": - version: 2.7.0 - resolution: "node-fetch@npm:2.7.0" - dependencies: - whatwg-url: "npm:^5.0.0" - peerDependencies: - encoding: ^0.1.0 - peerDependenciesMeta: - encoding: - optional: true - checksum: 10c0/b55786b6028208e6fbe594ccccc213cab67a72899c9234eb59dba51062a299ea853210fcf526998eaa2867b0963ad72338824450905679ff0fa304b8c5093ae8 - languageName: node - linkType: hard - -"node-gyp-build@npm:^4.2.2": - version: 4.8.4 - resolution: "node-gyp-build@npm:4.8.4" - bin: - node-gyp-build: bin.js - node-gyp-build-optional: optional.js - node-gyp-build-test: build-test.js - checksum: 10c0/444e189907ece2081fe60e75368784f7782cfddb554b60123743dfb89509df89f1f29c03bbfa16b3a3e0be3f48799a4783f487da6203245fa5bed239ba7407e1 - languageName: node - linkType: hard - "node-gyp@npm:latest": - version: 11.3.0 - resolution: "node-gyp@npm:11.3.0" + version: 11.4.2 + resolution: "node-gyp@npm:11.4.2" dependencies: env-paths: "npm:^2.2.0" exponential-backoff: "npm:^3.1.1" @@ -2507,14 +2471,7 @@ __metadata: which: "npm:^5.0.0" bin: node-gyp: bin/node-gyp.js - checksum: 10c0/5f4ad5a729386f7b50096efd4934b06c071dbfbc7d7d541a66d6959a7dccd62f53ff3dc95fffb60bf99d8da1270e23769f82246fcaa6c5645a70c967ae9a3398 - languageName: node - linkType: hard - -"nofilter@npm:^3.0.2": - version: 3.1.0 - resolution: "nofilter@npm:3.1.0" - checksum: 10c0/92459f3864a067b347032263f0b536223cbfc98153913b5dce350cb39c8470bc1813366e41993f22c33cc6400c0f392aa324a4b51e24c22040635c1cdb046499 + checksum: 10c0/0bfd3e96770ed70f07798d881dd37b4267708966d868a0e585986baac487d9cf5831285579fd629a83dc4e434f53e6416ce301097f2ee464cb74d377e4d8bdbe languageName: node linkType: hard @@ -2529,15 +2486,6 @@ __metadata: languageName: node linkType: hard -"once@npm:^1.3.1, once@npm:^1.4.0": - version: 1.4.0 - resolution: "once@npm:1.4.0" - dependencies: - wrappy: "npm:1" - checksum: 10c0/5d48aca287dfefabd756621c5dfce5c91a549a93e9fdb7b8246bc4c4790aa2ec17b34a260530474635147aeb631a2dcc8b32c613df0675f96041cbb8244517d0 - languageName: node - linkType: hard - "os-tmpdir@npm:~1.0.2": version: 1.0.2 resolution: "os-tmpdir@npm:1.0.2" @@ -2563,23 +2511,13 @@ __metadata: languageName: node linkType: hard -"p-map@npm:^7.0.2, p-map@npm:^7.0.3": +"p-map@npm:^7.0.2": version: 7.0.3 resolution: "p-map@npm:7.0.3" checksum: 10c0/46091610da2b38ce47bcd1d8b4835a6fa4e832848a6682cf1652bc93915770f4617afc844c10a77d1b3e56d2472bb2d5622353fa3ead01a7f42b04fc8e744a5c languageName: node linkType: hard -"package-config@npm:^5.0.0": - version: 5.0.0 - resolution: "package-config@npm:5.0.0" - dependencies: - find-up-simple: "npm:^1.0.0" - load-json-file: "npm:^7.0.1" - checksum: 10c0/f6c48930700b73a41d839bf2898b628d23665827488a4f34aed2d05e4a99d7a70a70ada115c3546765947fbc8accff94c0779da21ea084b25df47cb774531eeb - languageName: node - linkType: hard - "package-json-from-dist@npm:^1.0.0": version: 1.0.1 resolution: "package-json-from-dist@npm:1.0.1" @@ -2587,13 +2525,6 @@ __metadata: languageName: node linkType: hard -"parse-ms@npm:^4.0.0": - version: 4.0.0 - resolution: "parse-ms@npm:4.0.0" - checksum: 10c0/a7900f4f1ebac24cbf5e9708c16fb2fd482517fad353aecd7aefb8c2ba2f85ce017913ccb8925d231770404780df46244ea6fec598b3bde6490882358b4d2d16 - languageName: node - linkType: hard - "path-exists@npm:^5.0.0": version: 5.0.0 resolution: "path-exists@npm:5.0.0" @@ -2618,64 +2549,77 @@ __metadata: languageName: node 
linkType: hard -"path-type@npm:^6.0.0": - version: 6.0.0 - resolution: "path-type@npm:6.0.0" - checksum: 10c0/55baa8b1187d6dc683d5a9cfcc866168d6adff58e5db91126795376d818eee46391e00b2a4d53e44d844c7524a7d96aa68cc68f4f3e500d3d069a39e6535481c +"pathe@npm:^2.0.3": + version: 2.0.3 + resolution: "pathe@npm:2.0.3" + checksum: 10c0/c118dc5a8b5c4166011b2b70608762e260085180bb9e33e80a50dcdb1e78c010b1624f4280c492c92b05fc276715a4c357d1f9edc570f8f1b3d90b6839ebaca1 languageName: node linkType: hard -"picomatch@npm:^2.3.1": - version: 2.3.1 - resolution: "picomatch@npm:2.3.1" - checksum: 10c0/26c02b8d06f03206fc2ab8d16f19960f2ff9e81a658f831ecb656d8f17d9edc799e8364b1f4a7873e89d9702dff96204be0fa26fe4181f6843f040f819dac4be +"pathval@npm:^2.0.0": + version: 2.0.1 + resolution: "pathval@npm:2.0.1" + checksum: 10c0/460f4709479fbf2c45903a65655fc8f0a5f6d808f989173aeef5fdea4ff4f303dc13f7870303999add60ec49d4c14733895c0a869392e9866f1091fa64fd7581 languageName: node linkType: hard -"picomatch@npm:^4.0.2": +"picocolors@npm:1.1.1, picocolors@npm:^1.1.1": + version: 1.1.1 + resolution: "picocolors@npm:1.1.1" + checksum: 10c0/e2e3e8170ab9d7c7421969adaa7e1b31434f789afb9b3f115f6b96d91945041ac3ceb02e9ec6fe6510ff036bcc0bf91e69a1772edc0b707e12b19c0f2d6bcf58 + languageName: node + linkType: hard + +"picomatch@npm:^4.0.2, picomatch@npm:^4.0.3": version: 4.0.3 resolution: "picomatch@npm:4.0.3" checksum: 10c0/9582c951e95eebee5434f59e426cddd228a7b97a0161a375aed4be244bd3fe8e3a31b846808ea14ef2c8a2527a6eeab7b3946a67d5979e81694654f939473ae2 languageName: node linkType: hard -"plur@npm:^5.1.0": - version: 5.1.0 - resolution: "plur@npm:5.1.0" - dependencies: - irregular-plurals: "npm:^3.3.0" - checksum: 10c0/26bb622b8545fcfd47bbf56fbcca66c08693708a232e403fa3589e00003c56c14231ac57c7588ca5db83ef4be1f61383402c4ea954000768f779f8aef6eb6da8 - languageName: node - linkType: hard - -"prebuild-install@npm:^7.1.1": - version: 7.1.3 - resolution: "prebuild-install@npm:7.1.3" - dependencies: - detect-libc: "npm:^2.0.0" - expand-template: "npm:^2.0.3" - github-from-package: "npm:0.0.0" - minimist: "npm:^1.2.3" - mkdirp-classic: "npm:^0.5.3" - napi-build-utils: "npm:^2.0.0" - node-abi: "npm:^3.3.0" - pump: "npm:^3.0.0" - rc: "npm:^1.2.7" - simple-get: "npm:^4.0.0" - tar-fs: "npm:^2.0.0" - tunnel-agent: "npm:^0.6.0" +"playwright-core@npm:1.55.0": + version: 1.55.0 + resolution: "playwright-core@npm:1.55.0" bin: - prebuild-install: bin.js - checksum: 10c0/25919a42b52734606a4036ab492d37cfe8b601273d8dfb1fa3c84e141a0a475e7bad3ab848c741d2f810cef892fcf6059b8c7fe5b29f98d30e0c29ad009bedff + playwright-core: cli.js + checksum: 10c0/c39d6aa30e7a4e73965942ca5e13405ae05c9cb49f755a35f04248c864c0b24cf662d9767f1797b3ec48d1cf4e54774dce4a19c16534bd5cfd2aa3da81c9dc3a languageName: node linkType: hard -"pretty-ms@npm:^9.2.0": - version: 9.2.0 - resolution: "pretty-ms@npm:9.2.0" +"playwright@npm:^1.55.0": + version: 1.55.0 + resolution: "playwright@npm:1.55.0" dependencies: - parse-ms: "npm:^4.0.0" - checksum: 10c0/ab6d066f90e9f77020426986e1b018369f41575674544c539aabec2e63a20fec01166d8cf6571d0e165ad11cfe5a8134a2a48a36d42ab291c59c6deca5264cbb + fsevents: "npm:2.3.2" + playwright-core: "npm:1.55.0" + dependenciesMeta: + fsevents: + optional: true + bin: + playwright: cli.js + checksum: 10c0/51605b7e57a5650e57972c5fdfc09d7a9934cca1cbee5beacca716fa801e25cb5bb7c1663de90c22b300fde884e5545a2b13a0505a93270b660687791c478304 + languageName: node + linkType: hard + +"postcss@npm:^8.5.6": + version: 8.5.6 + resolution: "postcss@npm:8.5.6" + dependencies: + nanoid: "npm:^3.3.11" + 
picocolors: "npm:^1.1.1" + source-map-js: "npm:^1.2.1" + checksum: 10c0/5127cc7c91ed7a133a1b7318012d8bfa112da9ef092dddf369ae699a1f10ebbd89b1b9f25f3228795b84585c72aabd5ced5fc11f2ba467eedf7b081a66fad024 + languageName: node + linkType: hard + +"pretty-format@npm:^27.0.2": + version: 27.5.1 + resolution: "pretty-format@npm:27.5.1" + dependencies: + ansi-regex: "npm:^5.0.1" + ansi-styles: "npm:^5.0.0" + react-is: "npm:^17.0.1" + checksum: 10c0/0cbda1031aa30c659e10921fa94e0dd3f903ecbbbe7184a729ad66f2b6e7f17891e8c7d7654c458fa4ccb1a411ffb695b4f17bbcd3fe075fabe181027c4040ed languageName: node linkType: hard @@ -2696,68 +2640,10 @@ __metadata: languageName: node linkType: hard -"pump@npm:^3.0.0": - version: 3.0.3 - resolution: "pump@npm:3.0.3" - dependencies: - end-of-stream: "npm:^1.1.0" - once: "npm:^1.3.1" - checksum: 10c0/ada5cdf1d813065bbc99aa2c393b8f6beee73b5de2890a8754c9f488d7323ffd2ca5f5a0943b48934e3fcbd97637d0337369c3c631aeb9614915db629f1c75c9 - languageName: node - linkType: hard - -"queue-microtask@npm:^1.2.2": - version: 1.2.3 - resolution: "queue-microtask@npm:1.2.3" - checksum: 10c0/900a93d3cdae3acd7d16f642c29a642aea32c2026446151f0778c62ac089d4b8e6c986811076e1ae180a694cedf077d453a11b58ff0a865629a4f82ab558e102 - languageName: node - linkType: hard - -"rc@npm:^1.2.7": - version: 1.2.8 - resolution: "rc@npm:1.2.8" - dependencies: - deep-extend: "npm:^0.6.0" - ini: "npm:~1.3.0" - minimist: "npm:^1.2.0" - strip-json-comments: "npm:~2.0.1" - bin: - rc: ./cli.js - checksum: 10c0/24a07653150f0d9ac7168e52943cc3cb4b7a22c0e43c7dff3219977c2fdca5a2760a304a029c20811a0e79d351f57d46c9bde216193a0f73978496afc2b85b15 - languageName: node - linkType: hard - -"readable-stream@npm:^3.1.1, readable-stream@npm:^3.4.0": - version: 3.6.2 - resolution: "readable-stream@npm:3.6.2" - dependencies: - inherits: "npm:^2.0.3" - string_decoder: "npm:^1.1.1" - util-deprecate: "npm:^1.0.1" - checksum: 10c0/e37be5c79c376fdd088a45fa31ea2e423e5d48854be7a22a58869b4e84d25047b193f6acb54f1012331e1bcd667ffb569c01b99d36b0bd59658fb33f513511b7 - languageName: node - linkType: hard - -"require-directory@npm:^2.1.1": - version: 2.1.1 - resolution: "require-directory@npm:2.1.1" - checksum: 10c0/83aa76a7bc1531f68d92c75a2ca2f54f1b01463cb566cf3fbc787d0de8be30c9dbc211d1d46be3497dac5785fe296f2dd11d531945ac29730643357978966e99 - languageName: node - linkType: hard - -"resolve-cwd@npm:^3.0.0": - version: 3.0.0 - resolution: "resolve-cwd@npm:3.0.0" - dependencies: - resolve-from: "npm:^5.0.0" - checksum: 10c0/e608a3ebd15356264653c32d7ecbc8fd702f94c6703ea4ac2fb81d9c359180cba0ae2e6b71faa446631ed6145454d5a56b227efc33a2d40638ac13f8beb20ee4 - languageName: node - linkType: hard - -"resolve-from@npm:^5.0.0": - version: 5.0.0 - resolution: "resolve-from@npm:5.0.0" - checksum: 10c0/b21cb7f1fb746de8107b9febab60095187781137fd803e6a59a76d421444b1531b641bba5857f5dc011974d8a5c635d61cec49e6bd3b7fc20e01f0fafc4efbf2 +"react-is@npm:^17.0.1": + version: 17.0.2 + resolution: "react-is@npm:17.0.2" + checksum: 10c0/2bdb6b93fbb1820b024b496042cce405c57e2f85e777c9aabd55f9b26d145408f9f74f5934676ffdc46f3dcff656d78413a6e43968e7b3f92eea35b3052e9053 languageName: node linkType: hard @@ -2768,28 +2654,89 @@ __metadata: languageName: node linkType: hard -"reusify@npm:^1.0.4": - version: 1.1.0 - resolution: "reusify@npm:1.1.0" - checksum: 10c0/4eff0d4a5f9383566c7d7ec437b671cc51b25963bd61bf127c3f3d3f68e44a026d99b8d2f1ad344afff8d278a8fe70a8ea092650a716d22287e8bef7126bb2fa - languageName: node - linkType: hard - -"run-parallel@npm:^1.1.9": - version: 1.2.0 - resolution: 
"run-parallel@npm:1.2.0" +"rollup@npm:^4.43.0": + version: 4.50.1 + resolution: "rollup@npm:4.50.1" dependencies: - queue-microtask: "npm:^1.2.2" - checksum: 10c0/200b5ab25b5b8b7113f9901bfe3afc347e19bb7475b267d55ad0eb86a62a46d77510cb0f232507c9e5d497ebda569a08a9867d0d14f57a82ad5564d991588b39 + "@rollup/rollup-android-arm-eabi": "npm:4.50.1" + "@rollup/rollup-android-arm64": "npm:4.50.1" + "@rollup/rollup-darwin-arm64": "npm:4.50.1" + "@rollup/rollup-darwin-x64": "npm:4.50.1" + "@rollup/rollup-freebsd-arm64": "npm:4.50.1" + "@rollup/rollup-freebsd-x64": "npm:4.50.1" + "@rollup/rollup-linux-arm-gnueabihf": "npm:4.50.1" + "@rollup/rollup-linux-arm-musleabihf": "npm:4.50.1" + "@rollup/rollup-linux-arm64-gnu": "npm:4.50.1" + "@rollup/rollup-linux-arm64-musl": "npm:4.50.1" + "@rollup/rollup-linux-loongarch64-gnu": "npm:4.50.1" + "@rollup/rollup-linux-ppc64-gnu": "npm:4.50.1" + "@rollup/rollup-linux-riscv64-gnu": "npm:4.50.1" + "@rollup/rollup-linux-riscv64-musl": "npm:4.50.1" + "@rollup/rollup-linux-s390x-gnu": "npm:4.50.1" + "@rollup/rollup-linux-x64-gnu": "npm:4.50.1" + "@rollup/rollup-linux-x64-musl": "npm:4.50.1" + "@rollup/rollup-openharmony-arm64": "npm:4.50.1" + "@rollup/rollup-win32-arm64-msvc": "npm:4.50.1" + "@rollup/rollup-win32-ia32-msvc": "npm:4.50.1" + "@rollup/rollup-win32-x64-msvc": "npm:4.50.1" + "@types/estree": "npm:1.0.8" + fsevents: "npm:~2.3.2" + dependenciesMeta: + "@rollup/rollup-android-arm-eabi": + optional: true + "@rollup/rollup-android-arm64": + optional: true + "@rollup/rollup-darwin-arm64": + optional: true + "@rollup/rollup-darwin-x64": + optional: true + "@rollup/rollup-freebsd-arm64": + optional: true + "@rollup/rollup-freebsd-x64": + optional: true + "@rollup/rollup-linux-arm-gnueabihf": + optional: true + "@rollup/rollup-linux-arm-musleabihf": + optional: true + "@rollup/rollup-linux-arm64-gnu": + optional: true + "@rollup/rollup-linux-arm64-musl": + optional: true + "@rollup/rollup-linux-loongarch64-gnu": + optional: true + "@rollup/rollup-linux-ppc64-gnu": + optional: true + "@rollup/rollup-linux-riscv64-gnu": + optional: true + "@rollup/rollup-linux-riscv64-musl": + optional: true + "@rollup/rollup-linux-s390x-gnu": + optional: true + "@rollup/rollup-linux-x64-gnu": + optional: true + "@rollup/rollup-linux-x64-musl": + optional: true + "@rollup/rollup-openharmony-arm64": + optional: true + "@rollup/rollup-win32-arm64-msvc": + optional: true + "@rollup/rollup-win32-ia32-msvc": + optional: true + "@rollup/rollup-win32-x64-msvc": + optional: true + fsevents: + optional: true + bin: + rollup: dist/bin/rollup + checksum: 10c0/2029282826d5fb4e308be261b2c28329a4d2bd34304cc3960da69fd21d5acccd0267d6770b1656ffc8f166203ef7e865b4583d5f842a519c8ef059ac71854205 languageName: node linkType: hard -"safe-buffer@npm:^5.0.1, safe-buffer@npm:~5.2.0": - version: 5.2.1 - resolution: "safe-buffer@npm:5.2.1" - checksum: 10c0/6501914237c0a86e9675d4e51d89ca3c21ffd6a31642efeba25ad65720bce6921c9e7e974e5be91a786b25aa058b5303285d3c15dbabf983a919f5f630d349f3 - languageName: node - linkType: hard +"root-workspace-0b6124@workspace:.": + version: 0.0.0-use.local + resolution: "root-workspace-0b6124@workspace:." 
+ languageName: unknown + linkType: soft "safer-buffer@npm:>= 2.1.2 < 3, safer-buffer@npm:>= 2.1.2 < 3.0.0": version: 2.1.2 @@ -2798,7 +2745,7 @@ __metadata: languageName: node linkType: hard -"semver@npm:^7.3.2, semver@npm:^7.3.5, semver@npm:^7.5.3, semver@npm:^7.7.1": +"semver@npm:^7.3.5, semver@npm:^7.7.1": version: 7.7.2 resolution: "semver@npm:7.7.2" bin: @@ -2807,15 +2754,6 @@ __metadata: languageName: node linkType: hard -"serialize-error@npm:^7.0.1": - version: 7.0.1 - resolution: "serialize-error@npm:7.0.1" - dependencies: - type-fest: "npm:^0.13.1" - checksum: 10c0/7982937d578cd901276c8ab3e2c6ed8a4c174137730f1fb0402d005af209a0e84d04acc874e317c936724c7b5b26c7a96ff7e4b8d11a469f4924a4b0ea814c05 - languageName: node - linkType: hard - "shebang-command@npm:^2.0.0": version: 2.0.0 resolution: "shebang-command@npm:2.0.0" @@ -2832,6 +2770,13 @@ __metadata: languageName: node linkType: hard +"siginfo@npm:^2.0.0": + version: 2.0.0 + resolution: "siginfo@npm:2.0.0" + checksum: 10c0/3def8f8e516fbb34cb6ae415b07ccc5d9c018d85b4b8611e3dc6f8be6d1899f693a4382913c9ed51a06babb5201639d76453ab297d1c54a456544acf5c892e34 + languageName: node + linkType: hard + "signal-exit@npm:^4.0.1, signal-exit@npm:^4.1.0": version: 4.1.0 resolution: "signal-exit@npm:4.1.0" @@ -2839,38 +2784,14 @@ __metadata: languageName: node linkType: hard -"simple-concat@npm:^1.0.0": - version: 1.0.1 - resolution: "simple-concat@npm:1.0.1" - checksum: 10c0/62f7508e674414008910b5397c1811941d457dfa0db4fd5aa7fa0409eb02c3609608dfcd7508cace75b3a0bf67a2a77990711e32cd213d2c76f4fd12ee86d776 - languageName: node - linkType: hard - -"simple-get@npm:^4.0.0": - version: 4.0.1 - resolution: "simple-get@npm:4.0.1" +"sirv@npm:^3.0.1": + version: 3.0.2 + resolution: "sirv@npm:3.0.2" dependencies: - decompress-response: "npm:^6.0.0" - once: "npm:^1.3.1" - simple-concat: "npm:^1.0.0" - checksum: 10c0/b0649a581dbca741babb960423248899203165769747142033479a7dc5e77d7b0fced0253c731cd57cf21e31e4d77c9157c3069f4448d558ebc96cf9e1eebcf0 - languageName: node - linkType: hard - -"slash@npm:^5.1.0": - version: 5.1.0 - resolution: "slash@npm:5.1.0" - checksum: 10c0/eb48b815caf0bdc390d0519d41b9e0556a14380f6799c72ba35caf03544d501d18befdeeef074bc9c052acf69654bc9e0d79d7f1de0866284137a40805299eb3 - languageName: node - linkType: hard - -"slice-ansi@npm:^5.0.0": - version: 5.0.0 - resolution: "slice-ansi@npm:5.0.0" - dependencies: - ansi-styles: "npm:^6.0.0" - is-fullwidth-code-point: "npm:^4.0.0" - checksum: 10c0/2d4d40b2a9d5cf4e8caae3f698fe24ae31a4d778701724f578e984dcb485ec8c49f0c04dab59c401821e80fcdfe89cace9c66693b0244e40ec485d72e543914f + "@polka/url": "npm:^1.0.0-next.24" + mrmime: "npm:^2.0.0" + totalist: "npm:^3.0.0" + checksum: 10c0/5930e4397afdb14fbae13751c3be983af4bda5c9aadec832607dc2af15a7162f7d518c71b30e83ae3644b9a24cea041543cc969e5fe2b80af6ce8ea3174b2d04 languageName: node linkType: hard @@ -2893,26 +2814,19 @@ __metadata: linkType: hard "socks@npm:^2.8.3": - version: 2.8.6 - resolution: "socks@npm:2.8.6" + version: 2.8.7 + resolution: "socks@npm:2.8.7" dependencies: - ip-address: "npm:^9.0.5" + ip-address: "npm:^10.0.1" smart-buffer: "npm:^4.2.0" - checksum: 10c0/15b95db4caa359c80bfa880ff3e58f3191b9ffa4313570e501a60ee7575f51e4be664a296f4ee5c2c40544da179db6140be53433ce41ec745f9d51f342557514 + checksum: 10c0/2805a43a1c4bcf9ebf6e018268d87b32b32b06fbbc1f9282573583acc155860dc361500f89c73bfbb157caa1b4ac78059eac0ef15d1811eb0ca75e0bdadbc9d2 languageName: node linkType: hard -"sprintf-js@npm:^1.1.3": - version: 1.1.3 - resolution: "sprintf-js@npm:1.1.3" - 
checksum: 10c0/09270dc4f30d479e666aee820eacd9e464215cdff53848b443964202bf4051490538e5dd1b42e1a65cf7296916ca17640aebf63dae9812749c7542ee5f288dec - languageName: node - linkType: hard - -"sprintf-js@npm:~1.0.2": - version: 1.0.3 - resolution: "sprintf-js@npm:1.0.3" - checksum: 10c0/ecadcfe4c771890140da5023d43e190b7566d9cf8b2d238600f31bec0fc653f328da4450eb04bd59a431771a8e9cc0e118f0aa3974b683a4981b4e07abc2a5bb +"source-map-js@npm:^1.2.1": + version: 1.2.1 + resolution: "source-map-js@npm:1.2.1" + checksum: 10c0/7bda1fc4c197e3c6ff17de1b8b2c20e60af81b63a52cb32ec5a5d67a20a7d42651e2cb34ebe93833c5a2a084377e17455854fee3e21e7925c64a51b6a52b0faf languageName: node linkType: hard @@ -2925,16 +2839,21 @@ __metadata: languageName: node linkType: hard -"stack-utils@npm:^2.0.6": - version: 2.0.6 - resolution: "stack-utils@npm:2.0.6" - dependencies: - escape-string-regexp: "npm:^2.0.0" - checksum: 10c0/651c9f87667e077584bbe848acaecc6049bc71979f1e9a46c7b920cad4431c388df0f51b8ad7cfd6eed3db97a2878d0fc8b3122979439ea8bac29c61c95eec8a +"stackback@npm:0.0.2": + version: 0.0.2 + resolution: "stackback@npm:0.0.2" + checksum: 10c0/89a1416668f950236dd5ac9f9a6b2588e1b9b62b1b6ad8dff1bfc5d1a15dbf0aafc9b52d2226d00c28dffff212da464eaeebfc6b7578b9d180cef3e3782c5983 languageName: node linkType: hard -"string-width-cjs@npm:string-width@^4.2.0, string-width@npm:^4.1.0, string-width@npm:^4.2.0, string-width@npm:^4.2.3": +"std-env@npm:^3.9.0": + version: 3.9.0 + resolution: "std-env@npm:3.9.0" + checksum: 10c0/4a6f9218aef3f41046c3c7ecf1f98df00b30a07f4f35c6d47b28329bc2531eef820828951c7d7b39a1c5eb19ad8a46e3ddfc7deb28f0a2f3ceebee11bab7ba50 + languageName: node + linkType: hard + +"string-width-cjs@npm:string-width@^4.2.0, string-width@npm:^4.1.0": version: 4.2.3 resolution: "string-width@npm:4.2.3" dependencies: @@ -2956,26 +2875,6 @@ __metadata: languageName: node linkType: hard -"string-width@npm:^7.0.0": - version: 7.2.0 - resolution: "string-width@npm:7.2.0" - dependencies: - emoji-regex: "npm:^10.3.0" - get-east-asian-width: "npm:^1.0.0" - strip-ansi: "npm:^7.1.0" - checksum: 10c0/eb0430dd43f3199c7a46dcbf7a0b34539c76fe3aa62763d0b0655acdcbdf360b3f66f3d58ca25ba0205f42ea3491fa00f09426d3b7d3040e506878fc7664c9b9 - languageName: node - linkType: hard - -"string_decoder@npm:^1.1.1": - version: 1.3.0 - resolution: "string_decoder@npm:1.3.0" - dependencies: - safe-buffer: "npm:~5.2.0" - checksum: 10c0/810614ddb030e271cd591935dcd5956b2410dd079d64ff92a1844d6b7588bf992b3e1b69b0f4d34a3e06e0bd73046ac646b5264c1987b20d0601f81ef35d731d - languageName: node - linkType: hard - "strip-ansi-cjs@npm:strip-ansi@^6.0.1, strip-ansi@npm:^6.0.0, strip-ansi@npm:^6.0.1": version: 6.0.1 resolution: "strip-ansi@npm:6.0.1" @@ -2985,60 +2884,25 @@ __metadata: languageName: node linkType: hard -"strip-ansi@npm:^7.0.1, strip-ansi@npm:^7.1.0": - version: 7.1.0 - resolution: "strip-ansi@npm:7.1.0" +"strip-ansi@npm:^7.0.1": + version: 7.1.2 + resolution: "strip-ansi@npm:7.1.2" dependencies: ansi-regex: "npm:^6.0.1" - checksum: 10c0/a198c3762e8832505328cbf9e8c8381de14a4fa50a4f9b2160138158ea88c0f5549fb50cb13c651c3088f47e63a108b34622ec18c0499b6c8c3a5ddf6b305ac4 + checksum: 10c0/0d6d7a023de33368fd042aab0bf48f4f4077abdfd60e5393e73c7c411e85e1b3a83507c11af2e656188511475776215df9ca589b4da2295c9455cc399ce1858b languageName: node linkType: hard -"strip-json-comments@npm:~2.0.1": - version: 2.0.1 - resolution: "strip-json-comments@npm:2.0.1" - checksum: 
10c0/b509231cbdee45064ff4f9fd73609e2bcc4e84a4d508e9dd0f31f70356473fde18abfb5838c17d56fb236f5a06b102ef115438de0600b749e818a35fbbc48c43 - languageName: node - linkType: hard - -"supertap@npm:^3.0.1": - version: 3.0.1 - resolution: "supertap@npm:3.0.1" +"strip-literal@npm:^3.0.0": + version: 3.0.0 + resolution: "strip-literal@npm:3.0.0" dependencies: - indent-string: "npm:^5.0.0" - js-yaml: "npm:^3.14.1" - serialize-error: "npm:^7.0.1" - strip-ansi: "npm:^7.0.1" - checksum: 10c0/8164674f2e280cab875f0fef5bb36c15553c13e29697ff92f4e0d6bc62149f0303a89eee47535413ed145ea72e14a24d065bab233059d48a499ec5ebb4566b0f + js-tokens: "npm:^9.0.1" + checksum: 10c0/d81657f84aba42d4bbaf2a677f7e7f34c1f3de5a6726db8bc1797f9c0b303ba54d4660383a74bde43df401cf37cce1dff2c842c55b077a4ceee11f9e31fba828 languageName: node linkType: hard -"tar-fs@npm:^2.0.0": - version: 2.1.3 - resolution: "tar-fs@npm:2.1.3" - dependencies: - chownr: "npm:^1.1.1" - mkdirp-classic: "npm:^0.5.2" - pump: "npm:^3.0.0" - tar-stream: "npm:^2.1.4" - checksum: 10c0/472ee0c3c862605165163113ab6924f411c07506a1fb24c51a1a80085f0d4d381d86d2fd6b189236c8d932d1cd97b69cce35016767ceb658a35f7584fe77f305 - languageName: node - linkType: hard - -"tar-stream@npm:^2.1.4": - version: 2.2.0 - resolution: "tar-stream@npm:2.2.0" - dependencies: - bl: "npm:^4.0.3" - end-of-stream: "npm:^1.4.1" - fs-constants: "npm:^1.0.0" - inherits: "npm:^2.0.3" - readable-stream: "npm:^3.1.1" - checksum: 10c0/2f4c910b3ee7196502e1ff015a7ba321ec6ea837667220d7bcb8d0852d51cb04b87f7ae471008a6fb8f5b1a1b5078f62f3a82d30c706f20ada1238ac797e7692 - languageName: node - linkType: hard - -"tar@npm:^7.4.0, tar@npm:^7.4.3": +"tar@npm:^7.4.3": version: 7.4.3 resolution: "tar@npm:7.4.3" dependencies: @@ -3052,27 +2916,48 @@ __metadata: languageName: node linkType: hard -"temp-dir@npm:^3.0.0": - version: 3.0.0 - resolution: "temp-dir@npm:3.0.0" - checksum: 10c0/a86978a400984cd5f315b77ebf3fe53bb58c61f192278cafcb1f3fb32d584a21dc8e08b93171d7874b7cc972234d3455c467306cc1bfc4524b622e5ad3bfd671 +"tinybench@npm:^2.9.0": + version: 2.9.0 + resolution: "tinybench@npm:2.9.0" + checksum: 10c0/c3500b0f60d2eb8db65250afe750b66d51623057ee88720b7f064894a6cb7eb93360ca824a60a31ab16dab30c7b1f06efe0795b352e37914a9d4bad86386a20c languageName: node linkType: hard -"time-zone@npm:^1.0.0": - version: 1.0.0 - resolution: "time-zone@npm:1.0.0" - checksum: 10c0/d00ebd885039109011b6e2423ebbf225160927333c2ade6d833e9cc4676db20759f1f3855fafde00d1bd668c243a6aa68938ce71fe58aab0d514e820d59c1d81 +"tinyexec@npm:^0.3.2": + version: 0.3.2 + resolution: "tinyexec@npm:0.3.2" + checksum: 10c0/3efbf791a911be0bf0821eab37a3445c2ba07acc1522b1fa84ae1e55f10425076f1290f680286345ed919549ad67527d07281f1c19d584df3b74326909eb1f90 languageName: node linkType: hard -"tinyglobby@npm:^0.2.12": - version: 0.2.14 - resolution: "tinyglobby@npm:0.2.14" +"tinyglobby@npm:^0.2.12, tinyglobby@npm:^0.2.14, tinyglobby@npm:^0.2.15": + version: 0.2.15 + resolution: "tinyglobby@npm:0.2.15" dependencies: - fdir: "npm:^6.4.4" - picomatch: "npm:^4.0.2" - checksum: 10c0/f789ed6c924287a9b7d3612056ed0cda67306cd2c80c249fd280cf1504742b12583a2089b61f4abbd24605f390809017240e250241f09938054c9b363e51c0a6 + fdir: "npm:^6.5.0" + picomatch: "npm:^4.0.3" + checksum: 10c0/869c31490d0d88eedb8305d178d4c75e7463e820df5a9b9d388291daf93e8b1eb5de1dad1c1e139767e4269fe75f3b10d5009b2cc14db96ff98986920a186844 + languageName: node + linkType: hard + +"tinypool@npm:^1.1.1": + version: 1.1.1 + resolution: "tinypool@npm:1.1.1" + checksum: 
10c0/bf26727d01443061b04fa863f571016950888ea994ba0cd8cba3a1c51e2458d84574341ab8dbc3664f1c3ab20885c8cf9ff1cc4b18201f04c2cde7d317fff69b + languageName: node + linkType: hard + +"tinyrainbow@npm:^2.0.0": + version: 2.0.0 + resolution: "tinyrainbow@npm:2.0.0" + checksum: 10c0/c83c52bef4e0ae7fb8ec6a722f70b5b6fa8d8be1c85792e829f56c0e1be94ab70b293c032dc5048d4d37cfe678f1f5babb04bdc65fd123098800148ca989184f + languageName: node + linkType: hard + +"tinyspy@npm:^4.0.3": + version: 4.0.3 + resolution: "tinyspy@npm:4.0.3" + checksum: 10c0/0a92a18b5350945cc8a1da3a22c9ad9f4e2945df80aaa0c43e1b3a3cfb64d8501e607ebf0305e048e3c3d3e0e7f8eb10cea27dc17c21effb73e66c4a3be36373 languageName: node linkType: hard @@ -3085,19 +2970,10 @@ __metadata: languageName: node linkType: hard -"to-regex-range@npm:^5.0.1": - version: 5.0.1 - resolution: "to-regex-range@npm:5.0.1" - dependencies: - is-number: "npm:^7.0.0" - checksum: 10c0/487988b0a19c654ff3e1961b87f471702e708fa8a8dd02a298ef16da7206692e8552a0250e8b3e8759270f62e9d8314616f6da274734d3b558b1fc7b7724e892 - languageName: node - linkType: hard - -"tr46@npm:~0.0.3": - version: 0.0.3 - resolution: "tr46@npm:0.0.3" - checksum: 10c0/047cb209a6b60c742f05c9d3ace8fa510bff609995c129a37ace03476a9b12db4dbf975e74600830ef0796e18882b2381fb5fb1f6b4f96b832c374de3ab91a11 +"totalist@npm:^3.0.0": + version: 3.0.1 + resolution: "totalist@npm:3.0.1" + checksum: 10c0/4bb1fadb69c3edbef91c73ebef9d25b33bbf69afe1e37ce544d5f7d13854cda15e47132f3e0dc4cafe300ddb8578c77c50a65004d8b6e97e77934a69aa924863 languageName: node linkType: hard @@ -3108,15 +2984,6 @@ __metadata: languageName: node linkType: hard -"tunnel-agent@npm:^0.6.0": - version: 0.6.0 - resolution: "tunnel-agent@npm:0.6.0" - dependencies: - safe-buffer: "npm:^5.0.1" - checksum: 10c0/4c7a1b813e7beae66fdbf567a65ec6d46313643753d0beefb3c7973d66fcec3a1e7f39759f0a0b4465883499c6dc8b0750ab8b287399af2e583823e40410a17a - languageName: node - linkType: hard - "typanion@npm:^3.14.0, typanion@npm:^3.8.0": version: 3.14.0 resolution: "typanion@npm:3.14.0" @@ -3124,13 +2991,6 @@ __metadata: languageName: node linkType: hard -"type-fest@npm:^0.13.1": - version: 0.13.1 - resolution: "type-fest@npm:0.13.1" - checksum: 10c0/0c0fa07ae53d4e776cf4dac30d25ad799443e9eef9226f9fddbb69242db86b08584084a99885cfa5a9dfe4c063ebdc9aa7b69da348e735baede8d43f1aeae93b - languageName: node - linkType: hard - "type-fest@npm:^0.21.3": version: 0.21.3 resolution: "type-fest@npm:0.21.3" @@ -3158,6 +3018,13 @@ __metadata: languageName: node linkType: hard +"undici-types@npm:~7.10.0": + version: 7.10.0 + resolution: "undici-types@npm:7.10.0" + checksum: 10c0/8b00ce50e235fe3cc601307f148b5e8fb427092ee3b23e8118ec0a5d7f68eca8cee468c8fc9f15cbb2cf2a3797945ebceb1cbd9732306a1d00e0a9b6afa0f635 + languageName: node + linkType: hard + "unicorn-magic@npm:^0.1.0": version: 0.1.0 resolution: "unicorn-magic@npm:0.1.0" @@ -3165,13 +3032,6 @@ __metadata: languageName: node linkType: hard -"unicorn-magic@npm:^0.3.0": - version: 0.3.0 - resolution: "unicorn-magic@npm:0.3.0" - checksum: 10c0/0a32a997d6c15f1c2a077a15b1c4ca6f268d574cf5b8975e778bb98e6f8db4ef4e86dfcae4e158cd4c7e38fb4dd383b93b13eefddc7f178dea13d3ac8a603271 - languageName: node - linkType: hard - "unique-filename@npm:^4.0.0": version: 4.0.0 resolution: "unique-filename@npm:4.0.0" @@ -3197,34 +3057,129 @@ __metadata: languageName: node linkType: hard -"util-deprecate@npm:^1.0.1": - version: 1.0.2 - resolution: "util-deprecate@npm:1.0.2" - checksum: 
10c0/41a5bdd214df2f6c3ecf8622745e4a366c4adced864bc3c833739791aeeeb1838119af7daed4ba36428114b5c67dcda034a79c882e97e43c03e66a4dd7389942 - languageName: node - linkType: hard - -"webidl-conversions@npm:^3.0.0": - version: 3.0.1 - resolution: "webidl-conversions@npm:3.0.1" - checksum: 10c0/5612d5f3e54760a797052eb4927f0ddc01383550f542ccd33d5238cfd65aeed392a45ad38364970d0a0f4fea32e1f4d231b3d8dac4a3bdd385e5cf802ae097db - languageName: node - linkType: hard - -"well-known-symbols@npm:^2.0.0": - version: 2.0.0 - resolution: "well-known-symbols@npm:2.0.0" - checksum: 10c0/cb6c12e98877e8952ec28d13ae6f4fdb54ae1cb49b16a728720276dadd76c930e6cb0e174af3a4620054dd2752546f842540122920c6e31410208abd4958ee6b - languageName: node - linkType: hard - -"whatwg-url@npm:^5.0.0": - version: 5.0.0 - resolution: "whatwg-url@npm:5.0.0" +"vite-node@npm:3.2.4": + version: 3.2.4 + resolution: "vite-node@npm:3.2.4" dependencies: - tr46: "npm:~0.0.3" - webidl-conversions: "npm:^3.0.0" - checksum: 10c0/1588bed84d10b72d5eec1d0faa0722ba1962f1821e7539c535558fb5398d223b0c50d8acab950b8c488b4ba69043fd833cc2697056b167d8ad46fac3995a55d5 + cac: "npm:^6.7.14" + debug: "npm:^4.4.1" + es-module-lexer: "npm:^1.7.0" + pathe: "npm:^2.0.3" + vite: "npm:^5.0.0 || ^6.0.0 || ^7.0.0-0" + bin: + vite-node: vite-node.mjs + checksum: 10c0/6ceca67c002f8ef6397d58b9539f80f2b5d79e103a18367288b3f00a8ab55affa3d711d86d9112fce5a7fa658a212a087a005a045eb8f4758947dd99af2a6c6b + languageName: node + linkType: hard + +"vite@npm:^5.0.0 || ^6.0.0 || ^7.0.0-0": + version: 7.1.5 + resolution: "vite@npm:7.1.5" + dependencies: + esbuild: "npm:^0.25.0" + fdir: "npm:^6.5.0" + fsevents: "npm:~2.3.3" + picomatch: "npm:^4.0.3" + postcss: "npm:^8.5.6" + rollup: "npm:^4.43.0" + tinyglobby: "npm:^0.2.15" + peerDependencies: + "@types/node": ^20.19.0 || >=22.12.0 + jiti: ">=1.21.0" + less: ^4.0.0 + lightningcss: ^1.21.0 + sass: ^1.70.0 + sass-embedded: ^1.70.0 + stylus: ">=0.54.8" + sugarss: ^5.0.0 + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + dependenciesMeta: + fsevents: + optional: true + peerDependenciesMeta: + "@types/node": + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + bin: + vite: bin/vite.js + checksum: 10c0/782d2f20c25541b26d1fb39bef5f194149caff39dc25b7836e25f049ca919f2e2ce186bddb21f3f20f6195354b3579ec637a8ca08d65b117f8b6f81e3e730a9c + languageName: node + linkType: hard + +"vitest@npm:^3.2.4": + version: 3.2.4 + resolution: "vitest@npm:3.2.4" + dependencies: + "@types/chai": "npm:^5.2.2" + "@vitest/expect": "npm:3.2.4" + "@vitest/mocker": "npm:3.2.4" + "@vitest/pretty-format": "npm:^3.2.4" + "@vitest/runner": "npm:3.2.4" + "@vitest/snapshot": "npm:3.2.4" + "@vitest/spy": "npm:3.2.4" + "@vitest/utils": "npm:3.2.4" + chai: "npm:^5.2.0" + debug: "npm:^4.4.1" + expect-type: "npm:^1.2.1" + magic-string: "npm:^0.30.17" + pathe: "npm:^2.0.3" + picomatch: "npm:^4.0.2" + std-env: "npm:^3.9.0" + tinybench: "npm:^2.9.0" + tinyexec: "npm:^0.3.2" + tinyglobby: "npm:^0.2.14" + tinypool: "npm:^1.1.1" + tinyrainbow: "npm:^2.0.0" + vite: "npm:^5.0.0 || ^6.0.0 || ^7.0.0-0" + vite-node: "npm:3.2.4" + why-is-node-running: "npm:^2.3.0" + peerDependencies: + "@edge-runtime/vm": "*" + "@types/debug": ^4.1.12 + "@types/node": ^18.0.0 || ^20.0.0 || >=22.0.0 + "@vitest/browser": 3.2.4 + "@vitest/ui": 3.2.4 + happy-dom: "*" + jsdom: "*" + 
peerDependenciesMeta:
+    "@edge-runtime/vm":
+      optional: true
+    "@types/debug":
+      optional: true
+    "@types/node":
+      optional: true
+    "@vitest/browser":
+      optional: true
+    "@vitest/ui":
+      optional: true
+    happy-dom:
+      optional: true
+    jsdom:
+      optional: true
+  bin:
+    vitest: vitest.mjs
+  checksum: 10c0/5bf53ede3ae6a0e08956d72dab279ae90503f6b5a05298a6a5e6ef47d2fd1ab386aaf48fafa61ed07a0ebfe9e371772f1ccbe5c258dd765206a8218bf2eb79eb
+  languageName: node
+  linkType: hard
+
@@ -3250,7 +3205,19 @@ __metadata:
   languageName: node
   linkType: hard
 
-"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0, wrap-ansi@npm:^7.0.0":
+"why-is-node-running@npm:^2.3.0":
+  version: 2.3.0
+  resolution: "why-is-node-running@npm:2.3.0"
+  dependencies:
+    siginfo: "npm:^2.0.0"
+    stackback: "npm:0.0.2"
+  bin:
+    why-is-node-running: cli.js
+  checksum: 10c0/1cde0b01b827d2cf4cb11db962f3958b9175d5d9e7ac7361d1a7b0e2dc6069a263e69118bd974c4f6d0a890ef4eedfe34cf3d5167ec14203dbc9a18620537054
+  languageName: node
+  linkType: hard
+
+"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0":
   version: 7.0.0
   resolution: "wrap-ansi@npm:7.0.0"
   dependencies:
@@ -3283,27 +3250,18 @@ __metadata:
   languageName: node
   linkType: hard
 
-"wrappy@npm:1":
-  version: 1.0.2
-  resolution: "wrappy@npm:1.0.2"
-  checksum: 10c0/56fece1a4018c6a6c8e28fbc88c87e0fbf4ea8fd64fc6c63b18f4acc4bd13e0ad2515189786dd2c30d3eec9663d70f4ecf699330002f8ccb547e4a18231fc9f0
-  languageName: node
-  linkType: hard
-
-"write-file-atomic@npm:^6.0.0":
-  version: 6.0.0
-  resolution: "write-file-atomic@npm:6.0.0"
-  dependencies:
-    imurmurhash: "npm:^0.1.4"
-    signal-exit: "npm:^4.0.1"
-  checksum: 10c0/ae2f1c27474758a9aca92037df6c1dd9cb94c4e4983451210bd686bfe341f142662f6aa5913095e572ab037df66b1bfe661ed4ce4c0369ed0e8219e28e141786
-  languageName: node
-  linkType: hard
-
-"y18n@npm:^5.0.5":
-  version: 5.0.8
-  resolution: "y18n@npm:5.0.8"
-  checksum: 10c0/4df2842c36e468590c3691c894bc9cdbac41f520566e76e24f59401ba7d8b4811eb1e34524d57e54bc6d864bcb66baab7ffd9ca42bf1eda596618f9162b91249
+"ws@npm:^8.18.2":
+  version: 8.18.3
+  resolution: "ws@npm:8.18.3"
+  peerDependencies:
+    bufferutil: ^4.0.1
+    utf-8-validate: ">=5.0.2"
+  peerDependenciesMeta:
+    bufferutil:
+      optional: true
+    utf-8-validate:
+      optional: true
+  checksum: 10c0/eac918213de265ef7cb3d4ca348b891a51a520d839aa51cdb8ca93d4fa7ff9f6ccb339ccee89e4075324097f0a55157c89fa3f7147bde9d8d7e90335dc087b53
   languageName: node
   linkType: hard
 
@@ -3321,28 +3279,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"yargs-parser@npm:^21.1.1":
-  version: 21.1.1
-  resolution: "yargs-parser@npm:21.1.1"
-  checksum: 10c0/f84b5e48169479d2f402239c59f084cfd1c3acc197a05c59b98bab067452e6b3ea46d4dd8ba2985ba7b3d32a343d77df0debd6b343e5dae3da2aab2cdf5886b2
-  languageName: node
-  linkType: hard
-
-"yargs@npm:^17.7.2":
-  version: 17.7.2
-  resolution: "yargs@npm:17.7.2"
-  dependencies:
-    cliui: "npm:^8.0.1"
-    escalade: "npm:^3.1.1"
-    get-caller-file: "npm:^2.0.5"
-    require-directory: "npm:^2.1.1"
-    string-width: "npm:^4.2.3"
-    y18n: "npm:^5.0.5"
-    yargs-parser: "npm:^21.1.1"
-  checksum: 10c0/ccd7e723e61ad5965fffbb791366db689572b80cca80e0f96aad968dfff4156cd7cd1ad18607afe1046d8241e6fb2d6c08bf7fa7bfb5eaec818735d8feac8f05
-  languageName: node
-  linkType: hard
-
 "yocto-queue@npm:^1.0.0":
   version: 1.2.1
   resolution: "yocto-queue@npm:1.2.1"
diff --git a/bindings/rust/src/lib.rs b/bindings/rust/src/lib.rs
index dde819d7d..923542cdb 100644
--- a/bindings/rust/src/lib.rs
+++ b/bindings/rust/src/lib.rs
@@ -82,6 +82,7 @@ pub type Result<T> = std::result::Result<T, Error>;
 pub struct Builder {
     path: String,
     enable_mvcc: bool,
+    vfs: Option<String>,
 }
 
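A hypothetical usage sketch of the VFS selection this file's hunks introduce (the database path, error handling, and async scaffold are assumptions, not part of the patch):

    // "memory", "syscall", and (on Linux) "io_uring" map to the branches in
    // get_io() below; any other string falls back to platform IO.
    let db = Builder::new("app.db")
        .with_io("io_uring".to_string())
        .build()
        .await?;
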
 impl Builder {
@@ -90,6 +91,7 @@ impl Builder {
         Self {
             path: path.to_string(),
             enable_mvcc: false,
+            vfs: None,
         }
     }
 
@@ -98,25 +100,68 @@ impl Builder {
         self
     }
 
+    pub fn with_io(mut self, vfs: String) -> Self {
+        self.vfs = Some(vfs);
+        self
+    }
+
     /// Build the database.
     #[allow(unused_variables, clippy::arc_with_non_send_sync)]
     pub async fn build(self) -> Result<Database> {
-        match self.path.as_str() {
-            ":memory:" => {
-                let io: Arc<dyn turso_core::IO> = Arc::new(turso_core::MemoryIO::new());
-                let db = turso_core::Database::open_file(
-                    io,
-                    self.path.as_str(),
-                    self.enable_mvcc,
-                    true,
-                )?;
-                Ok(Database { inner: db })
+        let io = self.get_io()?;
+        let db = turso_core::Database::open_file(io, self.path.as_str(), self.enable_mvcc, true)?;
+        Ok(Database { inner: db })
+    }
+
+    fn get_io(&self) -> Result<Arc<dyn turso_core::IO>> {
+        let vfs_choice = self.vfs.as_deref().unwrap_or("");
+
+        if self.path == ":memory:" && vfs_choice.is_empty() {
+            return Ok(Arc::new(turso_core::MemoryIO::new()));
+        }
+
+        match vfs_choice {
+            "memory" => Ok(Arc::new(turso_core::MemoryIO::new())),
+            "syscall" => {
+                #[cfg(target_family = "unix")]
+                {
+                    Ok(Arc::new(
+                        turso_core::UnixIO::new()
+                            .map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
+                    ))
+                }
+                #[cfg(not(target_family = "unix"))]
+                {
+                    Ok(Arc::new(
+                        turso_core::PlatformIO::new()
+                            .map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
+                    ))
+                }
             }
-            path => {
-                let io: Arc<dyn turso_core::IO> = Arc::new(turso_core::PlatformIO::new()?);
-                let db = turso_core::Database::open_file(io, path, self.enable_mvcc, true)?;
-                Ok(Database { inner: db })
+            #[cfg(target_os = "linux")]
+            "io_uring" => Ok(Arc::new(
+                turso_core::UringIO::new()
+                    .map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
+            )),
+            #[cfg(not(target_os = "linux"))]
+            "io_uring" => Err(Error::SqlExecutionFailure(
+                "io_uring is only available on Linux targets".to_string(),
+            )),
+            "" => {
+                // Default behavior: memory for ":memory:", platform IO for files
+                if self.path == ":memory:" {
+                    Ok(Arc::new(turso_core::MemoryIO::new()))
+                } else {
+                    Ok(Arc::new(
+                        turso_core::PlatformIO::new()
+                            .map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
+                    ))
+                }
             }
+            _ => Ok(Arc::new(
+                turso_core::PlatformIO::new()
+                    .map_err(|e| Error::SqlExecutionFailure(e.to_string()))?,
+            )),
         }
     }
 }
diff --git a/cli/app.rs b/cli/app.rs
index f3ed8e56d..cb53c5f35 100644
--- a/cli/app.rs
+++ b/cli/app.rs
@@ -500,33 +500,7 @@ impl Limbo {
                 return Ok(());
             }
         }
-        if line.trim_start().starts_with("--") {
-            if let Some(remaining) = line.split_once('\n') {
-                let after_comment = remaining.1.trim();
-                if !after_comment.is_empty() {
-                    if after_comment.ends_with(';') {
-                        self.run_query(after_comment);
-                        if self.opts.echo {
-                            let _ = self.writeln(after_comment);
-                        }
-                        let conn = self.conn.clone();
-                        let runner = conn.query_runner(after_comment.as_bytes());
-                        for output in runner {
-                            if let Err(e) = self.print_query_result(after_comment, output, None) {
-                                let _ = self.writeln(e.to_string());
-                            }
-                        }
-                        self.reset_input();
-                        return self.handle_input_line(after_comment);
-                    } else {
-                        self.set_multiline_prompt();
-                        let _ = self.reset_line(line);
-                        return Ok(());
-                    }
-                }
-            }
-            return Ok(());
-        }
+        self.reset_line(line)?;
 
         if line.ends_with(';') {
             self.buffer_input(line);
@@ -1400,7 +1374,7 @@ impl Limbo {
         // FIXME: we don't yet support PRAGMA foreign_keys=OFF internally,
         // so for now this hacky boolean that decides not to emit it when cloning
         if fk {
-            writeln!(out, "PRAGMA foreign_keys=OFF")?;
+            writeln!(out, "PRAGMA foreign_keys=OFF;")?;
         }
         writeln!(out, "BEGIN TRANSACTION;")?;
         // FIXME: At this point, SQLite executes the following:
diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs
index 3687bdad1..1c501363a 100644
--- a/core/incremental/compiler.rs
+++ b/core/incremental/compiler.rs
@@ -5,16 +5,221 @@
 //!
 //! Based on the DBSP paper: "DBSP: Automatic Incremental View Maintenance for Rich Query Languages"
 
+use crate::incremental::dbsp::Delta;
 use crate::incremental::expr_compiler::CompiledExpression;
+use crate::incremental::hashable_row::HashableRow;
 use crate::incremental::operator::{
-    Delta, FilterOperator, FilterPredicate, IncrementalOperator, ProjectOperator,
+    EvalState, FilterOperator, FilterPredicate, IncrementalOperator, InputOperator, ProjectOperator,
 };
+use crate::storage::btree::{BTreeCursor, BTreeKey};
 // Note: logical module must be made pub(crate) in translate/mod.rs
-use crate::translate::logical::{BinaryOperator, LogicalExpr, LogicalPlan, SchemaRef};
-use crate::types::Value;
-use crate::{LimboError, Result};
+use crate::translate::logical::{
+    BinaryOperator, LogicalExpr, LogicalPlan, LogicalSchema, SchemaRef,
+};
+use crate::types::{IOResult, SeekKey, SeekOp, SeekResult, Value};
+use crate::Pager;
+use crate::{return_and_restore_if_io, return_if_io, LimboError, Result};
 use std::collections::HashMap;
 use std::fmt::{self, Display, Formatter};
+use std::rc::Rc;
+use std::sync::Arc;
+
+// The state table is always a key-value store with 3 columns: key, state, and weight.
+const OPERATOR_COLUMNS: usize = 3;
+
+/// State machine for writing a row to the materialized view
+#[derive(Debug)]
+pub enum WriteViewRow {
+    /// Initial empty state
+    Empty,
+
+    /// Reading existing record to get current weight
+    GetRecord,
+
+    /// Deleting the row (when final weight <= 0)
+    Delete,
+
+    /// Inserting/updating the row with new weight
+    Insert {
+        /// The final weight to write
+        final_weight: isize,
+    },
+
+    /// Completed processing this row
+    Done,
+}
+
+impl WriteViewRow {
+    fn new() -> Self {
+        Self::Empty
+    }
+    fn write_row(
+        &mut self,
+        cursor: &mut BTreeCursor,
+        row: HashableRow,
+        weight: isize,
+    ) -> Result<IOResult<()>> {
+        loop {
+            match self {
+                WriteViewRow::Empty => {
+                    let key = SeekKey::TableRowId(row.rowid);
+                    let res = return_if_io!(cursor.seek(key, SeekOp::GE { eq_only: true }));
+                    match res {
+                        SeekResult::Found => *self = WriteViewRow::GetRecord,
+                        _ => {
+                            *self = WriteViewRow::Insert {
+                                final_weight: weight,
+                            }
+                        }
+                    }
+                }
+                WriteViewRow::GetRecord => {
+                    let existing_record = return_if_io!(cursor.record());
+                    let r = existing_record.ok_or_else(|| {
+                        crate::LimboError::InternalError(format!(
+                            "Found rowid {} in storage but could not read record",
+                            row.rowid
+                        ))
+                    })?;
+                    let values = r.get_values();
+
+                    // last value should contain the weight
+                    let existing_weight = match values.last() {
+                        Some(ref_val) => match ref_val.to_owned() {
+                            Value::Integer(w) => w as isize,
+                            _ => {
+                                return Err(crate::LimboError::InternalError(format!(
+                                    "Invalid weight value in storage for rowid {}",
+                                    row.rowid
+                                )))
+                            }
+                        },
+                        None => {
+                            return Err(crate::LimboError::InternalError(format!(
+                                "No weight value found in storage for rowid {}",
+                                row.rowid
+                            )))
+                        }
+                    };
+                    let final_weight = existing_weight + weight;
+                    if final_weight <= 0 {
+                        *self = WriteViewRow::Delete
+                    } else {
+                        *self = WriteViewRow::Insert { final_weight }
+                    }
+                }
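A worked example of the weight bookkeeping above (illustrative numbers, not part of the patch):

                // e.g. a view row currently stored with weight 2:
                //   delta weight -2 => final_weight 0 => Delete (row leaves the view)
                //   delta weight -1 => final_weight 1 => Insert { final_weight: 1 }
                //   delta weight +1 => final_weight 3 => Insert { final_weight: 3 }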
+                WriteViewRow::Delete => {
+                    // Delete the row. Important: when delete returns I/O, the btree operation
+                    // has already completed in memory, so mark as Done to avoid retry
+                    *self = WriteViewRow::Done;
+                    return_if_io!(cursor.delete());
+                }
+                WriteViewRow::Insert { final_weight } => {
+                    let key = SeekKey::TableRowId(row.rowid);
+                    return_if_io!(cursor.seek(key, SeekOp::GE { eq_only: true }));
+
+                    // Create the record values: row values + weight
+                    let mut values = row.values.clone();
+                    values.push(Value::Integer(*final_weight as i64));
+
+                    // Create an ImmutableRecord from the values
+                    let immutable_record =
+                        crate::types::ImmutableRecord::from_values(&values, values.len());
+                    let btree_key = BTreeKey::new_table_rowid(row.rowid, Some(&immutable_record));
+                    // Insert the row. Important: when insert returns I/O, the btree operation
+                    // has already completed in memory, so mark as Done to avoid retry
+                    *self = WriteViewRow::Done;
+                    return_if_io!(cursor.insert(&btree_key));
+                }
+                WriteViewRow::Done => {
+                    break;
+                }
+            }
+        }
+        Ok(IOResult::Done(()))
+    }
+}
+
+/// State machine for commit operations
+pub enum CommitState {
+    /// Initial state - ready to start commit
+    Init,
+
+    /// Running circuit with commit_operators flag set to true
+    CommitOperators {
+        /// Execute state for running the circuit
+        execute_state: Box<ExecuteState>,
+        /// Persistent cursor for operator state btree (internal_state_root)
+        state_cursor: Box<BTreeCursor>,
+    },
+
+    /// Updating the materialized view with the delta
+    UpdateView {
+        /// Delta to write to the view
+        delta: Delta,
+        /// Current index in delta.changes being processed
+        current_index: usize,
+        /// State for writing individual rows
+        write_row_state: WriteViewRow,
+        /// Cursor for view data btree - created fresh for each row
+        view_cursor: Box<BTreeCursor>,
+    },
+}
+
+impl std::fmt::Debug for CommitState {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Init => write!(f, "Init"),
+            Self::CommitOperators { execute_state, .. } => f
+                .debug_struct("CommitOperators")
+                .field("execute_state", execute_state)
+                .field("has_state_cursor", &true)
+                .finish(),
+            Self::UpdateView {
+                delta,
+                current_index,
+                write_row_state,
+                ..
+            } => f
+                .debug_struct("UpdateView")
+                .field("delta", delta)
+                .field("current_index", current_index)
+                .field("write_row_state", write_row_state)
+                .field("has_view_cursor", &true)
+                .finish(),
+        }
+    }
+}
+
+/// State machine for circuit execution across I/O operations
+/// Similar to EvalState but for tracking execution state through the circuit
+#[derive(Debug)]
+pub enum ExecuteState {
+    /// Empty state so we can allocate the space without executing
+    Uninitialized,
+
+    /// Initial state - starting circuit execution
+    Init {
+        /// Input deltas to process
+        input_data: DeltaSet,
+    },
+
+    /// Processing multiple inputs (for recursive node processing)
+    ProcessingInputs {
+        /// Collection of (node_id, state) pairs to process
+        input_states: Vec<(usize, ExecuteState)>,
+        /// Current index being processed
+        current_index: usize,
+        /// Collected deltas from processed inputs
+        input_deltas: Vec<Delta>,
+    },
+
+    /// Processing a specific node in the circuit
+    ProcessingNode {
+        /// Node's evaluation state (includes the delta in its Init state)
+        eval_state: Box<EvalState>,
+    },
+}
 
 /// A set of deltas for multiple tables/operators
 /// This provides a cleaner API for passing deltas through circuit execution
@@ -39,6 +244,11 @@ impl DeltaSet {
         }
     }
 
+    /// Create a DeltaSet from a HashMap
+    pub fn from_map(deltas: HashMap<String, Delta>) -> Self {
+        Self { deltas }
+    }
+
     /// Add a delta for a table
     pub fn insert(&mut self, table_name: String, delta: Delta) {
         self.deltas.insert(table_name, delta);
@@ -96,8 +306,8 @@ pub struct DbspNode {
     pub operator: DbspOperator,
     /// Input nodes (edges in the DAG)
     pub inputs: Vec<usize>,
-    /// The actual executable operator (if applicable)
-    pub executable: Option<Box<dyn IncrementalOperator>>,
+    /// The actual executable operator
+    pub executable: Box<dyn IncrementalOperator>,
 }
 
 impl std::fmt::Debug for DbspNode {
@@ -106,11 +316,51 @@ impl std::fmt::Debug for DbspNode {
             .field("id", &self.id)
             .field("operator", &self.operator)
             .field("inputs", &self.inputs)
-            .field("has_executable", &self.executable.is_some())
+            .field("has_executable", &true)
             .finish()
     }
 }
 
+impl DbspNode {
+    fn process_node(
+        &mut self,
+        pager: Rc<Pager>,
+        eval_state: &mut EvalState,
+        root_page: usize,
+        commit_operators: bool,
+        state_cursor: Option<&mut Box<BTreeCursor>>,
+    ) -> Result<IOResult<Delta>> {
+        // Process delta using the executable operator
+        let op = &mut self.executable;
+
+        // Use provided cursor or create a local one
+        let mut local_cursor;
+        let cursor = if let Some(cursor) = state_cursor {
+            cursor.as_mut()
+        } else {
+            // Create a local cursor if none was provided
+            local_cursor = BTreeCursor::new_table(None, pager.clone(), root_page, OPERATOR_COLUMNS);
+            &mut local_cursor
+        };
+
+        let state = if commit_operators {
+            // Clone the delta from eval_state - don't extract it
+            // in case we need to re-execute due to I/O
+            let delta = match eval_state {
+                EvalState::Init { delta } => delta.clone(),
+                _ => panic!("commit can only be called when eval_state is in Init state"),
+            };
+            let result = return_if_io!(op.commit(delta, cursor));
+            // After successful commit, move state to Done
+            *eval_state = EvalState::Done;
+            result
+        } else {
+            return_if_io!(op.eval(eval_state, cursor))
+        };
+        Ok(IOResult::Done(state))
+    }
+}
+
 /// Represents a complete DBSP circuit (DAG of operators)
 #[derive(Debug)]
 pub struct DbspCircuit {
@@ -120,24 +370,48 @@ pub struct DbspCircuit {
     next_id: usize,
     /// Root node ID (the final output)
     pub(super) root: Option<usize>,
+    /// Output schema of the circuit (schema of the root node)
+    pub(super) output_schema: SchemaRef,
+
+    /// State machine for commit operation
+    commit_state: CommitState,
+
+    /// Root page for the main materialized view data
+    pub(super) main_data_root: usize,
+    /// Root page for internal DBSP state
+    pub(super) internal_state_root: usize,
 }
 
 impl DbspCircuit {
-    /// Create a new empty circuit
-    pub fn new() -> Self {
+    /// Create a new empty circuit with initial empty schema
+    /// The actual output schema will be set when the root node is established
+    pub fn new(main_data_root: usize, internal_state_root: usize) -> Self {
+        // Start with an empty schema - will be updated when root is set
+        let empty_schema = Arc::new(LogicalSchema::new(vec![]));
         Self {
             nodes: HashMap::new(),
             next_id: 0,
             root: None,
+            output_schema: empty_schema,
+            commit_state: CommitState::Init,
+            main_data_root,
+            internal_state_root,
         }
     }
 
+    /// Set the root node and update the output schema
+    fn set_root(&mut self, root_id: usize, schema: SchemaRef) {
+        self.root = Some(root_id);
+        self.output_schema = schema;
+    }
+
+    /// Get the current materialized state by reading from btree
     /// Add a node to the circuit
     fn add_node(
         &mut self,
        operator: DbspOperator,
         inputs: Vec<usize>,
-        executable: Option<Box<dyn IncrementalOperator>>,
+        executable: Box<dyn IncrementalOperator>,
     ) -> usize {
         let id = self.next_id;
         self.next_id += 1;
@@ -153,11 +427,21 @@ impl DbspCircuit {
         id
     }
 
-    /// Initialize the circuit with base data. Should be called once before processing deltas.
-    /// If the database is restarting with materialized views, this can be skipped.
-    pub fn initialize(&mut self, input_data: HashMap<String, Delta>) -> Result<Delta> {
+    pub fn run_circuit(
+        &mut self,
+        pager: Rc<Pager>,
+        execute_state: &mut ExecuteState,
+        commit_operators: bool,
+        state_cursor: &mut Box<BTreeCursor>,
+    ) -> Result<IOResult<Delta>> {
         if let Some(root_id) = self.root {
-            self.initialize_node(root_id, &input_data)
+            self.execute_node(
+                root_id,
+                pager,
+                execute_state,
+                commit_operators,
+                Some(state_cursor),
+            )
         } else {
             Err(LimboError::ParseError(
                 "Circuit has no root node".to_string(),
@@ -165,80 +449,19 @@ impl DbspCircuit {
         }
     }
 
-    /// Initialize a specific node and its dependencies
-    fn initialize_node(
-        &mut self,
-        node_id: usize,
-        input_data: &HashMap<String, Delta>,
-    ) -> Result<Delta> {
-        // Clone to avoid borrow checker issues
-        let inputs = self
-            .nodes
-            .get(&node_id)
-            .ok_or_else(|| LimboError::ParseError("Node not found".to_string()))?
-            .inputs
-            .clone();
-
-        // Initialize inputs first
-        let mut input_deltas = Vec::new();
-        for input_id in inputs {
-            let delta = self.initialize_node(input_id, input_data)?;
-            input_deltas.push(delta);
-        }
-
-        // Get mutable reference to node
-        let node = self
-            .nodes
-            .get_mut(&node_id)
-            .ok_or_else(|| LimboError::ParseError("Node not found".to_string()))?;
-
-        // Initialize based on operator type
-        let result = match &node.operator {
-            DbspOperator::Input { name, .. } => {
-                // Get data from input map
-                input_data.get(name).cloned().unwrap_or_else(Delta::new)
-            }
-            DbspOperator::Filter { .. }
-            | DbspOperator::Projection { .. }
-            | DbspOperator::Aggregate { .. } => {
-                // Initialize the executable operator
-                if let Some(ref mut op) = node.executable {
-                    if !input_deltas.is_empty() {
-                        let input_delta = input_deltas[0].clone();
-                        op.initialize(input_delta);
-                        op.get_current_state()
-                    } else {
-                        Delta::new()
-                    }
-                } else {
-                    // If no executable, pass through the input
-                    if !input_deltas.is_empty() {
-                        input_deltas[0].clone()
-                    } else {
-                        Delta::new()
-                    }
-                }
-            }
-        };
-
-        Ok(result)
-    }
-
     /// Execute the circuit with incremental input data (deltas).
-    /// Call initialize() first for initial data, then use execute() for updates.
     ///
     /// # Arguments
-    /// * `input_data` - The committed deltas to process
-    /// * `uncommitted_data` - Uncommitted transaction deltas that should be visible
-    ///                        during this execution but not stored in operators.
-    ///                        Use DeltaSet::empty() for no uncommitted changes.
+    /// * `pager` - Pager for btree access
+    /// * `context` - Execution context for tracking operator states
+    /// * `execute_state` - State machine containing input deltas and tracking execution progress
     pub fn execute(
-        &self,
-        input_data: HashMap<String, Delta>,
-        uncommitted_data: DeltaSet,
-    ) -> Result<Delta> {
+        &mut self,
+        pager: Rc<Pager>,
+        execute_state: &mut ExecuteState,
+    ) -> Result<IOResult<Delta>> {
         if let Some(root_id) = self.root {
-            self.execute_node(root_id, &input_data, &uncommitted_data)
+            self.execute_node(root_id, pager, execute_state, false, None)
         } else {
             Err(LimboError::ParseError(
                 "Circuit has no root node".to_string(),
@@ -246,146 +469,243 @@ impl DbspCircuit {
         }
     }
 
-    /// Commit deltas to the circuit, updating internal operator state.
+    /// Commit deltas to the circuit, updating internal operator state and persisting to btree.
     /// This should be called after execute() when you want to make changes permanent.
     ///
     /// # Arguments
     /// * `input_data` - The deltas to commit (same as what was passed to execute)
-    pub fn commit(&mut self, input_data: HashMap<String, Delta>) -> Result<()> {
-        if let Some(root_id) = self.root {
-            self.commit_node(root_id, &input_data)?;
-        }
-        Ok(())
-    }
-
-    /// Commit a specific node in the circuit
-    fn commit_node(
+    /// * `pager` - Pager for creating cursors to the btrees
+    pub fn commit(
         &mut self,
-        node_id: usize,
-        input_data: &HashMap<String, Delta>,
-    ) -> Result<Delta> {
-        // Clone to avoid borrow checker issues
-        let inputs = self
-            .nodes
-            .get(&node_id)
-            .ok_or_else(|| LimboError::ParseError("Node not found".to_string()))?
-            .inputs
-            .clone();
-
-        // Process inputs first
-        let mut input_deltas = Vec::new();
-        for input_id in inputs {
-            let delta = self.commit_node(input_id, input_data)?;
-            input_deltas.push(delta);
+        input_data: HashMap<String, Delta>,
+        pager: Rc<Pager>,
+    ) -> Result<IOResult<Delta>> {
+        // No root means nothing to commit
+        if self.root.is_none() {
+            return Ok(IOResult::Done(Delta::new()));
         }
 
-        // Get mutable reference to node
-        let node = self
-            .nodes
-            .get_mut(&node_id)
-            .ok_or_else(|| LimboError::ParseError("Node not found".to_string()))?;
+        // Get btree root pages
        let main_data_root = self.main_data_root;
 
-        // Commit based on operator type
-        let result = match &node.operator {
-            DbspOperator::Input { name, .. } => {
-                // For input nodes, just return the committed delta
-                input_data.get(name).cloned().unwrap_or_else(Delta::new)
-            }
-            DbspOperator::Filter { .. }
-            | DbspOperator::Projection { .. }
-            | DbspOperator::Aggregate { .. } => {
-                // Commit the delta to the executable operator
-                if let Some(ref mut op) = node.executable {
-                    if !input_deltas.is_empty() {
-                        let input_delta = input_deltas[0].clone();
-                        // Commit updates state and returns the output delta
-                        op.commit(input_delta)
+        // Add 1 for the weight column that we store in the btree
+        let num_columns = self.output_schema.columns.len() + 1;
+
+        // Convert input_data to DeltaSet once, outside the loop
+        let input_delta_set = DeltaSet::from_map(input_data);
+
+        loop {
+            // Take ownership of the state for processing to avoid borrow-checker issues:
+            // we have to call run_circuit, which takes &mut self, so we cannot use
+            // return_if_io here and instead use the variant that restores the state before returning.
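A minimal sketch of what `return_and_restore_if_io!` is assumed to do at the call sites below (illustrative only; the real macro is imported at the top of this file and its exact expansion is not shown in this patch):

            // match $call {
            //     Ok(IOResult::Done(v)) => v,
            //     Ok(IOResult::IO(io)) => {
            //         *$state_slot = $saved_state;     // put the in-flight state back
            //         return Ok(IOResult::IO(io));     // yield; the caller will re-poll
            //     }
            //     Err(e) => return Err(e),
            // }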
+            let mut state = std::mem::replace(&mut self.commit_state, CommitState::Init);
+            match &mut state {
+                CommitState::Init => {
+                    // Create state cursor when entering CommitOperators state
+                    let state_cursor = Box::new(BTreeCursor::new_table(
+                        None,
+                        pager.clone(),
+                        self.internal_state_root,
+                        OPERATOR_COLUMNS,
+                    ));
+
+                    self.commit_state = CommitState::CommitOperators {
+                        execute_state: Box::new(ExecuteState::Init {
+                            input_data: input_delta_set.clone(),
+                        }),
+                        state_cursor,
+                    };
+                }
+                CommitState::CommitOperators {
+                    ref mut execute_state,
+                    ref mut state_cursor,
+                } => {
+                    let delta = return_and_restore_if_io!(
+                        &mut self.commit_state,
+                        state,
+                        self.run_circuit(pager.clone(), execute_state, true, state_cursor)
+                    );
+
+                    // Create view cursor when entering UpdateView state
+                    let view_cursor = Box::new(BTreeCursor::new_table(
+                        None,
+                        pager.clone(),
+                        main_data_root,
+                        num_columns,
+                    ));
+
+                    self.commit_state = CommitState::UpdateView {
+                        delta,
+                        current_index: 0,
+                        write_row_state: WriteViewRow::new(),
+                        view_cursor,
+                    };
+                }
+                CommitState::UpdateView {
+                    delta,
+                    current_index,
+                    write_row_state,
+                    view_cursor,
+                } => {
+                    if *current_index >= delta.changes.len() {
+                        self.commit_state = CommitState::Init;
+                        let delta = std::mem::take(delta);
+                        return Ok(IOResult::Done(delta));
                     } else {
-                        Delta::new()
-                    }
-                } else {
-                    // If no executable, pass through the input
-                    if !input_deltas.is_empty() {
-                        input_deltas[0].clone()
-                    } else {
-                        Delta::new()
+                        let (row, weight) = delta.changes[*current_index].clone();
+
+                        // If we're starting a new row (Empty state), we need a fresh cursor
+                        // due to btree cursor state machine limitations
+                        if matches!(write_row_state, WriteViewRow::Empty) {
+                            *view_cursor = Box::new(BTreeCursor::new_table(
+                                None,
+                                pager.clone(),
+                                main_data_root,
+                                num_columns,
+                            ));
+                        }
+
+                        return_and_restore_if_io!(
+                            &mut self.commit_state,
+                            state,
+                            write_row_state.write_row(view_cursor, row, weight)
+                        );
+
+                        // Move to next row
+                        let delta = std::mem::take(delta);
+                        // Take ownership of view_cursor - we'll create a new one for next row if needed
+                        let view_cursor = std::mem::replace(
+                            view_cursor,
+                            Box::new(BTreeCursor::new_table(
+                                None,
+                                pager.clone(),
+                                main_data_root,
+                                num_columns,
+                            )),
+                        );
+
+                        self.commit_state = CommitState::UpdateView {
+                            delta,
+                            current_index: *current_index + 1,
+                            write_row_state: WriteViewRow::new(),
+                            view_cursor,
+                        };
                     }
                 }
             }
-        };
-        Ok(result)
+        }
     }
 
     /// Execute a specific node in the circuit
     fn execute_node(
-        &self,
+        &mut self,
         node_id: usize,
-        input_data: &HashMap<String, Delta>,
-        uncommitted_data: &DeltaSet,
-    ) -> Result<Delta> {
-        // Clone to avoid borrow checker issues
-        let inputs = self
-            .nodes
-            .get(&node_id)
-            .ok_or_else(|| LimboError::ParseError("Node not found".to_string()))?
-            .inputs
-            .clone();
+        pager: Rc<Pager>,
+        execute_state: &mut ExecuteState,
+        commit_operators: bool,
+        state_cursor: Option<&mut Box<BTreeCursor>>,
+    ) -> Result<IOResult<Delta>> {
+        loop {
+            match execute_state {
+                ExecuteState::Uninitialized => {
+                    panic!("Trying to execute an uninitialized ExecuteState state machine");
+                }
+                ExecuteState::Init { input_data } => {
+                    let node = self
+                        .nodes
+                        .get(&node_id)
+                        .ok_or_else(|| LimboError::ParseError("Node not found".to_string()))?;
 
-        // Process inputs first
-        let mut input_deltas = Vec::new();
-        for input_id in inputs {
-            let delta = self.execute_node(input_id, input_data, uncommitted_data)?;
-            input_deltas.push(delta);
+                    // Check if this is an Input node
+                    match &node.operator {
+                        DbspOperator::Input { name, .. } => {
+                            // Input nodes get their delta directly from input_data
+                            let delta = input_data.get(name);
+                            *execute_state = ExecuteState::ProcessingNode {
+                                eval_state: Box::new(EvalState::Init { delta }),
+                            };
+                        }
+                        _ => {
+                            // Non-input nodes need to process their inputs
+                            let input_data = std::mem::take(input_data);
+                            let input_node_ids = node.inputs.clone();
+
+                            let input_states: Vec<(usize, ExecuteState)> = input_node_ids
+                                .iter()
+                                .map(|&input_id| {
+                                    (
+                                        input_id,
+                                        ExecuteState::Init {
+                                            input_data: input_data.clone(),
+                                        },
+                                    )
+                                })
+                                .collect();
+
+                            *execute_state = ExecuteState::ProcessingInputs {
+                                input_states,
+                                current_index: 0,
+                                input_deltas: Vec::new(),
+                            };
+                        }
+                    }
+                }
+                ExecuteState::ProcessingInputs {
+                    input_states,
+                    current_index,
+                    input_deltas,
+                } => {
+                    if *current_index >= input_states.len() {
+                        // All inputs processed, check we have exactly one delta
+                        // (Input nodes never reach here since they go straight to ProcessingNode)
+                        let delta = if input_deltas.is_empty() {
+                            return Err(LimboError::InternalError(
+                                "execute() cannot be called without a Delta".to_string(),
+                            ));
+                        } else if input_deltas.len() > 1 {
+                            return Err(LimboError::InternalError(
+                                format!("Until joins are supported, only one delta is expected. Got {} deltas", input_deltas.len()),
+                            ));
+                        } else {
+                            input_deltas[0].clone()
+                        };
+
+                        *execute_state = ExecuteState::ProcessingNode {
+                            eval_state: Box::new(EvalState::Init { delta }),
+                        };
+                    } else {
+                        // Get the (node_id, state) pair for the current index
+                        let (input_node_id, input_state) = &mut input_states[*current_index];
+
+                        let delta = return_if_io!(self.execute_node(
+                            *input_node_id,
+                            pager.clone(),
+                            input_state,
+                            commit_operators,
+                            None // Input nodes don't need state cursor
+                        ));
+                        input_deltas.push(delta);
+                        *current_index += 1;
+                    }
+                }
+                ExecuteState::ProcessingNode { eval_state } => {
+                    // Get mutable reference to node for eval
+                    let node = self
+                        .nodes
+                        .get_mut(&node_id)
+                        .ok_or_else(|| LimboError::ParseError("Node not found".to_string()))?;
+
+                    let output_delta = return_if_io!(node.process_node(
+                        pager.clone(),
+                        eval_state,
+                        self.internal_state_root,
+                        commit_operators,
+                        state_cursor,
+                    ));
+                    return Ok(IOResult::Done(output_delta));
+                }
+            }
         }
-
-        // Get reference to node (read-only since we're using eval, not commit)
-        let node = self
-            .nodes
-            .get(&node_id)
-            .ok_or_else(|| LimboError::ParseError("Node not found".to_string()))?;
-
-        // Execute based on operator type
-        let result = match &node.operator {
-            DbspOperator::Input { name, .. } => {
-                // Get committed data from input map and merge with uncommitted if present
-                let committed = input_data.get(name).cloned().unwrap_or_else(Delta::new);
-                let uncommitted = uncommitted_data.get(name);
-
-                // If there's uncommitted data for this table, merge it with committed
-                if !uncommitted.is_empty() {
-                    let mut combined = committed;
-                    combined.merge(&uncommitted);
-                    combined
-                } else {
-                    committed
-                }
-            }
-            DbspOperator::Filter { .. }
-            | DbspOperator::Projection { .. }
-            | DbspOperator::Aggregate { .. } => {
-                // Process delta using the executable operator
-                if let Some(ref op) = node.executable {
-                    if !input_deltas.is_empty() {
-                        // Process the delta through the operator
-                        let input_delta = input_deltas[0].clone();
-
-                        // Use eval to compute result without modifying state
-                        // The uncommitted data has already been merged into input_delta if needed
-                        op.eval(input_delta, None)
-                    } else {
-                        Delta::new()
-                    }
-                } else {
-                    // If no executable, pass through the input
-                    if !input_deltas.is_empty() {
-                        input_deltas[0].clone()
-                    } else {
-                        Delta::new()
-                    }
-                }
-            }
-        };
-        Ok(result)
     }
 }
 
@@ -440,16 +760,17 @@ pub struct DbspCompiler {
 
 impl DbspCompiler {
     /// Create a new DBSP compiler
-    pub fn new() -> Self {
+    pub fn new(main_data_root: usize, internal_state_root: usize) -> Self {
         Self {
-            circuit: DbspCircuit::new(),
+            circuit: DbspCircuit::new(main_data_root, internal_state_root),
         }
     }
 
     /// Compile a logical plan to a DBSP circuit
     pub fn compile(mut self, plan: &LogicalPlan) -> Result<DbspCircuit> {
         let root_id = self.compile_plan(plan)?;
-        self.circuit.root = Some(root_id);
+        let output_schema = plan.schema().clone();
+        self.circuit.set_root(root_id, output_schema);
         Ok(self.circuit)
     }
 
@@ -486,10 +807,8 @@ impl DbspCompiler {
             .collect();
 
         // Create the ProjectOperator
-        let executable: Option<Box<dyn IncrementalOperator>> =
-            ProjectOperator::from_compiled(compiled_exprs, aliases, input_column_names, output_column_names)
-                .ok()
-                .map(|op| Box::new(op) as Box<dyn IncrementalOperator>);
+        let executable: Box<dyn IncrementalOperator> =
+            Box::new(ProjectOperator::from_compiled(compiled_exprs, aliases, input_column_names, output_column_names)?);
 
         // Create projection node
         let node_id = self.circuit.add_node(
@@ -526,7 +845,7 @@ impl DbspCompiler {
         let node_id = self.circuit.add_node(
             DbspOperator::Filter { predicate: dbsp_predicate },
             vec![input_id],
-            Some(executable),
+            executable,
         );
         Ok(node_id)
     }
@@ -621,15 +940,16 @@ impl DbspCompiler {
             }
         }
 
-        // Create the AggregateOperator
+        // Create the AggregateOperator with a unique operator_id
+        // Use the next_node_id as the operator_id to ensure uniqueness
+        let operator_id = self.circuit.next_id;
         use crate::incremental::operator::AggregateOperator;
-        let executable: Option<Box<dyn IncrementalOperator>> = Some(
-            Box::new(AggregateOperator::new(
-                group_by_columns,
-                aggregate_functions.clone(),
-                input_column_names,
-            ))
-        );
+        let executable: Box<dyn IncrementalOperator> = Box::new(AggregateOperator::new(
+            operator_id, // Use next_node_id as operator_id
+            group_by_columns,
+            aggregate_functions.clone(),
+            input_column_names,
+        ));
 
         // Create aggregate node
         let node_id = self.circuit.add_node(
@@ -644,14 +964,17 @@ impl DbspCompiler {
             Ok(node_id)
         }
         LogicalPlan::TableScan(scan) => {
-            // Create input node (no executable needed for input)
+            // Create input node with InputOperator for uniform handling
+            let executable: Box<dyn IncrementalOperator> =
+                Box::new(InputOperator::new(scan.table_name.clone()));
+
             let node_id = self.circuit.add_node(
                 DbspOperator::Input {
                     name: scan.table_name.clone(),
                     schema: scan.schema.clone(),
                 },
                 vec![],
-                None,
+                executable,
             );
             Ok(node_id)
         }
@@ -925,10 +1248,15 @@ impl DbspCompiler {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::incremental::operator::{Delta, FilterOperator, FilterPredicate};
+    use crate::incremental::dbsp::Delta;
+    use crate::incremental::operator::{FilterOperator, FilterPredicate};
     use crate::schema::{BTreeTable, Column as SchemaColumn, Schema, Type};
+    use crate::storage::pager::CreateBTreeFlags;
     use crate::translate::logical::LogicalPlanBuilder;
     use crate::translate::logical::LogicalSchema;
+    use crate::util::IOExt;
+    use crate::{Database, MemoryIO, Pager, IO};
+    use std::rc::Rc;
     use std::sync::Arc;
     use turso_parser::ast;
     use turso_parser::parser::Parser;
@@ -981,17 +1309,76 @@ mod tests {
             ],
             has_rowid: true,
             is_strict: false,
-            has_autoincrement: false,
+            has_autoincrement:false,
             unique_sets: None,
         };
         schema.add_btree_table(Arc::new(users_table));
 
+        let sales_table = BTreeTable {
+            name: "sales".to_string(),
+            root_page: 2,
+            primary_key_columns: vec![],
+            columns: vec![
+                SchemaColumn {
+                    name: Some("product_id".to_string()),
+                    ty: Type::Integer,
+                    ty_str: "INTEGER".to_string(),
+                    primary_key: false,
+                    is_rowid_alias: false,
+                    notnull: false,
+                    default: None,
+                    unique: false,
+                    collation: None,
+                    hidden: false,
+                },
+                SchemaColumn {
+                    name: Some("amount".to_string()),
+                    ty: Type::Integer,
+                    ty_str: "INTEGER".to_string(),
+                    primary_key: false,
+                    is_rowid_alias: false,
+                    notnull: false,
+                    default: None,
+                    unique: false,
+                    collation: None,
+                    hidden: false,
+                },
+            ],
+            has_rowid: true,
+            is_strict: false,
+            has_autoincrement:false,
+            unique_sets: None,
+        };
+        schema.add_btree_table(Arc::new(sales_table));
+
         schema
     }};
     }
 
+    fn setup_btree_for_circuit() -> (Rc<Pager>, usize, usize) {
+        let io: Arc<dyn IO> = Arc::new(MemoryIO::new());
+        let db = Database::open_file(io.clone(), ":memory:", false, false).unwrap();
+        let conn = db.connect().unwrap();
+        let pager = conn.pager.borrow().clone();
+
+        let _ = pager.io.block(|| pager.allocate_page1()).unwrap();
+
+        let main_root_page = pager
+            .io
+            .block(|| pager.btree_create(&CreateBTreeFlags::new_table()))
+            .unwrap() as usize;
+
+        let dbsp_state_page = pager
+            .io
+            .block(|| pager.btree_create(&CreateBTreeFlags::new_table()))
+            .unwrap() as usize;
+
+        (pager, main_root_page, dbsp_state_page)
+    }
+
     // Macro to compile SQL to DBSP circuit
     macro_rules! compile_sql {
         ($sql:expr) => {{
+            let (pager, main_root_page, dbsp_state_page) = setup_btree_for_circuit();
             let schema = test_schema!();
             let mut parser = Parser::new($sql.as_bytes());
             let cmd = parser
@@ -1003,7 +1390,12 @@ mod tests {
                 ast::Cmd::Stmt(stmt) => {
                     let mut builder = LogicalPlanBuilder::new(&schema);
                     let logical_plan = builder.build_statement(&stmt).unwrap();
-                    DbspCompiler::new().compile(&logical_plan).unwrap()
+                    (
+                        DbspCompiler::new(main_root_page, dbsp_state_page)
+                            .compile(&logical_plan)
+                            .unwrap(),
+                        pager,
+                    )
                 }
                 _ => panic!("Only SQL statements are supported"),
             }
@@ -1109,40 +1501,72 @@ mod tests {
         circuit.nodes.get(&current_id).expect("Node not found")
     }
 
-    // Helper to get the current accumulated state of the circuit (from the root operator)
-    // This returns the internal state including bookkeeping entries
-    fn get_current_state(circuit: &DbspCircuit) -> Result<Delta> {
-        if let Some(root_id) = circuit.root {
-            let node = circuit
-                .nodes
-                .get(&root_id)
-                .ok_or_else(|| LimboError::ParseError("Root node not found".to_string()))?;
-
-            if let Some(ref executable) = node.executable {
-                Ok(executable.get_current_state())
-            } else {
-                // Input nodes don't have executables but also don't have state
-                Ok(Delta::new())
-            }
-        } else {
-            Err(LimboError::ParseError(
-                "Circuit has no root node".to_string(),
-            ))
+    // Helper function for tests to execute circuit and extract the Delta result
+    #[cfg(test)]
+    fn test_execute(
+        circuit: &mut DbspCircuit,
+        inputs: HashMap<String, Delta>,
+        pager: Rc<Pager>,
+    ) -> Result<Delta> {
+        let mut execute_state = ExecuteState::Init {
+            input_data: DeltaSet::from_map(inputs),
+        };
+        match circuit.execute(pager, &mut execute_state)? {
+            IOResult::Done(delta) => Ok(delta),
+            IOResult::IO(_) => panic!("Unexpected I/O in test"),
         }
     }
 
-    // Helper to create a DeltaSet from a HashMap (for tests)
-    fn delta_set_from_map(map: HashMap<String, Delta>) -> DeltaSet {
-        let mut delta_set = DeltaSet::new();
-        for (key, value) in map {
-            delta_set.insert(key, value);
+    // Helper to get the committed BTree state from main_data_root
+    // This reads the actual persisted data from the BTree
+    #[cfg(test)]
+    fn get_current_state(pager: Rc<Pager>, circuit: &DbspCircuit) -> Result<Delta> {
+        let mut delta = Delta::new();
+
+        let main_data_root = circuit.main_data_root;
+        let num_columns = circuit.output_schema.columns.len() + 1;
+
+        // Create a cursor to read the btree
+        let mut btree_cursor =
+            BTreeCursor::new_table(None, pager.clone(), main_data_root, num_columns);
+
+        // Rewind to the beginning
+        pager.io.block(|| btree_cursor.rewind())?;
+
+        // Read all rows from the BTree
+        loop {
+            // Check if cursor is empty (no more rows)
+            if btree_cursor.is_empty() {
+                break;
+            }
+
+            // Get the rowid
+            let rowid = pager.io.block(|| btree_cursor.rowid()).unwrap().unwrap();
+
+            // Get the record at this position
+            let record = pager
+                .io
+                .block(|| btree_cursor.record())
+                .unwrap()
+                .unwrap()
+                .to_owned();
+
+            let values_ref = record.get_values();
+            let num_data_columns = values_ref.len() - 1; // Get length before consuming
+            let values: Vec<Value> = values_ref
+                .into_iter()
+                .take(num_data_columns) // Skip the weight column
+                .map(|x| x.to_owned())
+                .collect();
+            delta.insert(rowid, values);
+            pager.io.block(|| btree_cursor.next()).unwrap();
         }
-        delta_set
+        Ok(delta)
     }
 
     #[test]
     fn test_simple_projection() {
-        let circuit = compile_sql!("SELECT name FROM users");
+        let (circuit, _) = compile_sql!("SELECT name FROM users");
 
         // Circuit has 2 nodes with Projection at root
         assert_circuit!(circuit, depth: 2, root: Projection);
@@ -1154,7 +1578,7 @@ mod tests {
 
     #[test]
     fn test_filter_with_projection() {
-        let circuit = compile_sql!("SELECT name FROM users WHERE age > 18");
+        let (circuit, _) = compile_sql!("SELECT name FROM users WHERE age > 18");
 
         // Circuit has 3 nodes with Projection at root
         assert_circuit!(circuit, depth: 3, root: Projection);
@@ -1168,7 +1592,7 @@ mod tests {
 
     #[test]
     fn test_select_star() {
-        let mut circuit = compile_sql!("SELECT * FROM users");
+        let (mut circuit, pager) = compile_sql!("SELECT * FROM users");
 
         // Create test data
         let mut input_delta = Delta::new();
@@ -1193,8 +1617,11 @@ mod tests {
         let mut inputs = HashMap::new();
         inputs.insert("users".to_string(), input_delta);
 
-        // Initialize circuit with initial data
-        let result = circuit.initialize(inputs).unwrap();
+        let result = test_execute(&mut circuit, inputs.clone(), pager.clone()).unwrap();
+        pager
+            .io
+            .block(|| circuit.commit(inputs.clone(), pager.clone()))
+            .unwrap();
 
         // Should have all rows with all columns
         assert_eq!(result.changes.len(), 2);
@@ -1208,7 +1635,7 @@ mod tests {
 
     #[test]
     fn test_execute_filter() {
-        let mut circuit = compile_sql!("SELECT * FROM users WHERE age > 18");
+        let (mut circuit, pager) = compile_sql!("SELECT * FROM users WHERE age > 18");
 
         // Create test data
         let mut input_delta = Delta::new();
@@ -1241,8 +1668,11 @@ mod tests {
         let mut inputs = HashMap::new();
         inputs.insert("users".to_string(), input_delta);
 
-        // Initialize circuit with initial data
-        let result = circuit.initialize(inputs).unwrap();
+        let result = test_execute(&mut circuit, inputs.clone(), pager.clone()).unwrap();
+        pager
+            .io
+            .block(|| circuit.commit(inputs.clone(), pager.clone()))
+            .unwrap();
 
// Should only have Alice and Charlie (age > 18) assert_eq!( @@ -1285,7 +1715,7 @@ mod tests { #[test] fn test_simple_column_projection() { - let mut circuit = compile_sql!("SELECT name, age FROM users"); + let (mut circuit, pager) = compile_sql!("SELECT name, age FROM users"); // Create test data let mut input_delta = Delta::new(); @@ -1310,8 +1740,11 @@ mod tests { let mut inputs = HashMap::new(); inputs.insert("users".to_string(), input_delta); - // Initialize circuit with initial data - let result = circuit.initialize(inputs).unwrap(); + let result = test_execute(&mut circuit, inputs.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(inputs.clone(), pager.clone())) + .unwrap(); // Should have all rows but only 2 columns (name, age) assert_eq!(result.changes.len(), 2); @@ -1328,7 +1761,7 @@ mod tests { #[test] fn test_simple_aggregation() { // Test COUNT(*) with GROUP BY - let mut circuit = compile_sql!("SELECT age, COUNT(*) FROM users GROUP BY age"); + let (mut circuit, pager) = compile_sql!("SELECT age, COUNT(*) FROM users GROUP BY age"); // Create test data let mut input_delta = Delta::new(); @@ -1361,8 +1794,11 @@ mod tests { let mut inputs = HashMap::new(); inputs.insert("users".to_string(), input_delta); - // Initialize circuit with initial data - let result = circuit.initialize(inputs).unwrap(); + let result = test_execute(&mut circuit, inputs.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(inputs.clone(), pager.clone())) + .unwrap(); // Should have 2 groups: age 25 with count 2, age 30 with count 1 assert_eq!(result.changes.len(), 2); @@ -1393,7 +1829,7 @@ mod tests { #[test] fn test_sum_aggregation() { // Test SUM with GROUP BY - let mut circuit = compile_sql!("SELECT name, SUM(age) FROM users GROUP BY name"); + let (mut circuit, pager) = compile_sql!("SELECT name, SUM(age) FROM users GROUP BY name"); // Create test data - some names appear multiple times let mut input_delta = Delta::new(); @@ -1426,8 +1862,11 @@ mod tests { let mut inputs = HashMap::new(); inputs.insert("users".to_string(), input_delta); - // Initialize circuit with initial data - let result = circuit.initialize(inputs).unwrap(); + let result = test_execute(&mut circuit, inputs.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(inputs.clone(), pager.clone())) + .unwrap(); // Should have 2 groups: Alice with sum 55, Bob with sum 20 assert_eq!(result.changes.len(), 2); @@ -1449,7 +1888,7 @@ mod tests { #[test] fn test_aggregation_without_group_by() { // Test aggregation without GROUP BY - should produce a single row - let mut circuit = compile_sql!("SELECT COUNT(*), SUM(age), AVG(age) FROM users"); + let (mut circuit, pager) = compile_sql!("SELECT COUNT(*), SUM(age), AVG(age) FROM users"); // Create test data let mut input_delta = Delta::new(); @@ -1482,8 +1921,11 @@ mod tests { let mut inputs = HashMap::new(); inputs.insert("users".to_string(), input_delta); - // Initialize circuit with initial data - let result = circuit.initialize(inputs).unwrap(); + let result = test_execute(&mut circuit, inputs.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(inputs.clone(), pager.clone())) + .unwrap(); // Should have exactly 1 row with all aggregates assert_eq!( @@ -1522,7 +1964,7 @@ mod tests { #[test] fn test_expression_projection_execution() { // Test that complex expressions work through VDBE compilation - let mut circuit = compile_sql!("SELECT hex(id) FROM users"); + let (mut circuit, pager) = compile_sql!("SELECT 
hex(id) FROM users"); // Create test data let mut input_delta = Delta::new(); @@ -1547,8 +1989,11 @@ mod tests { let mut inputs = HashMap::new(); inputs.insert("users".to_string(), input_delta); - // Initialize circuit with initial data - let result = circuit.initialize(inputs).unwrap(); + let result = test_execute(&mut circuit, inputs.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(inputs.clone(), pager.clone())) + .unwrap(); assert_eq!(result.changes.len(), 2); @@ -1587,7 +2032,7 @@ mod tests { fn test_projection_aggregation_projection_pattern() { // Test pattern: projection -> aggregation -> projection // Query: SELECT HEX(SUM(age + 2)) FROM users - let mut circuit = compile_sql!("SELECT HEX(SUM(age + 2)) FROM users"); + let (mut circuit, pager) = compile_sql!("SELECT HEX(SUM(age + 2)) FROM users"); // Initial input data let mut input_delta = Delta::new(); @@ -1619,8 +2064,11 @@ mod tests { let mut input_data = HashMap::new(); input_data.insert("users".to_string(), input_delta); - // Initialize the circuit with the initial data - let result = circuit.initialize(input_data).unwrap(); + let result = test_execute(&mut circuit, input_data.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(input_data.clone(), pager.clone())) + .unwrap(); // Expected: SUM(age + 2) = (25+2) + (30+2) + (35+2) = 27 + 32 + 37 = 96 // HEX(96) should be the hex representation of the string "96" = "3936" @@ -1650,7 +2098,7 @@ mod tests { let mut input_data = HashMap::new(); input_data.insert("users".to_string(), input_delta); - let result = circuit.execute(input_data, DeltaSet::empty()).unwrap(); + let result = test_execute(&mut circuit, input_data, pager.clone()).unwrap(); // Expected: new SUM(age + 2) = 96 + (40+2) = 138 // HEX(138) = hex of "138" = "313338" @@ -1675,7 +2123,8 @@ mod tests { fn test_nested_projection_with_groupby() { // Test pattern: projection -> aggregation with GROUP BY -> projection // Query: SELECT name, HEX(SUM(age * 2)) FROM users GROUP BY name - let mut circuit = compile_sql!("SELECT name, HEX(SUM(age * 2)) FROM users GROUP BY name"); + let (mut circuit, pager) = + compile_sql!("SELECT name, HEX(SUM(age * 2)) FROM users GROUP BY name"); // Initial input data let mut input_delta = Delta::new(); @@ -1707,8 +2156,11 @@ mod tests { let mut input_data = HashMap::new(); input_data.insert("users".to_string(), input_delta); - // Initialize circuit with initial data - let result = circuit.initialize(input_data).unwrap(); + let result = test_execute(&mut circuit, input_data.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(input_data.clone(), pager.clone())) + .unwrap(); // Expected results: // Alice: SUM(25*2 + 35*2) = 50 + 70 = 120, HEX("120") = "313230" @@ -1747,7 +2199,7 @@ mod tests { fn test_transaction_context() { // Test that uncommitted changes are visible within a transaction // but don't affect the operator's internal state - let mut circuit = compile_sql!("SELECT * FROM users WHERE age > 18"); + let (mut circuit, pager) = compile_sql!("SELECT * FROM users WHERE age > 18"); // Initialize with some data let mut init_data = HashMap::new(); @@ -1770,10 +2222,13 @@ mod tests { ); init_data.insert("users".to_string(), delta); - circuit.initialize(init_data).unwrap(); + let _ = test_execute(&mut circuit, init_data.clone(), pager.clone()).unwrap(); + let state = pager + .io + .block(|| circuit.commit(init_data.clone(), pager.clone())) + .unwrap(); - // Verify initial state: only Alice (age > 18) - let state = 
get_current_state(&circuit).unwrap();
+ // Verify initial delta: only Alice (age > 18)
assert_eq!(state.changes.len(), 1);
assert_eq!(state.changes[0].0.values[1], Value::Text("Alice".into()));
@@ -1802,9 +2257,7 @@ mod tests {
// Execute with uncommitted data - this simulates processing the uncommitted changes
// through the circuit to see what would be visible
- let tx_result = circuit
- .execute(HashMap::new(), delta_set_from_map(uncommitted.clone()))
- .unwrap();
+ let tx_result = test_execute(&mut circuit, uncommitted.clone(), pager.clone()).unwrap();
// The result should show Charlie being added (passes filter, age > 18)
// David is filtered out (age 15 < 18)
@@ -1827,9 +2280,7 @@ mod tests {
);
commit_data.insert("users".to_string(), commit_delta);
- let commit_result = circuit
- .execute(commit_data.clone(), DeltaSet::empty())
- .unwrap();
+ let commit_result = test_execute(&mut circuit, commit_data.clone(), pager.clone()).unwrap();
// The commit result should show Charlie being added
assert_eq!(commit_result.changes.len(), 1, "Should see Charlie added");
@@ -1839,17 +2290,20 @@ mod tests {
);
// Commit the change to make it permanent
- circuit.commit(commit_data).unwrap();
+ pager
+ .io
+ .block(|| circuit.commit(commit_data.clone(), pager.clone()))
+ .unwrap();
// Now if we execute again with no changes, we should see no delta
- let empty_result = circuit.execute(HashMap::new(), DeltaSet::empty()).unwrap();
+ let empty_result = test_execute(&mut circuit, HashMap::new(), pager.clone()).unwrap();
assert_eq!(empty_result.changes.len(), 0, "No changes when no new data");
}
#[test]
fn test_uncommitted_delete() {
// Test that uncommitted deletes are handled correctly without affecting operator state
- let mut circuit = compile_sql!("SELECT * FROM users WHERE age > 18");
+ let (mut circuit, pager) = compile_sql!("SELECT * FROM users WHERE age > 18");
// Initialize with some data
let mut init_data = HashMap::new();
@@ -1880,10 +2334,13 @@ mod tests {
);
init_data.insert("users".to_string(), delta);
- circuit.initialize(init_data).unwrap();
+ let _ = test_execute(&mut circuit, init_data.clone(), pager.clone()).unwrap();
+ let state = pager
+ .io
+ .block(|| circuit.commit(init_data.clone(), pager.clone()))
+ .unwrap();
- // Verify initial state: Alice, Bob, Charlie (all age > 18)
- let state = get_current_state(&circuit).unwrap();
+ // Verify initial delta: Alice, Bob, Charlie (all age > 18)
assert_eq!(state.changes.len(), 3);
// Create uncommitted delete for Bob
@@ -1900,9 +2357,7 @@ mod tests {
uncommitted.insert("users".to_string(), uncommitted_delta);
// Execute with uncommitted delete
- let tx_result = circuit
- .execute(HashMap::new(), delta_set_from_map(uncommitted.clone()))
- .unwrap();
+ let tx_result = test_execute(&mut circuit, uncommitted.clone(), pager.clone()).unwrap();
// Result should show the deleted row that passed the filter
assert_eq!(
@@ -1912,7 +2367,7 @@ mod tests {
);
// Verify operator's internal state is unchanged (still has all 3 users)
- let state_after = get_current_state(&circuit).unwrap();
+ let state_after = get_current_state(pager.clone(), &circuit).unwrap();
assert_eq!(
state_after.changes.len(),
3,
@@ -1932,12 +2387,13 @@ mod tests {
);
commit_data.insert("users".to_string(), commit_delta);
- let commit_result = circuit
- .execute(commit_data.clone(), DeltaSet::empty())
- .unwrap();
+ let commit_result = test_execute(&mut circuit, commit_data.clone(), pager.clone()).unwrap();
// Actually commit the delete to update operator state
- 
circuit.commit(commit_data).unwrap(); + pager + .io + .block(|| circuit.commit(commit_data.clone(), pager.clone())) + .unwrap(); // The commit result should show Bob being deleted assert_eq!(commit_result.changes.len(), 1, "Should see Bob deleted"); @@ -1951,7 +2407,7 @@ mod tests { ); // After commit, internal state should have only Alice and Charlie - let final_state = get_current_state(&circuit).unwrap(); + let final_state = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!( final_state.changes.len(), 2, @@ -1977,7 +2433,7 @@ mod tests { #[test] fn test_uncommitted_update() { // Test that uncommitted updates (delete + insert) are handled correctly - let mut circuit = compile_sql!("SELECT * FROM users WHERE age > 18"); + let (mut circuit, pager) = compile_sql!("SELECT * FROM users WHERE age > 18"); // Initialize with some data let mut init_data = HashMap::new(); @@ -2000,7 +2456,11 @@ mod tests { ); // Bob is 17, filtered out init_data.insert("users".to_string(), delta); - circuit.initialize(init_data).unwrap(); + let _ = test_execute(&mut circuit, init_data.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(init_data.clone(), pager.clone())) + .unwrap(); // Create uncommitted update: Bob turns 19 (update from 17 to 19) // This is modeled as delete + insert @@ -2025,9 +2485,7 @@ mod tests { uncommitted.insert("users".to_string(), uncommitted_delta); // Execute with uncommitted update - let tx_result = circuit - .execute(HashMap::new(), delta_set_from_map(uncommitted.clone())) - .unwrap(); + let tx_result = test_execute(&mut circuit, uncommitted.clone(), pager.clone()).unwrap(); // Bob should now appear in the result (age 19 > 18) // Consolidate to see the final state @@ -2063,10 +2521,13 @@ mod tests { commit_data.insert("users".to_string(), commit_delta); // Commit the update - circuit.commit(commit_data).unwrap(); + pager + .io + .block(|| circuit.commit(commit_data.clone(), pager.clone())) + .unwrap(); // After committing, Bob should be in the view's state - let state = get_current_state(&circuit).unwrap(); + let state = get_current_state(pager.clone(), &circuit).unwrap(); let mut consolidated_state = state; consolidated_state.consolidate(); @@ -2095,7 +2556,7 @@ mod tests { #[test] fn test_uncommitted_filtered_delete() { // Test deleting a row that doesn't pass the filter - let mut circuit = compile_sql!("SELECT * FROM users WHERE age > 18"); + let (mut circuit, pager) = compile_sql!("SELECT * FROM users WHERE age > 18"); // Initialize with mixed data let mut init_data = HashMap::new(); @@ -2118,7 +2579,11 @@ mod tests { ); // Bob doesn't pass filter init_data.insert("users".to_string(), delta); - circuit.initialize(init_data).unwrap(); + let _ = test_execute(&mut circuit, init_data.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(init_data.clone(), pager.clone())) + .unwrap(); // Create uncommitted delete for Bob (who isn't in the view because age=15) let mut uncommitted = HashMap::new(); @@ -2134,9 +2599,7 @@ mod tests { uncommitted.insert("users".to_string(), uncommitted_delta); // Execute with uncommitted delete - should produce no output changes - let tx_result = circuit - .execute(HashMap::new(), delta_set_from_map(uncommitted)) - .unwrap(); + let tx_result = test_execute(&mut circuit, uncommitted, pager.clone()).unwrap(); // Bob wasn't in the view, so deleting him produces no output assert_eq!( @@ -2146,7 +2609,7 @@ mod tests { ); // The view state should still only have Alice - let state = 
get_current_state(&circuit).unwrap(); + let state = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!(state.changes.len(), 1, "View still has only Alice"); assert_eq!(state.changes[0].0.values[1], Value::Text("Alice".into())); } @@ -2154,7 +2617,7 @@ mod tests { #[test] fn test_uncommitted_mixed_operations() { // Test multiple uncommitted operations together - let mut circuit = compile_sql!("SELECT * FROM users WHERE age > 18"); + let (mut circuit, pager) = compile_sql!("SELECT * FROM users WHERE age > 18"); // Initialize with some data let mut init_data = HashMap::new(); @@ -2177,10 +2640,14 @@ mod tests { ); init_data.insert("users".to_string(), delta); - circuit.initialize(init_data).unwrap(); + let _ = test_execute(&mut circuit, init_data.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(init_data.clone(), pager.clone())) + .unwrap(); // Verify initial state - let state = get_current_state(&circuit).unwrap(); + let state = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!(state.changes.len(), 2); // Create uncommitted changes: @@ -2237,9 +2704,7 @@ mod tests { uncommitted.insert("users".to_string(), uncommitted_delta); // Execute with uncommitted changes - let tx_result = circuit - .execute(HashMap::new(), delta_set_from_map(uncommitted.clone())) - .unwrap(); + let tx_result = test_execute(&mut circuit, uncommitted.clone(), pager.clone()).unwrap(); // Result should show all changes: delete Alice, update Bob, insert Charlie and David assert_eq!( @@ -2249,7 +2714,7 @@ mod tests { ); // Verify operator's internal state is unchanged - let state_after = get_current_state(&circuit).unwrap(); + let state_after = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!(state_after.changes.len(), 2, "Still has Alice and Bob"); // Commit all changes @@ -2297,19 +2762,20 @@ mod tests { ); commit_data.insert("users".to_string(), commit_delta); - let commit_result = circuit - .execute(commit_data.clone(), DeltaSet::empty()) - .unwrap(); + let commit_result = test_execute(&mut circuit, commit_data.clone(), pager.clone()).unwrap(); // Should see: Alice deleted, Bob deleted, Bob inserted, Charlie inserted // (David filtered out) assert_eq!(commit_result.changes.len(), 4, "Should see 4 changes"); // Actually commit the changes to update operator state - circuit.commit(commit_data).unwrap(); + pager + .io + .block(|| circuit.commit(commit_data.clone(), pager.clone())) + .unwrap(); // After all commits, execute with no changes should return empty delta - let empty_result = circuit.execute(HashMap::new(), DeltaSet::empty()).unwrap(); + let empty_result = test_execute(&mut circuit, HashMap::new(), pager.clone()).unwrap(); assert_eq!(empty_result.changes.len(), 0, "No changes when no new data"); } @@ -2320,57 +2786,9 @@ mod tests { // and we need to see correct aggregation results within the transaction // Create a sales table schema for testing - let mut schema = Schema::new(false); - let sales_table = BTreeTable { - name: "sales".to_string(), - root_page: 2, - primary_key_columns: vec![], - columns: vec![ - SchemaColumn { - name: Some("product_id".to_string()), - ty: Type::Integer, - ty_str: "INTEGER".to_string(), - primary_key: false, - is_rowid_alias: false, - notnull: false, - default: None, - unique: false, - collation: None, - hidden: false, - }, - SchemaColumn { - name: Some("amount".to_string()), - ty: Type::Integer, - ty_str: "INTEGER".to_string(), - primary_key: false, - is_rowid_alias: false, - notnull: false, - default: None, - unique: 
false, - collation: None, - hidden: false, - }, - ], - has_rowid: true, - is_strict: false, - has_autoincrement: false, - unique_sets: None, - }; - schema.add_btree_table(Arc::new(sales_table)); + let _ = test_schema!(); - // Parse and compile the aggregation query - let sql = "SELECT product_id, SUM(amount) as total, COUNT(*) as cnt FROM sales GROUP BY product_id"; - let mut parser = Parser::new(sql.as_bytes()); - let cmd = parser.next().unwrap().unwrap(); - - let mut circuit = match cmd { - ast::Cmd::Stmt(stmt) => { - let mut builder = LogicalPlanBuilder::new(&schema); - let logical_plan = builder.build_statement(&stmt).unwrap(); - DbspCompiler::new().compile(&logical_plan).unwrap() - } - _ => panic!("Expected SQL statement"), - }; + let (mut circuit, pager) = compile_sql!("SELECT product_id, SUM(amount) as total, COUNT(*) as cnt FROM sales GROUP BY product_id"); // Initialize with base data: (1, 100), (1, 200), (2, 150), (2, 250) let mut init_data = HashMap::new(); @@ -2381,10 +2799,14 @@ mod tests { delta.insert(4, vec![Value::Integer(2), Value::Integer(250)]); init_data.insert("sales".to_string(), delta); - circuit.initialize(init_data).unwrap(); + let _ = test_execute(&mut circuit, init_data.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(init_data.clone(), pager.clone())) + .unwrap(); // Verify initial state: product 1 total=300, product 2 total=400 - let state = get_current_state(&circuit).unwrap(); + let state = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!(state.changes.len(), 2, "Should have 2 product groups"); // Build a map of product_id -> (total, count) @@ -2432,9 +2854,7 @@ mod tests { uncommitted.insert("sales".to_string(), uncommitted_delta); // Execute with uncommitted data - simulating a read within transaction - let tx_result = circuit - .execute(HashMap::new(), delta_set_from_map(uncommitted.clone())) - .unwrap(); + let tx_result = test_execute(&mut circuit, uncommitted.clone(), pager.clone()).unwrap(); // Result should show the aggregate changes from uncommitted data // Product 1: retraction of (300, 2) and insertion of (350, 3) @@ -2446,7 +2866,7 @@ mod tests { ); // IMPORTANT: Verify operator's internal state is unchanged - let state_after = get_current_state(&circuit).unwrap(); + let state_after = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!( state_after.changes.len(), 2, @@ -2500,9 +2920,7 @@ mod tests { commit_delta.insert(6, vec![Value::Integer(3), Value::Integer(300)]); commit_data.insert("sales".to_string(), commit_delta); - let commit_result = circuit - .execute(commit_data.clone(), DeltaSet::empty()) - .unwrap(); + let commit_result = test_execute(&mut circuit, commit_data.clone(), pager.clone()).unwrap(); // Should see changes for product 1 (updated) and product 3 (new) assert_eq!( @@ -2512,10 +2930,13 @@ mod tests { ); // Actually commit the changes to update operator state - circuit.commit(commit_data).unwrap(); + pager + .io + .block(|| circuit.commit(commit_data.clone(), pager.clone())) + .unwrap(); // After commit, verify final state - let final_state = get_current_state(&circuit).unwrap(); + let final_state = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!( final_state.changes.len(), 3, @@ -2568,7 +2989,7 @@ mod tests { // Test that uncommitted INSERTs are visible within the same transaction // This simulates: BEGIN; INSERT ...; SELECT * FROM view; COMMIT; - let mut circuit = compile_sql!("SELECT * FROM users WHERE age > 18"); + let (mut circuit, pager) = 
compile_sql!("SELECT * FROM users WHERE age > 18"); // Initialize with some data - need to match the schema (id, name, age) let mut init_data = HashMap::new(); @@ -2591,10 +3012,14 @@ mod tests { ); init_data.insert("users".to_string(), delta); - circuit.initialize(init_data.clone()).unwrap(); + let _ = test_execute(&mut circuit, init_data.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(init_data.clone(), pager.clone())) + .unwrap(); // Verify initial state - let state = get_current_state(&circuit).unwrap(); + let state = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!( state.len(), 2, @@ -2624,9 +3049,7 @@ mod tests { // Execute with uncommitted data - this should return the uncommitted changes // that passed through the filter (age > 18) - let tx_result = circuit - .execute(HashMap::new(), delta_set_from_map(uncommitted.clone())) - .unwrap(); + let tx_result = test_execute(&mut circuit, uncommitted.clone(), pager.clone()).unwrap(); // IMPORTANT: tx_result should contain the filtered uncommitted changes! // Both Charlie (35) and David (20) should pass the age > 18 filter @@ -2650,7 +3073,7 @@ mod tests { ); // CRITICAL: Verify the operator state wasn't modified by uncommitted execution - let state_after_uncommitted = get_current_state(&circuit).unwrap(); + let state_after_uncommitted = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!( state_after_uncommitted.len(), 2, @@ -2682,7 +3105,8 @@ mod tests { // Similar to test_uncommitted_aggregation but explicitly tests rollback semantics // Create a simple aggregation circuit - let mut circuit = compile_sql!("SELECT age, COUNT(*) as cnt FROM users GROUP BY age"); + let (mut circuit, pager) = + compile_sql!("SELECT age, COUNT(*) as cnt FROM users GROUP BY age"); // Initialize with some data let mut init_data = HashMap::new(); @@ -2721,10 +3145,14 @@ mod tests { ); init_data.insert("users".to_string(), delta); - circuit.initialize(init_data).unwrap(); + let _ = test_execute(&mut circuit, init_data.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(init_data.clone(), pager.clone())) + .unwrap(); // Verify initial state: age 25 count=2, age 30 count=2 - let state = get_current_state(&circuit).unwrap(); + let state = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!(state.changes.len(), 2); let initial_counts: HashMap = state @@ -2785,9 +3213,7 @@ mod tests { uncommitted.insert("users".to_string(), uncommitted_delta); // Execute with uncommitted changes - let tx_result = circuit - .execute(HashMap::new(), delta_set_from_map(uncommitted.clone())) - .unwrap(); + let tx_result = test_execute(&mut circuit, uncommitted.clone(), pager.clone()).unwrap(); // Should see the aggregate changes from uncommitted data // Age 25: retraction of count 1 and insertion of count 2 @@ -2798,7 +3224,7 @@ mod tests { ); // Verify internal state is unchanged (simulating rollback by not committing) - let state_after_rollback = get_current_state(&circuit).unwrap(); + let state_after_rollback = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!( state_after_rollback.changes.len(), 2, @@ -2838,8 +3264,10 @@ mod tests { #[test] fn test_circuit_rowid_update_consolidation() { + let (pager, p1, p2) = setup_btree_for_circuit(); + // Test that circuit properly consolidates state when rowid changes - let mut circuit = DbspCircuit::new(); + let mut circuit = DbspCircuit::new(p1, p2); // Create a simple filter node let schema = Arc::new(LogicalSchema::new(vec![ @@ -2847,14 
+3275,14 @@ mod tests { ("value".to_string(), Type::Integer), ])); - // First create an input node + // First create an input node with InputOperator let input_id = circuit.add_node( DbspOperator::Input { name: "test".to_string(), schema: schema.clone(), }, vec![], - None, // Input nodes don't have executables + Box::new(InputOperator::new("test".to_string())), ); let filter_op = FilterOperator::new( @@ -2875,10 +3303,10 @@ mod tests { let filter_id = circuit.add_node( DbspOperator::Filter { predicate }, vec![input_id], // Filter takes input from the input node - Some(Box::new(filter_op)), + Box::new(filter_op), ); - circuit.root = Some(filter_id); + circuit.set_root(filter_id, schema.clone()); // Initialize with a row let mut init_data = HashMap::new(); @@ -2886,10 +3314,14 @@ mod tests { delta.insert(5, vec![Value::Integer(5), Value::Integer(20)]); init_data.insert("test".to_string(), delta); - circuit.initialize(init_data).unwrap(); + let _ = test_execute(&mut circuit, init_data.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(init_data.clone(), pager.clone())) + .unwrap(); // Verify initial state - let state = get_current_state(&circuit).unwrap(); + let state = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!(state.changes.len(), 1); assert_eq!(state.changes[0].0.rowid, 5); @@ -2900,15 +3332,16 @@ mod tests { update_delta.insert(3, vec![Value::Integer(3), Value::Integer(20)]); update_data.insert("test".to_string(), update_delta); - circuit - .execute(update_data.clone(), DeltaSet::empty()) - .unwrap(); + test_execute(&mut circuit, update_data.clone(), pager.clone()).unwrap(); // Commit the changes to update operator state - circuit.commit(update_data).unwrap(); + pager + .io + .block(|| circuit.commit(update_data.clone(), pager.clone())) + .unwrap(); // The circuit should consolidate the state properly - let final_state = get_current_state(&circuit).unwrap(); + let final_state = get_current_state(pager.clone(), &circuit).unwrap(); assert_eq!( final_state.changes.len(), 1, @@ -2921,4 +3354,65 @@ mod tests { ); assert_eq!(final_state.changes[0].1, 1); } -} + + #[test] + fn test_circuit_respects_multiplicities() { + let (mut circuit, pager) = compile_sql!("SELECT * from users"); + + // Insert same row twice (multiplicity 2) + let mut delta = Delta::new(); + delta.insert( + 1, + vec![ + Value::Integer(1), + Value::Text("Alice".into()), + Value::Integer(25), + ], + ); + delta.insert( + 1, + vec![ + Value::Integer(1), + Value::Text("Alice".into()), + Value::Integer(25), + ], + ); + + let mut inputs = HashMap::new(); + inputs.insert("users".to_string(), delta); + test_execute(&mut circuit, inputs.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(inputs.clone(), pager.clone())) + .unwrap(); + + // Delete once (should leave multiplicity 1) + let mut delete_one = Delta::new(); + delete_one.delete( + 1, + vec![ + Value::Integer(1), + Value::Text("Alice".into()), + Value::Integer(25), + ], + ); + + let mut inputs = HashMap::new(); + inputs.insert("users".to_string(), delete_one); + test_execute(&mut circuit, inputs.clone(), pager.clone()).unwrap(); + pager + .io + .block(|| circuit.commit(inputs.clone(), pager.clone())) + .unwrap(); + + // With proper DBSP: row still exists (weight 2 - 1 = 1) + let state = get_current_state(pager.clone(), &circuit).unwrap(); + let mut consolidated = state; + consolidated.consolidate(); + assert_eq!( + consolidated.len(), + 1, + "Row should still exist with multiplicity 1" + ); + } +} \ No newline at 
end of file
diff --git a/core/incremental/cursor.rs b/core/incremental/cursor.rs
new file mode 100644
index 000000000..1e0e91af8
--- /dev/null
+++ b/core/incremental/cursor.rs
@@ -0,0 +1,1618 @@
+use crate::{
+ incremental::{
+ compiler::{DeltaSet, ExecuteState},
+ dbsp::{Delta, RowKeyZSet},
+ hashable_row::HashableRow,
+ view::{IncrementalView, ViewTransactionState},
+ },
+ return_if_io,
+ storage::btree::BTreeCursor,
+ types::{IOResult, SeekKey, SeekOp, SeekResult, Value},
+ LimboError, Pager, Result,
+};
+use std::rc::Rc;
+use std::sync::{Arc, Mutex};
+
+/// State machine for seek operations
+#[derive(Debug)]
+enum SeekState {
+ /// Initial state before seeking
+ Init,
+
+ /// Actively seeking with btree and uncommitted iterators
+ Seek {
+ /// The row we are trying to find
+ target: i64,
+ },
+
+ /// Seek completed (whether or not a row was found)
+ Done,
+}
+
+/// Cursor for reading materialized views that combines:
+/// 1. Persistent btree data (committed state)
+/// 2. Transaction-specific DBSP deltas (uncommitted changes)
+///
+/// Works like a regular table cursor - reads from disk on-demand
+/// and overlays transaction changes as needed.
+pub struct MaterializedViewCursor {
+ // Core components
+ btree_cursor: Box<BTreeCursor>,
+ view: Arc<Mutex<IncrementalView>>,
+ pager: Rc<Pager>,
+
+ // Current changes that are uncommitted
+ uncommitted: RowKeyZSet,
+
+ // Reference to shared transaction state for this specific view - shared with Connection
+ tx_state: Rc<ViewTransactionState>,
+
+ // The transaction state always grows. It never gets reduced. That is in the very nature of
+ // DBSP, because deletions are just appends with weight < 0. So we can use the length of the
+ // state to check whether we have to recompute the transaction state.
+ last_tx_state_len: usize,
+
+ // Current row cache - only cache the current row we're looking at
+ current_row: Option<(i64, Vec<Value>)>,
+
+ // Execution state for circuit processing
+ execute_state: ExecuteState,
+
+ // State machine for seek operations
+ seek_state: SeekState,
+}
+
+impl MaterializedViewCursor {
+ pub fn new(
+ btree_cursor: Box<BTreeCursor>,
+ view: Arc<Mutex<IncrementalView>>,
+ pager: Rc<Pager>,
+ tx_state: Rc<ViewTransactionState>,
+ ) -> Result<Self> {
+ Ok(Self {
+ btree_cursor,
+ view,
+ pager,
+ uncommitted: RowKeyZSet::new(),
+ tx_state,
+ last_tx_state_len: 0,
+ current_row: None,
+ execute_state: ExecuteState::Uninitialized,
+ seek_state: SeekState::Init,
+ })
+ }
+
+ /// Lazily compute transaction changes, recomputing whenever the
+ /// transaction state has grown since the last computation
+ fn ensure_tx_changes_computed(&mut self) -> Result<IOResult<()>> {
+ // Check if we've already processed the current state
+ let current_len = self.tx_state.len();
+ if current_len == self.last_tx_state_len {
+ return Ok(IOResult::Done(()));
+ }
+
+ // Get the view and the current transaction state
+ let mut view_guard = self.view.lock().unwrap();
+ let tx_delta = self.tx_state.get_delta();
+
+ // Process the delta through the circuit to get materialized changes
+ let mut uncommitted = DeltaSet::new();
+ uncommitted.insert(view_guard.base_table().name.clone(), tx_delta);
+
+ let processed_delta = return_if_io!(view_guard.execute_with_uncommitted(
+ uncommitted,
+ self.pager.clone(),
+ &mut self.execute_state
+ ));
+
+ self.uncommitted = RowKeyZSet::from_delta(&processed_delta);
+ self.last_tx_state_len = current_len;
+ Ok(IOResult::Done(()))
+ }
+
+ // Read the current btree entry as a vector (empty if no current position)
+ fn read_btree_delta_entry(&mut self) -> Result<IOResult<Vec<(HashableRow, isize)>>> {
+ let btree_rowid = return_if_io!(self.btree_cursor.rowid());
+ let rowid = match btree_rowid {
+ None => return Ok(IOResult::Done(Vec::new())),
+ Some(rowid) => rowid,
+ };
+
+ let btree_record = 
return_if_io!(self.btree_cursor.record());
+ let btree_ref_values = btree_record
+ .ok_or_else(|| {
+ crate::LimboError::InternalError(
+ "Invalid data in materialized view: found a rowid, but not the row!"
+ .to_string(),
+ )
+ })?
+ .get_values();
+
+ // Convert RefValues to Values (copying for now - can optimize later)
+ let mut btree_values: Vec<Value> =
+ btree_ref_values.iter().map(|rv| rv.to_owned()).collect();
+
+ // The last column should be the weight
+ let weight_value = btree_values.pop().ok_or_else(|| {
+ crate::LimboError::InternalError(
+ "Invalid data in materialized view: no weight column found".to_string(),
+ )
+ })?;
+
+ // Convert the Value to isize weight
+ let weight = match weight_value {
+ Value::Integer(w) => w as isize,
+ _ => {
+ return Err(crate::LimboError::InternalError(format!(
+ "Invalid data in materialized view: expected integer weight, found {weight_value:?}"
+ )))
+ }
+ };
+
+ if !(-1..=1).contains(&weight) {
+ return Err(crate::LimboError::InternalError(format!(
+ "Invalid data in materialized view: expected weight -1, 0, or 1, found {weight}"
+ )));
+ }
+
+ Ok(IOResult::Done(vec![(
+ HashableRow::new(rowid, btree_values),
+ weight,
+ )]))
+ }
+
+ /// Internal seek implementation that doesn't check preconditions
+ fn do_seek(&mut self, target_rowid: i64, op: SeekOp) -> Result<IOResult<SeekResult>> {
+ loop {
+ // Process state machine - need to handle mutable borrow carefully
+ match &mut self.seek_state {
+ SeekState::Init => {
+ self.current_row = None;
+ self.seek_state = SeekState::Seek {
+ target: target_rowid,
+ };
+ }
+ SeekState::Seek { target } => {
+ let target = *target;
+ let btree_result =
+ return_if_io!(self.btree_cursor.seek(SeekKey::TableRowId(target), op));
+
+ let changes = if btree_result == SeekResult::Found {
+ return_if_io!(self.read_btree_delta_entry())
+ } else {
+ Vec::new()
+ };
+
+ let mut btree_entries = Delta { changes };
+ let changes = self.uncommitted.seek(target, op);
+
+ let uncommitted_entries = Delta { changes };
+ btree_entries.merge(&uncommitted_entries);
+
+ // An empty delta here (pre-zset) means nothing was found. An empty
+ // post-consolidation zset could instead mean the weights merely canceled out.
+ if btree_entries.is_empty() {
+ self.seek_state = SeekState::Done;
+ return Ok(IOResult::Done(SeekResult::NotFound));
+ }
+
+ let min_seen = btree_entries
+ .changes
+ .first()
+ .expect("cannot be empty, we just tested for it")
+ .0
+ .rowid;
+ let max_seen = btree_entries
+ .changes
+ .last()
+ .expect("cannot be empty, we just tested for it")
+ .0
+ .rowid;
+
+ let zset = RowKeyZSet::from_delta(&btree_entries);
+ let ret = zset.seek(target_rowid, op);
+
+ if !ret.is_empty() {
+ let (row, _) = &ret[0];
+ self.current_row = Some((row.rowid, row.values.clone()));
+ self.seek_state = SeekState::Done;
+ return Ok(IOResult::Done(SeekResult::Found));
+ }
+
+ let new_target = match op {
+ SeekOp::GT => Some(max_seen),
+ SeekOp::GE { eq_only: false } => Some(max_seen + 1),
+ SeekOp::LT => Some(min_seen),
+ SeekOp::LE { eq_only: false } => Some(min_seen - 1),
+ SeekOp::LE { eq_only: true } | SeekOp::GE { eq_only: true } => None,
+ };
+
+ if let Some(target) = new_target {
+ self.seek_state = SeekState::Seek { target };
+ } else {
+ self.seek_state = SeekState::Done;
+ return Ok(IOResult::Done(SeekResult::NotFound));
+ }
+ }
+ SeekState::Done => {
+ // We always return immediately after setting the state to Done,
+ // so if we got here, this must be a new seek.
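+ // Resetting to Init makes the next loop pass clear current_row and
+ // re-enter Seek with the caller's new target.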
+ self.seek_state = SeekState::Init;
+ }
+ }
+ }
+ }
+
+ pub fn seek(&mut self, key: SeekKey, op: SeekOp) -> Result<IOResult<SeekResult>> {
+ // Ensure transaction changes are computed
+ return_if_io!(self.ensure_tx_changes_computed());
+
+ let target_rowid = match &key {
+ SeekKey::TableRowId(rowid) => *rowid,
+ SeekKey::IndexKey(_) => {
+ return Err(LimboError::ParseError(
+ "Cannot search a materialized view with an index key".to_string(),
+ ));
+ }
+ };
+
+ self.do_seek(target_rowid, op)
+ }
+
+ pub fn next(&mut self) -> Result<IOResult<bool>> {
+ // If cursor is not positioned (no current_row), return false
+ // This matches BTreeCursor behavior when valid_state == Invalid
+ let Some((current_rowid, _)) = &self.current_row else {
+ return Ok(IOResult::Done(false));
+ };
+
+ // Use GT to find the next row after current position
+ let result = return_if_io!(self.do_seek(*current_rowid, SeekOp::GT));
+ Ok(IOResult::Done(result == SeekResult::Found))
+ }
+
+ pub fn column(&mut self, col: usize) -> Result<IOResult<Value>> {
+ if let Some((_, ref values)) = self.current_row {
+ Ok(IOResult::Done(
+ values.get(col).cloned().unwrap_or(Value::Null),
+ ))
+ } else {
+ Ok(IOResult::Done(Value::Null))
+ }
+ }
+
+ pub fn rowid(&self) -> Result<IOResult<Option<i64>>> {
+ Ok(IOResult::Done(self.current_row.as_ref().map(|(id, _)| *id)))
+ }
+
+ pub fn rewind(&mut self) -> Result<IOResult<()>> {
+ return_if_io!(self.ensure_tx_changes_computed());
+ // Seek GT from i64::MIN to find the first row using internal do_seek
+ let _result = return_if_io!(self.do_seek(i64::MIN, SeekOp::GT));
+ Ok(IOResult::Done(()))
+ }
+
+ pub fn is_valid(&self) -> Result<bool> {
+ Ok(self.current_row.is_some())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::util::IOExt;
+ use crate::{Connection, Database, OpenFlags};
+ use std::rc::Rc;
+ use std::sync::Arc;
+
+ /// Helper to create a test connection with a table and materialized view
+ fn create_test_connection() -> Result<Arc<Connection>> {
+ // Create an in-memory database with experimental views enabled
+ let io = Arc::new(crate::io::MemoryIO::new());
+ let db = Database::open_file_with_flags(
+ io,
+ ":memory:",
+ OpenFlags::default(),
+ crate::DatabaseOpts {
+ enable_mvcc: false,
+ enable_indexes: false,
+ enable_views: true,
+ enable_strict: false,
+ },
+ )?;
+ let conn = db.connect()?;
+
+ // Create a test table
+ conn.execute("CREATE TABLE test_table (id INTEGER PRIMARY KEY, value INTEGER)")?;
+
+ // Create materialized view
+ conn.execute("CREATE MATERIALIZED VIEW test_view AS SELECT id, value FROM test_table")?;
+
+ Ok(conn)
+ }
+
+ /// Helper to create a test cursor for the materialized view
+ fn create_test_cursor(
+ conn: &Arc<Connection>,
+ ) -> Result<(MaterializedViewCursor, Rc<ViewTransactionState>, Rc<Pager>)> {
+ // Get the schema and view
+ let view_mutex = conn
+ .schema
+ .borrow()
+ .get_materialized_view("test_view")
+ .ok_or(crate::LimboError::InternalError(
+ "View not found".to_string(),
+ ))?;
+
+ // Get the view's root page
+ let view = view_mutex.lock().unwrap();
+ let root_page = view.get_root_page();
+ if root_page == 0 {
+ return Err(crate::LimboError::InternalError(
+ "View not materialized".to_string(),
+ ));
+ }
+ let num_columns = view.columns.len();
+ drop(view);
+
+ // Create a btree cursor
+ let pager = conn.get_pager();
+ let btree_cursor = Box::new(BTreeCursor::new(
+ None, // No MvCursor
+ pager.clone(),
+ root_page,
+ num_columns,
+ ));
+
+ // Get or create transaction state for this view
+ let tx_state = conn.view_transaction_states.get_or_create("test_view");
+
+ // Create the materialized view cursor
+ let cursor = MaterializedViewCursor::new(
btree_cursor,
+ view_mutex.clone(),
+ pager.clone(),
+ tx_state.clone(),
+ )?;
+
+ Ok((cursor, tx_state, pager))
+ }
+
+ /// Helper to populate test table with data through SQL
+ fn populate_test_table(conn: &Arc<Connection>, rows: Vec<(i64, i64)>) -> Result<()> {
+ for (id, value) in rows {
+ let sql = format!("INSERT INTO test_table (id, value) VALUES ({id}, {value})");
+ conn.execute(&sql)?;
+ }
+ Ok(())
+ }
+
+ /// Helper to apply changes through ViewTransactionState
+ fn apply_changes_to_tx_state(
+ tx_state: &ViewTransactionState,
+ changes: Vec<(i64, Vec<Value>, isize)>,
+ ) {
+ for (rowid, values, weight) in changes {
+ if weight > 0 {
+ tx_state.insert(rowid, values);
+ } else if weight < 0 {
+ tx_state.delete(rowid, values);
+ }
+ }
+ }
+
+ #[test]
+ fn test_seek_key_exists_in_btree() -> Result<()> {
+ let conn = create_test_connection()?;
+
+ // Populate table with test data: rows 1, 3, 5, 7
+ populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50), (7, 70)])?;
+
+ // Create cursor for testing
+ let (mut cursor, _tx_state, pager) = create_test_cursor(&conn)?;
+
+ // No uncommitted changes - tx_state is already empty
+
+ // Test 1: Seek exact match (row 3)
+ let result = pager
+ .io
+ .block(|| cursor.seek(SeekKey::TableRowId(3), SeekOp::GE { eq_only: true }))?;
+ assert_eq!(result, SeekResult::Found);
+ assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3));
+
+ // Test 2: Seek GE (row 4 should find row 5)
+ let result = pager
+ .io
+ .block(|| cursor.seek(SeekKey::TableRowId(4), SeekOp::GE { eq_only: false }))?;
+ assert_eq!(result, SeekResult::Found);
+ assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5));
+
+ // Test 3: Seek GT (row 3 should find row 5)
+ let result = pager
+ .io
+ .block(|| cursor.seek(SeekKey::TableRowId(3), SeekOp::GT))?;
+ assert_eq!(result, SeekResult::Found);
+ assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5));
+
+ // Test 4: Seek LE (row 4 should find row 3)
+ let result = pager
+ .io
+ .block(|| cursor.seek(SeekKey::TableRowId(4), SeekOp::LE { eq_only: false }))?;
+ assert_eq!(result, SeekResult::Found);
+ assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3));
+
+ // Test 5: Seek LT (row 5 should find row 3)
+ let result = pager
+ .io
+ .block(|| cursor.seek(SeekKey::TableRowId(5), SeekOp::LT))?;
+ assert_eq!(result, SeekResult::Found);
+ assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3));
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_seek_key_exists_only_uncommitted() -> Result<()> {
+ let conn = create_test_connection()?;
+
+ // Populate table with rows 1, 5, 7
+ populate_test_table(&conn, vec![(1, 10), (5, 50), (7, 70)])?;
+
+ // Create cursor for testing
+ let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?;
+
+ // Add uncommitted changes: insert rows 3 and 6
+ apply_changes_to_tx_state(
+ &tx_state,
+ vec![
+ (3, vec![Value::Integer(3), Value::Integer(30)], 1), // Insert row 3
+ (6, vec![Value::Integer(6), Value::Integer(60)], 1), // Insert row 6
+ ],
+ );
+
+ // Test 1: Seek exact match for uncommitted row 3
+ let result = pager
+ .io
+ .block(|| cursor.seek(SeekKey::TableRowId(3), SeekOp::GE { eq_only: true }))?;
+ assert_eq!(result, SeekResult::Found);
+ assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3));
+ assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(30));
+
+ // Test 2: Seek GE for row 2 should find uncommitted row 3
+ let result = pager
+ .io
+ .block(|| cursor.seek(SeekKey::TableRowId(2), SeekOp::GE { eq_only: false }))?;
+ assert_eq!(result, SeekResult::Found);
+ assert_eq!(pager.io.block(|| cursor.rowid())?, 
Some(3)); + + // Test 3: Seek GT for row 5 should find uncommitted row 6 + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(5), SeekOp::GT))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(6)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(60)); + + // Test 4: Seek LE for row 6 should find uncommitted row 6 + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(6), SeekOp::LE { eq_only: false }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(6)); + + Ok(()) + } + + #[test] + fn test_seek_key_deleted_by_uncommitted() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5, 7 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50), (7, 70)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Delete row 3 and 5 in uncommitted changes + apply_changes_to_tx_state( + &tx_state, + vec![ + (3, vec![Value::Integer(3), Value::Integer(30)], -1), // Delete row 3 + (5, vec![Value::Integer(5), Value::Integer(50)], -1), // Delete row 5 + ], + ); + + // Test 1: Seek exact match for deleted row 3 should not find it + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(3), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::NotFound); + + // Test 2: Seek GE for row 2 should skip deleted row 3 and find row 7 + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(2), SeekOp::GE { eq_only: false }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(7)); + + // Test 3: Seek GT for row 1 should skip deleted rows and find row 7 + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(1), SeekOp::GT))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(7)); + + // Test 4: Seek LE for row 5 should find row 1 (skipping deleted 3 and 5) + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(5), SeekOp::LE { eq_only: false }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + Ok(()) + } + + #[test] + fn test_seek_with_updates() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Update row 3 (delete old + insert new) + apply_changes_to_tx_state( + &tx_state, + vec![ + (3, vec![Value::Integer(3), Value::Integer(30)], -1), // Delete old row 3 + (3, vec![Value::Integer(3), Value::Integer(35)], 1), // Insert new row 3 + ], + ); + + // Test: Seek for updated row 3 should find it + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(3), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + // The values should be from the uncommitted set (35 instead of 30) + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(35)); + + Ok(()) + } + + #[test] + fn test_seek_boundary_conditions() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 5, 10 + populate_test_table(&conn, vec![(5, 50), (10, 100)])?; + + // Create cursor for testing + let (mut cursor, _tx_state, pager) = create_test_cursor(&conn)?; + + // No 
uncommitted changes - tx_state is already empty + + // Test 1: Seek LT for minimum value (should find nothing) + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(1), SeekOp::LT))?; + assert_eq!(result, SeekResult::NotFound); + + // Test 2: Seek GT for maximum value (should find nothing) + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(15), SeekOp::GT))?; + assert_eq!(result, SeekResult::NotFound); + + // Test 3: Seek exact for non-existent key + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(7), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::NotFound); + + Ok(()) + } + + #[test] + fn test_seek_complex_uncommitted_weights() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with row 5 + populate_test_table(&conn, vec![(5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Complex uncommitted changes with multiple operations on same row + apply_changes_to_tx_state( + &tx_state, + vec![ + (5, vec![Value::Integer(5), Value::Integer(50)], -1), // Delete original + (5, vec![Value::Integer(5), Value::Integer(51)], 1), // Insert update 1 + (5, vec![Value::Integer(5), Value::Integer(51)], -1), // Delete update 1 + (5, vec![Value::Integer(5), Value::Integer(52)], 1), // Insert update 2 + // Net effect: row 5 exists with value 52 + ], + ); + + // Seek for row 5 should find it (net weight = 1 from btree + 0 from uncommitted = 1) + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(5), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + // The final value should be 52 from the last update + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(52)); + + Ok(()) + } + + #[test] + fn test_seek_affected_by_transaction_state_changes() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1 and 3 + populate_test_table(&conn, vec![(1, 10), (3, 30)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Seek for row 2 - doesn't exist + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(2), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::NotFound); + + // Add row 2 to uncommitted + tx_state.insert(2, vec![Value::Integer(2), Value::Integer(20)]); + + // Now seek for row 2 finds it + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(2), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(2)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(20)); + + Ok(()) + } + + #[test] + fn test_rewind_btree_first_uncommitted_later() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted rows 8, 10 (all larger than btree rows) + apply_changes_to_tx_state( + &tx_state, + vec![ + (8, vec![Value::Integer(8), Value::Integer(80)], 1), + (10, vec![Value::Integer(10), Value::Integer(100)], 1), + ], + ); + + // Initially cursor is not positioned + assert!(!cursor.is_valid()?); + + // Rewind should position at first btree row (1) since uncommitted are all larger + pager.io.block(|| cursor.rewind())?; + 
assert!(cursor.is_valid()?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + Ok(()) + } + + #[test] + fn test_rewind_with_uncommitted_first() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 5, 7 + populate_test_table(&conn, vec![(5, 50), (7, 70)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted row 2 (smaller than any btree row) + apply_changes_to_tx_state( + &tx_state, + vec![(2, vec![Value::Integer(2), Value::Integer(20)], 1)], + ); + + // Rewind should position at row 2 (uncommitted) + pager.io.block(|| cursor.rewind())?; + assert!(cursor.is_valid()?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(2)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(20)); + + Ok(()) + } + + #[test] + fn test_rewind_skip_deleted_first() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Delete row 1 in uncommitted + apply_changes_to_tx_state( + &tx_state, + vec![(1, vec![Value::Integer(1), Value::Integer(10)], -1)], + ); + + // Rewind should skip deleted row 1 and position at row 3 + pager.io.block(|| cursor.rewind())?; + assert!(cursor.is_valid()?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + Ok(()) + } + + #[test] + fn test_rewind_empty_btree_with_uncommitted() -> Result<()> { + let conn = create_test_connection()?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted rows (no btree data) + apply_changes_to_tx_state( + &tx_state, + vec![ + (3, vec![Value::Integer(3), Value::Integer(30)], 1), + (7, vec![Value::Integer(7), Value::Integer(70)], 1), + ], + ); + + // Rewind should find first uncommitted row + pager.io.block(|| cursor.rewind())?; + assert!(cursor.is_valid()?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(30)); + + Ok(()) + } + + #[test] + fn test_rewind_all_deleted() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 2, 4 + populate_test_table(&conn, vec![(2, 20), (4, 40)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Delete all rows in uncommitted + apply_changes_to_tx_state( + &tx_state, + vec![ + (2, vec![Value::Integer(2), Value::Integer(20)], -1), + (4, vec![Value::Integer(4), Value::Integer(40)], -1), + ], + ); + + // Rewind should find no valid rows + pager.io.block(|| cursor.rewind())?; + assert!(!cursor.is_valid()?); + assert_eq!(pager.io.block(|| cursor.rowid())?, None); + + Ok(()) + } + + #[test] + fn test_rewind_with_updates() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3 + populate_test_table(&conn, vec![(1, 10), (3, 30)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Update row 1 (delete + insert with new value) + apply_changes_to_tx_state( + &tx_state, + vec![ + (1, vec![Value::Integer(1), Value::Integer(10)], -1), + (1, vec![Value::Integer(1), Value::Integer(15)], 1), + ], + ); + + // Rewind should position at row 1 with updated value + pager.io.block(|| cursor.rewind())?; + assert!(cursor.is_valid()?); + assert_eq!(pager.io.block(|| 
cursor.rowid())?, Some(1)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(15)); + + Ok(()) + } + + // ===== NEXT() TEST SUITE ===== + + #[test] + fn test_next_btree_only_sequential() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5, 7 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50), (7, 70)])?; + + // Create cursor for testing + let (mut cursor, _tx_state, pager) = create_test_cursor(&conn)?; + + // Start with rewind to position at first row + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + // Next should move to row 3 + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + // Next should move to row 5 + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + // Next should move to row 7 + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(7)); + + // Next should reach end + assert!(!pager.io.block(|| cursor.next())?); + assert!(!cursor.is_valid()?); + + Ok(()) + } + + #[test] + fn test_next_uncommitted_only() -> Result<()> { + let conn = create_test_connection()?; + + // Create cursor for testing (no btree data) + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted rows 2, 4, 6 + apply_changes_to_tx_state( + &tx_state, + vec![ + (2, vec![Value::Integer(2), Value::Integer(20)], 1), + (4, vec![Value::Integer(4), Value::Integer(40)], 1), + (6, vec![Value::Integer(6), Value::Integer(60)], 1), + ], + ); + + // Start with rewind to position at first row + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(2)); + + // Next should move to row 4 + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(4)); + + // Next should move to row 6 + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(6)); + + // Next should reach end + assert!(!pager.io.block(|| cursor.next())?); + assert!(!cursor.is_valid()?); + + Ok(()) + } + + #[test] + fn test_next_mixed_btree_uncommitted() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 5, 9 + populate_test_table(&conn, vec![(1, 10), (5, 50), (9, 90)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted rows 3, 7 + apply_changes_to_tx_state( + &tx_state, + vec![ + (3, vec![Value::Integer(3), Value::Integer(30)], 1), + (7, vec![Value::Integer(7), Value::Integer(70)], 1), + ], + ); + + // Should iterate in order: 1, 3, 5, 7, 9 + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(7)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(9)); + + assert!(!pager.io.block(|| cursor.next())?); + assert!(!cursor.is_valid()?); + + Ok(()) + } + + #[test] + fn test_next_skip_deleted_rows() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 2, 3, 4, 5 + populate_test_table(&conn, vec![(1, 
10), (2, 20), (3, 30), (4, 40), (5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Delete rows 2 and 4 in uncommitted + apply_changes_to_tx_state( + &tx_state, + vec![ + (2, vec![Value::Integer(2), Value::Integer(20)], -1), + (4, vec![Value::Integer(4), Value::Integer(40)], -1), + ], + ); + + // Should iterate: 1, 3, 5 (skipping deleted 2 and 4) + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + assert!(!pager.io.block(|| cursor.next())?); + assert!(!cursor.is_valid()?); + + Ok(()) + } + + #[test] + fn test_next_with_updates() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Update row 3 (delete old + insert new) + apply_changes_to_tx_state( + &tx_state, + vec![ + (3, vec![Value::Integer(3), Value::Integer(30)], -1), + (3, vec![Value::Integer(3), Value::Integer(35)], 1), + ], + ); + + // Should iterate all rows with updated values + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(35)); // Updated value + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + assert!(!pager.io.block(|| cursor.next())?); + + Ok(()) + } + + #[test] + fn test_next_from_uninitialized() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 2, 4 + populate_test_table(&conn, vec![(2, 20), (4, 40)])?; + + // Create cursor for testing + let (mut cursor, _tx_state, pager) = create_test_cursor(&conn)?; + + // Cursor not positioned initially + assert!(!cursor.is_valid()?); + + // Next on uninitialized cursor should return false (matching BTreeCursor behavior) + assert!(!pager.io.block(|| cursor.next())?); + assert!(!cursor.is_valid()?); + + // Position cursor with rewind first + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(2)); + + // Now next should work + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(4)); + + assert!(!pager.io.block(|| cursor.next())?); + + Ok(()) + } + + #[test] + fn test_next_empty_table() -> Result<()> { + let conn = create_test_connection()?; + + // Create cursor for testing (empty table) + let (mut cursor, _tx_state, pager) = create_test_cursor(&conn)?; + + // Next on empty table should return false + assert!(!pager.io.block(|| cursor.next())?); + assert!(!cursor.is_valid()?); + + Ok(()) + } + + #[test] + fn test_next_all_deleted() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 2, 3 + populate_test_table(&conn, vec![(1, 10), (2, 20), (3, 30)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Delete all rows + apply_changes_to_tx_state( + &tx_state, + vec![ + (1, vec![Value::Integer(1), Value::Integer(10)], -1), + (2, vec![Value::Integer(2), 
Value::Integer(20)], -1), + (3, vec![Value::Integer(3), Value::Integer(30)], -1), + ], + ); + + // Next should find nothing + assert!(!pager.io.block(|| cursor.next())?); + assert!(!cursor.is_valid()?); + + Ok(()) + } + + #[test] + fn test_next_complex_interleaving() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 2, 4, 6, 8 + populate_test_table(&conn, vec![(2, 20), (4, 40), (6, 60), (8, 80)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Complex changes: + // - Insert row 1 + // - Delete row 2 + // - Insert row 3 + // - Update row 4 + // - Insert row 5 + // - Delete row 6 + // - Insert row 7 + // - Keep row 8 as-is + // - Insert row 9 + apply_changes_to_tx_state( + &tx_state, + vec![ + (1, vec![Value::Integer(1), Value::Integer(10)], 1), // Insert 1 + (2, vec![Value::Integer(2), Value::Integer(20)], -1), // Delete 2 + (3, vec![Value::Integer(3), Value::Integer(30)], 1), // Insert 3 + (4, vec![Value::Integer(4), Value::Integer(40)], -1), // Delete old 4 + (4, vec![Value::Integer(4), Value::Integer(45)], 1), // Insert new 4 + (5, vec![Value::Integer(5), Value::Integer(50)], 1), // Insert 5 + (6, vec![Value::Integer(6), Value::Integer(60)], -1), // Delete 6 + (7, vec![Value::Integer(7), Value::Integer(70)], 1), // Insert 7 + (9, vec![Value::Integer(9), Value::Integer(90)], 1), // Insert 9 + ], + ); + + // Should iterate: 1, 3, 4(updated), 5, 7, 8, 9 + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(4)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(45)); // Updated value + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(7)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(8)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(9)); + + assert!(!pager.io.block(|| cursor.next())?); + assert!(!cursor.is_valid()?); + + Ok(()) + } + + #[test] + fn test_next_after_seek() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5, 7, 9 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50), (7, 70), (9, 90)])?; + + // Create cursor for testing + let (mut cursor, _tx_state, pager) = create_test_cursor(&conn)?; + + // Seek to row 5 + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(5), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + // Next should move to row 7 + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(7)); + + // Next should move to row 9 + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(9)); + + // Next should reach end + assert!(!pager.io.block(|| cursor.next())?); + + Ok(()) + } + + #[test] + fn test_next_multiple_weights_same_row() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with row 1 + populate_test_table(&conn, vec![(1, 10)])?; + + // Create cursor for testing + let (mut 
cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Multiple operations on same row: + apply_changes_to_tx_state( + &tx_state, + vec![ + (1, vec![Value::Integer(1), Value::Integer(10)], -1), // Delete original + (1, vec![Value::Integer(1), Value::Integer(11)], 1), // Insert v1 + (1, vec![Value::Integer(1), Value::Integer(11)], -1), // Delete v1 + (1, vec![Value::Integer(1), Value::Integer(12)], 1), // Insert v2 + (1, vec![Value::Integer(1), Value::Integer(12)], -1), // Delete v2 + // Net weight: 1 (btree) - 1 + 1 - 1 + 1 - 1 = 0 (row deleted) + ], + ); + + // Row should be deleted + assert!(!pager.io.block(|| cursor.next())?); + assert!(!cursor.is_valid()?); + + Ok(()) + } + + #[test] + fn test_next_only_uncommitted_large_gaps() -> Result<()> { + let conn = create_test_connection()?; + + // Create cursor for testing (no btree data) + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted rows with large gaps + apply_changes_to_tx_state( + &tx_state, + vec![ + (100, vec![Value::Integer(100), Value::Integer(1000)], 1), + (500, vec![Value::Integer(500), Value::Integer(5000)], 1), + (999, vec![Value::Integer(999), Value::Integer(9990)], 1), + ], + ); + + // Should iterate through all with large gaps + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(100)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(500)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(999)); + + assert!(!pager.io.block(|| cursor.next())?); + + Ok(()) + } + + #[test] + fn test_multiple_updates_same_row_single_transaction() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 2, 3 + populate_test_table(&conn, vec![(1, 10), (2, 20), (3, 30)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Multiple successive updates to row 2 in the same transaction + // 20 -> 25 -> 28 -> 32 (final value should be 32) + apply_changes_to_tx_state( + &tx_state, + vec![ + (2, vec![Value::Integer(2), Value::Integer(20)], -1), // Delete original + (2, vec![Value::Integer(2), Value::Integer(25)], 1), // First update + (2, vec![Value::Integer(2), Value::Integer(25)], -1), // Delete first update + (2, vec![Value::Integer(2), Value::Integer(28)], 1), // Second update + (2, vec![Value::Integer(2), Value::Integer(28)], -1), // Delete second update + (2, vec![Value::Integer(2), Value::Integer(32)], 1), // Final update + ], + ); + + // Seek to row 2 should find the final value + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(2), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(2)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(32)); + + // Next through all rows to verify only final values are seen + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(10)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(2)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(32)); // Final value + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(30)); + + 
assert!(!pager.io.block(|| cursor.next())?); + + Ok(()) + } + + #[test] + fn test_empty_materialized_view_with_uncommitted() -> Result<()> { + let conn = create_test_connection()?; + + // Don't populate any data - view is created but empty + // This tests a materialized view that was never populated + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted rows to empty materialized view + apply_changes_to_tx_state( + &tx_state, + vec![ + (5, vec![Value::Integer(5), Value::Integer(50)], 1), + (10, vec![Value::Integer(10), Value::Integer(100)], 1), + (15, vec![Value::Integer(15), Value::Integer(150)], 1), + ], + ); + + // Test seek on empty materialized view with uncommitted data + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(10), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(10)); + + // Test GT seek + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(7), SeekOp::GT))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(10)); + + // Test rewind and next + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(10)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(15)); + + assert!(!pager.io.block(|| cursor.next())?); + + Ok(()) + } + + #[test] + fn test_exact_match_btree_uncommitted_same_rowid_different_values() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted row 3 with different value (not a delete+insert, just insert) + // This simulates a case where uncommitted has a new version of row 3 + apply_changes_to_tx_state( + &tx_state, + vec![ + (3, vec![Value::Integer(3), Value::Integer(35)], 1), // New version with positive weight + ], + ); + + // Exact match seek for row 3 should find the uncommitted version (35) + // because when both exist with positive weight, uncommitted takes precedence + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(3), SeekOp::GE { eq_only: true }))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + // This test verifies which value we get when both btree and uncommitted + // have the same rowid with positive weights + // The expected behavior needs to be defined - typically uncommitted wins + // or they get merged based on the DBSP semantics + + Ok(()) + } + + #[test] + fn test_boundary_value_seeks() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with some normal values + populate_test_table(&conn, vec![(100, 1000), (200, 2000)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted rows at extreme positions + apply_changes_to_tx_state( + &tx_state, + vec![ + ( + i64::MIN + 1, + vec![Value::Integer(i64::MIN + 1), Value::Integer(-999)], + 1, + ), + ( + i64::MAX - 1, + vec![Value::Integer(i64::MAX - 1), Value::Integer(999)], + 1, + ), + ], + ); + + // Test 1: Seek GT with i64::MAX should find nothing + let result = pager + .io + .block(|| 
cursor.seek(SeekKey::TableRowId(i64::MAX), SeekOp::GT))?; + assert_eq!(result, SeekResult::NotFound); + + // Test 2: Seek LT with i64::MIN should find nothing + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(i64::MIN), SeekOp::LT))?; + assert_eq!(result, SeekResult::NotFound); + + // Test 3: Seek GE with i64::MAX - 1 should find our extreme row + let result = pager.io.block(|| { + cursor.seek( + SeekKey::TableRowId(i64::MAX - 1), + SeekOp::GE { eq_only: false }, + ) + })?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(i64::MAX - 1)); + + // Test 4: Seek LE with i64::MIN + 1 should find our extreme low row + let result = pager.io.block(|| { + cursor.seek( + SeekKey::TableRowId(i64::MIN + 1), + SeekOp::LE { eq_only: false }, + ) + })?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(i64::MIN + 1)); + + // Test 5: Seek GT from i64::MIN should find the smallest row + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(i64::MIN), SeekOp::GT))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(i64::MIN + 1)); + + // Test 6: Seek LT from i64::MAX should find the largest row + let result = pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(i64::MAX), SeekOp::LT))?; + assert_eq!(result, SeekResult::Found); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(i64::MAX - 1)); + + Ok(()) + } + + #[test] + fn test_next_concurrent_btree_uncommitted_advance() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 2, 3, 4, 5 + populate_test_table(&conn, vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Delete some btree rows and add replacements in uncommitted + apply_changes_to_tx_state( + &tx_state, + vec![ + (2, vec![Value::Integer(2), Value::Integer(20)], -1), // Delete btree row 2 + (2, vec![Value::Integer(2), Value::Integer(25)], 1), // Replace with new value + (4, vec![Value::Integer(4), Value::Integer(40)], -1), // Delete btree row 4 + ], + ); + + // Should iterate: 1, 2(new), 3, 5 + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(2)); + assert_eq!(pager.io.block(|| cursor.column(1))?, Value::Integer(25)); // New value + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + assert!(!pager.io.block(|| cursor.next())?); + + Ok(()) + } + + #[test] + fn test_transaction_state_changes_mid_iteration() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Start iteration + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + // Move to next row + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + // Now add new uncommitted changes mid-iteration + apply_changes_to_tx_state( + &tx_state, + vec![ + (2, vec![Value::Integer(2), Value::Integer(20)], 1), // Insert before 
current + (4, vec![Value::Integer(4), Value::Integer(40)], 1), // Insert after current + (6, vec![Value::Integer(6), Value::Integer(60)], 1), // Insert at end + ], + ); + + // Continue iteration - cursor continues from where it was, sees row 5 next + // (new changes are only visible after rewind/seek) + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + // No more rows in original iteration + assert!(!pager.io.block(|| cursor.next())?); + + // Rewind and verify we see all rows including the newly added ones + pager.io.block(|| cursor.rewind())?; + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(2)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(4)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(6)); + + assert!(!pager.io.block(|| cursor.next())?); + + Ok(()) + } + + #[test] + fn test_rewind_after_failed_seek() -> Result<()> { + let conn = create_test_connection()?; + + // Populate table with rows 1, 3, 5 + populate_test_table(&conn, vec![(1, 10), (3, 30), (5, 50)])?; + + // Create cursor for testing + let (mut cursor, tx_state, pager) = create_test_cursor(&conn)?; + + // Add uncommitted row 2 + apply_changes_to_tx_state( + &tx_state, + vec![(2, vec![Value::Integer(2), Value::Integer(20)], 1)], + ); + + // Seek to non-existent row 4 with exact match + assert_eq!( + pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(4), SeekOp::GE { eq_only: true }))?, + SeekResult::NotFound + ); + assert!(!cursor.is_valid()?); + + // Rewind should work correctly after failed seek + pager.io.block(|| cursor.rewind())?; + assert!(cursor.is_valid()?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + // Verify we can iterate through all rows + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(2)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(3)); + + assert!(pager.io.block(|| cursor.next())?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(5)); + + assert!(!pager.io.block(|| cursor.next())?); + + // Try another failed seek (GT on maximum value) + assert_eq!( + pager + .io + .block(|| cursor.seek(SeekKey::TableRowId(5), SeekOp::GT))?, + SeekResult::NotFound + ); + assert!(!cursor.is_valid()?); + + // Rewind again + pager.io.block(|| cursor.rewind())?; + assert!(cursor.is_valid()?); + assert_eq!(pager.io.block(|| cursor.rowid())?, Some(1)); + + Ok(()) + } +} diff --git a/core/incremental/dbsp.rs b/core/incremental/dbsp.rs index 962ceb9c8..607fd562b 100644 --- a/core/incremental/dbsp.rs +++ b/core/incremental/dbsp.rs @@ -1,19 +1,86 @@ // Simplified DBSP integration for incremental view maintenance // For now, we'll use a basic approach and can expand to full DBSP later -use std::collections::HashMap; +use super::hashable_row::HashableRow; +use crate::Value; +use std::collections::{BTreeMap, HashMap}; + +type DeltaEntry = (HashableRow, isize); +/// A delta represents ordered changes to data +#[derive(Debug, Clone, Default)] +pub struct Delta { + /// Ordered list of changes: (row, weight) where weight is 
+1 for insert, -1 for delete + /// It is crucial that this is ordered. Imagine the case of an update, which becomes a delete + + /// insert. If this is not ordered, it would be applied in arbitrary order and break the view. + pub changes: Vec<DeltaEntry>, +} + +impl Delta { + pub fn new() -> Self { + Self { + changes: Vec::new(), + } + } + + pub fn insert(&mut self, row_key: i64, values: Vec<Value>) { + let row = HashableRow::new(row_key, values); + self.changes.push((row, 1)); + } + + pub fn delete(&mut self, row_key: i64, values: Vec<Value>) { + let row = HashableRow::new(row_key, values); + self.changes.push((row, -1)); + } + + pub fn is_empty(&self) -> bool { + self.changes.is_empty() + } + + pub fn len(&self) -> usize { + self.changes.len() + } + + /// Merge another delta into this one + /// This preserves the order of operations - no consolidation is done + /// to maintain the full history of changes + pub fn merge(&mut self, other: &Delta) { + // Simply append all changes from other, preserving order + self.changes.extend(other.changes.iter().cloned()); + } + + /// Consolidate changes by combining entries with the same HashableRow + pub fn consolidate(&mut self) { + if self.changes.is_empty() { + return; + } + + // Use a HashMap to accumulate weights + let mut consolidated: HashMap<HashableRow, isize> = HashMap::new(); + + for (row, weight) in self.changes.drain(..) { + *consolidated.entry(row).or_insert(0) += weight; + } + + // Convert back to vec, filtering out zero weights + self.changes = consolidated + .into_iter() + .filter(|(_, weight)| *weight != 0) + .collect(); + } +} /// A simplified ZSet for incremental computation /// Each element has a weight: positive for additions, negative for deletions #[derive(Clone, Debug, Default)] pub struct SimpleZSet<T> { - data: HashMap<T, isize>, + data: BTreeMap<T, isize>, } -impl<T: Clone + Eq + std::hash::Hash> SimpleZSet<T> { +#[allow(dead_code)] +impl<T: Clone + Ord> SimpleZSet<T> { pub fn new() -> Self { Self { - data: HashMap::new(), + data: BTreeMap::new(), } } @@ -45,36 +112,121 @@ impl<T: Clone + Eq + std::hash::Hash> SimpleZSet<T> { self.insert(item.clone(), weight); } } -} -/// A simplified stream for incremental computation -#[derive(Clone, Debug)] -pub struct SimpleStream<T> { - current: SimpleZSet<T>, -} - -impl<T: Clone + Eq + std::hash::Hash> SimpleStream<T> { - pub fn from_zset(zset: SimpleZSet<T>) -> Self { - Self { current: zset } + /// Get the weight for a specific item (0 if not present) + pub fn get(&self, item: &T) -> isize { + self.data.get(item).copied().unwrap_or(0) } - /// Apply a delta (change) to the stream - pub fn apply_delta(&mut self, delta: &SimpleZSet<T>) { - self.current.merge(delta); + /// Get the first element (smallest key) in the Z-set + pub fn first(&self) -> Option<(&T, isize)> { + self.data.iter().next().map(|(k, &v)| (k, v)) + } - /// Get the current state as a vector of items (only positive weights) - pub fn to_vec(&self) -> Vec<T> { - self.current.to_vec() + /// Get the last element (largest key) in the Z-set + pub fn last(&self) -> Option<(&T, isize)> { + self.data.iter().next_back().map(|(k, &v)| (k, v)) + } + + /// Get a range of elements + pub fn range<R>(&self, range: R) -> impl Iterator<Item = (&T, isize)> + '_ + where + R: std::ops::RangeBounds<T>, + { + self.data.range(range).map(|(k, &v)| (k, v)) + } + + /// Check if empty + pub fn is_empty(&self) -> bool { + self.data.is_empty() + } + + /// Get the number of elements + pub fn len(&self) -> usize { + self.data.len() + } } // Type aliases for convenience -use super::hashable_row::HashableRow; - pub type RowKey = HashableRow; pub type RowKeyZSet = SimpleZSet<RowKey>; -pub type RowKeyStream = SimpleStream<RowKey>;
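// Illustrative example of the Delta API above: an update is represented as a
// delete of the old row followed by an insert of the new one, and
// consolidate() folds entries by net weight, so only real changes survive.
//
//     let mut delta = Delta::new();
//     delta.delete(1, vec![Value::Integer(1), Value::Integer(10)]); // retract old version
//     delta.insert(1, vec![Value::Integer(1), Value::Integer(11)]); // add new version
//     delta.insert(2, vec![Value::Integer(2), Value::Integer(20)]);
//     delta.delete(2, vec![Value::Integer(2), Value::Integer(20)]);
//     delta.consolidate();
//     // Both rowid-1 entries remain (the old and new versions differ in their
//     // values, so they are distinct HashableRows); the rowid-2 insert/delete
//     // pair is identical in every field and cancels to weight 0, so it is dropped.
//     assert_eq!(delta.len(), 2);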
+ +impl RowKeyZSet { + /// Create a Z-set from a Delta by consolidating all changes + pub fn from_delta(delta: &Delta) -> Self { + let mut zset = Self::new(); + + // Add all changes from the delta, consolidating as we go + for (row, weight) in &delta.changes { + zset.insert(row.clone(), *weight); + } + + zset + } + + /// Seek to find ALL entries for the best matching rowid + /// For GT/GE: returns all entries for the smallest rowid that satisfies the condition + /// For LT/LE: returns all entries for the largest rowid that satisfies the condition + /// Returns empty vec if no match found + pub fn seek(&self, target: i64, op: crate::types::SeekOp) -> Vec<(HashableRow, isize)> { + use crate::types::SeekOp; + + // First find the best matching rowid + let best_rowid = match op { + SeekOp::GT => { + // Find smallest rowid > target + self.data + .iter() + .filter(|(row, _)| row.rowid > target) + .map(|(row, _)| row.rowid) + .min() + } + SeekOp::GE { eq_only: false } => { + // Find smallest rowid >= target + self.data + .iter() + .filter(|(row, _)| row.rowid >= target) + .map(|(row, _)| row.rowid) + .min() + } + SeekOp::GE { eq_only: true } | SeekOp::LE { eq_only: true } => { + // Need exact match + if self.data.iter().any(|(row, _)| row.rowid == target) { + Some(target) + } else { + None + } + } + SeekOp::LT => { + // Find largest rowid < target + self.data + .iter() + .filter(|(row, _)| row.rowid < target) + .map(|(row, _)| row.rowid) + .max() + } + SeekOp::LE { eq_only: false } => { + // Find largest rowid <= target + self.data + .iter() + .filter(|(row, _)| row.rowid <= target) + .map(|(row, _)| row.rowid) + .max() + } + }; + + // Now get ALL entries with that rowid + match best_rowid { + Some(rowid) => self + .data + .iter() + .filter(|(row, _)| row.rowid == rowid) + .map(|(k, &v)| (k.clone(), v)) + .collect(), + None => Vec::new(), + } + } +} #[cfg(test)] mod tests { diff --git a/core/incremental/hashable_row.rs b/core/incremental/hashable_row.rs index 46be59bde..799f88e87 100644 --- a/core/incremental/hashable_row.rs +++ b/core/incremental/hashable_row.rs @@ -78,3 +78,23 @@ impl Hash for HashableRow { self.cached_hash.hash(state); } } + +impl PartialOrd for HashableRow { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for HashableRow { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // First compare by rowid, then by values if rowids are equal + // This ensures Ord is consistent with Eq (which compares all fields) + match self.rowid.cmp(&other.rowid) { + std::cmp::Ordering::Equal => { + // If rowids are equal, compare values to maintain consistency with Eq + self.values.cmp(&other.values) + } + other => other, + } + } +}
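// With the ordering defined above, a BTreeMap keyed by HashableRow can keep
// several versions of the same rowid next to each other, sorted first by
// rowid and then by values, e.g.
//
//     row(1, [Int(1), Int(10)]) < row(1, [Int(1), Int(11)]) < row(2, [Int(2), Int(20)])
//
// This is what lets RowKeyZSet (a BTreeMap underneath) hold multiple weighted
// entries for one rowid and have seek() return all of them for the
// best-matching rowid.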
diff --git a/core/incremental/mod.rs b/core/incremental/mod.rs index 4c26b91ba..755a27351 100644 --- a/core/incremental/mod.rs +++ b/core/incremental/mod.rs @@ -1,4 +1,5 @@ pub mod compiler; +pub mod cursor; pub mod dbsp; pub mod expr_compiler; pub mod hashable_row; diff --git a/core/incremental/operator.rs b/core/incremental/operator.rs index 4f148e943..46a933b87 100644 --- a/core/incremental/operator.rs +++ b/core/incremental/operator.rs @@ -2,15 +2,321 @@ // Operator DAG for DBSP-style incremental computation // Based on Feldera DBSP design but adapted for Turso's architecture +use crate::function::{AggFunc, Func}; +use crate::incremental::dbsp::Delta; use crate::incremental::expr_compiler::CompiledExpression; use crate::incremental::hashable_row::HashableRow; -use crate::types::Text; -use crate::{Connection, Database, SymbolTable, Value}; -use std::collections::{HashMap, HashSet}; +use crate::storage::btree::{BTreeCursor, BTreeKey}; +use crate::types::{IOResult, SeekKey, SeekOp, SeekResult, Text}; +use crate::{ + return_and_restore_if_io, return_if_io, Connection, Database, Result, SymbolTable, Value, +}; +use std::collections::{BTreeMap, HashMap}; use std::fmt::{self, Debug, Display}; -use std::sync::Arc; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use turso_macros::match_ignore_ascii_case; +use turso_parser::ast::{As, Expr, Literal, Name, OneSelect, Operator, ResultColumn}; + +#[derive(Debug)] +pub enum ReadRecord { + GetRecord, + Done { state: Option<AggregateState> }, +} + +impl ReadRecord { + fn new() -> Self { + ReadRecord::GetRecord + } + + fn read_record( + &mut self, + key: SeekKey, + aggregates: &[AggregateFunction], + cursor: &mut BTreeCursor, + ) -> Result<IOResult<Option<AggregateState>>> { + loop { + match self { + ReadRecord::GetRecord => { + let res = return_if_io!(cursor.seek(key.clone(), SeekOp::GE { eq_only: true })); + if !matches!(res, SeekResult::Found) { + *self = ReadRecord::Done { state: None }; + } else { + let record = return_if_io!(cursor.record()); + let r = record.ok_or_else(|| { + crate::LimboError::InternalError(format!( + "Found key {key:?} in aggregate storage but could not read record" + )) + })?; + let values = r.get_values(); + let blob = values[1].to_owned(); + + let (state, _group_key) = match blob { + Value::Blob(blob) => AggregateState::from_blob(&blob, aggregates) + .ok_or_else(|| { + crate::LimboError::InternalError(format!( + "Cannot deserialize aggregate state {blob:?}", + )) + }), + _ => Err(crate::LimboError::ParseError( + "Value in aggregator not blob".to_string(), + )), + }?; + *self = ReadRecord::Done { state: Some(state) } + } + } + ReadRecord::Done { state } => return Ok(IOResult::Done(state.clone())), + } + } + } +} + +#[derive(Debug)] +pub(crate) enum WriteRecord { + GetRecord, + Delete { final_weight: isize }, + Insert { final_weight: isize }, + Done, +} +impl WriteRecord { + fn new() -> Self { + WriteRecord::GetRecord + } + + fn write_record( + &mut self, + key: SeekKey, + record: HashableRow, + weight: isize, + cursor: &mut BTreeCursor, + ) -> Result<IOResult<()>> { + loop { + match self { + WriteRecord::GetRecord => { + let res = return_if_io!(cursor.seek(key.clone(), SeekOp::GE { eq_only: true })); + if !matches!(res, SeekResult::Found) { + *self = WriteRecord::Insert { + final_weight: weight, + }; + } else { + let existing_record = return_if_io!(cursor.record()); + let r = existing_record.ok_or_else(|| { + crate::LimboError::InternalError(format!( + "Found key {key:?} in aggregate storage but could not read record" + )) + })?; + let values = r.get_values(); + // values[2] should contain the weight + let existing_weight = match values[2].to_owned() { + Value::Integer(w) => w as isize, + _ => { + return Err(crate::LimboError::InternalError(format!( + "Invalid weight value in aggregate storage for key {key:?}" + ))) + } + }; + let final_weight = existing_weight + weight; + if final_weight <= 0 { + *self = WriteRecord::Delete { final_weight } + } else { + *self = WriteRecord::Insert { final_weight } + } + } + } + WriteRecord::Delete { final_weight: _ } => { + let res = return_if_io!(cursor.seek(key.clone(), SeekOp::GE { eq_only: true })); + if !matches!(res, SeekResult::Found) { + return Err(crate::LimboError::InternalError(format!( + "record not found for {key:?}, but we had just GetRecord! Should not be possible" + ))); + } + // Done - row was deleted and weights cancel out. 
+ // If we initiated the delete, we will complete it, so Done has to be set + // beforehand so we don't come back here. + *self = WriteRecord::Done; + return_if_io!(cursor.delete()); + } + WriteRecord::Insert { final_weight } => { + return_if_io!(cursor.seek(key.clone(), SeekOp::GE { eq_only: true })); + // Build the key and insert the record + let key_i64 = match key { + SeekKey::TableRowId(id) => id, + _ => { + return Err(crate::LimboError::InternalError( + "Expected TableRowId for aggregate storage".to_string(), + )) + } + }; + // Create the record values: key, blob, weight + let record_values = vec![ + Value::Integer(key_i64), + record.values[0].clone(), // The blob with serialized state + Value::Integer(*final_weight as i64), + ]; + + // Create an ImmutableRecord from the values + let immutable_record = crate::types::ImmutableRecord::from_values( + &record_values, + record_values.len(), + ); + let btree_key = BTreeKey::new_table_rowid(key_i64, Some(&immutable_record)); + + *self = WriteRecord::Done; + return_if_io!(cursor.insert(&btree_key)); + } + WriteRecord::Done => { + return Ok(IOResult::Done(())); + } + } + } + } +} + +type ComputedStates = HashMap<String, (Vec<Value>, AggregateState)>; // group_key_str -> (group_key, state) +#[derive(Debug)] +enum AggregateCommitState { + Idle, + Eval { + eval_state: EvalState, + }, + PersistDelta { + delta: Delta, + computed_states: ComputedStates, + current_idx: usize, + write_record: WriteRecord, + }, + Done { + delta: Delta, + }, + Invalid, +} + +// eval() has uncommitted data, so it can't be a member attribute of the Operator. +// The state has to be kept by the caller +#[derive(Debug)] +pub enum EvalState { + Uninitialized, + Init { + delta: Delta, + }, + FetchData { + delta: Delta, // Keep original delta for merge operation + current_idx: usize, + groups_to_read: Vec<(String, Vec<Value>)>, // Changed to Vec for index-based access + existing_groups: HashMap<String, AggregateState>, + old_values: HashMap<String, Vec<Value>>, + read_record_state: Box<ReadRecord>, + }, + Done, +} + +impl From<Delta> for EvalState { + fn from(delta: Delta) -> Self { + EvalState::Init { delta } + } +} + +impl EvalState { + fn from_delta(delta: Delta) -> Self { + Self::Init { delta } + } + + fn delta_ref(&self) -> &Delta { + match self { + EvalState::Init { delta } => delta, + _ => panic!("delta_ref() can only be called when in Init state",), + } + } + pub fn extract_delta(&mut self) -> Delta { + match self { + EvalState::Init { delta } => { + let extracted = std::mem::take(delta); + *self = EvalState::Uninitialized; + extracted + } + _ => panic!("extract_delta() can only be called when in Init state"), + } + } + + fn advance(&mut self, groups_to_read: BTreeMap<String, Vec<Value>>) { + let delta = match self { + EvalState::Init { delta } => std::mem::take(delta), + _ => panic!("advance() can only be called when in Init state, current state: {self:?}"), + }; + + let _ = std::mem::replace( + self, + EvalState::FetchData { + delta, + current_idx: 0, + groups_to_read: groups_to_read.into_iter().collect(), // Convert BTreeMap to Vec + existing_groups: HashMap::new(), + old_values: HashMap::new(), + read_record_state: Box::new(ReadRecord::new()), + }, + ); + } + fn process_delta( + &mut self, + operator: &mut AggregateOperator, + cursor: &mut BTreeCursor, + ) -> Result<IOResult<(Delta, ComputedStates)>> { + loop { + match self { + EvalState::Uninitialized => { + panic!("Cannot process_delta with Uninitialized state"); + } + EvalState::Init { .. } => { + panic!("State machine not supposed to reach the init state! 
advance() should have been called"); + } + EvalState::FetchData { + delta, + current_idx, + groups_to_read, + existing_groups, + old_values, + read_record_state, + } => { + if *current_idx >= groups_to_read.len() { + // All groups processed, compute final output + let result = + operator.merge_delta_with_existing(delta, existing_groups, old_values); + *self = EvalState::Done; + return Ok(IOResult::Done(result)); + } else { + // Get the current group to read + let (group_key_str, group_key) = &groups_to_read[*current_idx]; + + let seek_key = operator.generate_storage_key(group_key_str); + let key = SeekKey::TableRowId(seek_key); + + let state = return_if_io!(read_record_state.read_record( + key, + &operator.aggregates, + cursor + )); + + // Anything that mutates state has to happen after return_if_io! + // Unfortunately there's no good way to enforce that without turning + // this into a hot mess of mem::takes. + if let Some(state) = state { + let mut old_row = group_key.clone(); + old_row.extend(state.to_values(&operator.aggregates)); + old_values.insert(group_key_str.clone(), old_row); + existing_groups.insert(group_key_str.clone(), state.clone()); + } + + // All attributes mutated in place. + *current_idx += 1; + *read_record_state = Box::new(ReadRecord::new()); + } + } + EvalState::Done => { + return Ok(IOResult::Done((Delta::new(), HashMap::new()))); + } + } + } + } +} /// Tracks computation counts to verify incremental behavior (for tests now), and in the future /// should be used to provide statistics. @@ -56,69 +362,6 @@ impl ComputationTracker { } } -/// A delta represents ordered changes to data -#[derive(Debug, Clone, Default)] -pub struct Delta { - /// Ordered list of changes: (row, weight) where weight is +1 for insert, -1 for delete - /// It is crucial that this is ordered. Imagine the case of an update, which becomes a delete + - /// insert. If this is not ordered, it would be applied in arbitrary order and break the view. - pub changes: Vec<(HashableRow, isize)>, -} - -impl Delta { - pub fn new() -> Self { - Self { - changes: Vec::new(), - } - } - - pub fn insert(&mut self, row_key: i64, values: Vec<Value>) { - let row = HashableRow::new(row_key, values); - self.changes.push((row, 1)); - } - - pub fn delete(&mut self, row_key: i64, values: Vec<Value>) { - let row = HashableRow::new(row_key, values); - self.changes.push((row, -1)); - } - - pub fn is_empty(&self) -> bool { - self.changes.is_empty() - } - - pub fn len(&self) -> usize { - self.changes.len() - } - - /// Merge another delta into this one - /// This preserves the order of operations - no consolidation is done - /// to maintain the full history of changes - pub fn merge(&mut self, other: &Delta) { - // Simply append all changes from other, preserving order - self.changes.extend(other.changes.iter().cloned()); - } - - /// Consolidate changes by combining entries with the same HashableRow - pub fn consolidate(&mut self) { - if self.changes.is_empty() { - return; - } - - // Use a HashMap to accumulate weights - let mut consolidated: HashMap<HashableRow, isize> = HashMap::new(); - - for (row, weight) in self.changes.drain(..) 
{ - *consolidated.entry(row).or_insert(0) += weight; - } - - // Convert back to vec, filtering out zero weights - self.changes = consolidated - .into_iter() - .filter(|(_, weight)| *weight != 0) - .collect(); - } -} - #[cfg(test)] mod hashable_row_tests { use super::*; @@ -240,8 +483,6 @@ impl FilterPredicate { /// Parse a SQL AST expression into a FilterPredicate /// This centralizes all SQL-to-predicate parsing logic pub fn from_sql_expr(expr: &turso_parser::ast::Expr) -> crate::Result<Self> { - use turso_parser::ast::*; - let Expr::Binary(lhs, op, rhs) = expr else { return Err(crate::LimboError::ParseError( "Unsupported WHERE clause for incremental views: not a binary expression" @@ -323,8 +564,6 @@ impl FilterPredicate { /// Parse a WHERE clause from a SELECT statement pub fn from_select(select: &turso_parser::ast::Select) -> crate::Result<Self> { - use turso_parser::ast::*; - if let OneSelect::Select { ref where_clause, .. } = select.body.select @@ -391,8 +630,6 @@ impl AggregateFunction { func: &crate::function::Func, input_column: Option<String>, ) -> Option<Self> { - use crate::function::{AggFunc, Func}; - match func { Func::Agg(agg_func) => { match agg_func { @@ -412,34 +649,77 @@ /// Operator DAG (Directed Acyclic Graph) /// Base trait for incremental operators pub trait IncrementalOperator: Debug { - /// Initialize with base data - fn initialize(&mut self, data: Delta); - - /// Evaluate the operator with a delta, without modifying internal state - /// This is used during query execution to compute results including uncommitted changes + /// Evaluate the operator with a state, without modifying internal state + /// This is used during query execution to compute results + /// May need to read from storage to get current state (e.g., for aggregates) /// /// # Arguments - /// * `delta` - The committed delta to process - /// * `uncommitted` - Optional uncommitted changes from the current transaction - fn eval(&self, delta: Delta, uncommitted: Option<Delta>) -> Delta; + /// * `state` - The evaluation state (may be in progress from a previous I/O operation) + /// * `cursor` - Cursor for reading operator state from storage + /// + /// # Returns + /// The output delta from the evaluation + fn eval(&mut self, state: &mut EvalState, cursor: &mut BTreeCursor) -> Result<IOResult<Delta>>; /// Commit a delta to the operator's internal state and return the output /// This is called when a transaction commits, making changes permanent /// Returns the output delta (what downstream operators should see) - fn commit(&mut self, delta: Delta) -> Delta; - - /// Get current accumulated state - fn get_current_state(&self) -> Delta; + /// The cursor parameter is for operators that need to persist state + fn commit(&mut self, delta: Delta, cursor: &mut BTreeCursor) -> Result<IOResult<Delta>>; /// Set computation tracker fn set_tracker(&mut self, tracker: Arc<Mutex<ComputationTracker>>); } +/// Input operator - passes through input data unchanged +/// This operator is used for input nodes in the circuit to provide a uniform interface +#[derive(Debug)] +pub struct InputOperator { + name: String, +} + +impl InputOperator { + pub fn new(name: String) -> Self { + Self { name } + } + + pub fn name(&self) -> &str { + &self.name + } +} + +impl IncrementalOperator for InputOperator { + fn eval( + &mut self, + state: &mut EvalState, + _cursor: &mut BTreeCursor, + ) -> Result<IOResult<Delta>> { + match state { + EvalState::Init { delta } => { + let output = std::mem::take(delta); + *state = EvalState::Done; + Ok(IOResult::Done(output)) + } + _ => unreachable!( + "InputOperator doesn't execute the 
state machine. Should be in Init state" + ), + } + } + + fn commit(&mut self, delta: Delta, _cursor: &mut BTreeCursor) -> Result<IOResult<Delta>> { + // Input operator passes through the delta unchanged during commit + Ok(IOResult::Done(delta)) + } + + fn set_tracker(&mut self, _tracker: Arc<Mutex<ComputationTracker>>) { + // Input operator doesn't need tracking + } +} + /// Filter operator - filters rows based on predicate #[derive(Debug)] pub struct FilterOperator { predicate: FilterPredicate, - current_state: Delta, column_names: Vec<String>, tracker: Option<Arc<Mutex<ComputationTracker>>>, } @@ -448,7 +728,6 @@ impl FilterOperator { pub fn new(predicate: FilterPredicate, column_names: Vec<String>) -> Self { Self { predicate, - current_state: Delta::new(), column_names, tracker: None, } @@ -549,33 +828,22 @@ } impl IncrementalOperator for FilterOperator { - fn initialize(&mut self, data: Delta) { - // Process initial data through filter - for (row, weight) in data.changes { - if let Some(tracker) = &self.tracker { - tracker.lock().unwrap().record_filter(); - } - - if self.evaluate_predicate(&row.values) { - self.current_state.changes.push((row, weight)); - } - } - } - - fn eval(&self, delta: Delta, uncommitted: Option<Delta>) -> Delta { - let mut output_delta = Delta::new(); - - // Merge delta with uncommitted if present - let combined_delta = if let Some(uncommitted) = uncommitted { - let mut combined = delta; - combined.merge(&uncommitted); - combined - } else { - delta + fn eval( + &mut self, + state: &mut EvalState, + _cursor: &mut BTreeCursor, + ) -> Result<IOResult<Delta>> { + let delta = match state { + EvalState::Init { delta } => std::mem::take(delta), + _ => unreachable!( + "FilterOperator doesn't execute the state machine. Should be in Init state" + ), }; - // Process the combined delta through the filter - for (row, weight) in combined_delta.changes { + let mut output_delta = Delta::new(); + + // Process the delta through the filter + for (row, weight) in delta.changes { if let Some(tracker) = &self.tracker { tracker.lock().unwrap().record_filter(); } @@ -588,10 +856,11 @@ } } - output_delta + *state = EvalState::Done; + Ok(IOResult::Done(output_delta)) } - fn commit(&mut self, delta: Delta) -> Delta { + fn commit(&mut self, delta: Delta, _cursor: &mut BTreeCursor) -> Result<IOResult<Delta>> { let mut output_delta = Delta::new(); // Commit the delta to our internal state @@ -605,19 +874,11 @@ // For deletes, this means the row was in the view (its values pass the filter) // For inserts, this means the row should be in the view if self.evaluate_predicate(&row.values) { - self.current_state.changes.push((row.clone(), weight)); output_delta.changes.push((row, weight)); } } - output_delta - } - - fn get_current_state(&self) -> Delta { - // Return a consolidated view of the current state - let mut consolidated = self.current_state.clone(); - consolidated.consolidate(); - consolidated + Ok(IOResult::Done(output_delta)) } fn set_tracker(&mut self, tracker: Arc<Mutex<ComputationTracker>>) { @@ -631,7 +892,6 @@ pub struct ProjectOperator { columns: Vec, input_column_names: Vec<String>, output_column_names: Vec<String>, - current_state: Delta, tracker: Option<Arc<Mutex<ComputationTracker>>>, // Internal in-memory connection for expression evaluation // Programs are very dependent on having a connection, so give it one. 
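// Rough sketch of how a caller is expected to drive these operators
// (hypothetical driver code, assuming only the types defined above; the real
// callers live in the view-maintenance machinery):
//
//     let mut state = EvalState::from(delta);       // seed with the input changes
//     let output = loop {
//         match op.eval(&mut state, &mut cursor)? {
//             IOResult::Done(d) => break d,         // evaluation finished
//             _ => { /* pending I/O: run the event loop once, then call eval again */ }
//         }
//     };
//
// Stateless operators (Input, Filter, Project) finish in a single pass and
// ignore the cursor; AggregateOperator may yield I/O repeatedly while it reads
// its persisted per-group state.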
@@ -652,7 +912,6 @@ impl std::fmt::Debug for ProjectOperator { .field("columns", &self.columns) .field("input_column_names", &self.input_column_names) .field("output_column_names", &self.output_column_names) - .field("current_state", &self.current_state) .field("tracker", &self.tracker) .finish_non_exhaustive() } @@ -665,8 +924,6 @@ impl ProjectOperator { pub fn new( columns: Vec, input_column_names: Vec<String>, schema: &crate::schema::Schema, ) -> crate::Result<Self> { - use turso_parser::ast::*; - // Set up internal connection for expression evaluation let io = Arc::new(crate::MemoryIO::new()); let db = Database::open_file( @@ -769,7 +1026,6 @@ columns, input_column_names, output_column_names, - current_state: Delta::new(), tracker: None, internal_conn, }) @@ -809,7 +1065,6 @@ columns, input_column_names, output_column_names, - current_state: Delta::new(), tracker: None, internal_conn, }) @@ -839,7 +1094,6 @@ } fn evaluate_expression(&self, expr: &turso_parser::ast::Expr, values: &[Value]) -> Value { - use turso_parser::ast::*; match expr { Expr::Id(name) => { if let Some(idx) = self @@ -970,44 +1224,35 @@ } impl IncrementalOperator for ProjectOperator { - fn initialize(&mut self, data: Delta) { - for (row, weight) in &data.changes { - if let Some(tracker) = &self.tracker { - tracker.lock().unwrap().record_project(); - } - - let projected = self.project_values(&row.values); - let projected_row = HashableRow::new(row.rowid, projected); - self.current_state.changes.push((projected_row, *weight)); - } - } - - fn eval(&self, delta: Delta, uncommitted: Option<Delta>) -> Delta { - let mut output_delta = Delta::new(); - - // Merge delta with uncommitted if present - let combined_delta = if let Some(uncommitted) = uncommitted { - let mut combined = delta; - combined.merge(&uncommitted); - combined - } else { - delta + fn eval( + &mut self, + state: &mut EvalState, + _cursor: &mut BTreeCursor, + ) -> Result<IOResult<Delta>> { + let delta = match state { + EvalState::Init { delta } => std::mem::take(delta), + _ => unreachable!( + "ProjectOperator doesn't execute the state machine. 
Should be in Init state" + ), }; - for (row, weight) in &combined_delta.changes { + let mut output_delta = Delta::new(); + + for (row, weight) in delta.changes { if let Some(tracker) = &self.tracker { tracker.lock().unwrap().record_project(); } let projected = self.project_values(&row.values); let projected_row = HashableRow::new(row.rowid, projected); - output_delta.changes.push((projected_row, *weight)); + output_delta.changes.push((projected_row, weight)); } - output_delta + *state = EvalState::Done; + Ok(IOResult::Done(output_delta)) } - fn commit(&mut self, delta: Delta) -> Delta { + fn commit(&mut self, delta: Delta, _cursor: &mut BTreeCursor) -> Result<IOResult<Delta>> { let mut output_delta = Delta::new(); // Commit the delta to our internal state and build output @@ -1017,20 +1262,10 @@ } let projected = self.project_values(&row.values); let projected_row = HashableRow::new(row.rowid, projected); - self.current_state - .changes - .push((projected_row.clone(), *weight)); output_delta.changes.push((projected_row, *weight)); } - output_delta - } - - fn get_current_state(&self) -> Delta { - // Return a consolidated view of the current state - let mut consolidated = self.current_state.clone(); - consolidated.consolidate(); - consolidated + Ok(crate::types::IOResult::Done(output_delta)) } fn set_tracker(&mut self, tracker: Arc<Mutex<ComputationTracker>>) { @@ -1040,28 +1275,29 @@ } /// Aggregate operator - performs incremental aggregation with GROUP BY /// Maintains running totals/counts that are updated incrementally -#[derive(Debug, Clone)] +/// +/// Note that the AggregateOperator essentially implements a ZSet, even +/// though the ZSet structure is never used explicitly. The on-disk btree +/// plays the role of the set! +#[derive(Debug)] pub struct AggregateOperator { + // Unique operator ID for indexing in persistent storage + operator_id: usize, // GROUP BY columns group_by: Vec<String>, // Aggregate functions to compute aggregates: Vec<AggregateFunction>, // Column names from input pub input_column_names: Vec<String>, - // Aggregation state: group_key_str -> aggregate values - // For each group, we store the aggregate results - // We use String representation of group keys since Value doesn't implement Hash - group_states: HashMap<String, AggregateState>, - // Map to keep track of actual group key values for output - group_key_values: HashMap<String, Vec<Value>>, - // Current output state as a Delta - current_state: Delta, tracker: Option<Arc<Mutex<ComputationTracker>>>, + + // State machine for commit operation + commit_state: AggregateCommitState, } /// State for a single group's aggregates #[derive(Debug, Clone)] -struct AggregateState { +pub struct AggregateState { // For COUNT: just the count count: i64, // For SUM: column_name -> sum value @@ -1081,6 +1317,158 @@ impl AggregateState { } } + // Serialize the aggregate state to a binary blob including group key values + // The reason we serialize it like this, instead of just writing the actual values, is that + // the same table may have different aggregators in the circuit. They will all have different + columns. 
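// Layout of the serialized aggregate state (version 1), as produced by
// to_blob() below and parsed back by from_blob():
//
//     [u8  version = 1]
//     [u32 number of group key values]
//     for each group key value: [u8 type tag][payload]
//         0 = Null, 1 = Integer (i64), 2 = Float (f64),
//         3 = Text (u32 length + bytes), 4 = Blob (u32 length + bytes)
//     [i64 count]
//     for each aggregate, in order: SUM -> f64 sum; AVG -> f64 sum, i64 count
//     (COUNT needs nothing extra; it reuses the count field)
//
// All multi-byte integers and floats are little-endian.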
+ fn to_blob(&self, aggregates: &[AggregateFunction], group_key: &[Value]) -> Vec<u8> { + let mut blob = Vec::new(); + + // Write version byte for future compatibility + blob.push(1u8); + + // Write number of group key values + blob.extend_from_slice(&(group_key.len() as u32).to_le_bytes()); + + // Write each group key value + for value in group_key { + // Write value type tag + match value { + Value::Null => blob.push(0u8), + Value::Integer(i) => { + blob.push(1u8); + blob.extend_from_slice(&i.to_le_bytes()); + } + Value::Float(f) => { + blob.push(2u8); + blob.extend_from_slice(&f.to_le_bytes()); + } + Value::Text(s) => { + blob.push(3u8); + let text_str = s.as_str(); + let bytes = text_str.as_bytes(); + blob.extend_from_slice(&(bytes.len() as u32).to_le_bytes()); + blob.extend_from_slice(bytes); + } + Value::Blob(b) => { + blob.push(4u8); + blob.extend_from_slice(&(b.len() as u32).to_le_bytes()); + blob.extend_from_slice(b); + } + } + } + + // Write count as 8 bytes (little-endian) + blob.extend_from_slice(&self.count.to_le_bytes()); + + // Write each aggregate's state + for agg in aggregates { + match agg { + AggregateFunction::Sum(col_name) => { + let sum = self.sums.get(col_name).copied().unwrap_or(0.0); + blob.extend_from_slice(&sum.to_le_bytes()); + } + AggregateFunction::Avg(col_name) => { + let (sum, count) = self.avgs.get(col_name).copied().unwrap_or((0.0, 0)); + blob.extend_from_slice(&sum.to_le_bytes()); + blob.extend_from_slice(&count.to_le_bytes()); + } + AggregateFunction::Count => { + // Count is already written above + } + } + } + + blob + } + + /// Deserialize aggregate state from a binary blob + /// Returns the aggregate state and the group key values + fn from_blob(blob: &[u8], aggregates: &[AggregateFunction]) -> Option<(Self, Vec<Value>)> { + let mut cursor = 0; + + // Check version byte + if blob.get(cursor) != Some(&1u8) { + return None; + } + cursor += 1; + + // Read number of group key values + let num_group_keys = + u32::from_le_bytes(blob.get(cursor..cursor + 4)?.try_into().ok()?) as usize; + cursor += 4; + + // Read group key values + let mut group_key = Vec::new(); + for _ in 0..num_group_keys { + let value_type = *blob.get(cursor)?; + cursor += 1; + + let value = match value_type { + 0 => Value::Null, + 1 => { + let i = i64::from_le_bytes(blob.get(cursor..cursor + 8)?.try_into().ok()?); + cursor += 8; + Value::Integer(i) + } + 2 => { + let f = f64::from_le_bytes(blob.get(cursor..cursor + 8)?.try_into().ok()?); + cursor += 8; + Value::Float(f) + } + 3 => { + let len = + u32::from_le_bytes(blob.get(cursor..cursor + 4)?.try_into().ok()?) as usize; + cursor += 4; + let bytes = blob.get(cursor..cursor + len)?; + cursor += len; + let text_str = std::str::from_utf8(bytes).ok()?; + Value::Text(text_str.to_string().into()) + } + 4 => { + let len = + u32::from_le_bytes(blob.get(cursor..cursor + 4)?.try_into().ok()?) 
as usize; + cursor += 4; + let bytes = blob.get(cursor..cursor + len)?; + cursor += len; + Value::Blob(bytes.to_vec()) + } + _ => return None, + }; + group_key.push(value); + } + + // Read count + let count = i64::from_le_bytes(blob.get(cursor..cursor + 8)?.try_into().ok()?); + cursor += 8; + + let mut state = Self::new(); + state.count = count; + + // Read each aggregate's state + for agg in aggregates { + match agg { + AggregateFunction::Sum(col_name) => { + let sum = f64::from_le_bytes(blob.get(cursor..cursor + 8)?.try_into().ok()?); + cursor += 8; + state.sums.insert(col_name.clone(), sum); + } + AggregateFunction::Avg(col_name) => { + let sum = f64::from_le_bytes(blob.get(cursor..cursor + 8)?.try_into().ok()?); + cursor += 8; + let count = i64::from_le_bytes(blob.get(cursor..cursor + 8)?.try_into().ok()?); + cursor += 8; + state.avgs.insert(col_name.clone(), (sum, count)); + } + AggregateFunction::Count => { + // Count was already read above + } + } + } + + Some((state, group_key)) + } + /// Apply a delta to this aggregate state fn apply_delta( &mut self, @@ -1168,25 +1556,146 @@ impl AggregateOperator { pub fn new( + operator_id: usize, group_by: Vec<String>, aggregates: Vec<AggregateFunction>, input_column_names: Vec<String>, ) -> Self { Self { + operator_id, group_by, aggregates, input_column_names, - group_states: HashMap::new(), - group_key_values: HashMap::new(), - current_state: Delta::new(), tracker: None, + commit_state: AggregateCommitState::Idle, } } + fn eval_internal( + &mut self, + state: &mut EvalState, + cursor: &mut BTreeCursor, + ) -> Result<IOResult<(Delta, ComputedStates)>> { + match state { + EvalState::Uninitialized => { + panic!("Cannot eval AggregateOperator with Uninitialized state"); + } + EvalState::Init { delta } => { + if delta.changes.is_empty() { + *state = EvalState::Done; + return Ok(IOResult::Done((Delta::new(), HashMap::new()))); + } + + let mut groups_to_read = BTreeMap::new(); + for (row, _weight) in &delta.changes { + // Extract group key using cloned fields + let group_key = self.extract_group_key(&row.values); + let group_key_str = Self::group_key_to_string(&group_key); + groups_to_read.insert(group_key_str, group_key); + } + state.advance(groups_to_read); + } + EvalState::FetchData { .. } => { + // Already in progress, continue processing on process_delta below. + } + EvalState::Done => { + panic!("unreachable state! 
should have returned"); + } + } + + // Process the delta through the state machine + let result = return_if_io!(state.process_delta(self, cursor)); + Ok(IOResult::Done(result)) + } + + fn merge_delta_with_existing( + &mut self, + delta: &Delta, + existing_groups: &mut HashMap<String, AggregateState>, + old_values: &mut HashMap<String, Vec<Value>>, + ) -> (Delta, HashMap<String, (Vec<Value>, AggregateState)>) { + let mut output_delta = Delta::new(); + let mut temp_keys: HashMap<String, Vec<Value>> = HashMap::new(); + + // Process each change in the delta + for (row, weight) in &delta.changes { + if let Some(tracker) = &self.tracker { + tracker.lock().unwrap().record_aggregation(); + } + + // Extract group key + let group_key = self.extract_group_key(&row.values); + let group_key_str = Self::group_key_to_string(&group_key); + + let state = existing_groups + .entry(group_key_str.clone()) + .or_insert_with(AggregateState::new); + + temp_keys.insert(group_key_str.clone(), group_key.clone()); + + // Apply the delta to the temporary state + state.apply_delta( + &row.values, + *weight, + &self.aggregates, + &self.input_column_names, + ); + } + + // Generate output delta from temporary states and collect final states + let mut final_states = HashMap::new(); + + for (group_key_str, state) in existing_groups { + let group_key = temp_keys.get(group_key_str).cloned().unwrap_or_default(); + + // Generate a unique rowid for this group + let result_key = self.generate_group_rowid(group_key_str); + + if let Some(old_row_values) = old_values.get(group_key_str) { + let old_row = HashableRow::new(result_key, old_row_values.clone()); + output_delta.changes.push((old_row, -1)); + } + + // Always store the state for persistence (even if count=0, we need to delete it) + final_states.insert(group_key_str.clone(), (group_key.clone(), state.clone())); + + // Only include groups with count > 0 in the output delta + if state.count > 0 { + // Build output row: group_by columns + aggregate values + let mut output_values = group_key.clone(); + output_values.extend(state.to_values(&self.aggregates)); + + let output_row = HashableRow::new(result_key, output_values); + output_delta.changes.push((output_row, 1)); + } + } + (output_delta, final_states) + } + pub fn set_tracker(&mut self, tracker: Arc<Mutex<ComputationTracker>>) { + self.tracker = Some(tracker); + } + /// Generate a rowid for a group + /// For no GROUP BY: always returns 0 + /// For GROUP BY: returns a hash of the group key string + fn generate_group_rowid(&self, group_key_str: &str) -> i64 { + if self.group_by.is_empty() { + 0 + } else { + group_key_str + .bytes() + .fold(0i64, |acc, b| acc.wrapping_mul(31).wrapping_add(b as i64)) + } + } + + /// Generate the composite key for BTree storage + /// Combines operator_id and group hash + fn generate_storage_key(&self, group_key_str: &str) -> i64 { + let group_hash = self.generate_group_rowid(group_key_str); + (self.operator_id as i64) << 32 | (group_hash & 0xFFFFFFFF) + } + /// Extract group key values from a row fn extract_group_key(&self, values: &[Value]) -> Vec<Value> { + let mut key = Vec::new(); @@ -1214,215 +1723,102 @@ .join(",") } - /// Process a delta and update aggregate state incrementally - pub fn process_delta(&mut self, delta: Delta) -> Delta { - let mut output_delta = Delta::new(); - - // Track which groups were modified and their old values - let mut modified_groups = HashSet::new(); - let mut old_values: HashMap<String, Vec<Value>> = HashMap::new(); - - // Process each change in the delta - for (row, weight) in &delta.changes { - if let Some(tracker) = &self.tracker { - 
tracker.lock().unwrap().record_aggregation(); - } - - // Extract group key - let group_key = self.extract_group_key(&row.values); - let group_key_str = Self::group_key_to_string(&group_key); - - // Store old aggregate values BEFORE applying the delta - // (only for the first time we see this group in this batch) - if !modified_groups.contains(&group_key_str) { - if let Some(state) = self.group_states.get(&group_key_str) { - let mut old_row = group_key.clone(); - old_row.extend(state.to_values(&self.aggregates)); - old_values.insert(group_key_str.clone(), old_row); - } - } - - modified_groups.insert(group_key_str.clone()); - - // Store the actual group key values - self.group_key_values - .insert(group_key_str.clone(), group_key.clone()); - - // Get or create aggregate state for this group - let state = self - .group_states - .entry(group_key_str.clone()) - .or_insert_with(AggregateState::new); - - // Apply the delta to the aggregate state - state.apply_delta( - &row.values, - *weight, - &self.aggregates, - &self.input_column_names, - ); - } - - // Generate output delta for modified groups - for group_key_str in modified_groups { - // Get the actual group key values - let group_key = self - .group_key_values - .get(&group_key_str) - .cloned() - .unwrap_or_default(); - - // Generate a unique key for this group - // We use a hash of the group key to ensure consistency - let result_key = group_key_str - .bytes() - .fold(0i64, |acc, b| acc.wrapping_mul(31).wrapping_add(b as i64)); - - // Emit retraction for old value if it existed - if let Some(old_row_values) = old_values.get(&group_key_str) { - let old_row = HashableRow::new(result_key, old_row_values.clone()); - output_delta.changes.push((old_row.clone(), -1)); - // Also remove from current state - self.current_state.changes.push((old_row, -1)); - } - - if let Some(state) = self.group_states.get(&group_key_str) { - // Build output row: group_by columns + aggregate values - let mut output_values = group_key.clone(); - output_values.extend(state.to_values(&self.aggregates)); - - // Check if group should be removed (count is 0) - if state.count > 0 { - // Add to output delta with positive weight - let output_row = HashableRow::new(result_key, output_values.clone()); - output_delta.changes.push((output_row.clone(), 1)); - - // Update current state - self.current_state.changes.push((output_row, 1)); - } else { - // Group has count=0, remove from state - // (we already emitted the retraction above if needed) - self.group_states.remove(&group_key_str); - self.group_key_values.remove(&group_key_str); - } - } - } - - // Consolidate current state to handle removals - self.current_state.consolidate(); - - output_delta + fn seek_key_from_str(&self, group_key_str: &str) -> SeekKey { + // Calculate the composite key for seeking + let key_i64 = self.generate_storage_key(group_key_str); + SeekKey::TableRowId(key_i64) } - pub fn get_current_state(&self) -> &Delta { - &self.current_state + fn seek_key(&self, row: HashableRow) -> SeekKey { + // Extract group key for first row + let group_key = self.extract_group_key(&row.values); + let group_key_str = Self::group_key_to_string(&group_key); + self.seek_key_from_str(&group_key_str) } } impl IncrementalOperator for AggregateOperator { - fn initialize(&mut self, data: Delta) { - // Process all initial data - this modifies state during initialization - let _ = self.process_delta(data); + fn eval(&mut self, state: &mut EvalState, cursor: &mut BTreeCursor) -> Result<IOResult<Delta>> { + let (delta, _) = 
return_if_io!(self.eval_internal(state, cursor)); + Ok(IOResult::Done(delta)) } - fn eval(&self, delta: Delta, uncommitted: Option<Delta>) -> Delta { - // Clone the current state to work with temporarily - let mut temp_group_states = self.group_states.clone(); - let mut temp_group_key_values = self.group_key_values.clone(); - - // Merge delta with uncommitted if present - let combined_delta = if let Some(uncommitted) = uncommitted { - let mut combined = delta; - combined.merge(&uncommitted); - combined - } else { - delta - }; - - let mut output_delta = Delta::new(); - let mut modified_groups = HashSet::new(); - let mut old_values: HashMap<String, Vec<Value>> = HashMap::new(); - - // Process each change in the combined delta using temporary state - for (row, weight) in &combined_delta.changes { - if let Some(tracker) = &self.tracker { - tracker.lock().unwrap().record_aggregation(); - } - - // Extract group key - let group_key = self.extract_group_key(&row.values); - let group_key_str = Self::group_key_to_string(&group_key); - - // Store old aggregate values BEFORE applying the delta - if !modified_groups.contains(&group_key_str) { - if let Some(state) = temp_group_states.get(&group_key_str) { - let mut old_row = group_key.clone(); - old_row.extend(state.to_values(&self.aggregates)); - old_values.insert(group_key_str.clone(), old_row); + fn commit(&mut self, delta: Delta, cursor: &mut BTreeCursor) -> Result<IOResult<Delta>> { + loop { + // Note: because we std::mem::replace here (without it, the borrow checker goes nuts, + // because we call self.eval_internal, which requires a mutable borrow), we have to + // restore the state if we return I/O. So we can't use return_if_io! + let mut state = + std::mem::replace(&mut self.commit_state, AggregateCommitState::Invalid); + match &mut state { + AggregateCommitState::Invalid => { + panic!("Reached invalid state! 
State was replaced, and not replaced back"); } - } + AggregateCommitState::Idle => { + let eval_state = EvalState::from_delta(delta.clone()); + self.commit_state = AggregateCommitState::Eval { eval_state }; + } + AggregateCommitState::Eval { ref mut eval_state } => { + let (output_delta, computed_states) = return_and_restore_if_io!( + &mut self.commit_state, + state, + self.eval_internal(eval_state, cursor) + ); + self.commit_state = AggregateCommitState::PersistDelta { + delta: output_delta, + computed_states, + current_idx: 0, + write_record: WriteRecord::new(), + }; + } + AggregateCommitState::PersistDelta { + delta, + computed_states, + current_idx, + write_record, + } => { + let states_vec: Vec<_> = computed_states.iter().collect(); - modified_groups.insert(group_key_str.clone()); - temp_group_key_values.insert(group_key_str.clone(), group_key.clone()); + if *current_idx >= states_vec.len() { + self.commit_state = AggregateCommitState::Done { + delta: delta.clone(), + }; + } else { + let (group_key_str, (group_key, agg_state)) = states_vec[*current_idx]; - // Get or create aggregate state for this group in temporary state - let state = temp_group_states - .entry(group_key_str.clone()) - .or_insert_with(AggregateState::new); + let seek_key = self.seek_key_from_str(group_key_str); - // Apply the delta to the temporary aggregate state - state.apply_delta( - &row.values, - *weight, - &self.aggregates, - &self.input_column_names, - ); - } + // Determine weight: -1 to delete (cancels existing weight=1), 1 to insert/update + let weight = if agg_state.count == 0 { -1 } else { 1 }; - // Generate output delta for modified groups using temporary state - for group_key_str in modified_groups { - let group_key = temp_group_key_values - .get(&group_key_str) - .cloned() - .unwrap_or_default(); + // Serialize the aggregate state with group key (even for deletion, we need a row) + let state_blob = agg_state.to_blob(&self.aggregates, group_key); + let blob_row = HashableRow::new(0, vec![Value::Blob(state_blob)]); - // Generate a unique key for this group - let result_key = group_key_str - .bytes() - .fold(0i64, |acc, b| acc.wrapping_mul(31).wrapping_add(b as i64)); + return_and_restore_if_io!( + &mut self.commit_state, + state, + write_record.write_record(seek_key, blob_row, weight, cursor) + ); - // Emit retraction for old value if it existed - if let Some(old_row_values) = old_values.get(&group_key_str) { - let old_row = HashableRow::new(result_key, old_row_values.clone()); - output_delta.changes.push((old_row, -1)); - } + let delta = std::mem::take(delta); + let computed_states = std::mem::take(computed_states); - if let Some(state) = temp_group_states.get(&group_key_str) { - // Build output row: group_by columns + aggregate values - let mut output_values = group_key.clone(); - output_values.extend(state.to_values(&self.aggregates)); - - // Check if group should be included (count > 0) - if state.count > 0 { - let output_row = HashableRow::new(result_key, output_values); - output_delta.changes.push((output_row, 1)); + self.commit_state = AggregateCommitState::PersistDelta { + delta, + computed_states, + current_idx: *current_idx + 1, + write_record: WriteRecord::new(), // Reset for next write + }; + } + } + AggregateCommitState::Done { delta } => { + self.commit_state = AggregateCommitState::Idle; + let delta = std::mem::take(delta); + return Ok(IOResult::Done(delta)); } } } - - output_delta - } - - fn commit(&mut self, delta: Delta) -> Delta { - // Actually update the internal state when committing 
and return the output
-        self.process_delta(delta)
-    }
-
-    fn get_current_state(&self) -> Delta {
-        // Return a consolidated view of the current state
-        let mut consolidated = self.current_state.clone();
-        consolidated.consolidate();
-        consolidated
-    }
 
     fn set_tracker(&mut self, tracker: Arc<Mutex<ComputationTracker>>) {
@@ -1433,10 +1829,101 @@ impl IncrementalOperator for AggregateOperator {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::storage::pager::CreateBTreeFlags;
     use crate::types::Text;
+    use crate::util::IOExt;
     use crate::Value;
+    use crate::{Database, MemoryIO, IO};
     use std::sync::{Arc, Mutex};
 
+    /// Create a test pager for operator tests
+    fn create_test_pager() -> (std::rc::Rc<Pager>, usize) {
+        let io: Arc<dyn IO> = Arc::new(MemoryIO::new());
+        let db = Database::open_file(io.clone(), ":memory:", false, false).unwrap();
+        let conn = db.connect().unwrap();
+
+        let pager = conn.pager.borrow().clone();
+
+        // Allocate page 1 first (database header)
+        let _ = pager.io.block(|| pager.allocate_page1());
+
+        // Properly create a BTree for aggregate state using the pager API
+        let root_page_id = pager
+            .io
+            .block(|| pager.btree_create(&CreateBTreeFlags::new_table()))
+            .expect("Failed to create BTree for aggregate state")
+            as usize;
+
+        (pager, root_page_id)
+    }
+
+    /// Read the current state from the BTree (for testing)
+    /// Returns a Delta with all the current aggregate values
+    fn get_current_state_from_btree(
+        agg: &AggregateOperator,
+        pager: &std::rc::Rc<Pager>,
+        cursor: &mut BTreeCursor,
+    ) -> Delta {
+        let mut result = Delta::new();
+
+        // Rewind to start of table
+        pager.io.block(|| cursor.rewind()).unwrap();
+
+        loop {
+            // Check if cursor is empty (no more rows)
+            if cursor.is_empty() {
+                break;
+            }
+
+            // Get the record at this position
+            let record = pager
+                .io
+                .block(|| cursor.record())
+                .unwrap()
+                .unwrap()
+                .to_owned();
+
+            let values_ref = record.get_values();
+            let values: Vec<Value> = values_ref.into_iter().map(|x| x.to_owned()).collect();
+
+            // Check if this record belongs to our operator
+            if let Some(Value::Integer(key)) = values.first() {
+                let operator_part = (key >> 32) as usize;
+
+                // Skip if not our operator
+                if operator_part != agg.operator_id {
+                    pager.io.block(|| cursor.next()).unwrap();
+                    continue;
+                }
+
+                // Get the blob data
+                if let Some(Value::Blob(blob)) = values.get(1) {
+                    // Deserialize the state
+                    if let Some((state, group_key)) =
+                        AggregateState::from_blob(blob, &agg.aggregates)
+                    {
+                        // A group whose count dropped to zero is deleted at commit
+                        // time, so it should never be found persisted here.
+                        assert!(state.count != 0);
+                        // Build output row: group_by columns + aggregate values
+                        let mut output_values = group_key.clone();
+                        output_values.extend(state.to_values(&agg.aggregates));
+
+                        let group_key_str = AggregateOperator::group_key_to_string(&group_key);
+                        let rowid = agg.generate_group_rowid(&group_key_str);
+
+                        let output_row = HashableRow::new(rowid, output_values);
+                        result.changes.push((output_row, 1));
+                    }
+                }
+            }
+
+            pager.io.block(|| cursor.next()).unwrap();
+        }
+
+        result.consolidate();
+        result
+    }
+
     /// Assert that we're doing incremental work, not full recomputation
     fn assert_incremental(tracker: &ComputationTracker, expected_ops: usize, data_size: usize) {
         assert!(
@@ -1464,8 +1951,13 @@ mod tests {
         // the operator emits both a retraction (-1) of the old value
         // and an insertion (+1) of the new value.
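To make that retract/insert convention concrete, here is a minimal standalone sketch (simplified row type, not the crate's `Delta`/`HashableRow`): an update is emitted as the old row with weight -1 plus the new row with weight +1, and consolidation sums weights per row, dropping anything that nets to zero.

```rust
use std::collections::BTreeMap;

// A row is reduced to (rowid, value) for brevity; the second i64 is the weight.
type Change = ((i64, i64), i64);

/// Sum weights per row and drop rows whose net weight is zero.
fn consolidate(changes: Vec<Change>) -> Vec<Change> {
    let mut acc: BTreeMap<(i64, i64), i64> = BTreeMap::new();
    for (row, w) in changes {
        *acc.entry(row).or_insert(0) += w;
    }
    acc.into_iter().filter(|&(_, w)| w != 0).collect()
}

fn main() {
    // An aggregate moving from 90 to 95: retract the old row, insert the new.
    let update = vec![((1, 90), -1), ((1, 95), 1)];
    assert_eq!(consolidate(update).len(), 2); // distinct rows, both survive

    // Retracting and re-inserting the same row cancels out entirely.
    assert!(consolidate(vec![((1, 90), -1), ((1, 90), 1)]).is_empty());
}
```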
+ // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + // Create an aggregate operator for SUM(age) with no GROUP BY let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec![], // No GROUP BY vec![AggregateFunction::Sum("age".to_string())], vec!["id".to_string(), "name".to_string(), "age".to_string()], @@ -1499,10 +1991,13 @@ mod tests { ); // Initialize with initial data - agg.initialize(initial_delta); + pager + .io + .block(|| agg.commit(initial_delta.clone(), &mut cursor)) + .unwrap(); // Verify initial state: SUM(age) = 25 + 30 + 35 = 90 - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state.changes.len(), 1, "Should have one aggregate row"); let (row, weight) = &state.changes[0]; assert_eq!(*weight, 1, "Aggregate row should have weight 1"); @@ -1520,8 +2015,10 @@ mod tests { ); // Process the incremental update - let output_delta = agg.eval(update_delta.clone(), None); - agg.commit(update_delta); + let output_delta = pager + .io + .block(|| agg.commit(update_delta.clone(), &mut cursor)) + .unwrap(); // CRITICAL: The output delta should contain TWO changes: // 1. Retraction of old aggregate value (90) with weight -1 @@ -1568,7 +2065,12 @@ mod tests { // the operator emits both retractions and insertions correctly for each group. // Create an aggregate operator for SUM(score) GROUP BY team + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec!["team".to_string()], // GROUP BY team vec![AggregateFunction::Sum("score".to_string())], vec![ @@ -1610,10 +2112,13 @@ mod tests { ); // Initialize with initial data - agg.initialize(initial_delta); + pager + .io + .block(|| agg.commit(initial_delta.clone(), &mut cursor)) + .unwrap(); // Verify initial state: red team = 30, blue team = 15 - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state.changes.len(), 2, "Should have two groups"); // Find the red and blue team aggregates @@ -1653,8 +2158,10 @@ mod tests { ); // Process the incremental update - let output_delta = agg.eval(update_delta.clone(), None); - agg.commit(update_delta); + let output_delta = pager + .io + .block(|| agg.commit(update_delta.clone(), &mut cursor)) + .unwrap(); // Should have 2 changes: retraction of old red team sum, insertion of new red team sum // Blue team should NOT be affected @@ -1703,8 +2210,13 @@ mod tests { fn test_count_increments_not_recounts() { let tracker = Arc::new(Mutex::new(ComputationTracker::new())); + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + // Create COUNT(*) GROUP BY category let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec!["category".to_string()], vec![AggregateFunction::Count], vec![ @@ -1728,7 +2240,10 @@ mod tests { ], ); } - agg.initialize(initial); + pager + .io + .block(|| agg.commit(initial.clone(), &mut cursor)) + .unwrap(); // Reset tracker for delta processing tracker.lock().unwrap().aggregation_updates = 0; @@ -1744,15 +2259,15 @@ mod tests { ], ); - let _output = agg.eval(delta.clone(), None); - 
agg.commit(delta); + pager + .io + .block(|| agg.commit(delta.clone(), &mut cursor)) + .unwrap(); - // Should update one group (cat_0) twice - once in eval, once in commit - // This is still incremental - we're not recounting all groups - assert_eq!(tracker.lock().unwrap().aggregation_updates, 2); + assert_eq!(tracker.lock().unwrap().aggregation_updates, 1); // Check the final state - cat_0 should now have count 11 - let final_state = agg.get_current_state(); + let final_state = get_current_state_from_btree(&agg, &pager, &mut cursor); let cat_0 = final_state .changes .iter() @@ -1770,7 +2285,12 @@ mod tests { let tracker = Arc::new(Mutex::new(ComputationTracker::new())); // Create SUM(amount) GROUP BY product + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec!["product".to_string()], vec![AggregateFunction::Sum("amount".to_string())], vec![ @@ -1807,10 +2327,13 @@ mod tests { Value::Integer(150), ], ); - agg.initialize(initial); + pager + .io + .block(|| agg.commit(initial.clone(), &mut cursor)) + .unwrap(); // Check initial state: Widget=250, Gadget=200 - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); let widget_sum = state .changes .iter() @@ -1833,14 +2356,15 @@ mod tests { ], ); - let _output = agg.eval(delta.clone(), None); - agg.commit(delta); + pager + .io + .block(|| agg.commit(delta.clone(), &mut cursor)) + .unwrap(); - // Should update Widget group twice (once in eval, once in commit) - assert_eq!(tracker.lock().unwrap().aggregation_updates, 2); + assert_eq!(tracker.lock().unwrap().aggregation_updates, 1); // Check final state - Widget should now be 300 (250 + 50) - let final_state = agg.get_current_state(); + let final_state = get_current_state_from_btree(&agg, &pager, &mut cursor); let widget = final_state .changes .iter() @@ -1852,7 +2376,12 @@ mod tests { #[test] fn test_count_and_sum_together() { // Test the example from DBSP_ROADMAP: COUNT(*) and SUM(amount) GROUP BY user_id + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec!["user_id".to_string()], vec![ AggregateFunction::Count, @@ -1879,12 +2408,15 @@ mod tests { 3, vec![Value::Integer(3), Value::Integer(2), Value::Integer(150)], ); - agg.initialize(initial); + pager + .io + .block(|| agg.commit(initial.clone(), &mut cursor)) + .unwrap(); // Check initial state // User 1: count=2, sum=300 // User 2: count=1, sum=150 - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state.changes.len(), 2); let user1 = state @@ -1911,11 +2443,13 @@ mod tests { 4, vec![Value::Integer(4), Value::Integer(1), Value::Integer(50)], ); - let _output = agg.eval(delta.clone(), None); - agg.commit(delta); + pager + .io + .block(|| agg.commit(delta.clone(), &mut cursor)) + .unwrap(); // Check final state - user 1 should have updated count and sum - let final_state = agg.get_current_state(); + let final_state = get_current_state_from_btree(&agg, &pager, &mut cursor); let user1 = final_state .changes .iter() @@ -1928,7 +2462,12 @@ mod tests { #[test] fn test_avg_maintains_sum_and_count() { // Test AVG aggregation + // Create 
a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec!["category".to_string()], vec![AggregateFunction::Avg("value".to_string())], vec![ @@ -1964,12 +2503,15 @@ mod tests { Value::Integer(30), ], ); - agg.initialize(initial); + pager + .io + .block(|| agg.commit(initial.clone(), &mut cursor)) + .unwrap(); // Check initial averages // Category A: avg = (10 + 20) / 2 = 15 // Category B: avg = 30 / 1 = 30 - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); let cat_a = state .changes .iter() @@ -1996,11 +2538,13 @@ mod tests { Value::Integer(30), ], ); - let _output = agg.eval(delta.clone(), None); - agg.commit(delta); + pager + .io + .block(|| agg.commit(delta.clone(), &mut cursor)) + .unwrap(); // Check final state - Category A avg should now be (10 + 20 + 30) / 3 = 20 - let final_state = agg.get_current_state(); + let final_state = get_current_state_from_btree(&agg, &pager, &mut cursor); let cat_a = final_state .changes .iter() @@ -2012,7 +2556,12 @@ mod tests { #[test] fn test_delete_updates_aggregates() { // Test that deletes (negative weights) properly update aggregates + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec!["category".to_string()], vec![ AggregateFunction::Count, @@ -2043,10 +2592,13 @@ mod tests { Value::Integer(200), ], ); - agg.initialize(initial); + pager + .io + .block(|| agg.commit(initial.clone(), &mut cursor)) + .unwrap(); // Check initial state: count=2, sum=300 - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert!(!state.changes.is_empty()); let (row, _weight) = &state.changes[0]; assert_eq!(row.values[1], Value::Integer(2)); // count @@ -2063,11 +2615,13 @@ mod tests { ], ); - let _output = agg.eval(delta.clone(), None); - agg.commit(delta); + pager + .io + .block(|| agg.commit(delta.clone(), &mut cursor)) + .unwrap(); // Check final state - should update to count=1, sum=200 - let final_state = agg.get_current_state(); + let final_state = get_current_state_from_btree(&agg, &pager, &mut cursor); let cat_a = final_state .changes .iter() @@ -2083,17 +2637,29 @@ mod tests { let group_by = vec!["category".to_string()]; let input_columns = vec!["category".to_string(), "value".to_string()]; - let mut agg = AggregateOperator::new(group_by, aggregates.clone(), input_columns); + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + + let mut agg = AggregateOperator::new( + 1, // operator_id for testing + group_by, + aggregates.clone(), + input_columns, + ); // Initialize with data let mut init_data = Delta::new(); init_data.insert(1, vec![Value::Text("A".into()), Value::Integer(10)]); init_data.insert(2, vec![Value::Text("A".into()), Value::Integer(20)]); init_data.insert(3, vec![Value::Text("B".into()), Value::Integer(30)]); - agg.initialize(init_data); + pager + .io + .block(|| agg.commit(init_data.clone(), &mut cursor)) + .unwrap(); // Check initial counts - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, 
&pager, &mut cursor); assert_eq!(state.changes.len(), 2); // Find group A and B @@ -2115,14 +2681,16 @@ mod tests { let mut delete_delta = Delta::new(); delete_delta.delete(1, vec![Value::Text("A".into()), Value::Integer(10)]); - let output = agg.eval(delete_delta.clone(), None); - agg.commit(delete_delta); + let output = pager + .io + .block(|| agg.commit(delete_delta.clone(), &mut cursor)) + .unwrap(); // Should emit retraction for old count and insertion for new count assert_eq!(output.changes.len(), 2); // Check final state - let final_state = agg.get_current_state(); + let final_state = get_current_state_from_btree(&agg, &pager, &mut cursor); let group_a_final = final_state .changes .iter() @@ -2134,13 +2702,15 @@ mod tests { let mut delete_all_b = Delta::new(); delete_all_b.delete(3, vec![Value::Text("B".into()), Value::Integer(30)]); - let output_b = agg.eval(delete_all_b.clone(), None); - agg.commit(delete_all_b); + let output_b = pager + .io + .block(|| agg.commit(delete_all_b.clone(), &mut cursor)) + .unwrap(); assert_eq!(output_b.changes.len(), 1); // Only retraction, no new row assert_eq!(output_b.changes[0].1, -1); // Retraction // Final state should not have group B - let final_state2 = agg.get_current_state(); + let final_state2 = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(final_state2.changes.len(), 1); // Only group A remains assert_eq!(final_state2.changes[0].0.values[0], Value::Text("A".into())); } @@ -2151,7 +2721,16 @@ mod tests { let group_by = vec!["category".to_string()]; let input_columns = vec!["category".to_string(), "value".to_string()]; - let mut agg = AggregateOperator::new(group_by, aggregates.clone(), input_columns); + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + + let mut agg = AggregateOperator::new( + 1, // operator_id for testing + group_by, + aggregates.clone(), + input_columns, + ); // Initialize with data let mut init_data = Delta::new(); @@ -2159,10 +2738,13 @@ mod tests { init_data.insert(2, vec![Value::Text("A".into()), Value::Integer(20)]); init_data.insert(3, vec![Value::Text("B".into()), Value::Integer(30)]); init_data.insert(4, vec![Value::Text("B".into()), Value::Integer(15)]); - agg.initialize(init_data); + pager + .io + .block(|| agg.commit(init_data.clone(), &mut cursor)) + .unwrap(); // Check initial sums - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); let group_a = state .changes .iter() @@ -2181,11 +2763,13 @@ mod tests { let mut delete_delta = Delta::new(); delete_delta.delete(2, vec![Value::Text("A".into()), Value::Integer(20)]); - let _ = agg.eval(delete_delta.clone(), None); - agg.commit(delete_delta); + pager + .io + .block(|| agg.commit(delete_delta.clone(), &mut cursor)) + .unwrap(); // Check updated sum - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); let group_a = state .changes .iter() @@ -2198,11 +2782,13 @@ mod tests { delete_all_b.delete(3, vec![Value::Text("B".into()), Value::Integer(30)]); delete_all_b.delete(4, vec![Value::Text("B".into()), Value::Integer(15)]); - let _ = agg.eval(delete_all_b.clone(), None); - agg.commit(delete_all_b); + pager + .io + .block(|| agg.commit(delete_all_b.clone(), &mut cursor)) + .unwrap(); // Group B should be gone - let final_state = agg.get_current_state(); + let final_state = get_current_state_from_btree(&agg, 
&pager, &mut cursor); assert_eq!(final_state.changes.len(), 1); // Only group A remains assert_eq!(final_state.changes[0].0.values[0], Value::Text("A".into())); } @@ -2213,17 +2799,29 @@ mod tests { let group_by = vec!["category".to_string()]; let input_columns = vec!["category".to_string(), "value".to_string()]; - let mut agg = AggregateOperator::new(group_by, aggregates.clone(), input_columns); + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + + let mut agg = AggregateOperator::new( + 1, // operator_id for testing + group_by, + aggregates.clone(), + input_columns, + ); // Initialize with data let mut init_data = Delta::new(); init_data.insert(1, vec![Value::Text("A".into()), Value::Integer(10)]); init_data.insert(2, vec![Value::Text("A".into()), Value::Integer(20)]); init_data.insert(3, vec![Value::Text("A".into()), Value::Integer(30)]); - agg.initialize(init_data); + pager + .io + .block(|| agg.commit(init_data.clone(), &mut cursor)) + .unwrap(); // Check initial average - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state.changes.len(), 1); assert_eq!(state.changes[0].0.values[1], Value::Float(20.0)); // AVG = (10+20+30)/3 = 20 @@ -2231,21 +2829,25 @@ mod tests { let mut delete_delta = Delta::new(); delete_delta.delete(2, vec![Value::Text("A".into()), Value::Integer(20)]); - let _ = agg.eval(delete_delta.clone(), None); - agg.commit(delete_delta); + pager + .io + .block(|| agg.commit(delete_delta.clone(), &mut cursor)) + .unwrap(); // Check updated average - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state.changes[0].0.values[1], Value::Float(20.0)); // AVG = (10+30)/2 = 20 (same!) 
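The "(same!)" outcome deserves a standalone illustration. A minimal sketch (not the crate's `AggregateState`) of why deletes work for AVG: the operator keeps a weighted (sum, count) pair and derives the average on read, so removing a value equal to the current mean leaves the mean unchanged.

```rust
/// Incremental AVG state: weighted sum and count, average derived on read.
#[derive(Default)]
struct AvgState {
    sum: f64,
    count: i64,
}

impl AvgState {
    /// Apply one value with a DBSP-style weight (+1 insert, -1 delete).
    fn apply(&mut self, value: f64, weight: i64) {
        self.sum += value * weight as f64;
        self.count += weight;
    }

    /// The average only exists while the group still has rows.
    fn avg(&self) -> Option<f64> {
        (self.count > 0).then(|| self.sum / self.count as f64)
    }
}

fn main() {
    let mut a = AvgState::default();
    for v in [10.0, 20.0, 30.0] {
        a.apply(v, 1);
    }
    assert_eq!(a.avg(), Some(20.0)); // (10 + 20 + 30) / 3
    a.apply(20.0, -1); // delete a value equal to the mean
    assert_eq!(a.avg(), Some(20.0)); // unchanged, as asserted above
    a.apply(30.0, -1);
    assert_eq!(a.avg(), Some(10.0)); // 10 / 1
}
```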
// Delete another to change the average let mut delete_another = Delta::new(); delete_another.delete(3, vec![Value::Text("A".into()), Value::Integer(30)]); - let _ = agg.eval(delete_another.clone(), None); - agg.commit(delete_another); + pager + .io + .block(|| agg.commit(delete_another.clone(), &mut cursor)) + .unwrap(); - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state.changes[0].0.values[1], Value::Float(10.0)); // AVG = 10/1 = 10 } @@ -2260,17 +2862,29 @@ mod tests { let group_by = vec!["category".to_string()]; let input_columns = vec!["category".to_string(), "value".to_string()]; - let mut agg = AggregateOperator::new(group_by, aggregates.clone(), input_columns); + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + + let mut agg = AggregateOperator::new( + 1, // operator_id for testing + group_by, + aggregates.clone(), + input_columns, + ); // Initialize with data let mut init_data = Delta::new(); init_data.insert(1, vec![Value::Text("A".into()), Value::Integer(100)]); init_data.insert(2, vec![Value::Text("A".into()), Value::Integer(200)]); init_data.insert(3, vec![Value::Text("B".into()), Value::Integer(50)]); - agg.initialize(init_data); + pager + .io + .block(|| agg.commit(init_data.clone(), &mut cursor)) + .unwrap(); // Check initial state - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); let group_a = state .changes .iter() @@ -2285,11 +2899,13 @@ mod tests { let mut delete_delta = Delta::new(); delete_delta.delete(1, vec![Value::Text("A".into()), Value::Integer(100)]); - let _ = agg.eval(delete_delta.clone(), None); - agg.commit(delete_delta); + pager + .io + .block(|| agg.commit(delete_delta.clone(), &mut cursor)) + .unwrap(); // Check all aggregates updated correctly - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); let group_a = state .changes .iter() @@ -2304,10 +2920,12 @@ mod tests { let mut insert_delta = Delta::new(); insert_delta.insert(4, vec![Value::Text("A".into()), Value::Float(50.5)]); - let _ = agg.eval(insert_delta.clone(), None); - agg.commit(insert_delta); + pager + .io + .block(|| agg.commit(insert_delta.clone(), &mut cursor)) + .unwrap(); - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); let group_a = state .changes .iter() @@ -2324,6 +2942,10 @@ mod tests { // When a row's rowid changes (e.g., UPDATE t SET a=1 WHERE a=3 on INTEGER PRIMARY KEY), // the operator should properly consolidate the state + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut filter = FilterOperator::new( FilterPredicate::GreaterThan { column: "b".to_string(), @@ -2335,10 +2957,12 @@ mod tests { // Initialize with a row (rowid=3, values=[3, 3]) let mut init_data = Delta::new(); init_data.insert(3, vec![Value::Integer(3), Value::Integer(3)]); - filter.initialize(init_data); + let state = pager + .io + .block(|| filter.commit(init_data.clone(), &mut cursor)) + .unwrap(); // Check initial state - let state = filter.get_current_state(); assert_eq!(state.changes.len(), 1); assert_eq!(state.changes[0].0.rowid, 3); assert_eq!( @@ -2352,29 +2976,15 @@ mod tests { update_delta.delete(3, 
vec![Value::Integer(3), Value::Integer(3)]); update_delta.insert(1, vec![Value::Integer(1), Value::Integer(3)]); - let output = filter.eval(update_delta.clone(), None); - filter.commit(update_delta); + let output = pager + .io + .block(|| filter.commit(update_delta.clone(), &mut cursor)) + .unwrap(); // The output delta should have both changes (both pass the filter b > 2) assert_eq!(output.changes.len(), 2); assert_eq!(output.changes[0].1, -1); // delete weight assert_eq!(output.changes[1].1, 1); // insert weight - - // The current state should be consolidated to only have rows with positive weight - let final_state = filter.get_current_state(); - - // After consolidation, we should have only one row with rowid=1 - assert_eq!( - final_state.changes.len(), - 1, - "State should be consolidated to have only one row" - ); - assert_eq!(final_state.changes[0].0.rowid, 1); - assert_eq!( - final_state.changes[0].0.values, - vec![Value::Integer(1), Value::Integer(3)] - ); - assert_eq!(final_state.changes[0].1, 1); // positive weight } // ============================================================================ @@ -2388,6 +2998,10 @@ mod tests { #[test] fn test_filter_eval_with_uncommitted() { + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut filter = FilterOperator::new( FilterPredicate::GreaterThan { column: "age".to_string(), @@ -2414,10 +3028,12 @@ mod tests { Value::Integer(20), ], ); - filter.initialize(init_data); + let state = pager + .io + .block(|| filter.commit(init_data.clone(), &mut cursor)) + .unwrap(); // Verify initial state (only Alice passes filter) - let state = filter.get_current_state(); assert_eq!(state.changes.len(), 1); assert_eq!(state.changes[0].0.rowid, 1); @@ -2441,7 +3057,11 @@ mod tests { ); // Eval with uncommitted - should return filtered uncommitted rows - let result = filter.eval(Delta::new(), Some(uncommitted.clone())); + let mut eval_state = uncommitted.clone().into(); + let result = pager + .io + .block(|| filter.eval(&mut eval_state, &mut cursor)) + .unwrap(); assert_eq!( result.changes.len(), 1, @@ -2449,23 +3069,16 @@ mod tests { ); assert_eq!(result.changes[0].0.rowid, 3); - // Verify state hasn't changed - let state_after_eval = filter.get_current_state(); - assert_eq!( - state_after_eval.changes.len(), - 1, - "State should still only have Alice" - ); - assert_eq!(state_after_eval.changes[0].0.rowid, 1); - // Now commit the changes - filter.commit(uncommitted); + let state = pager + .io + .block(|| filter.commit(uncommitted.clone(), &mut cursor)) + .unwrap(); // State should now include Charlie (who passes filter) - let final_state = filter.get_current_state(); assert_eq!( - final_state.changes.len(), - 2, + state.changes.len(), + 1, "State should now have Alice and Charlie" ); } @@ -2473,7 +3086,12 @@ mod tests { #[test] fn test_aggregate_eval_with_uncommitted_preserves_state() { // This is the critical test - aggregations must not modify internal state during eval + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec!["category".to_string()], vec![ AggregateFunction::Count, @@ -2512,10 +3130,13 @@ mod tests { Value::Integer(150), ], ); - agg.initialize(init_data); + pager + .io + .block(|| agg.commit(init_data.clone(), 
&mut cursor)) + .unwrap(); // Check initial state: A -> (count=2, sum=300), B -> (count=1, sum=150) - let initial_state = agg.get_current_state(); + let initial_state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(initial_state.changes.len(), 2); // Store initial state for comparison @@ -2547,7 +3168,11 @@ mod tests { ); // Eval with uncommitted should return the delta (changes to aggregates) - let result = agg.eval(Delta::new(), Some(uncommitted.clone())); + let mut eval_state = uncommitted.clone().into(); + let result = pager + .io + .block(|| agg.eval(&mut eval_state, &mut cursor)) + .unwrap(); // Result should contain updates for A and new group C // For A: retraction of old (2, 300) and insertion of new (3, 350) @@ -2555,7 +3180,7 @@ mod tests { assert!(!result.changes.is_empty(), "Should have aggregate changes"); // CRITICAL: Verify internal state hasn't changed - let state_after_eval = agg.get_current_state(); + let state_after_eval = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!( state_after_eval.changes.len(), 2, @@ -2579,10 +3204,13 @@ mod tests { ); // Now commit the changes - agg.commit(uncommitted); + pager + .io + .block(|| agg.commit(uncommitted.clone(), &mut cursor)) + .unwrap(); // State should now be updated - let final_state = agg.get_current_state(); + let final_state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(final_state.changes.len(), 3, "Should now have A, B, and C"); let a_final = final_state @@ -2622,7 +3250,12 @@ mod tests { fn test_aggregate_eval_multiple_times_without_commit() { // Test that calling eval multiple times with different uncommitted data // doesn't pollute the internal state + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec![], // No GROUP BY vec![ AggregateFunction::Count, @@ -2635,10 +3268,13 @@ mod tests { let mut init_data = Delta::new(); init_data.insert(1, vec![Value::Integer(1), Value::Integer(100)]); init_data.insert(2, vec![Value::Integer(2), Value::Integer(200)]); - agg.initialize(init_data); + pager + .io + .block(|| agg.commit(init_data.clone(), &mut cursor)) + .unwrap(); // Initial state: count=2, sum=300 - let initial_state = agg.get_current_state(); + let initial_state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(initial_state.changes.len(), 1); assert_eq!(initial_state.changes[0].0.values[0], Value::Integer(2)); assert_eq!(initial_state.changes[0].0.values[1], Value::Float(300.0)); @@ -2646,10 +3282,14 @@ mod tests { // First eval with uncommitted let mut uncommitted1 = Delta::new(); uncommitted1.insert(3, vec![Value::Integer(3), Value::Integer(50)]); - let _ = agg.eval(Delta::new(), Some(uncommitted1)); + let mut eval_state1 = uncommitted1.clone().into(); + let _ = pager + .io + .block(|| agg.eval(&mut eval_state1, &mut cursor)) + .unwrap(); // State should be unchanged - let state1 = agg.get_current_state(); + let state1 = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state1.changes[0].0.values[0], Value::Integer(2)); assert_eq!(state1.changes[0].0.values[1], Value::Float(300.0)); @@ -2657,20 +3297,28 @@ mod tests { let mut uncommitted2 = Delta::new(); uncommitted2.insert(4, vec![Value::Integer(4), Value::Integer(75)]); uncommitted2.insert(5, vec![Value::Integer(5), Value::Integer(25)]); - let _ = 
agg.eval(Delta::new(), Some(uncommitted2)); + let mut eval_state2 = uncommitted2.clone().into(); + let _ = pager + .io + .block(|| agg.eval(&mut eval_state2, &mut cursor)) + .unwrap(); // State should STILL be unchanged - let state2 = agg.get_current_state(); + let state2 = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state2.changes[0].0.values[0], Value::Integer(2)); assert_eq!(state2.changes[0].0.values[1], Value::Float(300.0)); // Third eval with deletion as uncommitted let mut uncommitted3 = Delta::new(); uncommitted3.delete(1, vec![Value::Integer(1), Value::Integer(100)]); - let _ = agg.eval(Delta::new(), Some(uncommitted3)); + let mut eval_state3 = uncommitted3.clone().into(); + let _ = pager + .io + .block(|| agg.eval(&mut eval_state3, &mut cursor)) + .unwrap(); // State should STILL be unchanged - let state3 = agg.get_current_state(); + let state3 = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state3.changes[0].0.values[0], Value::Integer(2)); assert_eq!(state3.changes[0].0.values[1], Value::Float(300.0)); } @@ -2678,7 +3326,12 @@ mod tests { #[test] fn test_aggregate_eval_with_mixed_committed_and_uncommitted() { // Test eval with both committed delta and uncommitted changes + // Create a persistent pager for the test + let (pager, root_page_id) = create_test_pager(); + let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page_id, 10); + let mut agg = AggregateOperator::new( + 1, // operator_id for testing vec!["type".to_string()], vec![AggregateFunction::Count], vec!["id".to_string(), "type".to_string()], @@ -2688,7 +3341,10 @@ mod tests { let mut init_data = Delta::new(); init_data.insert(1, vec![Value::Integer(1), Value::Text("X".into())]); init_data.insert(2, vec![Value::Integer(2), Value::Text("Y".into())]); - agg.initialize(init_data); + pager + .io + .block(|| agg.commit(init_data.clone(), &mut cursor)) + .unwrap(); // Create a committed delta (to be processed) let mut committed_delta = Delta::new(); @@ -2700,20 +3356,76 @@ mod tests { uncommitted.insert(5, vec![Value::Integer(5), Value::Text("Z".into())]); // Eval with both - should process both but not commit - let result = agg.eval(committed_delta.clone(), Some(uncommitted)); + let mut combined = committed_delta.clone(); + combined.merge(&uncommitted); + let mut eval_state = combined.clone().into(); + let result = pager + .io + .block(|| agg.eval(&mut eval_state, &mut cursor)) + .unwrap(); // Result should reflect changes from both - assert!(!result.changes.is_empty()); + assert!(!result.changes.is_empty(), "Result should not be empty"); + + // Verify the DBSP pattern: retraction (-1) followed by insertion (1) for updates, + // and just insertion (1) for new groups + + // We expect exactly 5 changes: + // - X: retraction + insertion (was 1, now 2) + // - Y: retraction + insertion (was 1, now 2) + // - Z: insertion only (new group with count 1) + assert_eq!( + result.changes.len(), + 5, + "Should have 5 changes (2 retractions + 3 insertions)" + ); + + // Sort by group name then by weight to get predictable order + let mut sorted_changes: Vec<_> = result.changes.iter().collect(); + sorted_changes.sort_by(|a, b| { + let a_group = &a.0.values[0]; + let b_group = &b.0.values[0]; + match a_group.partial_cmp(b_group).unwrap() { + std::cmp::Ordering::Equal => a.1.cmp(&b.1), // Sort by weight if same group + other => other, + } + }); + + // Check X group: should have retraction (-1) for count=1, then insertion (1) for count=2 + assert_eq!(sorted_changes[0].0.values[0], 
Value::Text("X".into())); + assert_eq!(sorted_changes[0].0.values[1], Value::Integer(1)); // old count + assert_eq!(sorted_changes[0].1, -1); // retraction + + assert_eq!(sorted_changes[1].0.values[0], Value::Text("X".into())); + assert_eq!(sorted_changes[1].0.values[1], Value::Integer(2)); // new count + assert_eq!(sorted_changes[1].1, 1); // insertion + + // Check Y group: should have retraction (-1) for count=1, then insertion (1) for count=2 + assert_eq!(sorted_changes[2].0.values[0], Value::Text("Y".into())); + assert_eq!(sorted_changes[2].0.values[1], Value::Integer(1)); // old count + assert_eq!(sorted_changes[2].1, -1); // retraction + + assert_eq!(sorted_changes[3].0.values[0], Value::Text("Y".into())); + assert_eq!(sorted_changes[3].0.values[1], Value::Integer(2)); // new count + assert_eq!(sorted_changes[3].1, 1); // insertion + + // Check Z group: should only have insertion (1) for count=1 (new group) + assert_eq!(sorted_changes[4].0.values[0], Value::Text("Z".into())); + assert_eq!(sorted_changes[4].0.values[1], Value::Integer(1)); // new count + assert_eq!(sorted_changes[4].1, 1); // insertion only (no retraction as it's new); // But internal state should be unchanged - let state = agg.get_current_state(); + let state = get_current_state_from_btree(&agg, &pager, &mut cursor); assert_eq!(state.changes.len(), 2, "Should still have only X and Y"); // Now commit only the committed_delta - agg.commit(committed_delta); + pager + .io + .block(|| agg.commit(committed_delta.clone(), &mut cursor)) + .unwrap(); // State should now have X count=2, Y count=1 - let final_state = agg.get_current_state(); + let final_state = get_current_state_from_btree(&agg, &pager, &mut cursor); let x = final_state .changes .iter() diff --git a/core/incremental/view.rs b/core/incremental/view.rs index e7ba76980..b15faf847 100644 --- a/core/incremental/view.rs +++ b/core/incremental/view.rs @@ -1,13 +1,16 @@ use super::compiler::{DbspCircuit, DbspCompiler, DeltaSet}; -use super::dbsp::{RowKeyStream, RowKeyZSet}; -use super::operator::{ComputationTracker, Delta, FilterPredicate}; +use super::dbsp::Delta; +use super::operator::{ComputationTracker, FilterPredicate}; use crate::schema::{BTreeTable, Column, Schema}; +use crate::storage::btree::BTreeCursor; use crate::translate::logical::LogicalPlanBuilder; -use crate::types::{IOCompletions, IOResult, Value}; +use crate::types::{IOResult, Value}; use crate::util::extract_view_columns; -use crate::{io_yield_one, Completion, LimboError, Result, Statement}; -use std::collections::{BTreeMap, HashMap}; +use crate::{return_if_io, LimboError, Pager, Result, Statement}; +use std::cell::RefCell; +use std::collections::HashMap; use std::fmt; +use std::rc::Rc; use std::sync::{Arc, Mutex}; use turso_parser::ast; use turso_parser::{ @@ -23,18 +26,26 @@ pub enum PopulateState { Processing { stmt: Box, rows_processed: usize, + /// If we're in the middle of processing a row (merge_delta returned I/O) + pending_row: Option<(i64, Vec)>, // (rowid, values) }, /// Population complete Done, } +/// State machine for merge_delta to handle I/O operations impl fmt::Debug for PopulateState { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { PopulateState::Start => write!(f, "Start"), - PopulateState::Processing { rows_processed, .. } => f + PopulateState::Processing { + rows_processed, + pending_row, + .. 
+ } => f .debug_struct("Processing") .field("rows_processed", rows_processed) + .field("has_pending", &pending_row.is_some()) .finish(), PopulateState::Done => write!(f, "Done"), } @@ -45,11 +56,95 @@ impl fmt::Debug for PopulateState { #[derive(Debug, Clone, Default)] pub struct ViewTransactionState { // Per-connection delta for uncommitted changes (contains both weights and values) - pub delta: Delta, + // Using RefCell for interior mutability + delta: RefCell, } -/// Incremental view that maintains a stream of row keys using DBSP-style computation -/// The actual row data is stored as transformed Values +impl ViewTransactionState { + /// Create a new transaction state + pub fn new() -> Self { + Self { + delta: RefCell::new(Delta::new()), + } + } + + /// Insert a row into the delta + pub fn insert(&self, key: i64, values: Vec) { + self.delta.borrow_mut().insert(key, values); + } + + /// Delete a row from the delta + pub fn delete(&self, key: i64, values: Vec) { + self.delta.borrow_mut().delete(key, values); + } + + /// Clear all changes in the delta + pub fn clear(&self) { + self.delta.borrow_mut().changes.clear(); + } + + /// Get a clone of the current delta + pub fn get_delta(&self) -> Delta { + self.delta.borrow().clone() + } + + /// Check if the delta is empty + pub fn is_empty(&self) -> bool { + self.delta.borrow().is_empty() + } + + /// Returns how many elements exist in the delta. + pub fn len(&self) -> usize { + self.delta.borrow().len() + } +} + +/// Container for all view transaction states within a connection +/// Provides interior mutability for the map of view states +#[derive(Debug, Clone, Default)] +pub struct AllViewsTxState { + states: Rc>>>, +} + +impl AllViewsTxState { + /// Create a new container for view transaction states + pub fn new() -> Self { + Self { + states: Rc::new(RefCell::new(HashMap::new())), + } + } + + /// Get or create a transaction state for a view + pub fn get_or_create(&self, view_name: &str) -> Rc { + let mut states = self.states.borrow_mut(); + states + .entry(view_name.to_string()) + .or_insert_with(|| Rc::new(ViewTransactionState::new())) + .clone() + } + + /// Get a transaction state for a view if it exists + pub fn get(&self, view_name: &str) -> Option> { + self.states.borrow().get(view_name).cloned() + } + + /// Clear all transaction states + pub fn clear(&self) { + self.states.borrow_mut().clear(); + } + + /// Check if there are no transaction states + pub fn is_empty(&self) -> bool { + self.states.borrow().is_empty() + } + + /// Get all view names that have transaction states + pub fn get_view_names(&self) -> Vec { + self.states.borrow().keys().cloned().collect() + } +} + +/// Incremental view that maintains its state through a DBSP circuit /// /// This version keeps everything in-memory. This is acceptable for small views, since DBSP /// doesn't have to track the history of changes. Still for very large views (think of the result @@ -62,12 +157,7 @@ pub struct ViewTransactionState { /// Uses DBSP circuits for incremental computation. 
#[derive(Debug)]
 pub struct IncrementalView {
-    // Stream of row keys for this view
-    stream: RowKeyStream,
     name: String,
-    // Store the actual row data as Values, keyed by row_key
-    // Using BTreeMap for ordered iteration
-    pub records: BTreeMap<i64, Vec<Value>>,
     // WHERE clause predicate for filtering (kept for compatibility)
     pub where_predicate: FilterPredicate,
     // The SELECT statement that defines how to transform input data
@@ -75,8 +165,6 @@
     // DBSP circuit that encapsulates the computation
     circuit: DbspCircuit,
-    // Track whether circuit has been initialized with data
-    circuit_initialized: bool,
 
     // Tables referenced by this view (extracted from FROM clause and JOINs)
     base_table: Arc<BTreeTable>,
@@ -88,6 +176,8 @@
     // We will use this one day to export rows_read, but for now, will just test that we're doing the expected amount of compute
     #[cfg_attr(not(test), allow(dead_code))]
     pub tracker: Arc<Mutex<ComputationTracker>>,
+    // Root page of the btree storing the materialized state (0 for unmaterialized)
+    root_page: usize,
 }
 
 impl IncrementalView {
@@ -110,6 +200,8 @@ impl IncrementalView {
         select: &ast::Select,
         schema: &Schema,
         _base_table: &Arc<BTreeTable>,
+        main_data_root: usize,
+        internal_state_root: usize,
     ) -> Result<DbspCircuit> {
         // Build the logical plan from the SELECT statement
         let mut builder = LogicalPlanBuilder::new(schema);
@@ -117,8 +209,8 @@
         let stmt = ast::Stmt::Select(select.clone());
         let logical_plan = builder.build_statement(&stmt)?;
 
-        // Compile the logical plan to a DBSP circuit
-        let compiler = DbspCompiler::new();
+        // Compile the logical plan to a DBSP circuit with the storage roots
+        let compiler = DbspCompiler::new(main_data_root, internal_state_root);
         let circuit = compiler.compile(&logical_plan)?;
 
         Ok(circuit)
@@ -145,7 +237,37 @@
         false
     }
 
-    pub fn from_sql(sql: &str, schema: &Schema) -> Result<Self> {
+    /// Validate a SELECT statement and extract the columns it would produce
+    /// This is used during CREATE MATERIALIZED VIEW to validate the view before storing it
+    pub fn validate_and_extract_columns(
+        select: &ast::Select,
+        schema: &Schema,
+    ) -> Result<Vec<Column>> {
+        // For now, just extract columns from a simple select
+        // This will need to be expanded to handle joins, aggregates, etc.
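+        // Note that this means a projection such as `SELECT a, b FROM t` is
+        // currently validated against, and reported as, all columns of `t`.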
+ + // Get the base table name + let base_table_name = Self::extract_base_table(select).ok_or_else(|| { + LimboError::ParseError("Cannot extract base table from SELECT".to_string()) + })?; + + // Get the table from schema + let table = schema + .get_table(&base_table_name) + .and_then(|t| t.btree()) + .ok_or_else(|| LimboError::ParseError(format!("Table {base_table_name} not found")))?; + + // For now, return all columns from the base table + // In the future, this should parse the select list and handle projections + Ok(table.columns.clone()) + } + + pub fn from_sql( + sql: &str, + schema: &Schema, + main_data_root: usize, + internal_state_root: usize, + ) -> Result { let mut parser = Parser::new(sql.as_bytes()); let cmd = parser.next_cmd()?; let cmd = cmd.expect("View is an empty statement"); @@ -155,7 +277,13 @@ impl IncrementalView { view_name, columns: _, select, - }) => IncrementalView::from_stmt(view_name, select, schema), + }) => IncrementalView::from_stmt( + view_name, + select, + schema, + main_data_root, + internal_state_root, + ), _ => Err(LimboError::ParseError(format!( "View is not a CREATE MATERIALIZED VIEW statement: {sql}" ))), @@ -166,6 +294,8 @@ impl IncrementalView { view_name: ast::QualifiedName, select: ast::Select, schema: &Schema, + main_data_root: usize, + internal_state_root: usize, ) -> Result { let name = view_name.name.as_str().to_string(); @@ -203,9 +333,12 @@ impl IncrementalView { base_table, view_columns, schema, + main_data_root, + internal_state_root, ) } + #[allow(clippy::too_many_arguments)] pub fn new( name: String, where_predicate: FilterPredicate, @@ -213,30 +346,31 @@ impl IncrementalView { base_table: Arc, columns: Vec, schema: &Schema, + main_data_root: usize, + internal_state_root: usize, ) -> Result { - let records = BTreeMap::new(); - // Create the tracker that will be shared by all operators let tracker = Arc::new(Mutex::new(ComputationTracker::new())); // Compile the SELECT statement into a DBSP circuit - let circuit = Self::try_compile_circuit(&select_stmt, schema, &base_table)?; - - // Circuit will be initialized when we first call merge_delta - let circuit_initialized = false; + let circuit = Self::try_compile_circuit( + &select_stmt, + schema, + &base_table, + main_data_root, + internal_state_root, + )?; Ok(Self { - stream: RowKeyStream::from_zset(RowKeyZSet::new()), name, - records, where_predicate, select_stmt, circuit, - circuit_initialized, base_table, columns, populate_state: PopulateState::Start, tracker, + root_page: main_data_root, }) } @@ -244,6 +378,29 @@ impl IncrementalView { &self.name } + pub fn base_table(&self) -> &Arc { + &self.base_table + } + + /// Execute the circuit with uncommitted changes to get processed delta + pub fn execute_with_uncommitted( + &mut self, + uncommitted: DeltaSet, + pager: Rc, + execute_state: &mut crate::incremental::compiler::ExecuteState, + ) -> crate::Result> { + // Initialize execute_state with the input data + *execute_state = crate::incremental::compiler::ExecuteState::Init { + input_data: uncommitted, + }; + self.circuit.execute(pager, execute_state) + } + + /// Get the root page for this materialized view's btree + pub fn get_root_page(&self) -> usize { + self.root_page + } + /// Get all table names referenced by this view pub fn get_referenced_table_names(&self) -> Vec { vec![self.base_table.name.clone()] @@ -348,132 +505,189 @@ impl IncrementalView { /// Populate the view by scanning the source table using a state machine /// This can be called multiple times and will resume from where 
it left off + /// This method is only for materialized views and will persist data to the btree pub fn populate_from_table( &mut self, conn: &std::sync::Arc, + pager: &std::rc::Rc, + _btree_cursor: &mut BTreeCursor, ) -> crate::Result> { // If already populated, return immediately if matches!(self.populate_state, PopulateState::Done) { return Ok(IOResult::Done(())); } - const BATCH_SIZE: usize = 100; // Process 100 rows at a time before yielding + // Assert that this is a materialized view with a root page + assert!( + self.root_page != 0, + "populate_from_table should only be called for materialized views with root_page" + ); loop { - match &mut self.populate_state { - PopulateState::Start => { - // Generate the SQL query for populating the view - // It is best to use a standard query than a cursor for two reasons: - // 1) Using a sql query will allow us to be much more efficient in cases where we only want - // some rows, in particular for indexed filters - // 2) There are two types of cursors: index and table. In some situations (like for example - // if the table has an integer primary key), the key will be exclusively in the index - // btree and not in the table btree. Using cursors would force us to be aware of this - // distinction (and others), and ultimately lead to reimplementing the whole query - // machinery (next step is which index is best to use, etc) - let query = self.sql_for_populate()?; + // To avoid borrow checker issues, we need to handle state transitions carefully + let needs_start = matches!(self.populate_state, PopulateState::Start); - // Prepare the statement - let stmt = conn.prepare(&query)?; + if needs_start { + // Generate the SQL query for populating the view + // It is best to use a standard query than a cursor for two reasons: + // 1) Using a sql query will allow us to be much more efficient in cases where we only want + // some rows, in particular for indexed filters + // 2) There are two types of cursors: index and table. In some situations (like for example + // if the table has an integer primary key), the key will be exclusively in the index + // btree and not in the table btree. 
Using cursors would force us to be aware of this + // distinction (and others), and ultimately lead to reimplementing the whole query + // machinery (next step is which index is best to use, etc) + let query = self.sql_for_populate()?; - self.populate_state = PopulateState::Processing { - stmt: Box::new(stmt), - rows_processed: 0, - }; - // Continue to next state + // Prepare the statement + let stmt = conn.prepare(&query)?; + + self.populate_state = PopulateState::Processing { + stmt: Box::new(stmt), + rows_processed: 0, + pending_row: None, + }; + // Continue to next state + continue; + } + + // Handle Done state + if matches!(self.populate_state, PopulateState::Done) { + return Ok(IOResult::Done(())); + } + + // Handle Processing state - extract state to avoid borrow issues + let (mut stmt, mut rows_processed, pending_row) = + match std::mem::replace(&mut self.populate_state, PopulateState::Done) { + PopulateState::Processing { + stmt, + rows_processed, + pending_row, + } => (stmt, rows_processed, pending_row), + _ => unreachable!("We already handled Start and Done states"), + }; + + // If we have a pending row from a previous I/O interruption, process it first + if let Some((rowid, values)) = pending_row { + // Create a single-row delta for the pending row + let mut single_row_delta = Delta::new(); + single_row_delta.insert(rowid, values.clone()); + + // Process the pending row with the pager + match self.merge_delta(&single_row_delta, pager.clone())? { + IOResult::Done(_) => { + // Row processed successfully, continue to next row + rows_processed += 1; + // Continue to fetch next row from statement + } + IOResult::IO(io) => { + // Still not done, save state with pending row + self.populate_state = PopulateState::Processing { + stmt, + rows_processed, + pending_row: Some((rowid, values)), // Keep the pending row + }; + return Ok(IOResult::IO(io)); + } } + } - PopulateState::Processing { - stmt, - rows_processed, - } => { - // Collect rows into a delta batch - let mut batch_delta = Delta::new(); - let mut batch_count = 0; + // Process rows one at a time - no batching + loop { + // This step() call resumes from where the statement left off + match stmt.step()? { + crate::vdbe::StepResult::Row => { + // Get the row + let row = stmt.row().unwrap(); - loop { - if batch_count >= BATCH_SIZE { - // Process this batch through the standard pipeline - self.merge_delta(&batch_delta); - // Yield control after processing a batch - // TODO: currently this inner statement is the one that is tracking completions - // so as a stop gap we can just return a dummy completion here - io_yield_one!(Completion::new_dummy()); - } + // Extract values from the row + let all_values: Vec = + row.get_values().cloned().collect(); - // This step() call resumes from where the statement left off - match stmt.step()? 
{ - crate::vdbe::StepResult::Row => { - // Get the row - let row = stmt.row().unwrap(); - - // Extract values from the row - let all_values: Vec = - row.get_values().cloned().collect(); - - // Determine how to extract the rowid - // If there's a rowid alias (INTEGER PRIMARY KEY), the rowid is one of the columns - // Otherwise, it's the last value we explicitly selected - let (rowid, values) = if let Some((idx, _)) = - self.base_table.get_rowid_alias_column() - { - // The rowid is the value at the rowid alias column index - let rowid = match all_values.get(idx) { - Some(crate::types::Value::Integer(id)) => *id, - _ => { - // This shouldn't happen - rowid alias must be an integer - *rows_processed += 1; - batch_count += 1; - continue; - } - }; - // All values are table columns (no separate rowid was selected) - (rowid, all_values) - } else { - // The last value is the explicitly selected rowid - let rowid = match all_values.last() { - Some(crate::types::Value::Integer(id)) => *id, - _ => { - // This shouldn't happen - rowid must be an integer - *rows_processed += 1; - batch_count += 1; - continue; - } - }; - // Get all values except the rowid - let values = all_values[..all_values.len() - 1].to_vec(); - (rowid, values) + // Determine how to extract the rowid + // If there's a rowid alias (INTEGER PRIMARY KEY), the rowid is one of the columns + // Otherwise, it's the last value we explicitly selected + let (rowid, values) = + if let Some((idx, _)) = self.base_table.get_rowid_alias_column() { + // The rowid is the value at the rowid alias column index + let rowid = match all_values.get(idx) { + Some(crate::types::Value::Integer(id)) => *id, + _ => { + // This shouldn't happen - rowid alias must be an integer + rows_processed += 1; + continue; + } }; + // All values are table columns (no separate rowid was selected) + (rowid, all_values) + } else { + // The last value is the explicitly selected rowid + let rowid = match all_values.last() { + Some(crate::types::Value::Integer(id)) => *id, + _ => { + // This shouldn't happen - rowid must be an integer + rows_processed += 1; + continue; + } + }; + // Get all values except the rowid + let values = all_values[..all_values.len() - 1].to_vec(); + (rowid, values) + }; - // Add to batch delta - let merge_delta handle filtering and aggregation - batch_delta.insert(rowid, values); + // Create a single-row delta and process it immediately + let mut single_row_delta = Delta::new(); + single_row_delta.insert(rowid, values.clone()); - *rows_processed += 1; - batch_count += 1; + // Process this single row through merge_delta with the pager + match self.merge_delta(&single_row_delta, pager.clone())? 
{ + IOResult::Done(_) => { + // Row processed successfully, continue to next row + rows_processed += 1; } - crate::vdbe::StepResult::Done => { - // Process any remaining rows in the batch - self.merge_delta(&batch_delta); - // All rows processed, move to Done state - self.populate_state = PopulateState::Done; - return Ok(IOResult::Done(())); - } - crate::vdbe::StepResult::Interrupt | crate::vdbe::StepResult::Busy => { - return Err(LimboError::Busy); - } - crate::vdbe::StepResult::IO => { - // Process current batch before yielding - self.merge_delta(&batch_delta); - // The Statement needs to wait for IO - io_yield_one!(Completion::new_dummy()); + IOResult::IO(io) => { + // Save state and return I/O + // We'll resume at the SAME row when called again (don't increment rows_processed) + // The circuit still has unfinished work for this row + self.populate_state = PopulateState::Processing { + stmt, + rows_processed, // Don't increment - row not done yet! + pending_row: Some((rowid, values)), // Save the row for resumption + }; + return Ok(IOResult::IO(io)); } } } - } - PopulateState::Done => { - // Already populated - return Ok(IOResult::Done(())); + crate::vdbe::StepResult::Done => { + // All rows processed, we're done + self.populate_state = PopulateState::Done; + return Ok(IOResult::Done(())); + } + + crate::vdbe::StepResult::Interrupt | crate::vdbe::StepResult::Busy => { + // Save state before returning error + self.populate_state = PopulateState::Processing { + stmt, + rows_processed, + pending_row: None, // No pending row when interrupted between rows + }; + return Err(LimboError::Busy); + } + + crate::vdbe::StepResult::IO => { + // Statement needs I/O - save state and return + self.populate_state = PopulateState::Processing { + stmt, + rows_processed, + pending_row: None, // No pending row when interrupted between rows + }; + // TODO: Get the actual I/O completion from the statement + let completion = crate::io::Completion::new_dummy(); + return Ok(IOResult::IO(crate::types::IOCompletions::Single( + completion, + ))); + } } } } @@ -555,95 +769,23 @@ impl IncrementalView { None } - /// Get the current records as an iterator - for cursor-based access - pub fn iter(&self) -> impl Iterator)> + '_ { - self.stream.to_vec().into_iter().filter_map(move |row| { - self.records - .get(&row.rowid) - .map(|values| (row.rowid, values.clone())) - }) - } - - /// Get current data merged with transaction state - pub fn current_data(&self, tx_state: Option<&ViewTransactionState>) -> Vec<(i64, Vec)> { - if let Some(tx_state) = tx_state { - // Use circuit to process uncommitted changes - let mut uncommitted = DeltaSet::new(); - uncommitted.insert(self.base_table.name.clone(), tx_state.delta.clone()); - - // Execute with uncommitted changes (won't affect circuit state) - match self.circuit.execute(HashMap::new(), uncommitted) { - Ok(processed_delta) => { - // Merge processed delta with committed records - let mut result_map: BTreeMap> = self.records.clone(); - for (row, weight) in &processed_delta.changes { - if *weight > 0 { - result_map.insert(row.rowid, row.values.clone()); - } else if *weight < 0 { - result_map.remove(&row.rowid); - } - } - result_map.into_iter().collect() - } - Err(e) => { - // Return error or panic - no fallback - panic!("Failed to execute circuit with uncommitted data: {e:?}"); - } - } - } else { - // No transaction state: return committed records - self.records.clone().into_iter().collect() - } - } - /// Merge a delta of changes into the view's current state - pub fn merge_delta(&mut 
self, delta: &Delta) { + pub fn merge_delta( + &mut self, + delta: &Delta, + pager: std::rc::Rc, + ) -> crate::Result> { // Early return if delta is empty if delta.is_empty() { - return; + return Ok(IOResult::Done(())); } - // Use the circuit to process the delta + // Use the circuit to process the delta and write to btree let mut input_data = HashMap::new(); input_data.insert(self.base_table.name.clone(), delta.clone()); - // If circuit hasn't been initialized yet, initialize it first - // This happens during populate_from_table - if !self.circuit_initialized { - // Initialize the circuit with empty state - self.circuit - .initialize(HashMap::new()) - .expect("Failed to initialize circuit"); - self.circuit_initialized = true; - } - - // Execute the circuit to process the delta - let current_delta = match self.circuit.execute(input_data.clone(), DeltaSet::empty()) { - Ok(output) => { - // Commit the changes to the circuit's internal state - self.circuit - .commit(input_data) - .expect("Failed to commit to circuit"); - output - } - Err(e) => { - panic!("Failed to execute circuit: {e:?}"); - } - }; - - // Update records and stream with the processed delta - let mut zset_delta = RowKeyZSet::new(); - - for (row, weight) in ¤t_delta.changes { - if *weight > 0 { - self.records.insert(row.rowid, row.values.clone()); - zset_delta.insert(row.clone(), 1); - } else if *weight < 0 { - self.records.remove(&row.rowid); - zset_delta.insert(row.clone(), -1); - } - } - - self.stream.apply_delta(&zset_delta); + // The circuit now handles all btree I/O internally with the provided pager + let _delta = return_if_io!(self.circuit.commit(input_data, pager)); + Ok(IOResult::Done(())) } } diff --git a/core/io/io_uring.rs b/core/io/io_uring.rs index 393169028..80f8c8444 100644 --- a/core/io/io_uring.rs +++ b/core/io/io_uring.rs @@ -7,6 +7,7 @@ use crate::{turso_assert, LimboError, Result}; use parking_lot::Mutex; use rustix::fs::{self, FlockOperation, OFlags}; use std::ptr::NonNull; +use std::sync::atomic::{AtomicBool, Ordering}; use std::{ collections::{HashMap, VecDeque}, io::ErrorKind, @@ -43,6 +44,10 @@ const MAX_WAIT: usize = 4; /// One memory arena for DB pages and another for WAL frames const ARENA_COUNT: usize = 2; +/// Arbitrary non-zero user_data for barrier operation when handling a partial writev +/// writing a commit frame. +const BARRIER_USER_DATA: u64 = 1; + pub struct UringIO { inner: Arc>, } @@ -56,6 +61,7 @@ struct WrappedIOUring { writev_states: HashMap, overflow: VecDeque, iov_pool: IovecPool, + pending_link: AtomicBool, } struct InnerUringIO { @@ -122,6 +128,7 @@ impl UringIO { pending_ops: 0, writev_states: HashMap::new(), iov_pool: IovecPool::new(), + pending_link: AtomicBool::new(false), }, free_files: (0..FILES).collect(), free_arenas: [const { None }; ARENA_COUNT], @@ -153,6 +160,7 @@ macro_rules! with_fd { /// wrapper type to represent a possibly registered file descriptor, /// only used in WritevState, and piggy-backs on the available methods from /// `UringFile`, so we don't have to store the file on `WritevState`. 
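// A minimal sketch of the SQE link-chain ordering relied on by the hunks
// around this point, assuming the raw `io_uring` crate API (the fd, buffer,
// and user_data values here are illustrative, not this patch's wrappers).
// An entry flagged IO_LINK must complete before the next entry in the chain
// starts, and the kernel severs the chain on errors and on short writes,
// which is why a partial writev below falls back to an explicit Fsync
// barrier flagged IO_DRAIN instead of trusting the original link.
fn queue_write_then_fsync(
    sq: &mut io_uring::squeue::SubmissionQueue<'_>,
    fd: io_uring::types::Fd,
    buf: &[u8],
    pos: u64,
) -> Result<(), io_uring::squeue::PushError> {
    let write = io_uring::opcode::Write::new(fd, buf.as_ptr(), buf.len() as u32)
        .offset(pos)
        .build()
        .user_data(0x10)
        // Link: the fsync below will not be started until this write completes.
        .flags(io_uring::squeue::Flags::IO_LINK);
    // No IO_LINK on the fsync: it terminates the chain, mirroring how
    // UringFile::sync clears `pending_link` in the hunks below.
    let fsync = io_uring::opcode::Fsync::new(fd).build().user_data(0x11);
    unsafe {
        sq.push(&write)?;
        sq.push(&fsync)?;
    }
    Ok(())
}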
+#[derive(Clone)] enum Fd { Fixed(u32), RawFd(i32), @@ -194,10 +202,12 @@ struct WritevState { bufs: Vec>, /// we keep the last iovec allocation alive until final CQE last_iov_allocation: Option>, + had_partial: bool, + linked_op: bool, } impl WritevState { - fn new(file: &UringFile, pos: u64, bufs: Vec>) -> Self { + fn new(file: &UringFile, pos: u64, linked: bool, bufs: Vec>) -> Self { let file_id = file .id() .map(Fd::Fixed) @@ -212,6 +222,8 @@ impl WritevState { bufs, last_iov_allocation: None, total_len, + had_partial: false, + linked_op: linked, } } @@ -353,7 +365,7 @@ impl WrappedIOUring { } /// Submit or resubmit a writev operation - fn submit_writev(&mut self, key: u64, mut st: WritevState) { + fn submit_writev(&mut self, key: u64, mut st: WritevState, continue_chain: bool) { st.free_last_iov(&mut self.iov_pool); let mut iov_allocation = self.iov_pool.acquire().unwrap_or_else(|| { // Fallback: allocate a new one if pool is exhausted @@ -391,7 +403,7 @@ impl WrappedIOUring { } // If we have coalesced everything into a single iovec, submit as a single`pwrite` if iov_count == 1 { - let entry = with_fd!(st.file_id, |fd| { + let mut entry = with_fd!(st.file_id, |fd| { if let Some(id) = st.bufs[st.current_buffer_idx].fixed_id() { io_uring::opcode::WriteFixed::new( fd, @@ -413,6 +425,16 @@ impl WrappedIOUring { .user_data(key) } }); + + if st.linked_op && !st.had_partial { + // Starting a new link chain + entry = entry.flags(io_uring::squeue::Flags::IO_LINK); + self.pending_link.store(true, Ordering::Release); + } else if continue_chain && !st.had_partial { + // Continue existing chain + entry = entry.flags(io_uring::squeue::Flags::IO_LINK); + } + self.submit_entry(&entry); return; } @@ -422,12 +444,15 @@ impl WrappedIOUring { let ptr = iov_allocation.as_ptr() as *mut libc::iovec; st.last_iov_allocation = Some(iov_allocation); - let entry = with_fd!(st.file_id, |fd| { + let mut entry = with_fd!(st.file_id, |fd| { io_uring::opcode::Writev::new(fd, ptr, iov_count as u32) .offset(st.file_pos) .build() .user_data(key) }); + if st.linked_op { + entry = entry.flags(io_uring::squeue::Flags::IO_LINK); + } // track the current state in case we get a partial write self.writev_states.insert(key, st); self.submit_entry(&entry); @@ -452,6 +477,19 @@ impl WrappedIOUring { ); // write complete, return iovec to pool state.free_last_iov(&mut self.iov_pool); + if state.linked_op && state.had_partial { + // if it was a linked operation, we need to submit a fsync after this writev + // to ensure data is on disk + self.ring.submit().expect("submit after writev"); + let file_id = state.file_id; + let sync = with_fd!(file_id, |fd| { + io_uring::opcode::Fsync::new(fd) + .build() + .user_data(BARRIER_USER_DATA) + }) + .flags(io_uring::squeue::Flags::IO_DRAIN); + self.submit_entry(&sync); + } completion_from_key(user_data).complete(state.total_written as i32); } remaining => { @@ -461,8 +499,10 @@ impl WrappedIOUring { written, remaining ); - // partial write, submit next - self.submit_writev(user_data, state); + // make sure partial write is recorded, because fsync could happen after this + // and we are not finished writing to disk + state.had_partial = true; + self.submit_writev(user_data, state, false); } } } @@ -530,6 +570,14 @@ impl IO for UringIO { // if we have ongoing writev state, handle it separately and don't call completion ring.handle_writev_completion(state, user_data, result); continue; + } else if user_data == BARRIER_USER_DATA { + // barrier operation, no completion to call + if result < 0 { + 
let err = std::io::Error::from_raw_os_error(result); + tracing::error!("barrier operation failed: {}", err); + return Err(err.into()); + } + continue; } completion_from_key(user_data).complete(result) } @@ -680,7 +728,7 @@ impl File for UringFile { fn pwrite(&self, pos: u64, buffer: Arc, c: Completion) -> Result { let mut io = self.io.lock(); - let write = { + let mut write = { let ptr = buffer.as_ptr(); let len = buffer.len(); with_fd!(self, |fd| { @@ -708,6 +756,15 @@ impl File for UringFile { } }) }; + if c.needs_link() { + // Start a new link chain + write = write.flags(io_uring::squeue::Flags::IO_LINK); + io.ring.pending_link.store(true, Ordering::Release); + } else if io.ring.pending_link.load(Ordering::Acquire) { + // Continue existing link chain + write = write.flags(io_uring::squeue::Flags::IO_LINK); + } + io.ring.submit_entry(&write); Ok(c) } @@ -720,6 +777,8 @@ impl File for UringFile { .build() .user_data(get_key(c.clone())) }); + // sync always ends the chain of linked operations + io.ring.pending_link.store(false, Ordering::Release); io.ring.submit_entry(&sync); Ok(c) } @@ -734,10 +793,14 @@ impl File for UringFile { if bufs.len().eq(&1) { return self.pwrite(pos, bufs[0].clone(), c.clone()); } + let linked = c.needs_link(); tracing::trace!("pwritev(pos = {}, bufs.len() = {})", pos, bufs.len()); // create state to track ongoing writev operation - let state = WritevState::new(self, pos, bufs); - self.io.lock().ring.submit_writev(get_key(c.clone()), state); + let state = WritevState::new(self, pos, linked, bufs); + let mut io = self.io.lock(); + let continue_chain = !linked && io.ring.pending_link.load(Ordering::Acquire); + io.ring + .submit_writev(get_key(c.clone()), state, continue_chain); Ok(c) } @@ -746,12 +809,16 @@ impl File for UringFile { } fn truncate(&self, len: u64, c: Completion) -> Result { - let truncate = with_fd!(self, |fd| { + let mut truncate = with_fd!(self, |fd| { io_uring::opcode::Ftruncate::new(fd, len) .build() .user_data(get_key(c.clone())) }); - self.io.lock().ring.submit_entry(&truncate); + let mut io = self.io.lock(); + if io.ring.pending_link.load(Ordering::Acquire) { + truncate = truncate.flags(io_uring::squeue::Flags::IO_LINK); + } + io.ring.submit_entry(&truncate); Ok(c) } } diff --git a/core/io/mod.rs b/core/io/mod.rs index 4b877e37b..2922004d2 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -135,6 +135,7 @@ struct CompletionInner { /// None means we completed successfully // Thread safe with OnceLock result: std::sync::OnceLock>, + needs_link: bool, } impl Debug for CompletionType { @@ -161,10 +162,34 @@ impl Completion { inner: Arc::new(CompletionInner { completion_type, result: OnceLock::new(), + needs_link: false, }), } } + pub fn new_linked(completion_type: CompletionType) -> Self { + Self { + inner: Arc::new(CompletionInner { + completion_type, + result: OnceLock::new(), + needs_link: true, + }), + } + } + + pub fn needs_link(&self) -> bool { + self.inner.needs_link + } + + pub fn new_write_linked(complete: F) -> Self + where + F: Fn(Result) + 'static, + { + Self::new_linked(CompletionType::Write(WriteCompletion::new(Box::new( + complete, + )))) + } + pub fn new_write(complete: F) -> Self where F: Fn(Result) + 'static, @@ -226,27 +251,31 @@ impl Completion { } pub fn complete(&self, result: i32) { - if self.inner.result.set(None).is_ok() { - let result = Ok(result); - match &self.inner.completion_type { - CompletionType::Read(r) => r.callback(result), - CompletionType::Write(w) => w.callback(result), - CompletionType::Sync(s) => 
s.callback(result), // fix - CompletionType::Truncate(t) => t.callback(result), - }; - } + let result = Ok(result); + match &self.inner.completion_type { + CompletionType::Read(r) => r.callback(result), + CompletionType::Write(w) => w.callback(result), + CompletionType::Sync(s) => s.callback(result), // fix + CompletionType::Truncate(t) => t.callback(result), + }; + self.inner + .result + .set(None) + .expect("result must be set only once"); } pub fn error(&self, err: CompletionError) { - if self.inner.result.set(Some(err)).is_ok() { - let result = Err(err); - match &self.inner.completion_type { - CompletionType::Read(r) => r.callback(result), - CompletionType::Write(w) => w.callback(result), - CompletionType::Sync(s) => s.callback(result), // fix - CompletionType::Truncate(t) => t.callback(result), - }; - } + let result = Err(err); + match &self.inner.completion_type { + CompletionType::Read(r) => r.callback(result), + CompletionType::Write(w) => w.callback(result), + CompletionType::Sync(s) => s.callback(result), // fix + CompletionType::Truncate(t) => t.callback(result), + }; + self.inner + .result + .set(Some(err)) + .expect("result must be set only once"); } pub fn abort(&self) { diff --git a/core/json/jsonb.rs b/core/json/jsonb.rs index c4c95aac0..721ae6b91 100644 --- a/core/json/jsonb.rs +++ b/core/json/jsonb.rs @@ -841,6 +841,18 @@ impl JsonbHeader { } } +pub struct ArrayIteratorState { + cursor: usize, + end: usize, + index: usize, +} + +pub struct ObjectIteratorState { + cursor: usize, + end: usize, + index: usize, +} + impl Jsonb { pub fn new(capacity: usize, data: Option<&[u8]>) -> Self { if let Some(data) = data { @@ -2872,6 +2884,94 @@ impl Jsonb { Ok(()) } + + pub fn array_iterator(&self) -> Result { + let (hdr, off) = self.read_header(0)?; + match hdr { + JsonbHeader(ElementType::ARRAY, len) => Ok(ArrayIteratorState { + cursor: off, + end: off + len, + index: 0, + }), + _ => bail_parse_error!("jsonb.array_iterator(): not an array"), + } + } + + pub fn array_iterator_next( + &self, + st: &ArrayIteratorState, + ) -> Option<((usize, Jsonb), ArrayIteratorState)> { + if st.cursor >= st.end { + return None; + } + + let (JsonbHeader(_, payload_len), header_len) = self.read_header(st.cursor).ok()?; + let start = st.cursor; + let stop = start.checked_add(header_len + payload_len)?; + + if stop > st.end || stop > self.data.len() { + return None; + } + + let elem = Jsonb::new(stop - start, Some(&self.data[start..stop])); + let next = ArrayIteratorState { + cursor: stop, + end: st.end, + index: st.index + 1, + }; + + Some(((st.index, elem), next)) + } + + pub fn object_iterator(&self) -> Result { + let (hdr, off) = self.read_header(0)?; + match hdr { + JsonbHeader(ElementType::OBJECT, len) => Ok(ObjectIteratorState { + cursor: off, + end: off + len, + index: 0, + }), + _ => bail_parse_error!("jsonb.object_iterator(): not an object"), + } + } + + pub fn object_iterator_next( + &self, + st: &ObjectIteratorState, + ) -> Option<((usize, Jsonb, Jsonb), ObjectIteratorState)> { + if st.cursor >= st.end { + return None; + } + + // key + let (JsonbHeader(key_ty, key_len), key_hdr_len) = self.read_header(st.cursor).ok()?; + if !key_ty.is_valid_key() { + return None; + } + let key_start = st.cursor; + let key_stop = key_start.checked_add(key_hdr_len + key_len)?; + if key_stop > st.end || key_stop > self.data.len() { + return None; + } + + // value + let (JsonbHeader(_, val_len), val_hdr_len) = self.read_header(key_stop).ok()?; + let val_start = key_stop; + let val_stop = 
val_start.checked_add(val_hdr_len + val_len)?; + if val_stop > st.end || val_stop > self.data.len() { + return None; + } + + let key = Jsonb::new(key_stop - key_start, Some(&self.data[key_start..key_stop])); + let value = Jsonb::new(val_stop - val_start, Some(&self.data[val_start..val_stop])); + let next = ObjectIteratorState { + cursor: val_stop, + end: st.end, + index: st.index + 1, + }; + + Some(((st.index, key, value), next)) + } } impl std::str::FromStr for Jsonb { diff --git a/core/json/mod.rs b/core/json/mod.rs index caa1b28a0..311c595fa 100644 --- a/core/json/mod.rs +++ b/core/json/mod.rs @@ -3,6 +3,7 @@ mod error; pub(crate) mod jsonb; mod ops; pub(crate) mod path; +pub(crate) mod vtab; use crate::json::error::Error as JsonError; pub use crate::json::ops::{ diff --git a/core/json/vtab.rs b/core/json/vtab.rs new file mode 100644 index 000000000..8957b951c --- /dev/null +++ b/core/json/vtab.rs @@ -0,0 +1,436 @@ +use std::{cell::RefCell, result::Result, sync::Arc}; + +use turso_ext::{ConstraintUsage, ResultCode}; + +use crate::{ + json::{ + convert_dbtype_to_jsonb, + jsonb::{ArrayIteratorState, Jsonb, ObjectIteratorState}, + vtab::columns::Columns, + Conv, + }, + types::Text, + vtab::{InternalVirtualTable, InternalVirtualTableCursor}, + Connection, LimboError, Value, +}; + +use super::jsonb; + +pub struct JsonEachVirtualTable; + +const COL_KEY: usize = 0; +const COL_VALUE: usize = 1; +const COL_TYPE: usize = 2; +const COL_ATOM: usize = 3; +const COL_ID: usize = 4; +const COL_PARENT: usize = 5; +const COL_FULLKEY: usize = 6; +const COL_PATH: usize = 7; +const COL_JSON: usize = 8; +const COL_ROOT: usize = 9; + +impl InternalVirtualTable for JsonEachVirtualTable { + fn name(&self) -> String { + "json_each".to_owned() + } + + fn open( + &self, + _conn: Arc, + ) -> crate::Result>> { + Ok(Arc::new(RefCell::new(JsonEachCursor::default()))) + } + + fn best_index( + &self, + constraints: &[turso_ext::ConstraintInfo], + _order_by: &[turso_ext::OrderByInfo], + ) -> Result { + use turso_ext::ConstraintOp; + + let mut usages = vec![ + ConstraintUsage { + argv_index: None, + omit: false + }; + constraints.len() + ]; + let mut have_json = false; + + for (i, c) in constraints.iter().enumerate() { + if c.usable && c.op == ConstraintOp::Eq && c.column_index as usize == COL_JSON { + usages[i] = ConstraintUsage { + argv_index: Some(1), + omit: true, + }; + have_json = true; + break; + } + } + + Ok(turso_ext::IndexInfo { + idx_num: i32::from(have_json), + idx_str: None, + order_by_consumed: false, + estimated_cost: if have_json { 10.0 } else { 1_000_000.0 }, + estimated_rows: if have_json { 100 } else { u32::MAX }, + constraint_usages: usages, + }) + } + + fn sql(&self) -> String { + "CREATE TABLE json_each( + key ANY, -- key for current element relative to its parent + value ANY, -- value for the current element + type TEXT, -- 'object','array','string','integer', etc. 
+ atom ANY, -- value for primitive types, null for array & object + id INTEGER, -- integer ID for this element + parent INTEGER, -- integer ID for the parent of this element + fullkey TEXT, -- full path describing the current element + path TEXT, -- path to the container of the current row + json JSON HIDDEN, -- 1st input parameter: the raw JSON + root TEXT HIDDEN -- 2nd input parameter: the PATH at which to start + );" + .to_owned() + } +} + +impl std::fmt::Debug for JsonEachVirtualTable { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("JsonEachVirtualTable").finish() + } +} + +enum IteratorState { + Array(ArrayIteratorState), + Object(ObjectIteratorState), + Primitive, + None, +} + +pub struct JsonEachCursor { + rowid: i64, + no_more_rows: bool, + json: Jsonb, + iterator_state: IteratorState, + columns: Columns, +} + +impl Default for JsonEachCursor { + fn default() -> Self { + Self { + rowid: 0, + no_more_rows: false, + json: Jsonb::new(0, None), + iterator_state: IteratorState::None, + columns: Columns::default(), + } + } +} + +impl InternalVirtualTableCursor for JsonEachCursor { + fn filter( + &mut self, + args: &[Value], + _idx_str: Option, + _idx_num: i32, + ) -> Result { + if args.is_empty() { + return Ok(false); + } + if args.len() == 2 { + return Err(LimboError::InvalidArgument( + "2-arg json_each is not supported yet".to_owned(), + )); + } + if args.len() != 1 && args.len() != 2 { + return Err(LimboError::InvalidArgument( + "json_each accepts 1 or 2 arguments".to_owned(), + )); + } + + let db_value = &args[0]; + + let jsonb = convert_dbtype_to_jsonb(db_value, Conv::Strict)?; + + let element_type = jsonb.element_type()?; + self.json = jsonb; + + match element_type { + jsonb::ElementType::ARRAY => { + let iter = self.json.array_iterator()?; + self.iterator_state = IteratorState::Array(iter); + } + jsonb::ElementType::OBJECT => { + let iter = self.json.object_iterator()?; + self.iterator_state = IteratorState::Object(iter); + } + jsonb::ElementType::NULL + | jsonb::ElementType::TRUE + | jsonb::ElementType::FALSE + | jsonb::ElementType::INT + | jsonb::ElementType::INT5 + | jsonb::ElementType::FLOAT + | jsonb::ElementType::FLOAT5 + | jsonb::ElementType::TEXT + | jsonb::ElementType::TEXT5 + | jsonb::ElementType::TEXTJ + | jsonb::ElementType::TEXTRAW => { + self.iterator_state = IteratorState::Primitive; + } + jsonb::ElementType::RESERVED1 + | jsonb::ElementType::RESERVED2 + | jsonb::ElementType::RESERVED3 => { + unreachable!("element type not supported: {element_type:?}"); + } + }; + + self.next() + } + + fn next(&mut self) -> Result { + self.rowid += 1; + if self.no_more_rows { + return Ok(false); + } + + match &self.iterator_state { + IteratorState::Array(state) => { + let Some(((idx, jsonb), new_state)) = self.json.array_iterator_next(state) else { + self.no_more_rows = true; + return Ok(false); + }; + self.iterator_state = IteratorState::Array(new_state); + self.columns = Columns::new(columns::Key::Integer(idx as i64), jsonb); + } + IteratorState::Object(state) => { + let Some(((_idx, key, value), new_state)): Option<( + (usize, Jsonb, Jsonb), + ObjectIteratorState, + )> = self.json.object_iterator_next(state) else { + self.no_more_rows = true; + return Ok(false); + }; + + self.iterator_state = IteratorState::Object(new_state); + let key = key.to_string(); + self.columns = Columns::new(columns::Key::String(key), value); + } + IteratorState::Primitive => { + let json = std::mem::replace(&mut self.json, Jsonb::new(0, None)); + self.columns = 
Columns::new_from_primitive(json); + self.no_more_rows = true; + } + IteratorState::None => unreachable!(), + }; + + Ok(true) + } + + fn rowid(&self) -> i64 { + self.rowid + } + + fn column(&self, idx: usize) -> Result { + Ok(match idx { + COL_KEY => self.columns.key(), + COL_VALUE => self.columns.value()?, + COL_TYPE => self.columns.ttype(), + COL_ATOM => self.columns.atom()?, + COL_ID => Value::Integer(self.rowid), + COL_PARENT => self.columns.parent(), + COL_FULLKEY => self.columns.fullkey(), + COL_PATH => self.columns.path(), + COL_ROOT => Value::Text(Text::new("json, todo")), + _ => Value::Null, + }) + } +} + +mod columns { + use crate::{ + json::{ + json_string_to_db_type, + jsonb::{self, ElementType, Jsonb}, + OutputVariant, + }, + types::Text, + LimboError, Value, + }; + + #[derive(Debug)] + pub(super) enum Key { + Integer(i64), + String(String), + } + + impl Key { + fn empty() -> Self { + Self::Integer(0) + } + + fn fullkey_representation(&self) -> Value { + match self { + Key::Integer(ref i) => Value::Text(Text::new(&format!("$[{i}]"))), + Key::String(ref text) => { + let mut needs_quoting: bool = false; + + let mut text = (text[1..text.len() - 1]).to_owned(); + if text.contains('.') || text.contains(" ") || text.contains('"') { + needs_quoting = true; + } + + if needs_quoting { + text = format!("\"{text}\""); + } + let s = format!("$.{text}"); + + Value::Text(Text::new(&s)) + } + } + } + + fn key_representation(&self) -> Value { + match self { + Key::Integer(ref i) => Value::Integer(*i), + Key::String(ref s) => Value::Text(Text::new( + &s[1..s.len() - 1].to_owned().replace("\\\"", "\""), + )), + } + } + } + + pub(super) struct Columns { + key: Key, + value: Jsonb, + is_primitive: bool, + } + + impl Default for Columns { + fn default() -> Columns { + Self { + key: Key::empty(), + value: Jsonb::new(0, None), + is_primitive: false, + } + } + } + + impl Columns { + pub(super) fn new(key: Key, value: Jsonb) -> Self { + Self { + key, + value, + is_primitive: false, + } + } + + pub(super) fn new_from_primitive(value: Jsonb) -> Self { + Self { + key: Key::empty(), + value, + is_primitive: true, + } + } + + pub(super) fn atom(&self) -> Result { + Self::atom_from_value(&self.value) + } + + pub(super) fn value(&self) -> Result { + let element_type = self.value.element_type()?; + Ok(match element_type { + ElementType::ARRAY | ElementType::OBJECT => { + json_string_to_db_type(self.value.clone(), element_type, OutputVariant::String)? 
+ } + _ => Self::atom_from_value(&self.value)?, + }) + } + + pub(super) fn key(&self) -> Value { + if self.is_primitive { + return Value::Null; + } + self.key.key_representation() + } + + fn atom_from_value(value: &Jsonb) -> Result { + let element_type = value.element_type().expect("invalid value"); + let string: Result = match element_type { + jsonb::ElementType::NULL => Ok(Value::Null), + jsonb::ElementType::TRUE => Ok(Value::Integer(1)), + jsonb::ElementType::FALSE => Ok(Value::Integer(0)), + jsonb::ElementType::INT | jsonb::ElementType::INT5 => Self::jsonb_to_integer(value), + jsonb::ElementType::FLOAT | jsonb::ElementType::FLOAT5 => { + Self::jsonb_to_float(value) + } + jsonb::ElementType::TEXT + | jsonb::ElementType::TEXTJ + | jsonb::ElementType::TEXT5 + | jsonb::ElementType::TEXTRAW => { + let s = value.to_string(); + let s = (s[1..s.len() - 1]).to_string(); + Ok(Value::Text(Text::new(&s))) + } + jsonb::ElementType::ARRAY => Ok(Value::Null), + jsonb::ElementType::OBJECT => Ok(Value::Null), + jsonb::ElementType::RESERVED1 => Ok(Value::Null), + jsonb::ElementType::RESERVED2 => Ok(Value::Null), + jsonb::ElementType::RESERVED3 => Ok(Value::Null), + }; + + string + } + + fn jsonb_to_integer(value: &Jsonb) -> Result { + let string = value.to_string(); + let int = string.parse::()?; + + Ok(Value::Integer(int)) + } + + fn jsonb_to_float(value: &Jsonb) -> Result { + let string = value.to_string(); + let float = string.parse::()?; + + Ok(Value::Float(float)) + } + + pub(super) fn fullkey(&self) -> Value { + if self.is_primitive { + return Value::Text(Text::new("$")); + } + self.key.fullkey_representation() + } + + pub(super) fn path(&self) -> Value { + Value::Text(Text::new("$")) + } + + pub(super) fn parent(&self) -> Value { + Value::Null + } + + pub(super) fn ttype(&self) -> Value { + let element_type = self.value.element_type().expect("invalid value"); + let ttype = match element_type { + jsonb::ElementType::NULL => "null", + jsonb::ElementType::TRUE => "true", + jsonb::ElementType::FALSE => "false", + jsonb::ElementType::INT | jsonb::ElementType::INT5 => "integer", + jsonb::ElementType::FLOAT | jsonb::ElementType::FLOAT5 => "real", + jsonb::ElementType::TEXT + | jsonb::ElementType::TEXTJ + | jsonb::ElementType::TEXT5 + | jsonb::ElementType::TEXTRAW => "text", + jsonb::ElementType::ARRAY => "array", + jsonb::ElementType::OBJECT => "object", + jsonb::ElementType::RESERVED1 + | jsonb::ElementType::RESERVED2 + | jsonb::ElementType::RESERVED3 => unreachable!(), + }; + + Value::Text(Text::new(ttype)) + } + } +} diff --git a/core/lib.rs b/core/lib.rs index 03532f41b..ebefb551c 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -32,7 +32,6 @@ mod uuid; mod vdbe; mod vector; mod vtab; -mod vtab_view; #[cfg(feature = "fuzz")] pub mod numeric; @@ -40,7 +39,7 @@ pub mod numeric; #[cfg(not(feature = "fuzz"))] mod numeric; -use crate::incremental::view::ViewTransactionState; +use crate::incremental::view::AllViewsTxState; use crate::storage::encryption::CipherMode; use crate::translate::optimizer::optimize_plan; use crate::translate::pragma::TURSO_CDC_DEFAULT_TABLE_NAME; @@ -65,20 +64,23 @@ use parking_lot::RwLock; use schema::Schema; use std::{ borrow::Cow, - cell::{Cell, RefCell, UnsafeCell}, + cell::{Cell, RefCell}, collections::HashMap, fmt::{self, Display}, io::Write, num::NonZero, ops::Deref, rc::Rc, - sync::{atomic::AtomicUsize, Arc, LazyLock, Mutex, Weak}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, LazyLock, Mutex, Weak, + }, }; #[cfg(feature = "fs")] use storage::database::DatabaseFile; 
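// Expected behavior of the json_each table-valued function implemented above
// (a hedged sketch mirroring SQLite's json_each; the shapes follow from
// Columns::{key, fullkey, path, ttype} in core/json/vtab.rs):
//
//     SELECT key, value, type, fullkey, path
//     FROM json_each('[1, 2.5, "three"]');
//     -- key:     0, 1, 2            (array index -> Key::Integer)
//     -- type:    'integer', 'real', 'text'
//     -- fullkey: '$[0]', '$[1]', '$[2]'
//     -- path:    '$'                (container of the current row)
//
// A primitive top-level value yields a single row with key NULL and fullkey
// '$' (the IteratorState::Primitive branch), and the two-argument form
// json_each(json, path) is rejected with InvalidArgument for now, per
// JsonEachCursor::filter.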
pub use storage::database::IOContext; pub use storage::encryption::{EncryptionContext, EncryptionKey}; -use storage::page_cache::DumbLruPageCache; +use storage::page_cache::PageCache; use storage::pager::{AtomicDbState, DbState}; use storage::sqlite3_ondisk::PageSize; pub use storage::{ @@ -185,8 +187,8 @@ pub struct Database { buffer_pool: Arc, // Shared structures of a Database are the parts that are common to multiple threads that might // create DB connections. - _shared_page_cache: Arc>, - maybe_shared_wal: RwLock>>>, + _shared_page_cache: Arc>, + shared_wal: Arc>, db_state: Arc, init_lock: Arc>, open_flags: OpenFlags, @@ -227,9 +229,9 @@ impl fmt::Debug for Database { }; debug_struct.field("init_lock", &init_lock_status); - let wal_status = match self.maybe_shared_wal.try_read().as_deref() { - Some(Some(_)) => "present", - Some(None) => "none", + let wal_status = match self.shared_wal.try_read() { + Some(wal) if wal.enabled.load(Ordering::Relaxed) => "enabled", + Some(_) => "disabled", None => "locked_for_write", }; debug_struct.field("wal_state", &wal_status); @@ -365,7 +367,7 @@ impl Database { flags: OpenFlags, opts: DatabaseOpts, ) -> Result> { - let maybe_shared_wal = WalFileShared::open_shared_if_exists(&io, wal_path)?; + let shared_wal = WalFileShared::open_shared_if_exists(&io, wal_path)?; let mv_store = if opts.enable_mvcc { Some(Arc::new(MvStore::new( @@ -383,7 +385,7 @@ impl Database { DbState::Initialized }; - let shared_page_cache = Arc::new(RwLock::new(DumbLruPageCache::default())); + let shared_page_cache = Arc::new(RwLock::new(PageCache::default())); let syms = SymbolTable::new(); let arena_size = if std::env::var("TESTING").is_ok_and(|v| v.eq_ignore_ascii_case("true")) { BufferPool::TEST_ARENA_SIZE @@ -397,7 +399,7 @@ impl Database { wal_path: wal_path.to_string(), schema: Mutex::new(Arc::new(Schema::new(opts.enable_indexes))), _shared_page_cache: shared_page_cache.clone(), - maybe_shared_wal: RwLock::new(maybe_shared_wal), + shared_wal, db_file, builtin_syms: syms.into(), io: io.clone(), @@ -438,13 +440,6 @@ impl Database { Ok(()) })?; } - // FIXME: the correct way to do this is to just materialize the view. - // But this will allow us to keep going. - let conn = db.connect()?; - let pager = conn.pager.borrow().clone(); - pager - .io - .block(|| conn.schema.borrow().populate_materialized_views(&conn))?; Ok(db) } @@ -486,7 +481,7 @@ impl Database { attached_databases: RefCell::new(DatabaseCatalog::new()), query_only: Cell::new(false), mv_tx_id: Cell::new(None), - view_transaction_states: RefCell::new(HashMap::new()), + view_transaction_states: AllViewsTxState::new(), metrics: RefCell::new(ConnectionMetrics::new()), is_nested_stmt: Cell::new(false), encryption_key: RefCell::new(None), @@ -534,10 +529,10 @@ impl Database { /// 2. PageSize::default(), i.e. 
4096 fn determine_actual_page_size( &self, - maybe_shared_wal: Option<&WalFileShared>, + shared_wal: &WalFileShared, requested_page_size: Option, ) -> Result { - if let Some(shared_wal) = maybe_shared_wal { + if shared_wal.enabled.load(Ordering::Relaxed) { let size_in_wal = shared_wal.page_size(); if size_in_wal != 0 { let Some(page_size) = PageSize::new(size_in_wal) else { @@ -560,13 +555,12 @@ impl Database { } fn init_pager(&self, requested_page_size: Option) -> Result { - // Open existing WAL file if present - let mut maybe_shared_wal = self.maybe_shared_wal.write(); - if let Some(shared_wal) = maybe_shared_wal.clone() { - let page_size = self.determine_actual_page_size( - Some(unsafe { &*shared_wal.get() }), - requested_page_size, - )?; + // Check if WAL is enabled + let shared_wal = self.shared_wal.read(); + if shared_wal.enabled.load(Ordering::Relaxed) { + let page_size = self.determine_actual_page_size(&shared_wal, requested_page_size)?; + drop(shared_wal); + let buffer_pool = self.buffer_pool.clone(); if self.db_state.is_initialized() { buffer_pool.finalize_with_page_size(page_size.get() as usize)?; @@ -575,14 +569,14 @@ impl Database { let db_state = self.db_state.clone(); let wal = Rc::new(RefCell::new(WalFile::new( self.io.clone(), - shared_wal, + self.shared_wal.clone(), buffer_pool.clone(), ))); let pager = Pager::new( self.db_file.clone(), Some(wal), self.io.clone(), - Arc::new(RwLock::new(DumbLruPageCache::default())), + Arc::new(RwLock::new(PageCache::default())), buffer_pool.clone(), db_state, self.init_lock.clone(), @@ -590,9 +584,10 @@ impl Database { pager.page_size.set(Some(page_size)); return Ok(pager); } - let buffer_pool = self.buffer_pool.clone(); + let page_size = self.determine_actual_page_size(&shared_wal, requested_page_size)?; + drop(shared_wal); - let page_size = self.determine_actual_page_size(None, requested_page_size)?; + let buffer_pool = self.buffer_pool.clone(); if self.db_state.is_initialized() { buffer_pool.finalize_with_page_size(page_size.get() as usize)?; @@ -604,7 +599,7 @@ impl Database { self.db_file.clone(), None, self.io.clone(), - Arc::new(RwLock::new(DumbLruPageCache::default())), + Arc::new(RwLock::new(PageCache::default())), buffer_pool.clone(), db_state, Arc::new(Mutex::new(())), @@ -614,13 +609,16 @@ impl Database { let file = self .io .open_file(&self.wal_path, OpenFlags::Create, false)?; - let real_shared_wal = WalFileShared::new_shared(file)?; - // Modify Database::maybe_shared_wal to point to the new WAL file so that other connections - // can open the existing WAL. 
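// Hedged reading of this hunk: the Option-wrapped `maybe_shared_wal` is gone;
// a WalFileShared now always exists behind `shared_wal: Arc<RwLock<...>>`,
// and its `enabled: AtomicBool` records whether a WAL file actually backs it.
// Re-enabling WAL therefore mutates the shared instance in place, roughly:
//
//     let file = io.open_file(&wal_path, OpenFlags::Create, false)?;
//     db.shared_wal.write().create(file)?; // installs the file handle
//     // other connections observe it via enabled.load(Ordering::Relaxed)
//
// (That `create` also flips `enabled` is an assumption; this diff only shows
// the call plus the later load/store sites.)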
- *maybe_shared_wal = Some(real_shared_wal.clone()); + + // Enable WAL in the existing shared instance + { + let mut shared_wal = self.shared_wal.write(); + shared_wal.create(file)?; + } + let wal = Rc::new(RefCell::new(WalFile::new( self.io.clone(), - real_shared_wal, + self.shared_wal.clone(), buffer_pool, ))); pager.set_wal(wal); @@ -628,6 +626,38 @@ impl Database { Ok(pager) } + #[cfg(feature = "fs")] + pub fn io_for_path(path: &str) -> Result> { + use crate::util::MEMORY_PATH; + let io: Arc = match path.trim() { + MEMORY_PATH => Arc::new(MemoryIO::new()), + _ => Arc::new(PlatformIO::new()?), + }; + Ok(io) + } + + #[cfg(feature = "fs")] + pub fn io_for_vfs + std::fmt::Display>(vfs: S) -> Result> { + let vfsmods = ext::add_builtin_vfs_extensions(None)?; + let io: Arc = match vfsmods + .iter() + .find(|v| v.0 == vfs.as_ref()) + .map(|v| v.1.clone()) + { + Some(vfs) => vfs, + None => match vfs.as_ref() { + "memory" => Arc::new(MemoryIO::new()), + "syscall" => Arc::new(SyscallIO::new()?), + #[cfg(all(target_os = "linux", feature = "io_uring"))] + "io_uring" => Arc::new(UringIO::new()?), + other => { + return Err(LimboError::InvalidArgument(format!("no such VFS: {other}"))); + } + }, + }; + Ok(io) + } + /// Open a new database file with optionally specifying a VFS without an existing database /// connection and symbol table to register extensions. #[cfg(feature = "fs")] @@ -641,40 +671,13 @@ impl Database { where S: AsRef + std::fmt::Display, { - use crate::util::MEMORY_PATH; - let vfsmods = ext::add_builtin_vfs_extensions(None)?; - match vfs { - Some(vfs) => { - let io: Arc = match vfsmods - .iter() - .find(|v| v.0 == vfs.as_ref()) - .map(|v| v.1.clone()) - { - Some(vfs) => vfs, - None => match vfs.as_ref() { - "memory" => Arc::new(MemoryIO::new()), - "syscall" => Arc::new(SyscallIO::new()?), - #[cfg(all(target_os = "linux", feature = "io_uring"))] - "io_uring" => Arc::new(UringIO::new()?), - other => { - return Err(LimboError::InvalidArgument(format!( - "no such VFS: {other}" - ))); - } - }, - }; - let db = Self::open_file_with_flags(io.clone(), path, flags, opts)?; - Ok((io, db)) - } - None => { - let io: Arc = match path.trim() { - MEMORY_PATH => Arc::new(MemoryIO::new()), - _ => Arc::new(PlatformIO::new()?), - }; - let db = Self::open_file_with_flags(io.clone(), path, flags, opts)?; - Ok((io, db)) - } - } + let io = vfs + .map(|vfs| Self::io_for_vfs(vfs)) + .or_else(|| Some(Self::io_for_path(path))) + .transpose()? + .unwrap(); + let db = Self::open_file_with_flags(io.clone(), path, flags, opts)?; + Ok((io, db)) } #[inline] @@ -920,7 +923,7 @@ pub struct Connection { /// Per-connection view transaction states for uncommitted changes. This represents /// one entry per view that was touched in the transaction. - view_transaction_states: RefCell>, + view_transaction_states: AllViewsTxState, /// Connection-level metrics aggregation pub metrics: RefCell, /// Whether the connection is executing a statement initiated by another statement. @@ -1066,7 +1069,7 @@ impl Connection { // Preserve existing views to avoid expensive repopulation. // TODO: We may not need to do this if we materialize our views. 
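// Context for the removals that follow (hedged): materialized views are now
// persisted in their own btrees, a main root page plus a
// `__turso_internal_dbsp_state_<view>` table (see the core/schema.rs hunks
// below), so the eager populate_materialized_views pass at reprepare time is
// dropped. Population is instead resumable work driven by the PopulateState
// machine in the incremental-view hunk earlier in this patch.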
- let existing_views = self.schema.borrow().materialized_views.clone(); + let existing_views = self.schema.borrow().incremental_views.clone(); // TODO: this is hack to avoid a cyclical problem with schema reprepare // The problem here is that we prepare a statement here, but when the statement tries @@ -1090,13 +1093,6 @@ impl Connection { self.with_schema_mut(|schema| { *schema = fresh; }); - - { - let schema = self.schema.borrow(); - pager - .io - .block(|| schema.populate_materialized_views(self))?; - } Result::Ok(()) } @@ -1315,12 +1311,17 @@ impl Connection { } #[cfg(feature = "fs")] - fn from_uri_attached(uri: &str, db_opts: DatabaseOpts) -> Result> { + fn from_uri_attached( + uri: &str, + db_opts: DatabaseOpts, + io: Arc, + ) -> Result> { let mut opts = OpenOptions::parse(uri)?; // FIXME: for now, only support read only attach opts.mode = OpenMode::ReadOnly; let flags = opts.get_flags()?; - let (_io, db) = Database::open_new(&opts.path, opts.vfs.as_ref(), flags, db_opts)?; + let io = opts.vfs.map(Database::io_for_vfs).unwrap_or(Ok(io))?; + let db = Database::open_file_with_flags(io.clone(), &opts.path, flags, db_opts)?; if let Some(modeof) = opts.modeof { let perms = std::fs::metadata(modeof)?; std::fs::set_permissions(&opts.path, perms.permissions())?; @@ -1678,7 +1679,11 @@ impl Connection { return Ok(()); } - *self._db.maybe_shared_wal.write() = None; + { + let mut shared_wal = self._db.shared_wal.write(); + shared_wal.enabled.store(false, Ordering::Relaxed); + shared_wal.file = None; + } self.pager.borrow_mut().clear_page_cache(); let pager = self._db.init_pager(Some(size.get() as usize))?; self.pager.replace(Rc::new(pager)); @@ -1723,7 +1728,7 @@ impl Connection { .expect("query must be parsed to statement"); let syms = self.syms.borrow(); self.with_schema_mut(|schema| { - let existing_views = schema.materialized_views.clone(); + let existing_views = schema.incremental_views.clone(); if let Err(LimboError::ExtensionError(e)) = parse_schema_rows(rows, schema, &syms, None, existing_views) { @@ -1874,7 +1879,7 @@ impl Connection { .with_indexes(use_indexes) .with_views(use_views) .with_strict(use_strict); - let db = Self::from_uri_attached(path, db_opts)?; + let db = Self::from_uri_attached(path, db_opts, self._db.io.clone())?; let pager = Rc::new(db.init_pager(None)?); self.attached_databases @@ -2055,16 +2060,16 @@ impl Connection { self.syms.borrow().vtab_modules.keys().cloned().collect() } - pub fn set_encryption_key(&self, key: EncryptionKey) { + pub fn set_encryption_key(&self, key: EncryptionKey) -> Result<()> { tracing::trace!("setting encryption key for connection"); *self.encryption_key.borrow_mut() = Some(key.clone()); - self.set_encryption_context(); + self.set_encryption_context() } - pub fn set_encryption_cipher(&self, cipher_mode: CipherMode) { + pub fn set_encryption_cipher(&self, cipher_mode: CipherMode) -> Result<()> { tracing::trace!("setting encryption cipher for connection"); self.encryption_cipher_mode.replace(Some(cipher_mode)); - self.set_encryption_context(); + self.set_encryption_context() } pub fn get_encryption_cipher_mode(&self) -> Option { @@ -2072,17 +2077,22 @@ impl Connection { } // if both key and cipher are set, set encryption context on pager - fn set_encryption_context(&self) { + fn set_encryption_context(&self) -> Result<()> { let key_ref = self.encryption_key.borrow(); let Some(key) = key_ref.as_ref() else { - return; + return Ok(()); }; let Some(cipher_mode) = self.encryption_cipher_mode.get() else { - return; + return Ok(()); }; 
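// Call-site sketch for the now-fallible setters above (hedged; these call
// sites are illustrative, not from this patch):
//
//     conn.set_encryption_cipher(cipher_mode)?; // Ok: key not set yet, no-op
//     conn.set_encryption_key(key)?;            // Ok: both present, installs
//                                               //     the pager context
//     conn.set_encryption_key(other_key)?;      // Err(InvalidArgument): the
//                                               //     session context is set
//
// The guard is the pager.is_encryption_ctx_set() check just below.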
tracing::trace!("setting encryption ctx for connection");
        let pager = self.pager.borrow();
-        pager.set_encryption_context(cipher_mode, key);
+        if pager.is_encryption_ctx_set() {
+            return Err(LimboError::InvalidArgument(
+                "cannot reset encryption attributes if already set in the session".to_string(),
+            ));
+        }
+        pager.set_encryption_context(cipher_mode, key)
     }
 }
diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs
index 13e28c437..6727784e5 100644
--- a/core/mvcc/database/mod.rs
+++ b/core/mvcc/database/mod.rs
@@ -271,6 +271,15 @@ pub struct CommitStateMachine {
     _phantom: PhantomData,
 }

+impl Debug for CommitStateMachine {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("CommitStateMachine")
+            .field("state", &self.state)
+            .field("is_finalized", &self.is_finalized)
+            .finish()
+    }
+}
+
 pub struct WriteRowStateMachine {
     state: WriteRowState,
     is_finalized: bool,
diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs
index 438e897ec..be3a29a5d 100644
--- a/core/mvcc/database/tests.rs
+++ b/core/mvcc/database/tests.rs
@@ -654,10 +654,10 @@ fn test_future_row() {
 use crate::mvcc::cursor::MvccLazyCursor;
 use crate::mvcc::database::{MvStore, Row, RowID};
 use crate::types::Text;
-use crate::MemoryIO;
 use crate::RefValue;
 use crate::Value;
 use crate::{Database, StepResult};
+use crate::{MemoryIO, Statement};

 // Simple atomic clock implementation for testing

@@ -1243,3 +1243,76 @@ fn get_rows(conn: &Arc<Connection>, query: &str) -> Vec<Vec<Value>> {
     }
     rows
 }
+
+#[test]
+#[ignore]
+fn test_concurrent_writes() {
+    struct ConnectionState {
+        conn: Arc<Connection>,
+        inserts: Vec<i64>,
+        current_statement: Option<Statement>,
+    }
+    let db = MvccTestDbNoConn::new_with_random_db();
+    let mut connections = Vec::new();
+    {
+        let conn = db.connect();
+        conn.execute("CREATE TABLE test (x)").unwrap();
+        conn.close().unwrap();
+    }
+    for i in 0..2 {
+        let conn = db.connect();
+        let mut inserts = ((100 * i)..(100 * (i + 1))).collect::<Vec<i64>>();
+        inserts.reverse();
+        connections.push(ConnectionState {
+            conn,
+            inserts,
+            current_statement: None,
+        });
+    }
+
+    loop {
+        let mut all_finished = true;
+        for conn in &mut connections {
+            if !conn.inserts.is_empty() && conn.current_statement.is_none() {
+                all_finished = false;
+                break;
+            }
+        }
+        for (conn_id, conn) in connections.iter_mut().enumerate() {
+            println!("connection {conn_id} inserts: {:?}", conn.inserts);
+            if conn.current_statement.is_none() && !conn.inserts.is_empty() {
+                let write = conn.inserts.pop().unwrap();
+                println!("inserting row {write} from connection {conn_id}");
+                conn.current_statement = Some(
+                    conn.conn
+                        .prepare(format!("INSERT INTO test (x) VALUES ({write})"))
+                        .unwrap(),
+                );
+            }
+            if conn.current_statement.is_none() {
+                continue;
+            }
+            let stmt = conn.current_statement.as_mut().unwrap();
+            match stmt.step().unwrap() {
+                // These should be the only possible cases under write concurrency.
+ // No rows because insert doesn't return + // No interrupt because insert doesn't interrupt + // No busy because insert in mvcc should be multi concurrent write + StepResult::Done => { + conn.current_statement = None; + } + StepResult::IO => { + // let's skip doing I/O here, we want to perform io only after all the statements are stepped + } + _ => { + unreachable!() + } + } + } + db.get_db().io.run_once().unwrap(); + + if all_finished { + break; + } + } +} diff --git a/core/numeric/mod.rs b/core/numeric/mod.rs index 7f7beac10..821c2f4b7 100644 --- a/core/numeric/mod.rs +++ b/core/numeric/mod.rs @@ -352,6 +352,16 @@ const VERTICAL_TAB: char = '\u{b}'; #[derive(Debug, Clone, Copy)] struct DoubleDouble(f64, f64); +impl DoubleDouble { + pub const E100: Self = DoubleDouble(1.0e+100, -1.590_289_110_975_991_8e83); + pub const E10: Self = DoubleDouble(1.0e+10, 0.0); + pub const E1: Self = DoubleDouble(1.0e+01, 0.0); + + pub const NEG_E100: Self = DoubleDouble(1.0e-100, -1.999_189_980_260_288_3e-117); + pub const NEG_E10: Self = DoubleDouble(1.0e-10, -3.643_219_731_549_774e-27); + pub const NEG_E1: Self = DoubleDouble(1.0e-01, -5.551_115_123_125_783e-18); +} + impl From for DoubleDouble { fn from(value: u64) -> Self { let r = value as f64; @@ -371,6 +381,16 @@ impl From for DoubleDouble { } } +impl From for u64 { + fn from(value: DoubleDouble) -> Self { + if value.1 < 0.0 { + value.0 as u64 - value.1.abs() as u64 + } else { + value.0 as u64 + value.1 as u64 + } + } +} + impl From for f64 { fn from(DoubleDouble(a, aa): DoubleDouble) -> Self { a + aa @@ -489,6 +509,10 @@ pub fn str_to_f64(input: impl AsRef) -> Option { } if input.next_if(|ch| matches!(ch, '.')).is_some() { + if matches!(input.peek(), Some('e' | 'E')) { + return None; + } + if had_digits || input.peek().is_some_and(char::is_ascii_digit) { is_fractional = true } @@ -539,28 +563,28 @@ pub fn str_to_f64(input: impl AsRef) -> Option { if exponent > 0 { while exponent >= 100 { exponent -= 100; - result *= DoubleDouble(1.0e+100, -1.590_289_110_975_991_8e83); + result *= DoubleDouble::E100; } while exponent >= 10 { exponent -= 10; - result *= DoubleDouble(1.0e+10, 0.0); + result *= DoubleDouble::E10; } while exponent >= 1 { exponent -= 1; - result *= DoubleDouble(1.0e+01, 0.0); + result *= DoubleDouble::E1; } } else { while exponent <= -100 { exponent += 100; - result *= DoubleDouble(1.0e-100, -1.999_189_980_260_288_3e-117); + result *= DoubleDouble::NEG_E100; } while exponent <= -10 { exponent += 10; - result *= DoubleDouble(1.0e-10, -3.643_219_731_549_774e-27); + result *= DoubleDouble::NEG_E10; } while exponent <= -1 { exponent += 1; - result *= DoubleDouble(1.0e-01, -5.551_115_123_125_783e-18); + result *= DoubleDouble::NEG_E1; } } @@ -573,3 +597,130 @@ pub fn str_to_f64(input: impl AsRef) -> Option { StrToF64::Decimal(result) }) } + +pub fn format_float(v: f64) -> String { + if v.is_nan() { + return "".to_string(); + } + + if v.is_infinite() { + return if v.is_sign_negative() { "-Inf" } else { "Inf" }.to_string(); + } + + if v == 0.0 { + return "0.0".to_string(); + } + + let negative = v < 0.0; + let mut d = DoubleDouble(v.abs(), 0.0); + let mut exp = 0; + + if d.0 > 9.223_372_036_854_775e18 { + while d.0 > 9.223_372_036_854_774e118 { + exp += 100; + d *= DoubleDouble::NEG_E100; + } + while d.0 > 9.223_372_036_854_774e28 { + exp += 10; + d *= DoubleDouble::NEG_E10; + } + while d.0 > 9.223_372_036_854_775e18 { + exp += 1; + d *= DoubleDouble::NEG_E1; + } + } else { + while d.0 < 9.223_372_036_854_775e-83 { + exp -= 100; + d *= 
DoubleDouble::E100; + } + while d.0 < 9.223_372_036_854_775e7 { + exp -= 10; + d *= DoubleDouble::E10; + } + while d.0 < 9.223_372_036_854_775e17 { + exp -= 1; + d *= DoubleDouble::E1; + } + } + + let v = u64::from(d); + + let mut digits = v.to_string().into_bytes(); + + let precision = 15; + + let mut decimal_pos = digits.len() as i32 + exp; + + 'out: { + if digits.len() > precision { + let round_up = digits[precision] >= b'5'; + digits.truncate(precision); + + if round_up { + for i in (0..precision).rev() { + if digits[i] < b'9' { + digits[i] += 1; + break 'out; + } + digits[i] = b'0'; + } + + digits.insert(0, b'1'); + decimal_pos += 1; + } + } + } + + while digits.len() > 1 && digits[digits.len() - 1] == b'0' { + digits.pop(); + } + + let exp = decimal_pos - 1; + + if (-4..=14).contains(&exp) { + format!( + "{}{}.{}{}", + if negative { "-" } else { Default::default() }, + if decimal_pos > 0 { + let zeroes = (decimal_pos - digits.len() as i32).max(0) as usize; + let digits = digits + .get(0..(decimal_pos.min(digits.len() as i32) as usize)) + .unwrap(); + (unsafe { str::from_utf8_unchecked(digits) }).to_owned() + &"0".repeat(zeroes) + } else { + "0".to_string() + }, + "0".repeat(decimal_pos.min(0).unsigned_abs() as usize), + digits + .get((decimal_pos.max(0) as usize)..) + .filter(|v| !v.is_empty()) + .map(|v| unsafe { str::from_utf8_unchecked(v) }) + .unwrap_or("0") + ) + } else { + format!( + "{}{}.{}e{}{:0width$}", + if negative { "-" } else { "" }, + digits.first().cloned().unwrap_or(b'0') as char, + digits + .get(1..) + .filter(|v| !v.is_empty()) + .map(|v| unsafe { str::from_utf8_unchecked(v) }) + .unwrap_or("0"), + if exp.is_positive() { "+" } else { "-" }, + exp.abs(), + width = if exp > 100 { 3 } else { 2 } + ) + } +} + +#[test] +fn test_decode_float() { + assert_eq!(format_float(9.93e-322), "9.93071948140905e-322"); + assert_eq!(format_float(9.93), "9.93"); + assert_eq!(format_float(0.093), "0.093"); + assert_eq!(format_float(-0.093), "-0.093"); + assert_eq!(format_float(0.0), "0.0"); + assert_eq!(format_float(4.94e-322), "4.94065645841247e-322"); + assert_eq!(format_float(-20228007.0), "-20228007.0"); +} diff --git a/core/schema.rs b/core/schema.rs index f984b9d26..9aef29c89 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1,8 +1,4 @@ use crate::incremental::view::IncrementalView; -use crate::types::IOResult; - -/// Type alias for the materialized views collection -pub type MaterializedViewsMap = HashMap>>; /// Simple view structure for non-materialized views #[derive(Debug, Clone)] @@ -23,12 +19,12 @@ use crate::translate::plan::SelectPlan; use crate::util::{ module_args_from_sql, module_name_from_sql, type_from_name, IOExt, UnparsedFromSqlIndex, }; -use crate::{return_if_io, LimboError, MvCursor, Pager, RefValue, SymbolTable, VirtualTable}; use crate::{util::normalize_ident, Result}; +use crate::{LimboError, MvCursor, Pager, RefValue, SymbolTable, VirtualTable}; use core::fmt; use std::cell::RefCell; use std::collections::hash_map::Entry; -use std::collections::{BTreeSet, HashMap}; +use std::collections::{BTreeSet, HashMap, HashSet}; use std::ops::Deref; use std::rc::Rc; use std::sync::Arc; @@ -42,11 +38,25 @@ use turso_parser::{ const SCHEMA_TABLE_NAME: &str = "sqlite_schema"; const SCHEMA_TABLE_NAME_ALT: &str = "sqlite_master"; +pub const DBSP_TABLE_PREFIX: &str = "__turso_internal_dbsp_state_"; + +/// Check if a table name refers to a system table that should be protected from direct writes +pub fn is_system_table(table_name: &str) -> bool { + let normalized = 
table_name.to_lowercase(); + normalized == SCHEMA_TABLE_NAME || normalized == SCHEMA_TABLE_NAME_ALT +} #[derive(Debug)] pub struct Schema { pub tables: HashMap>, - pub materialized_views: MaterializedViewsMap, + + /// Track which tables are actually materialized views + pub materialized_view_names: HashSet, + /// Store original SQL for materialized views (for .schema command) + pub materialized_view_sql: HashMap, + /// The incremental view objects (DBSP circuits) + pub incremental_views: HashMap>>, + pub views: ViewsMap, /// table_name to list of indexes for the table @@ -75,12 +85,16 @@ impl Schema { Arc::new(Table::Virtual(Arc::new((*function).clone()))), ); } - let materialized_views: MaterializedViewsMap = HashMap::new(); + let materialized_view_names = HashSet::new(); + let materialized_view_sql = HashMap::new(); + let incremental_views = HashMap::new(); let views: ViewsMap = HashMap::new(); let table_to_materialized_views: HashMap> = HashMap::new(); Self { tables, - materialized_views, + materialized_view_names, + materialized_view_sql, + incremental_views, views, indexes, has_indexes, @@ -96,41 +110,51 @@ impl Schema { .iter() .any(|idx| idx.1.iter().any(|i| i.name == name)) } - pub fn add_materialized_view(&mut self, view: IncrementalView) { + pub fn add_materialized_view(&mut self, view: IncrementalView, table: Arc, sql: String) { let name = normalize_ident(view.name()); - self.materialized_views + + // Add to tables (so it appears as a regular table) + self.tables.insert(name.clone(), table); + + // Track that this is a materialized view + self.materialized_view_names.insert(name.clone()); + self.materialized_view_sql.insert(name.clone(), sql); + + // Store the incremental view (DBSP circuit) + self.incremental_views .insert(name, Arc::new(Mutex::new(view))); } pub fn get_materialized_view(&self, name: &str) -> Option>> { let name = normalize_ident(name); - self.materialized_views.get(&name).cloned() + self.incremental_views.get(&name).cloned() + } + + pub fn is_materialized_view(&self, name: &str) -> bool { + let name = normalize_ident(name); + self.materialized_view_names.contains(&name) } pub fn remove_view(&mut self, name: &str) -> Result<()> { let name = normalize_ident(name); - // Check if we have both a regular view and a materialized view with the same name - // It should be impossible to have both - let has_regular_view = self.views.contains_key(&name); - let has_materialized_view = self.materialized_views.contains_key(&name); - - assert!( - !(has_regular_view && has_materialized_view), - "Found both regular view and materialized view with name: {name}" - ); - - if has_regular_view { + if self.views.contains_key(&name) { self.views.remove(&name); Ok(()) - } else if has_materialized_view { + } else if self.materialized_view_names.contains(&name) { + // Remove from tables + self.tables.remove(&name); + + // Remove from materialized view tracking + self.materialized_view_names.remove(&name); + self.materialized_view_sql.remove(&name); + self.incremental_views.remove(&name); + // Remove from table_to_materialized_views dependencies for views in self.table_to_materialized_views.values_mut() { views.retain(|v| v != &name); } - // Remove the materialized view itself - self.materialized_views.remove(&name); Ok(()) } else { Err(crate::LimboError::ParseError(format!( @@ -159,30 +183,6 @@ impl Schema { .unwrap_or_default() } - /// Get all materialized views that depend on a given table, skip normalizing ident. - /// We are basically assuming we already normalized the ident. 
- pub fn get_dependent_materialized_views_unnormalized( - &self, - table_name: &str, - ) -> Option<&Vec> { - self.table_to_materialized_views.get(table_name) - } - - /// Populate all materialized views by scanning their source tables - /// Returns IOResult to support async execution - pub fn populate_materialized_views( - &self, - conn: &Arc, - ) -> Result> { - for view in self.materialized_views.values() { - let mut view = view - .lock() - .map_err(|_| LimboError::InternalError("Failed to lock view".to_string()))?; - return_if_io!(view.populate_from_table(conn)); - } - Ok(IOResult::Done(())) - } - /// Add a regular (non-materialized) view pub fn add_view(&mut self, view: View) { let name = normalize_ident(&view.name); @@ -218,6 +218,12 @@ impl Schema { pub fn remove_table(&mut self, table_name: &str) { let name = normalize_ident(table_name); self.tables.remove(&name); + + // If this was a materialized view, also clean up the metadata + if self.materialized_view_names.remove(&name) { + self.incremental_views.remove(&name); + self.materialized_view_sql.remove(&name); + } } pub fn get_btree_table(&self, name: &str) -> Option> { @@ -291,8 +297,10 @@ impl Schema { let mut automatic_indices: HashMap> = HashMap::with_capacity(10); - // Collect materialized views for second pass to populate table_to_materialized_views mapping - let mut materialized_views_to_process: Vec<(String, Vec)> = Vec::new(); + // Store DBSP state table root pages: view_name -> dbsp_state_root_page + let mut dbsp_state_roots: HashMap = HashMap::new(); + // Store materialized view info (SQL and root page) for later creation + let mut materialized_view_info: HashMap = HashMap::new(); if matches!(pager.begin_read_tx()?, LimboResult::Busy) { return Err(LimboError::Busy); @@ -351,6 +359,18 @@ impl Schema { } let table = BTreeTable::from_sql(sql, root_page as usize)?; + + // Check if this is a DBSP state table + if table.name.starts_with(DBSP_TABLE_PREFIX) { + // Extract the view name from _dbsp_state_ + let view_name = table + .name + .strip_prefix(DBSP_TABLE_PREFIX) + .unwrap() + .to_string(); + dbsp_state_roots.insert(view_name, root_page as usize); + } + self.add_btree_table(Arc::new(table)); } "index" => { @@ -412,6 +432,14 @@ impl Schema { }; let name = name_text.as_str(); + // Get the root page (column 3) to determine if this is a materialized view + // Regular views have rootpage = 0, materialized views have rootpage != 0 + let root_page_value = record_cursor.get_value(&row, 3)?; + let RefValue::Integer(root_page_int) = root_page_value else { + return Err(LimboError::ConversionError("Expected integer value".into())); + }; + let root_page = root_page_int as usize; + let sql_value = record_cursor.get_value(&row, 4)?; let RefValue::Text(sql_text) = sql_value else { return Err(LimboError::ConversionError("Expected text value".into())); @@ -423,15 +451,12 @@ impl Schema { if let Ok(Some(Cmd::Stmt(stmt))) = parser.next_cmd() { match stmt { Stmt::CreateMaterializedView { .. 
} => { - // Create IncrementalView for materialized views - if let Ok(incremental_view) = IncrementalView::from_sql(sql, self) { - let referenced_tables = - incremental_view.get_referenced_table_names(); - let view_name = name.to_string(); - self.add_materialized_view(incremental_view); - materialized_views_to_process - .push((view_name, referenced_tables)); - } + // Store materialized view info for later creation + // We'll create the actual IncrementalView in a later pass + // when we have both the main root page and DBSP state root + let view_name = name.to_string(); + materialized_view_info + .insert(view_name, (sql.to_string(), root_page)); } Stmt::CreateView { view_name: _, @@ -475,14 +500,6 @@ impl Schema { pager.end_read_tx()?; - // Second pass: populate table_to_materialized_views mapping - for (view_name, referenced_tables) in materialized_views_to_process { - // Register this view as dependent on each referenced table - for table_name in referenced_tables { - self.add_materialized_view_dependency(&table_name, &view_name); - } - } - for unparsed_sql_from_index in from_sql_indexes { if !self.indexes_enabled() { self.table_set_has_index(&unparsed_sql_from_index.table_name); @@ -514,6 +531,39 @@ impl Schema { } } + // Third pass: Create materialized views now that we have both root pages + for (view_name, (sql, main_root)) in materialized_view_info { + // Look up the DBSP state root for this view - must exist for materialized views + let dbsp_state_root = dbsp_state_roots.get(&view_name).ok_or_else(|| { + LimboError::InternalError(format!( + "Materialized view {view_name} is missing its DBSP state table" + )) + })?; + + // Create the IncrementalView with both root pages + let incremental_view = + IncrementalView::from_sql(&sql, self, main_root, *dbsp_state_root)?; + let referenced_tables = incremental_view.get_referenced_table_names(); + + // Create a BTreeTable for the materialized view + let table = Arc::new(Table::BTree(Arc::new(BTreeTable { + name: view_name.clone(), + root_page: main_root, + columns: incremental_view.columns.clone(), + primary_key_columns: Vec::new(), + has_rowid: true, + is_strict: false, + unique_sets: None, + }))); + + self.add_materialized_view(incremental_view, table, sql); + + // Register dependencies + for table_name in referenced_tables { + self.add_materialized_view_dependency(&table_name, &view_name); + } + } + Ok(()) } } @@ -559,15 +609,19 @@ impl Clone for Schema { (name.clone(), indexes) }) .collect(); - let materialized_views = self - .materialized_views + let materialized_view_names = self.materialized_view_names.clone(); + let materialized_view_sql = self.materialized_view_sql.clone(); + let incremental_views = self + .incremental_views .iter() .map(|(name, view)| (name.clone(), view.clone())) .collect(); let views = self.views.clone(); Self { tables, - materialized_views, + materialized_view_names, + materialized_view_sql, + incremental_views, views, indexes, has_indexes: self.has_indexes.clone(), @@ -1268,16 +1322,14 @@ impl Affinity { } } - pub fn from_char(char: char) -> Result { + pub fn from_char(char: char) -> Self { match char { - SQLITE_AFF_INTEGER => Ok(Affinity::Integer), - SQLITE_AFF_TEXT => Ok(Affinity::Text), - SQLITE_AFF_NONE => Ok(Affinity::Blob), - SQLITE_AFF_REAL => Ok(Affinity::Real), - SQLITE_AFF_NUMERIC => Ok(Affinity::Numeric), - _ => Err(LimboError::InternalError(format!( - "Invalid affinity character: {char}" - ))), + SQLITE_AFF_INTEGER => Affinity::Integer, + SQLITE_AFF_TEXT => Affinity::Text, + SQLITE_AFF_NONE => 
@@ -1285,7 +1337,7 @@ impl Affinity {
        self.aff_mask() as u8
    }

-    pub fn from_char_code(code: u8) -> Result<Self> {
+    pub fn from_char_code(code: u8) -> Self {
        Self::from_char(code as char)
    }

diff --git a/core/state_machine.rs b/core/state_machine.rs
index 0e3f30816..0d776df10 100644
--- a/core/state_machine.rs
+++ b/core/state_machine.rs
@@ -27,6 +27,7 @@ pub trait StateTransition {
    fn is_finalized(&self) -> bool;
 }

+#[derive(Debug)]
 pub struct StateMachine<State> {
    state: State,
    is_finalized: bool,
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index 8e52ef3ba..9f90f9472 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -595,9 +595,9 @@ impl BTreeCursor {
            },
            overflow_state: OverflowState::Start,
            stack: PageStack {
-                current_page: Cell::new(-1),
-                node_states: RefCell::new([BTreeNodeState::default(); BTCURSOR_MAX_DEPTH + 1]),
-                stack: RefCell::new([const { None }; BTCURSOR_MAX_DEPTH + 1]),
+                current_page: -1,
+                node_states: [BTreeNodeState::default(); BTCURSOR_MAX_DEPTH + 1],
+                stack: [const { None }; BTCURSOR_MAX_DEPTH + 1],
            },
            reusable_immutable_record: RefCell::new(None),
            index_info: None,
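(Aside, not part of the diff: why the PageStack fields above lose their Cell/RefCell wrappers. A minimal sketch comparing the two styles; names are illustrative.)

use std::cell::Cell;

struct WithCell { current_page: Cell<i32> } // mutation through &self, checked at runtime
struct Plain { current_page: i32 }          // mutation through &mut self, checked at compile time

impl WithCell {
    fn advance(&self) { self.current_page.set(self.current_page.get() + 1); }
}
impl Plain {
    fn advance(&mut self) { self.current_page += 1; }
}

fn main() {
    let c = WithCell { current_page: Cell::new(-1) };
    c.advance();
    let mut p = Plain { current_page: -1 };
    p.advance();
    assert_eq!(c.current_page.get(), p.current_page);
}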
@@ -698,18 +698,24 @@ impl BTreeCursor {
    #[instrument(skip(self), level = Level::DEBUG, name = "prev")]
    fn get_prev_record(&mut self) -> Result<IOResult<bool>> {
        loop {
-            let page = self.stack.top();
-            let contents = page.get_contents();
-            let page_type = contents.page_type();
-            let is_index = page.is_index();
+            let (old_top_idx, page_type, is_index, is_leaf, cell_count) = {
+                let page = self.stack.top_ref();
+                let contents = page.get_contents();
+                (
+                    self.stack.current(),
+                    contents.page_type(),
+                    page.is_index(),
+                    contents.is_leaf(),
+                    contents.cell_count(),
+                )
+            };

-            let cell_count = contents.cell_count();
            let cell_idx = self.stack.current_cell_index();

            // If we are at the end of the page and we haven't just come back from the right child,
            // we now need to move to the rightmost child.
-            if self.stack.current_cell_index() == i32::MAX && !self.going_upwards {
-                let rightmost_pointer = contents.rightmost_pointer();
+            if cell_idx == i32::MAX && !self.going_upwards {
+                let rightmost_pointer = self.stack.top_ref().get_contents().rightmost_pointer();
                if let Some(rightmost_pointer) = rightmost_pointer {
                    let past_rightmost_pointer = cell_count as i32 + 1;
                    self.stack.set_cell_index(past_rightmost_pointer);
@@ -721,6 +727,7 @@ impl BTreeCursor {
                    continue;
                }
            }
+
            if cell_idx >= cell_count as i32 {
                self.stack.set_cell_index(cell_count as i32 - 1);
            } else if !self.stack.current_cell_index_less_than_min() {
@@ -755,7 +762,7 @@ impl BTreeCursor {
                // continue to next loop to get record from the new page
                continue;
            }
-            if contents.is_leaf() {
+            if is_leaf {
                return Ok(IOResult::Done(true));
            }

@@ -767,7 +774,11 @@ impl BTreeCursor {
            }

            let cell_idx = self.stack.current_cell_index() as usize;
-            let left_child_page = contents.cell_interior_read_left_child_page(cell_idx);
+            let left_child_page = self
+                .stack
+                .get_page_contents_at_level(old_top_idx)
+                .unwrap()
+                .cell_interior_read_left_child_page(cell_idx);

            if page_type == PageType::IndexInterior {
                // In backwards iteration, if we haven't just moved to this interior node from the
@@ -931,7 +942,7 @@ impl BTreeCursor {
            return self.continue_payload_overflow_with_offset(buffer, self.usable_space());
        }

-        let page = self.stack.top();
+        let page = self.stack.top_ref();
        let contents = page.get_contents();
        let cell_idx = self.stack.current_cell_index() as usize - 1;
@@ -1182,7 +1193,7 @@ impl BTreeCursor {
    /// Check if any ancestor pages still have cells to iterate.
    /// If not, traversing back up to parent is of no use because we are at the end of the tree.
    fn ancestor_pages_have_more_children(&self) -> bool {
-        let node_states = self.stack.node_states.borrow();
+        let node_states = self.stack.node_states;
        (0..self.stack.current())
            .rev()
            .any(|idx| !node_states[idx].is_at_end())
@@ -1202,11 +1213,24 @@ impl BTreeCursor {
                }
                None => return Ok(IOResult::Done(false)),
            }
+        } else if self.stack.current_page == -1 {
+            // This can happen in nested left joins. See:
+            // https://github.com/tursodatabase/turso/issues/2924
+            return Ok(IOResult::Done(false));
        }
        loop {
-            let mem_page = self.stack.top();
+            let mem_page = self.stack.top_ref();
            let contents = mem_page.get_contents();
+            let cell_idx = self.stack.current_cell_index();
            let cell_count = contents.cell_count();
+            let is_leaf = contents.is_leaf();
+            if cell_idx != -1 && is_leaf && cell_idx as usize + 1 < cell_count {
+                self.stack.advance();
+                return Ok(IOResult::Done(true));
+            }
+
+            let mem_page = mem_page.clone();
+            let contents = mem_page.get_contents();
            tracing::debug!(
                id = mem_page.get().id,
                cell = self.stack.current_cell_index(),
@@ -1264,15 +1288,15 @@ impl BTreeCursor {
            }

            turso_assert!(
-                cell_idx < contents.cell_count(),
+                cell_idx < cell_count,
                "cell index out of bounds: cell_idx={}, cell_count={}, page_type={:?} page_id={}",
                cell_idx,
-                contents.cell_count(),
+                cell_count,
                contents.page_type(),
                mem_page.get().id
            );

-            if contents.is_leaf() {
+            if is_leaf {
                return Ok(IOResult::Done(true));
            }
            if is_index && self.going_upwards {
@@ -1329,7 +1353,7 @@ impl BTreeCursor {
            MoveToRightState::Start => {
                if let Some(rightmost_page_id) = rightmost_page_id {
                    // If we know the rightmost page and are already on it, we can skip a seek.
-                    let current_page = self.stack.top();
+                    let current_page = self.stack.top_ref();
                    if current_page.get().id == *rightmost_page_id {
                        let contents = current_page.get_contents();
                        let cell_count = contents.cell_count();
@@ -1345,7 +1369,7 @@ impl BTreeCursor {
                }
            }
            MoveToRightState::ProcessPage => {
-                let mem_page = self.stack.top();
+                let mem_page = self.stack.top_ref();
                let page_idx = mem_page.get().id;
                let contents = mem_page.get_contents();
                if contents.is_leaf() {
@@ -1380,16 +1404,23 @@ impl BTreeCursor {
    #[instrument(skip(self), level = Level::DEBUG)]
    fn tablebtree_move_to(&mut self, rowid: i64, seek_op: SeekOp) -> Result<IOResult<()>> {
        loop {
-            let page = self.stack.top();
-            let contents = page.get_contents();
-            if contents.is_leaf() {
+            let (old_top_idx, is_leaf, cell_count) = {
+                let page = self.stack.top_ref();
+                let contents = page.get_contents();
+                (
+                    self.stack.current(),
+                    contents.is_leaf(),
+                    contents.cell_count(),
+                )
+            };
+
+            if is_leaf {
                self.seek_state = CursorSeekState::FoundLeaf {
                    eq_seen: Cell::new(false),
                };
                return Ok(IOResult::Done(()));
            }

-            let cell_count = contents.cell_count();
            if matches!(
                self.seek_state,
                CursorSeekState::Start | CursorSeekState::MovingBetweenPages { .. }
@@ -1425,8 +1456,11 @@ impl BTreeCursor {
            let max = max_cell_idx.get();
            if min > max {
                if let Some(nearest_matching_cell) = nearest_matching_cell.get() {
-                    let left_child_page =
-                        contents.cell_interior_read_left_child_page(nearest_matching_cell);
+                    let left_child_page = self
+                        .stack
+                        .get_page_contents_at_level(old_top_idx)
+                        .unwrap()
+                        .cell_interior_read_left_child_page(nearest_matching_cell);
                    self.stack.set_cell_index(nearest_matching_cell as i32);
                    let (mem_page, c) = self.read_page(left_child_page as usize)?;
                    self.stack.push(mem_page);
@@ -1439,7 +1473,12 @@ impl BTreeCursor {
                    continue;
                }
                self.stack.set_cell_index(cell_count as i32 + 1);
-                match contents.rightmost_pointer() {
+                match self
+                    .stack
+                    .get_page_contents_at_level(old_top_idx)
+                    .unwrap()
+                    .rightmost_pointer()
+                {
                    Some(right_most_pointer) => {
                        let (mem_page, c) = self.read_page(right_most_pointer as usize)?;
                        self.stack.push(mem_page);
@@ -1457,7 +1496,11 @@ impl BTreeCursor {
                }
            }
            let cur_cell_idx = (min + max) >> 1; // rustc generates extra insns for (min+max)/2 due to them being isize. we know min&max are >=0 here.
-            let cell_rowid = contents.cell_table_interior_read_rowid(cur_cell_idx as usize)?;
+            let cell_rowid = self
+                .stack
+                .get_page_contents_at_level(old_top_idx)
+                .unwrap()
+                .cell_table_interior_read_rowid(cur_cell_idx as usize)?;

            // in sqlite btrees left child pages have <= keys.
            // table btrees can have a duplicate rowid in the interior cell, so for example if we are looking for rowid=10,
            // and we find an interior cell with rowid=10, we need to move to the left page since (due to the <= rule of sqlite btrees)
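(Aside, not part of the diff: a worked example of the "<= keys" rule the comments above describe, using simplified (divider_rowid, left_child_page) pairs. A linear scan stands in for the binary search.)

fn choose_child(cells: &[(i64, u32)], rightmost: u32, target: i64) -> u32 {
    for &(divider, left_child) in cells {
        if target <= divider {
            // An exact divider match must still descend left: rowid == divider
            // may live in the left subtree under the <= rule.
            return left_child;
        }
    }
    rightmost
}

fn main() {
    let cells = [(10, 2), (20, 3)];
    assert_eq!(choose_child(&cells, 4, 10), 2); // the duplicate-rowid case from the comment
    assert_eq!(choose_child(&cells, 4, 15), 3);
    assert_eq!(choose_child(&cells, 4, 99), 4);
}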
@@ -1517,9 +1560,17 @@ impl BTreeCursor {
        let tie_breaker = get_tie_breaker_from_seek_op(cmp);

        loop {
-            let page = self.stack.top();
-            let contents = page.get_contents();
-            if contents.is_leaf() {
+            let (old_top_idx, is_leaf, cell_count) = {
+                let page = self.stack.top_ref();
+                let contents = page.get_contents();
+                (
+                    self.stack.current(),
+                    contents.is_leaf(),
+                    contents.cell_count(),
+                )
+            };
+
+            if is_leaf {
                let eq_seen = match &self.seek_state {
                    CursorSeekState::MovingBetweenPages { eq_seen } => eq_seen.get(),
                    _ => false,
@@ -1538,7 +1589,6 @@ impl BTreeCursor {
                CursorSeekState::MovingBetweenPages { eq_seen } => eq_seen.get(),
                _ => false,
            };
-            let cell_count = contents.cell_count();
            let min_cell_idx = Cell::new(0);
            let max_cell_idx = Cell::new(cell_count as isize - 1);
            let nearest_matching_cell = Cell::new(None);
@@ -1568,8 +1618,13 @@ impl BTreeCursor {
            let max = max_cell_idx.get();
            if min > max {
                let Some(leftmost_matching_cell) = nearest_matching_cell.get() else {
-                    self.stack.set_cell_index(contents.cell_count() as i32 + 1);
-                    match contents.rightmost_pointer() {
+                    self.stack.set_cell_index(cell_count as i32 + 1);
+                    match self
+                        .stack
+                        .get_page_contents_at_level(old_top_idx)
+                        .unwrap()
+                        .rightmost_pointer()
+                    {
                        Some(right_most_pointer) => {
                            let (mem_page, c) = self.read_page(right_most_pointer as usize)?;
                            self.stack.push(mem_page);
@@ -1586,8 +1641,11 @@ impl BTreeCursor {
                        }
                    }
                };
-                let matching_cell =
-                    contents.cell_get(leftmost_matching_cell, self.usable_space())?;
+                let matching_cell = self
+                    .stack
+                    .get_page_contents_at_level(old_top_idx)
+                    .unwrap()
+                    .cell_get(leftmost_matching_cell, self.usable_space())?;
                self.stack.set_cell_index(leftmost_matching_cell as i32);
                // we don't advance in case of forward iteration and index tree internal nodes because we will visit this node going up.
                // in backwards iteration, we must retreat because otherwise we would unnecessarily visit this node again.
@@ -1605,12 +1663,15 @@ impl BTreeCursor {
                    unreachable!("unexpected cell type: {:?}", matching_cell);
                };

-                turso_assert!(
-                    page.get().id != *left_child_page as usize,
-                    "corrupt: current page and left child page of cell {} are both {}",
-                    leftmost_matching_cell,
-                    page.get().id
-                );
+                {
+                    let page = self.stack.get_page_at_level(old_top_idx).unwrap();
+                    turso_assert!(
+                        page.get().id != *left_child_page as usize,
+                        "corrupt: current page and left child page of cell {} are both {}",
+                        leftmost_matching_cell,
+                        page.get().id
+                    );
+                }

                let (mem_page, c) = self.read_page(*left_child_page as usize)?;
                self.stack.push(mem_page);
@@ -1625,7 +1686,11 @@ impl BTreeCursor {
            let cur_cell_idx = (min + max) >> 1; // rustc generates extra insns for (min+max)/2 due to them being isize. we know min&max are >=0 here.
            self.stack.set_cell_index(cur_cell_idx as i32);

-            let cell = contents.cell_get(cur_cell_idx as usize, self.usable_space())?;
+            let cell = self
+                .stack
+                .get_page_contents_at_level(old_top_idx)
+                .unwrap()
+                .cell_get(cur_cell_idx as usize, self.usable_space())?;
            let BTreeCell::IndexInteriorCell(IndexInteriorCell {
                payload,
                payload_size,
@@ -1737,7 +1802,7 @@ impl BTreeCursor {
        ) {
            // No need for another move_to_root. Move_to already moves to root
            return_if_io!(self.move_to(SeekKey::TableRowId(rowid), seek_op));
-            let page = self.stack.top();
+            let page = self.stack.top_ref();
            let contents = page.get_contents();
            turso_assert!(
                contents.is_leaf(),
@@ -1779,7 +1844,7 @@ impl BTreeCursor {
            unreachable!("we must be in a leaf binary search state");
        };

-        let page = self.stack.top();
+        let page = self.stack.top_ref();
        let contents = page.get_contents();

        loop {
@@ -1900,7 +1965,7 @@ impl BTreeCursor {
            );
        };
        let eq_seen = eq_seen.get();
-        let page = self.stack.top();
+        let page = self.stack.top_ref();
        let contents = page.get_contents();
        let cell_count = contents.cell_count();
@@ -1941,8 +2006,7 @@ impl BTreeCursor {
            );
        };

-        let page = self.stack.top();
-        let contents = page.get_contents();
+        let old_top_idx = self.stack.current();

        let iter_dir = seek_op.iteration_direction();
@@ -1958,7 +2022,13 @@ impl BTreeCursor {
                // set cursor to the position where which would hold the op-boundary if it were present
                let target_cell = target_cell_when_not_found.get();
                self.stack.set_cell_index(target_cell);
-                let has_record = target_cell >= 0 && target_cell < contents.cell_count() as i32;
+                let has_record = target_cell >= 0
+                    && target_cell
+                        < self
+                            .stack
+                            .get_page_contents_at_level(old_top_idx)
+                            .unwrap()
+                            .cell_count() as i32;
                self.has_record.set(has_record);

                // Similar logic as in tablebtree_seek(), but for indexes.
@@ -1974,7 +2044,11 @@ impl BTreeCursor {
            let cur_cell_idx = (min + max) >> 1; // rustc generates extra insns for (min+max)/2 due to them being isize. we know min&max are >=0 here.
            self.stack.set_cell_index(cur_cell_idx as i32);

-            let cell = contents.cell_get(cur_cell_idx as usize, self.usable_space())?;
+            let cell = self
+                .stack
+                .get_page_contents_at_level(old_top_idx)
+                .unwrap()
+                .cell_get(cur_cell_idx as usize, self.usable_space())?;
            let BTreeCell::IndexLeafCell(IndexLeafCell {
                payload,
                first_overflow_page,
@@ -2173,7 +2247,7 @@ impl BTreeCursor {
                    tracing::debug!("TableLeafCell: found exact match with cell_idx={cell_idx}, overwriting");
                    self.has_record.set(true);
                    *write_state = WriteState::Overwrite {
-                        page: page.clone(),
+                        page,
                        cell_idx,
                        state: Some(OverwriteCellState::AllocatePayload),
                    };
@@ -2197,7 +2271,7 @@ impl BTreeCursor {
                    panic!("expected write state");
                };
                *write_state = WriteState::Overwrite {
-                    page: page.clone(),
+                    page,
                    cell_idx,
                    state: Some(OverwriteCellState::AllocatePayload),
                };
@@ -2217,7 +2291,7 @@ impl BTreeCursor {
                    panic!("expected write state");
                };
                *write_state = WriteState::Insert {
-                    page: page.clone(),
+                    page,
                    cell_idx,
                    new_payload: Vec::with_capacity(record_values.len() + 4),
                    fill_cell_payload_state: FillCellPayloadState::Start,
@@ -2231,7 +2305,7 @@ impl BTreeCursor {
                ref mut fill_cell_payload_state,
            } => {
                return_if_io!(fill_cell_payload(
-                    page.clone(),
+                    page,
                    bkey.maybe_rowid(),
                    new_payload,
                    *cell_idx,
@@ -2277,7 +2351,7 @@ impl BTreeCursor {
                let mut state = state.take().expect("state should be present");
                let cell_idx = *cell_idx;
                if let IOResult::IO(io) =
                    self.overwrite_cell(&page, cell_idx, record, &mut state)?
                {
                    let CursorState::Write(write_state) = &mut self.state else {
                        panic!("expected write state");
                    };
@@ -2354,7 +2428,7 @@ impl BTreeCursor {
                    balance_info.borrow().is_none(),
                    "BalanceInfo should be empty on start"
                );
-                let current_page = self.stack.top();
+                let current_page = self.stack.top_ref();
                let next_balance_depth = balance_ancestor_at_depth.unwrap_or(self.stack.current());

                {
@@ -2420,15 +2494,23 @@ impl BTreeCursor {
            BalanceSubState::NonRootPickSiblings => {
                // Since we are going to change the btree structure, let's forget our cached knowledge of the rightmost page.
                let _ = self.move_to_right_state.1.take();
-                let parent_page = self.stack.top();
-                let parent_contents = parent_page.get_contents();
-                let page_type = parent_contents.page_type();
+
+                let (parent_page_idx, page_type, cell_count, over_cell_count) = {
+                    let parent_page = self.stack.top_ref();
+                    let parent_contents = parent_page.get_contents();
+                    (
+                        self.stack.current(),
+                        parent_contents.page_type(),
+                        parent_contents.cell_count(),
+                        parent_contents.overflow_cells.len(),
+                    )
+                };
+
                turso_assert!(
                    matches!(page_type, PageType::IndexInterior | PageType::TableInterior),
                    "expected index or table interior page"
                );
-                let number_of_cells_in_parent =
-                    parent_contents.cell_count() + parent_contents.overflow_cells.len();
+                let number_of_cells_in_parent = cell_count + over_cell_count;

                // If `seek` moved to rightmost page, cell index will be out of bounds. Meaning cell_count+1.
                // In any other case, `seek` will stay in the correct index.
@@ -2436,7 +2518,11 @@ impl BTreeCursor {
                    self.stack.current_cell_index() as usize == number_of_cells_in_parent + 1;
                if past_rightmost_pointer {
                    self.stack.retreat();
-                } else if !parent_contents.overflow_cells.is_empty() {
+                }
+
+                let parent_page = self.stack.get_page_at_level(parent_page_idx).unwrap();
+                let parent_contents = parent_page.get_contents();
+                if !past_rightmost_pointer && over_cell_count > 0 {
                    // The ONLY way we can have an overflow cell in the parent is if we replaced an interior cell from a cell in the child, and that replacement did not fit.
                    // This can only happen on index btrees.
                    if matches!(page_type, PageType::IndexInterior) {
@@ -2454,7 +2540,7 @@ impl BTreeCursor {
                        overflow_cell.index
                    );
                }
-                self.pager.add_dirty(&parent_page);
+                self.pager.add_dirty(parent_page);

                let parent_contents = parent_page.get_contents();
                let page_to_balance_idx = self.stack.current_cell_index() as usize;
@@ -2655,7 +2741,7 @@ impl BTreeCursor {
                    }
                }
                // Start balancing.
-                let parent_page = self.stack.top();
+                let parent_page = self.stack.top_ref();
                let parent_contents = parent_page.get_contents();
                let parent_is_root = !self.stack.has_parent();
@@ -3488,14 +3574,13 @@ impl BTreeCursor {
                            ..first_child_contents.offset + header_and_pointer_size],
                    );

-                    self.stack.set_cell_index(0); // reset cell index, top is already parent
                    sibling_count_new -= 1; // decrease sibling count for debugging and free at the end
                    assert!(sibling_count_new < balance_info.sibling_count);
                }

                #[cfg(debug_assertions)]
                BTreeCursor::post_balance_non_root_validation(
-                    &parent_page,
+                    parent_page,
                    balance_info,
                    parent_contents,
                    pages_to_balance_new,
@@ -3506,6 +3591,12 @@ impl BTreeCursor {
                    right_page_id,
                    usable_space,
                );
+
+                // Balance-shallower case
+                if sibling_count_new == 0 {
+                    self.stack.set_cell_index(0); // reset cell index, top is already parent
+                }
+
                *sub_state = BalanceSubState::FreePages {
                    curr_page: sibling_count_new,
                    sibling_count_new,
@@ -3994,14 +4085,9 @@ impl BTreeCursor {
        // Since we are going to change the btree structure, let's forget our cached knowledge of the rightmost page.
        let _ = self.move_to_right_state.1.take();

-        let is_page_1 = {
-            let current_root = self.stack.top();
-            current_root.get().id == 1
-        };
-
-        let offset = if is_page_1 { DatabaseHeader::SIZE } else { 0 };
-        let root = self.stack.top();
+        let root = self.stack.top_ref();
+        let is_page_1 = root.get().id == 1;
+        let offset = if is_page_1 { DatabaseHeader::SIZE } else { 0 };
        let root_contents = root.get_contents();

        // FIXME: handle page cache is full
        // FIXME: remove sync IO hack
@@ -4065,7 +4151,7 @@ impl BTreeCursor {
        root_contents.overflow_cells.clear();
        self.root_page = root.get().id;
        self.stack.clear();
-        self.stack.push(root.clone());
+        self.stack.push(root);
        self.stack.set_cell_index(0); // leave parent pointing at the rightmost pointer (in this case 0, as there are no cells), since we will be balancing the rightmost child page.
        self.stack.push(child.clone());

        Ok(IOResult::Done(()))
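(Aside, not part of the diff: a toy model of the stack state that the push/set_cell_index/push sequence above establishes after splitting the root; types are illustrative.)

struct ToyStack { pages: Vec<u32>, cell_idx: Vec<i32> }

impl ToyStack {
    fn push(&mut self, id: u32) { self.pages.push(id); self.cell_idx.push(-1); }
    fn set_cell_index(&mut self, i: i32) { *self.cell_idx.last_mut().unwrap() = i; }
}

fn main() {
    let mut s = ToyStack { pages: vec![], cell_idx: vec![] };
    s.push(1);           // root
    s.set_cell_index(0); // parent parked at its rightmost pointer (no cells yet)
    s.push(7);           // fresh child is the balancing target
    assert_eq!(s.pages, [1, 7]);
    assert_eq!(s.cell_idx, [0, -1]);
}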
@@ -4090,7 +4176,7 @@ impl BTreeCursor {
                }
            }
            SeekEndState::ProcessPage => {
-                let mem_page = self.stack.top();
+                let mem_page = self.stack.top_ref();
                let contents = mem_page.get_contents();
                if contents.is_leaf() {
                    // set cursor just past the last cell to append
@@ -4192,7 +4278,6 @@ impl BTreeCursor {
        if self.valid_state == CursorValidState::Invalid {
            return Ok(IOResult::Done(false));
        }
-
        loop {
            match self.advance_state {
                AdvanceState::Start => {
@@ -4203,7 +4288,6 @@ impl BTreeCursor {
                    let cursor_has_record = return_if_io!(self.get_next_record());
                    self.has_record.replace(cursor_has_record);
                    self.invalidate_record();
-                    self.advance_state = AdvanceState::Start;
                    return Ok(IOResult::Done(cursor_has_record));
                }
            }
@@ -4231,7 +4315,6 @@ impl BTreeCursor {
                    let cursor_has_record = return_if_io!(self.get_prev_record());
                    self.has_record.replace(cursor_has_record);
                    self.invalidate_record();
-                    self.advance_state = AdvanceState::Start;
                    return Ok(IOResult::Done(cursor_has_record));
                }
            }
@@ -4254,7 +4337,7 @@ impl BTreeCursor {
            return Ok(IOResult::Done(None));
        }
        if self.has_record.get() {
-            let page = self.stack.top();
+            let page = self.stack.top_ref();
            let contents = page.get_contents();
            let page_type = contents.page_type();
            if page_type.is_table() {
@@ -4327,7 +4410,7 @@ impl BTreeCursor {
            return Ok(IOResult::Done(Some(record_ref)));
        }

-        let page = self.stack.top();
+        let page = self.stack.top_ref();
        let contents = page.get_contents();
        let cell_idx = self.stack.current_cell_index();
        let cell = contents.cell_get(cell_idx as usize, self.usable_space())?;
@@ -4432,8 +4515,8 @@ impl BTreeCursor {

        match delete_state {
            DeleteState::Start => {
-                let page = self.stack.top();
-                self.pager.add_dirty(&page);
+                let page = self.stack.top_ref();
+                self.pager.add_dirty(page);
                if matches!(
                    page.get_contents().page_type(),
                    PageType::TableLeaf | PageType::TableInterior
@@ -4454,7 +4537,7 @@ impl BTreeCursor {
                // FIXME: skip this work if we determine deletion wont result in balancing
                // Right now we calculate the key every time for simplicity/debugging
                // since it won't affect correctness which is more important
-                let page = self.stack.top();
+                let page = self.stack.top_ref();
                let target_key = if page.is_index() {
                    let record = match return_if_io!(self.record()) {
                        Some(record) => record.clone(),
@@ -4490,7 +4573,7 @@ impl BTreeCursor {
            DeleteState::FindCell {
                post_balancing_seek_key,
            } => {
-                let page = self.stack.top();
+                let page = self.stack.top_ref();
                let cell_idx = self.stack.current_cell_index() as usize;
                let contents = page.get_contents();
                if cell_idx >= contents.cell_count() {
@@ -4537,7 +4620,7 @@ impl BTreeCursor {
                    unreachable!("expected clear overflow pages state");
                };

-                let page = self.stack.top();
+                let page = self.stack.top_ref();
                let contents = page.get_contents();

                if !contents.is_leaf() {
@@ -4586,12 +4669,11 @@ impl BTreeCursor {
                // Ensure we keep the parent page at the same position as before the replacement.
                self.stack
                    .node_states
-                    .borrow_mut()
                    .get_mut(btree_depth)
                    .expect("parent page should be on the stack")
                    .cell_idx = cell_idx as i32;

                let (cell_payload, leaf_cell_idx) = {
-                    let leaf_page = self.stack.top();
+                    let leaf_page = self.stack.top_ref();
                    let leaf_contents = leaf_page.get_contents();
                    assert!(leaf_contents.is_leaf());
                    assert!(leaf_contents.cell_count() > 0);
@@ -4628,10 +4710,10 @@ impl BTreeCursor {
                    (cell_payload, leaf_cell_idx)
                };

-                let leaf_page = self.stack.top();
+                let leaf_page = self.stack.top_ref();

                self.pager.add_dirty(page);
-                self.pager.add_dirty(&leaf_page);
+                self.pager.add_dirty(leaf_page);

                // Step 2: Replace the cell in the parent (interior) page.
                {
@@ -4666,7 +4748,7 @@ impl BTreeCursor {
            }

            DeleteState::CheckNeedsBalancing { btree_depth, .. } => {
-                let page = self.stack.top();
+                let page = self.stack.top_ref();

                // Check if either the leaf page we took the replacement cell from underflows, or if the interior page we inserted it into overflows OR underflows.
                // If the latter is true, we must always balance that level regardless of whether the leaf page (or any ancestor pages in between) need balancing.
@@ -4903,7 +4985,7 @@ impl BTreeCursor {
                    destroy_info.state = DestroyState::LoadPage;
                }
                DestroyState::LoadPage => {
-                    let _page = self.stack.top();
+                    let _page = self.stack.top_ref();

                    let destroy_info = self
                        .state
@@ -4912,9 +4994,8 @@ impl BTreeCursor {
                    destroy_info.state = DestroyState::ProcessPage;
                }
                DestroyState::ProcessPage => {
-                    let page = self.stack.top();
-                    assert!(page.is_loaded()); // page should be loaded at this time
                    self.stack.advance();
+                    let page = self.stack.top_ref();
                    let contents = page.get_contents();
                    let cell_idx = self.stack.current_cell_index();
@@ -5063,7 +5144,7 @@ impl BTreeCursor {
    pub fn overwrite_cell(
        &mut self,
-        page: PageRef,
+        page: &PageRef,
        cell_idx: usize,
        record: &ImmutableRecord,
        state: &mut OverwriteCellState,
@@ -5089,7 +5170,7 @@ impl BTreeCursor {
            } => {
                {
                    return_if_io!(fill_cell_payload(
-                        page.clone(),
+                        page,
                        *rowid,
                        new_payload,
                        cell_idx,
@@ -5149,7 +5230,8 @@ impl BTreeCursor {
    }

    fn get_immutable_record_or_create(&self) -> std::cell::RefMut<'_, Option<ImmutableRecord>> {
-        if self.reusable_immutable_record.borrow().is_none() {
+        let mut reusable_immutable_record = self.reusable_immutable_record.borrow_mut();
+        if reusable_immutable_record.is_none() {
            let page_size = self
                .pager
                .page_size
@@ -5157,9 +5239,9 @@ impl BTreeCursor {
                .expect("page size is not set")
                .get();
            let record = ImmutableRecord::new(page_size as usize);
-            self.reusable_immutable_record.replace(Some(record));
+            reusable_immutable_record.replace(record);
        }
-        self.reusable_immutable_record.borrow_mut()
+        reusable_immutable_record
    }

    fn get_immutable_record(&self) -> std::cell::RefMut<'_, Option<ImmutableRecord>> {
@@ -5193,8 +5275,8 @@ impl BTreeCursor {
                }
            }
            CountState::Loop => {
-                mem_page = self.stack.top();
-                turso_assert!(mem_page.is_loaded(), "page should be loaded");
+                self.stack.advance();
+                mem_page = self.stack.top_ref();
                contents = mem_page.get_contents();

                /* If this is a leaf page or the tree is not an int-key tree, then
@@ -5205,7 +5287,6 @@ impl BTreeCursor {
                    self.count += contents.cell_count();
                }

-                self.stack.advance();
                let cell_idx = self.stack.current_cell_index() as usize;

                // Second condition is necessary in case we return if the page is locked in the loop below
@@ -5224,7 +5305,7 @@ impl BTreeCursor {

                        // Move to parent
                        self.stack.pop();
-                        mem_page = self.stack.top();
+                        mem_page = self.stack.top_ref();
                        turso_assert!(mem_page.is_loaded(), "page should be loaded");
                        contents = mem_page.get_contents();
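(Aside, not part of the diff: the get_immutable_record_or_create rewrite above holds a single borrow_mut for the whole call instead of borrow() followed by replace(). A minimal standalone sketch of the same pattern.)

use std::cell::{RefCell, RefMut};

fn get_or_create(slot: &RefCell<Option<String>>) -> RefMut<'_, Option<String>> {
    // One mutable borrow up front; initialize through it if the slot is empty.
    let mut guard = slot.borrow_mut();
    if guard.is_none() {
        guard.replace(String::from("fresh record"));
    }
    guard
}

fn main() {
    let slot = RefCell::new(None);
    assert_eq!(get_or_create(&slot).as_deref(), Some("fresh record"));
}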
@@ -5884,9 +5965,9 @@ impl CoverageChecker {
 /// the parent. Using current_page + 1 or higher is undefined behaviour.
 struct PageStack {
    /// Pointer to the current page being consumed
-    current_page: Cell<i32>,
+    current_page: i32,
    /// List of pages in the stack. Root page will be in index 0
-    pub stack: RefCell<[Option<PageRef>; BTCURSOR_MAX_DEPTH + 1]>,
+    pub stack: [Option<PageRef>; BTCURSOR_MAX_DEPTH + 1],
    /// List of cell indices in the stack.
    /// node_states[current_page] is the current cell index being consumed. Similarly
    /// node_states[current_page-1] is the cell index of the parent of the current page
    ///
    /// There are two points that need special attention:
    /// If node_states[current_page] = -1, it indicates that the current iteration has reached the start of the current_page
    /// If node_states[current_page] = `cell_count`, it means that the current iteration has reached the end of the current_page
-    node_states: RefCell<[BTreeNodeState; BTCURSOR_MAX_DEPTH + 1]>,
+    node_states: [BTreeNodeState; BTCURSOR_MAX_DEPTH + 1],
 }

 impl PageStack {
-    fn increment_current(&self) {
-        self.current_page.set(self.current_page.get() + 1);
-    }
-    fn decrement_current(&self) {
-        assert!(self.current_page.get() > 0);
-        self.current_page.set(self.current_page.get() - 1);
-    }
    /// Push a new page onto the stack.
    /// This effectively means traversing to a child page.
    #[instrument(skip_all, level = Level::DEBUG, name = "pagestack::push")]
-    fn _push(&self, page: PageRef, starting_cell_idx: i32) {
-        tracing::trace!(
-            current = self.current_page.get(),
-            new_page_id = page.get().id,
-        );
+    fn _push(&mut self, page: PageRef, starting_cell_idx: i32) {
+        tracing::trace!(current = self.current_page, new_page_id = page.get().id,);
        'validate: {
-            let current = self.current_page.get();
+            let current = self.current_page;
            if current == -1 {
                break 'validate;
            }
-            let stack = self.stack.borrow();
-            let current_top = stack[current as usize].as_ref();
+            let current_top = self.stack[current as usize].as_ref();
            if let Some(current_top) = current_top {
                turso_assert!(
                    current_top.get().id != page.get().id,
@@ -5929,19 +5999,19 @@ impl PageStack {
            }
        }
        self.populate_parent_cell_count();
-        self.increment_current();
-        let current = self.current_page.get();
+        self.current_page += 1;
+        assert!(self.current_page >= 0);
+        let current = self.current_page as usize;
        assert!(
-            current < BTCURSOR_MAX_DEPTH as i32,
+            current < BTCURSOR_MAX_DEPTH,
            "corrupted database, stack is bigger than expected"
        );
-        assert!(current >= 0);

        // Pin the page to prevent it from being evicted while on the stack
        page.pin();
-        self.stack.borrow_mut()[current as usize] = Some(page);
-        self.node_states.borrow_mut()[current as usize] = BTreeNodeState {
+        self.stack[current] = Some(page);
+        self.node_states[current] = BTreeNodeState {
            cell_idx: starting_cell_idx,
            cell_count: None, // we don't know the cell count yet, so we set it to None. any code pushing a child page onto the stack MUST set the parent page's cell_count.
        };
@@ -5953,14 +6023,13 @@ impl PageStack {
    ///
    /// This rests on the assumption that the parent page is already in memory whenever a child is pushed onto the stack.
    /// We currently ensure this by pinning all the pages on [PageStack] to the page cache so that they cannot be evicted.
-    fn populate_parent_cell_count(&self) {
-        let stack_empty = self.current_page.get() == -1;
+    fn populate_parent_cell_count(&mut self) {
+        let stack_empty = self.current_page == -1;
        if stack_empty {
            return;
        }
        let current = self.current();
-        let stack = self.stack.borrow();
-        let page = stack[current].as_ref().unwrap();
+        let page = self.stack[current].as_ref().unwrap();
        turso_assert!(
            page.is_pinned(),
            "parent page {} is not pinned",
@@ -5973,59 +6042,64 @@ impl PageStack {
        );
        let contents = page.get_contents();
        let cell_count = contents.cell_count() as i32;
-        self.node_states.borrow_mut()[current].cell_count = Some(cell_count);
+        self.node_states[current].cell_count = Some(cell_count);
    }

-    fn push(&self, page: PageRef) {
+    fn push(&mut self, page: PageRef) {
        self._push(page, -1);
    }

-    fn push_backwards(&self, page: PageRef) {
+    fn push_backwards(&mut self, page: PageRef) {
        self._push(page, i32::MAX);
    }

    /// Pop a page off the stack.
    /// This effectively means traversing back up to a parent page.
    #[instrument(skip_all, level = Level::DEBUG, name = "pagestack::pop")]
-    fn pop(&self) {
-        let current = self.current_page.get();
+    fn pop(&mut self) {
+        let current = self.current_page;
        assert!(current >= 0);
        tracing::trace!(current);
+        let current = current as usize;

        // Unpin the page before removing it from the stack
-        if let Some(page) = &self.stack.borrow()[current as usize] {
+        if let Some(page) = &self.stack[current] {
            page.unpin();
        }
-        self.node_states.borrow_mut()[current as usize] = BTreeNodeState::default();
-        self.stack.borrow_mut()[current as usize] = None;
-        self.decrement_current();
+        assert!(current > 0);
+        self.node_states[current] = BTreeNodeState::default();
+        self.stack[current] = None;
+        self.current_page -= 1;
    }

    /// Get the top page on the stack.
    /// This is the page that is currently being traversed.
-    #[instrument(skip(self), level = Level::DEBUG, name = "pagestack::top", )]
    fn top(&self) -> Arc<Page> {
-        let page = self.stack.borrow()[self.current()]
-            .as_ref()
-            .unwrap()
-            .clone();
-        tracing::trace!(current = self.current(), page_id = page.get().id);
+        let current = self.current();
+        let page = self.stack[current].clone().unwrap();
+        turso_assert!(page.is_loaded(), "page should be loaded");
+        page
+    }
+
+    fn top_ref(&self) -> &Arc<Page> {
+        let current = self.current();
+        let page = self.stack[current].as_ref().unwrap();
        turso_assert!(page.is_loaded(), "page should be loaded");
        page
    }

    /// Current page pointer being used
+    #[inline(always)]
    fn current(&self) -> usize {
-        let current = self.current_page.get() as usize;
-        assert!(self.current_page.get() >= 0);
-        current
+        assert!(self.current_page >= 0);
+        self.current_page as usize
    }

    /// Cell index of the current page
    fn current_cell_index(&self) -> i32 {
        let current = self.current();
-        self.node_states.borrow()[current].cell_idx
+        self.node_states[current].cell_idx
    }

    /// Check if the current cell index is less than 0.
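(Aside, not part of the diff: the cell-index sentinels that _push and push_backwards establish, matching the node_states doc comment: -1 means "before the first cell", and i32::MAX stands in for "past the end" until the cell count is known.)

#[derive(Clone, Copy)]
enum Direction { Forward, Backwards }

fn starting_cell_idx(dir: Direction) -> i32 {
    match dir {
        Direction::Forward => -1,         // push(): the first advance lands on cell 0
        Direction::Backwards => i32::MAX, // push_backwards(): clamped to the last cell later
    }
}

fn main() {
    assert_eq!(starting_cell_idx(Direction::Forward), -1);
    assert_eq!(starting_cell_idx(Direction::Backwards), i32::MAX);
}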
@@ -6037,59 +6111,55 @@ impl PageStack {
    /// Advance the current cell index of the current page to the next cell.
    /// We usually advance after going traversing a new page
-    #[instrument(skip(self), level = Level::DEBUG, name = "pagestack::advance",)]
-    fn advance(&self) {
+    #[inline(always)]
+    fn advance(&mut self) {
        let current = self.current();
-        tracing::trace!(
-            curr_cell_index = self.node_states.borrow()[current].cell_idx,
-            node_states = ?self.node_states.borrow().iter().map(|state| state.cell_idx).collect::<Vec<_>>(),
-        );
-        self.node_states.borrow_mut()[current].cell_idx += 1;
+        self.node_states[current].cell_idx += 1;
    }

    #[instrument(skip(self), level = Level::DEBUG, name = "pagestack::retreat")]
-    fn retreat(&self) {
+    fn retreat(&mut self) {
        let current = self.current();
        tracing::trace!(
-            curr_cell_index = self.node_states.borrow()[current].cell_idx,
-            node_states = ?self.node_states.borrow().iter().map(|state| state.cell_idx).collect::<Vec<_>>(),
+            curr_cell_index = self.node_states[current].cell_idx,
+            node_states = ?self.node_states.iter().map(|state| state.cell_idx).collect::<Vec<_>>(),
        );
-        self.node_states.borrow_mut()[current].cell_idx -= 1;
+        self.node_states[current].cell_idx -= 1;
    }

-    fn set_cell_index(&self, idx: i32) {
+    fn set_cell_index(&mut self, idx: i32) {
        let current = self.current();
-        self.node_states.borrow_mut()[current].cell_idx = idx;
+        self.node_states[current].cell_idx = idx;
    }

    fn has_parent(&self) -> bool {
-        self.current_page.get() > 0
+        self.current_page > 0
    }

    /// Get a page at a specific level in the stack (0 = root, 1 = first child, etc.)
-    fn get_page_at_level(&self, level: usize) -> Option<PageRef> {
-        let stack = self.stack.borrow();
-        if level < stack.len() {
-            stack[level].clone()
+    fn get_page_at_level(&self, level: usize) -> Option<&PageRef> {
+        if level < self.stack.len() {
+            self.stack[level].as_ref()
        } else {
            None
        }
    }

-    fn unpin_all_if_pinned(&self) {
-        self.stack
-            .borrow_mut()
-            .iter_mut()
-            .flatten()
-            .for_each(|page| {
-                let _ = page.try_unpin();
-            });
+    fn get_page_contents_at_level(&self, level: usize) -> Option<&mut PageContent> {
+        self.get_page_at_level(level)
+            .map(|page| page.get_contents())
    }

-    fn clear(&self) {
+    fn unpin_all_if_pinned(&mut self) {
+        self.stack.iter_mut().flatten().for_each(|page| {
+            let _ = page.try_unpin();
+        });
+    }
+
+    fn clear(&mut self) {
        self.unpin_all_if_pinned();
-        self.current_page.set(-1);
+        self.current_page = -1;
    }
 }

@@ -7141,7 +7211,7 @@ pub enum CopyDataState {
 /// may require I/O.
 #[allow(clippy::too_many_arguments)]
 fn fill_cell_payload(
-    page: PageRef,
+    page: &PageRef,
    int_key: Option<i64>,
    cell_payload: &mut Vec<u8>,
    cell_idx: usize,
@@ -7376,7 +7446,7 @@ mod tests {
        schema::IndexColumn,
        storage::{
            database::DatabaseFile,
-            page_cache::DumbLruPageCache,
+            page_cache::PageCache,
            pager::{AtomicDbState, DbState},
            sqlite3_ondisk::PageSize,
        },
@@ -7453,7 +7523,7 @@ mod tests {
        run_until_done(
            || {
                fill_cell_payload(
-                    page.clone(),
+                    &page,
                    Some(id as i64),
                    &mut payload,
                    pos,
@@ -8631,7 +8701,7 @@ mod tests {
            db_file,
            Some(wal),
            io,
-            Arc::new(parking_lot::RwLock::new(DumbLruPageCache::new(10))),
+            Arc::new(parking_lot::RwLock::new(PageCache::new(10))),
            buffer_pool,
            Arc::new(AtomicDbState::new(DbState::Uninitialized)),
            Arc::new(Mutex::new(())),
@@ -9031,7 +9101,7 @@ mod tests {
        run_until_done(
            || {
                fill_cell_payload(
-                    page.clone(),
+                    &page,
                    Some(i as i64),
                    &mut payload,
                    cell_idx,
@@ -9113,7 +9183,7 @@ mod tests {
        run_until_done(
            || {
                fill_cell_payload(
-                    page.clone(),
+                    &page,
                    Some(i),
                    &mut payload,
                    cell_idx,
@@ -9486,7 +9556,7 @@ mod tests {
        run_until_done(
            || {
                fill_cell_payload(
-                    page.clone(),
+                    &page,
                    Some(0),
                    &mut payload,
                    0,
@@ -9572,7 +9642,7 @@ mod tests {
        run_until_done(
            || {
                fill_cell_payload(
-                    page.clone(),
+                    &page,
                    Some(0),
                    &mut payload,
                    0,
@@ -9981,7 +10051,7 @@ mod tests {
        run_until_done(
            || {
                fill_cell_payload(
-                    page.clone(),
+                    &page,
                    Some(cell_idx as i64),
                    &mut payload,
                    cell_idx as usize,
diff --git a/core/storage/encryption.rs b/core/storage/encryption.rs
index 9bebd0f56..7980daf42 100644
--- a/core/storage/encryption.rs
+++ b/core/storage/encryption.rs
@@ -1,56 +1,80 @@
 #![allow(unused_variables, dead_code)]
 use crate::{LimboError, Result};
+use aegis::aegis128l::Aegis128L;
+use aegis::aegis128x2::Aegis128X2;
+use aegis::aegis128x4::Aegis128X4;
 use aegis::aegis256::Aegis256;
-use aes_gcm::aead::{AeadCore, OsRng};
-use std::ops::Deref;
+use aegis::aegis256x2::Aegis256X2;
+use aegis::aegis256x4::Aegis256X4;
+use aes_gcm::{
+    aead::{Aead, AeadCore, KeyInit, OsRng},
+    Aes128Gcm, Aes256Gcm, Key, Nonce,
+};
 use turso_macros::match_ignore_ascii_case;

-// AEGIS-256 supports both 16 and 32 byte tags, we use the 16 byte variant, it is faster
-// and provides sufficient security for our use case.
-const AEGIS_TAG_SIZE: usize = 16;
-const AES256GCM_TAG_SIZE: usize = 16;
-#[repr(transparent)]
 #[derive(Clone)]
-pub struct EncryptionKey([u8; 32]);
+pub enum EncryptionKey {
+    Key128([u8; 16]),
+    Key256([u8; 32]),
+}

 impl EncryptionKey {
-    pub fn new(key: [u8; 32]) -> Self {
-        Self(key)
+    pub fn new_256(key: [u8; 32]) -> Self {
+        Self::Key256(key)
+    }
+
+    pub fn new_128(key: [u8; 16]) -> Self {
+        Self::Key128(key)
    }

    pub fn from_hex_string(s: &str) -> Result<Self> {
        let hex_str = s.trim();
        let bytes = hex::decode(hex_str)
            .map_err(|e| LimboError::InvalidArgument(format!("Invalid hex string: {e}")))?;
-        let key: [u8; 32] = bytes.try_into().map_err(|v: Vec<u8>| {
-            LimboError::InvalidArgument(format!(
-                "Hex string must decode to exactly 32 bytes, got {}",
-                v.len()
-            ))
-        })?;
-        Ok(Self(key))
-    }

-    pub fn as_bytes(&self) -> &[u8; 32] {
-        &self.0
+        match bytes.len() {
+            16 => {
+                let key: [u8; 16] = bytes.try_into().unwrap();
+                Ok(Self::Key128(key))
+            }
+            32 => {
+                let key: [u8; 32] = bytes.try_into().unwrap();
+                Ok(Self::Key256(key))
+            }
+            _ => Err(LimboError::InvalidArgument(format!(
+                "Hex string must decode to exactly 16 or 32 bytes, got {}",
+                bytes.len()
+            ))),
+        }
    }

    pub fn as_slice(&self) -> &[u8] {
-        &self.0
+        match self {
+            Self::Key128(key) => key,
+            Self::Key256(key) => key,
+        }
    }
-}

-impl Deref for EncryptionKey {
-    type Target = [u8; 32];
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
+    #[allow(clippy::len_without_is_empty)]
+    pub fn len(&self) -> usize {
+        match self {
+            Self::Key128(_) => 16,
+            Self::Key256(_) => 32,
+        }
    }
-}

-impl AsRef<[u8; 32]> for EncryptionKey {
-    fn as_ref(&self) -> &[u8; 32] {
-        &self.0
+    pub fn as_128(&self) -> Option<&[u8; 16]> {
+        match self {
+            Self::Key128(key) => Some(key),
+            _ => None,
+        }
+    }
+
+    pub fn as_256(&self) -> Option<&[u8; 32]> {
+        match self {
+            Self::Key256(key) => Some(key),
+            _ => None,
+        }
    }
 }

@@ -65,203 +89,180 @@ impl std::fmt::Debug for EncryptionKey {
 impl Drop for EncryptionKey {
    fn drop(&mut self) {
        // securely zero out the key bytes before dropping
-        for byte in self.0.iter_mut() {
-            unsafe {
-                std::ptr::write_volatile(byte, 0);
+        match self {
+            Self::Key128(key) => {
+                for byte in key.iter_mut() {
+                    unsafe {
+                        std::ptr::write_volatile(byte, 0);
+                    }
+                }
+            }
+            Self::Key256(key) => {
+                for byte in key.iter_mut() {
+                    unsafe {
+                        std::ptr::write_volatile(byte, 0);
+                    }
+                }
            }
        }
    }
 }

-pub trait AeadCipher {
-    fn encrypt(&self, plaintext: &[u8], ad: &[u8]) -> Result<(Vec<u8>, Vec<u8>)>;
-    fn decrypt(&self, ciphertext: &[u8], nonce: &[u8], ad: &[u8]) -> Result<Vec<u8>>;
-
-    fn encrypt_detached(&self, plaintext: &[u8], ad: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)>;
-
-    fn decrypt_detached(
-        &self,
-        ciphertext: &[u8],
-        nonce: &[u8],
-        tag: &[u8],
-        ad: &[u8],
-    ) -> Result<Vec<u8>>;
-}
-
-// wrapper struct for AEGIS-256 cipher, because the crate we use is a bit low-level and we add
-// some nice abstractions here
-// note, the AEGIS has many variants and support for hardware acceleration. Here we just use the
-// vanilla version, which is still order of magnitudes faster than AES-GCM in software. Hardware
-// based compilation is left for future work.
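(Aside, not part of the diff: a usage sketch of the two-width key type introduced above, exercising from_hex_string and the accessors; it assumes only the API shown in this diff.)

fn demo() {
    let k128 = EncryptionKey::from_hex_string(&"ab".repeat(16)).unwrap(); // 32 hex chars = 16 bytes
    assert_eq!(k128.len(), 16);
    assert!(k128.as_128().is_some() && k128.as_256().is_none());

    let k256 = EncryptionKey::from_hex_string(&"cd".repeat(32)).unwrap(); // 64 hex chars = 32 bytes
    assert_eq!(k256.len(), 32);

    assert!(EncryptionKey::from_hex_string(&"ef".repeat(24)).is_err()); // 24 bytes: rejected
}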
-#[derive(Clone)]
-pub struct Aegis256Cipher {
-    key: EncryptionKey,
-}
-
-impl Aegis256Cipher {
-    fn new(key: &EncryptionKey) -> Self {
-        Self { key: key.clone() }
-    }
-}
-
-impl AeadCipher for Aegis256Cipher {
-    fn encrypt(&self, plaintext: &[u8], ad: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> {
-        let nonce = generate_secure_nonce();
-        let (ciphertext, tag) =
-            Aegis256::<AEGIS_TAG_SIZE>::new(self.key.as_bytes(), &nonce).encrypt(plaintext, ad);
-
-        let mut result = ciphertext;
-        result.extend_from_slice(&tag);
-        Ok((result, nonce.to_vec()))
-    }
-
-    fn decrypt(&self, ciphertext: &[u8], nonce: &[u8], ad: &[u8]) -> Result<Vec<u8>> {
-        if ciphertext.len() < AEGIS_TAG_SIZE {
-            return Err(LimboError::InternalError("Ciphertext too short".into()));
+macro_rules! define_aegis_cipher {
+    ($struct_name:ident, $cipher_type:ty, key128, $nonce_size:literal, $name:literal) => {
+        define_aegis_cipher!(@impl $struct_name, $cipher_type, $nonce_size, $name, 16, as_128);
+    };
+    ($struct_name:ident, $cipher_type:ty, key256, $nonce_size:literal, $name:literal) => {
+        define_aegis_cipher!(@impl $struct_name, $cipher_type, $nonce_size, $name, 32, as_256);
+    };
+    (@impl $struct_name:ident, $cipher_type:ty, $nonce_size:literal, $name:literal, $key_size:literal, $key_method:ident) => {
+        #[derive(Clone)]
+        pub struct $struct_name {
+            key: EncryptionKey,
        }
-        let (ct, tag) = ciphertext.split_at(ciphertext.len() - AEGIS_TAG_SIZE);
-        let tag_array: [u8; AEGIS_TAG_SIZE] = tag.try_into().map_err(|_| {
-            LimboError::InternalError(format!("Invalid tag size for AEGIS-256 {AEGIS_TAG_SIZE}"))
-        })?;
-        let nonce_array: [u8; 32] = nonce
-            .try_into()
-            .map_err(|_| LimboError::InternalError("Invalid nonce size for AEGIS-256".into()))?;
+        impl $struct_name {
+            const TAG_SIZE: usize = 16;

-        Aegis256::<AEGIS_TAG_SIZE>::new(self.key.as_bytes(), &nonce_array)
-            .decrypt(ct, &tag_array, ad)
-            .map_err(|_| LimboError::InternalError("AEGIS-256 decryption failed".into()))
-    }
+            fn new(key: &EncryptionKey) -> Self {
+                Self { key: key.clone() }
+            }

-    fn encrypt_detached(&self, plaintext: &[u8], ad: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
-        let nonce = generate_secure_nonce();
-        let (ciphertext, tag) =
-            Aegis256::<AEGIS_TAG_SIZE>::new(self.key.as_bytes(), &nonce).encrypt(plaintext, ad);
+            fn encrypt(&self, plaintext: &[u8], ad: &[u8]) -> Result<(Vec<u8>, [u8; $nonce_size])> {
+                let nonce = generate_secure_nonce::<$nonce_size>();
+                let key_bytes = self.key.$key_method()
+                    .ok_or_else(|| -> LimboError { CipherError::InvalidKeySize { cipher: $name, expected: $key_size }.into() })?;
+                let (ciphertext, tag) = <$cipher_type>::new(key_bytes, &nonce).encrypt(plaintext, ad);
+                let mut result = ciphertext;
+                result.extend_from_slice(&tag);
+                Ok((result, nonce))
+            }

-        Ok((ciphertext, tag.to_vec(), nonce.to_vec()))
-    }
+            fn decrypt(&self, ciphertext: &[u8], nonce: &[u8; $nonce_size], ad: &[u8]) -> Result<Vec<u8>> {
+                if ciphertext.len() < Self::TAG_SIZE {
+                    return Err(LimboError::from(CipherError::CiphertextTooShort { cipher: $name }));
+                }
+                let (ct, tag) = ciphertext.split_at(ciphertext.len() - Self::TAG_SIZE);
+                let tag_array: [u8; 16] = tag.try_into().map_err(|_| -> LimboError { CipherError::InvalidTagSize { cipher: $name }.into() })?;

-    fn decrypt_detached(
-        &self,
-        ciphertext: &[u8],
-        nonce: &[u8],
-        tag: &[u8],
-        ad: &[u8],
-    ) -> Result<Vec<u8>> {
-        let tag_array: [u8; AEGIS_TAG_SIZE] = tag.try_into().map_err(|_| {
-            LimboError::InternalError(format!("Invalid tag size for AEGIS-256 {AEGIS_TAG_SIZE}"))
-        })?;
-        let nonce_array: [u8; 32] = nonce
-            .try_into()
-            .map_err(|_| LimboError::InternalError("Invalid nonce size for AEGIS-256".into()))?;
-
-        Aegis256::<AEGIS_TAG_SIZE>::new(self.key.as_bytes(), &nonce_array)
-            .decrypt(ciphertext, &tag_array, ad)
-            .map_err(|_| LimboError::InternalError("AEGIS-256 decrypt_detached failed".into()))
-    }
-}
-
-#[derive(Clone)]
-pub struct Aes256GcmCipher {
-    key: EncryptionKey,
-}
-
-impl Aes256GcmCipher {
-    fn new(key: &EncryptionKey) -> Self {
-        Self { key: key.clone() }
-    }
-}
-
-impl AeadCipher for Aes256GcmCipher {
-    fn encrypt(&self, plaintext: &[u8], _ad: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> {
-        use aes_gcm::aead::{AeadInPlace, KeyInit};
-        use aes_gcm::Aes256Gcm;
-
-        let cipher = Aes256Gcm::new_from_slice(self.key.as_bytes())
-            .map_err(|_| LimboError::InternalError("Bad AES key".into()))?;
-        let nonce = Aes256Gcm::generate_nonce(&mut rand::thread_rng());
-        let mut buffer = plaintext.to_vec();
-
-        let tag = cipher
-            .encrypt_in_place_detached(&nonce, b"", &mut buffer)
-            .map_err(|_| LimboError::InternalError("AES-GCM encrypt failed".into()))?;
-
-        buffer.extend_from_slice(&tag[..AES256GCM_TAG_SIZE]);
-        Ok((buffer, nonce.to_vec()))
-    }
-
-    fn decrypt(&self, ciphertext: &[u8], nonce: &[u8], ad: &[u8]) -> Result<Vec<u8>> {
-        use aes_gcm::aead::{AeadInPlace, KeyInit};
-        use aes_gcm::{Aes256Gcm, Nonce};
-
-        if ciphertext.len() < AES256GCM_TAG_SIZE {
-            return Err(LimboError::InternalError("Ciphertext too short".into()));
+                let key_bytes = self.key.$key_method()
+                    .ok_or_else(|| -> LimboError { CipherError::InvalidKeySize { cipher: $name, expected: $key_size }.into() })?;
+                <$cipher_type>::new(key_bytes, nonce)
+                    .decrypt(ct, &tag_array, ad)
+                    .map_err(|_| -> LimboError { CipherError::DecryptionFailed { cipher: $name }.into() })
+            }
        }
-        let (ct, tag) = ciphertext.split_at(ciphertext.len() - AES256GCM_TAG_SIZE);
-        let cipher = Aes256Gcm::new_from_slice(self.key.as_bytes())
-            .map_err(|_| LimboError::InternalError("Bad AES key".into()))?;
-        let nonce = Nonce::from_slice(nonce);
-
-        let mut buffer = ct.to_vec();
-        cipher
-            .decrypt_in_place_detached(nonce, ad, &mut buffer, tag.into())
-            .map_err(|_| LimboError::InternalError("AES-GCM decrypt failed".into()))?;
-
-        Ok(buffer)
-    }
-
-    fn encrypt_detached(&self, plaintext: &[u8], ad: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
-        use aes_gcm::aead::{AeadInPlace, KeyInit};
-        use aes_gcm::Aes256Gcm;
-
-        let cipher = Aes256Gcm::new_from_slice(self.key.as_bytes())
-            .map_err(|_| LimboError::InternalError("Bad AES key".into()))?;
-        let nonce = Aes256Gcm::generate_nonce(&mut rand::thread_rng());
-
-        let mut buffer = plaintext.to_vec();
-        let tag = cipher
-            .encrypt_in_place_detached(&nonce, ad, &mut buffer)
-            .map_err(|_| LimboError::InternalError("AES-GCM encrypt_detached failed".into()))?;
-
-        Ok((buffer, nonce.to_vec(), tag.to_vec()))
-    }
-
-    fn decrypt_detached(
-        &self,
-        ciphertext: &[u8],
-        nonce: &[u8],
-        tag: &[u8],
-        ad: &[u8],
-    ) -> Result<Vec<u8>> {
-        use aes_gcm::aead::{AeadInPlace, KeyInit};
-        use aes_gcm::{Aes256Gcm, Nonce};
-
-        let cipher = Aes256Gcm::new_from_slice(self.key.as_bytes())
-            .map_err(|_| LimboError::InternalError("Bad AES key".into()))?;
-        let nonce = Nonce::from_slice(nonce);
-
-        let mut buffer = ciphertext.to_vec();
-        cipher
-            .decrypt_in_place_detached(nonce, ad, &mut buffer, tag.into())
-            .map_err(|_| LimboError::InternalError("AES-GCM decrypt_detached failed".into()))?;
-
-        Ok(buffer)
-    }
+        impl std::fmt::Debug for $struct_name {
+            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                f.debug_struct(stringify!($struct_name))
+                    .field("key", &"<redacted>")
+                    .finish()
+            }
+        }
+    };
 }

-impl std::fmt::Debug for Aegis256Cipher {
-    fn fmt(&self,
-        f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("Aegis256Cipher")
-            .field("key", &"<redacted>")
-            .finish()
-    }
+macro_rules! define_aes_gcm_cipher {
+    ($struct_name:ident, $cipher_type:ty, key128, $name:literal) => {
+        define_aes_gcm_cipher!(@impl $struct_name, $cipher_type, $name, 16, as_128);
+    };
+    ($struct_name:ident, $cipher_type:ty, key256, $name:literal) => {
+        define_aes_gcm_cipher!(@impl $struct_name, $cipher_type, $name, 32, as_256);
+    };
+    (@impl $struct_name:ident, $cipher_type:ty, $name:literal, $key_size:literal, $key_method:ident) => {
+        #[derive(Clone)]
+        pub struct $struct_name {
+            cipher: $cipher_type,
+        }
+
+        impl $struct_name {
+            const TAG_SIZE: usize = 16;
+            const NONCE_SIZE: usize = 12;
+
+            fn new(key: &EncryptionKey) -> Result<Self> {
+                let key_bytes = key.$key_method()
+                    .ok_or_else(|| -> LimboError { CipherError::InvalidKeySize { cipher: $name, expected: $key_size }.into() })?;
+                let cipher_key: &Key<$cipher_type> = key_bytes.into();
+                Ok(Self {
+                    cipher: <$cipher_type>::new(cipher_key),
+                })
+            }
+
+            fn encrypt(&self, plaintext: &[u8], _ad: &[u8]) -> Result<(Vec<u8>, [u8; 12])> {
+                let nonce = <$cipher_type>::generate_nonce(&mut OsRng);
+                let ciphertext = self.cipher.encrypt(&nonce, plaintext).map_err(|e| {
+                    LimboError::InternalError(format!("{} encryption failed: {e:?}", $name))
+                })?;
+                let mut nonce_array = [0u8; 12];
+                nonce_array.copy_from_slice(&nonce);
+                Ok((ciphertext, nonce_array))
+            }
+
+            fn decrypt(&self, ciphertext: &[u8], nonce: &[u8; 12], _ad: &[u8]) -> Result<Vec<u8>> {
+                let nonce = Nonce::from_slice(nonce);
+                self.cipher
+                    .decrypt(nonce, ciphertext)
+                    .map_err(|_| -> LimboError { CipherError::DecryptionFailed { cipher: $name }.into() })
+            }
+        }
+
+        impl std::fmt::Debug for $struct_name {
+            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                f.debug_struct(stringify!($struct_name))
+                    .field("key", &"<redacted>")
+                    .finish()
+            }
+        }
+    };
 }

+// AES-GCM ciphers
+define_aes_gcm_cipher!(Aes128GcmCipher, Aes128Gcm, key128, "AES-128-GCM");
+define_aes_gcm_cipher!(Aes256GcmCipher, Aes256Gcm, key256, "AES-256-GCM");
+
+// AEGIS ciphers
+define_aegis_cipher!(Aegis256Cipher, Aegis256::<16>, key256, 32, "AEGIS-256");
+define_aegis_cipher!(
+    Aegis256X2Cipher,
+    Aegis256X2::<16>,
+    key256,
+    32,
+    "AEGIS-256X2"
+);
+define_aegis_cipher!(
+    Aegis256X4Cipher,
+    Aegis256X4::<16>,
+    key256,
+    32,
+    "AEGIS-256X4"
+);
+define_aegis_cipher!(
+    Aegis128X2Cipher,
+    Aegis128X2::<16>,
+    key128,
+    16,
+    "AEGIS-128X2"
+);
+define_aegis_cipher!(Aegis128LCipher, Aegis128L::<16>, key128, 16, "AEGIS-128L");
+define_aegis_cipher!(
+    Aegis128X4Cipher,
+    Aegis128X4::<16>,
+    key128,
+    16,
+    "AEGIS-128X4"
+);
+
 #[derive(Debug, Clone, Copy, PartialEq)]
 pub enum CipherMode {
+    Aes128Gcm,
    Aes256Gcm,
    Aegis256,
+    Aegis128L,
+    Aegis128X2,
+    Aegis128X4,
+    Aegis256X2,
+    Aegis256X4,
 }

 impl TryFrom<&str> for CipherMode {
@@ -270,8 +271,14 @@ impl TryFrom<&str> for CipherMode {
    fn try_from(s: &str) -> Result<Self> {
        let s_bytes = s.as_bytes();
        match_ignore_ascii_case!(match s_bytes {
+            b"aes128gcm" | b"aes-128-gcm" | b"aes_128_gcm" => Ok(CipherMode::Aes128Gcm),
            b"aes256gcm" | b"aes-256-gcm" | b"aes_256_gcm" => Ok(CipherMode::Aes256Gcm),
            b"aegis256" | b"aegis-256" | b"aegis_256" => Ok(CipherMode::Aegis256),
+            b"aegis128l" | b"aegis-128l" | b"aegis_128l" => Ok(CipherMode::Aegis128L),
+            b"aegis128x2" | b"aegis-128x2" | b"aegis_128x2" => Ok(CipherMode::Aegis128X2),
+            b"aegis128x4" | b"aegis-128x4" | b"aegis_128x4" => Ok(CipherMode::Aegis128X4),
+            b"aegis256x2" | b"aegis-256x2" | b"aegis_256x2" => Ok(CipherMode::Aegis256X2),
+            b"aegis256x4" | b"aegis-256x4" | b"aegis_256x4" => Ok(CipherMode::Aegis256X4),
            _ => Err(LimboError::InvalidArgument(format!(
                "Unknown cipher name: {s}"
            ))),
@@ -282,8 +289,14 @@ impl std::fmt::Display for CipherMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
+            CipherMode::Aes128Gcm => write!(f, "aes128gcm"),
            CipherMode::Aes256Gcm => write!(f, "aes256gcm"),
            CipherMode::Aegis256 => write!(f, "aegis256"),
+            CipherMode::Aegis128L => write!(f, "aegis128l"),
+            CipherMode::Aegis128X2 => write!(f, "aegis128x2"),
+            CipherMode::Aegis128X4 => write!(f, "aegis128x4"),
+            CipherMode::Aegis256X2 => write!(f, "aegis256x2"),
+            CipherMode::Aegis256X4 => write!(f, "aegis256x4"),
        }
    }
 }
@@ -293,24 +306,42 @@ impl CipherMode {
    /// For 128-bit algorithms, it would be 16 bytes, etc.
    pub fn required_key_size(&self) -> usize {
        match self {
+            CipherMode::Aes128Gcm => 16,
            CipherMode::Aes256Gcm => 32,
            CipherMode::Aegis256 => 32,
+            CipherMode::Aegis256X2 => 32,
+            CipherMode::Aegis256X4 => 32,
+            CipherMode::Aegis128L => 16,
+            CipherMode::Aegis128X2 => 16,
+            CipherMode::Aegis128X4 => 16,
        }
    }

    /// Returns the nonce size for this cipher mode.
    pub fn nonce_size(&self) -> usize {
        match self {
+            CipherMode::Aes128Gcm => 12,
            CipherMode::Aes256Gcm => 12,
            CipherMode::Aegis256 => 32,
+            CipherMode::Aegis256X2 => 32,
+            CipherMode::Aegis256X4 => 32,
+            CipherMode::Aegis128L => 16,
+            CipherMode::Aegis128X2 => 16,
+            CipherMode::Aegis128X4 => 16,
        }
    }

    /// Returns the authentication tag size for this cipher mode.
    pub fn tag_size(&self) -> usize {
        match self {
-            CipherMode::Aes256Gcm => AES256GCM_TAG_SIZE,
-            CipherMode::Aegis256 => AEGIS_TAG_SIZE,
+            CipherMode::Aes128Gcm => 16,
+            CipherMode::Aes256Gcm => 16,
+            CipherMode::Aegis256 => 16,
+            CipherMode::Aegis256X2 => 16,
+            CipherMode::Aegis256X4 => 16,
+            CipherMode::Aegis128L => 16,
+            CipherMode::Aegis128X2 => 16,
+            CipherMode::Aegis128X4 => 16,
        }
    }
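(Aside, not part of the diff: a parsing sketch for the aliases accepted above; all three spellings normalize to one mode, and key/nonce sizes follow from it. Uses only the API shown in this diff.)

fn demo() {
    for name in ["aegis128x2", "aegis-128x2", "aegis_128x2"] {
        let mode = CipherMode::try_from(name).unwrap();
        assert_eq!(mode, CipherMode::Aegis128X2);
        assert_eq!(mode.required_key_size(), 16);
        assert_eq!(mode.nonce_size(), 16);
    }
    assert!(CipherMode::try_from("rot13").is_err());
}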
@@ -322,24 +353,27 @@ impl CipherMode {

 #[derive(Clone)]
 pub enum Cipher {
-    Aes256Gcm(Aes256GcmCipher),
-    Aegis256(Aegis256Cipher),
-}
-
-impl Cipher {
-    fn as_aead(&self) -> &dyn AeadCipher {
-        match self {
-            Cipher::Aes256Gcm(c) => c,
-            Cipher::Aegis256(c) => c,
-        }
-    }
+    Aes128Gcm(Box<Aes128GcmCipher>),
+    Aes256Gcm(Box<Aes256GcmCipher>),
+    Aegis256(Box<Aegis256Cipher>),
+    Aegis256X2(Box<Aegis256X2Cipher>),
+    Aegis256X4(Box<Aegis256X4Cipher>),
+    Aegis128L(Box<Aegis128LCipher>),
+    Aegis128X2(Box<Aegis128X2Cipher>),
+    Aegis128X4(Box<Aegis128X4Cipher>),
 }

 impl std::fmt::Debug for Cipher {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
+            Cipher::Aes128Gcm(_) => write!(f, "Cipher::Aes128Gcm"),
            Cipher::Aes256Gcm(_) => write!(f, "Cipher::Aes256Gcm"),
            Cipher::Aegis256(_) => write!(f, "Cipher::Aegis256"),
+            Cipher::Aegis256X2(_) => write!(f, "Cipher::Aegis256X2"),
+            Cipher::Aegis256X4(_) => write!(f, "Cipher::Aegis256X4"),
+            Cipher::Aegis128L(_) => write!(f, "Cipher::Aegis128L"),
+            Cipher::Aegis128X2(_) => write!(f, "Cipher::Aegis128X2"),
+            Cipher::Aegis128X4(_) => write!(f, "Cipher::Aegis128X4"),
        }
    }
 }
@@ -354,18 +388,24 @@ pub struct EncryptionContext {
 impl EncryptionContext {
    pub fn new(cipher_mode: CipherMode, key: &EncryptionKey, page_size: usize) -> Result<Self> {
        let required_size = cipher_mode.required_key_size();
-        if key.as_slice().len() != required_size {
+        if key.len() != required_size {
            return Err(crate::LimboError::InvalidArgument(format!(
                "Invalid key size for {:?}: expected {} bytes, got {}",
                cipher_mode,
                required_size,
-                key.as_slice().len()
+                key.len()
            )));
        }
        let cipher = match cipher_mode {
-            CipherMode::Aes256Gcm => Cipher::Aes256Gcm(Aes256GcmCipher::new(key)),
-            CipherMode::Aegis256 => Cipher::Aegis256(Aegis256Cipher::new(key)),
+            CipherMode::Aes128Gcm => Cipher::Aes128Gcm(Box::new(Aes128GcmCipher::new(key)?)),
+            CipherMode::Aes256Gcm => Cipher::Aes256Gcm(Box::new(Aes256GcmCipher::new(key)?)),
+            CipherMode::Aegis256 => Cipher::Aegis256(Box::new(Aegis256Cipher::new(key))),
+            CipherMode::Aegis256X2 => Cipher::Aegis256X2(Box::new(Aegis256X2Cipher::new(key))),
+            CipherMode::Aegis256X4 => Cipher::Aegis256X4(Box::new(Aegis256X4Cipher::new(key))),
+            CipherMode::Aegis128L => Cipher::Aegis128L(Box::new(Aegis128LCipher::new(key))),
+            CipherMode::Aegis128X2 => Cipher::Aegis128X2(Box::new(Aegis128X2Cipher::new(key))),
+            CipherMode::Aegis128X4 => Cipher::Aegis128X4(Box::new(Aegis128X4Cipher::new(key))),
        };
        Ok(Self {
            cipher_mode,
@@ -471,21 +511,54 @@ impl EncryptionContext {

    /// encrypts raw data using the configured cipher, returns ciphertext and nonce
    fn encrypt_raw(&self, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> {
-        self.cipher.as_aead().encrypt(plaintext, b"")
+        const AD: &[u8] = b"";
+
+        macro_rules! encrypt_cipher {
+            ($cipher:expr) => {{
+                let (ciphertext, nonce) = $cipher.encrypt(plaintext, AD)?;
+                Ok((ciphertext, nonce.to_vec()))
+            }};
+        }
+
+        match &self.cipher {
+            Cipher::Aes128Gcm(cipher) => encrypt_cipher!(cipher),
+            Cipher::Aes256Gcm(cipher) => encrypt_cipher!(cipher),
+            Cipher::Aegis256(cipher) => encrypt_cipher!(cipher),
+            Cipher::Aegis256X2(cipher) => encrypt_cipher!(cipher),
+            Cipher::Aegis256X4(cipher) => encrypt_cipher!(cipher),
+            Cipher::Aegis128L(cipher) => encrypt_cipher!(cipher),
+            Cipher::Aegis128X2(cipher) => encrypt_cipher!(cipher),
+            Cipher::Aegis128X4(cipher) => encrypt_cipher!(cipher),
+        }
    }

    fn decrypt_raw(&self, ciphertext: &[u8], nonce: &[u8]) -> Result<Vec<u8>> {
-        self.cipher.as_aead().decrypt(ciphertext, nonce, b"")
-    }
+        const AD: &[u8] = b"";

-    fn encrypt_raw_detached(&self, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
-        self.cipher.as_aead().encrypt_detached(plaintext, b"")
-    }
+        macro_rules! decrypt_with_nonce {
+            ($cipher:expr, $nonce_size:literal, $name:literal) => {{
+                let nonce_array: [u8; $nonce_size] = nonce.try_into().map_err(|_| {
+                    LimboError::InternalError(format!(
+                        "Invalid nonce size for {}: expected {}, got {}",
+                        $name,
+                        $nonce_size,
+                        nonce.len()
+                    ))
+                })?;
+                $cipher.decrypt(ciphertext, &nonce_array, AD)
+            }};
+        }

-    fn decrypt_raw_detached(&self, ciphertext: &[u8], nonce: &[u8], tag: &[u8]) -> Result<Vec<u8>> {
-        self.cipher
-            .as_aead()
-            .decrypt_detached(ciphertext, nonce, tag, b"")
+        match &self.cipher {
+            Cipher::Aes128Gcm(cipher) => decrypt_with_nonce!(cipher, 12, "AES-128-GCM"),
+            Cipher::Aes256Gcm(cipher) => decrypt_with_nonce!(cipher, 12, "AES-256-GCM"),
+            Cipher::Aegis256(cipher) => decrypt_with_nonce!(cipher, 32, "AEGIS-256"),
+            Cipher::Aegis256X2(cipher) => decrypt_with_nonce!(cipher, 32, "AEGIS-256X2"),
+            Cipher::Aegis256X4(cipher) => decrypt_with_nonce!(cipher, 32, "AEGIS-256X4"),
+            Cipher::Aegis128L(cipher) => decrypt_with_nonce!(cipher, 16, "AEGIS-128L"),
+            Cipher::Aegis128X2(cipher) => decrypt_with_nonce!(cipher, 16, "AEGIS-128X2"),
+            Cipher::Aegis128X4(cipher) => decrypt_with_nonce!(cipher, 16, "AEGIS-128X4"),
+        }
    }

    #[cfg(not(feature = "encryption"))]
@@ -503,21 +576,118 @@ impl EncryptionContext {
    }
 }

-fn generate_secure_nonce() -> [u8; 32] {
-    // use OsRng directly to fill bytes, similar to how AeadCore does it
+fn generate_secure_nonce<const N: usize>() -> [u8; N] {
+    // use OsRng directly to fill bytes, generic over nonce size
    use aes_gcm::aead::rand_core::RngCore;
-    let mut nonce = [0u8; 32];
+    let mut nonce = [0u8; N];
    OsRng.fill_bytes(&mut nonce);
    nonce
 }

-#[cfg(feature = "encryption")]
+// Helper functions for consistent error messages
+enum CipherError {
+    InvalidKeySize {
+        cipher: &'static str,
+        expected: usize,
+    },
+    InvalidTagSize {
+        cipher: &'static str,
+    },
+    DecryptionFailed {
+        cipher: &'static str,
+    },
+    CiphertextTooShort {
+        cipher: &'static str,
+    },
+}
+
+impl From<CipherError> for LimboError {
+    fn from(err: CipherError) -> Self {
+        let msg = match err {
+            CipherError::InvalidKeySize { cipher, expected } => {
+                format!("{cipher} requires {expected}-byte key")
+            }
+            CipherError::InvalidTagSize { cipher } => format!("Invalid tag size for {cipher}"),
+            CipherError::DecryptionFailed { cipher } => {
+                format!("{cipher} decryption failed: invalid tag")
+            }
+            CipherError::CiphertextTooShort { cipher } => {
+                format!("Ciphertext too short for {cipher}")
+            }
+        };
+        LimboError::InternalError(msg)
+    }
+}
+
 #[cfg(test)]
 mod tests {
    use super::*;
    use rand::Rng;
    const DEFAULT_ENCRYPTED_PAGE_SIZE: usize = 4096;

+    macro_rules! test_cipher_wrapper {
+        ($test_name:ident, $cipher_type:ty, $key_gen:expr, $nonce_size:literal, $message:literal) => {
+            #[test]
+            #[cfg(feature = "encryption")]
+            fn $test_name() {
+                let key = EncryptionKey::from_hex_string(&$key_gen()).unwrap();
+                let cipher = <$cipher_type>::new(&key);
+
+                let plaintext = $message.as_bytes();
+                let ad = b"additional data";
+
+                let (ciphertext, nonce) = cipher.encrypt(plaintext, ad).unwrap();
+                assert_eq!(nonce.len(), $nonce_size);
+                assert_ne!(ciphertext[..plaintext.len()], plaintext[..]);
+
+                let decrypted = cipher.decrypt(&ciphertext, &nonce, ad).unwrap();
+                assert_eq!(decrypted, plaintext);
+            }
+        };
+    }
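(Aside, not part of the diff: the const-generic nonce helper above lets each cipher pick its nonce width at compile time. A standalone sketch using the same OsRng re-export the module imports.)

use aes_gcm::aead::rand_core::RngCore;
use aes_gcm::aead::OsRng;

fn nonce<const N: usize>() -> [u8; N] {
    let mut out = [0u8; N];
    OsRng.fill_bytes(&mut out);
    out
}

fn main() {
    let gcm: [u8; 12] = nonce();   // AES-GCM width
    let aegis: [u8; 32] = nonce(); // AEGIS-256 family width
    assert_ne!(gcm, [0u8; 12]);    // an all-zero fill is astronomically unlikely
    let _ = aegis;
}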
test_aes_cipher_wrapper { + ($test_name:ident, $cipher_type:ty, $key_gen:expr, $nonce_size:literal, $message:literal) => { + #[test] + #[cfg(feature = "encryption")] + fn $test_name() { + let key = EncryptionKey::from_hex_string(&$key_gen()).unwrap(); + let cipher = <$cipher_type>::new(&key).unwrap(); + + let plaintext = $message.as_bytes(); + let ad = b"additional data"; + + let (ciphertext, nonce) = cipher.encrypt(plaintext, ad).unwrap(); + assert_eq!(nonce.len(), $nonce_size); + assert_ne!(ciphertext[..plaintext.len()], plaintext[..]); + + let decrypted = cipher.decrypt(&ciphertext, &nonce, ad).unwrap(); + assert_eq!(decrypted, plaintext); + } + }; + } + + macro_rules! test_raw_encryption { + ($test_name:ident, $cipher_mode:expr, $key_gen:expr, $nonce_size:literal, $message:literal) => { + #[test] + #[cfg(feature = "encryption")] + fn $test_name() { + let key = EncryptionKey::from_hex_string(&$key_gen()).unwrap(); + let ctx = EncryptionContext::new($cipher_mode, &key, DEFAULT_ENCRYPTED_PAGE_SIZE) + .unwrap(); + + let plaintext = $message.as_bytes(); + let (ciphertext, nonce) = ctx.encrypt_raw(plaintext).unwrap(); + + assert_eq!(nonce.len(), $nonce_size); + assert_ne!(ciphertext[..plaintext.len()], plaintext[..]); + + let decrypted = ctx.decrypt_raw(&ciphertext, &nonce).unwrap(); + assert_eq!(decrypted, plaintext); + } + }; + } + fn generate_random_hex_key() -> String { let mut rng = rand::thread_rng(); let mut bytes = [0u8; 32]; @@ -525,7 +695,62 @@ mod tests { hex::encode(bytes) } + fn generate_random_hex_key_128() -> String { + let mut rng = rand::thread_rng(); + let mut bytes = [0u8; 16]; + rng.fill(&mut bytes); + hex::encode(bytes) + } + + test_aes_cipher_wrapper!( + test_aes128gcm_cipher_wrapper, + Aes128GcmCipher, + generate_random_hex_key_128, + 12, + "Hello, AES-128-GCM!" + ); + + test_raw_encryption!( + test_aes128gcm_raw_encryption, + CipherMode::Aes128Gcm, + generate_random_hex_key_128, + 12, + "Hello, AES-128-GCM!" 
+ ); + #[test] + #[cfg(feature = "encryption")] + fn test_aes128gcm_encrypt_decrypt_round_trip() { + let mut rng = rand::thread_rng(); + let cipher_mode = CipherMode::Aes128Gcm; + let metadata_size = cipher_mode.metadata_size(); + let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; + + let page_data = { + let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; + page.iter_mut() + .take(data_size) + .for_each(|byte| *byte = rng.gen()); + page + }; + + let key = EncryptionKey::from_hex_string(&generate_random_hex_key_128()).unwrap(); + let ctx = EncryptionContext::new(CipherMode::Aes128Gcm, &key, DEFAULT_ENCRYPTED_PAGE_SIZE) + .unwrap(); + + let page_id = 42; + let encrypted = ctx.encrypt_page(&page_data, page_id).unwrap(); + assert_eq!(encrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_ne!(&encrypted[..data_size], &page_data[..data_size]); + assert_ne!(&encrypted[..], &page_data[..]); + + let decrypted = ctx.decrypt_page(&encrypted, page_id).unwrap(); + assert_eq!(decrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_eq!(decrypted, page_data); + } + + #[test] + #[cfg(feature = "encryption")] fn test_aes_encrypt_decrypt_round_trip() { let mut rng = rand::thread_rng(); let cipher_mode = CipherMode::Aes256Gcm; @@ -555,39 +780,24 @@ mod tests { assert_eq!(decrypted, page_data); } - #[test] - fn test_aegis256_cipher_wrapper() { - let key = EncryptionKey::from_hex_string(&generate_random_hex_key()).unwrap(); - let cipher = Aegis256Cipher::new(&key); + test_cipher_wrapper!( + test_aegis256_cipher_wrapper, + Aegis256Cipher, + generate_random_hex_key, + 32, + "Hello, AEGIS-256!" + ); - let plaintext = b"Hello, AEGIS-256!"; - let ad = b"additional data"; - - let (ciphertext, nonce) = cipher.encrypt(plaintext, ad).unwrap(); - assert_eq!(nonce.len(), 32); - assert_ne!(ciphertext[..plaintext.len()], plaintext[..]); - - let decrypted = cipher.decrypt(&ciphertext, &nonce, ad).unwrap(); - assert_eq!(decrypted, plaintext); - } - - #[test] - fn test_aegis256_raw_encryption() { - let key = EncryptionKey::from_hex_string(&generate_random_hex_key()).unwrap(); - let ctx = EncryptionContext::new(CipherMode::Aegis256, &key, DEFAULT_ENCRYPTED_PAGE_SIZE) - .unwrap(); - - let plaintext = b"Hello, AEGIS-256!"; - let (ciphertext, nonce) = ctx.encrypt_raw(plaintext).unwrap(); - - assert_eq!(nonce.len(), 32); // AEGIS-256 uses 32-byte nonces - assert_ne!(ciphertext[..plaintext.len()], plaintext[..]); - - let decrypted = ctx.decrypt_raw(&ciphertext, &nonce).unwrap(); - assert_eq!(decrypted, plaintext); - } + test_raw_encryption!( + test_aegis256_raw_encryption, + CipherMode::Aegis256, + generate_random_hex_key, + 32, + "Hello, AEGIS-256!" + ); #[test] + #[cfg(feature = "encryption")] fn test_aegis256_encrypt_decrypt_round_trip() { let mut rng = rand::thread_rng(); let cipher_mode = CipherMode::Aegis256; @@ -615,4 +825,300 @@ mod tests { assert_eq!(decrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); assert_eq!(decrypted, page_data); } + + test_cipher_wrapper!( + test_aegis128x2_cipher_wrapper, + Aegis128X2Cipher, + generate_random_hex_key_128, + 16, + "Hello, AEGIS-128X2!" + ); + + test_raw_encryption!( + test_aegis128x2_raw_encryption, + CipherMode::Aegis128X2, + generate_random_hex_key_128, + 16, + "Hello, AEGIS-128X2!" 
+ ); + + #[test] + #[cfg(feature = "encryption")] + fn test_aegis128x2_encrypt_decrypt_round_trip() { + let mut rng = rand::thread_rng(); + let cipher_mode = CipherMode::Aegis128X2; + let metadata_size = cipher_mode.metadata_size(); + let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; + + let page_data = { + let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; + page.iter_mut() + .take(data_size) + .for_each(|byte| *byte = rng.gen()); + page + }; + + let key = EncryptionKey::from_hex_string(&generate_random_hex_key_128()).unwrap(); + let ctx = EncryptionContext::new(CipherMode::Aegis128X2, &key, DEFAULT_ENCRYPTED_PAGE_SIZE) + .unwrap(); + + let page_id = 42; + let encrypted = ctx.encrypt_page(&page_data, page_id).unwrap(); + assert_eq!(encrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_ne!(&encrypted[..data_size], &page_data[..data_size]); + + let decrypted = ctx.decrypt_page(&encrypted, page_id).unwrap(); + assert_eq!(decrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_eq!(decrypted, page_data); + } + + test_cipher_wrapper!( + test_aegis128l_cipher_wrapper, + Aegis128LCipher, + generate_random_hex_key_128, + 16, + "Hello, AEGIS-128L!" + ); + + test_raw_encryption!( + test_aegis128l_raw_encryption, + CipherMode::Aegis128L, + generate_random_hex_key_128, + 16, + "Hello, AEGIS-128L!" + ); + + #[test] + #[cfg(feature = "encryption")] + fn test_aegis128l_encrypt_decrypt_round_trip() { + let mut rng = rand::thread_rng(); + let cipher_mode = CipherMode::Aegis128L; + let metadata_size = cipher_mode.metadata_size(); + let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; + + let page_data = { + let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; + page.iter_mut() + .take(data_size) + .for_each(|byte| *byte = rng.gen()); + page + }; + + let key = EncryptionKey::from_hex_string(&generate_random_hex_key_128()).unwrap(); + let ctx = EncryptionContext::new(CipherMode::Aegis128L, &key, DEFAULT_ENCRYPTED_PAGE_SIZE) + .unwrap(); + + let page_id = 42; + let encrypted = ctx.encrypt_page(&page_data, page_id).unwrap(); + assert_eq!(encrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_ne!(&encrypted[..data_size], &page_data[..data_size]); + + let decrypted = ctx.decrypt_page(&encrypted, page_id).unwrap(); + assert_eq!(decrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_eq!(decrypted, page_data); + } + + test_cipher_wrapper!( + test_aegis128x4_cipher_wrapper, + Aegis128X4Cipher, + generate_random_hex_key_128, + 16, + "Hello, AEGIS-128X4!" + ); + + test_raw_encryption!( + test_aegis128x4_raw_encryption, + CipherMode::Aegis128X4, + generate_random_hex_key_128, + 16, + "Hello, AEGIS-128X4!" 
+ ); + + #[test] + #[cfg(feature = "encryption")] + fn test_aegis128x4_encrypt_decrypt_round_trip() { + let mut rng = rand::thread_rng(); + let cipher_mode = CipherMode::Aegis128X4; + let metadata_size = cipher_mode.metadata_size(); + let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; + + let page_data = { + let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; + page.iter_mut() + .take(data_size) + .for_each(|byte| *byte = rng.gen()); + page + }; + + let key = EncryptionKey::from_hex_string(&generate_random_hex_key_128()).unwrap(); + let ctx = EncryptionContext::new(CipherMode::Aegis128X4, &key, DEFAULT_ENCRYPTED_PAGE_SIZE) + .unwrap(); + + let page_id = 42; + let encrypted = ctx.encrypt_page(&page_data, page_id).unwrap(); + assert_eq!(encrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_ne!(&encrypted[..data_size], &page_data[..data_size]); + + let decrypted = ctx.decrypt_page(&encrypted, page_id).unwrap(); + assert_eq!(decrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_eq!(decrypted, page_data); + } + + test_cipher_wrapper!( + test_aegis256x2_cipher_wrapper, + Aegis256X2Cipher, + generate_random_hex_key, + 32, + "Hello, AEGIS-256X2!" + ); + + test_raw_encryption!( + test_aegis256x2_raw_encryption, + CipherMode::Aegis256X2, + generate_random_hex_key, + 32, + "Hello, AEGIS-256X2!" + ); + + #[test] + #[cfg(feature = "encryption")] + fn test_aegis256x2_encrypt_decrypt_round_trip() { + let mut rng = rand::thread_rng(); + let cipher_mode = CipherMode::Aegis256X2; + let metadata_size = cipher_mode.metadata_size(); + let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; + + let page_data = { + let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; + page.iter_mut() + .take(data_size) + .for_each(|byte| *byte = rng.gen()); + page + }; + + let key = EncryptionKey::from_hex_string(&generate_random_hex_key()).unwrap(); + let ctx = EncryptionContext::new(CipherMode::Aegis256X2, &key, DEFAULT_ENCRYPTED_PAGE_SIZE) + .unwrap(); + + let page_id = 42; + let encrypted = ctx.encrypt_page(&page_data, page_id).unwrap(); + assert_eq!(encrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_ne!(&encrypted[..data_size], &page_data[..data_size]); + + let decrypted = ctx.decrypt_page(&encrypted, page_id).unwrap(); + assert_eq!(decrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_eq!(decrypted, page_data); + } + + test_cipher_wrapper!( + test_aegis256x4_cipher_wrapper, + Aegis256X4Cipher, + generate_random_hex_key, + 32, + "Hello, AEGIS-256X4!" + ); + + test_raw_encryption!( + test_aegis256x4_raw_encryption, + CipherMode::Aegis256X4, + generate_random_hex_key, + 32, + "Hello, AEGIS-256X4!" 
+ ); + + #[test] + #[cfg(feature = "encryption")] + fn test_aegis256x4_encrypt_decrypt_round_trip() { + let mut rng = rand::thread_rng(); + let cipher_mode = CipherMode::Aegis256X4; + let metadata_size = cipher_mode.metadata_size(); + let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; + + let page_data = { + let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; + page.iter_mut() + .take(data_size) + .for_each(|byte| *byte = rng.gen()); + page + }; + + let key = EncryptionKey::from_hex_string(&generate_random_hex_key()).unwrap(); + let ctx = EncryptionContext::new(CipherMode::Aegis256X4, &key, DEFAULT_ENCRYPTED_PAGE_SIZE) + .unwrap(); + + let page_id = 42; + let encrypted = ctx.encrypt_page(&page_data, page_id).unwrap(); + assert_eq!(encrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_ne!(&encrypted[..data_size], &page_data[..data_size]); + + let decrypted = ctx.decrypt_page(&encrypted, page_id).unwrap(); + assert_eq!(decrypted.len(), DEFAULT_ENCRYPTED_PAGE_SIZE); + assert_eq!(decrypted, page_data); + } + + #[test] + fn test_cipher_mode_string_parsing() { + // Test AES-128-GCM + let mode = CipherMode::try_from("aes128gcm").unwrap(); + assert_eq!(mode, CipherMode::Aes128Gcm); + assert_eq!(mode.to_string(), "aes128gcm"); + assert_eq!(mode.required_key_size(), 16); + assert_eq!(mode.nonce_size(), 12); + assert_eq!(mode.tag_size(), 16); + + let mode = CipherMode::try_from("aes-128-gcm").unwrap(); + assert_eq!(mode, CipherMode::Aes128Gcm); + + let mode = CipherMode::try_from("aes_128_gcm").unwrap(); + assert_eq!(mode, CipherMode::Aes128Gcm); + + // Test AES-256-GCM + let mode = CipherMode::try_from("aes256gcm").unwrap(); + assert_eq!(mode, CipherMode::Aes256Gcm); + assert_eq!(mode.to_string(), "aes256gcm"); + assert_eq!(mode.required_key_size(), 32); + assert_eq!(mode.nonce_size(), 12); + + // Test that all AEGIS variants can be parsed from strings + let mode = CipherMode::try_from("aegis128x2").unwrap(); + assert_eq!(mode, CipherMode::Aegis128X2); + assert_eq!(mode.to_string(), "aegis128x2"); + assert_eq!(mode.required_key_size(), 16); + assert_eq!(mode.nonce_size(), 16); + assert_eq!(mode.tag_size(), 16); + + let mode = CipherMode::try_from("aegis-128x2").unwrap(); + assert_eq!(mode, CipherMode::Aegis128X2); + + let mode = CipherMode::try_from("aegis_128x2").unwrap(); + assert_eq!(mode, CipherMode::Aegis128X2); + + // Test AEGIS-128L + let mode = CipherMode::try_from("aegis128l").unwrap(); + assert_eq!(mode, CipherMode::Aegis128L); + assert_eq!(mode.to_string(), "aegis128l"); + assert_eq!(mode.required_key_size(), 16); + assert_eq!(mode.nonce_size(), 16); + + // Test AEGIS-128X4 + let mode = CipherMode::try_from("aegis128x4").unwrap(); + assert_eq!(mode, CipherMode::Aegis128X4); + assert_eq!(mode.to_string(), "aegis128x4"); + assert_eq!(mode.required_key_size(), 16); + assert_eq!(mode.nonce_size(), 16); + + // Test AEGIS-256X2 + let mode = CipherMode::try_from("aegis256x2").unwrap(); + assert_eq!(mode, CipherMode::Aegis256X2); + assert_eq!(mode.to_string(), "aegis256x2"); + assert_eq!(mode.required_key_size(), 32); + assert_eq!(mode.nonce_size(), 32); + + // Test AEGIS-256X4 + let mode = CipherMode::try_from("aegis256x4").unwrap(); + assert_eq!(mode, CipherMode::Aegis256X4); + assert_eq!(mode.to_string(), "aegis256x4"); + assert_eq!(mode.required_key_size(), 32); + assert_eq!(mode.nonce_size(), 32); + } } diff --git a/core/storage/page_cache.rs b/core/storage/page_cache.rs index f4b3f7fe9..c125f44c8 100644 --- a/core/storage/page_cache.rs +++ b/core/storage/page_cache.rs @@ -1,8 
+1,7 @@
 use std::sync::atomic::Ordering;
-use std::{cell::RefCell, ptr::NonNull};
 use std::sync::Arc;
-use tracing::{debug, trace};
+use tracing::trace;
 
 use crate::turso_assert;
 
@@ -12,41 +11,100 @@ use super::pager::PageRef;
 const DEFAULT_PAGE_CACHE_SIZE_IN_PAGES_MAKE_ME_SMALLER_ONCE_WAL_SPILL_IS_IMPLEMENTED: usize =
     100000;
 
-#[derive(Debug, Eq, Hash, PartialEq, Clone, Copy)]
-pub struct PageCacheKey {
-    pgno: usize,
-}
+#[derive(Debug, Copy, Eq, Hash, PartialEq, Clone)]
+#[repr(transparent)]
+pub struct PageCacheKey(usize);
 
-#[allow(dead_code)]
+const NULL: usize = usize::MAX;
+
+const CLEAR: u8 = 0;
+const REF_MAX: u8 = 3;
+
+#[derive(Clone, Debug)]
 struct PageCacheEntry {
+    /// Key identifying this page
     key: PageCacheKey,
-    page: PageRef,
-    prev: Option<NonNull<PageCacheEntry>>,
-    next: Option<NonNull<PageCacheEntry>>,
+    /// The cached page, None if this slot is free
+    page: Option<PageRef>,
+    /// Reference counter (SIEVE/GClock): starts at zero, bumped on access,
+    /// decremented during eviction; only pages at 0 are evicted.
+    ref_bit: u8,
+    /// Index of next entry in SIEVE queue (older/toward tail)
+    next: usize,
+    /// Index of previous entry in SIEVE queue (newer/toward head)
+    prev: usize,
}
 
-pub struct DumbLruPageCache {
-    capacity: usize,
-    map: RefCell<PageHashMap>,
-    head: RefCell<Option<NonNull<PageCacheEntry>>>,
-    tail: RefCell<Option<NonNull<PageCacheEntry>>>,
+impl Default for PageCacheEntry {
+    fn default() -> Self {
+        Self {
+            key: PageCacheKey(0),
+            page: None,
+            ref_bit: CLEAR,
+            next: NULL,
+            prev: NULL,
+        }
+    }
 }
-unsafe impl Send for DumbLruPageCache {}
-unsafe impl Sync for DumbLruPageCache {}
+
+impl PageCacheEntry {
+    #[inline]
+    fn bump_ref(&mut self) {
+        self.ref_bit = std::cmp::min(self.ref_bit + 1, REF_MAX);
+    }
+
+    #[inline]
+    /// Returns the old value
+    fn decrement_ref(&mut self) -> u8 {
+        let old = self.ref_bit;
+        self.ref_bit = old.saturating_sub(1);
+        old
+    }
+    #[inline]
+    fn clear_ref(&mut self) {
+        self.ref_bit = CLEAR;
+    }
+    #[inline]
+    fn empty() -> Self {
+        Self::default()
+    }
+    #[inline]
+    fn reset_links(&mut self) {
+        self.next = NULL;
+        self.prev = NULL;
+    }
+}
+
+/// PageCache implements a variation of the SIEVE algorithm: it maintains an intrusive
+/// linked-list queue of pages, each carrying a reference bit that tracks how
+/// recently/frequently the page has been accessed. The bit starts at `CLEAR` on
+/// insertion, is bumped on each access, and is decremented during eviction scans.
+///
+/// The ring is circular. `clock_hand` points at the tail (LRU).
+/// Sweep order follows `next`: tail (LRU) -> head (MRU) -> .. -> tail.
+/// New pages are inserted after the clock hand in the `next` direction,
+/// which places them at the head (MRU) (i.e. `tail.next` is the head).
+pub struct PageCache {
+    /// Capacity in pages
+    capacity: usize,
+    /// Map of key -> index in the entries array
+    map: PageHashMap,
+    clock_hand: usize,
+    /// Fixed-size vec holding page entries
+    entries: Vec<PageCacheEntry>,
+    /// Free list: stack of available slot indices
+    freelist: Vec<usize>,
+}
+
+unsafe impl Send for PageCache {}
+unsafe impl Sync for PageCache {}
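// Aside: the intrusive list above stores prev/next as slab indices rather than
// pointers, with usize::MAX as a null sentinel, which is what lets the new cache
// drop NonNull and unsafe entirely. A minimal sketch of linking into and walking
// such an index-based circular ring; `Node` is a toy, not the patch's type:
const NULL: usize = usize::MAX;

struct Node {
    value: u32,
    prev: usize,
    next: usize,
}

fn link_after(nodes: &mut Vec<Node>, a: usize, b: usize) {
    // insert `b` after `a` in a non-empty circular list
    let an = nodes[a].next;
    nodes[b].prev = a;
    nodes[b].next = an;
    nodes[an].prev = b;
    nodes[a].next = b;
}

fn main() {
    let mut nodes: Vec<Node> = (0u32..3)
        .map(|i| Node { value: i * 10, prev: NULL, next: NULL })
        .collect();
    // the first node points at itself; the rest are linked in after it
    nodes[0].prev = 0;
    nodes[0].next = 0;
    link_after(&mut nodes, 0, 1);
    link_after(&mut nodes, 1, 2);

    // walk one full cycle starting at slot 0
    let (mut cur, mut seen) = (0usize, vec![]);
    loop {
        seen.push(nodes[cur].value);
        cur = nodes[cur].next;
        if cur == 0 { break; }
    }
    assert_eq!(seen, [0, 10, 20]);
    // backward link closes the ring: prev of the head is the last node
    assert_eq!(nodes[nodes[0].prev].value, 20);
}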
 
 struct PageHashMap {
-    // FIXME: do we prefer array buckets or list? Deletes will be slower here which I guess happens often. I will do this for now to test how well it does.
     buckets: Vec<Vec<HashMapNode>>,
     capacity: usize,
     size: usize,
 }
 
-#[derive(Clone)]
-struct HashMapNode {
-    key: PageCacheKey,
-    value: NonNull<PageCacheEntry>,
-}
-
 #[derive(Debug, Clone, PartialEq, thiserror::Error)]
 pub enum CacheError {
     #[error("{0}")]
@@ -73,33 +131,82 @@ pub enum CacheResizeResult {
 
 impl PageCacheKey {
     pub fn new(pgno: usize) -> Self {
-        Self { pgno }
+        Self(pgno)
     }
 }
-impl DumbLruPageCache {
+
+impl PageCache {
     pub fn new(capacity: usize) -> Self {
-        assert!(capacity > 0, "capacity of cache should be at least 1");
+        assert!(capacity > 0);
+        let freelist = (0..capacity).rev().collect::<Vec<usize>>();
         Self {
             capacity,
-            map: RefCell::new(PageHashMap::new(capacity)),
-            head: RefCell::new(None),
-            tail: RefCell::new(None),
+            map: PageHashMap::new(capacity),
+            clock_hand: NULL,
+            entries: vec![PageCacheEntry::empty(); capacity],
+            freelist,
         }
     }
 
-    pub fn contains_key(&mut self, key: &PageCacheKey) -> bool {
-        self.map.borrow().contains_key(key)
+    #[inline]
+    fn link_after(&mut self, a: usize, b: usize) {
+        // insert `b` after `a` in a non-empty circular list
+        let an = self.entries[a].next;
+        self.entries[b].prev = a;
+        self.entries[b].next = an;
+        self.entries[an].prev = b;
+        self.entries[a].next = b;
     }
 
+    #[inline]
+    fn link_new_node(&mut self, slot: usize) {
+        let hand = self.clock_hand;
+        if hand == NULL {
+            // first element → points to itself
+            self.entries[slot].prev = slot;
+            self.entries[slot].next = slot;
+            self.clock_hand = slot;
+        } else {
+            // insert after the hand (LRU)
+            self.link_after(hand, slot);
+        }
+    }
+
+    #[inline]
+    fn unlink(&mut self, slot: usize) {
+        let p = self.entries[slot].prev;
+        let n = self.entries[slot].next;
+
+        if p == slot && n == slot {
+            self.clock_hand = NULL;
+        } else {
+            self.entries[p].next = n;
+            self.entries[n].prev = p;
+            if self.clock_hand == slot {
+                // stay at LRU position, second-oldest becomes oldest
+                self.clock_hand = p;
+            }
+        }
+
+        self.entries[slot].reset_links();
+    }
+
+    #[inline]
+    fn forward_of(&self, i: usize) -> usize {
+        self.entries[i].next
+    }
+
+    pub fn contains_key(&self, key: &PageCacheKey) -> bool {
+        self.map.contains_key(key)
+    }
+
+    #[inline]
     pub fn insert(&mut self, key: PageCacheKey, value: PageRef) -> Result<(), CacheError> {
         self._insert(key, value, false)
     }
 
-    pub fn insert_ignore_existing(
-        &mut self,
-        key: PageCacheKey,
-        value: PageRef,
-    ) -> Result<(), CacheError> {
+    #[inline]
+    pub fn upsert_page(&mut self, key: PageCacheKey, value: PageRef) -> Result<(), CacheError> {
         self._insert(key, value, true)
     }
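// Aside: a toy sketch of the insert/upsert contract the two public entry points
// above expose — insert is strict (the same key must carry the same page, else
// KeyExists), while upsert_page replaces the stored value in place. `Toy` and
// `ToyError` are hypothetical illustrations of the contract, not this patch's types:
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum ToyError {
    KeyExists,
}

struct Toy(HashMap<usize, &'static str>);

impl Toy {
    fn insert(&mut self, k: usize, v: &'static str) -> Result<(), ToyError> {
        if self.0.contains_key(&k) {
            return Err(ToyError::KeyExists); // strict: never silently replace
        }
        self.0.insert(k, v);
        Ok(())
    }
    fn upsert(&mut self, k: usize, v: &'static str) {
        self.0.insert(k, v); // update-in-place semantics of upsert_page
    }
}

fn main() {
    let mut t = Toy(HashMap::new());
    assert!(t.insert(1, "a").is_ok());
    assert_eq!(t.insert(1, "b"), Err(ToyError::KeyExists));
    t.upsert(1, "b");
    assert_eq!(t.0[&1], "b");
}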
 
@@ -107,484 +214,547 @@ impl PageCache {
         &mut self,
         key: PageCacheKey,
         value: PageRef,
-        ignore_exists: bool,
+        update_in_place: bool,
     ) -> Result<(), CacheError> {
         trace!("insert(key={:?})", key);
-        // Check first if page already exists in cache
-        let existing_ptr = self.map.borrow().get(&key).copied();
-        if let Some(ptr) = existing_ptr {
-            if !ignore_exists {
-                if let Some(existing_page_ref) = self.get(&key)? {
-                    assert!(
-                        Arc::ptr_eq(&value, &existing_page_ref),
-                        "Attempted to insert different page with same key: {key:?}"
-                    );
-                    return Err(CacheError::KeyExists);
-                }
-            } else {
-                // ignore_exists is called when the existing entry needs to be updated in place
-                unsafe {
-                    let entry = ptr.as_ptr();
-                    (*entry).page = value;
-                }
-                self.unlink(ptr);
-                self.touch(ptr);
+        let slot = self.map.get(&key);
+        if let Some(slot) = slot {
+            let p = self.entries[slot]
+                .page
+                .as_ref()
+                .expect("slot must have a page");
+
+            if !p.is_loaded() && !p.is_locked() {
+                // evict, then continue with fresh insert
+                self._delete(key, true)?;
+                let slot_index = self.find_free_slot()?;
+                let entry = &mut self.entries[slot_index];
+                entry.key = key;
+                entry.page = Some(value);
+                entry.clear_ref();
+                self.map.insert(key, slot_index);
+                self.link_new_node(slot_index);
                 return Ok(());
             }
-        }
+            let existing = &mut self.entries[slot];
+            existing.bump_ref();
+            if update_in_place {
+                existing.page = Some(value);
+                return Ok(());
+            } else {
+                turso_assert!(
+                    Arc::ptr_eq(existing.page.as_ref().unwrap(), &value),
+                    "Attempted to insert different page with same key: {key:?}"
+                );
+                return Err(CacheError::KeyExists);
+            }
+        }
 
         // Key doesn't exist, proceed with new entry
         self.make_room_for(1)?;
-        let entry = Box::new(PageCacheEntry {
-            key,
-            next: None,
-            prev: None,
-            page: value,
-        });
-        let ptr_raw = Box::into_raw(entry);
-        let ptr = unsafe { NonNull::new_unchecked(ptr_raw) };
-        self.touch(ptr);
-        self.map.borrow_mut().insert(key, ptr);
+        let slot_index = self.find_free_slot()?;
+        let entry = &mut self.entries[slot_index];
+        turso_assert!(entry.page.is_none(), "page must be None in free slot");
+        entry.key = key;
+        entry.page = Some(value);
+        // SIEVE ref bit starts cleared, will be set on first access
+        entry.clear_ref();
+        self.map.insert(key, slot_index);
+        self.link_new_node(slot_index);
 
         Ok(())
     }
 
+    fn find_free_slot(&mut self) -> Result<usize, CacheError> {
+        let slot = self.freelist.pop().ok_or_else(|| {
+            CacheError::InternalError("No free slots available after make_room_for".into())
+        })?;
+        #[cfg(debug_assertions)]
+        {
+            turso_assert!(
+                self.entries[slot].page.is_none(),
+                "allocating non-free slot {}",
+                slot
+            );
+        }
+        turso_assert!(
+            self.entries[slot].next == NULL && self.entries[slot].prev == NULL,
+            "freelist slot {} has non-NULL links",
+            slot
+        );
+        Ok(slot)
+    }
+
+    fn _delete(&mut self, key: PageCacheKey, clean_page: bool) -> Result<(), CacheError> {
+        if !self.contains_key(&key) {
+            return Ok(());
+        }
+        let slot_idx = self
+            .map
+            .get(&key)
+            .ok_or_else(|| CacheError::InternalError("Key exists but not found in map".into()))?;
+        let entry = self.entries[slot_idx]
+            .page
+            .as_ref()
+            .expect("page in map was None")
+            .clone();
+        if entry.is_locked() {
+            return Err(CacheError::Locked {
+                pgno: entry.get().id,
+            });
+        }
+        if entry.is_dirty() {
+            return Err(CacheError::Dirty {
+                pgno: entry.get().id,
+            });
+        }
+        if entry.is_pinned() {
+            return Err(CacheError::Pinned {
+                pgno: entry.get().id,
+            });
+        }
+        if clean_page {
+            entry.clear_loaded();
+            let _ = entry.get().contents.take();
+        }
+        // unlink from circular list and advance hand if needed
+        self.unlink(slot_idx);
+        self.map.remove(&key);
+        let e = &mut self.entries[slot_idx];
+        e.page = None;
+        e.clear_ref();
+        e.reset_links();
+        self.freelist.push(slot_idx);
+        Ok(())
+    }
+
+    #[inline]
+    /// Deletes a page from the cache
     pub fn delete(&mut self, key: PageCacheKey) -> Result<(), CacheError> {
         trace!("cache_delete(key={:?})", key);
         self._delete(key, true)
     }
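// Aside: _delete above refuses to evict pages that are locked, dirty, or pinned,
// and reports which guard tripped via a typed error. A condensed, runnable sketch
// of that guard chain; `ToyPage` and `Guard` are illustrative, not the patch's types:
#[derive(Debug, PartialEq)]
enum Guard {
    Locked { pgno: usize },
    Dirty { pgno: usize },
    Pinned { pgno: usize },
}

struct ToyPage {
    id: usize,
    locked: bool,
    dirty: bool,
    pins: u32,
}

fn can_evict(p: &ToyPage) -> Result<(), Guard> {
    // same priority order as _delete: locked, then dirty, then pinned
    if p.locked {
        return Err(Guard::Locked { pgno: p.id });
    }
    if p.dirty {
        return Err(Guard::Dirty { pgno: p.id });
    }
    if p.pins > 0 {
        return Err(Guard::Pinned { pgno: p.id });
    }
    Ok(())
}

fn main() {
    let p = ToyPage { id: 7, locked: false, dirty: true, pins: 0 };
    // the dirty guard fires because the lock check passed first
    assert_eq!(can_evict(&p), Err(Guard::Dirty { pgno: 7 }));
}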
 
-    // Returns Ok if key is not found
-    pub fn _delete(&mut self, key: PageCacheKey, clean_page: bool) -> Result<(), CacheError> {
-        if !self.contains_key(&key) {
-            return Ok(());
-        }
-
-        let ptr = *self.map.borrow().get(&key).unwrap();
-
-        // Try to detach from LRU list first, can fail
-        self.detach(ptr, clean_page)?;
-        let ptr = self.map.borrow_mut().remove(&key).unwrap();
-        unsafe {
-            let _ = Box::from_raw(ptr.as_ptr());
+    #[inline]
+    pub fn get(&mut self, key: &PageCacheKey) -> crate::Result<Option<PageRef>> {
+        let Some(slot) = self.map.get(key) else {
+            return Ok(None);
         };
-        Ok(())
-    }
-
-    fn get_ptr(&mut self, key: &PageCacheKey) -> Option<NonNull<PageCacheEntry>> {
-        let m = self.map.borrow_mut();
-        let ptr = m.get(key);
-        ptr.copied()
-    }
-
-    pub fn get(&mut self, key: &PageCacheKey) -> Result<Option<PageRef>, CacheError> {
-        if let Some(page) = self.peek(key, true) {
-            // Because we can abort a read_page completion, this means a page can be in the cache but be unloaded and unlocked.
-            // However, if we do not evict that page from the page cache, we will return an unloaded page later which will trigger
-            // assertions later on. This is worsened by the fact that page cache is not per `Statement`, so you can abort a completion
-            // in one Statement, and trigger some error in the next one if we don't evict the page here.
-            if !page.is_loaded() && !page.is_locked() {
-                self.delete(*key)?;
-                Ok(None)
-            } else {
-                Ok(Some(page))
-            }
-        } else {
-            Ok(None)
+        // Because we can abort a read_page completion, this means a page can be in the cache but be unloaded and unlocked.
+        // However, if we do not evict that page from the page cache, we will return an unloaded page later which will trigger
+        // assertions later on. This is worsened by the fact that page cache is not per `Statement`, so you can abort a completion
+        // in one Statement, and trigger some error in the next one if we don't evict the page here.
+        let entry = &mut self.entries[slot];
+        let page = entry
+            .page
+            .as_ref()
+            .expect("page in the map to exist")
+            .clone();
+        if !page.is_loaded() && !page.is_locked() {
+            self.delete(*key)?;
+            return Ok(None);
         }
+        entry.bump_ref();
+        Ok(Some(page))
     }
 
-    /// Get page without promoting entry
+    #[inline]
     pub fn peek(&mut self, key: &PageCacheKey, touch: bool) -> Option<PageRef> {
         trace!("cache_get(key={:?})", key);
-        let mut ptr = self.get_ptr(key)?;
-        let page = unsafe { ptr.as_mut().page.clone() };
+        let slot = self.map.get(key)?;
+        let entry = &mut self.entries[slot];
+        let page = entry.page.as_ref()?.clone();
         if touch {
-            self.unlink(ptr);
-            self.touch(ptr);
+            entry.bump_ref();
        }
        Some(page)
    }
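// Aside: unlike the old LRU, `get` and `peek(touch=true)` above never relink the
// entry; they only bump a small saturating counter, which is what makes hits
// O(1) with no list surgery. A minimal sketch of that counter's behavior,
// mirroring the REF_MAX constant defined earlier:
const REF_MAX: u8 = 3;

fn bump(r: u8) -> u8 {
    std::cmp::min(r + 1, REF_MAX)
}

fn main() {
    let mut r = 0u8;
    for _ in 0..10 {
        r = bump(r); // a hot page saturates instead of growing without bound
    }
    assert_eq!(r, REF_MAX);
    // an eviction sweep must pass this entry REF_MAX times before it hits zero
    let mut sweeps = 0u8;
    while r != 0 {
        r = r.saturating_sub(1);
        sweeps += 1;
    }
    assert_eq!(sweeps, REF_MAX);
}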
 
-    // To match SQLite behavior, just set capacity and try to shrink as much as possible.
-    // In case of failure, the caller should request further evictions (e.g. after I/O).
-    pub fn resize(&mut self, capacity: usize) -> CacheResizeResult {
-        let new_map = self.map.borrow().rehash(capacity);
-        self.map.replace(new_map);
-        self.capacity = capacity;
-        match self.make_room_for(0) {
-            Ok(_) => CacheResizeResult::Done,
-            Err(_) => CacheResizeResult::PendingEvictions,
+    /// Resizes the cache to a new capacity.
+    /// If shrinking, attempts to evict pages.
+    /// If growing, simply increases capacity.
+    pub fn resize(&mut self, new_cap: usize) -> CacheResizeResult {
+        if new_cap == self.capacity {
+            return CacheResizeResult::Done;
         }
-    }
-
-    fn _detach(
-        &mut self,
-        mut entry: NonNull<PageCacheEntry>,
-        clean_page: bool,
-        allow_detach_pinned: bool,
-    ) -> Result<(), CacheError> {
-        let entry_mut = unsafe { entry.as_mut() };
-        if entry_mut.page.is_locked() {
-            return Err(CacheError::Locked {
-                pgno: entry_mut.page.get().id,
-            });
+        if new_cap < self.len() {
+            let need = self.len() - new_cap;
+            let mut evicted = 0;
+            while evicted < need {
+                match self.make_room_for(1) {
+                    Ok(()) => evicted += 1,
+                    Err(CacheError::Full) => return CacheResizeResult::PendingEvictions,
+                    Err(_) => return CacheResizeResult::PendingEvictions,
+                }
+            }
         }
-        if entry_mut.page.is_dirty() {
-            return Err(CacheError::Dirty {
-                pgno: entry_mut.page.get().id,
-            });
+        assert!(new_cap > 0);
+        // Collect survivors starting from hand, one full cycle
+        struct Payload {
+            key: PageCacheKey,
+            page: PageRef,
+            ref_bit: u8,
         }
-        if entry_mut.page.is_pinned() && !allow_detach_pinned {
-            return Err(CacheError::Pinned {
-                pgno: entry_mut.page.get().id,
-            });
-        }
-
-        if clean_page {
-            entry_mut.page.clear_loaded();
-            debug!("clean(page={})", entry_mut.page.get().id);
-            let _ = entry_mut.page.get().contents.take();
-        }
-        self.unlink(entry);
-        Ok(())
-    }
-
-    fn detach(
-        &mut self,
-        entry: NonNull<PageCacheEntry>,
-        clean_page: bool,
-    ) -> Result<(), CacheError> {
-        self._detach(entry, clean_page, false)
-    }
-
-    fn detach_even_if_pinned(
-        &mut self,
-        entry: NonNull<PageCacheEntry>,
-        clean_page: bool,
-    ) -> Result<(), CacheError> {
-        self._detach(entry, clean_page, true)
-    }
-
-    fn unlink(&mut self, mut entry: NonNull<PageCacheEntry>) {
-        let (next, prev) = unsafe {
-            let c = entry.as_mut();
-            let next = c.next;
-            let prev = c.prev;
-            c.prev = None;
-            c.next = None;
-            (next, prev)
+        let survivors: Vec<Payload> = {
+            let mut v = Vec::with_capacity(self.len());
+            let start = self.clock_hand;
+            if start != NULL {
+                let mut cur = start;
+                let mut seen = 0usize;
+                loop {
+                    let e = &self.entries[cur];
+                    if let Some(ref p) = e.page {
+                        v.push(Payload {
+                            key: e.key,
+                            page: p.clone(),
+                            ref_bit: e.ref_bit,
+                        });
+                        seen += 1;
+                    }
+                    cur = e.next;
+                    if cur == start || seen >= self.len() {
+                        break;
+                    }
+                }
+            }
+            v
         };
+        // Rebuild storage
+        self.entries.resize(new_cap, PageCacheEntry::empty());
+        self.capacity = new_cap;
+        let mut new_map = PageHashMap::new(new_cap);
 
-        match (prev, next) {
-            (None, None) => {
-                self.head.replace(None);
-                self.tail.replace(None);
-            }
-            (None, Some(mut n)) => {
-                unsafe { n.as_mut().prev = None };
-                self.head.borrow_mut().replace(n);
-            }
-            (Some(mut p), None) => {
-                unsafe { p.as_mut().next = None };
-                self.tail = RefCell::new(Some(p));
-            }
-            (Some(mut p), Some(mut n)) => unsafe {
-                let p_mut = p.as_mut();
-                p_mut.next = Some(n);
-                let n_mut = n.as_mut();
-                n_mut.prev = Some(p);
-            },
-        };
-    }
-
-    /// inserts into head, assuming we detached first
-    fn touch(&mut self, mut entry: NonNull<PageCacheEntry>) {
-        if let Some(mut head) = *self.head.borrow_mut() {
-            unsafe {
-                entry.as_mut().next.replace(head);
-                let head = head.as_mut();
-                head.prev = Some(entry);
-            }
+        let used = survivors.len().min(new_cap);
+        for (i, item) in survivors.iter().enumerate().take(used) {
+            let e = &mut self.entries[i];
+            e.key = item.key;
+            e.page = Some(item.page.clone());
+            e.ref_bit = item.ref_bit;
+            // link circularly to neighbors by index
+            let prev = if i == 0 { used - 1 } else { i - 1 };
+            let next = if i + 1 == used { 0 } else { i + 1 };
+            e.prev = prev;
+            e.next = next;
+            new_map.insert(item.key, i);
+        }
+        self.map = new_map;
+        // hand points to slot 0 if there are survivors, else NULL
+        self.clock_hand = if used > 0 { 0 } else { NULL };
+        // rebuild freelist
+        self.freelist.clear();
+        for i in (used..new_cap).rev() {
+            self.freelist.push(i);
        }
-        if self.tail.borrow().is_none() {
-            self.tail.borrow_mut().replace(entry);
-        }
-        self.head.borrow_mut().replace(entry);
+        CacheResizeResult::Done
     }
 
+    /// Ensures at least `n` free slots are available.
+    ///
+    /// Uses the SIEVE algorithm to evict pages if necessary:
+    /// - Start at the tail (LRU position).
+    /// - If the page is marked, decrement the mark.
+    /// - If the page's mark was already CLEAR, evict it.
+    /// - If the page is unevictable (dirty/locked/pinned), continue the sweep.
+    /// In other words, pages with ref_bit > 0 are given a second chance by decrementing
+    /// their ref_bit and leaving them in place; only pages with ref_bit == 0 are evicted.
+    /// We never relocate nodes during sweeping.
+    /// Because the list is circular, `tail.next == head` and `head.prev == tail`.
+    ///
+    /// Returns `CacheError::Full` if not enough pages can be evicted.
     pub fn make_room_for(&mut self, n: usize) -> Result<(), CacheError> {
         if n > self.capacity {
             return Err(CacheError::Full);
         }
-
-        let len = self.len();
-        let available = self.capacity.saturating_sub(len);
-        if n <= available && len <= self.capacity {
+        let available = self.capacity - self.len();
+        if n <= available {
             return Ok(());
         }
-        let tail = self.tail.borrow().ok_or_else(|| {
-            CacheError::InternalError(format!(
-                "Page cache of len {} expected to have a tail pointer",
-                self.len()
-            ))
-        })?;
+        let mut need = n - available;
+        let mut examined = 0usize;
+        let max_examinations = self.len().saturating_mul(REF_MAX as usize + 1);
 
-        // Handle len > capacity, too
-        let available = self.capacity.saturating_sub(len);
-        let x = n.saturating_sub(available);
-        let mut need_to_evict = x.saturating_add(len.saturating_sub(self.capacity));
+        let mut cur = self.clock_hand;
+        if cur == NULL || cur >= self.capacity || self.entries[cur].page.is_none() {
+            return Err(CacheError::Full);
+        }
 
-        let mut current_opt = Some(tail);
-        while need_to_evict > 0 && current_opt.is_some() {
-            let current = current_opt.unwrap();
-            let entry = unsafe { current.as_ref() };
-            // Pick prev before modifying entry
-            current_opt = entry.prev;
-            match self.delete(entry.key) {
-                Err(_) => {}
-                Ok(_) => need_to_evict -= 1,
+        while need > 0 && examined < max_examinations {
+            // compute the next candidate before mutating anything
+            let next = self.forward_of(cur);
+
+            let evictable_and_clear = {
+                let e = &mut self.entries[cur];
+                if let Some(ref p) = e.page {
+                    if p.is_dirty() || p.is_locked() || p.is_pinned() {
+                        examined += 1;
+                        false
+                    } else if e.ref_bit == CLEAR {
+                        true
+                    } else {
+                        e.decrement_ref();
+                        examined += 1;
+                        false
+                    }
+                } else {
+                    examined += 1;
+                    false
+                }
+            };
+
+            if evictable_and_clear {
+                // Evict the current slot, then continue from the next candidate in sweep direction
+                self.evict_slot(cur, true)?;
+                need -= 1;
+                examined = 0;
+
+                // move on; if the ring became empty, self.clock_hand may be NULL
+                cur = if next == cur { self.clock_hand } else { next };
+                if cur == NULL {
+                    if need == 0 {
+                        break;
+                    }
+                    return Err(CacheError::Full);
+                }
+            } else {
+                // keep sweeping
+                cur = next;
             }
         }
-
-        match need_to_evict > 0 {
-            true => Err(CacheError::Full),
-            false => Ok(()),
+        self.clock_hand = cur;
+        if need > 0 {
+            return Err(CacheError::Full);
         }
+        Ok(())
     }
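// Aside: a condensed, self-contained simulation of the second-chance sweep that
// make_room_for performs — marked entries are decremented and skipped, and only
// an entry at zero is evicted. Dirty/locked/pinned checks are omitted, and this
// sketch relocates with Vec::remove for brevity (the real cache never relocates):
fn evict_one(ring: &mut Vec<(char, u8)>, hand: &mut usize) -> char {
    loop {
        let (name, ref_bit) = ring[*hand];
        if ref_bit == 0 {
            let victim = name;
            ring.remove(*hand);
            if *hand >= ring.len() {
                *hand = 0; // wrap after removing the last slot
            }
            return victim;
        }
        ring[*hand].1 = ref_bit - 1; // second chance: decrement and move on
        *hand = (*hand + 1) % ring.len();
    }
}

fn main() {
    // 'a' was touched twice, 'b' once, 'c' never
    let mut ring = vec![('a', 2u8), ('b', 1), ('c', 0)];
    let mut hand = 0;
    assert_eq!(evict_one(&mut ring, &mut hand), 'c');
    // the first sweep decremented 'a' and 'b' in passing, so 'b' goes next
    assert_eq!(evict_one(&mut ring, &mut hand), 'b');
    assert_eq!(ring, vec![('a', 0)]);
}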
 
     pub fn clear(&mut self) -> Result<(), CacheError> {
-        let mut current = *self.head.borrow();
-        while let Some(current_entry) = current {
-            unsafe {
-                self.map.borrow_mut().remove(&current_entry.as_ref().key);
+        for e in self.entries.iter() {
+            if let Some(ref p) = e.page {
+                if p.is_dirty() {
+                    return Err(CacheError::Dirty { pgno: p.get().id });
+                }
+                p.clear_loaded();
+                let _ = p.get().contents.take();
             }
-            let next = unsafe { current_entry.as_ref().next };
-            self.detach_even_if_pinned(current_entry, true)?;
-            unsafe {
-                assert!(!current_entry.as_ref().page.is_dirty());
-            }
-            unsafe {
-                let _ = Box::from_raw(current_entry.as_ptr());
-            };
-            current = next;
         }
-        let _ = self.head.take();
-        let _ = self.tail.take();
+        self.entries.fill(PageCacheEntry::empty());
+        self.map.clear();
+        self.clock_hand = NULL;
+        self.freelist.clear();
+        for i in (0..self.capacity).rev() {
+            self.freelist.push(i);
+        }
+        Ok(())
+    }
+
+    #[inline]
+    /// preconditions: slot contains Some(page) and is clean/unlocked/unpinned
+    fn evict_slot(&mut self, slot: usize, clean_page: bool) -> Result<(), CacheError> {
+        let key = self.entries[slot].key;
+        if clean_page {
+            if let Some(ref p) = self.entries[slot].page {
+                p.clear_loaded();
+                let _ = p.get().contents.take();
+            }
+        }
+        // unlink will advance the hand if it pointed to `slot`
+        self.unlink(slot);
+        let _ = self.map.remove(&key);
+
+        let e = &mut self.entries[slot];
+        e.page = None;
+        e.clear_ref();
+        e.reset_links();
+        self.freelist.push(slot);
 
-        assert!(self.head.borrow().is_none());
-        assert!(self.tail.borrow().is_none());
-        assert!(self.map.borrow().is_empty());
         Ok(())
     }
 
     /// Removes all pages from the cache with pgno greater than len
     pub fn truncate(&mut self, len: usize) -> Result<(), CacheError> {
-        let head_ptr = *self.head.borrow();
-        let mut current = head_ptr;
-        while let Some(node) = current {
-            let node_ref = unsafe { node.as_ref() };
-
-            current = node_ref.next;
-            if node_ref.key.pgno <= len {
-                continue;
-            }
-
-            self.map.borrow_mut().remove(&node_ref.key);
-            turso_assert!(!node_ref.page.is_dirty(), "page must be clean");
-            turso_assert!(!node_ref.page.is_locked(), "page must be unlocked");
-            turso_assert!(!node_ref.page.is_pinned(), "page must be unpinned");
-            self.detach(node, true)?;
-
-            unsafe {
-                let _ = Box::from_raw(node.as_ptr());
-            }
+        let keys_to_delete: Vec<PageCacheKey> = {
+            self.entries
+                .iter()
+                .filter_map(|entry| {
+                    entry.page.as_ref().and({
+                        if entry.key.0 > len {
+                            Some(entry.key)
+                        } else {
+                            None
+                        }
+                    })
+                })
+                .collect()
+        };
+        for key in keys_to_delete.iter() {
+            self.delete(*key)?;
         }
         Ok(())
     }
 
     pub fn print(&self) {
-        tracing::debug!("page_cache_len={}", self.map.borrow().len());
-        let head_ptr = *self.head.borrow();
-        let mut current = head_ptr;
-        while let Some(node) = current {
-            unsafe {
+        tracing::debug!("page_cache_len={}", self.map.len());
+        let entries = &self.entries;
+
+        for (i, entry) in entries.iter().enumerate() {
+            if let Some(ref page) = entry.page {
                 tracing::debug!(
-                    "page={:?}, flags={}, pin_count={}",
-                    node.as_ref().key,
-                    node.as_ref().page.get().flags.load(Ordering::SeqCst),
-                    node.as_ref().page.get().pin_count.load(Ordering::SeqCst),
+                    "slot={}, page={:?}, flags={}, pin_count={}, ref_bit={:?}",
+                    i,
+                    entry.key,
+                    page.get().flags.load(Ordering::Relaxed),
+                    page.get().pin_count.load(Ordering::Relaxed),
+                    entry.ref_bit,
                 );
-                let node_ref = node.as_ref();
-                current = node_ref.next;
             }
         }
     }
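// Aside: truncate above uses a collect-then-delete shape so no entry is unlinked
// while the slab is still being iterated. A toy sketch of the same idea over a
// plain map; the types here are illustrative, not the patch's:
use std::collections::BTreeMap;

fn truncate(pages: &mut BTreeMap<usize, &'static str>, len: usize) {
    // phase 1: collect doomed keys without mutating
    let doomed: Vec<usize> = pages.keys().copied().filter(|&pgno| pgno > len).collect();
    // phase 2: delete them (the real cache routes this through delete())
    for pgno in doomed {
        pages.remove(&pgno);
    }
}

fn main() {
    let mut pages: BTreeMap<usize, &'static str> =
        [(1, "root"), (2, "leaf"), (5, "overflow")].into_iter().collect();
    truncate(&mut pages, 2);
    assert_eq!(pages.keys().copied().collect::<Vec<_>>(), vec![1, 2]);
}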
 
     #[cfg(test)]
     pub fn keys(&mut self) -> Vec<PageCacheKey> {
-        let mut this_keys = Vec::new();
-        let head_ptr = *self.head.borrow();
-        let mut current = head_ptr;
-        while let Some(node) = current {
-            unsafe {
-                this_keys.push(node.as_ref().key);
-                let node_ref = node.as_ref();
-                current = node_ref.next;
+        let mut keys = Vec::with_capacity(self.len());
+        let entries = &self.entries;
+        for entry in entries.iter() {
+            if entry.page.is_none() {
+                continue;
             }
+            keys.push(entry.key);
         }
-        this_keys
+        keys
     }
 
     pub fn len(&self) -> usize {
-        self.map.borrow().len()
+        self.map.len()
     }
 
     pub fn capacity(&self) -> usize {
         self.capacity
     }
 
-    #[cfg(test)]
-    fn get_entry_ptr(&self, key: &PageCacheKey) -> Option<NonNull<PageCacheEntry>> {
-        self.map.borrow().get(key).copied()
-    }
-
-    #[cfg(test)]
-    fn verify_list_integrity(&self) {
-        let map_len = self.map.borrow().len();
-        let head_ptr = *self.head.borrow();
-        let tail_ptr: Option<NonNull<PageCacheEntry>> = *self.tail.borrow();
-
-        if map_len == 0 {
-            assert!(head_ptr.is_none(), "Head should be None when map is empty");
-            assert!(tail_ptr.is_none(), "Tail should be None when map is empty");
-            return;
-        }
-
-        assert!(
-            head_ptr.is_some(),
-            "Head should be Some when map is not empty"
-        );
-        assert!(
-            tail_ptr.is_some(),
-            "Tail should be Some when map is not empty"
-        );
-
-        unsafe {
-            assert!(
-                head_ptr.unwrap().as_ref().prev.is_none(),
-                "Head's prev pointer mismatch"
-            );
-        }
-
-        unsafe {
-            assert!(
-                tail_ptr.unwrap().as_ref().next.is_none(),
-                "Tail's next pointer mismatch"
-            );
-        }
-
-        // Forward traversal
-        let mut forward_count = 0;
-        let mut current = head_ptr;
-        let mut last_ptr: Option<NonNull<PageCacheEntry>> = None;
-        while let Some(node) = current {
-            forward_count += 1;
-            unsafe {
-                let node_ref = node.as_ref();
-                assert_eq!(
-                    node_ref.prev, last_ptr,
-                    "Backward pointer mismatch during forward traversal for key {:?}",
-                    node_ref.key
-                );
-                assert!(
-                    self.map.borrow().contains_key(&node_ref.key),
-                    "Node key {:?} not found in map during forward traversal",
-                    node_ref.key
-                );
-                assert_eq!(
-                    self.map.borrow().get(&node_ref.key).copied(),
-                    Some(node),
-                    "Map pointer mismatch for key {:?}",
-                    node_ref.key
-                );
-
-                last_ptr = Some(node);
-                current = node_ref.next;
-            }
-
-            if forward_count > map_len + 5 {
-                panic!(
-                    "Infinite loop suspected in forward integrity check. Size {map_len}, count {forward_count}"
-                );
-            }
-        }
-        assert_eq!(
-            forward_count, map_len,
-            "Forward count mismatch (counted {forward_count}, map has {map_len})"
-        );
-        assert_eq!(
-            tail_ptr, last_ptr,
-            "Tail pointer mismatch after forward traversal"
-        );
-
-        // Backward traversal
-        let mut backward_count = 0;
-        current = tail_ptr;
-        last_ptr = None;
-        while let Some(node) = current {
-            backward_count += 1;
-            unsafe {
-                let node_ref = node.as_ref();
-                assert_eq!(
-                    node_ref.next, last_ptr,
-                    "Forward pointer mismatch during backward traversal for key {:?}",
-                    node_ref.key
-                );
-                assert!(
-                    self.map.borrow().contains_key(&node_ref.key),
-                    "Node key {:?} not found in map during backward traversal",
-                    node_ref.key
-                );
-
-                last_ptr = Some(node);
-                current = node_ref.prev;
-            }
-            if backward_count > map_len + 5 {
-                panic!(
-                    "Infinite loop suspected in backward integrity check. 
Size {map_len}, count {backward_count}" - ); - } - } - assert_eq!( - backward_count, map_len, - "Backward count mismatch (counted {backward_count}, map has {map_len})" - ); - assert_eq!( - head_ptr, last_ptr, - "Head pointer mismatch after backward traversal" - ); - } - pub fn unset_dirty_all_pages(&mut self) { - for node in self.map.borrow_mut().iter_mut() { - unsafe { - let entry = node.value.as_mut(); - entry.page.clear_dirty() - }; + let entries = &self.entries; + for entry in entries.iter() { + if entry.page.is_none() { + continue; + } + entry.page.as_ref().unwrap().clear_dirty(); } } + + #[cfg(test)] + fn verify_cache_integrity(&self) { + let map = &self.map; + let hand = self.clock_hand; + + if hand == NULL { + assert_eq!(map.len(), 0, "map not empty but list is empty"); + } else { + // 0 = unseen, 1 = freelist, 2 = in list + let mut seen = vec![0u8; self.capacity]; + // Walk exactly map.len steps from hand, ensure circular closure + let mut cnt = 0usize; + let mut cur = hand; + loop { + let e = &self.entries[cur]; + + assert!(e.page.is_some(), "list points to empty slot {cur}"); + assert_eq!(seen[cur], 0, "slot {cur} appears twice (list/freelist)"); + seen[cur] = 2; + cnt += 1; + + let n = e.next; + let p = e.prev; + assert_eq!(self.entries[n].prev, cur, "broken next.prev at {cur}"); + assert_eq!(self.entries[p].next, cur, "broken prev.next at {cur}"); + + if n == hand { + break; + } + assert!(cnt <= map.len(), "cycle longer than map len"); + cur = n; + } + assert_eq!( + cnt, + map.len(), + "list length {} != map size {}", + cnt, + map.len() + ); + + // Map bijection + for node in map.iter() { + let slot = node.slot_index; + assert!( + self.entries[slot].page.is_some(), + "map points to empty slot" + ); + assert_eq!(self.entries[slot].key, node.key, "map key mismatch"); + assert_eq!(seen[slot], 2, "map slot {slot} not on list"); + } + + // Freelist disjointness + let mut free_count = 0usize; + for &s in &self.freelist { + free_count += 1; + assert_eq!(seen[s], 0, "slot {s} in both freelist and list"); + assert!( + self.entries[s].page.is_none(), + "freelist slot {s} has a page" + ); + assert_eq!(self.entries[s].next, NULL, "freelist slot {s} next != NULL"); + assert_eq!(self.entries[s].prev, NULL, "freelist slot {s} prev != NULL"); + seen[s] = 1; + } + + // No orphans: every slot is in list or freelist or unused beyond capacity + let orphans = seen.iter().filter(|&&v| v == 0).count(); + assert_eq!( + free_count + cnt + orphans, + self.capacity, + "free {} + list {} + orphans {} != capacity {}", + free_count, + cnt, + orphans, + self.capacity + ); + // In practice orphans==0; assertion above detects mismatches. 
+        }
+
+        // Hand sanity
+        if hand != NULL {
+            assert!(hand < self.capacity, "clock_hand out of bounds");
+            assert!(
+                self.entries[hand].page.is_some(),
+                "clock_hand points to empty slot"
+            );
+        }
+    }
+
+    #[cfg(test)]
+    fn slot_of(&self, key: &PageCacheKey) -> Option<usize> {
+        self.map.get(key)
+    }
+    #[cfg(test)]
+    fn ref_of(&self, key: &PageCacheKey) -> Option<u8> {
+        self.slot_of(key).map(|i| self.entries[i].ref_bit)
+    }
 }
 
-impl Default for DumbLruPageCache {
+impl Default for PageCache {
     fn default() -> Self {
-        DumbLruPageCache::new(
+        PageCache::new(
             DEFAULT_PAGE_CACHE_SIZE_IN_PAGES_MAKE_ME_SMALLER_ONCE_WAL_SPILL_IS_IMPLEMENTED,
         )
     }
 }
 
+#[derive(Clone)]
+struct HashMapNode {
+    key: PageCacheKey,
+    slot_index: usize,
+}
+
+#[allow(dead_code)]
 impl PageHashMap {
     pub fn new(capacity: usize) -> PageHashMap {
         PageHashMap {
@@ -594,26 +764,20 @@ impl PageHashMap {
         }
     }
 
-    /// Insert page into hashmap. If a key was already in the hashmap, then update it and return the previous value.
-    pub fn insert(
-        &mut self,
-        key: PageCacheKey,
-        value: NonNull<PageCacheEntry>,
-    ) -> Option<NonNull<PageCacheEntry>> {
+    pub fn insert(&mut self, key: PageCacheKey, slot_index: usize) {
         let bucket = self.hash(&key);
         let bucket = &mut self.buckets[bucket];
         let mut idx = 0;
         while let Some(node) = bucket.get_mut(idx) {
             if node.key == key {
-                let prev = node.value;
-                node.value = value;
-                return Some(prev);
+                node.slot_index = slot_index;
+                node.key = key;
+                return;
             }
             idx += 1;
         }
-        bucket.push(HashMapNode { key, value });
+        bucket.push(HashMapNode { key, slot_index });
         self.size += 1;
-        None
     }
 
     pub fn contains_key(&self, key: &PageCacheKey) -> bool {
@@ -621,20 +785,18 @@ impl PageHashMap {
         self.buckets[bucket].iter().any(|node| node.key == *key)
     }
 
-    pub fn get(&self, key: &PageCacheKey) -> Option<&NonNull<PageCacheEntry>> {
+    pub fn get(&self, key: &PageCacheKey) -> Option<usize> {
         let bucket = self.hash(key);
         let bucket = &self.buckets[bucket];
-        let mut idx = 0;
-        while let Some(node) = bucket.get(idx) {
+        for node in bucket {
             if node.key == *key {
-                return Some(&node.value);
+                return Some(node.slot_index);
             }
-            idx += 1;
         }
         None
     }
 
-    pub fn remove(&mut self, key: &PageCacheKey) -> Option<NonNull<PageCacheEntry>> {
+    pub fn remove(&mut self, key: &PageCacheKey) -> Option<usize> {
         let bucket = self.hash(key);
         let bucket = &mut self.buckets[bucket];
         let mut idx = 0;
@@ -649,38 +811,37 @@ impl PageHashMap {
         } else {
             let v = bucket.remove(idx);
             self.size -= 1;
-            Some(v.value)
+            Some(v.slot_index)
         }
     }
 
-    pub fn is_empty(&self) -> bool {
-        self.size == 0
+    pub fn clear(&mut self) {
+        for bucket in &mut self.buckets {
+            bucket.clear();
+        }
+        self.size = 0;
     }
 
     pub fn len(&self) -> usize {
         self.size
     }
 
-    pub fn iter(&self) -> impl Iterator<Item = &HashMapNode> {
-        self.buckets.iter().flat_map(|bucket| bucket.iter())
-    }
-
-    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut HashMapNode> {
-        self.buckets.iter_mut().flat_map(|bucket| bucket.iter_mut())
+    fn iter(&self) -> impl Iterator<Item = &HashMapNode> {
+        self.buckets.iter().flat_map(|b| b.iter())
     }
 
     fn hash(&self, key: &PageCacheKey) -> usize {
         if self.capacity.is_power_of_two() {
-            key.pgno & (self.capacity - 1)
+            key.0 & (self.capacity - 1)
         } else {
-            key.pgno % self.capacity
+            key.0 % self.capacity
        }
    }
 
-    pub fn rehash(&self, new_capacity: usize) -> PageHashMap {
+    fn rehash(&self, new_capacity: usize) -> PageHashMap {
         let mut new_hash_map = PageHashMap::new(new_capacity);
         for node in self.iter() {
-            new_hash_map.insert(node.key, node.value);
+            new_hash_map.insert(node.key, node.slot_index);
         }
         new_hash_map
     }
 }
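// Aside: the hash function above maps a page number to a bucket with a bitmask
// when the bucket count is a power of two — the same value `%` would produce,
// without a division. A quick check of that equivalence:
fn main() {
    let capacity = 1024usize; // power of two
    assert!(capacity.is_power_of_two());
    for pgno in [0usize, 1, 1023, 1024, 123_457] {
        assert_eq!(pgno & (capacity - 1), pgno % capacity);
    }
    // non-power-of-two capacities must fall back to `%`
    let odd = 1000usize;
    assert!(!odd.is_power_of_two());
    assert_ne!(1024 & (odd - 1), 1024 % odd); // the mask would be wrong here
}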
 
@@ -692,31 +853,20 @@ mod tests {
     use crate::storage::page_cache::CacheError;
     use crate::storage::pager::{Page, PageRef};
     use crate::storage::sqlite3_ondisk::PageContent;
-    use crate::{BufferPool, IO};
-    use std::ptr::NonNull;
-    use std::sync::OnceLock;
-    use std::{num::NonZeroUsize, sync::Arc};
-
-    use lru::LruCache;
     use rand_chacha::{
         rand_core::{RngCore, SeedableRng},
         ChaCha8Rng,
     };
+    use std::sync::Arc;
 
     fn create_key(id: usize) -> PageCacheKey {
         PageCacheKey::new(id)
     }
 
-    static TEST_BUFFER_POOL: OnceLock<Arc<BufferPool>> = OnceLock::new();
-
-    #[allow(clippy::arc_with_non_send_sync)]
     pub fn page_with_content(page_id: usize) -> PageRef {
         let page = Arc::new(Page::new(page_id));
         {
-            let mock_io = Arc::new(crate::PlatformIO::new().unwrap()) as Arc<dyn IO>;
-            let pool = TEST_BUFFER_POOL
-                .get_or_init(|| BufferPool::begin_init(&mock_io, BufferPool::TEST_ARENA_SIZE));
-            let buffer = pool.allocate(4096);
+            let buffer = crate::Buffer::new_temporary(4096);
             let page_content = PageContent {
                 offset: 0,
                 buffer: Arc::new(buffer),
@@ -728,37 +878,19 @@ mod tests {
         page
     }
 
-    fn insert_page(cache: &mut DumbLruPageCache, id: usize) -> PageCacheKey {
+    fn insert_page(cache: &mut PageCache, id: usize) -> PageCacheKey {
         let key = create_key(id);
         let page = page_with_content(id);
         assert!(cache.insert(key, page).is_ok());
         key
     }
 
-    fn page_has_content(page: &PageRef) -> bool {
-        page.is_loaded() && page.get().contents.is_some()
-    }
-
-    fn insert_and_get_entry(
-        cache: &mut DumbLruPageCache,
-        id: usize,
-    ) -> (PageCacheKey, NonNull<PageCacheEntry>) {
-        let key = create_key(id);
-        let page = page_with_content(id);
-        assert!(cache.insert(key, page).is_ok());
-        let entry = cache.get_ptr(&key).expect("Entry should exist");
-        (key, entry)
-    }
-
     #[test]
-    fn test_detach_only_element() {
-        let mut cache = DumbLruPageCache::default();
+    fn test_delete_only_element() {
+        let mut cache = PageCache::default();
         let key1 = insert_page(&mut cache, 1);
-        cache.verify_list_integrity();
+        cache.verify_cache_integrity();
         assert_eq!(cache.len(), 1);
-        assert!(cache.head.borrow().is_some());
-        assert!(cache.tail.borrow().is_some());
-        assert_eq!(*cache.head.borrow(), *cache.tail.borrow());
 
         assert!(cache.delete(key1).is_ok());
 
@@ -768,326 +900,401 @@
             "Length should be 0 after deleting only element"
         );
         assert!(
-            cache.map.borrow().get(&key1).is_none(),
-            "Map should not contain key after delete"
-        );
-        assert!(cache.head.borrow().is_none(), "Head should be None");
-        assert!(cache.tail.borrow().is_none(), "Tail should be None");
-        cache.verify_list_integrity();
-    }
-
-    #[test]
-    fn test_detach_head() {
-        let mut cache = DumbLruPageCache::default();
-        let _key1 = insert_page(&mut cache, 1); // Tail
-        let key2 = insert_page(&mut cache, 2); // Middle
-        let key3 = insert_page(&mut cache, 3); // Head
-        cache.verify_list_integrity();
-        assert_eq!(cache.len(), 3);
-
-        let head_ptr_before = cache.head.borrow().unwrap();
-        assert_eq!(
-            unsafe { &head_ptr_before.as_ref().key },
-            &key3,
-            "Initial head check"
-        );
-
-        assert!(cache.delete(key3).is_ok());
-
-        assert_eq!(cache.len(), 2, "Length should be 2 after deleting head");
-        assert!(
-            cache.map.borrow().get(&key3).is_none(),
-            "Map should not contain deleted head key"
-        );
-        cache.verify_list_integrity();
-
-        let new_head_ptr = cache.head.borrow().unwrap();
-        assert_eq!(
-            unsafe { &new_head_ptr.as_ref().key },
-            &key2,
-            "New head should be key2"
-        );
-        assert!(
-            unsafe { new_head_ptr.as_ref().prev.is_none() },
-            "New head's prev should be None"
-        );
-
-        let tail_ptr = cache.tail.borrow().unwrap();
-        assert_eq!(
-            unsafe { new_head_ptr.as_ref().next },
-            Some(tail_ptr),
-            "New head's next should point to tail (key1)"
+
!cache.contains_key(&key1), + "Cache should not contain key after delete" ); + cache.verify_cache_integrity(); } #[test] fn test_detach_tail() { - let mut cache = DumbLruPageCache::default(); - let key1 = insert_page(&mut cache, 1); // Tail - let key2 = insert_page(&mut cache, 2); // Middle - let _key3 = insert_page(&mut cache, 3); // Head - cache.verify_list_integrity(); + let mut cache = PageCache::default(); + let key1 = insert_page(&mut cache, 1); // tail + let _key2 = insert_page(&mut cache, 2); // middle + let _key3 = insert_page(&mut cache, 3); // head + cache.verify_cache_integrity(); assert_eq!(cache.len(), 3); - let tail_ptr_before = cache.tail.borrow().unwrap(); - assert_eq!( - unsafe { &tail_ptr_before.as_ref().key }, - &key1, - "Initial tail check" - ); - - assert!(cache.delete(key1).is_ok()); // Delete tail - + // Delete tail + assert!(cache.delete(key1).is_ok()); assert_eq!(cache.len(), 2, "Length should be 2 after deleting tail"); assert!( - cache.map.borrow().get(&key1).is_none(), - "Map should not contain deleted tail key" - ); - cache.verify_list_integrity(); - - let new_tail_ptr = cache.tail.borrow().unwrap(); - assert_eq!( - unsafe { &new_tail_ptr.as_ref().key }, - &key2, - "New tail should be key2" - ); - assert!( - unsafe { new_tail_ptr.as_ref().next.is_none() }, - "New tail's next should be None" - ); - - let head_ptr = cache.head.borrow().unwrap(); - assert_eq!( - unsafe { head_ptr.as_ref().prev }, - None, - "Head's prev should point to new tail (key2)" - ); - assert_eq!( - unsafe { head_ptr.as_ref().next }, - Some(new_tail_ptr), - "Head's next should point to new tail (key2)" - ); - assert_eq!( - unsafe { new_tail_ptr.as_ref().next }, - None, - "Double check new tail's next is None" + !cache.contains_key(&key1), + "Cache should not contain deleted tail key" ); + cache.verify_cache_integrity(); } #[test] - fn test_detach_middle() { - let mut cache = DumbLruPageCache::default(); - let key1 = insert_page(&mut cache, 1); // Tail - let key2 = insert_page(&mut cache, 2); // Middle - let key3 = insert_page(&mut cache, 3); // Middle - let _key4 = insert_page(&mut cache, 4); // Head - cache.verify_list_integrity(); - assert_eq!(cache.len(), 4); - - let head_ptr_before = cache.head.borrow().unwrap(); - let tail_ptr_before = cache.tail.borrow().unwrap(); - - assert!(cache.delete(key2).is_ok()); // Detach a middle element (key2) - - assert_eq!(cache.len(), 3, "Length should be 3 after deleting middle"); - assert!( - cache.map.borrow().get(&key2).is_none(), - "Map should not contain deleted middle key2" - ); - cache.verify_list_integrity(); - - // Check neighbors - let key1_ptr = cache.get_entry_ptr(&key1).expect("Key1 should still exist"); - let key3_ptr = cache.get_entry_ptr(&key3).expect("Key3 should still exist"); - assert_eq!( - unsafe { key3_ptr.as_ref().next }, - Some(key1_ptr), - "Key3's next should point to key1" - ); - assert_eq!( - unsafe { key1_ptr.as_ref().prev }, - Some(key3_ptr), - "Key1's prev should point to key2" - ); - - assert_eq!( - cache.head.borrow().unwrap(), - head_ptr_before, - "Head should remain key4" - ); - assert_eq!( - cache.tail.borrow().unwrap(), - tail_ptr_before, - "Tail should remain key1" - ); - } - - #[test] - #[ignore = "for now let's not track active refs"] - fn test_detach_via_delete() { - let mut cache = DumbLruPageCache::default(); + fn test_insert_existing_key_updates_in_place() { + let mut cache = PageCache::default(); let key1 = create_key(1); - let page1 = page_with_content(1); - assert!(cache.insert(key1, page1.clone()).is_ok()); 
- assert!(page_has_content(&page1)); - cache.verify_list_integrity(); + let page1_v1 = page_with_content(1); + let page1_v2 = page1_v1.clone(); // Same Arc instance - let result = cache.delete(key1); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), CacheError::ActiveRefs); + assert!(cache.insert(key1, page1_v1.clone()).is_ok()); assert_eq!(cache.len(), 1); - drop(page1); + // Inserting same page instance should return KeyExists error + let result = cache.insert(key1, page1_v2.clone()); + assert_eq!(result, Err(CacheError::KeyExists)); + assert_eq!(cache.len(), 1); - assert!(cache.delete(key1).is_ok()); - assert_eq!(cache.len(), 0); - cache.verify_list_integrity(); + // Verify the page is still accessible + assert!(cache.get(&key1).unwrap().is_some()); + cache.verify_cache_integrity(); } #[test] #[should_panic(expected = "Attempted to insert different page with same key")] - fn test_insert_existing_key_fail() { - let mut cache = DumbLruPageCache::default(); + fn test_insert_different_page_same_key_panics() { + let mut cache = PageCache::default(); let key1 = create_key(1); let page1_v1 = page_with_content(1); - let page1_v2 = page_with_content(1); + let page1_v2 = page_with_content(1); // Different Arc instance + assert!(cache.insert(key1, page1_v1.clone()).is_ok()); assert_eq!(cache.len(), 1); - cache.verify_list_integrity(); - let _ = cache.insert(key1, page1_v2.clone()); // Panic + cache.verify_cache_integrity(); + + // This should panic because it's a different page instance + let _ = cache.insert(key1, page1_v2.clone()); } #[test] - fn test_detach_nonexistent_key() { - let mut cache = DumbLruPageCache::default(); + fn test_delete_nonexistent_key() { + let mut cache = PageCache::default(); let key_nonexist = create_key(99); - assert!(cache.delete(key_nonexist).is_ok()); // no-op + // Deleting non-existent key should be a no-op (returns Ok) + assert!(cache.delete(key_nonexist).is_ok()); + assert_eq!(cache.len(), 0); + cache.verify_cache_integrity(); } #[test] fn test_page_cache_evict() { - let mut cache = DumbLruPageCache::new(1); + let mut cache = PageCache::new(1); let key1 = insert_page(&mut cache, 1); let key2 = insert_page(&mut cache, 2); + + // With capacity=1, inserting key2 should evict key1 assert_eq!(cache.get(&key2).unwrap().unwrap().get().id, 2); + assert!( + cache.get(&key1).unwrap().is_none(), + "key1 should be evicted" + ); + + // key2 should still be accessible + assert_eq!(cache.get(&key2).unwrap().unwrap().get().id, 2); + assert!( + cache.get(&key1).unwrap().is_none(), + "capacity=1 should have evicted the older page" + ); + cache.verify_cache_integrity(); + } + + #[test] + fn test_sieve_touch_non_tail_does_not_affect_immediate_eviction() { + // SIEVE algorithm: touching a non-tail page marks it but doesn't move it. + // The tail (if unmarked) will still be the first eviction candidate. 
+ + // Insert 1,2,3 -> order [3,2,1] with tail=1 + let mut cache = PageCache::new(3); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + let key3 = insert_page(&mut cache, 3); + + // Touch key2 (middle) to mark it with reference bit + assert!(cache.get(&key2).unwrap().is_some()); + + // Insert 4: SIEVE examines tail (key1, unmarked) -> evict key1 + let key4 = insert_page(&mut cache, 4); + + assert!( + cache.get(&key2).unwrap().is_some(), + "marked non-tail (key2) should remain" + ); + assert!(cache.get(&key3).unwrap().is_some(), "key3 should remain"); + assert!( + cache.get(&key4).unwrap().is_some(), + "key4 was just inserted" + ); + assert!( + cache.get(&key1).unwrap().is_none(), + "unmarked tail (key1) should be evicted first" + ); + cache.verify_cache_integrity(); + } + + #[test] + fn clock_second_chance_decrements_tail_then_evicts_next() { + let mut cache = PageCache::new(3); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + let key3 = insert_page(&mut cache, 3); + assert_eq!(cache.len(), 3); + assert!(cache.get(&key1).unwrap().is_some()); + let key4 = insert_page(&mut cache, 4); + assert!(cache.get(&key1).unwrap().is_some(), "key1 should survive"); + assert!(cache.get(&key2).unwrap().is_some(), "key2 remains"); + assert!(cache.get(&key4).unwrap().is_some(), "key4 inserted"); + assert!( + cache.get(&key3).unwrap().is_none(), + "key3 (next after tail) evicted" + ); + assert_eq!(cache.len(), 3); + cache.verify_cache_integrity(); + } + + #[test] + fn test_delete_locked_page() { + let mut cache = PageCache::default(); + let key = insert_page(&mut cache, 1); + let page = cache.get(&key).unwrap().unwrap(); + page.set_locked(); + + assert_eq!(cache.delete(key), Err(CacheError::Locked { pgno: 1 })); + assert_eq!(cache.len(), 1, "Locked page should not be deleted"); + cache.verify_cache_integrity(); + } + + #[test] + fn test_delete_dirty_page() { + let mut cache = PageCache::default(); + let key = insert_page(&mut cache, 1); + let page = cache.get(&key).unwrap().unwrap(); + page.set_dirty(); + + assert_eq!(cache.delete(key), Err(CacheError::Dirty { pgno: 1 })); + assert_eq!(cache.len(), 1, "Dirty page should not be deleted"); + cache.verify_cache_integrity(); + } + + #[test] + fn test_delete_pinned_page() { + let mut cache = PageCache::default(); + let key = insert_page(&mut cache, 1); + let page = cache.get(&key).unwrap().unwrap(); + page.pin(); + + assert_eq!(cache.delete(key), Err(CacheError::Pinned { pgno: 1 })); + assert_eq!(cache.len(), 1, "Pinned page should not be deleted"); + cache.verify_cache_integrity(); + } + + #[test] + fn test_make_room_for_with_dirty_pages() { + let mut cache = PageCache::new(2); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + + // Make both pages dirty (unevictable) + cache.get(&key1).unwrap().unwrap().set_dirty(); + cache.get(&key2).unwrap().unwrap().set_dirty(); + + // Try to insert a third page, should fail because can't evict dirty pages + let key3 = create_key(3); + let page3 = page_with_content(3); + let result = cache.insert(key3, page3); + + assert_eq!(result, Err(CacheError::Full)); + assert_eq!(cache.len(), 2); + cache.verify_cache_integrity(); + } + + #[test] + fn test_page_cache_insert_and_get() { + let mut cache = PageCache::default(); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + + assert_eq!(cache.get(&key1).unwrap().unwrap().get().id, 1); + assert_eq!(cache.get(&key2).unwrap().unwrap().get().id, 2); + 
cache.verify_cache_integrity(); + } + + #[test] + fn test_page_cache_over_capacity() { + // Test SIEVE eviction when exceeding capacity + let mut cache = PageCache::new(2); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + + // Insert 3: tail (key1, unmarked) should be evicted + let key3 = insert_page(&mut cache, 3); + + assert_eq!(cache.len(), 2); + assert!(cache.get(&key2).unwrap().is_some(), "key2 should remain"); + assert!(cache.get(&key3).unwrap().is_some(), "key3 just inserted"); + assert!( + cache.get(&key1).unwrap().is_none(), + "key1 (oldest, unmarked) should be evicted" + ); + cache.verify_cache_integrity(); + } + + #[test] + fn test_page_cache_delete() { + let mut cache = PageCache::default(); + let key1 = insert_page(&mut cache, 1); + + assert!(cache.delete(key1).is_ok()); assert!(cache.get(&key1).unwrap().is_none()); - } - - #[test] - fn test_detach_locked_page() { - let mut cache = DumbLruPageCache::default(); - let (_, mut entry) = insert_and_get_entry(&mut cache, 1); - unsafe { entry.as_mut().page.set_locked() }; - assert_eq!( - cache.detach(entry, false), - Err(CacheError::Locked { pgno: 1 }) - ); - cache.verify_list_integrity(); - } - - #[test] - fn test_detach_dirty_page() { - let mut cache = DumbLruPageCache::default(); - let (key, mut entry) = insert_and_get_entry(&mut cache, 1); - cache.get(&key).expect("Page should exist"); - unsafe { entry.as_mut().page.set_dirty() }; - assert_eq!( - cache.detach(entry, false), - Err(CacheError::Dirty { pgno: 1 }) - ); - cache.verify_list_integrity(); - } - - #[test] - #[ignore = "for now let's not track active refs"] - fn test_detach_with_active_reference_clean() { - let mut cache = DumbLruPageCache::default(); - let (key, entry) = insert_and_get_entry(&mut cache, 1); - let page_ref = cache.get(&key); - assert_eq!(cache.detach(entry, true), Err(CacheError::ActiveRefs)); - drop(page_ref); - cache.verify_list_integrity(); - } - - #[test] - #[ignore = "for now let's not track active refs"] - fn test_detach_with_active_reference_no_clean() { - let mut cache = DumbLruPageCache::default(); - let (key, entry) = insert_and_get_entry(&mut cache, 1); - cache.get(&key).expect("Page should exist"); - assert!(cache.detach(entry, false).is_ok()); - assert!(cache.map.borrow_mut().remove(&key).is_some()); - cache.verify_list_integrity(); - } - - #[test] - fn test_detach_without_cleaning() { - let mut cache = DumbLruPageCache::default(); - let (key, entry) = insert_and_get_entry(&mut cache, 1); - assert!(cache.detach(entry, false).is_ok()); - assert!(cache.map.borrow_mut().remove(&key).is_some()); - cache.verify_list_integrity(); assert_eq!(cache.len(), 0); + cache.verify_cache_integrity(); } #[test] - fn test_detach_with_cleaning() { - let mut cache = DumbLruPageCache::default(); - let (key, entry) = insert_and_get_entry(&mut cache, 1); - let page = cache.get(&key).unwrap().expect("Page should exist"); - assert!(page_has_content(&page)); - drop(page); - assert!(cache.detach(entry, true).is_ok()); - // Internal testing: the page is still in map, so we use it to check content - let page = cache.peek(&key, false).expect("Page should exist in map"); - assert!(!page_has_content(&page)); - assert!(cache.map.borrow_mut().remove(&key).is_some()); - cache.verify_list_integrity(); + fn test_page_cache_clear() { + let mut cache = PageCache::default(); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + + assert!(cache.clear().is_ok()); + assert!(cache.get(&key1).unwrap().is_none()); + 
assert!(cache.get(&key2).unwrap().is_none()); + assert_eq!(cache.len(), 0); + cache.verify_cache_integrity(); } #[test] - fn test_detach_only_element_preserves_integrity() { - let mut cache = DumbLruPageCache::default(); - let (_, entry) = insert_and_get_entry(&mut cache, 1); - assert!(cache.detach(entry, false).is_ok()); - assert!( - cache.head.borrow().is_none(), - "Head should be None after detaching only element" - ); - assert!( - cache.tail.borrow().is_none(), - "Tail should be None after detaching only element" - ); + fn test_resize_smaller_success() { + let mut cache = PageCache::default(); + for i in 1..=5 { + let _ = insert_page(&mut cache, i); + } + assert_eq!(cache.len(), 5); + + let result = cache.resize(3); + assert_eq!(result, CacheResizeResult::Done); + assert_eq!(cache.len(), 3); + assert_eq!(cache.capacity(), 3); + + // Should still be able to insert after resize + assert!(cache.insert(create_key(6), page_with_content(6)).is_ok()); + assert_eq!(cache.len(), 3); // One was evicted to make room + cache.verify_cache_integrity(); } #[test] fn test_detach_with_multiple_pages() { - let mut cache = DumbLruPageCache::default(); - let (key1, _) = insert_and_get_entry(&mut cache, 1); - let (key2, entry2) = insert_and_get_entry(&mut cache, 2); - let (key3, _) = insert_and_get_entry(&mut cache, 3); - let head_key = unsafe { cache.head.borrow().unwrap().as_ref().key }; - let tail_key = unsafe { cache.tail.borrow().unwrap().as_ref().key }; - assert_eq!(head_key, key3, "Head should be key3"); - assert_eq!(tail_key, key1, "Tail should be key1"); - assert!(cache.detach(entry2, false).is_ok()); - let head_entry = unsafe { cache.head.borrow().unwrap().as_ref() }; - let tail_entry = unsafe { cache.tail.borrow().unwrap().as_ref() }; - assert_eq!(head_entry.key, key3, "Head should still be key3"); - assert_eq!(tail_entry.key, key1, "Tail should still be key1"); - assert_eq!( - unsafe { head_entry.next.unwrap().as_ref().key }, - key1, - "Head's next should point to tail after middle element detached" + let mut cache = PageCache::default(); + let _key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + let _key3 = insert_page(&mut cache, 3); + + // Delete middle element (key2) + assert!(cache.delete(key2).is_ok()); + + // Verify structure after deletion + assert_eq!(cache.len(), 2); + assert!(!cache.contains_key(&key2)); + + cache.verify_cache_integrity(); + } + + #[test] + fn test_delete_multiple_elements() { + let mut cache = PageCache::default(); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + let key3 = insert_page(&mut cache, 3); + cache.verify_cache_integrity(); + assert_eq!(cache.len(), 3); + + // Delete head (key3) + assert!(cache.delete(key3).is_ok()); + assert_eq!(cache.len(), 2, "Length should be 2 after deleting head"); + assert!( + !cache.contains_key(&key3), + "Cache should not contain deleted head key" ); - assert_eq!( - unsafe { tail_entry.prev.unwrap().as_ref().key }, - key3, - "Tail's prev should point to head after middle element detached" - ); - assert!(cache.map.borrow_mut().remove(&key2).is_some()); - cache.verify_list_integrity(); + cache.verify_cache_integrity(); + + // Delete tail (key1) + assert!(cache.delete(key1).is_ok()); + assert_eq!(cache.len(), 1, "Length should be 1 after deleting two"); + cache.verify_cache_integrity(); + + // Delete last element (key2) + assert!(cache.delete(key2).is_ok()); + assert_eq!(cache.len(), 0, "Length should be 0 after deleting all"); + cache.verify_cache_integrity(); + } + + 
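The tests in this module exercise a CLOCK/SIEVE-style sweep with multi-level reference counts (GCLOCK): each access bumps a per-slot counter (saturating at REF_MAX), and eviction walks a circular slot list, decrementing non-zero counters as a "second chance" until it finds a zero-ref evictable slot. Below is a minimal, self-contained sketch of that loop; `Slot` and `sweep` are illustrative stand-ins, not the crate's actual types, and the real cache additionally maintains a key-to-slot map and dirty/locked/pinned state.

```rust
const REF_MAX: u8 = 3; // access counter saturates here, mirroring the tests' REF_MAX

struct Slot {
    ref_count: u8,   // bumped by get() / peek(touch = true), drained by the sweep
    evictable: bool, // false for dirty, locked, or pinned pages
}

/// Walk the circular slot list from `hand`: a slot with a non-zero counter is
/// decremented and skipped (its second chance); the first zero-ref evictable
/// slot is the victim. Returning `None` corresponds to `CacheError::Full`.
fn sweep(slots: &mut [Slot], hand: &mut usize) -> Option<usize> {
    if slots.is_empty() {
        return None;
    }
    // Each slot is visited at most REF_MAX + 1 times before its credit drains,
    // so the walk is bounded even when every page is hot.
    for _ in 0..slots.len() * (REF_MAX as usize + 1) {
        let i = *hand;
        *hand = (*hand + 1) % slots.len();
        if !slots[i].evictable {
            continue;
        }
        if slots[i].ref_count > 0 {
            slots[i].ref_count -= 1;
        } else {
            return Some(i);
        }
    }
    None // every slot is unevictable
}
```

The surrounding tests assert exactly these outcomes against the real `PageCache`: marked pages survive one extra pass, unmarked tails go first, and a cache full of dirty pages reports `CacheError::Full`.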
#[test] + fn test_resize_larger() { + let mut cache = PageCache::new(2); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + assert_eq!(cache.len(), 2); + + let result = cache.resize(5); + assert_eq!(result, CacheResizeResult::Done); + assert_eq!(cache.len(), 2); + assert_eq!(cache.capacity(), 5); + + // Existing pages should still be accessible + assert!(cache.get(&key1).is_ok_and(|p| p.is_some())); + assert!(cache.get(&key2).is_ok_and(|p| p.is_some())); + + // Now we should be able to add 3 more without eviction + for i in 3..=5 { + let _ = insert_page(&mut cache, i); + } + assert_eq!(cache.len(), 5); + cache.verify_cache_integrity(); + } + + #[test] + fn test_resize_same_capacity() { + let mut cache = PageCache::new(3); + for i in 1..=3 { + let _ = insert_page(&mut cache, i); + } + + let result = cache.resize(3); + assert_eq!(result, CacheResizeResult::Done); + assert_eq!(cache.len(), 3); + assert_eq!(cache.capacity(), 3); + cache.verify_cache_integrity(); + } + + #[test] + fn test_truncate_page_cache() { + let mut cache = PageCache::new(10); + let _ = insert_page(&mut cache, 1); + let _ = insert_page(&mut cache, 4); + let _ = insert_page(&mut cache, 8); + let _ = insert_page(&mut cache, 10); + + // Truncate to keep only pages <= 4 + cache.truncate(4).unwrap(); + + assert!(cache.contains_key(&PageCacheKey(1))); + assert!(cache.contains_key(&PageCacheKey(4))); + assert!(!cache.contains_key(&PageCacheKey(8))); + assert!(!cache.contains_key(&PageCacheKey(10))); + assert_eq!(cache.len(), 2); + assert_eq!(cache.capacity(), 10); + cache.verify_cache_integrity(); + } + + #[test] + fn test_truncate_page_cache_remove_all() { + let mut cache = PageCache::new(10); + let _ = insert_page(&mut cache, 8); + let _ = insert_page(&mut cache, 10); + + // Truncate to 4 (removes all pages since they're > 4) + cache.truncate(4).unwrap(); + + assert!(!cache.contains_key(&PageCacheKey(8))); + assert!(!cache.contains_key(&PageCacheKey(10))); + assert_eq!(cache.len(), 0); + assert_eq!(cache.capacity(), 10); + cache.verify_cache_integrity(); } #[test] @@ -1097,245 +1304,135 @@ mod tests { .unwrap() .as_secs(); let mut rng = ChaCha8Rng::seed_from_u64(seed); - tracing::info!("super seed: {}", seed); + tracing::info!("fuzz test seed: {}", seed); + let max_pages = 10; - let mut cache = DumbLruPageCache::new(10); - let mut lru = LruCache::new(NonZeroUsize::new(10).unwrap()); + let mut cache = PageCache::new(10); + let mut reference_map = std::collections::HashMap::new(); for _ in 0..10000 { cache.print(); - for (key, _) in &lru { - tracing::debug!("lru_page={:?}", key); - } + match rng.next_u64() % 2 { 0 => { - // add + // Insert operation let id_page = rng.next_u64() % max_pages; let key = PageCacheKey::new(id_page as usize); #[allow(clippy::arc_with_non_send_sync)] let page = Arc::new(Page::new(id_page as usize)); + if cache.peek(&key, false).is_some() { - continue; // skip duplicate page ids + continue; // Skip duplicate page ids } + tracing::debug!("inserting page {:?}", key); match cache.insert(key, page.clone()) { - Err(CacheError::Full | CacheError::ActiveRefs) => {} // Ignore + Err(CacheError::Full | CacheError::ActiveRefs) => {} // Expected, ignore Err(err) => { - // Any other error should fail the test - panic!("Cache insertion failed: {err:?}"); + panic!("Cache insertion failed unexpectedly: {err:?}"); } Ok(_) => { - lru.push(key, page); + reference_map.insert(key, page); + // Clean up reference_map if cache evicted something + if cache.len() < reference_map.len() { + 
reference_map.retain(|k, _| cache.contains_key(k)); + } } } - assert!(cache.len() <= 10); + assert!(cache.len() <= 10, "Cache size exceeded capacity"); } 1 => { - // remove + // Delete operation let random = rng.next_u64() % 2 == 0; - let key = if random || lru.is_empty() { + let key = if random || reference_map.is_empty() { let id_page: u64 = rng.next_u64() % max_pages; - PageCacheKey::new(id_page as usize) } else { - let i = rng.next_u64() as usize % lru.len(); - let key: PageCacheKey = *lru.iter().nth(i).unwrap().0; - key + let i = rng.next_u64() as usize % reference_map.len(); + *reference_map.keys().nth(i).unwrap() }; + tracing::debug!("removing page {:?}", key); - lru.pop(&key); + reference_map.remove(&key); assert!(cache.delete(key).is_ok()); } _ => unreachable!(), } - compare_to_lru(&mut cache, &lru); - cache.print(); - for (key, _) in &lru { - tracing::debug!("lru_page={:?}", key); - } - cache.verify_list_integrity(); - for (key, page) in &lru { - println!("getting page {key:?}"); - cache.peek(key, false).unwrap(); - assert_eq!(page.get().id, key.pgno); - } - } - } - pub fn compare_to_lru(cache: &mut DumbLruPageCache, lru: &LruCache) { - let this_keys = cache.keys(); - let mut lru_keys = Vec::new(); - for (lru_key, _) in lru { - lru_keys.push(*lru_key); - } - if this_keys != lru_keys { - cache.print(); - for (lru_key, _) in lru { - tracing::debug!("lru_page={:?}", lru_key); + cache.verify_cache_integrity(); + + // Verify all pages in reference_map are in cache + for (key, page) in &reference_map { + let cached_page = cache.peek(key, false).expect("Page should be in cache"); + assert_eq!(cached_page.get().id, key.0); + assert_eq!(page.get().id, key.0); } - assert_eq!(&this_keys, &lru_keys) } } #[test] - fn test_page_cache_insert_and_get() { - let mut cache = DumbLruPageCache::default(); + fn test_peek_without_touch() { + // Test that peek with touch=false doesn't mark pages + let mut cache = PageCache::new(2); let key1 = insert_page(&mut cache, 1); let key2 = insert_page(&mut cache, 2); - assert_eq!(cache.get(&key1).unwrap().unwrap().get().id, 1); - assert_eq!(cache.get(&key2).unwrap().unwrap().get().id, 2); - } - #[test] - fn test_page_cache_over_capacity() { - let mut cache = DumbLruPageCache::new(2); - let key1 = insert_page(&mut cache, 1); - let key2 = insert_page(&mut cache, 2); + // Peek key1 without touching (no ref bit set) + assert!(cache.peek(&key1, false).is_some()); + + // Insert 3: should evict unmarked tail (key1) let key3 = insert_page(&mut cache, 3); - assert!(cache.get(&key1).unwrap().is_none()); - assert_eq!(cache.get(&key2).unwrap().unwrap().get().id, 2); - assert_eq!(cache.get(&key3).unwrap().unwrap().get().id, 3); + + assert!(cache.get(&key2).unwrap().is_some(), "key2 should remain"); + assert!( + cache.get(&key3).unwrap().is_some(), + "key3 was just inserted" + ); + assert!( + cache.get(&key1).unwrap().is_none(), + "key1 should be evicted since peek(false) didn't mark it" + ); + assert_eq!(cache.len(), 2); + cache.verify_cache_integrity(); } #[test] - fn test_page_cache_delete() { - let mut cache = DumbLruPageCache::default(); - let key1 = insert_page(&mut cache, 1); - assert!(cache.delete(key1).is_ok()); - assert!(cache.get(&key1).unwrap().is_none()); - } - - #[test] - fn test_page_cache_clear() { - let mut cache = DumbLruPageCache::default(); + fn test_peek_with_touch() { + // Test that peek with touch=true marks pages for SIEVE + let mut cache = PageCache::new(2); let key1 = insert_page(&mut cache, 1); let key2 = insert_page(&mut cache, 2); - 
assert!(cache.clear().is_ok()); - assert!(cache.get(&key1).unwrap().is_none()); - assert!(cache.get(&key2).unwrap().is_none()); - } - #[test] - fn test_page_cache_insert_sequential() { - let mut cache = DumbLruPageCache::default(); - for i in 0..10000 { - let key = insert_page(&mut cache, i); - assert_eq!(cache.peek(&key, false).unwrap().get().id, i); - } - } + // Peek key1 WITH touching (sets ref bit) + assert!(cache.peek(&key1, true).is_some()); - #[test] - fn test_resize_smaller_success() { - let mut cache = DumbLruPageCache::default(); - for i in 1..=5 { - let _ = insert_page(&mut cache, i); - } - assert_eq!(cache.len(), 5); - let result = cache.resize(3); - assert_eq!(result, CacheResizeResult::Done); - assert_eq!(cache.len(), 3); - assert_eq!(cache.capacity, 3); - assert!(cache.insert(create_key(6), page_with_content(6)).is_ok()); - } + // Insert 3: key1 is marked, so it gets second chance + // key2 becomes new tail and gets evicted + let key3 = insert_page(&mut cache, 3); - #[test] - #[should_panic(expected = "Attempted to insert different page with same key")] - fn test_resize_larger() { - let mut cache = DumbLruPageCache::default(); - let _ = insert_page(&mut cache, 1); - let _ = insert_page(&mut cache, 2); + assert!( + cache.get(&key1).unwrap().is_some(), + "key1 should survive (was marked)" + ); + assert!( + cache.get(&key3).unwrap().is_some(), + "key3 was just inserted" + ); + assert!( + cache.get(&key2).unwrap().is_none(), + "key2 should be evicted after key1's second chance" + ); assert_eq!(cache.len(), 2); - let result = cache.resize(5); - assert_eq!(result, CacheResizeResult::Done); - assert_eq!(cache.len(), 2); - assert_eq!(cache.capacity, 5); - assert!(cache.get(&create_key(1)).unwrap().is_some()); - assert!(cache.get(&create_key(2)).unwrap().is_some()); - for i in 3..=5 { - let _ = insert_page(&mut cache, i); - } - assert_eq!(cache.len(), 5); - // FIXME: For now this will assert because we cannot insert a page with same id but different contents of page. 
- assert!(cache.insert(create_key(4), page_with_content(4)).is_err()); - cache.verify_list_integrity(); + cache.verify_cache_integrity(); } #[test] - #[ignore = "for now let's not track active refs"] - fn test_resize_with_active_references() { - let mut cache = DumbLruPageCache::default(); - let page1 = page_with_content(1); - let page2 = page_with_content(2); - let page3 = page_with_content(3); - assert!(cache.insert(create_key(1), page1.clone()).is_ok()); - assert!(cache.insert(create_key(2), page2.clone()).is_ok()); - assert!(cache.insert(create_key(3), page3.clone()).is_ok()); - assert_eq!(cache.len(), 3); - cache.verify_list_integrity(); - assert_eq!(cache.resize(2), CacheResizeResult::PendingEvictions); - assert_eq!(cache.capacity, 2); - assert_eq!(cache.len(), 3); - drop(page2); - drop(page3); - assert_eq!(cache.resize(1), CacheResizeResult::Done); // Evicted 2 and 3 - assert_eq!(cache.len(), 1); - assert!(cache.insert(create_key(4), page_with_content(4)).is_err()); - cache.verify_list_integrity(); - } - - #[test] - fn test_resize_same_capacity() { - let mut cache = DumbLruPageCache::new(3); - for i in 1..=3 { - let _ = insert_page(&mut cache, i); - } - let result = cache.resize(3); - assert_eq!(result, CacheResizeResult::Done); // no-op - assert_eq!(cache.len(), 3); - assert_eq!(cache.capacity, 3); - cache.verify_list_integrity(); - assert!(cache.insert(create_key(4), page_with_content(4)).is_ok()); - } - - #[test] - fn test_truncate_page_cache() { - let mut cache = DumbLruPageCache::new(10); - let _ = insert_page(&mut cache, 1); - let _ = insert_page(&mut cache, 4); - let _ = insert_page(&mut cache, 8); - let _ = insert_page(&mut cache, 10); - cache.truncate(4).unwrap(); - assert!(cache.contains_key(&PageCacheKey { pgno: 1 })); - assert!(cache.contains_key(&PageCacheKey { pgno: 4 })); - assert!(!cache.contains_key(&PageCacheKey { pgno: 8 })); - assert!(!cache.contains_key(&PageCacheKey { pgno: 10 })); - assert_eq!(cache.len(), 2); - assert_eq!(cache.capacity, 10); - cache.verify_list_integrity(); - assert!(cache.insert(create_key(8), page_with_content(8)).is_ok()); - } - - #[test] - fn test_truncate_page_cache_remove_all() { - let mut cache = DumbLruPageCache::new(10); - let _ = insert_page(&mut cache, 8); - let _ = insert_page(&mut cache, 10); - cache.truncate(4).unwrap(); - assert!(!cache.contains_key(&PageCacheKey { pgno: 8 })); - assert!(!cache.contains_key(&PageCacheKey { pgno: 10 })); - assert_eq!(cache.len(), 0); - assert_eq!(cache.capacity, 10); - cache.verify_list_integrity(); - assert!(cache.insert(create_key(8), page_with_content(8)).is_ok()); - } - - #[test] - #[ignore = "long running test, remove to verify"] + #[ignore = "long running test, remove ignore to verify memory stability"] fn test_clear_memory_stability() { let initial_memory = memory_stats::memory_stats().unwrap().physical_mem; for _ in 0..100000 { - let mut cache = DumbLruPageCache::new(1000); + let mut cache = PageCache::new(1000); for i in 0..1000 { let key = create_key(i); @@ -1348,12 +1445,299 @@ mod tests { } let final_memory = memory_stats::memory_stats().unwrap().physical_mem; - let growth = final_memory.saturating_sub(initial_memory); - println!("Growth: {growth}"); + + println!("Memory growth: {growth} bytes"); assert!( growth < 10_000_000, - "Memory grew by {growth} bytes over 10 cycles" + "Memory grew by {growth} bytes over test cycles (limit: 10MB)", ); } + + #[test] + fn clock_drains_hot_page_within_single_sweep_when_others_are_unevictable() { + // capacity 3: [3(head), 2, 1(tail)] + let mut c 
= PageCache::new(3);
+        let k1 = insert_page(&mut c, 1);
+        let k2 = insert_page(&mut c, 2);
+        let _k3 = insert_page(&mut c, 3);
+
+        // Make k1 hot: bump to Max
+        for _ in 0..3 {
+            assert!(c.get(&k1).unwrap().is_some());
+        }
+        assert!(matches!(c.ref_of(&k1), Some(REF_MAX)));
+
+        // Make other pages unevictable; clock must keep revisiting k1.
+        c.get(&k2).unwrap().unwrap().set_dirty();
+        c.get(&_k3).unwrap().unwrap().set_dirty();
+
+        // Insert 4 -> sweep rotates as needed, draining k1 and evicting it.
+        let _k4 = insert_page(&mut c, 4);
+
+        assert!(
+            c.get(&k1).unwrap().is_none(),
+            "k1 should be evicted after its credit drains"
+        );
+        assert!(c.get(&k2).unwrap().is_some(), "k2 is dirty (unevictable)");
+        assert!(c.get(&_k3).unwrap().is_some(), "k3 is dirty (unevictable)");
+        assert!(c.get(&_k4).unwrap().is_some(), "k4 just inserted");
+        c.verify_cache_integrity();
+    }
+
+    #[test]
+    fn gclock_hot_survives_scan_pages() {
+        let mut c = PageCache::new(4);
+        let _k1 = insert_page(&mut c, 1);
+        let k2 = insert_page(&mut c, 2);
+        let _k3 = insert_page(&mut c, 3);
+        let _k4 = insert_page(&mut c, 4);
+
+        // Make k2 truly hot: three real touches
+        for _ in 0..3 {
+            assert!(c.get(&k2).unwrap().is_some());
+        }
+        assert!(matches!(c.ref_of(&k2), Some(REF_MAX)));
+
+        // Now simulate a scan inserting new pages 5..10 (one-hit wonders).
+        for id in 5..=10 {
+            let _ = insert_page(&mut c, id);
+        }
+
+        // Hot k2 should still be present; most single-hit scan pages should churn.
+        assert!(
+            c.get(&k2).unwrap().is_some(),
+            "hot page should survive scan"
+        );
+        // The earliest single-hit page should be gone.
+        assert!(c.get(&create_key(5)).unwrap().is_none());
+        c.verify_cache_integrity();
+    }
+
+    #[test]
+    fn hand_stays_valid_after_deleting_only_element() {
+        let mut c = PageCache::new(2);
+        let k = insert_page(&mut c, 1);
+        assert!(c.delete(k).is_ok());
+        // Inserting again should not panic and should succeed
+        let _ = insert_page(&mut c, 2);
+        c.verify_cache_integrity();
+    }
+
+    #[test]
+    fn hand_is_reset_after_clear_and_resize() {
+        let mut c = PageCache::new(3);
+        for i in 1..=3 {
+            let _ = insert_page(&mut c, i);
+        }
+        c.clear().unwrap();
+        // No elements; insert should not rely on stale hand
+        let _ = insert_page(&mut c, 10);
+
+        // Resizing up to 4 and back down to 1 should not leave the hand out of bounds
+        assert_eq!(c.resize(4), CacheResizeResult::Done);
+        assert_eq!(c.resize(1), CacheResizeResult::Done);
+        let _ = insert_page(&mut c, 11);
+        c.verify_cache_integrity();
+    }
+
+    #[test]
+    fn resize_preserves_ref_and_recency() {
+        let mut c = PageCache::new(4);
+        let _k1 = insert_page(&mut c, 1);
+        let k2 = insert_page(&mut c, 2);
+        let _k3 = insert_page(&mut c, 3);
+        let _k4 = insert_page(&mut c, 4);
+        // Make k2 hot.
+        for _ in 0..3 {
+            assert!(c.get(&k2).unwrap().is_some());
+        }
+        let r_before = c.ref_of(&k2);
+
+        // Shrink to 3 (one page will be evicted during repack/next insert)
+        assert_eq!(c.resize(3), CacheResizeResult::Done);
+        assert_eq!(c.ref_of(&k2), r_before);
+
+        // Force an eviction; hot k2 should survive more passes.
+ let _ = insert_page(&mut c, 5); + assert!(c.get(&k2).unwrap().is_some()); + c.verify_cache_integrity(); + } + + #[test] + fn test_sieve_second_chance_preserves_marked_page() { + let mut cache = PageCache::new(3); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + let key3 = insert_page(&mut cache, 3); + + // Mark key1 for second chance + assert!(cache.get(&key1).unwrap().is_some()); + + let key4 = insert_page(&mut cache, 4); + // CLOCK sweep from hand: + // - key1 marked -> decrement, continue + // - key3 (MRU) unmarked -> evict + assert!( + cache.get(&key1).unwrap().is_some(), + "key1 had ref bit set, got second chance" + ); + assert!( + cache.get(&key3).unwrap().is_none(), + "key3 (MRU) should be evicted" + ); + assert!(cache.get(&key4).unwrap().is_some(), "key4 just inserted"); + assert!( + cache.get(&key2).unwrap().is_some(), + "key2 (middle) should remain" + ); + cache.verify_cache_integrity(); + } + + #[test] + fn test_clock_sweep_wraps_around() { + // Test that clock hand properly wraps around the circular list + let mut cache = PageCache::new(3); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + let key3 = insert_page(&mut cache, 3); + + // Mark all pages + assert!(cache.get(&key1).unwrap().is_some()); + assert!(cache.get(&key2).unwrap().is_some()); + assert!(cache.get(&key3).unwrap().is_some()); + + // Insert 4: hand will sweep full circle, decrementing all refs + // then sweep again and evict first unmarked page + let key4 = insert_page(&mut cache, 4); + + // One page was evicted after full sweep + assert_eq!(cache.len(), 3); + assert!(cache.get(&key4).unwrap().is_some()); + + // Verify exactly one of the original pages was evicted + let survivors = [key1, key2, key3] + .iter() + .filter(|k| cache.get(k).unwrap().is_some()) + .count(); + assert_eq!(survivors, 2, "Should have 2 survivors from original 3"); + cache.verify_cache_integrity(); + } + + #[test] + fn test_circular_list_single_element() { + let mut cache = PageCache::new(3); + let key1 = insert_page(&mut cache, 1); + + // Single element should point to itself + let slot = cache.slot_of(&key1).unwrap(); + assert_eq!(cache.entries[slot].next, slot); + assert_eq!(cache.entries[slot].prev, slot); + + // Delete single element + assert!(cache.delete(key1).is_ok()); + assert_eq!(cache.clock_hand, NULL); + + // Insert after empty should work + let key2 = insert_page(&mut cache, 2); + let slot2 = cache.slot_of(&key2).unwrap(); + assert_eq!(cache.entries[slot2].next, slot2); + assert_eq!(cache.entries[slot2].prev, slot2); + cache.verify_cache_integrity(); + } + + #[test] + fn test_hand_advances_on_eviction() { + let mut cache = PageCache::new(2); + let _key1 = insert_page(&mut cache, 1); + let _key2 = insert_page(&mut cache, 2); + + // Note initial hand position + let initial_hand = cache.clock_hand; + + // Force eviction + let _key3 = insert_page(&mut cache, 3); + + // Hand should have advanced + let new_hand = cache.clock_hand; + assert_ne!(new_hand, NULL); + // Hand moved during sweep (exact position depends on eviction) + assert!(initial_hand == NULL || new_hand != initial_hand || cache.len() < 2); + cache.verify_cache_integrity(); + } + + #[test] + fn test_multi_level_ref_counting() { + let mut cache = PageCache::new(2); + let key1 = insert_page(&mut cache, 1); + let _key2 = insert_page(&mut cache, 2); + + // Bump key1 to MAX (3 accesses) + for _ in 0..3 { + assert!(cache.get(&key1).unwrap().is_some()); + } + assert_eq!(cache.ref_of(&key1), Some(REF_MAX)); + 
+ // Insert multiple new pages - key1 should survive longer + for i in 3..6 { + let _ = insert_page(&mut cache, i); + } + + // key1 might still be there due to high ref count + // (depends on exact sweep pattern, but it got multiple chances) + cache.verify_cache_integrity(); + } + + #[test] + fn test_resize_maintains_circular_structure() { + let mut cache = PageCache::new(5); + for i in 1..=4 { + let _ = insert_page(&mut cache, i); + } + + // Resize smaller + assert_eq!(cache.resize(2), CacheResizeResult::Done); + assert_eq!(cache.len(), 2); + + // Verify circular structure + if cache.clock_hand != NULL { + let start = cache.clock_hand; + let mut current = start; + let mut count = 0; + loop { + count += 1; + current = cache.entries[current].next; + if current == start { + break; + } + assert!(count <= cache.len(), "Circular list broken after resize"); + } + assert_eq!(count, cache.len()); + } + cache.verify_cache_integrity(); + } + + #[test] + fn test_link_after_correctness() { + let mut cache = PageCache::new(4); + let key1 = insert_page(&mut cache, 1); + let key2 = insert_page(&mut cache, 2); + let key3 = insert_page(&mut cache, 3); + + // Verify circular linkage + let slot1 = cache.slot_of(&key1).unwrap(); + let slot2 = cache.slot_of(&key2).unwrap(); + let slot3 = cache.slot_of(&key3).unwrap(); + + // Should form a circle: 3 -> 2 -> 1 -> 3 (insertion order) + assert_eq!(cache.entries[slot3].next, slot2); + assert_eq!(cache.entries[slot2].next, slot1); + assert_eq!(cache.entries[slot1].next, slot3); + + assert_eq!(cache.entries[slot3].prev, slot1); + assert_eq!(cache.entries[slot2].prev, slot3); + assert_eq!(cache.entries[slot1].prev, slot2); + + cache.verify_cache_integrity(); + } } diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 36196156f..31eb980cd 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -25,7 +25,7 @@ use std::sync::{Arc, Mutex}; use tracing::{instrument, trace, Level}; use super::btree::btree_init_page; -use super::page_cache::{CacheError, CacheResizeResult, DumbLruPageCache, PageCacheKey}; +use super::page_cache::{CacheError, CacheResizeResult, PageCache, PageCacheKey}; use super::sqlite3_ondisk::begin_write_btree_page; use super::wal::CheckpointMode; use crate::storage::encryption::{CipherMode, EncryptionContext, EncryptionKey}; @@ -129,7 +129,7 @@ pub struct PageInner { /// requests unpinning via [Page::unpin], the pin count will still be >0 if the outer /// code path has not yet requested to unpin the page as well. /// - /// Note that [DumbLruPageCache::clear] evicts the pages even if pinned, so as long as + /// Note that [PageCache::clear] evicts the pages even if pinned, so as long as /// we clear the page cache on errors, pins will not 'leak'. pub pin_count: AtomicUsize, /// The WAL frame number this page was loaded from (0 if loaded from main DB file) @@ -464,7 +464,7 @@ pub struct Pager { /// in-memory databases, ephemeral tables and ephemeral indexes do not have a WAL. pub(crate) wal: Option>>, /// A page cache for the database. - page_cache: Arc>, + page_cache: Arc>, /// Buffer pool for temporary data storage. pub buffer_pool: Arc, /// I/O interface for input/output operations. 
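The `pin_count` documentation above describes a nesting contract: concurrent code paths each take their own pin, and a page becomes evictable again only after every pin is released. A minimal sketch of that discipline, with `SketchPage` as an assumed stand-in for the crate's `Page`:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Stand-in for the crate's `Page`; only the pin-count discipline is shown.
struct SketchPage {
    pin_count: AtomicUsize,
}

impl SketchPage {
    /// Pins nest: every code path that needs the page resident takes its own pin.
    fn pin(&self) {
        self.pin_count.fetch_add(1, Ordering::AcqRel);
    }

    /// The page only becomes evictable again once every pinner has unpinned.
    fn unpin(&self) {
        let prev = self.pin_count.fetch_sub(1, Ordering::AcqRel);
        assert!(prev > 0, "unpin without a matching pin");
    }

    /// Normal eviction must skip pinned pages; only a wholesale cache clear
    /// (as the doc above notes for `PageCache::clear`) ignores pins.
    fn is_pinned(&self) -> bool {
        self.pin_count.load(Ordering::Acquire) > 0
    }
}
```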
@@ -564,7 +564,7 @@ impl Pager { db_file: Arc, wal: Option>>, io: Arc, - page_cache: Arc>, + page_cache: Arc>, buffer_pool: Arc, db_state: Arc, init_lock: Arc>, @@ -1012,9 +1012,10 @@ impl Pager { // Give a chance for the allocation to happen elsewhere _ => {} } + } else { + // Give a chance for the allocation to happen elsewhere + io_yield_one!(Completion::new_dummy()); } - // Give a chance for the allocation to happen elsewhere - io_yield_one!(Completion::new_dummy()); } Ok(IOResult::Done(())) } @@ -1125,9 +1126,19 @@ impl Pager { let page_key = PageCacheKey::new(page_idx); if let Some(page) = page_cache.get(&page_key)? { tracing::trace!("read_page(page_idx = {}) = cached", page_idx); + turso_assert!( + page_idx == page.get().id, + "attempted to read page {page_idx} but got page {}", + page.get().id + ); return Ok((page.clone(), None)); } let (page, c) = self.read_page_no_cache(page_idx, None, false)?; + turso_assert!( + page_idx == page.get().id, + "attempted to read page {page_idx} but got page {}", + page.get().id + ); self.cache_insert(page_idx, page.clone(), &mut page_cache)?; Ok((page, Some(c))) } @@ -1153,7 +1164,7 @@ impl Pager { &self, page_idx: usize, page: PageRef, - page_cache: &mut DumbLruPageCache, + page_cache: &mut PageCache, ) -> Result<()> { let page_key = PageCacheKey::new(page_idx); match page_cache.insert(page_key, page.clone()) { @@ -1171,7 +1182,7 @@ impl Pager { tracing::trace!("read_page(page_idx = {})", page_idx); let mut page_cache = self.page_cache.write(); let page_key = PageCacheKey::new(page_idx); - Ok(page_cache.get(&page_key)?) + page_cache.get(&page_key) } /// Get a page from cache only if it matches the target frame @@ -1981,7 +1992,7 @@ impl Pager { trunk_page.get_contents().as_ptr().fill(0); let page_key = PageCacheKey::new(trunk_page.get().id); { - let mut page_cache = self.page_cache.write(); + let page_cache = self.page_cache.read(); turso_assert!( page_cache.contains_key(&page_key), "page {} is not in cache", @@ -2013,7 +2024,7 @@ impl Pager { leaf_page.get_contents().as_ptr().fill(0); let page_key = PageCacheKey::new(leaf_page.get().id); { - let mut page_cache = self.page_cache.write(); + let page_cache = self.page_cache.read(); turso_assert!( page_cache.contains_key(&page_key), "page {} is not in cache", @@ -2090,13 +2101,11 @@ impl Pager { // FIXME: use specific page key for writer instead of max frame, this will make readers not conflict assert!(page.is_dirty()); - cache - .insert_ignore_existing(page_key, page.clone()) - .map_err(|e| { - LimboError::InternalError(format!( - "Failed to insert loaded page {id} into cache: {e:?}" - )) - })?; + cache.upsert_page(page_key, page.clone()).map_err(|e| { + LimboError::InternalError(format!( + "Failed to insert loaded page {id} into cache: {e:?}" + )) + })?; page.set_loaded(); Ok(()) } @@ -2165,16 +2174,27 @@ impl Pager { Ok(IOResult::Done(f(header))) } - pub fn set_encryption_context(&self, cipher_mode: CipherMode, key: &EncryptionKey) { + pub fn is_encryption_ctx_set(&self) -> bool { + self.io_ctx.borrow_mut().encryption_context().is_some() + } + + pub fn set_encryption_context( + &self, + cipher_mode: CipherMode, + key: &EncryptionKey, + ) -> Result<()> { let page_size = self.page_size.get().unwrap().get() as usize; - let encryption_ctx = EncryptionContext::new(cipher_mode, key, page_size).unwrap(); + let encryption_ctx = EncryptionContext::new(cipher_mode, key, page_size)?; { let mut io_ctx = self.io_ctx.borrow_mut(); io_ctx.set_encryption(encryption_ctx); } - let Some(wal) = self.wal.as_ref() else 
{ return }; + let Some(wal) = self.wal.as_ref() else { + return Ok(()); + }; wal.borrow_mut() - .set_io_context(self.io_ctx.borrow().clone()) + .set_io_context(self.io_ctx.borrow().clone()); + Ok(()) } } @@ -2395,14 +2415,14 @@ mod tests { use parking_lot::RwLock; - use crate::storage::page_cache::{DumbLruPageCache, PageCacheKey}; + use crate::storage::page_cache::{PageCache, PageCacheKey}; use super::Page; #[test] fn test_shared_cache() { // ensure cache can be shared between threads - let cache = Arc::new(RwLock::new(DumbLruPageCache::new(10))); + let cache = Arc::new(RwLock::new(PageCache::new(10))); let thread = { let cache = cache.clone(); @@ -2435,7 +2455,7 @@ mod ptrmap_tests { use crate::io::{MemoryIO, OpenFlags, IO}; use crate::storage::buffer_pool::BufferPool; use crate::storage::database::{DatabaseFile, DatabaseStorage}; - use crate::storage::page_cache::DumbLruPageCache; + use crate::storage::page_cache::PageCache; use crate::storage::pager::Pager; use crate::storage::sqlite3_ondisk::PageSize; use crate::storage::wal::{WalFile, WalFileShared}; @@ -2464,7 +2484,7 @@ mod ptrmap_tests { let pages = initial_db_pages + 10; let sz = std::cmp::max(std::cmp::min(pages, 64), pages); let buffer_pool = BufferPool::begin_init(&io, (sz * page_size) as usize); - let page_cache = Arc::new(RwLock::new(DumbLruPageCache::new(sz as usize))); + let page_cache = Arc::new(RwLock::new(PageCache::new(sz as usize))); let wal = Rc::new(RefCell::new(WalFile::new( io.clone(), diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs index 0bc8d1e67..696d10d05 100644 --- a/core/storage/sqlite3_ondisk.rs +++ b/core/storage/sqlite3_ondisk.rs @@ -65,7 +65,8 @@ use crate::types::{RawSlice, RefValue, SerialType, SerialTypeKind, TextRef, Text use crate::{ bail_corrupt_error, turso_assert, CompletionError, File, IOContext, Result, WalFileShared, }; -use std::cell::{Cell, UnsafeCell}; +use parking_lot::RwLock; +use std::cell::Cell; use std::collections::{BTreeMap, HashMap}; use std::mem::MaybeUninit; use std::pin::Pin; @@ -995,17 +996,14 @@ pub fn write_pages_vectored( pager: &Pager, batch: BTreeMap>, done_flag: Arc, + final_write: bool, ) -> Result> { if batch.is_empty() { done_flag.store(true, Ordering::Relaxed); return Ok(Vec::new()); } - // batch item array is already sorted by id, so we just need to find contiguous ranges of page_id's - // to submit as `writev`/write_pages calls. 
-
     let page_sz = pager.page_size.get().expect("page size is not set").get() as usize;
-
     // Count expected number of runs to create the atomic counter we need to track each batch
     let mut run_count = 0;
     let mut prev_id = None;
@@ -1023,26 +1021,21 @@ pub fn write_pages_vectored(
     // Create the atomic counters
     let runs_left = Arc::new(AtomicUsize::new(run_count));
     let done = done_flag.clone();
-    // we know how many runs, but we don't know how many buffers per run, so we can only give an
-    // estimate of the capacity
     const EST_BUFF_CAPACITY: usize = 32;
-    // Iterate through the batch, submitting each run as soon as it ends
-    // We can reuse this across runs without reallocating
     let mut run_bufs = Vec::with_capacity(EST_BUFF_CAPACITY);
     let mut run_start_id: Option<usize> = None;
-    // Iterate through the batch
+    // Track which run we're on to identify the last one
+    let mut current_run = 0;
     let mut iter = batch.iter().peekable();
-    let mut completions = Vec::new();
+
     while let Some((id, item)) = iter.next() {
         // Track the start of the run
         if run_start_id.is_none() {
             run_start_id = Some(*id);
         }
-
-        // Add this page to the current run
         run_bufs.push(item.clone());

         // Check if this is the end of a run
@@ -1052,24 +1045,32 @@
         };
         if is_end_of_run {
+            current_run += 1;
             let start_id = run_start_id.expect("should have a start id");
             let runs_left_cl = runs_left.clone();
             let done_cl = done.clone();
+            // This is the last chunk if it's the last run AND final_write is true
+            let is_last_chunk = current_run == run_count && final_write;
+
             let total_sz = (page_sz * run_bufs.len()) as i32;
-            let c = Completion::new_write(move |res| {
+            let cmp = move |res| {
                 let Ok(res) = res else {
                     return;
                 };
-                // writev calls can sometimes return partial writes, but our `pwritev`
-                // implementation aggregates any partial writes and calls completion with total
                 turso_assert!(total_sz == res, "failed to write expected size");
                 if runs_left_cl.fetch_sub(1, Ordering::AcqRel) == 1 {
                     done_cl.store(true, Ordering::Release);
                 }
-            });
+            };
-            // Submit write operation for this run, decrementing the counter if we error
+            let c = if is_last_chunk {
+                Completion::new_write_linked(cmp)
+            } else {
+                Completion::new_write(cmp)
+            };
+
+            // Submit write operation for this run
             let io_ctx = &pager.io_ctx.borrow();
             match pager.db_file.write_pages(
                 start_id,
@@ -1538,8 +1539,22 @@ pub fn read_varint(buf: &[u8]) -> Result<(u64, usize)> {
             }
         }
     }
-    v = (v << 8) + buf[8] as u64;
-    Ok((v, 9))
+    match buf.get(8) {
+        Some(&c) => {
+            // Values requiring 9 bytes must have a non-zero top 8 bits (value >= 1<<56).
+            // Since the final value is `(v<<8) + c`, the top 8 bits (v >> 48) must not be 0.
+            // If they are zero, treat the encoding as corrupt.
+            // Perf note: this comparison and branch run only when parsing a 9-byte varint, which is rare.
+            if (v >> 48) == 0 {
+                bail_corrupt_error!("Invalid varint");
+            }
+            v = (v << 8) + c as u64;
+            Ok((v, 9))
+        }
+        None => {
+            bail_corrupt_error!("Invalid varint");
+        }
+    }
 }

 pub fn varint_len(value: u64) -> usize {
@@ -1608,7 +1623,7 @@ pub fn write_varint_to_vec(value: u64, payload: &mut Vec<u8>) {
 }

 /// We need to read the WAL file on open to reconstruct the WAL frame cache.
-pub fn read_entire_wal_dumb(file: &Arc) -> Result>> { +pub fn read_entire_wal_dumb(file: &Arc) -> Result>> { let size = file.size()?; #[allow(clippy::arc_with_non_send_sync)] let buf_for_pread = Arc::new(Buffer::new_temporary(size as usize)); @@ -1620,14 +1635,15 @@ pub fn read_entire_wal_dumb(file: &Arc) -> Result) -> Result bool { + self.current_page as usize >= self.pages_to_checkpoint.len() + && self.inflight_reads.is_empty() + && !self.pending_writes.is_empty() + } + #[inline] /// Whether or not new reads should be issued during checkpoint processing. fn should_issue_reads(&self) -> bool { @@ -552,7 +560,7 @@ pub struct WalFile { syncing: Rc>, - shared: Arc>, + shared: Arc>, ongoing_checkpoint: OngoingCheckpoint, checkpoint_threshold: usize, // min and max frames for this connection @@ -654,6 +662,7 @@ impl fmt::Debug for WalFile { /// that needs to be communicated between threads so this struct does the job. #[allow(dead_code)] pub struct WalFileShared { + pub enabled: AtomicBool, pub wal_header: Arc>, pub min_frame: AtomicU64, pub max_frame: AtomicU64, @@ -666,7 +675,7 @@ pub struct WalFileShared { // TODO: this will need refactoring because this is incredible memory inefficient. pub frame_cache: Arc>>>, pub last_checksum: (u32, u32), // Check of last frame in WAL, this is a cumulative checksum over all frames in the WAL - pub file: Arc, + pub file: Option>, /// Read locks advertise the maximum WAL frame a reader may access. /// Slot 0 is special, when it is held (shared) the reader bypasses the WAL and uses the main DB file. @@ -688,6 +697,7 @@ pub struct WalFileShared { impl fmt::Debug for WalFileShared { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("WalFileShared") + .field("enabled", &self.enabled.load(Ordering::Relaxed)) .field("wal_header", &self.wal_header) .field("min_frame", &self.min_frame) .field("max_frame", &self.max_frame) @@ -704,8 +714,8 @@ impl fmt::Debug for WalFileShared { /// the case of errors. It is held by the WalFile while checkpoint is ongoing /// then transferred to the CheckpointResult if necessary. enum CheckpointLocks { - Writer { ptr: Arc> }, - Read0 { ptr: Arc> }, + Writer { ptr: Arc> }, + Read0 { ptr: Arc> }, } /// Database checkpointers takes the following locks, in order: @@ -716,62 +726,55 @@ enum CheckpointLocks { /// Exclusive lock on read-mark slots 1-N again. These are immediately released after being taken (RESTART and TRUNCATE only). /// All of the above use blocking locks. impl CheckpointLocks { - fn new(ptr: Arc>, mode: CheckpointMode) -> Result { - let shared = &mut unsafe { &mut *ptr.get() }; - if !shared.checkpoint_lock.write() { - tracing::trace!("CheckpointGuard::new: checkpoint lock failed, returning Busy"); - // we hold the exclusive checkpoint lock no matter which mode for the duration - return Err(LimboError::Busy); + fn new(ptr: Arc>, mode: CheckpointMode) -> Result { + let ptr_clone = ptr.clone(); + { + let shared = ptr.write(); + if !shared.checkpoint_lock.write() { + tracing::trace!("CheckpointGuard::new: checkpoint lock failed, returning Busy"); + return Err(LimboError::Busy); + } + match mode { + CheckpointMode::Passive { .. 
} => { + if !shared.read_locks[0].write() { + shared.checkpoint_lock.unlock(); + tracing::trace!("CheckpointGuard: read0 lock failed, returning Busy"); + return Err(LimboError::Busy); + } + } + CheckpointMode::Full => { + if !shared.read_locks[0].write() { + shared.checkpoint_lock.unlock(); + tracing::trace!("CheckpointGuard: read0 lock failed (Full), Busy"); + return Err(LimboError::Busy); + } + if !shared.write_lock.write() { + shared.read_locks[0].unlock(); + shared.checkpoint_lock.unlock(); + tracing::trace!("CheckpointGuard: write lock failed (Full), Busy"); + return Err(LimboError::Busy); + } + } + CheckpointMode::Restart | CheckpointMode::Truncate { .. } => { + if !shared.read_locks[0].write() { + shared.checkpoint_lock.unlock(); + tracing::trace!("CheckpointGuard: read0 lock failed, returning Busy"); + return Err(LimboError::Busy); + } + if !shared.write_lock.write() { + shared.checkpoint_lock.unlock(); + shared.read_locks[0].unlock(); + tracing::trace!("CheckpointGuard: write lock failed, returning Busy"); + return Err(LimboError::Busy); + } + } + } } + match mode { - // Passive mode is the only mode not requiring a write lock, as it doesn't block - // readers or writers. It acquires the checkpoint lock to ensure that no other - // concurrent checkpoint happens, and acquires the exclusive read lock 0 - // to ensure that no readers read from a partially checkpointed db file. - CheckpointMode::Passive { .. } => { - let read0 = &mut shared.read_locks[0]; - if !read0.write() { - shared.checkpoint_lock.unlock(); - tracing::trace!("CheckpointGuard: read0 lock failed, returning Busy"); - // for passive and full we need to hold the read0 lock - return Err(LimboError::Busy); - } - Ok(Self::Read0 { ptr }) - } - CheckpointMode::Full => { - // Full blocks writers and holds read0 exclusively (readers may still use >0 slots) - let read0 = &mut shared.read_locks[0]; - if !read0.write() { - shared.checkpoint_lock.unlock(); - tracing::trace!("CheckpointGuard: read0 lock failed (Full), Busy"); - return Err(LimboError::Busy); - } - if !shared.write_lock.write() { - read0.unlock(); - shared.checkpoint_lock.unlock(); - tracing::trace!("CheckpointGuard: write lock failed (Full), Busy"); - return Err(LimboError::Busy); - } - Ok(Self::Writer { ptr }) - } - CheckpointMode::Restart | CheckpointMode::Truncate { .. } => { - // like all modes, we must acquire an exclusive checkpoint lock and lock on read 0 - // to prevent a reader from reading a partially checkpointed db file. - let read0 = &mut shared.read_locks[0]; - if !read0.write() { - shared.checkpoint_lock.unlock(); - tracing::trace!("CheckpointGuard: read0 lock failed, returning Busy"); - return Err(LimboError::Busy); - } - // if we are resetting the log we must hold the write lock for the duration. - // ensures no writer can append frames while we reset the log. - if !shared.write_lock.write() { - shared.checkpoint_lock.unlock(); - read0.unlock(); - tracing::trace!("CheckpointGuard: write lock failed, returning Busy"); - return Err(LimboError::Busy); - } - Ok(Self::Writer { ptr }) + CheckpointMode::Passive { .. } => Ok(Self::Read0 { ptr: ptr_clone }), + CheckpointMode::Full | CheckpointMode::Restart | CheckpointMode::Truncate { .. 
} => { + Ok(Self::Writer { ptr: ptr_clone }) } } } @@ -780,15 +783,17 @@ impl CheckpointLocks { impl Drop for CheckpointLocks { fn drop(&mut self) { match self { - CheckpointLocks::Writer { ptr: shared } => unsafe { - (*shared.get()).write_lock.unlock(); - (*shared.get()).read_locks[0].unlock(); - (*shared.get()).checkpoint_lock.unlock(); - }, - CheckpointLocks::Read0 { ptr: shared } => unsafe { - (*shared.get()).read_locks[0].unlock(); - (*shared.get()).checkpoint_lock.unlock(); - }, + CheckpointLocks::Writer { ptr: shared } => { + let guard = shared.write(); + guard.write_lock.unlock(); + guard.read_locks[0].unlock(); + guard.checkpoint_lock.unlock(); + } + CheckpointLocks::Read0 { ptr: shared } => { + let guard = shared.write(); + guard.read_locks[0].unlock(); + guard.checkpoint_lock.unlock(); + } } } } @@ -853,7 +858,13 @@ impl Wal for WalFile { // If none found or lagging, try to claim/update a slot if best_idx == -1 || (best_mark as u64) < shared_max { - for (idx, lock) in self.get_shared().read_locks.iter_mut().enumerate().skip(1) { + for (idx, lock) in self + .get_shared_mut() + .read_locks + .iter_mut() + .enumerate() + .skip(1) + { if !lock.write() { continue; // busy slot } @@ -878,11 +889,12 @@ impl Wal for WalFile { // TODO: we should retry here instead of always returning Busy return Ok((LimboResult::Busy, db_changed)); } + let checkpoint_seq = shared.wal_header.lock().checkpoint_seq; ( shared.max_frame.load(Ordering::Acquire), shared.nbackfills.load(Ordering::Acquire), shared.last_checksum, - shared.wal_header.lock().checkpoint_seq, + checkpoint_seq, ) }; @@ -932,8 +944,7 @@ impl Wal for WalFile { fn end_read_tx(&self) { let slot = self.max_frame_read_lock_index.get(); if slot != NO_LOCK_HELD { - let rl = &mut self.get_shared().read_locks[slot]; - rl.unlock(); + self.get_shared_mut().read_locks[slot].unlock(); self.max_frame_read_lock_index.set(NO_LOCK_HELD); tracing::debug!("end_read_tx(slot={slot})"); } else { @@ -944,7 +955,7 @@ impl Wal for WalFile { /// Begin a write transaction #[instrument(skip_all, level = Level::DEBUG)] fn begin_write_tx(&mut self) -> Result { - let shared = self.get_shared(); + let shared = self.get_shared_mut(); // sqlite/src/wal.c 3702 // Cannot start a write transaction without first holding a read // transaction. 
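The `CheckpointLocks::new` rewrite above acquires locks in a fixed order (checkpoint lock, then read-mark 0, then the write lock for non-Passive modes) and rolls back everything taken so far whenever one acquisition fails. A minimal model of that pattern, with `SketchLock` as an assumed stand-in for `TursoRwLock`:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Non-blocking exclusive try-lock standing in for `TursoRwLock`.
struct SketchLock(AtomicBool);

impl SketchLock {
    fn write(&self) -> bool {
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
    fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

enum Mode {
    Passive,
    Full,
    Restart,
    Truncate,
}

struct CheckpointLockSet {
    checkpoint: SketchLock, // serializes checkpointers (all modes)
    read0: SketchLock,      // keeps readers off a partially backfilled db file (all modes)
    write: SketchLock,      // blocks writers (all modes except Passive)
}

/// Returns false (Busy) and releases everything taken so far if any lock in
/// the sequence cannot be acquired.
fn try_acquire(locks: &CheckpointLockSet, mode: Mode) -> bool {
    if !locks.checkpoint.write() {
        return false;
    }
    if !locks.read0.write() {
        locks.checkpoint.unlock();
        return false;
    }
    if !matches!(mode, Mode::Passive) && !locks.write.write() {
        locks.read0.unlock();
        locks.checkpoint.unlock();
        return false;
    }
    true
}
```

Releasing in reverse order on the success path, as the patch's `Drop for CheckpointLocks` does, keeps the unlock sequence symmetric with acquisition.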
@@ -957,16 +968,14 @@ impl Wal for WalFile { if !shared.write_lock.write() { return Ok(LimboResult::Busy); } - let (shared_max, nbackfills, last_checksum) = { - let shared = self.get_shared(); - ( - shared.max_frame.load(Ordering::Acquire), - shared.nbackfills.load(Ordering::Acquire), - shared.last_checksum, - ) - }; + let (shared_max, nbackfills, last_checksum) = ( + shared.max_frame.load(Ordering::Acquire), + shared.nbackfills.load(Ordering::Acquire), + shared.last_checksum, + ); if self.max_frame == shared_max { // Snapshot still valid; adopt counters + drop(shared); self.last_checksum = last_checksum; self.min_frame = nbackfills + 1; return Ok(LimboResult::Ok); @@ -1080,8 +1089,14 @@ impl Wal for WalFile { finish_read_page(page.get().id, buf, cloned); frame.set_wal_tag(frame_id, seq); }); + let shared = self.get_shared(); + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap(); begin_read_wal_frame( - &self.get_shared().file, + file, offset + WAL_FRAME_HEADER_SIZE as u64, buffer_pool, complete, @@ -1138,8 +1153,13 @@ impl Wal for WalFile { } } }); - let c = - begin_read_wal_frame_raw(&self.buffer_pool, &self.get_shared().file, offset, complete)?; + let shared = self.get_shared(); + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap(); + let c = begin_read_wal_frame_raw(&self.buffer_pool, file, offset, complete)?; Ok(c) } @@ -1194,8 +1214,14 @@ impl Wal for WalFile { } } }); + let shared = self.get_shared(); + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap(); let c = begin_read_wal_frame( - &self.get_shared().file, + file, offset + WAL_FRAME_HEADER_SIZE as u64, buffer_pool, complete, @@ -1214,8 +1240,16 @@ impl Wal for WalFile { // perform actual write let offset = self.frame_offset(frame_id); - let shared = self.get_shared(); - let header = shared.wal_header.clone(); + let (header, file) = { + let shared = self.get_shared(); + let header = shared.wal_header.clone(); + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap().clone(); + (header, file) + }; let header = header.lock(); let checksums = self.last_checksum; let (checksums, frame_bytes) = prepare_wal_frame( @@ -1228,7 +1262,7 @@ impl Wal for WalFile { page, ); let c = Completion::new_write(|_| {}); - let c = shared.file.pwrite(offset, frame_bytes, c)?; + let c = file.pwrite(offset, frame_bytes, c)?; self.io.wait_for_completion(c)?; self.complete_append_frame(page_id, frame_id, checksums); if db_size > 0 { @@ -1246,8 +1280,11 @@ impl Wal for WalFile { db_size: u32, ) -> Result { self.ensure_header_if_needed(page_size)?; - let shared = self.get_shared(); - let shared_page_size = shared.wal_header.lock().page_size; + let shared_page_size = { + let shared = self.get_shared(); + let page_size = shared.wal_header.lock().page_size; + page_size + }; turso_assert!( shared_page_size == page_size.get(), "page size mismatch - tried to change page size after WAL header was already initialized: shared.page_size={shared_page_size}, page_size={}", @@ -1307,7 +1344,12 @@ impl Wal for WalFile { page.set_wal_tag(frame_id, seq); } }); - let result = shared.file.pwrite(offset, frame_bytes.clone(), c)?; + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap(); + let result = file.pwrite(offset, 
frame_bytes.clone(), c)?; (result, frame_checksums) }; self.complete_append_frame(page_id as u64, frame_id, checksums); @@ -1344,7 +1386,12 @@ impl Wal for WalFile { }); let shared = self.get_shared(); self.syncing.set(true); - let c = shared.file.sync(completion)?; + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap(); + let c = file.sync(completion)?; Ok(c) } @@ -1392,7 +1439,7 @@ impl Wal for WalFile { #[instrument(skip_all, level = Level::DEBUG)] fn finish_append_frames_commit(&mut self) -> Result<()> { - let shared = self.get_shared(); + let mut shared = self.get_shared_mut(); shared.max_frame.store(self.max_frame, Ordering::Release); tracing::trace!(self.max_frame, ?self.last_checksum); shared.last_checksum = self.last_checksum; @@ -1510,7 +1557,7 @@ impl Wal for WalFile { // single completion for the whole batch let total_len: i32 = iovecs.iter().map(|b| b.len() as i32).sum(); let page_frame_for_cb = page_frame_and_checksum.clone(); - let c = Completion::new_write(move |res: Result| { + let cmp = move |res: Result| { let Ok(bytes_written) = res else { return; }; @@ -1523,9 +1570,21 @@ impl Wal for WalFile { page.clear_dirty(); page.set_wal_tag(*fid, seq); } - }); + }; - let c = self.get_shared().file.pwritev(start_off, iovecs, c)?; + let c = if db_size_on_commit.is_some() { + Completion::new_write_linked(cmp) + } else { + Completion::new_write(cmp) + }; + + let shared = self.get_shared(); + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap(); + let c = file.pwritev(start_off, iovecs, c)?; Ok(c) } @@ -1542,16 +1601,23 @@ impl Wal for WalFile { impl WalFile { pub fn new( io: Arc, - shared: Arc>, + shared: Arc>, buffer_pool: Arc, ) -> Self { - let header = unsafe { shared.get().as_mut().unwrap().wal_header.lock() }; - let last_checksum = unsafe { (*shared.get()).last_checksum }; + let (header, last_checksum, max_frame) = { + let shared_guard = shared.read(); + let header = *shared_guard.wal_header.lock(); + ( + header, + shared_guard.last_checksum, + shared_guard.max_frame.load(Ordering::Acquire), + ) + }; let now = io.now(); Self { io, // default to max frame in WAL, so that when we read schema we can read from WAL too if it's there. 
- max_frame: unsafe { (*shared.get()).max_frame.load(Ordering::Acquire) }, + max_frame, shared, ongoing_checkpoint: OngoingCheckpoint { time: now, @@ -1572,7 +1638,7 @@ impl WalFile { last_checksum, prev_checkpoint: CheckpointResult::default(), checkpoint_guard: None, - header: *header, + header, io_ctx: RefCell::new(IOContext::default()), } } @@ -1587,9 +1653,12 @@ impl WalFile { WAL_HEADER_SIZE as u64 + page_offset } - #[allow(clippy::mut_from_ref)] - fn get_shared(&self) -> &mut WalFileShared { - unsafe { self.shared.get().as_mut().unwrap() } + fn get_shared_mut(&self) -> parking_lot::RwLockWriteGuard { + self.shared.write() + } + + fn get_shared(&self) -> parking_lot::RwLockReadGuard { + self.shared.read() } fn complete_append_frame(&mut self, page_id: u64, frame_id: u64, checksums: (u32, u32)) { @@ -1623,41 +1692,48 @@ impl WalFile { } tracing::debug!("ensure_header_if_needed"); self.last_checksum = { - let shared = self.get_shared(); - let mut hdr = shared.wal_header.lock(); - hdr.magic = if cfg!(target_endian = "big") { - WAL_MAGIC_BE - } else { - WAL_MAGIC_LE + let mut shared = self.get_shared_mut(); + let checksum = { + let mut hdr = shared.wal_header.lock(); + hdr.magic = if cfg!(target_endian = "big") { + WAL_MAGIC_BE + } else { + WAL_MAGIC_LE + }; + if hdr.page_size == 0 { + hdr.page_size = page_size.get(); + } + if hdr.salt_1 == 0 && hdr.salt_2 == 0 { + hdr.salt_1 = self.io.generate_random_number() as u32; + hdr.salt_2 = self.io.generate_random_number() as u32; + } + + // recompute header checksum + let prefix = &hdr.as_bytes()[..WAL_HEADER_SIZE - 8]; + let use_native = (hdr.magic & 1) != 0; + let (c1, c2) = checksum_wal(prefix, &hdr, (0, 0), use_native); + hdr.checksum_1 = c1; + hdr.checksum_2 = c2; + (c1, c2) }; - if hdr.page_size == 0 { - hdr.page_size = page_size.get(); - } - if hdr.salt_1 == 0 && hdr.salt_2 == 0 { - hdr.salt_1 = self.io.generate_random_number() as u32; - hdr.salt_2 = self.io.generate_random_number() as u32; - } - - // recompute header checksum - let prefix = &hdr.as_bytes()[..WAL_HEADER_SIZE - 8]; - let use_native = (hdr.magic & 1) != 0; - let (c1, c2) = checksum_wal(prefix, &hdr, (0, 0), use_native); - hdr.checksum_1 = c1; - hdr.checksum_2 = c2; - - shared.last_checksum = (c1, c2); - (c1, c2) + shared.last_checksum = checksum; + checksum }; self.max_frame = 0; let shared = self.get_shared(); + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap(); self.io .wait_for_completion(sqlite3_ondisk::begin_write_wal_header( - &shared.file, + file, &shared.wal_header.lock(), )?)?; self.io - .wait_for_completion(shared.file.sync(Completion::new_sync(|_| {}))?)?; + .wait_for_completion(file.sync(Completion::new_sync(|_| {}))?)?; shared.initialized.store(true, Ordering::Release); Ok(()) } @@ -1821,7 +1897,10 @@ impl WalFile { let batch_map = self.ongoing_checkpoint.pending_writes.take(); if !batch_map.is_empty() { let done_flag = self.ongoing_checkpoint.add_write(); - completions.extend(write_pages_vectored(pager, batch_map, done_flag)?); + let is_final = self.ongoing_checkpoint.is_final_write(); + completions.extend(write_pages_vectored( + pager, batch_map, done_flag, is_final, + )?); } } @@ -1970,7 +2049,7 @@ impl WalFile { /// We never modify slot values while a reader holds that slot's lock. 
/// TOOD: implement proper BUSY handling behavior fn determine_max_safe_checkpoint_frame(&self) -> u64 { - let shared = self.get_shared(); + let mut shared = self.get_shared_mut(); let shared_max = shared.max_frame.load(Ordering::Acquire); let mut max_safe_frame = shared_max; @@ -2010,7 +2089,7 @@ impl WalFile { tracing::info!("restart_log(mode={mode:?})"); { // Block all readers - let shared = self.get_shared(); + let mut shared = self.get_shared_mut(); for idx in 1..shared.read_locks.len() { let lock = &mut shared.read_locks[idx]; if !lock.write() { @@ -2028,7 +2107,7 @@ impl WalFile { let unlock = |e: Option<&LimboError>| { // release all read locks we just acquired, the caller will take care of the others - let shared = unsafe { self.shared.get().as_mut().unwrap() }; + let shared = self.shared.write(); for idx in 1..shared.read_locks.len() { shared.read_locks[idx].unlock(); } @@ -2040,14 +2119,16 @@ impl WalFile { } }; // reinitialize in‑memory state - self.get_shared() + self.get_shared_mut() .restart_wal_header(&self.io, mode) .inspect_err(|e| { unlock(Some(e)); })?; let (header, cksm) = { let shared = self.get_shared(); - (*shared.wal_header.lock(), shared.last_checksum) + let header = *shared.wal_header.lock(); + let cksm = shared.last_checksum; + (header, cksm) }; self.last_checksum = cksm; self.header = header; @@ -2061,10 +2142,12 @@ impl WalFile { }); let shared = self.get_shared(); // for now at least, lets do all this IO syncronously - let c = shared - .file - .truncate(0, c) - .inspect_err(|e| unlock(Some(e)))?; + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap(); + let c = file.truncate(0, c).inspect_err(|e| unlock(Some(e)))?; shared.initialized.store(false, Ordering::Release); self.io .wait_for_completion(c) @@ -2072,12 +2155,10 @@ impl WalFile { // fsync after truncation self.io .wait_for_completion( - shared - .file - .sync(Completion::new_sync(|_| { - tracing::trace!("WAL file synced after reset/truncation"); - })) - .inspect_err(|e| unlock(Some(e)))?, + file.sync(Completion::new_sync(|_| { + tracing::trace!("WAL file synced after reset/truncation"); + })) + .inspect_err(|e| unlock(Some(e)))?, ) .inspect_err(|e| unlock(Some(e)))?; } @@ -2128,8 +2209,14 @@ impl WalFile { }) }; // schedule read of the page payload + let shared = self.get_shared(); + assert!( + shared.enabled.load(Ordering::Relaxed), + "WAL must be enabled" + ); + let file = shared.file.as_ref().unwrap(); let c = begin_read_wal_frame( - &self.get_shared().file, + file, offset + WAL_FRAME_HEADER_SIZE as u64, self.buffer_pool.clone(), complete, @@ -2149,25 +2236,22 @@ impl WalFileShared { pub fn open_shared_if_exists( io: &Arc, path: &str, - ) -> Result>>> { + ) -> Result>> { let file = io.open_file(path, crate::io::OpenFlags::Create, false)?; if file.size()? > 0 { let wal_file_shared = sqlite3_ondisk::read_entire_wal_dumb(&file)?; // TODO: Return a completion instead. 
            let mut max_loops = 100_000;
-            while !unsafe { &*wal_file_shared.get() }
-                .loaded
-                .load(Ordering::Acquire)
-            {
+            while !wal_file_shared.read().loaded.load(Ordering::Acquire) {
                 io.run_once()?;
                 max_loops -= 1;
                 if max_loops == 0 {
                     panic!("WAL file not loaded");
                 }
             }
-            Ok(Some(wal_file_shared))
+            Ok(wal_file_shared)
         } else {
-            Ok(None)
+            WalFileShared::new_noop()
         }
     }

@@ -2175,7 +2259,42 @@
         Ok(self.initialized.load(Ordering::Acquire))
     }

-    pub fn new_shared(file: Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFileShared>>> {
+    pub fn new_noop() -> Result<Arc<RwLock<WalFileShared>>> {
+        let wal_header = WalHeader {
+            magic: 0,
+            file_format: 0,
+            page_size: 0,
+            checkpoint_seq: 0,
+            salt_1: 0,
+            salt_2: 0,
+            checksum_1: 0,
+            checksum_2: 0,
+        };
+        let read_locks = array::from_fn(|_| TursoRwLock::new());
+        for (i, lock) in read_locks.iter().enumerate() {
+            lock.write();
+            lock.set_value_exclusive(if i < 2 { 0 } else { READMARK_NOT_USED });
+            lock.unlock();
+        }
+        let shared = WalFileShared {
+            enabled: AtomicBool::new(false),
+            wal_header: Arc::new(SpinLock::new(wal_header)),
+            min_frame: AtomicU64::new(0),
+            max_frame: AtomicU64::new(0),
+            nbackfills: AtomicU64::new(0),
+            frame_cache: Arc::new(SpinLock::new(HashMap::new())),
+            last_checksum: (0, 0),
+            file: None,
+            read_locks,
+            write_lock: TursoRwLock::new(),
+            checkpoint_lock: TursoRwLock::new(),
+            loaded: AtomicBool::new(true),
+            initialized: AtomicBool::new(false),
+        };
+        Ok(Arc::new(RwLock::new(shared)))
+    }
+
+    pub fn new_shared(file: Arc<dyn File>) -> Result<Arc<RwLock<WalFileShared>>> {
         let magic = if cfg!(target_endian = "big") {
             WAL_MAGIC_BE
         } else {
@@ -2201,20 +2320,50 @@
             lock.unlock();
         }
         let shared = WalFileShared {
+            enabled: AtomicBool::new(true),
             wal_header: Arc::new(SpinLock::new(wal_header)),
             min_frame: AtomicU64::new(0),
             max_frame: AtomicU64::new(0),
             nbackfills: AtomicU64::new(0),
             frame_cache: Arc::new(SpinLock::new(HashMap::new())),
             last_checksum: (0, 0),
-            file,
+            file: Some(file),
             read_locks,
             write_lock: TursoRwLock::new(),
             checkpoint_lock: TursoRwLock::new(),
             loaded: AtomicBool::new(true),
             initialized: AtomicBool::new(false),
         };
-        Ok(Arc::new(UnsafeCell::new(shared)))
+        Ok(Arc::new(RwLock::new(shared)))
+    }
+
+    pub fn create(&mut self, file: Arc<dyn File>) -> Result<()> {
+        if self.enabled.load(Ordering::Relaxed) {
+            return Err(LimboError::InternalError("WAL already enabled".to_string()));
+        }
+
+        let magic = if cfg!(target_endian = "big") {
+            WAL_MAGIC_BE
+        } else {
+            WAL_MAGIC_LE
+        };
+
+        *self.wal_header.lock() = WalHeader {
+            magic,
+            file_format: 3007000,
+            page_size: 0, // Signifies WAL header that is not persistent on disk yet.
+            checkpoint_seq: 0,
+            salt_1: 0,
+            salt_2: 0,
+            checksum_1: 0,
+            checksum_2: 0,
+        };
+
+        self.file = Some(file);
+        self.enabled.store(true, Ordering::Relaxed);
+        self.initialized.store(false, Ordering::Relaxed);
+
+        Ok(())
     }

     pub fn page_size(&self) -> u32 {
@@ -2280,10 +2429,11 @@ pub mod test {
         CheckpointMode, CheckpointResult, Completion, Connection, Database, LimboError,
         PlatformIO, StepResult, Wal, WalFile, WalFileShared, IO,
     };
+    use parking_lot::RwLock;
     #[cfg(unix)]
     use std::os::unix::fs::MetadataExt;
     use std::{
-        cell::{Cell, UnsafeCell},
+        cell::Cell,
         rc::Rc,
         sync::{atomic::Ordering, Arc},
     };
@@ -2310,19 +2460,18 @@ pub mod test {
        conn.execute("create table test (id integer primary key, value text)")
            .unwrap();
        let _ = conn.execute("insert into test (value) values ('test1'), ('test2'), ('test3')");
-        let wal = db.maybe_shared_wal.write();
-        let wal_file = wal.as_ref().unwrap();
-        let file = unsafe { &mut *wal_file.get() };
+        let wal = db.shared_wal.write();
+        let wal_file = wal.file.as_ref().unwrap().clone();
        let done = Rc::new(Cell::new(false));
        let _done = done.clone();
-        let _ = file.file.truncate(
+        let _ = wal_file.truncate(
            WAL_HEADER_SIZE as u64,
            Completion::new_trunc(move |_| {
                let done = _done.clone();
                done.set(true);
            }),
        );
-        assert!(file.file.size().unwrap() == WAL_HEADER_SIZE as u64);
+        assert!(wal_file.size().unwrap() == WAL_HEADER_SIZE as u64);
        assert!(done.get());
    }

@@ -2409,12 +2558,11 @@ pub mod test {
        pager.io.block(|| wal.checkpoint(pager, mode)).unwrap()
    }

-    fn wal_header_snapshot(shared: &Arc<UnsafeCell<WalFileShared>>) -> (u32, u32, u32, u32) {
+    fn wal_header_snapshot(shared: &Arc<RwLock<WalFileShared>>) -> (u32, u32, u32, u32) {
        // (checkpoint_seq, salt1, salt2, page_size)
-        unsafe {
-            let hdr = (*shared.get()).wal_header.lock();
-            (hdr.checkpoint_seq, hdr.salt_1, hdr.salt_2, hdr.page_size)
-        }
+        let shared_guard = shared.read();
+        let hdr = shared_guard.wal_header.lock();
+        (hdr.checkpoint_seq, hdr.salt_1, hdr.salt_2, hdr.page_size)
    }

    #[test]
@@ -2437,10 +2585,10 @@
        }

        // Snapshot header & counters before the RESTART checkpoint.
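        // A RESTART rewrites the WAL header (fresh salts, new checkpoint_seq) and rewinds
        // the in-memory frame counters; the assertions below check both effects.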
-        let wal_shared = db.maybe_shared_wal.read().as_ref().unwrap().clone();
+        let wal_shared = db.shared_wal.clone();
        let (seq_before, salt1_before, salt2_before, _ps_before) = wal_header_snapshot(&wal_shared);
-        let (mx_before, backfill_before) = unsafe {
-            let s = &*wal_shared.get();
+        let (mx_before, backfill_before) = {
+            let s = wal_shared.read();
            (
                s.max_frame.load(Ordering::SeqCst),
                s.nbackfills.load(Ordering::SeqCst),
@@ -2478,8 +2626,8 @@
        );
        assert_ne!(salt2_after, salt2_before, "salt_2 is randomized");

-        let (mx_after, backfill_after) = unsafe {
-            let s = &*wal_shared.get();
+        let (mx_after, backfill_after) = {
+            let s = wal_shared.read();
            (
                s.max_frame.load(Ordering::SeqCst),
                s.nbackfills.load(Ordering::SeqCst),
@@ -2510,7 +2658,7 @@
            .borrow_mut()
            .finish_append_frames_commit()
            .unwrap();
-        let new_max = unsafe { (&*wal_shared.get()).max_frame.load(Ordering::SeqCst) };
+        let new_max = wal_shared.read().max_frame.load(Ordering::SeqCst);
        assert_eq!(new_max, 1, "first append after RESTART starts at frame 1");

        std::fs::remove_dir_all(path).unwrap();
@@ -2557,11 +2705,7 @@
                upper_bound_inclusive: None,
            },
        );
-        let maxf = unsafe {
-            (&*db.maybe_shared_wal.read().as_ref().unwrap().get())
-                .max_frame
-                .load(Ordering::SeqCst)
-        };
+        let maxf = db.shared_wal.read().max_frame.load(Ordering::SeqCst);
        (res, maxf)
    };
    assert_eq!(res1.num_attempted, max_before);
@@ -2670,7 +2814,7 @@ pub mod test {
    #[test]
    fn test_wal_read_marks_after_restart() {
        let (db, _path) = get_database();
-        let wal_shared = db.maybe_shared_wal.read().as_ref().unwrap().clone();
+        let wal_shared = db.shared_wal.clone();
        let conn = db.connect().unwrap();

        conn.execute("create table test(id integer primary key, value text)")
@@ -2685,8 +2829,8 @@
        }

        // Verify read marks after restart
-        let read_marks_after: Vec<_> = unsafe {
-            let s = &*wal_shared.get();
+        let read_marks_after: Vec<_> = {
+            let s = wal_shared.read();
            (0..5).map(|i| s.read_locks[i].get_value()).collect()
        };

@@ -2774,7 +2918,7 @@ pub mod test {
    #[test]
    fn test_wal_checkpoint_updates_read_marks() {
        let (db, _path) = get_database();
-        let wal_shared = db.maybe_shared_wal.read().as_ref().unwrap().clone();
+        let wal_shared = db.shared_wal.clone();
        let conn = db.connect().unwrap();

        conn.execute("create table test(id integer primary key, value text)")
@@ -2782,7 +2926,7 @@
        bulk_inserts(&conn, 10, 5);

        // get max frame before checkpoint
-        let max_frame_before = unsafe { (*wal_shared.get()).max_frame.load(Ordering::SeqCst) };
+        let max_frame_before = wal_shared.read().max_frame.load(Ordering::SeqCst);

        {
            let pager = conn.pager.borrow();
@@ -2797,7 +2941,7 @@
        }

        // check that read mark 1 (default reader) was updated to max_frame
-        let read_mark_1 = unsafe { (*wal_shared.get()).read_locks[1].get_value() };
+        let read_mark_1 = wal_shared.read().read_locks[1].get_value();
        assert_eq!(
            read_mark_1 as u64, max_frame_before,
@@ -3269,8 +3413,8 @@
        }

        // Snapshot the current mxFrame before running FULL
-        let wal_shared = db.maybe_shared_wal.read().as_ref().unwrap().clone();
-        let mx_before = unsafe { (&*wal_shared.get()).max_frame.load(Ordering::SeqCst) };
+        let wal_shared = db.shared_wal.clone();
+        let mx_before = wal_shared.read().max_frame.load(Ordering::SeqCst);
        assert!(mx_before > 0, "expected frames in WAL before FULL");

        // Run FULL checkpoint - must backfill *all* frames up to mx_before
@@ -3320,11 +3464,7 @@
        for c in completions {
            db.io.wait_for_completion(c).unwrap();
        }
-        let mx_now = unsafe {
-            (&*db.maybe_shared_wal.read().as_ref().unwrap().get())
-                .max_frame
-                .load(Ordering::SeqCst)
-        };
+        let mx_now = db.shared_wal.read().max_frame.load(Ordering::SeqCst);
        assert!(mx_now > r_snapshot);

        // FULL must return Busy while a reader is stuck behind
diff --git a/core/translate/aggregation.rs b/core/translate/aggregation.rs
index 7a1de9776..75a89c63d 100644
--- a/core/translate/aggregation.rs
+++ b/core/translate/aggregation.rs
@@ -115,6 +115,7 @@ pub fn handle_distinct(program: &mut ProgramBuilder, agg: &Aggregate, agg_arg_re
             count: num_regs,
             dest_reg: record_reg,
             index_name: Some(distinct_ctx.ephemeral_index_name.to_string()),
+            affinity_str: None,
         });
         program.emit_insn(Insn::IdxInsert {
             cursor_id: distinct_ctx.cursor_id,
diff --git a/core/translate/alter.rs b/core/translate/alter.rs
index 6cf70fd14..9808d375f 100644
--- a/core/translate/alter.rs
+++ b/core/translate/alter.rs
@@ -28,6 +28,12 @@ pub fn translate_alter_table(
         body: alter_table,
     } = alter;
     let table_name = table_name.name.as_str();
+
+    // Check if someone is trying to ALTER a system table
+    if crate::schema::is_system_table(table_name) {
+        crate::bail_parse_error!("table {} may not be modified", table_name);
+    }
+
     if schema.table_has_indexes(table_name) && !schema.indexes_enabled() {
         // Let's disable altering a table with indices altogether instead of checking column by
         // column to be extra safe.
@@ -135,11 +141,18 @@ pub fn translate_alter_table(

     let record = program.alloc_register();

+    let affinity_str = btree
+        .columns
+        .iter()
+        .map(|col| col.affinity().aff_mask())
+        .collect::<String>();
+
     program.emit_insn(Insn::MakeRecord {
         start_reg: first_column,
         count: column_count,
         dest_reg: record,
         index_name: None,
+        affinity_str: Some(affinity_str),
     });

     program.emit_insn(Insn::Insert {
@@ -295,6 +308,7 @@ pub fn translate_alter_table(
         count: sqlite_schema_column_len,
         dest_reg: record,
         index_name: None,
+        affinity_str: None,
     });

     program.emit_insn(Insn::Insert {
@@ -436,6 +450,7 @@ pub fn translate_alter_table(
         count: sqlite_schema_column_len,
         dest_reg: record,
         index_name: None,
+        affinity_str: None,
     });

     program.emit_insn(Insn::Insert {
diff --git a/core/translate/analyze.rs b/core/translate/analyze.rs
index 0d8f8de4e..d7e68fc04 100644
--- a/core/translate/analyze.rs
+++ b/core/translate/analyze.rs
@@ -212,6 +212,7 @@ pub fn translate_analyze(
         count: 3,
         dest_reg: record_reg,
         index_name: None,
+        affinity_str: None,
     });
     program.emit_insn(Insn::NewRowid {
         cursor: stat_cursor,
diff --git a/core/translate/compound_select.rs b/core/translate/compound_select.rs
index 15fb4d841..f4d9dd0bf 100644
--- a/core/translate/compound_select.rs
+++ b/core/translate/compound_select.rs
@@ -1,6 +1,8 @@
 use crate::schema::{Index, IndexColumn, Schema};
 use crate::translate::emitter::{emit_query, LimitCtx, TranslateCtx};
+use crate::translate::expr::translate_expr;
 use crate::translate::plan::{Plan, QueryDestination, SelectPlan};
+use crate::translate::result_row::try_fold_expr_to_i64;
 use crate::vdbe::builder::{CursorType, ProgramBuilder};
 use crate::vdbe::insn::Insn;
 use crate::vdbe::BranchOffset;
@@ -31,36 +33,55 @@ pub fn emit_program_for_compound_select(
     let right_plan = right_most.clone();

     // Trivial exit on LIMIT 0
-    if let Some(limit) = limit {
-        if *limit == 0 {
-            program.result_columns = right_plan.result_columns;
-            program.table_references.extend(right_plan.table_references);
-            return Ok(());
-        }
+    if matches!(limit.as_ref().and_then(try_fold_expr_to_i64), Some(v) if v == 0)
+    {
+        program.result_columns = right_plan.result_columns;
+        program.table_references.extend(right_plan.table_references);
+        return Ok(());
     }

+    let right_most_ctx = TranslateCtx::new(
+        program,
+        schema,
+        syms,
+        right_most.table_references.joined_tables().len(),
+    );
+
     // Each subselect shares the same limit_ctx and offset, because the LIMIT, OFFSET applies to
     // the entire compound select, not just a single subselect.
-    let limit_ctx = limit.map(|limit| {
+    let limit_ctx = limit.as_ref().map(|limit| {
         let reg = program.alloc_register();
-        program.emit_insn(Insn::Integer {
-            value: limit as i64,
-            dest: reg,
-        });
+        if let Some(val) = try_fold_expr_to_i64(limit) {
+            program.emit_insn(Insn::Integer {
+                value: val,
+                dest: reg,
+            });
+        } else {
+            program.add_comment(program.offset(), "LIMIT expr");
+            _ = translate_expr(program, None, limit, reg, &right_most_ctx.resolver);
+            program.emit_insn(Insn::MustBeInt { reg });
+        }
         LimitCtx::new_shared(reg)
     });

-    let offset_reg = offset.map(|offset| {
+    let offset_reg = offset.as_ref().map(|offset_expr| {
         let reg = program.alloc_register();
-        program.emit_insn(Insn::Integer {
-            value: offset as i64,
-            dest: reg,
-        });
+
+        if let Some(val) = try_fold_expr_to_i64(offset_expr) {
+            // Compile-time constant offset
+            program.emit_insn(Insn::Integer {
+                value: val,
+                dest: reg,
+            });
+        } else {
+            program.add_comment(program.offset(), "OFFSET expr");
+            _ = translate_expr(program, None, offset_expr, reg, &right_most_ctx.resolver);
+            program.emit_insn(Insn::MustBeInt { reg });
+        }
         let combined_reg = program.alloc_register();
         program.emit_insn(Insn::OffsetLimit {
             offset_reg: reg,
             combined_reg,
-            limit_reg: limit_ctx.unwrap().reg_limit,
+            limit_reg: limit_ctx.as_ref().unwrap().reg_limit,
         });
         reg
@@ -137,8 +158,8 @@ fn emit_compound_select(
         let compound_select = Plan::CompoundSelect {
             left,
             right_most: plan,
-            limit,
-            offset,
+            limit: limit.clone(),
+            offset: offset.clone(),
             order_by,
         };
         emit_compound_select(
@@ -503,6 +524,7 @@ fn read_intersect_rows(
         count: column_count,
         dest_reg: row_content_reg,
         index_name: None,
+        affinity_str: None,
     });
     program.emit_insn(Insn::IdxInsert {
         cursor_id: target_cursor_id,
diff --git a/core/translate/delete.rs b/core/translate/delete.rs
index 5dd26ee8c..dee30b2af 100644
--- a/core/translate/delete.rs
+++ b/core/translate/delete.rs
@@ -23,6 +23,12 @@ pub fn translate_delete(
     connection: &Arc<Connection>,
 ) -> Result<ProgramBuilder> {
     let tbl_name = normalize_ident(tbl_name.name.as_str());
+
+    // Check if this is a system table that should be protected from direct writes
+    if crate::schema::is_system_table(&tbl_name) {
+        crate::bail_parse_error!("table {} may not be modified", tbl_name);
+    }
+
     if schema.table_has_indexes(&tbl_name) && !schema.indexes_enabled() {
         // Let's disable altering a table with indices altogether instead of checking column by
         // column to be extra safe.
@@ -76,6 +82,12 @@ pub fn prepare_delete_plan(
         Some(table) => table,
         None => crate::bail_parse_error!("no such table: {}", tbl_name),
     };
+
+    // Check if this is a materialized view
+    if schema.is_materialized_view(&tbl_name) {
+        crate::bail_parse_error!("cannot modify materialized view {}", tbl_name);
+    }
+
     let table = if let Some(table) = table.virtual_table() {
         Table::Virtual(table.clone())
     } else if let Some(table) = table.btree() {
@@ -107,7 +119,8 @@ pub fn prepare_delete_plan(
     )?;

     // Parse the LIMIT/OFFSET clause
-    let (resolved_limit, resolved_offset) = limit.map_or(Ok((None, None)), |l| parse_limit(&l))?;
+    let (resolved_limit, resolved_offset) =
+        limit.map_or(Ok((None, None)), |mut l| parse_limit(&mut l, connection))?;

     let plan = DeletePlan {
         table_references,
diff --git a/core/translate/display.rs b/core/translate/display.rs
index f183bc3e8..384cf7f54 100644
--- a/core/translate/display.rs
+++ b/core/translate/display.rs
@@ -217,7 +217,7 @@ impl fmt::Display for UpdatePlan {
                 )?;
             }
         }
-        if let Some(limit) = self.limit {
+        if let Some(limit) = self.limit.as_ref() {
             writeln!(f, "LIMIT: {limit}")?;
         }
         if let Some(ret) = &self.returning {
diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs
index 29cbd210a..9e233a2b5 100644
--- a/core/translate/emitter.rs
+++ b/core/translate/emitter.rs
@@ -26,6 +26,7 @@ use crate::schema::{BTreeTable, Column, Schema, Table};
 use crate::translate::compound_select::emit_program_for_compound_select;
 use crate::translate::expr::{emit_returning_results, ReturningValueRegisters};
 use crate::translate::plan::{DeletePlan, Plan, QueryDestination, Search};
+use crate::translate::result_row::try_fold_expr_to_i64;
 use crate::translate::values::emit_values;
 use crate::util::exprs_are_equivalent;
 use crate::vdbe::builder::{CursorKey, CursorType, ProgramBuilder};
@@ -227,7 +228,7 @@ fn emit_program_for_select(
     );

     // Trivial exit on LIMIT 0
-    if let Some(limit) = plan.limit {
+    if let Some(limit) = plan.limit.as_ref().and_then(try_fold_expr_to_i64) {
         if limit == 0 {
             program.result_columns = plan.result_columns;
             program.table_references.extend(plan.table_references);
@@ -256,7 +257,7 @@ pub fn emit_query<'a>(
     // Emit subqueries first so the results can be read in the main query loop.
     emit_subqueries(program, t_ctx, &mut plan.table_references)?;

-    init_limit(program, t_ctx, plan.limit, plan.offset);
+    init_limit(program, t_ctx, &plan.limit, &plan.offset);

     // No rows will be read from source table loops if there is a constant false condition e.g. WHERE 0
     // however an aggregation might still happen,
@@ -404,13 +405,15 @@ fn emit_program_for_delete(
     );

     // exit early if LIMIT 0
-    if let Some(0) = plan.limit {
-        program.result_columns = plan.result_columns;
-        program.table_references.extend(plan.table_references);
-        return Ok(());
+    if let Some(limit) = plan.limit.as_ref().and_then(try_fold_expr_to_i64) {
+        if limit == 0 {
+            program.result_columns = plan.result_columns;
+            program.table_references.extend(plan.table_references);
+            return Ok(());
+        }
     }

-    init_limit(program, &mut t_ctx, plan.limit, None);
+    init_limit(program, &mut t_ctx, &plan.limit, &None);

     // No rows will be read from source table loops if there is a constant false condition e.g. WHERE 0
     let after_main_loop_label = program.allocate_label();
@@ -660,13 +663,15 @@ fn emit_program_for_update(
     );

     // Exit on LIMIT 0
-    if let Some(0) = plan.limit {
-        program.result_columns = plan.returning.unwrap_or_default();
-        program.table_references.extend(plan.table_references);
-        return Ok(());
+    if let Some(limit) = plan.limit.as_ref().and_then(try_fold_expr_to_i64) {
+        if limit == 0 {
+            program.result_columns = plan.returning.unwrap_or_default();
+            program.table_references.extend(plan.table_references);
+            return Ok(());
+        }
     }

-    init_limit(program, &mut t_ctx, plan.limit, plan.offset);
+    init_limit(program, &mut t_ctx, &plan.limit, &plan.offset);
     let after_main_loop_label = program.allocate_label();
     t_ctx.label_main_loop_end = Some(after_main_loop_label);
     if plan.contains_constant_false_condition {
@@ -1033,6 +1038,7 @@ fn emit_update_insns(
             count: num_cols + 1,
             dest_reg: *record_reg,
             index_name: Some(index.name.clone()),
+            affinity_str: None,
         });

         if !index.unique {
@@ -1133,11 +1139,19 @@ fn emit_update_insns(
     }

     let record_reg = program.alloc_register();
+
+    let affinity_str = table_ref
+        .columns()
+        .iter()
+        .map(|col| col.affinity().aff_mask())
+        .collect::<String>();
+
     program.emit_insn(Insn::MakeRecord {
         start_reg: start,
         count: table_ref.columns().len(),
         dest_reg: record_reg,
         index_name: None,
+        affinity_str: Some(affinity_str),
     });

     if has_user_provided_rowid {
@@ -1277,6 +1291,7 @@ fn emit_update_insns(
             count: 2 * table_ref.columns().len(),
             dest_reg: record_reg,
             index_name: None,
+            affinity_str: None,
         });
         Some(record_reg)
     } else {
@@ -1393,11 +1408,18 @@ pub fn emit_cdc_patch_record(
             dst_reg: columns_reg + rowid_alias_position,
             extra_amount: 0,
         });
+        let affinity_str = table
+            .columns()
+            .iter()
+            .map(|col| col.affinity().aff_mask())
+            .collect::<String>();
+
         program.emit_insn(Insn::MakeRecord {
             start_reg: columns_reg,
             count: table.columns().len(),
             dest_reg: record_reg,
             index_name: None,
+            affinity_str: Some(affinity_str),
         });
         record_reg
     } else {
@@ -1423,11 +1445,17 @@ pub fn emit_cdc_full_record(
             program.emit_column_or_rowid(table_cursor_id, i, columns_reg + 1 + i);
         }
     }
+    let affinity_str = columns
+        .iter()
+        .map(|col| col.affinity().aff_mask())
+        .collect::<String>();
+
     program.emit_insn(Insn::MakeRecord {
         start_reg: columns_reg + 1,
         count: columns.len(),
         dest_reg: columns_reg,
         index_name: None,
+        affinity_str: Some(affinity_str),
     });
     columns_reg
 }
@@ -1530,6 +1558,7 @@ pub fn emit_cdc_insns(
         count: 8,
         dest_reg: record_reg,
         index_name: None,
+        affinity_str: None,
     });

     program.emit_insn(Insn::Insert {
@@ -1541,41 +1570,69 @@ pub fn emit_cdc_insns(
     });
     Ok(())
 }
-
 /// Initialize the limit/offset counters and registers.
 /// In case of compound SELECTs, the limit counter is initialized only once,
 /// hence [LimitCtx::initialize_counter] being false in those cases.
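 /// A limit/offset that folds to a compile-time integer is loaded with a plain Integer
 /// opcode; any other expression is translated at runtime and coerced with MustBeInt.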
 fn init_limit(
     program: &mut ProgramBuilder,
     t_ctx: &mut TranslateCtx,
-    limit: Option<isize>,
-    offset: Option<isize>,
+    limit: &Option<Box<ast::Expr>>,
+    offset: &Option<Box<ast::Expr>>,
 ) {
-    if t_ctx.limit_ctx.is_none() {
-        t_ctx.limit_ctx = limit.map(|_| LimitCtx::new(program));
+    if t_ctx.limit_ctx.is_none() && limit.is_some() {
+        t_ctx.limit_ctx = Some(LimitCtx::new(program));
     }
-    let Some(limit_ctx) = t_ctx.limit_ctx else {
+    let Some(limit_ctx) = &t_ctx.limit_ctx else {
         return;
     };
     if limit_ctx.initialize_counter {
-        program.emit_insn(Insn::Integer {
-            value: limit.expect("limit must be Some if limit_ctx is Some") as i64,
-            dest: limit_ctx.reg_limit,
-        });
+        if let Some(expr) = limit {
+            if let Some(value) = try_fold_expr_to_i64(expr) {
+                program.emit_insn(Insn::Integer {
+                    value,
+                    dest: limit_ctx.reg_limit,
+                });
+            } else {
+                let r = limit_ctx.reg_limit;
+                program.add_comment(program.offset(), "LIMIT expr");
+                _ = translate_expr(program, None, expr, r, &t_ctx.resolver);
+                program.emit_insn(Insn::MustBeInt { reg: r });
+            }
+        }
     }
-    if t_ctx.reg_offset.is_none() && offset.is_some_and(|n| n.ne(&0)) {
-        let reg = program.alloc_register();
-        t_ctx.reg_offset = Some(reg);
-        program.emit_insn(Insn::Integer {
-            value: offset.unwrap() as i64,
-            dest: reg,
-        });
-        let combined_reg = program.alloc_register();
-        t_ctx.reg_limit_offset_sum = Some(combined_reg);
-        program.emit_insn(Insn::OffsetLimit {
-            limit_reg: t_ctx.limit_ctx.unwrap().reg_limit,
-            offset_reg: reg,
-            combined_reg,
-        });
+
+    if t_ctx.reg_offset.is_none() {
+        if let Some(expr) = offset {
+            if let Some(value) = try_fold_expr_to_i64(expr) {
+                if value != 0 {
+                    let reg = program.alloc_register();
+                    t_ctx.reg_offset = Some(reg);
+                    program.emit_insn(Insn::Integer { value, dest: reg });
+                    let combined_reg = program.alloc_register();
+                    t_ctx.reg_limit_offset_sum = Some(combined_reg);
+                    program.emit_insn(Insn::OffsetLimit {
+                        limit_reg: limit_ctx.reg_limit,
+                        offset_reg: reg,
+                        combined_reg,
+                    });
+                }
+            } else {
+                let reg = program.alloc_register();
+                t_ctx.reg_offset = Some(reg);
+                let r = reg;
+
+                program.add_comment(program.offset(), "OFFSET expr");
+                _ = translate_expr(program, None, expr, r, &t_ctx.resolver);
+                program.emit_insn(Insn::MustBeInt { reg: r });
+
+                let combined_reg = program.alloc_register();
+                t_ctx.reg_limit_offset_sum = Some(combined_reg);
+                program.emit_insn(Insn::OffsetLimit {
+                    limit_reg: limit_ctx.reg_limit,
+                    offset_reg: reg,
+                    combined_reg,
+                });
+            }
+        }
     }
 }
diff --git a/core/translate/expr.rs b/core/translate/expr.rs
index bae89f612..1afdabe0e 100644
--- a/core/translate/expr.rs
+++ b/core/translate/expr.rs
@@ -336,10 +336,19 @@ pub fn translate_condition_expr(
                 resolver,
             )?;
         }
-        ast::Expr::Binary(_, _, _) => {
+        ast::Expr::Binary(e1, op, e2) => {
             let result_reg = program.alloc_register();
-            translate_expr(program, Some(referenced_tables), expr, result_reg, resolver)?;
-            emit_cond_jump(program, condition_metadata, result_reg);
+            binary_expr_shared(
+                program,
+                Some(referenced_tables),
+                e1,
+                e2,
+                op,
+                result_reg,
+                resolver,
+                Some(condition_metadata),
+                emit_binary_condition_insn,
+            )?;
         }
         ast::Expr::Literal(_)
         | ast::Expr::Cast { .. }
@@ -508,80 +517,18 @@ pub fn translate_expr(
             unreachable!("expression should have been rewritten in optimizer")
         }
         ast::Expr::Binary(e1, op, e2) => {
-            // Check if both sides of the expression are equivalent and reuse the same register if so
-            if exprs_are_equivalent(e1, e2) {
-                let shared_reg = program.alloc_register();
-                translate_expr(program, referenced_tables, e1, shared_reg, resolver)?;
-
-                emit_binary_insn(
-                    program,
-                    op,
-                    shared_reg,
-                    shared_reg,
-                    target_register,
-                    e1,
-                    e2,
-                    referenced_tables,
-                )?;
-                program.reset_collation();
-                Ok(target_register)
-            } else {
-                let e1_reg = program.alloc_registers(2);
-                let e2_reg = e1_reg + 1;
-
-                translate_expr(program, referenced_tables, e1, e1_reg, resolver)?;
-                let left_collation_ctx = program.curr_collation_ctx();
-                program.reset_collation();
-
-                translate_expr(program, referenced_tables, e2, e2_reg, resolver)?;
-                let right_collation_ctx = program.curr_collation_ctx();
-                program.reset_collation();
-
-                /*
-                 * The rules for determining which collating function to use for a binary comparison
-                 * operator (=, <, >, <=, >=, !=, IS, and IS NOT) are as follows:
-                 *
-                 * 1. If either operand has an explicit collating function assignment using the postfix COLLATE operator,
-                 *    then the explicit collating function is used for comparison,
-                 *    with precedence to the collating function of the left operand.
-                 *
-                 * 2. If either operand is a column, then the collating function of that column is used
-                 *    with precedence to the left operand. For the purposes of the previous sentence,
-                 *    a column name preceded by one or more unary "+" operators and/or CAST operators is still considered a column name.
-                 *
-                 * 3. Otherwise, the BINARY collating function is used for comparison.
-                 */
-                let collation_ctx = {
-                    match (left_collation_ctx, right_collation_ctx) {
-                        (Some((c_left, true)), _) => Some((c_left, true)),
-                        (_, Some((c_right, true))) => Some((c_right, true)),
-                        (Some((c_left, from_collate_left)), None) => {
-                            Some((c_left, from_collate_left))
-                        }
-                        (None, Some((c_right, from_collate_right))) => {
-                            Some((c_right, from_collate_right))
-                        }
-                        (Some((c_left, from_collate_left)), Some((_, false))) => {
-                            Some((c_left, from_collate_left))
-                        }
-                        _ => None,
-                    }
-                };
-                program.set_collation(collation_ctx);
-
-                emit_binary_insn(
-                    program,
-                    op,
-                    e1_reg,
-                    e2_reg,
-                    target_register,
-                    e1,
-                    e2,
-                    referenced_tables,
-                )?;
-                program.reset_collation();
-                Ok(target_register)
-            }
+            binary_expr_shared(
+                program,
+                referenced_tables,
+                e1,
+                e2,
+                op,
+                target_register,
+                resolver,
+                None,
+                emit_binary_insn,
+            )?;
+            Ok(target_register)
         }
         ast::Expr::Case {
             base,
@@ -2224,6 +2171,102 @@ pub fn translate_expr(
     Ok(target_register)
 }

+#[allow(clippy::too_many_arguments)]
+fn binary_expr_shared(
+    program: &mut ProgramBuilder,
+    referenced_tables: Option<&TableReferences>,
+    e1: &ast::Expr,
+    e2: &ast::Expr,
+    op: &ast::Operator,
+    target_register: usize,
+    resolver: &Resolver,
+    condition_metadata: Option<ConditionMetadata>,
+    emit_fn: impl Fn(
+        &mut ProgramBuilder,
+        &ast::Operator,
+        usize,      // left reg
+        usize,      // right reg
+        usize,      // target reg
+        &ast::Expr, // left expr
+        &ast::Expr, // right expr
+        Option<&TableReferences>,
+        Option<ConditionMetadata>,
+    ) -> Result<()>,
+) -> Result<usize> {
+    // Check if both sides of the expression are equivalent and reuse the same register if so
+    if exprs_are_equivalent(e1, e2) {
+        let shared_reg = program.alloc_register();
+        translate_expr(program, referenced_tables, e1, shared_reg, resolver)?;
+
+        emit_fn(
+            program,
+            op,
+            shared_reg,
+            shared_reg,
+            target_register,
+            e1,
+            e2,
+            referenced_tables,
+            condition_metadata,
+        )?;
+        program.reset_collation();
+        Ok(target_register)
+    } else {
+        let e1_reg = program.alloc_registers(2);
+        let e2_reg = e1_reg + 1;
+
+        translate_expr(program, referenced_tables, e1, e1_reg, resolver)?;
+        let left_collation_ctx = program.curr_collation_ctx();
+        program.reset_collation();
+
+        translate_expr(program, referenced_tables, e2, e2_reg, resolver)?;
+        let right_collation_ctx = program.curr_collation_ctx();
+        program.reset_collation();
+
+        /*
+         * The rules for determining which collating function to use for a binary comparison
+         * operator (=, <, >, <=, >=, !=, IS, and IS NOT) are as follows:
+         *
+         * 1. If either operand has an explicit collating function assignment using the postfix COLLATE operator,
+         *    then the explicit collating function is used for comparison,
+         *    with precedence to the collating function of the left operand.
+         *
+         * 2. If either operand is a column, then the collating function of that column is used
+         *    with precedence to the left operand. For the purposes of the previous sentence,
+         *    a column name preceded by one or more unary "+" operators and/or CAST operators is still considered a column name.
+         *
+         * 3. Otherwise, the BINARY collating function is used for comparison.
+         */
+        let collation_ctx = {
+            match (left_collation_ctx, right_collation_ctx) {
+                (Some((c_left, true)), _) => Some((c_left, true)),
+                (_, Some((c_right, true))) => Some((c_right, true)),
+                (Some((c_left, from_collate_left)), None) => Some((c_left, from_collate_left)),
+                (None, Some((c_right, from_collate_right))) => Some((c_right, from_collate_right)),
+                (Some((c_left, from_collate_left)), Some((_, false))) => {
+                    Some((c_left, from_collate_left))
+                }
+                _ => None,
+            }
+        };
+        program.set_collation(collation_ctx);
+
+        emit_fn(
+            program,
+            op,
+            e1_reg,
+            e2_reg,
+            target_register,
+            e1,
+            e2,
+            referenced_tables,
+            condition_metadata,
+        )?;
+        program.reset_collation();
+        Ok(target_register)
+    }
+}
+
 #[allow(clippy::too_many_arguments)]
 fn emit_binary_insn(
     program: &mut ProgramBuilder,
@@ -2234,6 +2277,7 @@ fn emit_binary_insn(
     lhs_expr: &Expr,
     rhs_expr: &Expr,
     referenced_tables: Option<&TableReferences>,
+    _: Option<ConditionMetadata>,
 ) -> Result<()> {
     let mut affinity = Affinity::Blob;
     if op.is_comparison() {
@@ -2481,6 +2525,277 @@ fn emit_binary_insn(
     Ok(())
 }

+#[allow(clippy::too_many_arguments)]
+fn emit_binary_condition_insn(
+    program: &mut ProgramBuilder,
+    op: &ast::Operator,
+    lhs: usize,
+    rhs: usize,
+    target_register: usize,
+    lhs_expr: &Expr,
+    rhs_expr: &Expr,
+    referenced_tables: Option<&TableReferences>,
+    condition_metadata: Option<ConditionMetadata>,
+) -> Result<()> {
+    let condition_metadata = condition_metadata
+        .expect("condition metadata must be provided for emit_binary_condition_insn");
+    let mut affinity = Affinity::Blob;
+    if op.is_comparison() {
+        affinity = comparison_affinity(lhs_expr, rhs_expr, referenced_tables);
+    }
+
+    let opposite_op = match op {
+        ast::Operator::NotEquals => ast::Operator::Equals,
+        ast::Operator::Equals => ast::Operator::NotEquals,
+        ast::Operator::Less => ast::Operator::GreaterEquals,
+        ast::Operator::LessEquals => ast::Operator::Greater,
+        ast::Operator::Greater => ast::Operator::LessEquals,
+        ast::Operator::GreaterEquals => ast::Operator::Less,
+        ast::Operator::Is => ast::Operator::IsNot,
+        ast::Operator::IsNot => ast::Operator::Is,
+        other => *other,
+    };
+
+    // For conditional jumps we need to use the opposite comparison operator
+    // when we intend to jump if the condition is false.
+    // Jumping when the condition is false
+    // is the common case, e.g.:
+    //   WHERE x=1 turns into "jump if x != 1".
+    // However, in e.g. "WHERE x=1 OR y=2" we want to jump if the condition is true
+    // when evaluating "x=1", because we are jumping over the "y=2" condition, and if the condition
+    // is false we move on to the "y=2" condition without jumping.
+    let op_to_use = if condition_metadata.jump_if_condition_is_true {
+        *op
+    } else {
+        opposite_op
+    };
+
+    // Similarly, we "jump if NULL" only when we intend to jump if the condition is false.
+    let flags = if condition_metadata.jump_if_condition_is_true {
+        CmpInsFlags::default().with_affinity(affinity)
+    } else {
+        CmpInsFlags::default()
+            .with_affinity(affinity)
+            .jump_if_null()
+    };
+
+    let target_pc = if condition_metadata.jump_if_condition_is_true {
+        condition_metadata.jump_target_when_true
+    } else {
+        condition_metadata.jump_target_when_false
+    };
+
+    // For conditional jumps that don't have a clear "opposite op" (e.g. x+y), we check whether the result is nonzero/nonnull
+    // (or zero/null) depending on the condition metadata.
+    let eval_result = |program: &mut ProgramBuilder, result_reg: usize| {
+        if condition_metadata.jump_if_condition_is_true {
+            program.emit_insn(Insn::If {
+                reg: result_reg,
+                target_pc,
+                jump_if_null: false,
+            });
+        } else {
+            program.emit_insn(Insn::IfNot {
+                reg: result_reg,
+                target_pc,
+                jump_if_null: true,
+            });
+        }
+    };
+
+    match op_to_use {
+        ast::Operator::NotEquals => {
+            program.emit_insn(Insn::Ne {
+                lhs,
+                rhs,
+                target_pc,
+                flags,
+                collation: program.curr_collation(),
+            });
+        }
+        ast::Operator::Equals => {
+            program.emit_insn(Insn::Eq {
+                lhs,
+                rhs,
+                target_pc,
+                flags,
+                collation: program.curr_collation(),
+            });
+        }
+        ast::Operator::Less => {
+            program.emit_insn(Insn::Lt {
+                lhs,
+                rhs,
+                target_pc,
+                flags,
+                collation: program.curr_collation(),
+            });
+        }
+        ast::Operator::LessEquals => {
+            program.emit_insn(Insn::Le {
+                lhs,
+                rhs,
+                target_pc,
+                flags,
+                collation: program.curr_collation(),
+            });
+        }
+        ast::Operator::Greater => {
+            program.emit_insn(Insn::Gt {
+                lhs,
+                rhs,
+                target_pc,
+                flags,
+                collation: program.curr_collation(),
+            });
+        }
+        ast::Operator::GreaterEquals => {
+            program.emit_insn(Insn::Ge {
+                lhs,
+                rhs,
+                target_pc,
+                flags,
+                collation: program.curr_collation(),
+            });
+        }
+        ast::Operator::Is => {
+            program.emit_insn(Insn::Eq {
+                lhs,
+                rhs,
+                target_pc,
+                flags: flags.null_eq(),
+                collation: program.curr_collation(),
+            });
+        }
+        ast::Operator::IsNot => {
+            program.emit_insn(Insn::Ne {
+                lhs,
+                rhs,
+                target_pc,
+                flags: flags.null_eq(),
+                collation: program.curr_collation(),
+            });
+        }
+        ast::Operator::Add => {
+            program.emit_insn(Insn::Add {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::Subtract => {
+            program.emit_insn(Insn::Subtract {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::Multiply => {
+            program.emit_insn(Insn::Multiply {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::Divide => {
+            program.emit_insn(Insn::Divide {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::Modulus => {
+            program.emit_insn(Insn::Remainder {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::And => {
+            program.emit_insn(Insn::And {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::Or => {
+            program.emit_insn(Insn::Or {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::BitwiseAnd => {
+            program.emit_insn(Insn::BitAnd {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::BitwiseOr => {
+            program.emit_insn(Insn::BitOr {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::RightShift => {
+            program.emit_insn(Insn::ShiftRight {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::LeftShift => {
+            program.emit_insn(Insn::ShiftLeft {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        #[cfg(feature = "json")]
+        op @ (ast::Operator::ArrowRight | ast::Operator::ArrowRightShift) => {
+            let json_func = match op {
+                ast::Operator::ArrowRight => JsonFunc::JsonArrowExtract,
+                ast::Operator::ArrowRightShift => JsonFunc::JsonArrowShiftExtract,
+                _ => unreachable!(),
+            };
+
+            program.emit_insn(Insn::Function {
+                constant_mask: 0,
+                start_reg: lhs,
+                dest: target_register,
+                func: FuncCtx {
+                    func: Func::Json(json_func),
+                    arg_count: 2,
+                },
+            });
+            eval_result(program, target_register);
+        }
+        ast::Operator::Concat => {
+            program.emit_insn(Insn::Concat {
+                lhs,
+                rhs,
+                dest: target_register,
+            });
+            eval_result(program, target_register);
+        }
+        other_unimplemented => todo!("{:?}", other_unimplemented),
+    }
+
+    Ok(())
+}
+
 /// The base logic for translating LIKE and GLOB expressions.
 /// The logic for handling "NOT LIKE" is different depending on whether the expression
 /// is a conditional jump or not. This is why the caller handles the "NOT LIKE" behavior;
@@ -3185,6 +3500,7 @@ pub fn translate_expr_for_returning(
         lhs,
         rhs,
         None, // No table references needed for RETURNING
+        None, // No condition metadata needed for RETURNING
     )?;

     Ok(target_register)
diff --git a/core/translate/index.rs b/core/translate/index.rs
index 7d332f0d2..8d1ea14a2 100644
--- a/core/translate/index.rs
+++ b/core/translate/index.rs
@@ -174,6 +174,7 @@ pub fn translate_create_index(
         count: columns.len() + 1,
         dest_reg: record_reg,
         index_name: Some(idx_name.clone()),
+        affinity_str: None,
     });
     program.emit_insn(Insn::SorterInsert {
         cursor_id: sorter_cursor_id,
diff --git a/core/translate/insert.rs b/core/translate/insert.rs
index 9c5694c0a..d4d020e5c 100644
--- a/core/translate/insert.rs
+++ b/core/translate/insert.rs
@@ -64,6 +64,7 @@ pub fn translate_insert(
     if with.is_some() {
         crate::bail_parse_error!("WITH clause is not supported");
     }
+
     if on_conflict.is_some() {
         crate::bail_parse_error!("ON CONFLICT clause is not supported");
     }
@@ -76,11 +77,22 @@ pub fn translate_insert(
         );
     }
     let table_name = &tbl_name.name;
+
+    // Check if this is a system table that should be protected from direct writes
+    if crate::schema::is_system_table(table_name.as_str()) {
+        crate::bail_parse_error!("table {} may not be modified", table_name);
+    }
+
     let table = match schema.get_table(table_name.as_str()) {
         Some(table) => table,
         None => crate::bail_parse_error!("no such table: {}", table_name),
     };

+    // Check if this is a materialized view
+    if schema.is_materialized_view(table_name.as_str()) {
+        crate::bail_parse_error!("cannot modify materialized view {}", table_name);
+    }
+
     let resolver = Resolver::new(schema, syms);

     if let Some(virtual_table) = &table.virtual_table() {
@@ -242,11 +254,35 @@ pub fn translate_insert(
             end_offset: yield_label,
         });
         let record_reg = program.alloc_register();
+
+        let affinity_str = if columns.is_empty() {
+            btree_table
+                .columns
+                .iter()
+                .filter(|col| !col.hidden)
+                .map(|col| col.affinity().aff_mask())
+                .collect::<String>()
+        } else {
+            columns
+                .iter()
+                .map(|col_name| {
+                    let column_name = normalize_ident(col_name.as_str());
+                    table
+                        .get_column_by_name(&column_name)
+                        .unwrap()
+                        .1
+                        .affinity()
+                        .aff_mask()
+                })
+                .collect::<String>()
+        };
+
         program.emit_insn(Insn::MakeRecord {
             start_reg: yield_reg + 1,
             count: result.num_result_cols,
             dest_reg: record_reg,
             index_name: None,
+            affinity_str: Some(affinity_str),
         });

         let rowid_reg = program.alloc_register();
@@ -513,6 +549,7 @@ pub fn translate_insert(
             count: num_cols + 1,
             dest_reg: record_reg,
             index_name: Some(index.name.clone()),
+            affinity_str: None,
         });

         if index.unique {
@@ -633,11 +670,18 @@ pub fn translate_insert(
         });
     }
     // Create and insert the record
+    let affinity_str = insertion
+        .col_mappings
+        .iter()
+        .map(|col_mapping| col_mapping.column.affinity().aff_mask())
+        .collect::<String>();
+
     program.emit_insn(Insn::MakeRecord {
         start_reg: insertion.first_col_register(),
         count: insertion.col_mappings.len(),
         dest_reg: insertion.record_register(),
         index_name: None,
+        affinity_str: Some(affinity_str),
     });
     program.emit_insn(Insn::Insert {
         cursor: cursor_id,
diff --git a/core/translate/main_loop.rs b/core/translate/main_loop.rs
index 6774c04d2..e4a5334ee 100644
--- a/core/translate/main_loop.rs
+++ b/core/translate/main_loop.rs
@@ -196,7 +196,8 @@ pub fn init_loop(
             t_ctx.meta_left_joins[table_index] = Some(lj_metadata);
         }
     }
-    let (table_cursor_id, index_cursor_id) = table.open_cursors(program, mode)?;
+    let (table_cursor_id, index_cursor_id) =
+        table.open_cursors(program, mode, t_ctx.resolver.schema)?;
     match &table.op {
         Operation::Scan(Scan::BTreeTable { index, .. }) => match (mode, &table.table) {
             (OperationMode::SELECT, Table::BTree(btree)) => {
@@ -1428,6 +1429,7 @@ fn emit_autoindex(
         count: num_regs_to_reserve,
         dest_reg: record_reg,
         index_name: Some(index.name.clone()),
+        affinity_str: None,
     });
     program.emit_insn(Insn::IdxInsert {
         cursor_id: index_cursor_id,
diff --git a/core/translate/optimizer/mod.rs b/core/translate/optimizer/mod.rs
index ec9237f00..e0b1c6b73 100644
--- a/core/translate/optimizer/mod.rs
+++ b/core/translate/optimizer/mod.rs
@@ -190,6 +190,34 @@ fn optimize_table_access(
     let maybe_order_target = compute_order_target(order_by, group_by.as_mut());
     let constraints_per_table =
         constraints_from_where_clause(where_clause, table_references, available_indexes)?;
+
+    // Currently the expressions we evaluate as constraints are binary expressions that will never be true for a NULL operand.
+    // If there are any constraints on the right hand side table of an outer join that are not part of the outer join condition,
+    // the outer join can be converted into an inner join.
+    // for example:
+    // - SELECT * FROM t1 LEFT JOIN t2 ON false WHERE t2.id = 5
+    // there can never be a situation where null columns are emitted for t2 because t2.id = 5 will never be true in that case.
+    // hence: we can convert the outer join into an inner join.
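+    // (A term with `from_outer_join == None` originated in the WHERE clause rather than the
+    // ON clause, so it filters the joined result and can never hold for a NULL-extended row.)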
+    for (i, t) in table_references
+        .joined_tables_mut()
+        .iter_mut()
+        .enumerate()
+        .filter(|(_, t)| {
+            t.join_info
+                .as_ref()
+                .is_some_and(|join_info| join_info.outer)
+        })
+    {
+        if constraints_per_table[i]
+            .constraints
+            .iter()
+            .any(|c| where_clause[c.where_clause_pos.0].from_outer_join.is_none())
+        {
+            t.join_info.as_mut().unwrap().outer = false;
+            continue;
+        }
+    }
+
     let Some(best_join_order_result) = compute_best_join_order(
         table_references.joined_tables_mut(),
         maybe_order_target.as_ref(),
@@ -339,14 +367,29 @@ fn optimize_table_access(
             )?,
         });
     } else {
+        let is_outer_join = joined_tables[table_idx]
+            .join_info
+            .as_ref()
+            .is_some_and(|join_info| join_info.outer);
         for cref in constraint_refs.iter() {
             let constraint =
                 &constraints_per_table[table_idx].constraints[cref.constraint_vec_pos];
+            let where_term = &mut where_clause[constraint.where_clause_pos.0];
             assert!(
-                !where_clause[constraint.where_clause_pos.0].consumed,
-                "trying to consume a where clause term twice: {:?}",
-                where_clause[constraint.where_clause_pos.0]
+                !where_term.consumed,
+                "trying to consume a where clause term twice: {where_term:?}",
             );
+            if is_outer_join && where_term.from_outer_join.is_none() {
+                // Don't consume WHERE terms from outer joins if the where term is not part of the outer join condition. Consider:
+                // - SELECT * FROM t1 LEFT JOIN t2 ON false WHERE t2.id = 5
+                // - there is no row in t2 where t2.id = 5
+                // This should never produce any rows with null columns for t2 (because NULL != 5), but if we consume 't2.id = 5' to use it as a seek key,
+                // this will cause a null row to be emitted for EVERY row of t1.
+                // Note: in most cases like this, the LEFT JOIN could just be converted into an INNER JOIN (because e.g. t2.id=5 statically excludes any null rows),
+                // but that optimization should not be done here - it should be done before the join order optimization happens.
+                continue;
+            }
+
             where_clause[constraint.where_clause_pos.0].consumed = true;
         }

         if let Some(index) = &index {
diff --git a/core/translate/order_by.rs b/core/translate/order_by.rs
index 129bc8d5b..a4dc56691 100644
--- a/core/translate/order_by.rs
+++ b/core/translate/order_by.rs
@@ -117,7 +117,13 @@ pub fn emit_order_by(
     });
     program.preassign_label_to_next_insn(sort_loop_start_label);

-    emit_offset(program, plan, sort_loop_next_label, t_ctx.reg_offset);
+    emit_offset(
+        program,
+        plan,
+        sort_loop_next_label,
+        t_ctx.reg_offset,
+        &t_ctx.resolver,
+    );

     program.emit_insn(Insn::SorterData {
         cursor_id: sort_cursor,
@@ -301,6 +307,7 @@ pub fn sorter_insert(
         count: column_count,
         dest_reg: record_reg,
         index_name: None,
+        affinity_str: None,
     });
     program.emit_insn(Insn::SorterInsert {
         cursor_id,
diff --git a/core/translate/plan.rs b/core/translate/plan.rs
index e43cdbd76..70844c0e3 100644
--- a/core/translate/plan.rs
+++ b/core/translate/plan.rs
@@ -3,7 +3,7 @@ use turso_parser::ast::{self, SortOrder};

 use crate::{
     function::AggFunc,
-    schema::{BTreeTable, Column, FromClauseSubquery, Index, Table},
+    schema::{BTreeTable, Column, FromClauseSubquery, Index, Schema, Table},
     vdbe::{
         builder::{CursorKey, CursorType, ProgramBuilder},
         insn::{IdxInsertFlags, Insn},
@@ -154,8 +154,8 @@ pub enum Plan {
     CompoundSelect {
         left: Vec<(SelectPlan, ast::CompoundOperator)>,
         right_most: SelectPlan,
-        limit: Option<isize>,
-        offset: Option<isize>,
+        limit: Option<Box<ast::Expr>>,
+        offset: Option<Box<ast::Expr>>,
         order_by: Option<Vec<(usize, SortOrder)>>,
     },
     Delete(DeletePlan),
@@ -264,6 +264,7 @@ impl DistinctCtx {
             count: num_regs,
             dest_reg: record_reg,
             index_name: Some(self.ephemeral_index_name.to_string()),
+            affinity_str: None,
         });
         program.emit_insn(Insn::IdxInsert {
             cursor_id: self.cursor_id,
@@ -292,9 +293,9 @@ pub struct SelectPlan {
     /// all the aggregates collected from the result columns, order by, and (TODO) having clauses
     pub aggregates: Vec<Aggregate>,
     /// limit clause
-    pub limit: Option<isize>,
+    pub limit: Option<Box<ast::Expr>>,
     /// offset clause
-    pub offset: Option<isize>,
+    pub offset: Option<Box<ast::Expr>>,
     /// query contains a constant condition that is always false
     pub contains_constant_false_condition: bool,
     /// the destination of the resulting rows from this plan.
@@ -378,9 +379,9 @@ pub struct DeletePlan {
     /// order by clause
     pub order_by: Vec<(Box<ast::Expr>, SortOrder)>,
     /// limit clause
-    pub limit: Option<isize>,
+    pub limit: Option<Box<ast::Expr>>,
     /// offset clause
-    pub offset: Option<isize>,
+    pub offset: Option<Box<ast::Expr>>,
     /// query contains a constant condition that is always false
     pub contains_constant_false_condition: bool,
     /// Indexes that must be updated by the delete operation.
@@ -394,8 +395,8 @@ pub struct UpdatePlan {
     pub set_clauses: Vec<(usize, Box<ast::Expr>)>,
     pub where_clause: Vec<WhereTerm>,
     pub order_by: Vec<(Box<ast::Expr>, SortOrder)>,
-    pub limit: Option<isize>,
-    pub offset: Option<isize>,
+    pub limit: Option<Box<ast::Expr>>,
+    pub offset: Option<Box<ast::Expr>>,
     // TODO: optional RETURNING clause
     pub returning: Option<Vec<ResultSetColumn>>,
     // whether the WHERE clause is always false
@@ -852,6 +853,7 @@ impl JoinedTable {
         &self,
         program: &mut ProgramBuilder,
         mode: OperationMode,
+        schema: &Schema,
     ) -> Result<(Option<usize>, Option<usize>)> {
         let index = self.op.index();
         match &self.table {
@@ -863,10 +865,17 @@ impl JoinedTable {
             let table_cursor_id = if table_not_required {
                 None
             } else {
-                Some(program.alloc_cursor_id_keyed(
-                    CursorKey::table(self.internal_id),
-                    CursorType::BTreeTable(btree.clone()),
-                ))
+                // Check if this is a materialized view
+                let cursor_type =
+                    if let Some(view_mutex) = schema.get_materialized_view(&btree.name) {
+                        CursorType::MaterializedView(btree.clone(), view_mutex)
+                    } else {
+                        CursorType::BTreeTable(btree.clone())
+                    };
+                Some(
+                    program
+                        .alloc_cursor_id_keyed(CursorKey::table(self.internal_id), cursor_type),
+                )
             };
             let index_cursor_id = index.map(|index| {
                 program.alloc_cursor_id_keyed(
diff --git a/core/translate/planner.rs b/core/translate/planner.rs
index b0b696bb4..ea4cc8f53 100644
--- a/core/translate/planner.rs
+++ b/core/translate/planner.rs
@@ -3,9 +3,9 @@ use std::sync::Arc;
 use super::{
     expr::walk_expr,
     plan::{
-        Aggregate, ColumnUsedMask, Distinctness, EvalAt, JoinInfo, JoinOrderMember, JoinedTable,
-        Operation, OuterQueryReference, Plan, QueryDestination, ResultSetColumn, Scan,
-        TableReferences, WhereTerm,
+        Aggregate, ColumnUsedMask, Distinctness, EvalAt, IterationDirection, JoinInfo,
+        JoinOrderMember, JoinedTable, Operation, OuterQueryReference, Plan, QueryDestination,
+        ResultSetColumn, Scan, TableReferences, WhereTerm,
     },
     select::prepare_select_plan,
     SymbolTable,
 };
 use crate::function::{AggFunc, ExtFunc};
 use crate::translate::expr::WalkControl;
 use crate::{
+    ast::Limit,
     function::Func,
     schema::{Schema, Table},
     translate::expr::walk_expr_mut,
@@ -23,8 +24,8 @@ use crate::{
 use turso_macros::match_ignore_ascii_case;
 use turso_parser::ast::Literal::Null;
 use turso_parser::ast::{
-    self, As, Expr, FromClause, JoinType, Limit, Literal, Materialized, QualifiedName,
-    TableInternalId, UnaryOperator, With,
+    self, As, Expr, FromClause, JoinType, Literal, Materialized, QualifiedName, TableInternalId,
+    With,
 };

 pub const ROWID: &str = "rowid";
@@ -528,12 +529,29 @@ fn parse_table(
         schema.get_materialized_view(table_name.as_str())
     });
     if let Some(view) = view {
-        // Create a virtual table wrapper for the view
-        // We'll use the view's columns from the schema
-        let vtab = crate::vtab_view::create_view_virtual_table(
-            normalize_ident(table_name.as_str()).as_str(),
-            view.clone(),
-        )?;
+        // Check if this materialized view has persistent storage
+        let view_guard = view.lock().unwrap();
+        let root_page = view_guard.get_root_page();
+
+        if root_page == 0 {
+            drop(view_guard);
+            return Err(crate::LimboError::InternalError(
+                "Materialized view has no storage allocated".to_string(),
+            ));
+        }
+
+        // This is a materialized view with storage - treat it as a regular BTree table
+        // Create a BTreeTable from the view's metadata
+        let btree_table = Arc::new(crate::schema::BTreeTable {
+            name: view_guard.name().to_string(),
+            root_page,
+            columns: view_guard.columns.clone(),
+            primary_key_columns: Vec::new(),
+            has_rowid: true,
+            is_strict: false,
+            unique_sets: None,
+        });
+        drop(view_guard);
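+        // From here on the materialized view is scanned like an ordinary rowid B-tree table
+        // rooted at root_page; no virtual-table machinery is involved.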
         let alias = maybe_alias
             .map(|a| match a {
@@ -543,12 +561,11 @@ fn parse_table(
             .map(|a| normalize_ident(a.as_str()));

         table_references.add_joined_table(JoinedTable {
-            op: Operation::Scan(Scan::VirtualTable {
-                idx_num: -1,
-                idx_str: None,
-                constraints: Vec::new(),
+            op: Operation::Scan(Scan::BTreeTable {
+                iter_dir: IterationDirection::Forwards,
+                index: None,
             }),
-            table: Table::Virtual(vtab),
+            table: Table::BTree(btree_table),
             identifier: alias.unwrap_or(normalized_qualified_name),
             internal_id: table_ref_counter.next(),
             join_info: None,
@@ -1145,44 +1162,6 @@ fn parse_join(
     Ok(())
 }

-pub fn parse_limit(limit: &Limit) -> Result<(Option<isize>, Option<isize>)> {
-    let offset_val = match &limit.offset {
-        Some(offset_expr) => match offset_expr.as_ref() {
-            Expr::Literal(ast::Literal::Numeric(n)) => n.parse().ok(),
-            // If OFFSET is negative, the result is as if OFFSET is zero
-            Expr::Unary(UnaryOperator::Negative, expr) => {
-                if let Expr::Literal(ast::Literal::Numeric(ref n)) = &**expr {
-                    n.parse::<isize>().ok().map(|num| -num)
-                } else {
-                    crate::bail_parse_error!("Invalid OFFSET clause");
-                }
-            }
-            _ => crate::bail_parse_error!("Invalid OFFSET clause"),
-        },
-        None => Some(0),
-    };
-
-    if let Expr::Literal(ast::Literal::Numeric(n)) = limit.expr.as_ref() {
-        Ok((n.parse().ok(), offset_val))
-    } else if let Expr::Unary(UnaryOperator::Negative, expr) = limit.expr.as_ref() {
-        if let Expr::Literal(ast::Literal::Numeric(n)) = expr.as_ref() {
-            let limit_val = n.parse::<isize>().ok().map(|num| -num);
-            Ok((limit_val, offset_val))
-        } else {
-            crate::bail_parse_error!("Invalid LIMIT clause");
-        }
-    } else if let Expr::Id(id) = limit.expr.as_ref() {
-        let id_bytes = id.as_str().as_bytes();
-        match_ignore_ascii_case!(match id_bytes {
-            b"true" => Ok((Some(1), offset_val)),
-            b"false" => Ok((Some(0), offset_val)),
-            _ => crate::bail_parse_error!("Invalid LIMIT clause"),
-        })
-    } else {
-        crate::bail_parse_error!("Invalid LIMIT clause");
-    }
-}
-
 pub fn break_predicate_at_and_boundaries(predicate: &Expr, out_predicates: &mut Vec<Expr>) {
     match predicate {
         Expr::Binary(left, ast::Operator::And, right) => {
@@ -1215,3 +1194,16 @@ where
     }
     Ok(None)
 }
+
+#[allow(clippy::type_complexity)]
+pub fn parse_limit(
+    limit: &mut Limit,
+    connection: &std::sync::Arc<Connection>,
+) -> Result<(Option<Box<Expr>>, Option<Box<Expr>>)> {
+    let mut empty_refs = TableReferences::new(Vec::new(), Vec::new());
+    bind_column_references(&mut limit.expr, &mut empty_refs, None, connection)?;
+    if let Some(ref mut off_expr) = limit.offset {
+        bind_column_references(off_expr, &mut empty_refs, None, connection)?;
+    }
+    Ok((Some(limit.expr.clone()), limit.offset.clone()))
+}
diff --git a/core/translate/pragma.rs b/core/translate/pragma.rs
index 707f53b89..3ecf5d6e7 100644
--- a/core/translate/pragma.rs
+++ b/core/translate/pragma.rs
@@ -318,13 +318,13 @@ fn update_pragma(
         PragmaName::EncryptionKey => {
             let value = parse_string(&value)?;
             let key = EncryptionKey::from_hex_string(&value)?;
-            connection.set_encryption_key(key);
+            connection.set_encryption_key(key)?;
             Ok((program, TransactionMode::None))
         }
         PragmaName::EncryptionCipher => {
             let value = parse_string(&value)?;
             let cipher = CipherMode::try_from(value.as_str())?;
-            connection.set_encryption_cipher(cipher);
+            connection.set_encryption_cipher(cipher)?;
             Ok((program, TransactionMode::None))
         }
         PragmaName::Synchronous => {
diff --git a/core/translate/result_row.rs b/core/translate/result_row.rs
index f2b722988..04b7454f5 100644
--- a/core/translate/result_row.rs
+++ b/core/translate/result_row.rs
@@ -1,3 +1,5 @@
+use turso_parser::ast::{Expr, Literal, Name, Operator, UnaryOperator};
+
 use crate::{
     vdbe::{
         builder::ProgramBuilder,
@@ -30,7 +32,7 @@ pub fn emit_select_result(
     limit_ctx: Option<LimitCtx>,
 ) -> Result<()> {
     if let (Some(jump_to), Some(_)) = (offset_jump_to, label_on_limit_reached) {
-        emit_offset(program, plan, jump_to, reg_offset);
+        emit_offset(program, plan, jump_to, reg_offset, resolver);
     }

     let start_reg = reg_result_cols_start;
@@ -101,6 +103,7 @@ pub fn emit_result_row_and_limit(
             count: plan.result_columns.len(),
             dest_reg: record_reg,
             index_name: Some(dedupe_index.name.clone()),
+            affinity_str: None,
         });
         program.emit_insn(Insn::IdxInsert {
             cursor_id: *index_cursor_id,
@@ -122,6 +125,7 @@ pub fn emit_result_row_and_limit(
             count: plan.result_columns.len() - 1,
             dest_reg: record_reg,
             index_name: Some(table.name.clone()),
+            affinity_str: None,
         });
     }
     program.emit_insn(Insn::Insert {
@@ -163,16 +167,68 @@ pub fn emit_offset(
     plan: &SelectPlan,
     jump_to: BranchOffset,
     reg_offset: Option<usize>,
+    resolver: &Resolver,
 ) {
-    match plan.offset {
-        Some(offset) if offset > 0 => {
-            program.add_comment(program.offset(), "OFFSET");
+    let Some(offset_expr) = &plan.offset else {
+        return;
+    };
+
+    if let Some(val) = try_fold_expr_to_i64(offset_expr) {
+        if val > 0 {
+            program.add_comment(program.offset(), "OFFSET const");
             program.emit_insn(Insn::IfPos {
                 reg: reg_offset.expect("reg_offset must be Some"),
                 target_pc: jump_to,
                 decrement_by: 1,
             });
         }
-        _ => {}
+        return;
+    }
+
+    let r = reg_offset.expect("reg_offset must be Some");
+
+    program.add_comment(program.offset(), "OFFSET expr");
+
+    _ = translate_expr(program, None, offset_expr, r, resolver);
+
+    program.emit_insn(Insn::MustBeInt { reg: r });
+
+    program.emit_insn(Insn::IfPos {
+        reg: r,
+        target_pc: jump_to,
+        decrement_by: 1,
+    });
+}
+
+#[allow(clippy::borrowed_box)]
+pub fn try_fold_expr_to_i64(expr: &Box<Expr>) -> Option<i64> {
+    match expr.as_ref() {
+        Expr::Literal(Literal::Numeric(n)) => n.parse::<i64>().ok(),
+        Expr::Literal(Literal::Null) => Some(0),
+        Expr::Id(Name::Ident(s)) => {
+            let lowered = s.to_ascii_lowercase();
+            if lowered == "true" {
+                Some(1)
+            } else if lowered == "false" {
+                Some(0)
+            } else {
+                None
+            }
+        }
+        Expr::Unary(UnaryOperator::Negative, inner) => try_fold_expr_to_i64(inner).map(|v| -v),
+        Expr::Unary(UnaryOperator::Positive, inner) => try_fold_expr_to_i64(inner),
+        Expr::Binary(left, op, right) => {
+            let l = try_fold_expr_to_i64(left)?;
+            let r = try_fold_expr_to_i64(right)?;
+            match op {
+                Operator::Add => Some(l.saturating_add(r)),
+                Operator::Subtract => Some(l.saturating_sub(r)),
+                Operator::Multiply => Some(l.saturating_mul(r)),
+                Operator::Divide if r != 0 => Some(l.saturating_div(r)),
+                _ => None,
+            }
+        }
+
+        _ => None,
     }
 }
diff --git a/core/translate/schema.rs b/core/translate/schema.rs
index efed36592..f5c2d1523 100644
--- a/core/translate/schema.rs
+++ b/core/translate/schema.rs
@@ -329,6 +329,7 @@ pub fn emit_schema_entry(
         count: 5,
         dest_reg: record_reg,
         index_name: None,
+        affinity_str: None,
     });

     program.emit_insn(Insn::Insert {
@@ -686,6 +687,7 @@ pub fn translate_create_virtual_table(
             count: args_vec.len(),
             dest_reg: args_record_reg,
             index_name: None,
+            affinity_str: None,
         });
         Some(args_record_reg)
     } else {
@@ -763,6 +765,14 @@ pub fn translate_drop_table(
     }
     let table = table.unwrap(); // safe since we just checked for None
+
+    // Check if this is a materialized view - if so, refuse to drop it with DROP TABLE
+    if schema.is_materialized_view(tbl_name.name.as_str()) {
+        bail_parse_error!(
+            "Cannot DROP TABLE on materialized view {}. Use DROP VIEW instead.",
+            tbl_name.name.as_str()
+        );
+    }

     let cdc_table = prepare_cdc_if_necessary(&mut program, schema, SQLITE_TABLEID)?;

     let null_reg = program.alloc_register(); // r1
@@ -1064,6 +1074,7 @@ pub fn translate_drop_table(
         count: 5,
         dest_reg: new_record_register,
         index_name: None,
+        affinity_str: None,
     });
     program.emit_insn(Insn::Delete {
         cursor_id: sqlite_schema_cursor_id_1,
diff --git a/core/translate/select.rs b/core/translate/select.rs
index cee03b87a..37de54b4d 100644
--- a/core/translate/select.rs
+++ b/core/translate/select.rs
@@ -150,8 +150,7 @@ pub fn prepare_select_plan(
         }
         let (limit, offset) = select
             .limit
-            .as_ref()
-            .map_or(Ok((None, None)), parse_limit)?;
+            .map_or(Ok((None, None)), |mut l| parse_limit(&mut l, connection))?;

         // FIXME: handle ORDER BY for compound selects
         if !select.order_by.is_empty() {
@@ -431,8 +430,8 @@ fn prepare_one_select_plan(
             plan.order_by = key;

             // Parse the LIMIT/OFFSET clause
-            (plan.limit, plan.offset) = limit.as_ref().map_or(Ok((None, None)), parse_limit)?;
-
+            (plan.limit, plan.offset) =
+                limit.map_or(Ok((None, None)), |mut l| parse_limit(&mut l, connection))?;
             // Return the unoptimized query plan
             Ok(plan)
         }
diff --git a/core/translate/update.rs b/core/translate/update.rs
index 1294160da..41f0adaaa 100644
--- a/core/translate/update.rs
+++ b/core/translate/update.rs
@@ -4,6 +4,7 @@ use std::sync::Arc;
 use crate::schema::{BTreeTable, Column, Type};
 use crate::translate::optimizer::optimize_select_plan;
 use crate::translate::plan::{Operation, QueryDestination, Scan, Search, SelectPlan};
+use crate::translate::planner::parse_limit;
 use crate::vdbe::builder::CursorType;
 use crate::{
     bail_parse_error,
@@ -21,8 +22,7 @@ use super::plan::{
     ColumnUsedMask, IterationDirection, JoinedTable, Plan, ResultSetColumn, TableReferences,
     UpdatePlan,
 };
-use super::planner::bind_column_references;
-use super::planner::{parse_limit, parse_where};
+use super::planner::{bind_column_references, parse_where};

 /*
  * Update is simple. By default we scan the table, and for each row, we check the WHERE
  * clause. If it evaluates to true, we build the new record with the updated value and insert.
@@ -59,7 +59,7 @@ pub fn translate_update( mut program: ProgramBuilder, connection: &Arc<Connection>, ) -> crate::Result<ProgramBuilder> { - let mut plan = prepare_update_plan(&mut program, schema, body, connection)?; + let mut plan = prepare_update_plan(&mut program, schema, body, connection, false)?; optimize_plan(&mut plan, schema)?; // TODO: freestyling these numbers let opts = ProgramBuilderOpts { @@ -81,7 +81,7 @@ pub fn translate_update_for_schema_change( ddl_query: &str, after: impl FnOnce(&mut ProgramBuilder), ) -> crate::Result<ProgramBuilder> { - let mut plan = prepare_update_plan(&mut program, schema, body, connection)?; + let mut plan = prepare_update_plan(&mut program, schema, body, connection, true)?; if let Plan::Update(plan) = &mut plan { if program.capture_data_changes_mode().has_updates() { @@ -106,6 +106,7 @@ pub fn prepare_update_plan( schema: &Schema, body: &mut ast::Update, connection: &Arc<Connection>, + is_internal_schema_change: bool, ) -> crate::Result<Plan> { if body.with.is_some() { bail_parse_error!("WITH clause is not supported in UPDATE"); @@ -121,6 +122,13 @@ pub fn prepare_update_plan( bail_parse_error!("INDEXED BY clause is not supported in UPDATE"); } let table_name = &body.tbl_name.name; + + // Check if this is a system table that should be protected from direct writes + // Skip this check for internal schema change operations (like ALTER TABLE) + if !is_internal_schema_change && crate::schema::is_system_table(table_name.as_str()) { + bail_parse_error!("table {} may not be modified", table_name); + } + if schema.table_has_indexes(&table_name.to_string()) && !schema.indexes_enabled() { // Let's disable altering a table with indices altogether instead of checking column by // column to be extra safe. @@ -132,6 +140,12 @@ pub fn prepare_update_plan( Some(table) => table, None => bail_parse_error!("Parse error: no such table: {}", table_name), }; + + // Check if this is a materialized view + if schema.is_materialized_view(table_name.as_str()) { + bail_parse_error!("cannot modify materialized view {}", table_name); + } + let table_name = table.get_name(); let iter_dir = body .order_by @@ -332,7 +346,10 @@ pub fn prepare_update_plan( }; // Parse the LIMIT/OFFSET clause - let (limit, offset) = body.limit.as_ref().map_or(Ok((None, None)), parse_limit)?; + let (limit, offset) = body + .limit + .as_mut() + .map_or(Ok((None, None)), |l| parse_limit(l, connection))?; // Check what indexes will need to be updated by checking set_clauses and see // if a column is contained in an index. 
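The guards added to prepare_update_plan above run in a deliberate order: system-table protection first (bypassed when the planner itself rewrites the schema), then table resolution, then the materialized-view rejection. A minimal sketch of that decision order, with stand-in predicates rather than the real crate::schema API:

// Simplified model of the UPDATE-target guards above (illustration only;
// is_system_table here is a stub, not crate::schema::is_system_table).
fn is_system_table(name: &str) -> bool {
    name.starts_with("sqlite_")
}

fn check_update_target(
    table_name: &str,
    table_exists: bool,
    is_materialized_view: bool,
    is_internal_schema_change: bool,
) -> Result<(), String> {
    // 1. Protect system tables, unless an internal schema change
    //    (e.g. ALTER TABLE) is driving the update.
    if !is_internal_schema_change && is_system_table(table_name) {
        return Err(format!("table {table_name} may not be modified"));
    }
    // 2. Resolve the table before any view-specific checks.
    if !table_exists {
        return Err(format!("no such table: {table_name}"));
    }
    // 3. Materialized views resolve like tables but reject direct writes.
    if is_materialized_view {
        return Err(format!("cannot modify materialized view {table_name}"));
    }
    Ok(())
}

fn main() {
    assert!(check_update_target("sqlite_schema", true, false, false).is_err());
    // The same target passes when ALTER TABLE performs the update internally.
    assert!(check_update_target("sqlite_schema", true, false, true).is_ok());
    assert!(check_update_target("totals_mv", true, true, false).is_err());
}

Keeping the system-table check ahead of name resolution means a user-issued UPDATE against sqlite_schema fails with the protection error even though the table exists.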
diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs index 7e0141ed2..f5fa8ab95 100644 --- a/core/translate/upsert.rs +++ b/core/translate/upsert.rs @@ -379,6 +379,7 @@ pub fn emit_upsert( count: k + 1, dest_reg: rec, index_name: Some((*idx_name).clone()), + affinity_str: None, }); program.emit_insn(Insn::IdxInsert { cursor_id: *idx_cid, @@ -392,11 +393,19 @@ // Write table row (same rowid, new payload) let rec = program.alloc_register(); + + let affinity_str = table + .columns() + .iter() + .map(|col| col.affinity().aff_mask()) + .collect::<String>(); + program.emit_insn(Insn::MakeRecord { start_reg: new_start, count: num_cols, dest_reg: rec, index_name: None, + affinity_str: Some(affinity_str), }); program.emit_insn(Insn::Insert { cursor: tbl_cursor_id, diff --git a/core/translate/values.rs b/core/translate/values.rs index 8315290e6..869a63f31 100644 --- a/core/translate/values.rs +++ b/core/translate/values.rs @@ -34,7 +34,7 @@ fn emit_values_when_single_row( t_ctx: &TranslateCtx, ) -> Result { let end_label = program.allocate_label(); - emit_offset(program, plan, end_label, t_ctx.reg_offset); + emit_offset(program, plan, end_label, t_ctx.reg_offset, &t_ctx.resolver); let first_row = &plan.values[0]; let row_len = first_row.len(); let start_reg = program.alloc_registers(row_len); @@ -87,7 +87,7 @@ fn emit_toplevel_values( }); let goto_label = program.allocate_label(); - emit_offset(program, plan, goto_label, t_ctx.reg_offset); + emit_offset(program, plan, goto_label, t_ctx.reg_offset, &t_ctx.resolver); let row_len = plan.values[0].len(); let copy_start_reg = program.alloc_registers(row_len); for i in 0..row_len { @@ -199,6 +199,7 @@ fn emit_values_to_index( count: row_len, dest_reg: record_reg, index_name: Some(index.name.clone()), + affinity_str: None, }); program.emit_insn(Insn::IdxInsert { cursor_id: *cursor_id, diff --git a/core/translate/view.rs b/core/translate/view.rs index fcb12df01..78e9c6a63 100644 --- a/core/translate/view.rs +++ b/core/translate/view.rs @@ -1,69 +1,14 @@ -use crate::schema::Schema; +use crate::schema::{Schema, DBSP_TABLE_PREFIX}; +use crate::storage::pager::CreateBTreeFlags; use crate::translate::emitter::Resolver; use crate::translate::schema::{emit_schema_entry, SchemaEntryType, SQLITE_TABLEID}; use crate::util::normalize_ident; use crate::vdbe::builder::{CursorType, ProgramBuilder}; -use crate::vdbe::insn::{CmpInsFlags, Cookie, Insn}; +use crate::vdbe::insn::{CmpInsFlags, Cookie, Insn, RegisterOrLiteral}; use crate::{Connection, Result, SymbolTable}; use std::sync::Arc; use turso_parser::ast; -/// Common logic for creating views (both regular and materialized) -fn emit_create_view_program( - schema: &Schema, - view_name: &str, - sql: String, - syms: &SymbolTable, - program: &mut ProgramBuilder, - populate_materialized: bool, -) -> Result<()> { - let normalized_view_name = normalize_ident(view_name); - - // Open cursor to sqlite_schema table - let table = schema.get_btree_table(SQLITE_TABLEID).unwrap(); - let sqlite_schema_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(table.clone())); - program.emit_insn(Insn::OpenWrite { - cursor_id: sqlite_schema_cursor_id, - root_page: 1usize.into(), - db: 0, - }); - - // Add the view entry to sqlite_schema - let resolver = Resolver::new(schema, syms); - emit_schema_entry( - program, - &resolver, - sqlite_schema_cursor_id, - None, // cdc_table_cursor_id, no cdc for views - SchemaEntryType::View, - &normalized_view_name, - &normalized_view_name, // for views, tbl_name is same as name 
- 0, // views don't have a root page - Some(sql), - )?; - - // Parse schema to load the new view - program.emit_insn(Insn::ParseSchema { - db: sqlite_schema_cursor_id, - where_clause: Some(format!("name = '{normalized_view_name}'")), - }); - - program.emit_insn(Insn::SetCookie { - db: 0, - cookie: Cookie::SchemaVersion, - value: (schema.schema_version + 1) as i32, - p5: 0, - }); - - // Populate materialized views if needed - // Note: This must come after SetCookie since it may do I/O operations - if populate_materialized { - program.emit_insn(Insn::PopulateMaterializedViews); - } - - Ok(()) -} - pub fn translate_create_materialized_view( schema: &Schema, view_name: &str, @@ -92,17 +37,144 @@ pub fn translate_create_materialized_view( ))); } - // Validate that this view can be created as an IncrementalView + // Validate the view can be created and extract its columns // This validation happens before updating sqlite_master to prevent // storing invalid view definitions use crate::incremental::view::IncrementalView; - IncrementalView::can_create_view(select_stmt)?; + use crate::schema::BTreeTable; + let view_columns = IncrementalView::validate_and_extract_columns(select_stmt, schema)?; - // Reconstruct the SQL string + // Reconstruct the SQL string for storage let sql = create_materialized_view_to_str(view_name, select_stmt); - // Use common logic to emit the view creation program - emit_create_view_program(schema, view_name, sql, syms, &mut program, true)?; + // Create a btree for storing the materialized view state + // This btree will hold the materialized rows (row_id -> values) + let view_root_reg = program.alloc_register(); + + program.emit_insn(Insn::CreateBtree { + db: 0, + root: view_root_reg, + flags: CreateBTreeFlags::new_table(), + }); + + // Create a second btree for DBSP operator state (e.g., aggregate state) + // This is stored as a hidden table: __turso_internal_dbsp_state_ + let dbsp_state_root_reg = program.alloc_register(); + + program.emit_insn(Insn::CreateBtree { + db: 0, + root: dbsp_state_root_reg, + flags: CreateBTreeFlags::new_table(), + }); + + // Create a proper BTreeTable for the cursor with the actual view columns + let view_table = Arc::new(BTreeTable { + root_page: 0, // Will be set to actual root page after creation + name: normalized_view_name.clone(), + columns: view_columns.clone(), + primary_key_columns: vec![], // Materialized views use implicit rowid + has_rowid: true, + is_strict: false, + unique_sets: None, + }); + + // Allocate a cursor for writing to the view's btree during population + let view_cursor_id = program.alloc_cursor_id(crate::vdbe::builder::CursorType::BTreeTable( + view_table.clone(), + )); + + // Open the cursor to the view's btree + program.emit_insn(Insn::OpenWrite { + cursor_id: view_cursor_id, + root_page: RegisterOrLiteral::Register(view_root_reg), + db: 0, + }); + + // Clear any existing data in the btree + // This is important because if we're reusing a page that previously held + // a materialized view, there might be old data still there + // We need to start with a clean slate + let clear_loop_label = program.allocate_label(); + let clear_done_label = program.allocate_label(); + + // Rewind to the beginning of the btree + program.emit_insn(Insn::Rewind { + cursor_id: view_cursor_id, + pc_if_empty: clear_done_label, + }); + + // Loop to delete all rows + program.preassign_label_to_next_insn(clear_loop_label); + program.emit_insn(Insn::Delete { + cursor_id: view_cursor_id, + table_name: normalized_view_name.clone(), + }); + 
program.emit_insn(Insn::Next { + cursor_id: view_cursor_id, + pc_if_next: clear_loop_label, + }); + + program.preassign_label_to_next_insn(clear_done_label); + + // Open cursor to sqlite_schema table + let table = schema.get_btree_table(SQLITE_TABLEID).unwrap(); + let sqlite_schema_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(table.clone())); + program.emit_insn(Insn::OpenWrite { + cursor_id: sqlite_schema_cursor_id, + root_page: 1usize.into(), + db: 0, + }); + + // Add the materialized view entry to sqlite_schema + let resolver = Resolver::new(schema, syms); + emit_schema_entry( + &mut program, + &resolver, + sqlite_schema_cursor_id, + None, // cdc_table_cursor_id, no cdc for views + SchemaEntryType::View, + &normalized_view_name, + &normalized_view_name, + view_root_reg, // btree root for materialized view data + Some(sql), + )?; + + // Add the DBSP state table to sqlite_master (required for materialized views) + let dbsp_table_name = format!("{DBSP_TABLE_PREFIX}{normalized_view_name}"); + let dbsp_sql = format!("CREATE TABLE {dbsp_table_name} (key INTEGER PRIMARY KEY, state BLOB)"); + + emit_schema_entry( + &mut program, + &resolver, + sqlite_schema_cursor_id, + None, // cdc_table_cursor_id + SchemaEntryType::Table, + &dbsp_table_name, + &dbsp_table_name, + dbsp_state_root_reg, // Root for DBSP state table + Some(dbsp_sql), + )?; + + // Parse schema to load the new view and DBSP state table + program.emit_insn(Insn::ParseSchema { + db: sqlite_schema_cursor_id, + where_clause: Some(format!( + "name = '{normalized_view_name}' OR name = '{dbsp_table_name}'" + )), + }); + + program.emit_insn(Insn::SetCookie { + db: 0, + cookie: Cookie::SchemaVersion, + value: (schema.schema_version + 1) as i32, + p5: 0, + }); + + // Populate the materialized view + let cursor_info = vec![(normalized_view_name.clone(), view_cursor_id)]; + program.emit_insn(Insn::PopulateMaterializedViews { + cursors: cursor_info, + }); program.epilogue(schema); Ok(program) @@ -137,8 +209,41 @@ pub fn translate_create_view( // Reconstruct the SQL string let sql = create_view_to_str(view_name, select_stmt); - // Use common logic to emit the view creation program - emit_create_view_program(schema, view_name, sql, syms, &mut program, false)?; + // Open cursor to sqlite_schema table + let table = schema.get_btree_table(SQLITE_TABLEID).unwrap(); + let sqlite_schema_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(table.clone())); + program.emit_insn(Insn::OpenWrite { + cursor_id: sqlite_schema_cursor_id, + root_page: 1usize.into(), + db: 0, + }); + + // Add the view entry to sqlite_schema + let resolver = Resolver::new(schema, syms); + emit_schema_entry( + &mut program, + &resolver, + sqlite_schema_cursor_id, + None, // cdc_table_cursor_id, no cdc for views + SchemaEntryType::View, + &normalized_view_name, + &normalized_view_name, + 0, // Regular views don't have a btree + Some(sql), + )?; + + // Parse schema to load the new view + program.emit_insn(Insn::ParseSchema { + db: sqlite_schema_cursor_id, + where_clause: Some(format!("name = '{normalized_view_name}'")), + }); + + program.emit_insn(Insn::SetCookie { + db: 0, + cookie: Cookie::SchemaVersion, + value: (schema.schema_version + 1) as i32, + p5: 0, + }); Ok(program) } @@ -156,10 +261,9 @@ pub fn translate_drop_view( let normalized_view_name = normalize_ident(view_name); // Check if view exists (either regular or materialized) - let view_exists = schema.get_view(&normalized_view_name).is_some() - || schema - .get_materialized_view(&normalized_view_name) 
- .is_some(); + let is_regular_view = schema.get_view(&normalized_view_name).is_some(); + let is_materialized_view = schema.is_materialized_view(&normalized_view_name); + let view_exists = is_regular_view || is_materialized_view; if !view_exists && !if_exists { return Err(crate::LimboError::ParseError(format!( @@ -172,6 +276,20 @@ pub fn translate_drop_view( return Ok(program); } + // If this is a materialized view, we need to destroy its btree as well + if is_materialized_view { + if let Some(table) = schema.get_table(&normalized_view_name) { + if let Some(btree_table) = table.btree() { + // Destroy the btree for the materialized view + program.emit_insn(Insn::Destroy { + root: btree_table.root_page, + former_root_reg: 0, // No autovacuum + is_temp: 0, + }); + } + } + } + // Open cursor to sqlite_schema table let schema_table = schema.get_btree_table(SQLITE_TABLEID).unwrap(); let sqlite_schema_cursor_id = @@ -217,6 +335,8 @@ pub fn translate_drop_view( // Check if type == 'view' and name == view_name let skip_delete_label = program.allocate_label(); + + // Both regular and materialized views are stored as type='view' in sqlite_schema program.emit_insn(Insn::Ne { lhs: col0_reg, rhs: type_reg, @@ -224,6 +344,7 @@ pub fn translate_drop_view( flags: CmpInsFlags::default(), collation: program.curr_collation(), }); + program.emit_insn(Insn::Ne { lhs: col1_reg, rhs: view_name_reg, diff --git a/core/types.rs b/core/types.rs index 26eb79c41..b5d079ddb 100644 --- a/core/types.rs +++ b/core/types.rs @@ -5,6 +5,7 @@ use turso_parser::ast::SortOrder; use crate::error::LimboError; use crate::ext::{ExtValue, ExtValueType}; +use crate::numeric::format_float; use crate::pseudo::PseudoCursor; use crate::schema::Index; use crate::storage::btree::BTreeCursor; @@ -17,8 +18,6 @@ use crate::vtab::VirtualTableCursor; use crate::{turso_assert, Completion, CompletionError, Result, IO}; use std::fmt::{Debug, Display}; -const MAX_REAL_SIZE: u8 = 15; - /// SQLite by default uses 2000 as maximum numbers in a row. 
/// It is controlled by the constant called SQLITE_MAX_COLUMN. /// But the hard limit on the number of columns is 32,767 (i16::MAX). @@ -390,6 +389,13 @@ impl Value { Value::Blob(b) => out.extend_from_slice(b), }; } + + pub fn cast_text(&self) -> Option<String> { + Some(match self { + Value::Null => return None, + v => v.to_string(), + }) + } } #[derive(Debug, Clone, PartialEq)] @@ -425,108 +431,7 @@ impl Display for Value { Self::Integer(i) => { write!(f, "{i}") } - Self::Float(fl) => { - let fl = *fl; - if fl == f64::INFINITY { - return write!(f, "Inf"); - } - if fl == f64::NEG_INFINITY { - return write!(f, "-Inf"); - } - if fl.is_nan() { - return write!(f, ""); - } - // handle negative 0 - if fl == -0.0 { - return write!(f, "{:.1}", fl.abs()); - } - - // handle scientific notation without trailing zeros - if (fl.abs() < 1e-4 || fl.abs() >= 1e15) && fl != 0.0 { - let sci_notation = format!("{fl:.14e}"); - let parts: Vec<&str> = sci_notation.split('e').collect(); - - if parts.len() == 2 { - let mantissa = parts[0]; - let exponent = parts[1]; - - let decimal_parts: Vec<&str> = mantissa.split('.').collect(); - if decimal_parts.len() == 2 { - let whole = decimal_parts[0]; - // 1.{this part} - let mut fraction = String::from(decimal_parts[1]); - - //removing trailing 0 from fraction - while fraction.ends_with('0') { - fraction.pop(); - } - - let trimmed_mantissa = if fraction.is_empty() { - whole.to_string() - } else { - format!("{whole}.{fraction}") - }; - let (prefix, exponent) = - if let Some(stripped_exponent) = exponent.strip_prefix('-') { - ("-0", &stripped_exponent[1..]) - } else { - ("+", exponent) - }; - return write!(f, "{trimmed_mantissa}e{prefix}{exponent}"); - } - } - - // fallback - return write!(f, "{sci_notation}"); - } - - // handle floating point max size is 15. - // If left > right && right + left > 15 go to sci notation - // If right > left && right + left > 15 truncate left so right + left == 15 - let rounded = fl.round(); - if (fl - rounded).abs() < 1e-14 { - // if we very close to integer trim decimal part to 1 digit - if rounded == rounded as i64 as f64 { - return write!(f, "{fl:.1}"); - } - } - - let fl_str = format!("{fl}"); - let splitted = fl_str.split('.').collect::<Vec<&str>>(); - // fallback - if splitted.len() != 2 { - return write!(f, "{fl:.14e}"); - } - - let first_part = if fl < 0.0 { - // remove - - &splitted[0][1..] - } else { - splitted[0] - }; - - let second = splitted[1]; - - // We want more precision for smaller numbers. in SQLite case we want 15 non zero digits in 0 < number < 1 - // leading zeroes added to max real size. 
But if float < 1e-4 we go to scientific notation - let leading_zeros = second.chars().take_while(|c| c == &'0').count(); - let reminder = if first_part != "0" { - MAX_REAL_SIZE as isize - first_part.len() as isize - } else { - MAX_REAL_SIZE as isize + leading_zeros as isize - }; - // float that have integer part > 15 converted to sci notation - if reminder < 0 { - return write!(f, "{fl:.14e}"); - } - // trim decimal part to reminder or self len so total digits is 15; - let mut fl = format!("{:.*}", second.len().min(reminder as usize), fl); - // if decimal part ends with 0 we trim it - while fl.ends_with('0') { - fl.pop(); - } - write!(f, "{fl}") - } + Self::Float(fl) => f.write_str(&format_float(*fl)), Self::Text(s) => { write!(f, "{}", s.as_str()) } @@ -761,11 +666,8 @@ impl PartialEq for Value { fn eq(&self, other: &Value) -> bool { match (self, other) { (Self::Integer(int_left), Self::Integer(int_right)) => int_left == int_right, - (Self::Integer(int_left), Self::Float(float_right)) => { - (*int_left as f64) == (*float_right) - } - (Self::Float(float_left), Self::Integer(int_right)) => { - float_left == (&(*int_right as f64)) + (Self::Integer(int), Self::Float(float)) | (Self::Float(float), Self::Integer(int)) => { + int_float_cmp(*int, *float).is_eq() } (Self::Float(float_left), Self::Float(float_right)) => float_left == float_right, (Self::Integer(_) | Self::Float(_), Self::Text(_) | Self::Blob(_)) => false, @@ -780,17 +682,32 @@ impl PartialEq for Value { } } +fn int_float_cmp(int: i64, float: f64) -> std::cmp::Ordering { + if float.is_nan() { + return std::cmp::Ordering::Greater; + } + + if float < -9223372036854775808.0 { + return std::cmp::Ordering::Greater; + } + + if float >= 9223372036854775808.0 { + return std::cmp::Ordering::Less; + } + + match int.cmp(&(float as i64)) { + std::cmp::Ordering::Equal => (int as f64).total_cmp(&float), + cmp => cmp, + } +} + #[allow(clippy::non_canonical_partial_ord_impl)] impl PartialOrd for Value { fn partial_cmp(&self, other: &Self) -> Option { match (self, other) { (Self::Integer(int_left), Self::Integer(int_right)) => int_left.partial_cmp(int_right), - (Self::Integer(int_left), Self::Float(float_right)) => { - (*int_left as f64).partial_cmp(float_right) - } - (Self::Float(float_left), Self::Integer(int_right)) => { - float_left.partial_cmp(&(*int_right as f64)) - } + (Self::Float(float), Self::Integer(int)) => Some(int_float_cmp(*int, *float).reverse()), + (Self::Integer(int), Self::Float(float)) => Some(int_float_cmp(*int, *float)), (Self::Float(float_left), Self::Float(float_right)) => { float_left.partial_cmp(float_right) } @@ -842,83 +759,89 @@ impl Ord for Value { impl std::ops::Add for Value { type Output = Value; - fn add(self, rhs: Self) -> Self::Output { - match (self, rhs) { - (Self::Integer(int_left), Self::Integer(int_right)) => { - Self::Integer(int_left + int_right) - } - (Self::Integer(int_left), Self::Float(float_right)) => { - Self::Float(int_left as f64 + float_right) - } - (Self::Float(float_left), Self::Integer(int_right)) => { - Self::Float(float_left + int_right as f64) - } - (Self::Float(float_left), Self::Float(float_right)) => { - Self::Float(float_left + float_right) - } - (Self::Text(string_left), Self::Text(string_right)) => { - Self::build_text(&(string_left.as_str().to_string() + string_right.as_str())) - } - (Self::Text(string_left), Self::Integer(int_right)) => { - Self::build_text(&(string_left.as_str().to_string() + &int_right.to_string())) - } - (Self::Integer(int_left), Self::Text(string_right)) => { - 
Self::build_text(&(int_left.to_string() + string_right.as_str())) - } - (Self::Text(string_left), Self::Float(float_right)) => { - let string_right = Self::Float(float_right).to_string(); - Self::build_text(&(string_left.as_str().to_string() + &string_right)) - } - (Self::Float(float_left), Self::Text(string_right)) => { - let string_left = Self::Float(float_left).to_string(); - Self::build_text(&(string_left + string_right.as_str())) - } - (lhs, Self::Null) => lhs, - (Self::Null, rhs) => rhs, - _ => Self::Float(0.0), - } + fn add(mut self, rhs: Self) -> Self::Output { + self += rhs; + self } } impl std::ops::Add<f64> for Value { type Output = Value; - fn add(self, rhs: f64) -> Self::Output { - match self { - Self::Integer(int_left) => Self::Float(int_left as f64 + rhs), - Self::Float(float_left) => Self::Float(float_left + rhs), - _ => unreachable!(), - } + fn add(mut self, rhs: f64) -> Self::Output { + self += rhs; + self } } impl std::ops::Add<i64> for Value { type Output = Value; - fn add(self, rhs: i64) -> Self::Output { - match self { - Self::Integer(int_left) => Self::Integer(int_left + rhs), - Self::Float(float_left) => Self::Float(float_left + rhs as f64), - _ => unreachable!(), - } + fn add(mut self, rhs: i64) -> Self::Output { + self += rhs; + self } } impl std::ops::AddAssign for Value { - fn add_assign(&mut self, rhs: Self) { - *self = self.clone() + rhs; + fn add_assign(mut self: &mut Self, rhs: Self) { + match (&mut self, rhs) { + (Self::Integer(int_left), Self::Integer(int_right)) => *int_left += int_right, + (Self::Integer(int_left), Self::Float(float_right)) => { + *self = Self::Float(*int_left as f64 + float_right) + } + (Self::Float(float_left), Self::Integer(int_right)) => { + *self = Self::Float(*float_left + int_right as f64) + } + (Self::Float(float_left), Self::Float(float_right)) => { + *float_left += float_right; + } + (Self::Text(string_left), Self::Text(string_right)) => { + string_left.value.extend_from_slice(&string_right.value); + string_left.subtype = TextSubtype::Text; + } + (Self::Text(string_left), Self::Integer(int_right)) => { + let string_right = int_right.to_string(); + string_left.value.extend_from_slice(string_right.as_bytes()); + string_left.subtype = TextSubtype::Text; + } + (Self::Integer(int_left), Self::Text(string_right)) => { + let string_left = int_left.to_string(); + *self = Self::build_text(&(string_left + string_right.as_str())); + } + (Self::Text(string_left), Self::Float(float_right)) => { + let string_right = Self::Float(float_right).to_string(); + string_left.value.extend_from_slice(string_right.as_bytes()); + string_left.subtype = TextSubtype::Text; + } + (Self::Float(float_left), Self::Text(string_right)) => { + let string_left = Self::Float(*float_left).to_string(); + *self = Self::build_text(&(string_left + string_right.as_str())); + } + (_, Self::Null) => {} + (Self::Null, rhs) => *self = rhs, + _ => *self = Self::Float(0.0), + } } } impl std::ops::AddAssign<i64> for Value { fn add_assign(&mut self, rhs: i64) { - *self = self.clone() + rhs; + match self { + Self::Integer(int_left) => *int_left += rhs, + Self::Float(float_left) => *float_left += rhs as f64, + _ => unreachable!(), + } } } impl std::ops::AddAssign<f64> for Value { fn add_assign(&mut self, rhs: f64) { - *self = self.clone() + rhs; + match self { + Self::Integer(int_left) => *self = Self::Float(*int_left as f64 + rhs), + Self::Float(float_left) => *float_left += rhs, + _ => unreachable!(), + } } } @@ -1287,7 +1210,7 @@ impl ImmutableRecord { pub fn column_count(&self) -> usize { let mut 
cursor = RecordCursor::new(); cursor.parse_full_header(self).unwrap(); - cursor.offsets.len() + cursor.serial_types.len() } } @@ -2430,6 +2353,7 @@ pub enum Cursor { Pseudo(PseudoCursor), Sorter(Sorter), Virtual(VirtualTableCursor), + MaterializedView(Box<crate::incremental::cursor::MaterializedViewCursor>), } impl Cursor { @@ -2445,6 +2369,12 @@ Self::Sorter(cursor) } + pub fn new_materialized_view( + cursor: crate::incremental::cursor::MaterializedViewCursor, + ) -> Self { + Self::MaterializedView(Box::new(cursor)) + } + pub fn as_btree_mut(&mut self) -> &mut BTreeCursor { match self { Self::BTree(cursor) => cursor, @@ -2472,6 +2402,15 @@ _ => panic!("Cursor is not a virtual cursor"), } } + + pub fn as_materialized_view_mut( + &mut self, + ) -> &mut crate::incremental::cursor::MaterializedViewCursor { + match self { + Self::MaterializedView(cursor) => cursor, + _ => panic!("Cursor is not a materialized view cursor"), + } + } } #[derive(Debug)] @@ -2542,9 +2481,27 @@ impl IOResult { #[macro_export] macro_rules! return_if_io { ($expr:expr) => { - match $expr? { - IOResult::Done(v) => v, - IOResult::IO(io) => return Ok(IOResult::IO(io)), + match $expr { + Ok(IOResult::Done(v)) => v, + Ok(IOResult::IO(io)) => return Ok(IOResult::IO(io)), + Err(err) => return Err(err), + } }; } + +#[macro_export] +macro_rules! return_and_restore_if_io { + ($field:expr, $saved_state:expr, $e:expr) => { + match $e { + Ok(IOResult::Done(v)) => v, + Ok(IOResult::IO(io)) => { + let _ = std::mem::replace($field, $saved_state); + return Ok(IOResult::IO(io)); + } + Err(e) => { + let _ = std::mem::replace($field, $saved_state); + return Err(e); + } } }; } @@ -3583,4 +3540,19 @@ mod tests { header_length + size_of::() + size_of::() + text.len() ); } + + #[test] + fn test_column_count_matches_values_written() { + // Test with different numbers of values + for num_values in 1..=10 { + let values: Vec<Value> = (0..num_values).map(|i| Value::Integer(i as i64)).collect(); + + let record = ImmutableRecord::from_values(&values, values.len()); + let cnt = record.column_count(); + assert_eq!( + cnt, num_values, + "column_count should be {num_values}, not {cnt}" + ); + } + } } diff --git a/core/util.rs b/core/util.rs index 80845c8c9..b259a90f3 100644 --- a/core/util.rs +++ b/core/util.rs @@ -1,14 +1,16 @@ #![allow(unused)] +use crate::incremental::view::IncrementalView; use crate::translate::expr::WalkControl; use crate::types::IOResult; use crate::{ - schema::{self, Column, MaterializedViewsMap, Schema, Type}, + schema::{self, BTreeTable, Column, Schema, Table, Type, DBSP_TABLE_PREFIX}, translate::{collate::CollationSeq, expr::walk_expr, plan::JoinOrderMember}, types::{Value, ValueType}, LimboError, OpenFlags, Result, Statement, StepResult, SymbolTable, }; use crate::{Connection, IO}; use std::{ + collections::HashMap, rc::Rc, sync::{Arc, Mutex}, }; @@ -148,7 +150,7 @@ pub fn parse_schema_rows( schema: &mut Schema, syms: &SymbolTable, mv_tx_id: Option, - mut existing_views: MaterializedViewsMap, + mut existing_views: HashMap<String, Arc<Mutex<IncrementalView>>>, ) -> Result<()> { rows.set_mv_tx_id(mv_tx_id); // TODO: if we IO, these unparsed indexes are lost. 
Will probably need some state between @@ -156,8 +158,12 @@ let mut from_sql_indexes = Vec::with_capacity(10); let mut automatic_indices = std::collections::HashMap::with_capacity(10); - // Collect views for second pass to populate table_to_views mapping - let mut views_to_process: Vec<(String, Vec<String>)> = Vec::new(); + // Store DBSP state table root pages: view_name -> dbsp_state_root_page + let mut dbsp_state_roots: std::collections::HashMap<String, usize> = + std::collections::HashMap::new(); + // Store materialized view info (SQL and root page) for later creation + let mut materialized_view_info: std::collections::HashMap<String, (String, usize)> = + std::collections::HashMap::new(); loop { match rows.step()? { StepResult::Row => { @@ -189,6 +195,18 @@ schema.add_virtual_table(vtab); } else { let table = schema::BTreeTable::from_sql(sql, root_page as usize)?; + + // Check if this is a DBSP state table + if table.name.starts_with(DBSP_TABLE_PREFIX) { + // Extract the view name from __turso_internal_dbsp_state_<view_name> + let view_name = table + .name + .strip_prefix(DBSP_TABLE_PREFIX) + .unwrap() + .to_string(); + dbsp_state_roots.insert(view_name, root_page as usize); + } + schema.add_btree_table(Arc::new(table)); } } @@ -228,6 +246,7 @@ use turso_parser::parser::Parser; let name: &str = row.get::<&str>(1)?; + let root_page = row.get::<i64>(3)?; let sql: &str = row.get::<&str>(4)?; let view_name = name.to_string(); @@ -236,52 +255,17 @@ if let Ok(Some(Cmd::Stmt(stmt))) = parser.next_cmd() { match stmt { Stmt::CreateMaterializedView { .. } => { - // Handle materialized view with potential reuse - let should_create_new = if let Some(existing_view) = - existing_views.remove(&view_name) - { - // Check if we can reuse this view (same SQL definition) - let can_reuse = if let Ok(view_guard) = existing_view.lock() - { - view_guard.has_same_sql(sql) - } else { - false - }; + // Store materialized view info for later creation + // We'll handle reuse logic and create the actual IncrementalView + // in a later pass when we have both the main root page and DBSP state root + materialized_view_info.insert( + view_name.clone(), + (sql.to_string(), root_page as usize), + ); - if can_reuse { - // Reuse the existing view - it's already populated! 
- let referenced_tables = - if let Ok(view_guard) = existing_view.lock() { - view_guard.get_referenced_table_names() - } else { - vec![] - }; - - // Add the existing view to the new schema - schema - .materialized_views - .insert(view_name.clone(), existing_view); - - // Store for second pass processing - views_to_process - .push((view_name.clone(), referenced_tables)); - false // Don't create new - } else { - true // SQL changed, need to create new - } - } else { - true // No existing view, need to create new - }; - - if should_create_new { - // Create a new IncrementalView - // If this fails, we should propagate the error so the transaction rolls back - let incremental_view = - IncrementalView::from_sql(sql, schema)?; - let referenced_tables = - incremental_view.get_referenced_table_names(); - schema.add_materialized_view(incremental_view); - views_to_process.push((view_name, referenced_tables)); + // Mark the existing view for potential reuse + if existing_views.contains_key(&view_name) { + // We'll check for reuse in the third pass } } Stmt::CreateView { @@ -359,11 +343,56 @@ pub fn parse_schema_rows( } } - // Second pass: populate table_to_views mapping - for (view_name, referenced_tables) in views_to_process { - // Register this view as dependent on each referenced table - for table_name in referenced_tables { - schema.add_materialized_view_dependency(&table_name, &view_name); + // Third pass: Create materialized views now that we have both root pages + for (view_name, (sql, main_root)) in materialized_view_info { + // Look up the DBSP state root for this view - must exist for materialized views + let dbsp_state_root = dbsp_state_roots.get(&view_name).ok_or_else(|| { + LimboError::InternalError(format!( + "Materialized view {view_name} is missing its DBSP state table" + )) + })?; + + // Check if we can reuse the existing view + let mut reuse_view = false; + if let Some(existing_view_mutex) = schema.get_materialized_view(&view_name) { + let existing_view = existing_view_mutex.lock().unwrap(); + if let Some(existing_sql) = schema.materialized_view_sql.get(&view_name) { + if existing_sql == &sql { + reuse_view = true; + } + } + } + + if reuse_view { + // View already exists with same SQL, just update dependencies + let existing_view_mutex = schema.get_materialized_view(&view_name).unwrap(); + let existing_view = existing_view_mutex.lock().unwrap(); + let referenced_tables = existing_view.get_referenced_table_names(); + drop(existing_view); // Release lock before modifying schema + for table_name in referenced_tables { + schema.add_materialized_view_dependency(&table_name, &view_name); + } + } else { + // Create new IncrementalView with both root pages + let incremental_view = + IncrementalView::from_sql(&sql, schema, main_root, *dbsp_state_root)?; + let referenced_tables = incremental_view.get_referenced_table_names(); + + // Create a Table for the materialized view + let table = Arc::new(schema::Table::BTree(Arc::new(schema::BTreeTable { + root_page: main_root, + name: view_name.clone(), + columns: incremental_view.columns.clone(), // Use the view's columns, not the base table's + primary_key_columns: vec![], + has_rowid: true, + is_strict: false, + unique_sets: None, + }))); + + schema.add_materialized_view(incremental_view, table, sql.clone()); + for table_name in referenced_tables { + schema.add_materialized_view_dependency(&table_name, &view_name); + } } } @@ -679,11 +708,11 @@ pub(crate) fn type_from_name(type_name: &str) -> (Type, bool) { return (Type::Integer, true); } - if let 
Some(ty) = type_name.windows(4).find_map(|s| { - if contains_ignore_ascii_case!(s, b"INT") { - return Some(Type::Integer); - } + if contains_ignore_ascii_case!(type_name, b"INT") { + return (Type::Integer, false); + } + + if let Some(ty) = type_name.windows(4).find_map(|s| { match_ignore_ascii_case!(match s { b"CHAR" | b"CLOB" | b"TEXT" => Some(Type::Text), b"BLOB" => Some(Type::Blob), @@ -1370,6 +1399,7 @@ pub fn extract_view_columns(select_stmt: &ast::Select, schema: &Schema) -> Vec), + MaterializedView( Arc<BTreeTable>, Arc<Mutex<IncrementalView>>, ), } impl CursorType { @@ -865,6 +869,7 @@ impl ProgramBuilder { let default = match cursor_type { CursorType::BTreeTable(btree) => &btree.columns[column].default, CursorType::BTreeIndex(index) => &index.columns[column].default, + CursorType::MaterializedView(btree, _) => &btree.columns[column].default, _ => break 'value None, }; diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index ffa2a447c..7fff9b4c6 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -6,7 +6,7 @@ use crate::storage::btree::{ integrity_check, IntegrityCheckError, IntegrityCheckState, PageCategory, }; use crate::storage::database::DatabaseFile; -use crate::storage::page_cache::DumbLruPageCache; +use crate::storage::page_cache::PageCache; use crate::storage::pager::{AtomicDbState, CreateBTreeFlags, DbState}; use crate::storage::sqlite3_ondisk::read_varint; use crate::translate::collate::CollationSeq; @@ -18,7 +18,6 @@ use crate::util::{normalize_ident, IOExt as _}; use crate::vdbe::insn::InsertFlags; use crate::vdbe::registers_to_ref_values; use crate::vector::{vector_concat, vector_slice}; -use crate::MvCursor; use crate::{ error::{ LimboError, SQLITE_CONSTRAINT, SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PRIMARYKEY, @@ -32,6 +31,7 @@ use crate::{ printf::exec_printf, }, }; +use crate::{get_cursor, MvCursor}; use std::env::temp_dir; use std::ops::DerefMut; use std::{ @@ -118,12 +118,14 @@ macro_rules! load_insn { macro_rules! return_if_io { ($expr:expr) => { - match $expr? 
{ - IOResult::Done(v) => v, - IOResult::IO(io) => return Ok(InsnFunctionStepResult::IO(io)), + match $expr { + Ok(IOResult::Done(v)) => v, + Ok(IOResult::IO(io)) => return Ok(InsnFunctionStepResult::IO(io)), + Err(err) => return Err(err), } }; } + pub type InsnFunction = fn( &Program, &mut ProgramState, @@ -405,7 +407,7 @@ pub fn op_null_row( ) -> Result { load_insn!(NullRow { cursor_id }, insn); { - let mut cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "NullRow"); + let cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "NullRow"); let cursor = cursor.as_btree_mut(); cursor.set_null_flag(true); } @@ -949,15 +951,47 @@ pub fn op_open_read( } None => None, }; - let mut cursors = state.cursors.borrow_mut(); + let cursors = &mut state.cursors; let num_columns = match cursor_type { CursorType::BTreeTable(table_rc) => table_rc.columns.len(), CursorType::BTreeIndex(index_arc) => index_arc.columns.len(), + CursorType::MaterializedView(table_rc, _) => table_rc.columns.len(), _ => unreachable!("This should not have happened"), }; match cursor_type { + CursorType::MaterializedView(_, view_mutex) => { + // This is a materialized view with storage + // Create btree cursor for reading the persistent data + let btree_cursor = Box::new(BTreeCursor::new_table( + mv_cursor, + pager.clone(), + *root_page, + num_columns, + )); + + // Get the view name and look up or create its transaction state + let view_name = view_mutex.lock().unwrap().name().to_string(); + let tx_state = program + .connection + .view_transaction_states + .get_or_create(&view_name); + + // Create materialized view cursor with this view's transaction state + let mv_cursor = crate::incremental::cursor::MaterializedViewCursor::new( + btree_cursor, + view_mutex.clone(), + pager.clone(), + tx_state, + )?; + + cursors + .get_mut(*cursor_id) + .unwrap() + .replace(Cursor::new_materialized_view(mv_cursor)); + } CursorType::BTreeTable(_) => { + // Regular table let cursor = BTreeCursor::new_table(mv_cursor, pager.clone(), *root_page, num_columns); cursors .get_mut(*cursor_id) @@ -1006,7 +1040,6 @@ pub fn op_vopen( let cursor = virtual_table.open(program.connection.clone())?; state .cursors - .borrow_mut() .get_mut(*cursor_id) .unwrap_or_else(|| panic!("cursor id {} out of bounds", *cursor_id)) .replace(Cursor::Virtual(cursor)); @@ -1074,7 +1107,7 @@ pub fn op_vfilter( insn ); let has_rows = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = get_cursor!(state, *cursor_id); let cursor = cursor.as_virtual_mut(); let mut args = Vec::with_capacity(*arg_count); for i in 0..*arg_count { @@ -1115,7 +1148,7 @@ pub fn op_vcolumn( insn ); let value = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_virtual_mut(); cursor.column(*column)? }; @@ -1203,7 +1236,7 @@ pub fn op_vnext( insn ); let has_more = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_virtual_mut(); cursor.next()? 
}; @@ -1255,7 +1288,7 @@ pub fn op_open_pseudo( insn ); { - let mut cursors = state.cursors.borrow_mut(); + let cursors = &mut state.cursors; let cursor = PseudoCursor::default(); cursors .get_mut(*cursor_id) @@ -1282,10 +1315,18 @@ pub fn op_rewind( ); assert!(pc_if_empty.is_offset()); let is_empty = { - let mut cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "Rewind"); - let cursor = cursor.as_btree_mut(); - return_if_io!(cursor.rewind()); - cursor.is_empty() + let cursor = state.get_cursor(*cursor_id); + match cursor { + Cursor::BTree(btree_cursor) => { + return_if_io!(btree_cursor.rewind()); + btree_cursor.is_empty() + } + Cursor::MaterializedView(mv_cursor) => { + return_if_io!(mv_cursor.rewind()); + !mv_cursor.is_valid()? + } + _ => panic!("Rewind on non-btree/materialized-view cursor"), + } }; if is_empty { state.pc = pc_if_empty.as_offset_int(); @@ -1313,7 +1354,7 @@ pub fn op_last( ); assert!(pc_if_empty.is_offset()); let is_empty = { - let mut cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "Last"); + let cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "Last"); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.last()); cursor.is_empty() @@ -1414,13 +1455,16 @@ pub fn op_column( index_cursor_id, table_cursor_id, } => { - let rowid = { - let mut index_cursor = state.get_cursor(index_cursor_id); + let Some(rowid) = ({ + let index_cursor = state.get_cursor(index_cursor_id); let index_cursor = index_cursor.as_btree_mut(); return_if_io!(index_cursor.rowid()) + }) else { + state.registers[*dest] = Register::Value(Value::Null); + break 'outer; }; state.op_column_state = OpColumnState::Seek { - rowid: rowid.unwrap(), + rowid, table_cursor_id, }; } @@ -1429,20 +1473,45 @@ pub fn op_column( table_cursor_id, } => { { - let mut table_cursor = state.get_cursor(table_cursor_id); - let table_cursor = table_cursor.as_btree_mut(); - return_if_io!( - table_cursor.seek(SeekKey::TableRowId(rowid), SeekOp::GE { eq_only: true }) - ); + let table_cursor = state.get_cursor(table_cursor_id); + // MaterializedView cursors shouldn't go through deferred seek logic + // but if we somehow get here, handle it appropriately + match table_cursor { + Cursor::MaterializedView(mv_cursor) => { + // Seek to the rowid in the materialized view + return_if_io!(mv_cursor + .seek(SeekKey::TableRowId(rowid), SeekOp::GE { eq_only: true })); + } + _ => { + // Regular btree cursor + let table_cursor = table_cursor.as_btree_mut(); + return_if_io!(table_cursor + .seek(SeekKey::TableRowId(rowid), SeekOp::GE { eq_only: true })); + } + } } state.op_column_state = OpColumnState::GetColumn; } OpColumnState::GetColumn => { + // First check if this is a MaterializedViewCursor + { + let cursor = state.get_cursor(*cursor_id); + if let Cursor::MaterializedView(mv_cursor) = cursor { + // Handle materialized view column access + let value = return_if_io!(mv_cursor.column(*column)); + state.registers[*dest] = Register::Value(value); + break 'outer; + } + // Fall back to normal handling + } + let (_, cursor_type) = program.cursor_ref.get(*cursor_id).unwrap(); match cursor_type { - CursorType::BTreeTable(_) | CursorType::BTreeIndex(_) => { + CursorType::BTreeTable(_) + | CursorType::BTreeIndex(_) + | CursorType::MaterializedView(_, _) => { 'ifnull: { - let mut cursor_ref = must_be_btree_cursor!( + let cursor_ref = must_be_btree_cursor!( *cursor_id, program.cursor_ref, state, @@ -1451,7 +1520,6 @@ pub fn op_column( let cursor = cursor_ref.as_btree_mut(); if 
cursor.get_null_flag() { - drop(cursor_ref); state.registers[*dest] = Register::Value(Value::Null); break 'outer; } @@ -1549,7 +1617,6 @@ pub fn op_column( let serial_type = record_cursor.serial_types[target_column]; drop(record_result); drop(record_cursor); - drop(cursor_ref); match serial_type { // NULL @@ -1688,7 +1755,7 @@ pub fn op_column( } CursorType::Sorter => { let record = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_sorter_mut(); cursor.record().cloned() }; @@ -1704,7 +1771,7 @@ pub fn op_column( } CursorType::Pseudo(_) => { let value = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_pseudo_mut(); if let Some(record) = cursor.record() { record.get_value(*column)?.to_owned() @@ -1800,10 +1867,27 @@ pub fn op_make_record( start_reg, count, dest_reg, + affinity_str, .. }, insn ); + + if let Some(affinity_str) = affinity_str { + if affinity_str.len() != *count { + return Err(LimboError::InternalError(format!( + "MakeRecord: the length of affinity string ({}) does not match the count ({})", + affinity_str.len(), + *count + ))); + } + for (i, affinity_ch) in affinity_str.chars().enumerate().take(*count) { + let reg_index = *start_reg + i; + let affinity = Affinity::from_char(affinity_ch); + apply_affinity_char(&mut state.registers[reg_index], affinity); + } + } + let record = make_record(&state.registers, start_reg, count); state.registers[*dest_reg] = Register::Record(record); state.pc += 1; @@ -1843,12 +1927,19 @@ pub fn op_next( ); assert!(pc_if_next.is_offset()); let is_empty = { - let mut cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "Next"); - let cursor = cursor.as_btree_mut(); - cursor.set_null_flag(false); - return_if_io!(cursor.next()); - - cursor.is_empty() + let cursor = state.get_cursor(*cursor_id); + match cursor { + Cursor::BTree(btree_cursor) => { + btree_cursor.set_null_flag(false); + return_if_io!(btree_cursor.next()); + btree_cursor.is_empty() + } + Cursor::MaterializedView(mv_cursor) => { + let has_more = return_if_io!(mv_cursor.next()); + !has_more + } + _ => panic!("Next on non-btree/materialized-view cursor"), + } }; if !is_empty { // Increment metrics for row read @@ -1885,7 +1976,7 @@ pub fn op_prev( ); assert!(pc_if_prev.is_offset()); let is_empty = { - let mut cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "Prev"); + let cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "Prev"); let cursor = cursor.as_btree_mut(); cursor.set_null_flag(false); return_if_io!(cursor.prev()); @@ -2164,7 +2255,7 @@ pub fn op_auto_commit( insn ); let conn = program.connection.clone(); - if state.commit_state == CommitState::Committing { + if matches!(state.commit_state, CommitState::Committing) { return program .commit_txn(pager.clone(), state, mv_store, *rollback) .map(Into::into); @@ -2337,8 +2428,7 @@ pub fn op_row_data( load_insn!(RowData { cursor_id, dest }, insn); let record = { - let mut cursor_ref = - must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "RowData"); + let cursor_ref = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "RowData"); let cursor = cursor_ref.as_btree_mut(); let record_option = return_if_io!(cursor.record()); @@ -2397,7 +2487,7 @@ pub fn op_row_id( table_cursor_id, } => { let rowid = { - let mut index_cursor = state.get_cursor(index_cursor_id); + let index_cursor = state.get_cursor(index_cursor_id); let index_cursor = 
index_cursor.as_btree_mut(); let record = return_if_io!(index_cursor.record()); let record = record.as_ref().unwrap(); @@ -2419,7 +2509,7 @@ pub fn op_row_id( table_cursor_id, } => { { - let mut table_cursor = state.get_cursor(table_cursor_id); + let table_cursor = state.get_cursor(table_cursor_id); let table_cursor = table_cursor.as_btree_mut(); return_if_io!( table_cursor.seek(SeekKey::TableRowId(rowid), SeekOp::GE { eq_only: true }) @@ -2428,7 +2518,7 @@ pub fn op_row_id( state.op_row_id_state = OpRowIdState::GetRowid; } OpRowIdState::GetRowid => { - let mut cursors = state.cursors.borrow_mut(); + let cursors = &mut state.cursors; if let Some(Cursor::BTree(btree_cursor)) = cursors.get_mut(*cursor_id).unwrap() { if let Some(ref rowid) = return_if_io!(btree_cursor.rowid()) { state.registers[*dest] = Register::Value(Value::Integer(*rowid)); @@ -2444,9 +2534,18 @@ pub fn op_row_id( } else { state.registers[*dest] = Register::Value(Value::Null); } + } else if let Some(Cursor::MaterializedView(mv_cursor)) = + cursors.get_mut(*cursor_id).unwrap() + { + if let Some(rowid) = return_if_io!(mv_cursor.rowid()) { + state.registers[*dest] = Register::Value(Value::Integer(rowid)); + } else { + state.registers[*dest] = Register::Value(Value::Null); + } } else { return Err(LimboError::InternalError( - "RowId: cursor is not a table or virtual cursor".to_string(), + "RowId: cursor is not a table, virtual, or materialized view cursor" + .to_string(), )); } break; @@ -2467,7 +2566,7 @@ pub fn op_idx_row_id( mv_store: Option<&Arc>, ) -> Result { load_insn!(IdxRowId { cursor_id, dest }, insn); - let mut cursors = state.cursors.borrow_mut(); + let cursors = &mut state.cursors; let cursor = cursors.get_mut(*cursor_id).unwrap().as_mut().unwrap(); let cursor = cursor.as_btree_mut(); let rowid = return_if_io!(cursor.rowid()); @@ -2496,41 +2595,68 @@ pub fn op_seek_rowid( ); assert!(target_pc.is_offset()); let (pc, did_seek) = { - let mut cursor = state.get_cursor(*cursor_id); - let cursor = cursor.as_btree_mut(); - let rowid = match state.registers[*src_reg].get_value() { - Value::Integer(rowid) => Some(*rowid), - Value::Null => None, - // For non-integer values try to apply affinity and convert them to integer. 
- other => { - let mut temp_reg = Register::Value(other.clone()); - let converted = apply_affinity_char(&mut temp_reg, Affinity::Numeric); - if converted { - match temp_reg.get_value() { - Value::Integer(i) => Some(*i), - Value::Float(f) => Some(*f as i64), - _ => unreachable!("apply_affinity_char with Numeric should produce an integer if it returns true"), + let cursor = get_cursor!(state, *cursor_id); + + // Handle MaterializedView cursor + let (pc, did_seek) = match cursor { + Cursor::MaterializedView(mv_cursor) => { + let rowid = match state.registers[*src_reg].get_value() { + Value::Integer(rowid) => Some(*rowid), + Value::Null => None, + _ => None, + }; + + match rowid { + Some(rowid) => { + let seek_result = return_if_io!(mv_cursor + .seek(SeekKey::TableRowId(rowid), SeekOp::GE { eq_only: true })); + let pc = if !matches!(seek_result, SeekResult::Found) { + target_pc.as_offset_int() + } else { + state.pc + 1 + }; + (pc, true) } - } else { - None + None => (target_pc.as_offset_int(), false), } } - }; - - match rowid { - Some(rowid) => { - let seek_result = return_if_io!( - cursor.seek(SeekKey::TableRowId(rowid), SeekOp::GE { eq_only: true }) - ); - let pc = if !matches!(seek_result, SeekResult::Found) { - target_pc.as_offset_int() - } else { - state.pc + 1 + Cursor::BTree(btree_cursor) => { + let rowid = match state.registers[*src_reg].get_value() { + Value::Integer(rowid) => Some(*rowid), + Value::Null => None, + // For non-integer values try to apply affinity and convert them to integer. + other => { + let mut temp_reg = Register::Value(other.clone()); + let converted = apply_affinity_char(&mut temp_reg, Affinity::Numeric); + if converted { + match temp_reg.get_value() { + Value::Integer(i) => Some(*i), + Value::Float(f) => Some(*f as i64), + _ => unreachable!("apply_affinity_char with Numeric should produce an integer if it returns true"), + } + } else { + None + } + } }; - (pc, true) + + match rowid { + Some(rowid) => { + let seek_result = return_if_io!(btree_cursor + .seek(SeekKey::TableRowId(rowid), SeekOp::GE { eq_only: true })); + let pc = if !matches!(seek_result, SeekResult::Found) { + target_pc.as_offset_int() + } else { + state.pc + 1 + }; + (pc, true) + } + None => (target_pc.as_offset_int(), false), + } } - None => (target_pc.as_offset_int(), false), - } + _ => panic!("SeekRowid on non-btree/materialized-view cursor"), + }; + (pc, did_seek) }; // Increment btree_seeks metric for SeekRowid operation after cursor is dropped if did_seek { @@ -2676,7 +2802,7 @@ pub enum SeekInternalResult { NotFound, IO(IOCompletions), } -#[derive(Clone)] +#[derive(Clone, Copy)] pub enum RecordSource { Unpacked { start_reg: usize, num_regs: usize }, Packed { record_reg: usize }, @@ -2826,7 +2952,7 @@ pub fn seek_internal( } OpSeekState::Seek { key, op } => { let seek_result = { - let mut cursor = state.get_cursor(cursor_id); + let cursor = get_cursor!(state, cursor_id); let cursor = cursor.as_btree_mut(); let seek_key = match key { OpSeekKey::TableRowId(rowid) => SeekKey::TableRowId(*rowid), @@ -2859,7 +2985,7 @@ pub fn seek_internal( } OpSeekState::Advance { op } => { let found = { - let mut cursor = state.get_cursor(cursor_id); + let cursor = get_cursor!(state, cursor_id); let cursor = cursor.as_btree_mut(); // Seek operation has anchor number which equals to the closed boundary of the range // (e.g. 
for >= x - anchor is x, for > x - anchor is x + 1) @@ -2896,7 +3022,7 @@ pub fn seek_internal( }); } OpSeekState::MoveLast => { - let mut cursor = state.get_cursor(cursor_id); + let cursor = state.get_cursor(cursor_id); let cursor = cursor.as_btree_mut(); match cursor.last()? { IOResult::Done(()) => {} @@ -2987,7 +3113,7 @@ pub fn op_idx_ge( assert!(target_pc.is_offset()); let pc = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = get_cursor!(state, *cursor_id); let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { @@ -3028,7 +3154,7 @@ pub fn op_seek_end( ) -> Result { load_insn!(SeekEnd { cursor_id }, *insn); { - let mut cursor = state.get_cursor(cursor_id); + let cursor = state.get_cursor(cursor_id); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.seek_end()); } @@ -3056,7 +3182,7 @@ pub fn op_idx_le( assert!(target_pc.is_offset()); let pc = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = get_cursor!(state, *cursor_id); let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { @@ -3107,7 +3233,7 @@ pub fn op_idx_gt( assert!(target_pc.is_offset()); let pc = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = get_cursor!(state, *cursor_id); let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { @@ -3158,7 +3284,7 @@ pub fn op_idx_lt( assert!(target_pc.is_offset()); let pc = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = get_cursor!(state, *cursor_id); let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { @@ -3389,22 +3515,22 @@ pub fn op_agg_step( } } AggFunc::Count | AggFunc::Count0 => { - let col = state.registers[*col].get_value().clone(); + let skip = (matches!(func, AggFunc::Count) + && matches!(state.registers[*col].get_value(), Value::Null)); if matches!(&state.registers[*acc_reg], Register::Value(Value::Null)) { state.registers[*acc_reg] = Register::Aggregate(AggContext::Count(Value::Integer(0))); } - let Register::Aggregate(agg) = state.registers[*acc_reg].borrow_mut() else { + let Register::Aggregate(agg) = &mut state.registers[*acc_reg] else { panic!( "Unexpected value {:?} in AggStep at register {}", state.registers[*acc_reg], *acc_reg ); }; - let AggContext::Count(count) = agg.borrow_mut() else { + let AggContext::Count(count) = agg else { unreachable!(); }; - - if !(matches!(func, AggFunc::Count) && matches!(col, Value::Null)) { + if !skip { *count += 1; }; } @@ -3735,7 +3861,7 @@ pub fn op_sorter_open( page_size, pager.io.clone(), ); - let mut cursors = state.cursors.borrow_mut(); + let cursors = &mut state.cursors; cursors .get_mut(*cursor_id) .unwrap() @@ -3760,7 +3886,7 @@ pub fn op_sorter_data( insn ); let record = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_sorter_mut(); cursor.record().cloned() }; @@ -3773,7 +3899,7 @@ pub fn op_sorter_data( }; state.registers[*dest_reg] = Register::Record(record.clone()); { - let mut pseudo_cursor = state.get_cursor(*pseudo_cursor); + let pseudo_cursor = state.get_cursor(*pseudo_cursor); pseudo_cursor.as_pseudo_mut().insert(record); } state.pc += 1; @@ -3795,7 +3921,7 @@ pub fn op_sorter_insert( insn ); { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = get_cursor!(state, *cursor_id); let cursor = cursor.as_sorter_mut(); let record = match &state.registers[*record_reg] { 
Register::Record(record) => record, @@ -3822,7 +3948,7 @@ pub fn op_sorter_sort( insn ); let (is_empty, did_sort) = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_sorter_mut(); let is_empty = cursor.is_empty(); if !is_empty { @@ -3858,7 +3984,7 @@ pub fn op_sorter_next( ); assert!(pc_if_next.is_offset()); let has_more = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_sorter_mut(); return_if_io!(cursor.next()); cursor.has_more() @@ -5192,12 +5318,11 @@ pub fn op_insert( match &state.op_insert_state.sub_state { OpInsertSubState::MaybeCaptureRecord => { let schema = program.connection.schema.borrow(); - let dependent_views = - schema.get_dependent_materialized_views_unnormalized(table_name); + let dependent_views = schema.get_dependent_materialized_views(table_name); // If there are no dependent views, we don't need to capture the old record. // We also don't need to do it if the rowid of the UPDATEd row was changed, because that means // we deleted it earlier and `op_delete` already captured the change. - if dependent_views.is_none() || flag.has(InsertFlags::UPDATE_ROWID_CHANGE) { + if dependent_views.is_empty() || flag.has(InsertFlags::UPDATE_ROWID_CHANGE) { if flag.has(InsertFlags::REQUIRE_SEEK) { state.op_insert_state.sub_state = OpInsertSubState::Seek; } else { @@ -5209,7 +5334,7 @@ pub fn op_insert( turso_assert!(!flag.has(InsertFlags::REQUIRE_SEEK), "to capture old record accurately, we must be located at the correct position in the table"); let old_record = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_btree_mut(); // Get the current key - for INSERT operations, there may not be a current row let maybe_key = return_if_io!(cursor.rowid()); @@ -5285,7 +5410,7 @@ pub fn op_insert( }; { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = get_cursor!(state, *cursor_id); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.insert(&BTreeKey::new_table_rowid(key, Some(&record)))); @@ -5295,7 +5420,7 @@ pub fn op_insert( // Only update last_insert_rowid for regular table inserts, not schema modifications let root_page = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_btree_mut(); cursor.root_page() }; @@ -5303,9 +5428,8 @@ pub fn op_insert( state.op_insert_state.sub_state = OpInsertSubState::UpdateLastRowid; } else { let schema = program.connection.schema.borrow(); - let dependent_views = - schema.get_dependent_materialized_views_unnormalized(table_name); - if dependent_views.is_some() { + let dependent_views = schema.get_dependent_materialized_views(table_name); + if !dependent_views.is_empty() { state.op_insert_state.sub_state = OpInsertSubState::ApplyViewChange; } else { break; @@ -5314,7 +5438,7 @@ pub fn op_insert( } OpInsertSubState::UpdateLastRowid => { let maybe_rowid = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.rowid()) }; @@ -5325,9 +5449,8 @@ pub fn op_insert( program.n_change.set(prev_changes + 1); } let schema = program.connection.schema.borrow(); - let dependent_views = - schema.get_dependent_materialized_views_unnormalized(table_name); - if dependent_views.is_some() { + let dependent_views = schema.get_dependent_materialized_views(table_name); + if 
!dependent_views.is_empty() { state.op_insert_state.sub_state = OpInsertSubState::ApplyViewChange; continue; } @@ -5335,13 +5458,11 @@ pub fn op_insert( } OpInsertSubState::ApplyViewChange => { let schema = program.connection.schema.borrow(); - let dependent_views = - schema.get_dependent_materialized_views_unnormalized(table_name); - assert!(dependent_views.is_some()); - let dependent_views = dependent_views.unwrap(); + let dependent_views = schema.get_dependent_materialized_views(table_name); + assert!(!dependent_views.is_empty()); let (key, values) = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_btree_mut(); let key = match &state.registers[*key_reg].get_value() { @@ -5383,17 +5504,22 @@ pub fn op_insert( (key, new_values) }; - let mut tx_states = program.connection.view_transaction_states.borrow_mut(); if let Some((key, values)) = state.op_insert_state.old_record.take() { for view_name in dependent_views.iter() { - let tx_state = tx_states.entry(view_name.clone()).or_default(); - tx_state.delta.delete(key, values.clone()); + let tx_state = program + .connection + .view_transaction_states + .get_or_create(view_name); + tx_state.delete(key, values.clone()); } } for view_name in dependent_views.iter() { - let tx_state = tx_states.entry(view_name.clone()).or_default(); + let tx_state = program + .connection + .view_transaction_states + .get_or_create(view_name); - tx_state.delta.insert(key, values.clone()); + tx_state.insert(key, values.clone()); } break; @@ -5467,7 +5593,7 @@ pub fn op_delete( } let deleted_record = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_btree_mut(); // Get the current key let maybe_key = return_if_io!(cursor.rowid()); @@ -5502,7 +5628,7 @@ pub fn op_delete( } OpDeleteSubState::Delete => { { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.delete()); } @@ -5522,10 +5648,12 @@ pub fn op_delete( assert!(!dependent_views.is_empty()); let maybe_deleted_record = state.op_delete_state.deleted_record.take(); if let Some((key, values)) = maybe_deleted_record { - let mut tx_states = program.connection.view_transaction_states.borrow_mut(); for view_name in dependent_views { - let tx_state = tx_states.entry(view_name.clone()).or_default(); - tx_state.delta.delete(key, values.clone()); + let tx_state = program + .connection + .view_transaction_states + .get_or_create(&view_name); + tx_state.delete(key, values.clone()); } } break; @@ -5610,7 +5738,7 @@ pub fn op_idx_delete( } Some(OpIdxDeleteState::Verifying) => { let rowid = { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.rowid()) }; @@ -5625,7 +5753,7 @@ pub fn op_idx_delete( } Some(OpIdxDeleteState::Deleting) => { { - let mut cursor = state.get_cursor(*cursor_id); + let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.delete()); } @@ -5721,7 +5849,7 @@ pub fn op_idx_insert( } OpIdxInsertState::UniqueConstraintCheck => { let ignore_conflict = 'i: { - let mut cursor = state.get_cursor(cursor_id); + let cursor = get_cursor!(state, cursor_id); let cursor = cursor.as_btree_mut(); let record_opt = return_if_io!(cursor.record()); let Some(record) = record_opt.as_ref() else { @@ -5767,7 +5895,7 @@ pub fn op_idx_insert( } 
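A note on the control flow above: `op_idx_insert` is written as a small resumable state machine. Each `return_if_io!` can yield back to the caller mid-opcode, and because the current phase (`UniqueConstraintCheck`, `Insert`, ...) is stored in `state`, the next call re-enters the same phase instead of repeating work. A minimal sketch of the pattern, using hypothetical `IoResult`/`do_io` stand-ins rather than turso's actual types:

```rust
// Minimal sketch of the resumable-opcode pattern (illustrative only; `IoResult`,
// `InsertState`, and `do_io` are hypothetical stand-ins, not turso's types).
#[derive(Clone, Copy)]
enum InsertState {
    UniqueConstraintCheck,
    Insert,
}

enum IoResult<T> {
    Done(T),
    Io, // an I/O request is in flight; the caller must call `step` again later
}

struct IdxInsert {
    state: InsertState,
}

impl IdxInsert {
    /// Each call makes progress and either finishes or parks on I/O. Because
    /// the current phase lives in `self`, a re-entry after `IoResult::Io`
    /// resumes exactly where the previous call stopped.
    fn step(&mut self) -> IoResult<()> {
        loop {
            match self.state {
                InsertState::UniqueConstraintCheck => match do_io() {
                    IoResult::Io => return IoResult::Io, // resume here next time
                    IoResult::Done(()) => self.state = InsertState::Insert,
                },
                InsertState::Insert => return do_io(),
            }
        }
    }
}

// Hypothetical I/O step that may or may not complete immediately.
fn do_io() -> IoResult<()> {
    IoResult::Done(())
}
```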
OpIdxInsertState::Insert => { { - let mut cursor = state.get_cursor(cursor_id); + let cursor = get_cursor!(state, cursor_id); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.insert(&BTreeKey::new_index_key(record_to_insert))); } @@ -5782,7 +5910,7 @@ pub fn op_idx_insert( } } -#[derive(Debug)] +#[derive(Debug, Clone, Copy)] pub enum OpNewRowidState { Start, SeekingToLast, @@ -5818,7 +5946,7 @@ pub fn op_new_rowid( if let Some(mv_store) = mv_store { let rowid = { - let mut cursor = state.get_cursor(*cursor); + let cursor = state.get_cursor(*cursor); let cursor = cursor.as_btree_mut(); let mvcc_cursor = cursor.get_mvcc_cursor(); let mut mvcc_cursor = RefCell::borrow_mut(&mvcc_cursor); @@ -5833,14 +5961,14 @@ pub fn op_new_rowid( const MAX_ATTEMPTS: u32 = 100; loop { - match &state.op_new_rowid_state { + match state.op_new_rowid_state { OpNewRowidState::Start => { state.op_new_rowid_state = OpNewRowidState::SeekingToLast; } OpNewRowidState::SeekingToLast => { { - let mut cursor = state.get_cursor(*cursor); + let cursor = state.get_cursor(*cursor); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.seek_to_last()); } @@ -5849,7 +5977,7 @@ pub fn op_new_rowid( OpNewRowidState::ReadingMaxRowid => { let current_max = { - let mut cursor = state.get_cursor(*cursor); + let cursor = state.get_cursor(*cursor); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.rowid()) }; @@ -5876,7 +6004,7 @@ pub fn op_new_rowid( } OpNewRowidState::GeneratingRandom { attempts } => { - if *attempts >= MAX_ATTEMPTS { + if attempts >= MAX_ATTEMPTS { return Err(LimboError::DatabaseFull("Unable to find an unused rowid after 100 attempts - database is probably full".to_string())); } @@ -5889,7 +6017,7 @@ pub fn op_new_rowid( random_rowid += 1; // Ensure positive state.op_new_rowid_state = OpNewRowidState::VerifyingCandidate { - attempts: *attempts, + attempts, candidate: random_rowid, }; } @@ -5899,18 +6027,17 @@ pub fn op_new_rowid( candidate, } => { let exists = { - let mut cursor = state.get_cursor(*cursor); + let cursor = state.get_cursor(*cursor); let cursor = cursor.as_btree_mut(); - let seek_result = return_if_io!(cursor.seek( - SeekKey::TableRowId(*candidate), - SeekOp::GE { eq_only: true } - )); + let seek_result = + return_if_io!(cursor + .seek(SeekKey::TableRowId(candidate), SeekOp::GE { eq_only: true })); matches!(seek_result, SeekResult::Found) }; if !exists { // Found unused rowid! 
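The candidate verification in `op_new_rowid` relies on `SeekOp::GE { eq_only: true }` behaving as an existence probe: it yields `SeekResult::Found` only when a row with exactly the candidate rowid is present. A rough sketch of the retry loop, with hypothetical `exists`/`rand` closures standing in for the real cursor seek and RNG:

```rust
// Illustrative sketch of the random-rowid probing strategy (hypothetical
// `exists`/`rand` closures stand in for the GE{eq_only} seek and the RNG).
fn pick_unused_rowid(
    exists: impl Fn(i64) -> bool,
    mut rand: impl FnMut() -> i64,
) -> Option<i64> {
    const MAX_ATTEMPTS: u32 = 100;
    for _ in 0..MAX_ATTEMPTS {
        // Mask and shift the candidate into positive range, mirroring the
        // "Ensure positive" adjustment in the real code.
        let candidate = (rand() & i64::MAX >> 1) + 1;
        if !exists(candidate) {
            return Some(candidate); // found an unused rowid
        }
    }
    None // the caller maps this to a "database is probably full" error
}
```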
- state.registers[*rowid_reg] = Register::Value(Value::Integer(*candidate)); + state.registers[*rowid_reg] = Register::Value(Value::Integer(candidate)); state.op_new_rowid_state = OpNewRowidState::Start; state.pc += 1; return Ok(InsnFunctionStepResult::Step); @@ -5923,7 +6050,7 @@ pub fn op_new_rowid( } OpNewRowidState::GoNext => { { - let mut cursor = state.get_cursor(*cursor); + let cursor = state.get_cursor(*cursor); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.next()); } @@ -5981,6 +6108,7 @@ pub fn op_soft_null( Ok(InsnFunctionStepResult::Step) } +#[derive(Clone, Copy)] pub enum OpNoConflictState { Start, Seeking(RecordSource), @@ -6006,9 +6134,9 @@ pub fn op_no_conflict( ); loop { - match &state.op_no_conflict_state { + match state.op_no_conflict_state { OpNoConflictState::Start => { - let mut cursor_ref = state.get_cursor(*cursor_id); + let cursor_ref = state.get_cursor(*cursor_id); let cursor = cursor_ref.as_btree_mut(); let record_source = if *num_regs == 0 { @@ -6046,8 +6174,6 @@ pub fn op_no_conflict( }), }; - drop(cursor_ref); - if contains_nulls { state.pc = target_pc.as_offset_int(); state.op_no_conflict_state = OpNoConflictState::Start; @@ -6062,7 +6188,7 @@ pub fn op_no_conflict( state, pager, mv_store, - record_source.clone(), + record_source, *cursor_id, true, SeekOp::GE { eq_only: true }, @@ -6100,12 +6226,12 @@ pub fn op_not_exists( insn ); let exists = if let Some(mv_store) = mv_store { - let mut cursor = must_be_btree_cursor!(*cursor, program.cursor_ref, state, "NotExists"); + let cursor = must_be_btree_cursor!(*cursor, program.cursor_ref, state, "NotExists"); let cursor = cursor.as_btree_mut(); let mvcc_cursor = cursor.get_mvcc_cursor(); false } else { - let mut cursor = must_be_btree_cursor!(*cursor, program.cursor_ref, state, "NotExists"); + let cursor = must_be_btree_cursor!(*cursor, program.cursor_ref, state, "NotExists"); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.exists(state.registers[*rowid_reg].get_value())) }; @@ -6194,7 +6320,7 @@ pub fn op_open_write( }, }; let (_, cursor_type) = program.cursor_ref.get(*cursor_id).unwrap(); - let mut cursors = state.cursors.borrow_mut(); + let cursors = &mut state.cursors; let maybe_index = match cursor_type { CursorType::BTreeIndex(index) => Some(index), _ => None, @@ -6232,7 +6358,10 @@ pub fn op_open_write( } else { let num_columns = match cursor_type { CursorType::BTreeTable(table_rc) => table_rc.columns.len(), - _ => unreachable!("Expected BTreeTable. This should not have happened."), + CursorType::MaterializedView(table_rc, _) => table_rc.columns.len(), + _ => unreachable!( + "Expected BTreeTable or MaterializedView. This should not have happened." + ), }; let cursor = @@ -6372,7 +6501,7 @@ pub fn op_close( mv_store: Option<&Arc>, ) -> Result { load_insn!(Close { cursor_id }, insn); - let mut cursors = state.cursors.borrow_mut(); + let cursors = &mut state.cursors; cursors.get_mut(*cursor_id).unwrap().take(); state.pc += 1; Ok(InsnFunctionStepResult::Step) @@ -6453,6 +6582,7 @@ pub fn op_parse_schema( }, insn ); + let conn = program.connection.clone(); // set auto commit to false in order for parse schema to not commit changes as transaction state is stored in connection, // and we use the same connection for nested query. 
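Several hunks above swap `match &state.op_..._state` for a by-value `match state.op_..._state`; that is what the new `Clone, Copy` derives on `OpNewRowidState` and `OpNoConflictState` enable. Copying the small state enum out of `state` ends the borrow immediately, so the arms are free to mutate `state` again (note the removed `drop(cursor_ref)`). A self-contained illustration with hypothetical `Mode`/`State` types:

```rust
// Why the new `Copy` derives matter (illustrative, hypothetical types):
// matching on `state.mode` by value copies the tiny enum out of `state`,
// so the match arms can mutably borrow `state` again.
#[derive(Clone, Copy)]
enum Mode {
    Start,
    Seeking { attempts: u32 },
}

struct State {
    mode: Mode,
    pc: u32,
}

fn step(state: &mut State) {
    match state.mode {
        // No borrow of `state` is held here: `Mode` was copied out.
        Mode::Start => {
            state.mode = Mode::Seeking { attempts: 0 };
        }
        Mode::Seeking { attempts } => {
            // `attempts` is a plain u32, not a reference into `state`.
            state.pc += 1;
            state.mode = Mode::Seeking { attempts: attempts + 1 };
        }
    }
}
```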
@@ -6464,7 +6594,7 @@ pub fn op_parse_schema( conn.with_schema_mut(|schema| { // TODO: This function below is synchronous, make it async - let existing_views = schema.materialized_views.clone(); + let existing_views = schema.incremental_views.clone(); conn.is_nested_stmt.set(true); parse_schema_rows( stmt, @@ -6479,7 +6609,7 @@ pub fn op_parse_schema( conn.with_schema_mut(|schema| { // TODO: This function below is synchronous, make it async - let existing_views = schema.materialized_views.clone(); + let existing_views = schema.incremental_views.clone(); conn.is_nested_stmt.set(true); parse_schema_rows( stmt, @@ -6504,14 +6634,75 @@ pub fn op_parse_schema( pub fn op_populate_materialized_views( program: &Program, state: &mut ProgramState, - _insn: &Insn, - _pager: &Rc, + insn: &Insn, + pager: &Rc, _mv_store: Option<&Arc>, ) -> Result { - let conn = program.connection.clone(); - let schema = conn.schema.borrow(); + load_insn!(PopulateMaterializedViews { cursors }, insn); + + let conn = program.connection.clone(); + + // For each view, get its cursor and root page + let mut view_info = Vec::new(); + { + let cursors_ref = &state.cursors; + for (view_name, cursor_id) in cursors { + // Get the cursor to find the root page + let cursor = cursors_ref + .get(*cursor_id) + .and_then(|c| c.as_ref()) + .ok_or_else(|| { + LimboError::InternalError(format!("Cursor {cursor_id} not found")) + })?; + + let root_page = match cursor { + crate::types::Cursor::BTree(btree_cursor) => btree_cursor.root_page(), + _ => { + return Err(LimboError::InternalError( + "Expected BTree cursor for materialized view".into(), + )) + } + }; + + view_info.push((view_name.clone(), root_page, *cursor_id)); + } + } + + // Now populate the views (after releasing the schema borrow) + for (view_name, _root_page, cursor_id) in view_info { + let schema = conn.schema.borrow(); + if let Some(view) = schema.get_materialized_view(&view_name) { + let mut view = view.lock().unwrap(); + // Drop the schema borrow before calling populate_from_table + drop(schema); + + // Get the cursor for writing + // Get a mutable reference to the cursor + let cursors_ref = &mut state.cursors; + let cursor = cursors_ref + .get_mut(cursor_id) + .and_then(|c| c.as_mut()) + .ok_or_else(|| { + LimboError::InternalError(format!( + "Cursor {cursor_id} not found for population" + )) + })?; + + // Extract the BTreeCursor + let btree_cursor = match cursor { + crate::types::Cursor::BTree(btree_cursor) => btree_cursor, + _ => { + return Err(LimboError::InternalError( + "Expected BTree cursor for materialized view population".into(), + )) + } + }; + + // Now populate it with the cursor for writing + return_if_io!(view.populate_from_table(&conn, pager, btree_cursor.as_mut())); + } + } - return_if_io!(schema.populate_materialized_views(&conn)); // All views populated, advance to next instruction state.pc += 1; Ok(InsnFunctionStepResult::Step) @@ -6807,16 +6998,34 @@ pub fn op_open_ephemeral( let conn = program.connection.clone(); let io = conn.pager.borrow().io.clone(); let rand_num = io.generate_random_number(); - let temp_dir = temp_dir(); - let rand_path = - std::path::Path::new(&temp_dir).join(format!("tursodb-ephemeral-{rand_num}")); - let Some(rand_path_str) = rand_path.to_str() else { - return Err(LimboError::InternalError( - "Failed to convert path to string".to_string(), - )); - }; - let file = io.open_file(rand_path_str, OpenFlags::Create, false)?; - let db_file = Arc::new(DatabaseFile::new(file)); + let db_file; + let db_file_io: Arc; + + // we support OPFS in 
WASM - but it requires files to be pre-opened in the browser before use + // we can fix this once we make the open_file interface async + // but for now, for simplicity, we use MemoryIO for all intermediate calculations + #[cfg(target_family = "wasm")] + { + use crate::MemoryIO; + + db_file_io = Arc::new(MemoryIO::new()); + let file = db_file_io.open_file("temp-file", OpenFlags::Create, false)?; + db_file = Arc::new(DatabaseFile::new(file)); + } + #[cfg(not(target_family = "wasm"))] + { + let temp_dir = temp_dir(); + let rand_path = + std::path::Path::new(&temp_dir).join(format!("tursodb-ephemeral-{rand_num}")); + let Some(rand_path_str) = rand_path.to_str() else { + return Err(LimboError::InternalError( + "Failed to convert path to string".to_string(), + )); + }; + let file = io.open_file(rand_path_str, OpenFlags::Create, false)?; + db_file = Arc::new(DatabaseFile::new(file)); + db_file_io = io; + } let page_size = pager .io .get(); let buffer_pool = program.connection._db.buffer_pool.clone(); - let page_cache = Arc::new(RwLock::new(DumbLruPageCache::default())); + let page_cache = Arc::new(RwLock::new(PageCache::default())); let pager = Rc::new(Pager::new( db_file, None, - io, + db_file_io, page_cache, buffer_pool.clone(), Arc::new(AtomicDbState::new(DbState::Uninitialized)), @@ -6902,8 +7111,7 @@ pub fn op_open_ephemeral( OpOpenEphemeralState::Rewind { cursor } => { return_if_io!(cursor.rewind()); - let mut cursors: std::cell::RefMut<'_, Vec<Option<Cursor>>> = - state.cursors.borrow_mut(); + let cursors = &mut state.cursors; let (_, cursor_type) = program.cursor_ref.get(cursor_id).unwrap(); @@ -6936,6 +7144,9 @@ pub fn op_open_ephemeral( CursorType::VirtualTable(_) => { panic!("OpenEphemeral on virtual table cursor, use Insn::VOpen instead"); } + CursorType::MaterializedView(_, _) => { + panic!("OpenEphemeral on materialized view cursor"); + } } state.pc += 1; @@ -7061,7 +7272,7 @@ pub fn op_affinity( for (i, affinity_char) in affinities.chars().enumerate().take(count.get()) { let reg_index = *start_reg + i; - let affinity = Affinity::from_char(affinity_char)?; + let affinity = Affinity::from_char(affinity_char); apply_affinity_char(&mut state.registers[reg_index], affinity); } @@ -7087,7 +7298,7 @@ pub fn op_count( ); let count = { - let mut cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "Count"); + let cursor = must_be_btree_cursor!(*cursor_id, program.cursor_ref, state, "Count"); let cursor = cursor.as_btree_mut(); return_if_io!(cursor.count()) }; @@ -7381,6 +7592,33 @@ pub fn op_alter_column( Ok(InsnFunctionStepResult::Step) } +pub fn op_if_neg( + program: &Program, + state: &mut ProgramState, + insn: &Insn, + pager: &Rc<Pager>, + mv_store: Option<&Arc<MvStore>>, +) -> Result<InsnFunctionStepResult> { + load_insn!(IfNeg { reg, target_pc }, insn); + + match &state.registers[*reg] { + Register::Value(Value::Integer(i)) if *i < 0 => { + state.pc = target_pc.as_offset_int(); + } + Register::Value(Value::Float(f)) if *f < 0.0 => { + state.pc = target_pc.as_offset_int(); + } + Register::Value(Value::Null) => { + state.pc += 1; + } + _ => { + state.pc += 1; + } + } + + Ok(InsnFunctionStepResult::Step) +} + impl Value { pub fn exec_lower(&self) -> Option { match self { @@ -8048,15 +8286,16 @@ impl Value { None => 10.0, }; + if f <= 0.0 || base <= 0.0 || base == 1.0 { + return Value::Null; + } + if base == 2.0 { return Value::Float(libm::log2(f)); } else if base == 10.0 { return Value::Float(libm::log10(f)); };
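The reordering in this hunk matters because the base-2 and base-10 fast paths return early: in the old order, `log(2.0, -1.0)` reached `libm::log2(-1.0)` and produced NaN, while the guard-first version yields SQL NULL for any non-positive argument or degenerate base. A condensed sketch of the corrected control flow (a standalone function using std float methods instead of `libm`):

```rust
// Condensed sketch of the corrected exec_log ordering (illustrative).
// Checking the domain first means the base-2/base-10 fast paths can no
// longer smuggle a NaN out for non-positive arguments.
fn log_with_base(f: f64, base: f64) -> Option<f64> {
    // The domain check must run before any fast path returns.
    if f <= 0.0 || base <= 0.0 || base == 1.0 {
        return None; // SQL NULL
    }
    if base == 2.0 {
        return Some(f.log2());
    }
    if base == 10.0 {
        return Some(f.log10());
    }
    // General case: change of base.
    Some(f.ln() / base.ln())
}
```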
- if f <= 0.0 || base <= 0.0 || base == 1.0 { - return Value::Null; - } let log_x = libm::log(f); let log_base = libm::log(base); let result = log_x / log_base; @@ -8123,39 +8362,23 @@ impl Value { } pub fn exec_concat(&self, rhs: &Value) -> Value { - match (self, rhs) { - (Value::Text(lhs_text), Value::Text(rhs_text)) => { - Value::build_text(lhs_text.as_str().to_string() + rhs_text.as_str()) - } - (Value::Text(lhs_text), Value::Integer(rhs_int)) => { - Value::build_text(lhs_text.as_str().to_string() + &rhs_int.to_string()) - } - (Value::Text(lhs_text), Value::Float(rhs_float)) => { - Value::build_text(lhs_text.as_str().to_string() + &rhs_float.to_string()) - } - (Value::Integer(lhs_int), Value::Text(rhs_text)) => { - Value::build_text(lhs_int.to_string() + rhs_text.as_str()) - } - (Value::Integer(lhs_int), Value::Integer(rhs_int)) => { - Value::build_text(lhs_int.to_string() + &rhs_int.to_string()) - } - (Value::Integer(lhs_int), Value::Float(rhs_float)) => { - Value::build_text(lhs_int.to_string() + &rhs_float.to_string()) - } - (Value::Float(lhs_float), Value::Text(rhs_text)) => { - Value::build_text(lhs_float.to_string() + rhs_text.as_str()) - } - (Value::Float(lhs_float), Value::Integer(rhs_int)) => { - Value::build_text(lhs_float.to_string() + &rhs_int.to_string()) - } - (Value::Float(lhs_float), Value::Float(rhs_float)) => { - Value::build_text(lhs_float.to_string() + &rhs_float.to_string()) - } - (Value::Null, _) | (_, Value::Null) => Value::Null, - (Value::Blob(_), _) | (_, Value::Blob(_)) => { - todo!("TODO: Handle Blob conversion to String") - } + if let (Value::Blob(lhs), Value::Blob(rhs)) = (self, rhs) { + return Value::build_text(String::from_utf8_lossy(&[ + lhs.as_slice(), + rhs.as_slice() + ] + .concat())); } + + let Some(lhs) = self.cast_text() else { + return Value::Null; + }; + + let Some(rhs) = rhs.cast_text() else { + return Value::Null; + }; + + Value::build_text(lhs + &rhs) } pub fn exec_and(&self, rhs: &Value) -> Value { @@ -8321,15 +8544,19 @@ fn apply_affinity_char(target: &mut Register, affinity: Affinity) -> bool { } if let Value::Text(t) = value { - let text = t.as_str(); + let text = t.as_str().trim(); // Handle hex numbers - they shouldn't be converted if text.starts_with("0x") { return false; } - // Try to parse as number (similar to applyNumericAffinity) - let Ok(num) = checked_cast_text_to_numeric(text) else { + // For affinity conversion, only convert strings that are entirely numeric + let num = if let Ok(i) = text.parse::<i64>() { + Value::Integer(i) + } else if let Ok(f) = text.parse::<f64>() { + Value::Float(f) + } else { return false; }; diff --git a/core/vdbe/explain.rs b/core/vdbe/explain.rs index 054e7308b..6c850aadd 100644 --- a/core/vdbe/explain.rs +++ b/core/vdbe/explain.rs @@ -19,6 +19,7 @@ pub fn insn_to_str( CursorType::BTreeIndex(index) => &index.name, CursorType::Pseudo(_) => "pseudo", CursorType::VirtualTable(virtual_table) => &virtual_table.name, + CursorType::MaterializedView(table, _) => &table.name, CursorType::Sorter => "sorter", } }; @@ -541,6 +542,10 @@ pub fn insn_to_str( let name = &index.columns.get(*column).unwrap().name; Some(name) } + CursorType::MaterializedView(table, _) => { + let name = table.columns.get(*column).and_then(|v| v.name.as_ref()); + name + } CursorType::Pseudo(_) => None, CursorType::Sorter => None, CursorType::VirtualTable(v) => v.columns.get(*column).unwrap().name.as_ref(), @@ -579,6 +584,7 @@ pub fn insn_to_str( count, dest_reg, index_name, + affinity_str: _, } => { let for_index = index_name.as_ref().map(|name| format!("; for {name}")); ( @@ -1337,13 +1343,13 @@ pub fn 
insn_to_str( 0, where_clause.clone().unwrap_or("NULL".to_string()), ), - Insn::PopulateMaterializedViews => ( + Insn::PopulateMaterializedViews { cursors } => ( "PopulateMaterializedViews", 0, 0, 0, Value::Null, - 0, + cursors.len() as u16, "".to_string(), ), Insn::Prev { @@ -1709,6 +1715,15 @@ pub fn insn_to_str( 0, format!("collation={collation}"), ), + Insn::IfNeg { reg, target_pc } => ( + "IfNeg", + *reg as i32, + target_pc.as_debug_int(), + 0, + Value::build_text(""), + 0, + format!("if (r[{}] < 0) goto {}", reg, target_pc.as_debug_int()), + ), }; format!( "{:<4} {:<17} {:<4} {:<4} {:<4} {:<13} {:<2} {}", diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index 45210fd42..e2d4a07c6 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -52,7 +52,7 @@ impl CmpInsFlags { pub fn get_affinity(&self) -> Affinity { let aff_code = (self.0 & Self::AFFINITY_MASK) as u8; - Affinity::from_char_code(aff_code).unwrap_or(Affinity::Blob) + Affinity::from_char_code(aff_code) } } @@ -430,6 +430,7 @@ pub enum Insn { count: usize, // P2 dest_reg: usize, // P3 index_name: Option, + affinity_str: Option, }, /// Emit a row of results. @@ -898,7 +899,12 @@ pub enum Insn { }, /// Populate all materialized views after schema parsing - PopulateMaterializedViews, + /// The cursors parameter contains a mapping of view names to cursor IDs that have been + /// opened to the view's btree for writing the materialized data + PopulateMaterializedViews { + /// Mapping of view name to cursor_id for writing to the view's btree + cursors: Vec<(String, usize)>, + }, /// Place the result of lhs >> rhs in dest register. ShiftRight { @@ -1076,6 +1082,10 @@ pub enum Insn { dest: usize, // P2: output register for result new_mode: Option, // P3: new journal mode (if setting) }, + IfNeg { + reg: usize, + target_pc: BranchOffset, + }, } impl Insn { @@ -1186,7 +1196,7 @@ impl Insn { Insn::IsNull { .. } => execute::op_is_null, Insn::CollSeq { .. } => execute::op_coll_seq, Insn::ParseSchema { .. } => execute::op_parse_schema, - Insn::PopulateMaterializedViews => execute::op_populate_materialized_views, + Insn::PopulateMaterializedViews { .. } => execute::op_populate_materialized_views, Insn::ShiftRight { .. } => execute::op_shift_right, Insn::ShiftLeft { .. } => execute::op_shift_left, Insn::AddImm { .. } => execute::op_add_imm, @@ -1213,6 +1223,7 @@ impl Insn { Insn::AlterColumn { .. } => execute::op_alter_column, Insn::MaxPgcnt { .. } => execute::op_max_pgcnt, Insn::JournalMode { .. } => execute::op_journal_mode, + Insn::IfNeg { .. 
} => execute::op_if_neg, } } } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index f3a9425a8..c5e4e91a8 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -28,15 +28,18 @@ pub mod sorter; use crate::{ error::LimboError, function::{AggFunc, FuncCtx}, - state_machine::StateTransition, + mvcc::{database::CommitStateMachine, LocalClock}, + state_machine::{StateMachine, StateTransition, TransitionResult}, storage::sqlite3_ondisk::SmallVec, translate::{collate::CollationSeq, plan::TableReferences}, types::{IOCompletions, IOResult, RawSlice, TextRef}, - vdbe::execute::{ - OpColumnState, OpDeleteState, OpDeleteSubState, OpIdxInsertState, OpInsertState, - OpInsertSubState, OpNewRowidState, OpNoConflictState, OpRowIdState, OpSeekState, + vdbe::{ + execute::{ + OpColumnState, OpDeleteState, OpDeleteSubState, OpIdxInsertState, OpInsertState, + OpInsertSubState, OpNewRowidState, OpNoConflictState, OpRowIdState, OpSeekState, + }, + metrics::StatementMetrics, }, - vdbe::metrics::StatementMetrics, IOExt, RefValue, }; @@ -57,15 +60,20 @@ use execute::{ }; use regex::Regex; -use std::{ - cell::{Cell, RefCell}, - collections::HashMap, - num::NonZero, - rc::Rc, - sync::Arc, -}; +use std::{cell::Cell, collections::HashMap, num::NonZero, rc::Rc, sync::Arc}; use tracing::{instrument, Level}; +/// State machine for committing view deltas with I/O handling +#[derive(Debug, Clone)] +pub enum ViewDeltaCommitState { + NotStarted, + Processing { + views: Vec, // view names (all materialized views have storage) + current_index: usize, + }, + Done, +} + /// We use labels to indicate that we want to jump to whatever the instruction offset /// will be at runtime, because the offset cannot always be determined when the jump /// instruction is created. @@ -210,7 +218,8 @@ impl Bitfield { } } -#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[derive(Debug)] +#[allow(clippy::large_enum_variant)] /// The commit state of the program. 
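`ViewDeltaCommitState::Processing` carries the view list plus a `current_index` precisely so that a `merge_delta` that parks on I/O can be retried without redoing earlier views: the index advances only after a view's delta has merged to completion. A minimal sketch of that resume-at-index discipline, with hypothetical `Step`/`merge_one` names rather than the real API:

```rust
// Illustrative sketch of resume-at-index processing (hypothetical names).
enum Step {
    Io,   // parked on I/O; call `process` again later with the same state
    Done, // this unit of work completed
}

fn process(
    views: &[String],
    current_index: &mut usize,
    merge_one: &mut impl FnMut(&str) -> Step,
) -> Step {
    while *current_index < views.len() {
        match merge_one(&views[*current_index]) {
            // Do not advance: the same view is retried on the next call.
            Step::Io => return Step::Io,
            // Only a completed merge moves the index forward.
            Step::Done => *current_index += 1,
        }
    }
    Step::Done
}
```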
/// There are two states: /// - Ready: The program is ready to run the next instruction, or has shut down after @@ -220,6 +229,9 @@ impl Bitfield { enum CommitState { Ready, Committing, + CommittingMvcc { + state_machine: StateMachine<CommitStateMachine<LocalClock>>, + }, } #[derive(Debug, Clone)] @@ -248,7 +260,7 @@ pub struct Row { pub struct ProgramState { pub io_completions: Option<IOCompletions>, pub pc: InsnReference, - cursors: RefCell<Vec<Option<Cursor>>>, + cursors: Vec<Option<Cursor>>, registers: Vec<Register>, pub(crate) result_row: Option<Row>, last_compare: Option<std::cmp::Ordering>, @@ -277,12 +289,13 @@ pub struct ProgramState { current_collation: Option<CollationSeq>, op_column_state: OpColumnState, op_row_id_state: OpRowIdState, + /// State machine for committing view deltas with I/O handling + view_delta_state: ViewDeltaCommitState, } impl ProgramState { pub fn new(max_registers: usize, max_cursors: usize) -> Self { - let cursors: RefCell<Vec<Option<Cursor>>> = - RefCell::new((0..max_cursors).map(|_| None).collect()); + let cursors: Vec<Option<Cursor>> = (0..max_cursors).map(|_| None).collect(); let registers = vec![Register::Value(Value::Null); max_registers]; Self { io_completions: None, @@ -319,6 +332,7 @@ impl ProgramState { current_collation: None, op_column_state: OpColumnState::Start, op_row_id_state: OpRowIdState::Start, + view_delta_state: ViewDeltaCommitState::NotStarted, } } @@ -360,7 +374,7 @@ impl ProgramState { pub fn reset(&mut self) { self.pc = 0; - self.cursors.borrow_mut().iter_mut().for_each(|c| *c = None); + self.cursors.iter_mut().for_each(|c| *c = None); self.registers .iter_mut() .for_each(|r| *r = Register::Value(Value::Null)); @@ -375,14 +389,12 @@ impl ProgramState { self.json_cache.clear() } - pub fn get_cursor(&self, cursor_id: CursorID) -> std::cell::RefMut<'_, Cursor> { - let cursors = self.cursors.borrow_mut(); - std::cell::RefMut::map(cursors, |c| { - c.get_mut(cursor_id) - .unwrap_or_else(|| panic!("cursor id {cursor_id} out of bounds")) - .as_mut() - .unwrap_or_else(|| panic!("cursor id {cursor_id} is None")) - }) + pub fn get_cursor(&mut self, cursor_id: CursorID) -> &mut Cursor { + self.cursors + .get_mut(cursor_id) + .unwrap_or_else(|| panic!("cursor id {cursor_id} out of bounds")) + .as_mut() + .unwrap_or_else(|| panic!("cursor id {cursor_id} is None")) + } } @@ -403,17 +415,33 @@ impl Register { macro_rules! must_be_btree_cursor { ($cursor_id:expr, $cursor_ref:expr, $state:expr, $insn_name:expr) => {{ let (_, cursor_type) = $cursor_ref.get($cursor_id).unwrap(); - let cursor = match cursor_type { - CursorType::BTreeTable(_) => $state.get_cursor($cursor_id), - CursorType::BTreeIndex(_) => $state.get_cursor($cursor_id), - CursorType::Pseudo(_) => panic!("{} on pseudo cursor", $insn_name), - CursorType::Sorter => panic!("{} on sorter cursor", $insn_name), - CursorType::VirtualTable(_) => panic!("{} on virtual table cursor", $insn_name), - }; - cursor + if matches!( + cursor_type, + CursorType::BTreeTable(_) + | CursorType::BTreeIndex(_) + | CursorType::MaterializedView(_, _) + ) { + $crate::get_cursor!($state, $cursor_id) + } else { + panic!("{} on unexpected cursor", $insn_name) + } }}; } +/// Macro is necessary to help the borrow checker see that we are only accessing the state.cursors field +/// and nothing else +#[macro_export] +macro_rules! 
get_cursor { + ($state:expr, $cursor_id:expr) => { + $state + .cursors + .get_mut($cursor_id) + .unwrap_or_else(|| panic!("cursor id {} out of bounds", $cursor_id)) + .as_mut() + .unwrap_or_else(|| panic!("cursor id {} is None", $cursor_id)) + }; +} + pub struct Program { pub max_registers: usize, pub insns: Vec<(Insn, InsnFunction)>, @@ -444,6 +472,7 @@ impl Program { mv_store: Option>, pager: Rc, ) -> Result { + let enable_tracing = tracing::enabled!(tracing::Level::TRACE); loop { if self.connection.closed.get() { // Connection is closed for whatever reason, rollback the transaction. @@ -470,7 +499,9 @@ impl Program { // invalidate row let _ = state.result_row.take(); let (insn, insn_function) = &self.insns[state.pc as usize]; - trace_insn(self, state.pc as InsnReference, insn); + if enable_tracing { + trace_insn(self, state.pc as InsnReference, insn); + } // Always increment VM steps for every loop iteration state.metrics.vm_steps = state.metrics.vm_steps.saturating_add(1); @@ -511,20 +542,97 @@ impl Program { } #[instrument(skip_all, level = Level::DEBUG)] - fn apply_view_deltas(&self, rollback: bool) { - if self.connection.view_transaction_states.borrow().is_empty() { - return; - } + fn apply_view_deltas( + &self, + state: &mut ProgramState, + rollback: bool, + pager: &Rc, + ) -> Result> { + use crate::types::IOResult; - let tx_states = self.connection.view_transaction_states.take(); + loop { + match &state.view_delta_state { + ViewDeltaCommitState::NotStarted => { + if self.connection.view_transaction_states.is_empty() { + return Ok(IOResult::Done(())); + } - if !rollback { - let schema = self.connection.schema.borrow(); + if rollback { + // On rollback, just clear and done + self.connection.view_transaction_states.clear(); + return Ok(IOResult::Done(())); + } - for (view_name, tx_state) in tx_states.iter() { - if let Some(view_mutex) = schema.get_materialized_view(view_name) { - let mut view = view_mutex.lock().unwrap(); - view.merge_delta(&tx_state.delta); + // Not a rollback - proceed with processing + let schema = self.connection.schema.borrow(); + + // Collect materialized views - they should all have storage + let mut views = Vec::new(); + for view_name in self.connection.view_transaction_states.get_view_names() { + if let Some(view_mutex) = schema.get_materialized_view(&view_name) { + let view = view_mutex.lock().unwrap(); + let root_page = view.get_root_page(); + + // Materialized views should always have storage (root_page != 0) + assert!( + root_page != 0, + "Materialized view '{view_name}' should have a root page" + ); + + views.push(view_name); + } + } + + state.view_delta_state = ViewDeltaCommitState::Processing { + views, + current_index: 0, + }; + } + + ViewDeltaCommitState::Processing { + views, + current_index, + } => { + // At this point we know it's not a rollback + if *current_index >= views.len() { + // All done, clear the transaction states + self.connection.view_transaction_states.clear(); + state.view_delta_state = ViewDeltaCommitState::Done; + return Ok(IOResult::Done(())); + } + + let view_name = &views[*current_index]; + + let delta = self + .connection + .view_transaction_states + .get(view_name) + .unwrap() + .get_delta(); + + let schema = self.connection.schema.borrow(); + if let Some(view_mutex) = schema.get_materialized_view(view_name) { + let mut view = view_mutex.lock().unwrap(); + + // Handle I/O from merge_delta - pass pager, circuit will create its own cursor + match view.merge_delta(&delta, pager.clone())? 
{ IOResult::Done(_) => { // Move to next view state.view_delta_state = ViewDeltaCommitState::Processing { views: views.clone(), current_index: current_index + 1, }; } IOResult::IO(io) => { // Return I/O, will resume at same index return Ok(IOResult::IO(io)); } } } } ViewDeltaCommitState::Done => { return Ok(IOResult::Done(())); } } } } @@ -537,7 +645,14 @@ impl Program { mv_store: Option<&Arc<MvStore>>, rollback: bool, ) -> Result<IOResult<()>> { - self.apply_view_deltas(rollback); + // Apply view deltas with I/O handling + match self.apply_view_deltas(program_state, rollback, &pager)? { + IOResult::IO(io) => return Ok(IOResult::IO(io)), + IOResult::Done(_) => {} + } + + // Reset state for next use + program_state.view_delta_state = ViewDeltaCommitState::NotStarted; if self.connection.transaction_state.get() == TransactionState::None && mv_store.is_none() { // No need to do any work here if not in tx. Current MVCC logic doesn't work with this assumption, @@ -550,25 +665,35 @@ impl Program { if auto_commit { // FIXME: we don't want to commit stuff from other programs. let mut mv_transactions = conn.mv_transactions.borrow_mut(); - for tx_id in mv_transactions.iter() { - let mut state_machine = - mv_store.commit_tx(*tx_id, pager.clone(), &conn).unwrap(); - // TODO: sync IO hack - loop { - let res = state_machine.step(mv_store)?; - match res { - crate::state_machine::TransitionResult::Io(io) => { - io.wait(conn._db.io.as_ref())?; - } - crate::state_machine::TransitionResult::Continue => continue, - crate::state_machine::TransitionResult::Done(_) => break, - } + if matches!(program_state.commit_state, CommitState::Ready) { + assert!( + mv_transactions.len() <= 1, + "for now we only support one mv transaction in a single connection, {mv_transactions:?}", + ); + if mv_transactions.is_empty() { + return Ok(IOResult::Done(())); + } + let tx_id = mv_transactions.first().unwrap(); + let state_machine = mv_store.commit_tx(*tx_id, pager.clone(), &conn).unwrap(); + program_state.commit_state = CommitState::CommittingMvcc { state_machine }; + } + let CommitState::CommittingMvcc { state_machine } = &mut program_state.commit_state + else { + panic!("invalid state for mvcc commit step") + }; + match self.step_end_mvcc_txn(state_machine, mv_store)? { + IOResult::Done(_) => { + assert!(state_machine.is_finalized()); + conn.mv_tx_id.set(None); + conn.transaction_state.replace(TransactionState::None); + program_state.commit_state = CommitState::Ready; + mv_transactions.clear(); + return Ok(IOResult::Done(())); + } + IOResult::IO(io) => { + return Ok(IOResult::IO(io)); } - assert!(state_machine.is_finalized()); } - conn.mv_tx_id.set(None); - conn.transaction_state.replace(TransactionState::None); - mv_transactions.clear(); } Ok(IOResult::Done(())) } else { @@ -579,7 +704,7 @@ impl Program { auto_commit, program_state.commit_state ); - if program_state.commit_state == CommitState::Committing { + if matches!(program_state.commit_state, CommitState::Committing) { let TransactionState::Write { .. } = connection.transaction_state.get() else { unreachable!("invalid state for write commit step") }; @@ -644,6 +769,25 @@ impl Program { Ok(IOResult::Done(())) } + #[instrument(skip(self, commit_state, mv_store), level = Level::DEBUG)] + fn step_end_mvcc_txn( + &self, + commit_state: &mut StateMachine<CommitStateMachine<LocalClock>>, + mv_store: &Arc<MvStore>, + ) -> Result<IOResult<()>> { + loop { + match commit_state.step(mv_store)? 
{ + TransitionResult::Continue => {} + TransitionResult::Io(iocompletions) => { + return Ok(IOResult::IO(iocompletions)); + } + TransitionResult::Done(_) => { + return Ok(IOResult::Done(())); + } + } + } + } + #[rustfmt::skip] pub fn explain(&self) -> String { let mut buff = String::with_capacity(1024); @@ -692,9 +836,6 @@ pub fn registers_to_ref_values(registers: &[Register]) -> Vec { #[instrument(skip(program), level = Level::DEBUG)] fn trace_insn(program: &Program, addr: InsnReference, insn: &Insn) { - if !tracing::enabled!(tracing::Level::TRACE) { - return; - } tracing::trace!( "\n{}", explain::insn_to_str( diff --git a/core/vtab.rs b/core/vtab.rs index 61db382ba..661552ca7 100644 --- a/core/vtab.rs +++ b/core/vtab.rs @@ -1,12 +1,13 @@ +use crate::json::vtab::JsonEachVirtualTable; use crate::pragma::{PragmaVirtualTable, PragmaVirtualTableCursor}; use crate::schema::Column; use crate::util::columns_from_create_table_body; use crate::{Connection, LimboError, SymbolTable, Value}; - +use std::cell::RefCell; use std::ffi::c_void; use std::ptr::NonNull; use std::rc::Rc; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use turso_ext::{ConstraintInfo, IndexInfo, OrderByInfo, ResultCode, VTabKind, VTabModuleImpl}; use turso_parser::{ast, parser::Parser}; @@ -14,7 +15,7 @@ use turso_parser::{ast, parser::Parser}; pub(crate) enum VirtualTableType { Pragma(PragmaVirtualTable), External(ExtVirtualTable), - View(crate::vtab_view::ViewVirtualTable), + Internal(Arc>), } #[derive(Clone, Debug)] @@ -30,24 +31,44 @@ impl VirtualTable { match &self.vtab_type { VirtualTableType::Pragma(_) => true, VirtualTableType::External(table) => table.readonly(), - VirtualTableType::View(_) => true, + VirtualTableType::Internal(_) => true, } } pub(crate) fn builtin_functions() -> Vec> { - PragmaVirtualTable::functions() + let mut vtables: Vec> = PragmaVirtualTable::functions() .into_iter() .map(|(tab, schema)| { let vtab = VirtualTable { name: format!("pragma_{}", tab.pragma_name), columns: Self::resolve_columns(schema) - .expect("built-in function schema resolution should not fail"), + .expect("pragma table-valued function schema resolution should not fail"), kind: VTabKind::TableValuedFunction, vtab_type: VirtualTableType::Pragma(tab), }; Arc::new(vtab) }) - .collect() + .collect(); + + #[cfg(feature = "json")] + vtables.extend(Self::json_virtual_tables()); + + vtables + } + + #[cfg(feature = "json")] + fn json_virtual_tables() -> Vec> { + let json_each = JsonEachVirtualTable {}; + + let json_each_virtual_table = VirtualTable { + name: json_each.name(), + columns: Self::resolve_columns(json_each.sql()) + .expect("internal table-valued function schema resolution should not fail"), + kind: VTabKind::TableValuedFunction, + vtab_type: VirtualTableType::Internal(Arc::new(RefCell::new(json_each))), + }; + + vec![Arc::new(json_each_virtual_table)] } pub(crate) fn function(name: &str, syms: &SymbolTable) -> crate::Result> { @@ -88,21 +109,6 @@ impl VirtualTable { Ok(Arc::new(vtab)) } - /// Create a virtual table for a view - pub(crate) fn view( - view_name: &str, - columns: Vec, - view: Arc>, - ) -> crate::Result> { - let vtab = VirtualTable { - name: view_name.to_owned(), - columns, - kind: VTabKind::VirtualTable, - vtab_type: VirtualTableType::View(crate::vtab_view::ViewVirtualTable { view }), - }; - Ok(Arc::new(vtab)) - } - fn resolve_columns(schema: String) -> crate::Result> { let mut parser = Parser::new(schema.as_bytes()); if let ast::Cmd::Stmt(ast::Stmt::CreateTable { body, .. 
}) = parser.next_cmd()?.ok_or( @@ -124,8 +130,8 @@ impl VirtualTable { VirtualTableType::External(table) => { Ok(VirtualTableCursor::External(table.open(conn.clone())?)) } - VirtualTableType::View(table) => { - Ok(VirtualTableCursor::View(Box::new(table.open(conn)?))) + VirtualTableType::Internal(table) => { + Ok(VirtualTableCursor::Internal(table.borrow().open(conn)?)) } } } @@ -134,7 +140,7 @@ impl VirtualTable { match &self.vtab_type { VirtualTableType::Pragma(_) => Err(LimboError::ReadOnly), VirtualTableType::External(table) => table.update(args), - VirtualTableType::View(_) => Err(LimboError::ReadOnly), + VirtualTableType::Internal(_) => Err(LimboError::ReadOnly), } } @@ -142,7 +148,7 @@ impl VirtualTable { match &self.vtab_type { VirtualTableType::Pragma(_) => Ok(()), VirtualTableType::External(table) => table.destroy(), - VirtualTableType::View(_) => Ok(()), + VirtualTableType::Internal(_) => Ok(()), } } @@ -154,7 +160,7 @@ impl VirtualTable { match &self.vtab_type { VirtualTableType::Pragma(table) => table.best_index(constraints), VirtualTableType::External(table) => table.best_index(constraints, order_by), - VirtualTableType::View(view) => view.best_index(), + VirtualTableType::Internal(table) => table.borrow().best_index(constraints, order_by), } } } @@ -162,7 +168,7 @@ impl VirtualTable { pub enum VirtualTableCursor { Pragma(Box), External(ExtVirtualTableCursor), - View(Box), + Internal(Arc>), } impl VirtualTableCursor { @@ -170,7 +176,7 @@ impl VirtualTableCursor { match self { VirtualTableCursor::Pragma(cursor) => cursor.next(), VirtualTableCursor::External(cursor) => cursor.next(), - VirtualTableCursor::View(cursor) => cursor.next(), + VirtualTableCursor::Internal(cursor) => cursor.borrow_mut().next(), } } @@ -178,7 +184,7 @@ impl VirtualTableCursor { match self { VirtualTableCursor::Pragma(cursor) => cursor.rowid(), VirtualTableCursor::External(cursor) => cursor.rowid(), - VirtualTableCursor::View(cursor) => cursor.rowid(), + VirtualTableCursor::Internal(cursor) => cursor.borrow().rowid(), } } @@ -186,7 +192,7 @@ impl VirtualTableCursor { match self { VirtualTableCursor::Pragma(cursor) => cursor.column(column), VirtualTableCursor::External(cursor) => cursor.column(column), - VirtualTableCursor::View(cursor) => cursor.column(column), + VirtualTableCursor::Internal(cursor) => cursor.borrow().column(column), } } @@ -202,7 +208,9 @@ impl VirtualTableCursor { VirtualTableCursor::External(cursor) => { cursor.filter(idx_num, idx_str, arg_count, args) } - VirtualTableCursor::View(cursor) => cursor.filter(args), + VirtualTableCursor::Internal(cursor) => { + cursor.borrow_mut().filter(&args, idx_str, idx_num) + } } } } @@ -404,3 +412,31 @@ impl Drop for ExtVirtualTableCursor { } } } + +pub trait InternalVirtualTable: std::fmt::Debug { + fn name(&self) -> String; + fn open( + &self, + conn: Arc, + ) -> crate::Result>>; + /// best_index is used by the optimizer. See the comment on `Table::best_index`. + fn best_index( + &self, + constraints: &[turso_ext::ConstraintInfo], + order_by: &[turso_ext::OrderByInfo], + ) -> Result; + fn sql(&self) -> String; +} + +pub trait InternalVirtualTableCursor { + /// next returns `Ok(true)` if there are more rows, and `Ok(false)` otherwise. 
+ fn next(&mut self) -> Result; + fn rowid(&self) -> i64; + fn column(&self, column: usize) -> Result; + fn filter( + &mut self, + args: &[Value], + idx_str: Option, + idx_num: i32, + ) -> Result; +} diff --git a/core/vtab_view.rs b/core/vtab_view.rs deleted file mode 100644 index 4b44f0592..000000000 --- a/core/vtab_view.rs +++ /dev/null @@ -1,101 +0,0 @@ -use crate::incremental::view::IncrementalView; -use crate::{Connection, LimboError, Value, VirtualTable}; -use std::sync::{Arc, Mutex}; - -/// Create a virtual table wrapper for a view -pub fn create_view_virtual_table( - view_name: &str, - view: Arc>, -) -> crate::Result> { - // Use the VirtualTable::view method we added - let view_locked = view.lock().map_err(|_| { - LimboError::InternalError("Failed to lock view for virtual table creation".to_string()) - })?; - let columns = view_locked.columns.clone(); - drop(view_locked); // Release the lock before passing the Arc - VirtualTable::view(view_name, columns, view) -} - -/// Virtual table wrapper for incremental views -#[derive(Clone, Debug)] -pub struct ViewVirtualTable { - pub view: Arc>, -} - -impl ViewVirtualTable { - pub fn best_index(&self) -> Result { - // Views don't use indexes - return a simple index info - Ok(turso_ext::IndexInfo { - idx_num: 0, - idx_str: None, - order_by_consumed: false, - estimated_cost: 1000000.0, - estimated_rows: 1000, - constraint_usages: Vec::new(), - }) - } - - pub fn open(&self, conn: Arc) -> crate::Result { - // Views are now populated during schema parsing (in parse_schema_rows) - // so we just get the current data from the view. - - let view = self.view.lock().map_err(|_| { - LimboError::InternalError("Failed to lock view for reading".to_string()) - })?; - - let tx_states = conn.view_transaction_states.borrow(); - let tx_state = tx_states.get(view.name()); - - let data: Vec<(i64, Vec)> = view.current_data(tx_state); - Ok(ViewVirtualTableCursor { - data, - current_pos: 0, - }) - } -} - -/// Cursor for iterating over view data -pub struct ViewVirtualTableCursor { - data: Vec<(i64, Vec)>, - current_pos: usize, -} - -impl ViewVirtualTableCursor { - pub fn next(&mut self) -> crate::Result { - if self.current_pos < self.data.len() { - self.current_pos += 1; - Ok(self.current_pos < self.data.len()) - } else { - Ok(false) - } - } - - pub fn rowid(&self) -> i64 { - if self.current_pos < self.data.len() { - self.data[self.current_pos].0 - } else { - -1 - } - } - - pub fn column(&self, column: usize) -> crate::Result { - if self.current_pos >= self.data.len() { - return Ok(Value::Null); - } - - let (_row_key, values) = &self.data[self.current_pos]; - - // Return the value at the requested column index - if let Some(value) = values.get(column) { - Ok(value.clone()) - } else { - Ok(Value::Null) - } - } - - pub fn filter(&mut self, _args: Vec) -> crate::Result { - // Reset to beginning for new filter - self.current_pos = 0; - Ok(!self.data.is_empty()) - } -} diff --git a/docs/manual.md b/docs/manual.md index 618182bc0..63cbf12fe 100644 --- a/docs/manual.md +++ b/docs/manual.md @@ -89,6 +89,7 @@ hello, world Turso aims towards full SQLite compatibility but has the following limitations: +* Query result ordering is not guaranteed to be the same (see [#2964](https://github.com/tursodatabase/turso/issues/2964) for more discussion) * No multi-process access * No multi-threading * No savepoints diff --git a/fuzz/Cargo.lock b/fuzz/Cargo.lock index 4730408e4..3e8691c4b 100644 --- a/fuzz/Cargo.lock +++ b/fuzz/Cargo.lock @@ -2,6 +2,51 @@ # It is not intended for manual 
editing. version = 4 +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + +[[package]] +name = "aegis" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2a1c2f54793fee13c334f70557d3bd6a029a9d453ebffd82ba571d139064da8" +dependencies = [ + "cc", + "softaes", +] + +[[package]] +name = "aes" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -63,6 +108,26 @@ version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +[[package]] +name = "bytemuck" +version = "1.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f154e572231cb6ba2bd1176980827e3d5dc04cc183a75dea38109fbdd672d29" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "cc" version = "1.2.16" @@ -100,6 +165,16 @@ dependencies = [ "windows-link", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -115,6 +190,15 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + [[package]] name = "crossbeam-epoch" version = "0.9.18" @@ -140,6 +224,26 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "rand_core", + "typenum", +] + +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", +] + [[package]] name = "derive_arbitrary" version = "1.4.1" @@ -190,6 +294,12 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + [[package]] name = "foldhash" version = "0.1.4" @@ -205,6 +315,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + [[package]] name = "getrandom" version = "0.2.15" @@ -228,6 +348,16 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "ghash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" +dependencies = [ + "opaque-debug", + "polyval", +] + [[package]] name = "git2" version = "0.20.0" @@ -449,6 +579,15 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "inout" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +dependencies = [ + "generic-array", +] + [[package]] name = "jobserver" version = "0.1.32" @@ -479,9 +618,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.170" +version = "0.2.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" [[package]] name = "libfuzzer-sys" @@ -521,21 +660,11 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" -[[package]] -name = "libmimalloc-sys" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec9d6fac27761dabcd4ee73571cdb06b7022dc99089acbe5435691edffaac0f4" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "libsqlite3-sys" -version = "0.32.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb8270bb4060bd76c6e96f20c52d80620f1d82a3470885694e41e0f81ef6fe7" +checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" dependencies = [ "cc", "pkg-config", @@ -584,9 +713,9 @@ checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -626,15 +755,6 @@ dependencies = [ "syn", ] -[[package]] -name = "mimalloc" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "995942f432bbb4822a7e9c3faa87a695185b0d09273ba85f097b54f4e458f2af" -dependencies = [ - "libmimalloc-sys", -] - [[package]] name = "num-traits" version = "0.2.19" @@ -651,10 +771,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde51589ab56b20a6f686b2c68f7a0bd6add753d697abf720d63f8db3ab7b1ad" [[package]] -name = "parking_lot" -version = "0.12.3" +name = "opaque-debug" +version = "0.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + +[[package]] +name = "pack1" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6e7cd9bd638dc2c831519a0caa1c006cab771a92b1303403a8322773c5b72d6" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "parking_lot" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -662,9 +797,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -685,45 +820,6 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" -[[package]] -name = "phf" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" -dependencies = [ - "phf_shared", -] - -[[package]] -name = "phf_codegen" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" -dependencies = [ - "phf_generator", - "phf_shared", -] - -[[package]] -name = "phf_generator" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" -dependencies = [ - "phf_shared", - "rand", -] - -[[package]] -name = "phf_shared" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" -dependencies = [ - "siphasher", - "uncased", -] - [[package]] name = "pin-project-lite" version = "0.2.16" @@ -751,6 +847,18 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "polyval" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash", +] + [[package]] name = "ppv-lite86" version = "0.2.21" @@ -848,9 +956,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rusqlite" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e34486da88d8e051c7c0e23c3f15fd806ea8546260aa2fec247e97242ec143" +checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" dependencies = [ "bitflags", "fallible-iterator", @@ -931,16 +1039,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] -name = "siphasher" -version = "1.0.1" +name = "smallvec" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = 
"67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] -name = "smallvec" -version = "1.14.0" +name = "softaes" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "fef461faaeb36c340b6c887167a9054a034f6acfc50a014ead26a02b4356b3de" [[package]] name = "stable_deref_trait" @@ -970,6 +1078,12 @@ dependencies = [ "syn", ] +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "syn" version = "2.0.100" @@ -992,6 +1106,19 @@ dependencies = [ "syn", ] +[[package]] +name = "tempfile" +version = "3.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e" +dependencies = [ + "fastrand", + "getrandom 0.3.1", + "once_cell", + "rustix 1.0.7", + "windows-sys", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -1055,10 +1182,14 @@ dependencies = [ [[package]] name = "turso_core" -version = "0.1.1" +version = "0.1.5-pre.3" dependencies = [ + "aegis", + "aes", + "aes-gcm", "bitflags", "built", + "bytemuck", "cfg_block", "chrono", "crossbeam-skiplist", @@ -1066,10 +1197,11 @@ dependencies = [ "getrandom 0.2.15", "hex", "julian_day_converter", + "libc", "libloading", "libm", "miette", - "mimalloc", + "pack1", "parking_lot", "paste", "polling", @@ -1080,10 +1212,12 @@ dependencies = [ "ryu", "strum", "strum_macros", + "tempfile", "thiserror", "tracing", "turso_ext", "turso_macros", + "turso_parser", "turso_sqlite3_parser", "uncased", "uuid", @@ -1091,7 +1225,7 @@ dependencies = [ [[package]] name = "turso_ext" -version = "0.1.1" +version = "0.1.5-pre.3" dependencies = [ "chrono", "getrandom 0.3.1", @@ -1100,16 +1234,28 @@ dependencies = [ [[package]] name = "turso_macros" -version = "0.1.1" +version = "0.1.5-pre.3" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "turso_parser" +version = "0.1.5-pre.3" +dependencies = [ + "bitflags", + "miette", + "strum", + "strum_macros", + "thiserror", + "turso_macros", +] + [[package]] name = "turso_sqlite3_parser" -version = "0.1.1" +version = "0.1.5-pre.3" dependencies = [ "bitflags", "cc", @@ -1118,14 +1264,17 @@ dependencies = [ "log", "memchr", "miette", - "phf", - "phf_codegen", - "phf_shared", + "smallvec", "strum", "strum_macros", - "uncased", ] +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + [[package]] name = "uncased" version = "0.9.10" @@ -1147,6 +1296,16 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "url" version = "2.5.4" diff --git a/fuzz/fuzz_targets/expression.rs b/fuzz/fuzz_targets/expression.rs index 005d2b42b..a694f3b12 100644 --- a/fuzz/fuzz_targets/expression.rs +++ b/fuzz/fuzz_targets/expression.rs @@ -4,7 +4,6 @@ use std::{error::Error, num::NonZero, sync::Arc}; use arbitrary::Arbitrary; use 
libfuzzer_sys::{fuzz_target, Corpus}; -use turso_core::IO as _; macro_rules! str_enum { ($vis:vis enum $name:ident { $($variant:ident => $value:literal),*, }) => { @@ -31,15 +30,14 @@ macro_rules! str_enum { str_enum! { enum Binary { - // TODO: Not compatible yet - // Equal => "=", - // Is => "IS", - // Concat => "||", - // NotEqual => "<>", - // GreaterThan => ">", - // GreaterThanOrEqual => ">=", - // LessThan => "<", - // LessThanOrEqual => "<=", + Equal => "=", + Is => "IS", + Concat => "||", + NotEqual => "<>", + GreaterThan => ">", + GreaterThanOrEqual => ">=", + LessThan => "<", + LessThanOrEqual => "<=", RightShift => ">>", LeftShift => "<<", BitwiseAnd => "&", @@ -169,7 +167,7 @@ fn do_fuzz(expr: Expr) -> Result> { let sql = format!("SELECT {}", expr.query); // FIX: `turso_core::translate::expr::translate_expr` causes a overflow if this is any higher. - if expr.depth > 140 { + if expr.depth > 100 { return Ok(Corpus::Reject); } diff --git a/parser/src/lexer.rs b/parser/src/lexer.rs index e644b8c95..5e9e809b8 100644 --- a/parser/src/lexer.rs +++ b/parser/src/lexer.rs @@ -207,7 +207,7 @@ impl<'a> Iterator for Lexer<'a> { b'&' => Some(Ok(self.eat_one_token(TokenType::TK_BITAND))), b'~' => Some(Ok(self.eat_one_token(TokenType::TK_BITNOT))), b'\'' | b'"' | b'`' => Some(self.mark(|l| l.eat_lit_or_id())), - b'.' => Some(self.mark(|l| l.eat_dot_or_frac())), + b'.' => Some(self.mark(|l| l.eat_dot_or_frac(false))), b'0'..=b'9' => Some(self.mark(|l| l.eat_number())), b'[' => Some(self.mark(|l| l.eat_bracket())), b'?' | b'$' | b'@' | b'#' | b':' => Some(self.mark(|l| l.eat_var())), @@ -585,12 +585,14 @@ impl<'a> Lexer<'a> { }) } - fn eat_dot_or_frac(&mut self) -> Result> { + fn eat_dot_or_frac(&mut self, has_digit_prefix: bool) -> Result> { let start = self.offset; self.eat_and_assert(|b| b == b'.'); match self.peek() { - Some(b) if b.is_ascii_digit() => { + Some(b) + if b.is_ascii_digit() || (has_digit_prefix && b.eq_ignore_ascii_case(&b'e')) => + { self.eat_while_number_digit()?; match self.peek() { Some(b'e') | Some(b'E') => { @@ -677,7 +679,7 @@ impl<'a> Lexer<'a> { self.eat_while_number_digit()?; match self.peek() { Some(b'.') => { - self.eat_dot_or_frac()?; + self.eat_dot_or_frac(true)?; Ok(Token { value: &self.input[start..self.offset], token_type: Some(TokenType::TK_FLOAT), @@ -1239,6 +1241,14 @@ mod tests { token_type: Some(TokenType::TK_ID), }, ), + // issue 2933 + ( + b"1.e5".as_slice(), + Token { + value: b"1.e5".as_slice(), + token_type: Some(TokenType::TK_FLOAT), + }, + ), ]; for (input, expected) in test_cases { @@ -1513,6 +1523,24 @@ mod tests { }, ], ), + // issue 2933 + ( + b"u.email".as_slice(), + vec![ + Token { + value: b"u".as_slice(), + token_type: Some(TokenType::TK_ID), + }, + Token { + value: b".".as_slice(), + token_type: Some(TokenType::TK_DOT), + }, + Token { + value: b"email".as_slice(), + token_type: Some(TokenType::TK_ID), + }, + ], + ), ]; for (input, expected_tokens) in test_cases { diff --git a/scripts/merge-pr.py b/scripts/merge-pr.py index 3aa48c683..21e774580 100755 --- a/scripts/merge-pr.py +++ b/scripts/merge-pr.py @@ -97,14 +97,46 @@ def wrap_text(text, width=72): return "\n".join(wrapped_lines) +def check_pr_status(pr_number): + """Check the status of all checks for a PR + + Returns a tuple of (has_failing, has_pending) indicating if there are + any failing or pending checks respectively. 
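+
+    A check counts as pending when it has a startedAt timestamp but no
+    completedAt yet, or when its reported state is IN_PROGRESS.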
+ """ + output, error, returncode = run_command(f"gh pr checks {pr_number} --json state,name,startedAt,completedAt") + if returncode != 0: + print(f"Warning: Unable to get PR check status: {error}") + return False, False + + checks_data = json.loads(output) + if not checks_data: + return False, False + + has_failing = any(check.get("state") == "FAILURE" for check in checks_data) + has_pending = any( + check.get("startedAt") and not check.get("completedAt") or check.get("state") == "IN_PROGRESS" + for check in checks_data + ) + return has_failing, has_pending + + def merge_remote(pr_number: int, commit_message: str, commit_title: str): - output, error, returncode = run_command(f"gh pr checks {pr_number} --json state") - if returncode == 0: - checks_data = json.loads(output) - if checks_data and any(check.get("state") == "FAILURE" for check in checks_data): - print("Warning: Some checks are failing") - if input("Do you want to proceed with the merge? (y/N): ").strip().lower() != "y": - exit(0) + has_failing, has_pending = check_pr_status(pr_number) + + prompt_needed = False + warning_msg = "" + + if has_failing: + prompt_needed = True + warning_msg = "Warning: Some checks are failing" + elif has_pending: + prompt_needed = True + warning_msg = "Warning: Some checks are still running" + + if prompt_needed: + print(warning_msg) + if input("Do you want to proceed with the merge? (y/N): ").strip().lower() != "y": + exit(0) # Create a temporary file for the commit message with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as temp_file: @@ -131,6 +163,23 @@ def merge_remote(pr_number: int, commit_message: str, commit_title: str): def merge_local(pr_number: int, commit_message: str): + has_failing, has_pending = check_pr_status(pr_number) + + prompt_needed = False + warning_msg = "" + + if has_failing: + prompt_needed = True + warning_msg = "Warning: Some checks are failing" + elif has_pending: + prompt_needed = True + warning_msg = "Warning: Some checks are still running" + + if prompt_needed: + print(warning_msg) + if input("Do you want to proceed with the merge? 
(y/N): ").strip().lower() != "y": + exit(0) + current_branch, _, _ = run_command("git branch --show-current") print(f"Fetching PR #{pr_number}...") diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index fcfa07ed4..d873f3b3f 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -1,11 +1,11 @@ use std::{ - collections::HashSet, fmt::{Debug, Display}, path::Path, sync::Arc, vec, }; +use indexmap::IndexSet; use serde::{Deserialize, Serialize}; use sql_generation::{ @@ -166,13 +166,13 @@ impl Interactions { } impl Interactions { - pub(crate) fn dependencies(&self) -> HashSet { + pub(crate) fn dependencies(&self) -> IndexSet { match self { Interactions::Property(property) => { property .interactions() .iter() - .fold(HashSet::new(), |mut acc, i| match i { + .fold(IndexSet::new(), |mut acc, i| match i { Interaction::Query(q) => { acc.extend(q.dependencies()); acc @@ -181,7 +181,7 @@ impl Interactions { }) } Interactions::Query(query) => query.dependencies(), - Interactions::Fault(_) => HashSet::new(), + Interactions::Fault(_) => IndexSet::new(), } } diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 690426346..20adbbe9d 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -1,6 +1,7 @@ -use std::{collections::HashSet, fmt::Display}; +use std::fmt::Display; use anyhow::Context; +use indexmap::IndexSet; use itertools::Itertools; use serde::{Deserialize, Serialize}; use sql_generation::model::{ @@ -32,19 +33,19 @@ pub enum Query { } impl Query { - pub fn dependencies(&self) -> HashSet { + pub fn dependencies(&self) -> IndexSet { match self { Query::Select(select) => select.dependencies(), - Query::Create(_) => HashSet::new(), + Query::Create(_) => IndexSet::new(), Query::Insert(Insert::Select { table, .. }) | Query::Insert(Insert::Values { table, .. }) | Query::Delete(Delete { table, .. }) | Query::Update(Update { table, .. }) - | Query::Drop(Drop { table, .. }) => HashSet::from_iter([table.clone()]), + | Query::Drop(Drop { table, .. }) => IndexSet::from_iter([table.clone()]), Query::CreateIndex(CreateIndex { table_name, .. 
}) => { - HashSet::from_iter([table_name.clone()]) + IndexSet::from_iter([table_name.clone()]) } - Query::Begin(_) | Query::Commit(_) | Query::Rollback(_) => HashSet::new(), + Query::Begin(_) | Query::Commit(_) | Query::Rollback(_) => IndexSet::new(), } } pub fn uses(&self) -> Vec { diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index f66cf967d..5a3c77590 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -185,59 +185,6 @@ impl SimulatorEnv { ) -> Self { let mut rng = ChaCha8Rng::seed_from_u64(seed); - let total = 100.0; - - let mut create_percent = 0.0; - let mut create_index_percent = 0.0; - let mut drop_percent = 0.0; - let mut delete_percent = 0.0; - let mut update_percent = 0.0; - - let read_percent = rng.random_range(0.0..=total); - let write_percent = total - read_percent; - - if !cli_opts.disable_create { - // Create percent should be 5-15% of the write percent - create_percent = rng.random_range(0.05..=0.15) * write_percent; - } - if !cli_opts.disable_create_index { - // Create indexpercent should be 2-5% of the write percent - create_index_percent = rng.random_range(0.02..=0.05) * write_percent; - } - if !cli_opts.disable_drop { - // Drop percent should be 2-5% of the write percent - drop_percent = rng.random_range(0.02..=0.05) * write_percent; - } - if !cli_opts.disable_delete { - // Delete percent should be 10-20% of the write percent - delete_percent = rng.random_range(0.1..=0.2) * write_percent; - } - if !cli_opts.disable_update { - // Update percent should be 10-20% of the write percent - // TODO: freestyling the percentage - update_percent = rng.random_range(0.1..=0.2) * write_percent; - } - - let write_percent = write_percent - - create_percent - - create_index_percent - - delete_percent - - drop_percent - - update_percent; - - let summed_total: f64 = read_percent - + write_percent - + create_percent - + create_index_percent - + drop_percent - + update_percent - + delete_percent; - - let abs_diff = (summed_total - total).abs(); - if abs_diff > 0.0001 { - panic!("Summed total {summed_total} is not equal to total {total}"); - } - let opts = SimulatorOpts { seed, ticks: rng.random_range(cli_opts.minimum_tests..=cli_opts.maximum_tests), diff --git a/sql_generation/Cargo.toml b/sql_generation/Cargo.toml index cf82bb036..0d8b6a097 100644 --- a/sql_generation/Cargo.toml +++ b/sql_generation/Cargo.toml @@ -21,6 +21,7 @@ anyhow = { workspace = true } tracing = { workspace = true } schemars = { workspace = true } garde = { workspace = true, features = ["derive", "serde"] } +indexmap = { version = "2.11.0" } [dev-dependencies] rand_chacha = "0.9.0" diff --git a/sql_generation/generation/predicate/binary.rs b/sql_generation/generation/predicate/binary.rs index a5901a9f8..5047ff706 100644 --- a/sql_generation/generation/predicate/binary.rs +++ b/sql_generation/generation/predicate/binary.rs @@ -259,16 +259,16 @@ impl SimplePredicate { table: &T, row: &[SimValue], ) -> Self { - // Pick a random column - let columns = table.columns().collect::>(); - let column_index = rng.random_range(0..columns.len()); - let column = columns[column_index]; - let column_value = &row[column_index]; - let table_name = column.table_name; // Avoid creation of NULLs if row.is_empty() { return SimplePredicate(Predicate(Expr::Literal(SimValue::TRUE.into()))); } + // Pick a random column + let columns = table.columns().collect::>(); + let column_index = rng.random_range(0..row.len()); + let column = columns[column_index]; + let column_value = &row[column_index]; + let 
table_name = column.table_name; let expr = one_of( vec![ @@ -317,16 +317,16 @@ impl SimplePredicate { table: &T, row: &[SimValue], ) -> Self { - let columns = table.columns().collect::>(); - // Pick a random column - let column_index = rng.random_range(0..columns.len()); - let column = columns[column_index]; - let column_value = &row[column_index]; - let table_name = column.table_name; // Avoid creation of NULLs if row.is_empty() { return SimplePredicate(Predicate(Expr::Literal(SimValue::FALSE.into()))); } + let columns = table.columns().collect::>(); + // Pick a random column + let column_index = rng.random_range(0..row.len()); + let column = columns[column_index]; + let column_value = &row[column_index]; + let table_name = column.table_name; let expr = one_of( vec![ diff --git a/sql_generation/generation/predicate/unary.rs b/sql_generation/generation/predicate/unary.rs index bfcd1cff0..1cc0e0d24 100644 --- a/sql_generation/generation/predicate/unary.rs +++ b/sql_generation/generation/predicate/unary.rs @@ -124,12 +124,11 @@ impl SimplePredicate { pub fn true_unary( rng: &mut R, context: &C, - table: &T, + _table: &T, row: &[SimValue], ) -> Self { - let columns = table.columns().collect::>(); // Pick a random column - let column_index = rng.random_range(0..columns.len()); + let column_index = rng.random_range(0..row.len()); let column_value = &row[column_index]; let num_retries = row.len(); // Avoid creation of NULLs @@ -191,18 +190,17 @@ impl SimplePredicate { pub fn false_unary( rng: &mut R, context: &C, - table: &T, + _table: &T, row: &[SimValue], ) -> Self { - let columns = table.columns().collect::>(); - // Pick a random column - let column_index = rng.random_range(0..columns.len()); - let column_value = &row[column_index]; - let num_retries = row.len(); // Avoid creation of NULLs if row.is_empty() { return SimplePredicate(Predicate(Expr::Literal(SimValue::FALSE.into()))); } + // Pick a random column + let column_index = rng.random_range(0..row.len()); + let column_value = &row[column_index]; + let num_retries = row.len(); let expr = backtrack( vec![ // ( diff --git a/sql_generation/generation/query.rs b/sql_generation/generation/query.rs index e2a36ddb9..a0e0e47b0 100644 --- a/sql_generation/generation/query.rs +++ b/sql_generation/generation/query.rs @@ -10,6 +10,7 @@ use crate::model::query::select::{ use crate::model::query::update::Update; use crate::model::query::{Create, CreateIndex, Delete, Drop, Insert, Select}; use crate::model::table::{JoinTable, JoinType, JoinedTable, SimValue, Table, TableContext}; +use indexmap::IndexSet; use itertools::Itertools; use rand::Rng; use turso_parser::ast::{Expr, SortOrder}; @@ -104,7 +105,7 @@ impl Arbitrary for SelectInner { if order_by_col_count == 0 { return None; } - let mut col_names = std::collections::HashSet::new(); + let mut col_names = IndexSet::new(); let mut order_by_cols = Vec::new(); while order_by_cols.len() < order_by_col_count { let table = pick(&order_by_table_candidates, rng); diff --git a/sql_generation/generation/table.rs b/sql_generation/generation/table.rs index 0ea821d9b..ce0ff97f4 100644 --- a/sql_generation/generation/table.rs +++ b/sql_generation/generation/table.rs @@ -1,5 +1,4 @@ -use std::collections::HashSet; - +use indexmap::IndexSet; use rand::Rng; use turso_core::Value; @@ -28,7 +27,7 @@ impl Arbitrary for Table { } else { rng.random_range(opts.column_range) } as usize; - let mut column_set = HashSet::with_capacity(column_size); + let mut column_set = IndexSet::with_capacity(column_size); for col in 
std::iter::repeat_with(|| Column::arbitrary(rng, context)) { column_set.insert(col); if column_set.len() == column_size { diff --git a/sql_generation/model/query/select.rs b/sql_generation/model/query/select.rs index ea4a375db..f9c92cc8f 100644 --- a/sql_generation/model/query/select.rs +++ b/sql_generation/model/query/select.rs @@ -1,6 +1,7 @@ -use std::{collections::HashSet, fmt::Display}; +use std::fmt::Display; pub use ast::Distinctness; +use indexmap::IndexSet; use itertools::Itertools; use serde::{Deserialize, Serialize}; use turso_parser::ast::{ @@ -105,12 +106,12 @@ impl Select { } } - pub fn dependencies(&self) -> HashSet { + pub fn dependencies(&self) -> IndexSet { if self.body.select.from.is_none() { - return HashSet::new(); + return IndexSet::new(); } let from = self.body.select.from.as_ref().unwrap(); - let mut tables = HashSet::new(); + let mut tables = IndexSet::new(); tables.insert(from.table.clone()); tables.extend(from.dependencies()); @@ -228,7 +229,7 @@ impl FromClause { let mut join_table = JoinTable { tables: vec![first_table.clone()], - rows: Vec::new(), + rows: first_table.rows.clone(), }; for join in &self.joins { diff --git a/sqlite3/tests/compat/mod.rs b/sqlite3/tests/compat/mod.rs index 52ed3d8fa..2608a9cdc 100644 --- a/sqlite3/tests/compat/mod.rs +++ b/sqlite3/tests/compat/mod.rs @@ -149,10 +149,7 @@ mod tests { fn test_prepare_misuse() { unsafe { let mut db = ptr::null_mut(); - assert_eq!( - sqlite3_open(c"../testing/testing_clone.db".as_ptr(), &mut db), - SQLITE_OK - ); + assert_eq!(sqlite3_open(c":memory:".as_ptr(), &mut db), SQLITE_OK); let mut stmt = ptr::null_mut(); assert_eq!( @@ -165,82 +162,6 @@ mod tests { } } - #[test] - fn test_wal_checkpoint() { - unsafe { - // Test with valid db - let mut db = ptr::null_mut(); - assert_eq!( - sqlite3_open(c"../testing/testing_clone.db".as_ptr(), &mut db), - SQLITE_OK - ); - assert_eq!(sqlite3_wal_checkpoint(db, ptr::null()), SQLITE_OK); - assert_eq!(sqlite3_close(db), SQLITE_OK); - } - } - - #[test] - fn test_wal_checkpoint_v2() { - unsafe { - // Test with valid db - let mut db = ptr::null_mut(); - assert_eq!( - sqlite3_open(c"../testing/testing_clone.db".as_ptr(), &mut db), - SQLITE_OK - ); - - let mut log_size = 0; - let mut checkpoint_count = 0; - - // Test different checkpoint modes - assert_eq!( - sqlite3_wal_checkpoint_v2( - db, - ptr::null(), - SQLITE_CHECKPOINT_PASSIVE, - &mut log_size, - &mut checkpoint_count - ), - SQLITE_OK - ); - - // TODO: uncomment when SQLITE_CHECKPOINT_FULL is supported - // assert_eq!( - // sqlite3_wal_checkpoint_v2( - // db, - // ptr::null(), - // SQLITE_CHECKPOINT_FULL, - // &mut log_size, - // &mut checkpoint_count - // ), - // SQLITE_OK - // ); - - assert_eq!( - sqlite3_wal_checkpoint_v2( - db, - ptr::null(), - SQLITE_CHECKPOINT_RESTART, - &mut log_size, - &mut checkpoint_count - ), - SQLITE_OK - ); - - assert_eq!( - sqlite3_wal_checkpoint_v2( - db, - ptr::null(), - SQLITE_CHECKPOINT_TRUNCATE, - &mut log_size, - &mut checkpoint_count - ), - SQLITE_OK - ); - - assert_eq!(sqlite3_close(db), SQLITE_OK); - } - } #[test] fn test_sqlite3_bind_int() { unsafe { diff --git a/stress/main.rs b/stress/main.rs index 2a71de13d..f1a335fae 100644 --- a/stress/main.rs +++ b/stress/main.rs @@ -204,7 +204,7 @@ impl ArbitrarySchema { .collect::>() .join(","); - format!("CREATE TABLE {} ({});", table.name, columns) + format!("CREATE TABLE IF NOT EXISTS {} ({});", table.name, columns) }) .collect() } @@ -476,9 +476,15 @@ async fn main() -> Result<(), Box> { 
tempfile.path().to_string_lossy().to_string() }; + let vfs_option = opts.vfs.clone(); + for thread in 0..opts.nr_threads { let db_file = db_file.clone(); - let db = Arc::new(Mutex::new(Builder::new_local(&db_file).build().await?)); + let mut builder = Builder::new_local(&db_file); + if let Some(ref vfs) = vfs_option { + builder = builder.with_io(vfs.clone()); + } + let db = Arc::new(Mutex::new(builder.build().await?)); let plan = plan.clone(); let conn = db.lock().await.connect()?; @@ -507,6 +513,7 @@ async fn main() -> Result<(), Box> { let nr_iterations = opts.nr_iterations; let db = db.clone(); + let vfs_for_task = vfs_option.clone(); let handle = tokio::spawn(async move { let mut conn = db.lock().await.connect()?; @@ -518,7 +525,11 @@ async fn main() -> Result<(), Box> { } // Reopen the database let mut db_guard = db.lock().await; - *db_guard = Builder::new_local(&db_file).build().await?; + let mut builder = Builder::new_local(&db_file); + if let Some(ref vfs) = vfs_for_task { + builder = builder.with_io(vfs.clone()); + } + *db_guard = builder.build().await?; conn = db_guard.connect()?; } else if gen_bool(0.01) { // Reconnect to the database @@ -559,13 +570,19 @@ async fn main() -> Result<(), Box> { const INTEGRITY_CHECK_INTERVAL: usize = 100; if query_index % INTEGRITY_CHECK_INTERVAL == 0 { let mut res = conn.query("PRAGMA integrity_check", ()).await.unwrap(); - if let Some(row) = res.next().await? { - let value = row.get_value(0).unwrap(); - if value != "ok".into() { - panic!("integrity check failed: {:?}", value); + match res.next().await { + Ok(Some(row)) => { + let value = row.get_value(0).unwrap(); + if value != "ok".into() { + panic!("integrity check failed: {:?}", value); + } + } + Ok(None) => { + panic!("integrity check failed: no rows"); + } + Err(e) => { + println!("Error performing integrity check: {e}"); } - } else { - panic!("integrity check failed: no rows"); } } } diff --git a/stress/opts.rs b/stress/opts.rs index aed59529f..82802b2dd 100644 --- a/stress/opts.rs +++ b/stress/opts.rs @@ -55,4 +55,11 @@ pub struct Opts { /// Database file #[clap(short = 'd', long, help = "database file")] pub db_file: Option, + + /// Select VFS + #[clap( + long, + help = "Select VFS. 
options are io_uring (if feature enabled), memory, and syscall" + )] + pub vfs: Option, } diff --git a/sync/javascript/.gitignore b/sync/javascript/.gitignore index adc8d7dbc..dc7770d6a 100644 --- a/sync/javascript/.gitignore +++ b/sync/javascript/.gitignore @@ -134,3 +134,5 @@ Cargo.lock *-draft *-synced *-info + +package.native.json diff --git a/sync/javascript/Makefile b/sync/javascript/Makefile new file mode 100644 index 000000000..9bca855b1 --- /dev/null +++ b/sync/javascript/Makefile @@ -0,0 +1,20 @@ +pack-native: + npm publish --dry-run && npm pack +pack-browser: + cp package.json package.native.json + cp package.browser.json package.json + npm publish --dry-run && npm pack; cp package.native.json package.json + +publish-native: + npm publish --access public +publish-browser: + cp package.json package.native.json + cp package.browser.json package.json + npm publish --access public; cp package.native.json package.json + +publish-native-next: + npm publish --tag next --access public +publish-browser-next: + cp package.json package.native.json + cp package.browser.json package.json + npm publish --tag next --access public; cp package.native.json package.json diff --git a/sync/javascript/package.browser.json b/sync/javascript/package.browser.json new file mode 100644 index 000000000..beddc6066 --- /dev/null +++ b/sync/javascript/package.browser.json @@ -0,0 +1,57 @@ +{ + "name": "@tursodatabase/sync-browser", + "version": "0.1.5-pre.2", + "repository": { + "type": "git", + "url": "https://github.com/tursodatabase/turso" + }, + "description": "Sync engine for the Turso database library specifically for browser/web environment", + "module": "./dist/sync_engine.js", + "main": "./dist/sync_engine.js", + "type": "module", + "exports": "./dist/sync_engine.js", + "files": [ + "browser.js", + "dist/**" + ], + "types": "./dist/sync_engine.d.ts", + "napi": { + "binaryName": "turso-sync-js", + "targets": [ + "wasm32-wasip1-threads" + ] + }, + "license": "MIT", + "devDependencies": { + "@napi-rs/cli": "^3.0.4", + "@napi-rs/wasm-runtime": "^1.0.1", + "@types/node": "^24.2.0", + "ava": "^6.0.1", + "typescript": "^5.9.2" + }, + "ava": { + "timeout": "3m" + }, + "engines": { + "node": ">= 10" + }, + "scripts": { + "artifacts": "napi artifacts", + "build": "npm exec tsc && napi build --platform --release --esm", + "build:debug": "npm exec tsc && napi build --platform", + "prepublishOnly": "npm exec tsc && napi prepublish -t npm --skip-optional-publish", + "test": "true", + "universal": "napi universalize", + "version": "napi version" + }, + "packageManager": "yarn@4.9.2", + "imports": { + "#entry-point": { + "types": "./index.d.ts", + "browser": "./browser.js" + } + }, + "dependencies": { + "@tursodatabase/database": "~0.1.4-pre.5" + } +} diff --git a/sync/javascript/package.json b/sync/javascript/package.json index 3069184d3..cd341bf07 100644 --- a/sync/javascript/package.json +++ b/sync/javascript/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync", - "version": "0.1.5-pre.2", + "version": "0.1.5-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -60,4 +60,4 @@ "dependencies": { "@tursodatabase/database": "~0.1.4-pre.5" } -} +} \ No newline at end of file diff --git a/testing/affinity.test b/testing/affinity.test new file mode 100755 index 000000000..9a08003cd --- /dev/null +++ b/testing/affinity.test @@ -0,0 +1,14 @@ +#!/usr/bin/env tclsh + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_execsql_test_on_specific_db {:memory:} 
affinity { + CREATE TABLE t1 (c INTEGER); + INSERT INTO t1 VALUES ('1'); + INSERT INTO t1 VALUES ('1a'); + SELECT c, typeof(c) FROM t1; +} { + {1|integer} + {1a|text} +} diff --git a/testing/gen-bigass-database.py b/testing/gen-bigass-database.py new file mode 100644 index 000000000..5e96fa661 --- /dev/null +++ b/testing/gen-bigass-database.py @@ -0,0 +1,407 @@ +#!/usr/bin/env python3 +import random +import sqlite3 + +from faker import Faker + +conn = sqlite3.connect("testing/testing-bigass.db") +cursor = conn.cursor() + +fake = Faker() + +cursor.execute(""" + CREATE TABLE IF NOT EXISTS users ( + id INTEGER PRIMARY KEY, + first_name TEXT, + last_name TEXT, + email TEXT, + phone_number TEXT, + address TEXT, + city TEXT, + state TEXT, + zipcode TEXT, + age INTEGER, + created_at TIMESTAMP, + updated_at TIMESTAMP + ) +""") + +cursor.execute( + """ +CREATE TABLE products ( + id INTEGER PRIMARY KEY, + name TEXT, + price REAL + ); +""", + [], +) + +# specific products we already test for +cursor.execute("INSERT INTO products VALUES(1,'hat',79.0);") +cursor.execute("INSERT INTO products VALUES(2,'cap',82.0);") +cursor.execute("INSERT INTO products VALUES(3,'shirt',18.0);") +cursor.execute("INSERT INTO products VALUES(4,'sweater',25.0);") +cursor.execute("INSERT INTO products VALUES(5,'sweatshirt',74.0);") +cursor.execute("INSERT INTO products VALUES(6,'shorts',70.0);") +cursor.execute("INSERT INTO products VALUES(7,'jeans',78.0);") +cursor.execute("INSERT INTO products VALUES(8,'sneakers',82.0);") +cursor.execute("INSERT INTO products VALUES(9,'boots',1.0);") +cursor.execute("INSERT INTO products VALUES(10,'coat',33.0);") +cursor.execute("INSERT INTO products VALUES(11,'accessories',81.0);") + +for i in range(12, 12001): + name = fake.word().title() + price = round(random.uniform(5.0, 999.99), 2) + cursor.execute("INSERT INTO products (id, name, price) VALUES (?, ?, ?)", [i, name, price]) + +cursor.execute(""" + CREATE TABLE IF NOT EXISTS orders ( + id INTEGER PRIMARY KEY, + user_id INTEGER, + order_date TIMESTAMP, + total_amount REAL, + status TEXT, + shipping_address TEXT, + shipping_city TEXT, + shipping_state TEXT, + shipping_zip TEXT, + payment_method TEXT, + tracking_number TEXT, + notes TEXT, + FOREIGN KEY (user_id) REFERENCES users(id) + ) +""") + +cursor.execute(""" + CREATE TABLE IF NOT EXISTS order_items ( + id INTEGER PRIMARY KEY, + order_id INTEGER, + product_id INTEGER, + quantity INTEGER, + unit_price REAL, + discount REAL, + tax REAL, + total_price REAL, + FOREIGN KEY (order_id) REFERENCES orders(id), + FOREIGN KEY (product_id) REFERENCES products(id) + ) +""") + +cursor.execute(""" + CREATE TABLE IF NOT EXISTS reviews ( + id INTEGER PRIMARY KEY, + product_id INTEGER, + user_id INTEGER, + rating INTEGER, + title TEXT, + comment TEXT, + helpful_count INTEGER, + verified_purchase BOOLEAN, + review_date TIMESTAMP, + FOREIGN KEY (product_id) REFERENCES products(id), + FOREIGN KEY (user_id) REFERENCES users(id) + ) +""") + +cursor.execute(""" + CREATE TABLE IF NOT EXISTS inventory_transactions ( + id INTEGER PRIMARY KEY, + product_id INTEGER, + transaction_type TEXT, + quantity INTEGER, + previous_quantity INTEGER, + new_quantity INTEGER, + transaction_date TIMESTAMP, + reference_type TEXT, + reference_id INTEGER, + notes TEXT, + performed_by TEXT, + FOREIGN KEY (product_id) REFERENCES products(id) + ) +""") + +cursor.execute(""" + CREATE TABLE IF NOT EXISTS customer_support_tickets ( + id INTEGER PRIMARY KEY, + user_id INTEGER, + order_id INTEGER, + ticket_number TEXT, + category TEXT, 
+ priority TEXT, + status TEXT, + subject TEXT, + description TEXT, + created_at TIMESTAMP, + updated_at TIMESTAMP, + resolved_at TIMESTAMP, + assigned_to TEXT, + resolution_notes TEXT, + FOREIGN KEY (user_id) REFERENCES users(id), + FOREIGN KEY (order_id) REFERENCES orders(id) + ) +""") + +print("Generating users...") +users_data = [] +for i in range(15000): + if i % 1000 == 0: + print(f" Generated {i} users...") + + first_name = fake.first_name() + last_name = fake.last_name() + email = fake.email() + phone_number = fake.phone_number() + address = fake.street_address() + city = fake.city() + state = fake.state_abbr() + zipcode = fake.zipcode() + age = fake.random_int(min=18, max=85) + created_at = fake.date_time_between(start_date="-3y", end_date="now") + updated_at = fake.date_time_between(start_date=created_at, end_date="now") + + users_data.append( + (first_name, last_name, email, phone_number, address, city, state, zipcode, age, created_at, updated_at) + ) + +cursor.executemany( + """ + INSERT INTO users (first_name, last_name, email, phone_number, address, + city, state, zipcode, age, created_at, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) +""", + users_data, +) + +print("Generating orders...") +order_statuses = ["pending", "processing", "shipped", "delivered", "cancelled", "refunded"] +payment_methods = ["credit_card", "debit_card", "paypal", "apple_pay", "google_pay", "bank_transfer"] + +orders_data = [] +for i in range(20000): + if i % 2000 == 0: + print(f" Generated {i} orders...") + + user_id = random.randint(1, 15000) + order_date = fake.date_time_between(start_date="-1y", end_date="now") + total_amount = round(random.uniform(10.0, 5000.0), 2) + status = random.choice(order_statuses) + shipping_address = fake.street_address() + shipping_city = fake.city() + shipping_state = fake.state_abbr() + shipping_zip = fake.zipcode() + payment_method = random.choice(payment_methods) + tracking_number = fake.ean13() if status in ["shipped", "delivered"] else None + notes = fake.text(max_nb_chars=100) if random.random() < 0.3 else None + + orders_data.append( + ( + user_id, + order_date, + total_amount, + status, + shipping_address, + shipping_city, + shipping_state, + shipping_zip, + payment_method, + tracking_number, + notes, + ) + ) + +cursor.executemany( + """ + INSERT INTO orders (user_id, order_date, total_amount, status, shipping_address, + shipping_city, shipping_state, shipping_zip, payment_method, + tracking_number, notes) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) +""", + orders_data, +) + +print("Generating order items...") +order_items_data = [] +for order_id in range(1, 20001): + if order_id % 2000 == 0: + print(f" Generated items for {order_id} orders...") + + num_items = random.randint(1, 8) + for _ in range(num_items): + product_id = random.randint(1, 12000) + quantity = random.randint(1, 5) + unit_price = round(random.uniform(0.99, 999.99), 2) + discount = round(random.uniform(0, 0.3) * unit_price, 2) if random.random() < 0.2 else 0 + tax = round((unit_price - discount) * quantity * 0.08, 2) + total_price = round((unit_price - discount) * quantity + tax, 2) + + order_items_data.append((order_id, product_id, quantity, unit_price, discount, tax, total_price)) + +cursor.executemany( + """ + INSERT INTO order_items (order_id, product_id, quantity, unit_price, + discount, tax, total_price) + VALUES (?, ?, ?, ?, ?, ?, ?) 
+""", + order_items_data, +) + +print("Generating reviews...") +reviews_data = [] +for i in range(25000): + if i % 2500 == 0: + print(f" Generated {i} reviews...") + + product_id = random.randint(1, 12000) + user_id = random.randint(1, 15000) + rating = random.choices([1, 2, 3, 4, 5], weights=[5, 10, 15, 30, 40])[0] + title = fake.catch_phrase() + comment = fake.text(max_nb_chars=500) + helpful_count = random.randint(0, 100) + verified_purchase = random.choice([0, 1]) + review_date = fake.date_time_between(start_date="-1y", end_date="now") + + reviews_data.append((product_id, user_id, rating, title, comment, helpful_count, verified_purchase, review_date)) + +cursor.executemany( + """ + INSERT INTO reviews (product_id, user_id, rating, title, comment, + helpful_count, verified_purchase, review_date) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) +""", + reviews_data, +) + +print("Generating inventory transactions...") +transaction_types = ["purchase", "sale", "return", "adjustment", "transfer", "damage"] +reference_types = ["order", "return", "adjustment", "transfer", "manual"] + +inventory_data = [] +for i in range(18000): + if i % 2000 == 0: + print(f" Generated {i} inventory transactions...") + + product_id = random.randint(1, 12000) + transaction_type = random.choice(transaction_types) + quantity = random.randint(1, 100) + previous_quantity = random.randint(0, 1000) + new_quantity = ( + previous_quantity + quantity if transaction_type in ["purchase", "return"] else previous_quantity - quantity + ) + new_quantity = max(0, new_quantity) + transaction_date = fake.date_time_between(start_date="-6m", end_date="now") + reference_type = random.choice(reference_types) + reference_id = random.randint(1, 20000) if reference_type == "order" else random.randint(1, 1000) + notes = fake.text(max_nb_chars=100) if random.random() < 0.3 else None + performed_by = fake.name() + + inventory_data.append( + ( + product_id, + transaction_type, + quantity, + previous_quantity, + new_quantity, + transaction_date, + reference_type, + reference_id, + notes, + performed_by, + ) + ) + +cursor.executemany( + """ + INSERT INTO inventory_transactions (product_id, transaction_type, quantity, previous_quantity, + new_quantity, transaction_date, reference_type, reference_id, + notes, performed_by) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+""", + inventory_data, +) + +print("Generating customer support tickets...") +ticket_categories = ["shipping", "product", "payment", "account", "return", "technical", "other"] +priorities = ["low", "medium", "high", "urgent"] +ticket_statuses = ["open", "in_progress", "waiting_customer", "resolved", "closed"] + +tickets_data = [] +for i in range(10000): + if i % 1000 == 0: + print(f" Generated {i} support tickets...") + + user_id = random.randint(1, 15000) + order_id = random.randint(1, 20000) if random.random() < 0.7 else None + ticket_number = f"TICKET-{fake.random_int(min=100000, max=999999)}" + category = random.choice(ticket_categories) + priority = random.choice(priorities) + status = random.choice(ticket_statuses) + subject = fake.catch_phrase() + description = fake.text(max_nb_chars=1000) + created_at = fake.date_time_between(start_date="-6m", end_date="now") + updated_at = fake.date_time_between(start_date=created_at, end_date="now") + resolved_at = ( + fake.date_time_between(start_date=updated_at, end_date="now") if status in ["resolved", "closed"] else None + ) + assigned_to = fake.name() if status != "open" else None + resolution_notes = fake.text(max_nb_chars=500) if status in ["resolved", "closed"] else None + + tickets_data.append( + ( + user_id, + order_id, + ticket_number, + category, + priority, + status, + subject, + description, + created_at, + updated_at, + resolved_at, + assigned_to, + resolution_notes, + ) + ) + +cursor.executemany( + """ + INSERT INTO customer_support_tickets (user_id, order_id, ticket_number, category, priority, + status, subject, description, created_at, updated_at, + resolved_at, assigned_to, resolution_notes) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) +""", + tickets_data, +) + +print("Creating indexes...") +cursor.execute("CREATE INDEX age_idx on users (age)") +cursor.execute("CREATE INDEX IF NOT EXISTS idx_orders_user_id ON orders(user_id)") +cursor.execute("CREATE INDEX IF NOT EXISTS idx_orders_status ON orders(status)") +cursor.execute("CREATE INDEX IF NOT EXISTS idx_order_items_order_id ON order_items(order_id)") +cursor.execute("CREATE INDEX IF NOT EXISTS idx_order_items_product_id ON order_items(product_id)") +cursor.execute("CREATE INDEX IF NOT EXISTS idx_reviews_product_id ON reviews(product_id)") +cursor.execute("CREATE INDEX IF NOT EXISTS idx_reviews_user_id ON reviews(user_id)") +cursor.execute("CREATE INDEX IF NOT EXISTS idx_inventory_product_id ON inventory_transactions(product_id)") +cursor.execute("CREATE INDEX IF NOT EXISTS idx_tickets_user_id ON customer_support_tickets(user_id)") +cursor.execute("CREATE INDEX IF NOT EXISTS idx_tickets_status ON customer_support_tickets(status)") + +conn.commit() + +# Print summary statistics +print("\n=== Database Generation Complete ===") +print(f"Users: {cursor.execute('SELECT COUNT(*) FROM users').fetchone()[0]:,}") +print(f"Products: {cursor.execute('SELECT COUNT(*) FROM products').fetchone()[0]:,}") +print(f"Orders: {cursor.execute('SELECT COUNT(*) FROM orders').fetchone()[0]:,}") +print(f"Order Items: {cursor.execute('SELECT COUNT(*) FROM order_items').fetchone()[0]:,}") +print(f"Reviews: {cursor.execute('SELECT COUNT(*) FROM reviews').fetchone()[0]:,}") +print(f"Inventory Transactions: {cursor.execute('SELECT COUNT(*) FROM inventory_transactions').fetchone()[0]:,}") +print(f"Support Tickets: {cursor.execute('SELECT COUNT(*) FROM customer_support_tickets').fetchone()[0]:,}") + +# Calculate approximate database size +cursor.execute("SELECT page_count * page_size as size FROM 
pragma_page_count(), pragma_page_size()") +size_bytes = cursor.fetchone()[0] +print(f"\nApproximate database size: {size_bytes / (1024 * 1024):.2f} MB") + +conn.close() +print("\nDatabase created successfully!") diff --git a/testing/join.test b/testing/join.test index 853ccc875..e0fbd436d 100755 --- a/testing/join.test +++ b/testing/join.test @@ -310,4 +310,32 @@ do_execsql_test_on_specific_db {:memory:} min-null-regression-test { insert into t values (1,1),(2,2); insert into u values (1,1),(3,3); select count(u.x) from t left join u using(y); -} {1} \ No newline at end of file +} {1} + +# regression test for issue 2949: consuming WHERE terms not originating from the LEFT JOIN +do_execsql_test_on_specific_db {:memory:} left-join-seek-key-regression-test { + CREATE TABLE t (x INTEGER PRIMARY KEY); + CREATE TABLE u (x INTEGER PRIMARY KEY); + INSERT INTO t VALUES (1); + SELECT * FROM t LEFT JOIN u ON false WHERE u.x = 1; +} {} + +# regression test for issue 2924: calling Next on a cursor that hasn't moved yet +do_execsql_test_on_specific_db {:memory:} next-crash { + create table a(x int primary key,y); + create table b(x int primary key,y); + create table c(x int primary key,y); + insert into a values (1,1),(2,2); + select a.x, b.x, c.x from a left join b on a.y=b.x left join c on b.y=c.x; +} {1|| +2||} + +# regression test for crash in op_column +do_execsql_test_on_specific_db {:memory:} left-join-column-crash { + create table a(x int primary key,y); + create table b(x int primary key,y); + insert into a values (1,1),(2,2); + insert into b values (3,3),(4,4); + select * from a left join b on a.x < 2 where a.x < 3 and b.x < 12; +} {1|1|3|3 +1|1|4|4} \ No newline at end of file diff --git a/testing/json.test b/testing/json.test index 4f16732ca..998cef195 100755 --- a/testing/json.test +++ b/testing/json.test @@ -1223,3 +1223,122 @@ do_execsql_test json_remove_with_arrow { # WITH RECURSIVE c(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM c WHERE x<0x1f) # SELECT sum(json_valid(json_quote('a'||char(x)||'z'))) FROM c ORDER BY x; # } {31} + +do_execsql_test json_each_arrays_heterogeneous_primitives { + SELECT key, atom, type, fullkey, path, typeof(key) AS ktype + FROM json_each('[1, 2.5, "x", true, false, null]') + ORDER BY key; +} { +0|1|integer|$[0]|$|integer +1|2.5|real|$[1]|$|integer +2|x|text|$[2]|$|integer +3|1|true|$[3]|$|integer +4|0|false|$[4]|$|integer +5||null|$[5]|$|integer +} + +do_execsql_test json_each_arrays_parent_is_always_null { + SELECT COUNT(*) FROM json_each('[0,1,2]') WHERE parent IS NOT NULL; +} {0} + +do_execsql_test json_each_arrays_id_uniqueness { + SELECT COUNT(*), COUNT(DISTINCT id) + FROM json_each('[10,20,30,40]'); +} {4|4} + +do_execsql_test json_each_arrays_empty_container_yields_zero_rows { + SELECT COUNT(*) FROM json_each('[]'); +} {0} + +do_execsql_test json_each_objects_simple_integer_values { + SELECT key, atom, type, fullkey, path, typeof(key) AS ktype + FROM json_each('{"a":1,"b":2}') + ORDER BY key; +} { + {a|1|integer|$.a|$|text} + {b|2|integer|$.b|$|text} +} + +do_execsql_test json_each_objects_nested_containers_value_is_valid_json { + SELECT key, type, json_valid(value) AS is_json, fullkey, path + FROM json_each('{"o":{"x":5},"a":[7,8]}') + ORDER BY key; +} { + {a|array|1|$.a|$} + {o|object|1|$.o|$} +} + +do_execsql_test json_each_objects_empty_container_yields_zero_rows { + SELECT COUNT(*) FROM json_each('{}'); +} {0} + +do_execsql_test json_each_objects_keys_require_quoting_in_json_path { + SELECT key, fullkey + FROM json_each('{"a 
space":1,"a.b":2,"\"q\"":3}') + ORDER BY key DESC; +} { +{a.b|$."a.b"} +{a space|$."a space"} +{"q"|$."\"q\""} +} + +do_execsql_test json_each_top_level_integer_single_row_key_null { + SELECT (key IS NULL), fullkey, path, atom, type + FROM json_each('42'); +} {1|$|$|42|integer} + +do_execsql_test json_each_top_level_true_single_row_key_null { + SELECT (key IS NULL), fullkey, path, atom, type + FROM json_each('true'); +} {1|$|$|1|true} + +do_execsql_test json_each_top_level_null_single_row_key_null { + SELECT (key IS NULL), fullkey, path, (atom IS NULL), type + FROM json_each('null'); +} {1|$|$|1|null} + +do_execsql_test json_each_atom_equals_value_for_primitives_containers_are_json_text { + WITH t AS ( + SELECT * FROM json_each('[1,"x",{"y":2},[3]]') + ) + SELECT + SUM(type IN ('object','array') AND json_valid(value)=1), + SUM(type NOT IN ('object','array') AND value=atom) + FROM t; +} {2|2} + +do_execsql_test json_each_typeof_key_array_indices_integer { + SELECT GROUP_CONCAT(ktype,'|') FROM ( + SELECT typeof(key) AS ktype FROM json_each('[0,1]') ORDER BY key + ); +} {integer|integer} + +do_execsql_test json_each_typeof_key_object_keys_text { + SELECT GROUP_CONCAT(ktype,'|') FROM ( + SELECT typeof(key) AS ktype FROM json_each('{"0":0,"1":1}') ORDER BY key + ); +} {text|text} + +do_execsql_test json_each_parent_column_always_null { + SELECT COUNT(*) FROM json_each('{"a":[1,2,3],"b":{}}') WHERE parent IS NOT NULL; +} {0} + +do_execsql_test_error json_each_malformed_json_raises_error { + SELECT * FROM json_each('{not json}'); +} {(.*malformed JSON.*)} + +do_execsql_test json_each_object_member_order_preserved { + SELECT key FROM json_each('{"z":0,"a":1,"m":2}'); +} {z a m} + +do_execsql_test json_each_json_extract_on_value { + SELECT key, json_extract(value, '$.x') + FROM json_each('{"k1":{"x":11},"k2":{"x":22},"k3":{"x":[3]}}') + WHERE type!='array' + ORDER BY key; +} { + {k1|11} + {k2|22} + {k3|[3]} +} + diff --git a/testing/select.test b/testing/select.test index 9ed482e4e..6efc3e061 100755 --- a/testing/select.test +++ b/testing/select.test @@ -59,6 +59,12 @@ do_execsql_test_error select-doubly-qualified-wrong-column { SELECT main.users.wrong FROM users LIMIT 0; } {.*} +do_execsql_test select-limit-expression { + select price from products limit 2 + 1 - 1; +} {79.0 +82.0} + + # ORDER BY id here because sqlite uses age_idx here and we (yet) don't so force it to evaluate in ID order do_execsql_test select-limit-true { SELECT id FROM users ORDER BY id LIMIT true; @@ -743,3 +749,21 @@ do_execsql_test_on_specific_db {:memory:} select-in-complex { SELECT * FROM test_table WHERE category IN ('A', 'B') AND value IN (10, 30, 40); } {1|A|10 3|A|30} + +foreach {testname limit ans} { + limit-const-1 1 {1} + limit-text-2 '2' {1 2} + limit-bool-true true {1} + limit-expr-add 1+2+3 {1 2 3 4 5 6} + limit-expr-sub 5-2-3 {} + limit-expr-paren (1+1)*2 {1 2 3 4} + limit-bool-add true+2 {1 2 3} + limit-text-math '2'*2+1 {1 2 3 4 5} + limit-bool-false-add false+4 {1 2 3 4} + limit-mixed-math (1+'1')*(1+1)-(5/5) {1 2 3} + limit-text-bool ('false'+2) {1 2} + limit-coalesce COALESCE(NULL,0+1) {1} +} { + do_execsql_test limit-complex-exprs-$testname \ + "SELECT id FROM users ORDER BY id LIMIT $limit" $ans +} \ No newline at end of file diff --git a/tests/integration/common.rs b/tests/integration/common.rs index 48ea009dd..8a571b8ce 100644 --- a/tests/integration/common.rs +++ b/tests/integration/common.rs @@ -64,9 +64,6 @@ impl TempDatabase { } pub fn new_with_rusqlite(table_sql: &str, enable_indexes: bool) -> 
Self { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::TRACE) - .finish(); let mut path = TempDir::new().unwrap().keep(); path.push("test.db"); { @@ -211,6 +208,45 @@ pub(crate) fn limbo_exec_rows( rows } +pub(crate) fn limbo_exec_rows_fallible( + _db: &TempDatabase, + conn: &Arc, + query: &str, +) -> Result>, turso_core::LimboError> { + let mut stmt = conn.prepare(query)?; + let mut rows = Vec::new(); + 'outer: loop { + let row = loop { + let result = stmt.step()?; + match result { + turso_core::StepResult::Row => { + let row = stmt.row().unwrap(); + break row; + } + turso_core::StepResult::IO => { + stmt.run_once()?; + continue; + } + + turso_core::StepResult::Done => break 'outer, + r => panic!("unexpected result {r:?}: expecting single row"), + } + }; + let row = row + .get_values() + .map(|x| match x { + turso_core::Value::Null => rusqlite::types::Value::Null, + turso_core::Value::Integer(x) => rusqlite::types::Value::Integer(*x), + turso_core::Value::Float(x) => rusqlite::types::Value::Real(*x), + turso_core::Value::Text(x) => rusqlite::types::Value::Text(x.as_str().to_string()), + turso_core::Value::Blob(x) => rusqlite::types::Value::Blob(x.to_vec()), + }) + .collect(); + rows.push(row); + } + Ok(rows) +} + pub(crate) fn limbo_exec_rows_error( _db: &TempDatabase, conn: &Arc, diff --git a/tests/integration/functions/mod.rs b/tests/integration/functions/mod.rs index 8ee95c32d..b31ed8f53 100644 --- a/tests/integration/functions/mod.rs +++ b/tests/integration/functions/mod.rs @@ -1,16 +1,3 @@ mod test_cdc; mod test_function_rowid; mod test_wal_api; - -#[cfg(test)] -mod tests { - use tracing_subscriber::EnvFilter; - - #[ctor::ctor] - fn init() { - tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .with_ansi(false) - .init(); - } -} diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index b49bfa962..283dab940 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -10,7 +10,10 @@ mod tests { use rusqlite::{params, types::Value}; use crate::{ - common::{limbo_exec_rows, rng_from_time, sqlite_exec_rows, TempDatabase}, + common::{ + limbo_exec_rows, limbo_exec_rows_fallible, rng_from_time, sqlite_exec_rows, + TempDatabase, + }, fuzz::grammar_generator::{const_str, rand_int, rand_str, GrammarGenerator}, }; @@ -504,6 +507,136 @@ mod tests { } } + #[test] + /// Create a table with a random number of columns and indexes, and then randomly update or delete rows from the table. + /// Verify that the results are the same for SQLite and Turso. 
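+    /// If Turso returns an error, the schema and every DML statement executed
+    /// so far are printed before panicking, so the failing sequence can be replayed.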
+ pub fn table_index_mutation_fuzz() { + let _ = env_logger::try_init(); + let (mut rng, seed) = rng_from_time(); + println!("index_scan_single_key_mutation_fuzz seed: {seed}"); + + const OUTER_ITERATIONS: usize = 30; + for i in 0..OUTER_ITERATIONS { + println!( + "table_index_mutation_fuzz iteration {}/{}", + i + 1, + OUTER_ITERATIONS + ); + let limbo_db = TempDatabase::new_empty(true); + let sqlite_db = TempDatabase::new_empty(true); + let num_cols = rng.random_range(1..=10); + let table_def = (0..num_cols) + .map(|i| format!("c{i} INTEGER")) + .collect::>(); + let table_def = table_def.join(", "); + let table_def = format!("CREATE TABLE t ({table_def})"); + + let num_indexes = rng.random_range(0..=num_cols); + let indexes = (0..num_indexes) + .map(|i| format!("CREATE INDEX idx_{i} ON t(c{i})")) + .collect::>(); + + // Create tables and indexes in both databases + let limbo_conn = limbo_db.connect_limbo(); + limbo_exec_rows(&limbo_db, &limbo_conn, &table_def); + for t in indexes.iter() { + limbo_exec_rows(&limbo_db, &limbo_conn, t); + } + + let sqlite_conn = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap(); + sqlite_conn.execute(&table_def, params![]).unwrap(); + for t in indexes.iter() { + sqlite_conn.execute(t, params![]).unwrap(); + } + + // Generate initial data + let num_inserts = rng.random_range(10..=1000); + let mut tuples = HashSet::new(); + while tuples.len() < num_inserts { + tuples.insert( + (0..num_cols) + .map(|_| rng.random_range(0..1000)) + .collect::>(), + ); + } + let mut insert_values = Vec::new(); + for tuple in tuples { + insert_values.push(format!( + "({})", + tuple + .iter() + .map(|x| x.to_string()) + .collect::>() + .join(", ") + )); + } + // Track executed statements in case we fail + let mut dml_statements = Vec::new(); + let insert = format!("INSERT INTO t VALUES {}", insert_values.join(", ")); + dml_statements.push(insert.clone()); + + // Insert initial data into both databases + sqlite_conn.execute(&insert, params![]).unwrap(); + limbo_exec_rows(&limbo_db, &limbo_conn, &insert); + + const COMPARISONS: [&str; 3] = ["=", "<", ">"]; + const INNER_ITERATIONS: usize = 100; + + for _ in 0..INNER_ITERATIONS { + let do_update = rng.random_range(0..2) == 0; + + let comparison = COMPARISONS[rng.random_range(0..COMPARISONS.len())]; + let affected_col = rng.random_range(0..num_cols); + let predicate_col = rng.random_range(0..num_cols); + let predicate_value = rng.random_range(0..1000); + + let query = if do_update { + let new_y = rng.random_range(0..1000); + format!("UPDATE t SET c{affected_col} = {new_y} WHERE c{predicate_col} {comparison} {predicate_value}") + } else { + format!("DELETE FROM t WHERE c{predicate_col} {comparison} {predicate_value}") + }; + + dml_statements.push(query.clone()); + + // Execute on both databases + sqlite_conn.execute(&query, params![]).unwrap(); + let limbo_res = limbo_exec_rows_fallible(&limbo_db, &limbo_conn, &query); + if let Err(e) = &limbo_res { + // print all the DDL and DML statements + println!("{table_def};"); + for t in indexes.iter() { + println!("{t};"); + } + for t in dml_statements.iter() { + println!("{t};"); + } + panic!("Error executing query: {e}"); + } + + // Verify results match exactly + let verify_query = format!( + "SELECT * FROM t ORDER BY {}", + (0..num_cols) + .map(|i| format!("c{i}")) + .collect::>() + .join(", ") + ); + let sqlite_rows = sqlite_exec_rows(&sqlite_conn, &verify_query); + let limbo_rows = limbo_exec_rows(&limbo_db, &limbo_conn, &verify_query); + + assert_eq!( + sqlite_rows, limbo_rows, 
+ "Different results after mutation! limbo: {limbo_rows:?}, sqlite: {sqlite_rows:?}, seed: {seed}, query: {query}", + ); + + if sqlite_rows.is_empty() { + break; + } + } + } + } + #[test] pub fn compound_select_fuzz() { let _ = env_logger::try_init(); @@ -1381,9 +1514,6 @@ mod tests { } #[test] - #[ignore] - /// Ignored because of https://github.com/tursodatabase/turso/issues/2040, https://github.com/tursodatabase/turso/issues/2041 - /// TODO: add fuzzing for other aggregate functions pub fn min_max_agg_fuzz() { let _ = env_logger::try_init(); @@ -1438,6 +1568,107 @@ mod tests { } } } + + #[test] + pub fn affinity_fuzz() { + let _ = env_logger::try_init(); + + let (mut rng, seed) = rng_from_time(); + log::info!("affinity_fuzz seed: {seed}"); + + for iteration in 0..500 { + let db = TempDatabase::new_empty(false); + let limbo_conn = db.connect_limbo(); + let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap(); + + // Test different column affinities - cover all SQLite affinity types + let affinities = [ + "INTEGER", + "TEXT", + "REAL", + "NUMERIC", + "BLOB", + "INT", + "TINYINT", + "SMALLINT", + "MEDIUMINT", + "BIGINT", + "UNSIGNED BIG INT", + "INT2", + "INT8", + "CHARACTER(20)", + "VARCHAR(255)", + "VARYING CHARACTER(255)", + "NCHAR(55)", + "NATIVE CHARACTER(70)", + "NVARCHAR(100)", + "CLOB", + "DOUBLE", + "DOUBLE PRECISION", + "FLOAT", + "DECIMAL(10,5)", + "BOOLEAN", + "DATE", + "DATETIME", + ]; + let affinity = affinities[rng.random_range(0..affinities.len())]; + + let create_table = format!("CREATE TABLE t (x {affinity})"); + limbo_exec_rows(&db, &limbo_conn, &create_table); + sqlite_exec_rows(&sqlite_conn, &create_table); + + // Insert various values that test affinity conversion rules + let mut values = Vec::new(); + for _ in 0..20 { + let value = match rng.random_range(0..9) { + 0 => format!("'{}'", rng.random_range(-10000..10000)), // Pure integer as text + 1 => format!( + "'{}.{}'", + rng.random_range(-1000..1000), + rng.random_range(1..999) // Ensure non-zero decimal part + ), // Float as text with decimal + 2 => format!("'a{}'", rng.random_range(0..1000)), // Text with integer suffix + 3 => format!("' {} '", rng.random_range(-100..100)), // Integer with whitespace + 4 => format!("'-{}'", rng.random_range(1..1000)), // Negative integer as text + 5 => format!("{}", rng.random_range(-10000..10000)), // Direct integer + 6 => format!( + "{}.{}", + rng.random_range(-100..100), + rng.random_range(1..999) // Ensure non-zero decimal part + ), // Direct float + 7 => "'text_value'".to_string(), // Pure text that won't convert + 8 => "NULL".to_string(), // NULL value + _ => unreachable!(), + }; + values.push(format!("({value})")); + } + + let insert = format!("INSERT INTO t VALUES {}", values.join(",")); + limbo_exec_rows(&db, &limbo_conn, &insert); + sqlite_exec_rows(&sqlite_conn, &insert); + + // Query values and their types to verify affinity rules are applied correctly + let query = "SELECT x, typeof(x) FROM t"; + let limbo_result = limbo_exec_rows(&db, &limbo_conn, query); + let sqlite_result = sqlite_exec_rows(&sqlite_conn, query); + + assert_eq!( + limbo_result, sqlite_result, + "iteration: {iteration}, seed: {seed}, affinity: {affinity}, values: {values:?}" + ); + + // Also test with ORDER BY to ensure affinity affects sorting + let query_ordered = "SELECT x FROM t ORDER BY x"; + let limbo_ordered = limbo_exec_rows(&db, &limbo_conn, query_ordered); + let sqlite_ordered = sqlite_exec_rows(&sqlite_conn, query_ordered); + + assert_eq!( + limbo_ordered, sqlite_ordered, + "ORDER 
BY failed - iteration: {iteration}, seed: {seed}, affinity: {affinity}" + ); + } + } + #[test] // Simple fuzz test for SUM with floats pub fn sum_agg_fuzz_floats() { diff --git a/tests/integration/mod.rs b/tests/integration/mod.rs index 0f45007c6..42d782d5b 100644 --- a/tests/integration/mod.rs +++ b/tests/integration/mod.rs @@ -5,3 +5,16 @@ mod fuzz_transaction; mod pragma; mod query_processing; mod wal; + +#[cfg(test)] +mod tests { + use tracing_subscriber::EnvFilter; + + #[ctor::ctor] + fn init() { + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .with_ansi(false) + .init(); + } +}
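
Two short sketches follow for the trickier changes above. Both are illustrative stand-ins written for this document, not the crates' actual APIs; every function name in them is made up.

The lexer change for issue 2933 (parser/src/lexer.rs) threads a `has_digit_prefix` flag into `eat_dot_or_frac` so that an exponent marker may directly follow the dot when digits preceded it: `1.e5` now lexes as a single TK_FLOAT, while a dot with no digit prefix still requires a digit after it, keeping `u.email` as ID, DOT, ID (both cases are covered by the new tests). A minimal standalone predicate capturing that rule, assuming only the Rust standard library:

    // Decides whether the byte after '.' continues a numeric literal.
    // `has_digit_prefix` is true when digits were consumed before the dot
    // (the `1` in `1.e5`) and false when the dot begins the token.
    fn dot_continues_float(next: Option<u8>, has_digit_prefix: bool) -> bool {
        match next {
            // ".5" and "1.5": a digit always continues the number.
            Some(b) if b.is_ascii_digit() => true,
            // issue 2933: an exponent may follow the dot, but only when
            // digits preceded it, so ".email" never starts a number.
            Some(b) if has_digit_prefix && b.eq_ignore_ascii_case(&b'e') => true,
            _ => false,
        }
    }

    fn main() {
        assert!(dot_continues_float(Some(b'5'), false)); // ".5"       -> TK_FLOAT
        assert!(dot_continues_float(Some(b'e'), true)); // "1.e5"     -> TK_FLOAT
        assert!(!dot_continues_float(Some(b'e'), false)); // "u.email"  -> ID DOT ID
        assert!(!dot_continues_float(None, false)); // lone "."   -> TK_DOT
    }

The HashSet-to-IndexSet swap in simulator/ and sql_generation/ is about determinism: IndexSet keeps insertion order, so a plan generated from a fixed seed enumerates its dependencies identically on every run, whereas std's HashSet uses a per-process random hash state and may reorder them. A sketch assuming the indexmap crate, with illustrative table names:

    use indexmap::IndexSet;

    fn main() {
        let mut deps: IndexSet<&str> = IndexSet::new();
        deps.insert("users");
        deps.insert("orders");
        deps.insert("users"); // still a set: the duplicate is ignored

        // Always prints ["users", "orders"]. With std::collections::HashSet
        // the order could differ from run to run, making a simulator failure
        // harder to reproduce from its seed.
        println!("{:?}", deps.iter().collect::<Vec<_>>());
    }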