From 96e4c5d241e42efd3134700b969fe059de637e27 Mon Sep 17 00:00:00 2001 From: TcMits Date: Tue, 16 Sep 2025 18:39:45 +0700 Subject: [PATCH 01/78] fix issue 3144 --- bindings/rust/src/lib.rs | 2 +- core/lib.rs | 132 +++++++++++++----------------- perf/throughput/turso/src/main.rs | 2 +- 3 files changed, 59 insertions(+), 77 deletions(-) diff --git a/bindings/rust/src/lib.rs b/bindings/rust/src/lib.rs index 39706fdd5..dddb8df01 100644 --- a/bindings/rust/src/lib.rs +++ b/bindings/rust/src/lib.rs @@ -408,7 +408,7 @@ impl Connection { /// 3. Step through query -> returns Busy -> sleep/yield for 2 ms /// 4. Step through query -> returns Busy -> sleep/yield for 2 ms (totaling 5 ms of sleep) /// 5. Step through query -> returns Busy -> return Busy to user - pub fn busy_timeout(&self, duration: Option) -> Result<()> { + pub fn busy_timeout(&self, duration: std::time::Duration) -> Result<()> { let conn = self .inner .lock() diff --git a/core/lib.rs b/core/lib.rs index 2a03ec045..7dd615b00 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -486,7 +486,7 @@ impl Database { encryption_cipher_mode: Cell::new(None), sync_mode: Cell::new(SyncMode::Full), data_sync_retry: Cell::new(false), - busy_timeout: Cell::new(None), + busy_timeout: Cell::new(Duration::new(0, 0)), }); self.n_connections .fetch_add(1, std::sync::atomic::Ordering::Relaxed); @@ -978,7 +978,8 @@ pub struct Connection { sync_mode: Cell, data_sync_retry: Cell, /// User defined max accumulated Busy timeout duration - busy_timeout: Cell>, + /// Default is 0 (no timeout) + busy_timeout: Cell, } impl Drop for Connection { @@ -2117,22 +2118,16 @@ impl Connection { /// 5. Step through query -> returns Busy -> return Busy to user /// /// This slight api change demonstrated a better throughtput in `perf/throughput/turso` benchmark - pub fn busy_timeout(&self, mut duration: Option) { - duration = duration.filter(|duration| !duration.is_zero()); + pub fn busy_timeout(&self, duration: std::time::Duration) { self.busy_timeout.set(duration); } } -#[derive(Debug, Default)] +#[derive(Debug)] struct BusyTimeout { /// Busy timeout instant - timeout: Option, - /// Max duration of timeout set by Connection - max_duration: Duration, - /// Accumulated duration for busy timeout - /// - /// It will be decremented until it reaches 0, then after that no timeout will be emitted - accum_duration: Duration, + timeout: Instant, + /// Next iteration index for DELAYS iteration: usize, } @@ -2152,30 +2147,48 @@ impl BusyTimeout { Duration::from_millis(100), ]; - pub fn new(duration: std::time::Duration) -> Self { + const TOTALS: [std::time::Duration; 12] = [ + Duration::from_millis(0), + Duration::from_millis(1), + Duration::from_millis(3), + Duration::from_millis(8), + Duration::from_millis(18), + Duration::from_millis(33), + Duration::from_millis(53), + Duration::from_millis(78), + Duration::from_millis(103), + Duration::from_millis(128), + Duration::from_millis(178), + Duration::from_millis(228), + ]; + + pub fn new(now: Instant) -> Self { Self { - timeout: None, - max_duration: duration, + timeout: now, iteration: 0, - accum_duration: duration, } } - pub fn initiate_timeout(&mut self, now: Instant) { - self.timeout = Self::DELAYS.get(self.iteration).and_then(|delay| { - if self.accum_duration.is_zero() { - None - } else { - let new_timeout = now + (*delay).min(self.accum_duration); - self.accum_duration = self.accum_duration.saturating_sub(*delay); - Some(new_timeout) + // implementation of sqliteDefaultBusyCallback + pub fn busy_callback(&mut self, now: Instant, 
max_duration: Duration) { + let idx = self.iteration.min(11); + let mut delay = Self::DELAYS[idx]; + let mut prior = Self::TOTALS[idx]; + + if self.iteration >= 12 { + prior += delay * (self.iteration as u32 - 11); + } + + if prior + delay > max_duration { + delay = max_duration.saturating_sub(prior); + // no more waiting after this + if delay.is_zero() { + return; } - }); - self.iteration = if self.iteration < Self::DELAYS.len() - 1 { - self.iteration + 1 - } else { - self.iteration - }; + } + + self.iteration += 1; + self.timeout = now + delay; } } @@ -2193,7 +2206,7 @@ pub struct Statement { /// Flag to show if the statement was busy busy: bool, /// Busy timeout instant - busy_timeout: Option, + busy_timeout: BusyTimeout, } impl Statement { @@ -2210,6 +2223,7 @@ impl Statement { QueryMode::ExplainQueryPlan => (EXPLAIN_QUERY_PLAN_COLUMNS.len(), 0), }; let state = vdbe::ProgramState::new(max_registers, cursor_count); + let now = pager.io.now(); Self { program, state, @@ -2218,7 +2232,7 @@ impl Statement { accesses_db, query_mode, busy: false, - busy_timeout: None, + busy_timeout: BusyTimeout::new(now), } } pub fn get_query_mode(&self) -> QueryMode { @@ -2238,16 +2252,9 @@ impl Statement { } pub fn step(&mut self) -> Result { - if let Some(busy_timeout) = self.busy_timeout.as_mut() { - if let Some(timeout) = busy_timeout.timeout { - let now = self.pager.io.now(); - - if now < timeout { - // Yield the query as the timeout has not been reached yet - return Ok(StepResult::IO); - } - // Timeout ended now continue to query execution - } + if self.pager.io.now() < self.busy_timeout.timeout { + // Yield the query as the timeout has not been reached yet + return Ok(StepResult::IO); } let mut res = if !self.accesses_db { @@ -2292,14 +2299,13 @@ impl Statement { } if matches!(res, Ok(StepResult::Busy)) { - self.check_if_busy_handler_set(); - if let Some(busy_timeout) = self.busy_timeout.as_mut() { - busy_timeout.initiate_timeout(self.pager.io.now()); - if busy_timeout.timeout.is_some() { - // Yield instead of busy, as now we will try to wait for the timeout - // before continuing execution - res = Ok(StepResult::IO); - } + let now = self.pager.io.now(); + self.busy_timeout + .busy_callback(now, self.program.connection.busy_timeout.get()); + if now < self.busy_timeout.timeout { + // Yield instead of busy, as now we will try to wait for the timeout + // before continuing execution + res = Ok(StepResult::IO); } } @@ -2473,7 +2479,7 @@ impl Statement { pub fn _reset(&mut self, max_registers: Option, max_cursors: Option) { self.state.reset(max_registers, max_cursors); self.busy = false; - self.check_if_busy_handler_set(); + self.busy_timeout = BusyTimeout::new(self.pager.io.now()); } pub fn row(&self) -> Option<&Row> { @@ -2487,30 +2493,6 @@ impl Statement { pub fn is_busy(&self) -> bool { self.busy } - - /// Checks if the busy handler is set in the connection and sets the handler if needed - fn check_if_busy_handler_set(&mut self) { - let conn_busy_timeout = self - .program - .connection - .busy_timeout - .get() - .map(BusyTimeout::new); - if self.busy_timeout.is_none() { - self.busy_timeout = conn_busy_timeout; - return; - } - if let Some(conn_busy_timeout) = conn_busy_timeout { - let busy_timeout = self - .busy_timeout - .as_mut() - .expect("busy timeout was checked for None above"); - // User changed max duration, so clear previous handler and set a new one - if busy_timeout.max_duration != conn_busy_timeout.max_duration { - *busy_timeout = conn_busy_timeout; - } - } - } } pub type Row = vdbe::Row; 
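For reference, the DELAYS/TOTALS pair above mirrors SQLite's sqliteDefaultBusyCallback: DELAYS[i] is the sleep before retry i, TOTALS[i] is the time already slept before that retry, and the total sleep is capped by the connection's busy_timeout. Below is a minimal standalone sketch of that arithmetic, assuming the same millisecond tables (next_delay is an illustrative name, not part of this patch):

    use std::time::Duration;

    // Sleep per retry and accumulated sleep before each retry (in ms),
    // matching the DELAYS/TOTALS tables in the patch above.
    const DELAYS: [u64; 12] = [1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 100];
    const TOTALS: [u64; 12] = [0, 1, 3, 8, 18, 33, 53, 78, 103, 128, 178, 228];

    /// Sleep before retry `iteration`, or None once the accumulated
    /// sleep would exceed the `max` budget (i.e. Busy is returned to the user).
    fn next_delay(iteration: usize, max: Duration) -> Option<Duration> {
        let idx = iteration.min(11);
        let mut delay = Duration::from_millis(DELAYS[idx]);
        let prior = if iteration >= 12 {
            // past the end of the table, every extra retry sleeps another 100 ms
            Duration::from_millis(TOTALS[11]) + delay * (iteration as u32 - 11)
        } else {
            Duration::from_millis(TOTALS[idx])
        };
        if prior + delay > max {
            delay = max.saturating_sub(prior);
            if delay.is_zero() {
                return None;
            }
        }
        Some(delay)
    }

    fn main() {
        // With a 25 ms budget the sleeps are 1+2+5+10 ms, then a final 7 ms.
        let max = Duration::from_millis(25);
        let mut total = Duration::ZERO;
        for i in 0.. {
            match next_delay(i, max) {
                Some(d) => total += d,
                None => break,
            }
        }
        assert_eq!(total, max);
    }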
diff --git a/perf/throughput/turso/src/main.rs b/perf/throughput/turso/src/main.rs index 61bd35ed0..e881536b5 100644 --- a/perf/throughput/turso/src/main.rs +++ b/perf/throughput/turso/src/main.rs @@ -168,7 +168,7 @@ async fn worker_thread( for iteration in 0..iterations { let conn = db.connect()?; - conn.busy_timeout(Some(timeout))?; + conn.busy_timeout(timeout)?; let total_inserts = Arc::clone(&total_inserts); let tx_fut = async move { let mut stmt = conn From 597314f1cfb647347bc58c5f271c97ecbb6d582d Mon Sep 17 00:00:00 2001 From: TcMits Date: Tue, 16 Sep 2025 19:21:04 +0700 Subject: [PATCH 02/78] perf --- core/lib.rs | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 7dd615b00..201ab2a46 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -2206,7 +2206,7 @@ pub struct Statement { /// Flag to show if the statement was busy busy: bool, /// Busy timeout instant - busy_timeout: BusyTimeout, + busy_timeout: Option, } impl Statement { @@ -2223,7 +2223,6 @@ impl Statement { QueryMode::ExplainQueryPlan => (EXPLAIN_QUERY_PLAN_COLUMNS.len(), 0), }; let state = vdbe::ProgramState::new(max_registers, cursor_count); - let now = pager.io.now(); Self { program, state, @@ -2232,7 +2231,7 @@ impl Statement { accesses_db, query_mode, busy: false, - busy_timeout: BusyTimeout::new(now), + busy_timeout: None, } } pub fn get_query_mode(&self) -> QueryMode { @@ -2252,9 +2251,11 @@ impl Statement { } pub fn step(&mut self) -> Result { - if self.pager.io.now() < self.busy_timeout.timeout { - // Yield the query as the timeout has not been reached yet - return Ok(StepResult::IO); + if let Some(busy_timeout) = self.busy_timeout.as_ref() { + if self.pager.io.now() < busy_timeout.timeout { + // Yield the query as the timeout has not been reached yet + return Ok(StepResult::IO); + } } let mut res = if !self.accesses_db { @@ -2300,11 +2301,20 @@ impl Statement { if matches!(res, Ok(StepResult::Busy)) { let now = self.pager.io.now(); - self.busy_timeout - .busy_callback(now, self.program.connection.busy_timeout.get()); - if now < self.busy_timeout.timeout { - // Yield instead of busy, as now we will try to wait for the timeout - // before continuing execution + let max_duration = self.program.connection.busy_timeout.get(); + self.busy_timeout = match self.busy_timeout.take() { + None => { + let mut result = BusyTimeout::new(now); + result.busy_callback(now, max_duration); + Some(result) + } + Some(mut bt) => { + bt.busy_callback(now, max_duration); + Some(bt) + } + }; + + if now < self.busy_timeout.as_ref().unwrap().timeout { res = Ok(StepResult::IO); } } @@ -2479,7 +2489,7 @@ impl Statement { pub fn _reset(&mut self, max_registers: Option, max_cursors: Option) { self.state.reset(max_registers, max_cursors); self.busy = false; - self.busy_timeout = BusyTimeout::new(self.pager.io.now()); + self.busy_timeout = None; } pub fn row(&self) -> Option<&Row> { From 226dd5cbe0c78c2be1d1078df3320dc5b846a273 Mon Sep 17 00:00:00 2001 From: TcMits Date: Tue, 16 Sep 2025 20:00:04 +0700 Subject: [PATCH 03/78] add commet --- core/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/core/lib.rs b/core/lib.rs index 201ab2a46..c0fc4bb1e 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -2206,6 +2206,7 @@ pub struct Statement { /// Flag to show if the statement was busy busy: bool, /// Busy timeout instant + /// We need Option here because `io.now()` is not a cheap call busy_timeout: Option, } From 974feac27bbe57d1d89a90da9b77981000fb517d Mon Sep 17 00:00:00 2001 
From: Nikita Sivukhin Date: Wed, 17 Sep 2025 21:38:36 +0400 Subject: [PATCH 04/78] move compute to the main thread for browser and node - now, most of the work is happening on the main thread - for database in browser, we still have dedicated WebWorker - but it is used only for OPFS access and only for that - for syn in browser we still offload sync operations to the WebWorker --- .../packages/browser-common/index.ts | 188 +++++++++++--- .../packages/browser/index-bundle.ts | 2 +- .../packages/browser/index-default.ts | 2 +- .../packages/browser/index-turbopack-hack.ts | 2 +- .../packages/browser/index-vite-dev-hack.ts | 4 +- .../packages/browser/promise-bundle.ts | 14 +- .../packages/browser/promise-default.ts | 14 +- .../browser/promise-turbopack-hack.ts | 14 +- .../packages/browser/promise-vite-dev-hack.ts | 14 +- .../packages/browser/promise.test.ts | 56 +++-- bindings/javascript/packages/common/compat.ts | 11 +- .../javascript/packages/common/promise.ts | 15 +- .../javascript/packages/native/index.d.ts | 20 -- bindings/javascript/perf/package-lock.json | 4 +- bindings/javascript/perf/perf-turso.js | 18 +- bindings/javascript/src/browser.rs | 238 ++++++++++++------ bindings/javascript/src/lib.rs | 87 +++---- .../sync/packages/native/index.d.ts | 38 ++- .../javascript/sync/packages/native/index.js | 5 +- 19 files changed, 471 insertions(+), 275 deletions(-) diff --git a/bindings/javascript/packages/browser-common/index.ts b/bindings/javascript/packages/browser-common/index.ts index dc7c334f7..828f49306 100644 --- a/bindings/javascript/packages/browser-common/index.ts +++ b/bindings/javascript/packages/browser-common/index.ts @@ -24,46 +24,94 @@ interface BrowserImports { is_web_worker(): boolean; lookup_file(ptr: number, len: number): number; read(handle: number, ptr: number, len: number, offset: number): number; + read_async(handle: number, ptr: number, len: number, offset: number, c: number); write(handle: number, ptr: number, len: number, offset: number): number; + write_async(handle: number, ptr: number, len: number, offset: number, c: number); sync(handle: number): number; + sync_async(handle: number, c: number); truncate(handle: number, len: number): number; + truncate_async(handle: number, len: number, c: number); size(handle: number): number; } -function panic(name): never { +function panicMain(name): never { + throw new Error(`method ${name} must be invoked only from the worker thread`); +} + +function panicWorker(name): never { throw new Error(`method ${name} must be invoked only from the main thread`); } -const MainDummyImports: BrowserImports = { - is_web_worker: function (): boolean { - return false; - }, - lookup_file: function (ptr: number, len: number): number { - panic("lookup_file") - }, - read: function (handle: number, ptr: number, len: number, offset: number): number { - panic("read") - }, - write: function (handle: number, ptr: number, len: number, offset: number): number { - panic("write") - }, - sync: function (handle: number): number { - panic("sync") - }, - truncate: function (handle: number, len: number): number { - panic("truncate") - }, - size: function (handle: number): number { - panic("size") - } +let completeOpfs: any = null; + +function mainImports(worker: Worker): BrowserImports { + return { + is_web_worker(): boolean { + return false; + }, + write_async(handle, ptr, len, offset, c) { + writeFileAtWorker(worker, handle, ptr, len, offset) + .then(result => { + completeOpfs(c, result); + }, err => { + console.error('write_async', err); + completeOpfs(c, 
-1); + }); + }, + sync_async(handle, c) { + syncFileAtWorker(worker, handle) + .then(result => { + completeOpfs(c, result); + }, err => { + console.error('sync_async', err); + completeOpfs(c, -1); + }); + }, + read_async(handle, ptr, len, offset, c) { + readFileAtWorker(worker, handle, ptr, len, offset) + .then(result => { + completeOpfs(c, result); + }, err => { + console.error('read_async', err); + completeOpfs(c, -1); + }); + }, + truncate_async(handle, len, c) { + truncateFileAtWorker(worker, handle, len) + .then(result => { + completeOpfs(c, result); + }, err => { + console.error('truncate_async', err); + completeOpfs(c, -1); + }); + }, + lookup_file(ptr, len): number { + panicMain("lookup_file") + }, + read(handle, ptr, len, offset): number { + panicMain("read") + }, + write(handle, ptr, len, offset): number { + panicMain("write") + }, + sync(handle): number { + panicMain("sync") + }, + truncate(handle, len): number { + panicMain("truncate") + }, + size(handle): number { + panicMain("size") + } + }; }; function workerImports(opfs: OpfsDirectory, memory: WebAssembly.Memory): BrowserImports { return { - is_web_worker: function (): boolean { + is_web_worker(): boolean { return true; }, - lookup_file: function (ptr: number, len: number): number { + lookup_file(ptr, len): number { try { const handle = opfs.lookupFileHandle(getStringFromMemory(memory, ptr, len)); return handle == null ? -404 : handle; @@ -71,29 +119,28 @@ function workerImports(opfs: OpfsDirectory, memory: WebAssembly.Memory): Browser return -1; } }, - read: function (handle: number, ptr: number, len: number, offset: number): number { + read(handle, ptr, len, offset): number { try { return opfs.read(handle, getUint8ArrayFromMemory(memory, ptr, len), offset); } catch (e) { return -1; } }, - write: function (handle: number, ptr: number, len: number, offset: number): number { + write(handle, ptr, len, offset): number { try { return opfs.write(handle, getUint8ArrayFromMemory(memory, ptr, len), offset) } catch (e) { return -1; } }, - sync: function (handle: number): number { + sync(handle): number { try { - opfs.sync(handle); - return 0; + return opfs.sync(handle); } catch (e) { return -1; } }, - truncate: function (handle: number, len: number): number { + truncate(handle, len): number { try { opfs.truncate(handle, len); return 0; @@ -101,13 +148,25 @@ function workerImports(opfs: OpfsDirectory, memory: WebAssembly.Memory): Browser return -1; } }, - size: function (handle: number): number { + size(handle): number { try { return opfs.size(handle); } catch (e) { return -1; } - } + }, + read_async(handle, ptr, len, offset, completion) { + panicWorker("read_async") + }, + write_async(handle, ptr, len, offset, completion) { + panicWorker("write_async") + }, + sync_async(handle, completion) { + panicWorker("sync_async") + }, + truncate_async(handle, len, c) { + panicWorker("truncate_async") + }, } } @@ -175,10 +234,11 @@ class OpfsDirectory { throw e; } } - sync(handle: number) { + sync(handle: number): number { try { const file = this.fileByHandle.get(handle); file.flush(); + return 0; } catch (e) { console.error('sync', handle, e); throw e; @@ -187,8 +247,8 @@ class OpfsDirectory { truncate(handle: number, size: number) { try { const file = this.fileByHandle.get(handle); - const result = file.truncate(size); - return result; + file.truncate(size); + return 0; } catch (e) { console.error('truncate', handle, size, e); throw e; @@ -214,7 +274,7 @@ function waitForWorkerResponse(worker: Worker, id: number): Promise { if (msg.data.error 
!= null) { waitReject(msg.data.error) } else { - waitResolve() + waitResolve(msg.data.result) } cleanup(); } @@ -229,6 +289,38 @@ function waitForWorkerResponse(worker: Worker, id: number): Promise { return result; } +function readFileAtWorker(worker: Worker, handle: number, ptr: number, len: number, offset: number) { + workerRequestId += 1; + const currentId = workerRequestId; + const promise = waitForWorkerResponse(worker, currentId); + worker.postMessage({ __turso__: "read_async", handle: handle, ptr: ptr, len: len, offset: offset, id: currentId }); + return promise; +} + +function writeFileAtWorker(worker: Worker, handle: number, ptr: number, len: number, offset: number) { + workerRequestId += 1; + const currentId = workerRequestId; + const promise = waitForWorkerResponse(worker, currentId); + worker.postMessage({ __turso__: "write_async", handle: handle, ptr: ptr, len: len, offset: offset, id: currentId }); + return promise; +} + +function syncFileAtWorker(worker: Worker, handle: number) { + workerRequestId += 1; + const currentId = workerRequestId; + const promise = waitForWorkerResponse(worker, currentId); + worker.postMessage({ __turso__: "sync_async", handle: handle, id: currentId }); + return promise; +} + +function truncateFileAtWorker(worker: Worker, handle: number, len: number) { + workerRequestId += 1; + const currentId = workerRequestId; + const promise = waitForWorkerResponse(worker, currentId); + worker.postMessage({ __turso__: "truncate_async", handle: handle, len: len, id: currentId }); + return promise; +} + function registerFileAtWorker(worker: Worker, path: string): Promise { workerRequestId += 1; const currentId = workerRequestId; @@ -299,12 +391,25 @@ function setupWebWorker() { self.postMessage({ id: e.data.id, error: error }); } return; + } else if (e.data.__turso__ == 'read_async') { + let result = opfs.read(e.data.handle, getUint8ArrayFromMemory(memory, e.data.ptr, e.data.len), e.data.offset); + self.postMessage({ id: e.data.id, result: result }); + } else if (e.data.__turso__ == 'write_async') { + let result = opfs.write(e.data.handle, getUint8ArrayFromMemory(memory, e.data.ptr, e.data.len), e.data.offset); + self.postMessage({ id: e.data.id, result: result }); + } else if (e.data.__turso__ == 'sync_async') { + let result = opfs.sync(e.data.handle); + self.postMessage({ id: e.data.id, result: result }); + } else if (e.data.__turso__ == 'truncate_async') { + let result = opfs.truncate(e.data.handle, e.data.len); + self.postMessage({ id: e.data.id, result: result }); } handler.handle(e) } } async function setupMainThread(wasmFile: ArrayBuffer, factory: () => Worker): Promise { + const worker = factory(); const __emnapiContext = __emnapiGetDefaultContext() const __wasi = new __WASI({ version: 'preview1', @@ -322,13 +427,13 @@ async function setupMainThread(wasmFile: ArrayBuffer, factory: () => Worker): Pr context: __emnapiContext, asyncWorkPoolSize: 1, wasi: __wasi, - onCreateWorker() { return factory() }, + onCreateWorker() { return worker; }, overwriteImports(importObject) { importObject.env = { ...importObject.env, ...importObject.napi, ...importObject.emnapi, - ...MainDummyImports, + ...mainImports(worker), memory: __sharedMemory, } return importObject @@ -340,8 +445,9 @@ async function setupMainThread(wasmFile: ArrayBuffer, factory: () => Worker): Pr } } }, - }) + }); + completeOpfs = __napiModule.exports.completeOpfs; return __napiModule; } -export { OpfsDirectory, workerImports, MainDummyImports, waitForWorkerResponse, registerFileAtWorker, 
unregisterFileAtWorker, isWebWorker, setupWebWorker, setupMainThread } \ No newline at end of file +export { OpfsDirectory, workerImports, mainImports as MainDummyImports, waitForWorkerResponse, registerFileAtWorker, unregisterFileAtWorker, isWebWorker, setupWebWorker, setupMainThread } \ No newline at end of file diff --git a/bindings/javascript/packages/browser/index-bundle.ts b/bindings/javascript/packages/browser/index-bundle.ts index 2b74b8114..9aff00c28 100644 --- a/bindings/javascript/packages/browser/index-bundle.ts +++ b/bindings/javascript/packages/browser/index-bundle.ts @@ -20,5 +20,5 @@ export const Database = napiModule.exports.Database export const Opfs = napiModule.exports.Opfs export const OpfsFile = napiModule.exports.OpfsFile export const Statement = napiModule.exports.Statement -export const connect = napiModule.exports.connect +export const connectDbAsync = napiModule.exports.connectDbAsync export const initThreadPool = napiModule.exports.initThreadPool diff --git a/bindings/javascript/packages/browser/index-default.ts b/bindings/javascript/packages/browser/index-default.ts index 53c70e413..844e2c91b 100644 --- a/bindings/javascript/packages/browser/index-default.ts +++ b/bindings/javascript/packages/browser/index-default.ts @@ -18,5 +18,5 @@ export const Database = napiModule.exports.Database export const Opfs = napiModule.exports.Opfs export const OpfsFile = napiModule.exports.OpfsFile export const Statement = napiModule.exports.Statement -export const connect = napiModule.exports.connect +export const connectDbAsync = napiModule.exports.connectDbAsync export const initThreadPool = napiModule.exports.initThreadPool diff --git a/bindings/javascript/packages/browser/index-turbopack-hack.ts b/bindings/javascript/packages/browser/index-turbopack-hack.ts index f43d41624..8dc807f4a 100644 --- a/bindings/javascript/packages/browser/index-turbopack-hack.ts +++ b/bindings/javascript/packages/browser/index-turbopack-hack.ts @@ -21,5 +21,5 @@ export const Database = napiModule.exports.Database export const Opfs = napiModule.exports.Opfs export const OpfsFile = napiModule.exports.OpfsFile export const Statement = napiModule.exports.Statement -export const connect = napiModule.exports.connect +export const connectDbAsync = napiModule.exports.connectDbAsync export const initThreadPool = napiModule.exports.initThreadPool diff --git a/bindings/javascript/packages/browser/index-vite-dev-hack.ts b/bindings/javascript/packages/browser/index-vite-dev-hack.ts index 6f1d42c4a..3c36191de 100644 --- a/bindings/javascript/packages/browser/index-vite-dev-hack.ts +++ b/bindings/javascript/packages/browser/index-vite-dev-hack.ts @@ -7,7 +7,7 @@ let napiModule = { Opfs: {} as any, OpfsFile: {} as any, Statement: {} as any, - connect: {} as any, + connectDbAsync: {} as any, initThreadPool: {} as any, } }; @@ -37,5 +37,5 @@ export const Database = napiModule.exports.Database export const Opfs = napiModule.exports.Opfs export const OpfsFile = napiModule.exports.OpfsFile export const Statement = napiModule.exports.Statement -export const connect = napiModule.exports.connect +export const connectDbAsync = napiModule.exports.connectDbAsync export const initThreadPool = napiModule.exports.initThreadPool diff --git a/bindings/javascript/packages/browser/promise-bundle.ts b/bindings/javascript/packages/browser/promise-bundle.ts index fc28be689..103739e67 100644 --- a/bindings/javascript/packages/browser/promise-bundle.ts +++ b/bindings/javascript/packages/browser/promise-bundle.ts @@ -1,6 +1,6 @@ import 
{ DatabaseOpts, SqliteError, } from "@tursodatabase/database-common" -import { connect as promiseConnect, Database } from "./promise.js"; -import { connect as nativeConnect, initThreadPool, MainWorker } from "./index-bundle.js"; +import { Database, connect as promiseConnect } from "./promise.js"; +import { initThreadPool, MainWorker, connectDbAsync } from "./index-bundle.js"; /** * Creates a new database connection asynchronously. @@ -10,13 +10,19 @@ import { connect as nativeConnect, initThreadPool, MainWorker } from "./index-bu * @returns {Promise} - A promise that resolves to a Database instance. */ async function connect(path: string, opts: DatabaseOpts = {}): Promise { - return await promiseConnect(path, opts, nativeConnect, async () => { + const init = async () => { await initThreadPool(); if (MainWorker == null) { throw new Error("panic: MainWorker is not initialized"); } return MainWorker; - }); + }; + return await promiseConnect( + path, + opts, + connectDbAsync, + init + ); } export { connect, Database, SqliteError } diff --git a/bindings/javascript/packages/browser/promise-default.ts b/bindings/javascript/packages/browser/promise-default.ts index a4dc99dfb..454ded33d 100644 --- a/bindings/javascript/packages/browser/promise-default.ts +++ b/bindings/javascript/packages/browser/promise-default.ts @@ -1,6 +1,6 @@ import { DatabaseOpts, SqliteError, } from "@tursodatabase/database-common" -import { connect as promiseConnect, Database } from "./promise.js"; -import { connect as nativeConnect, initThreadPool, MainWorker } from "./index-default.js"; +import { Database, connect as promiseConnect } from "./promise.js"; +import { initThreadPool, MainWorker, connectDbAsync } from "./index-default.js"; /** * Creates a new database connection asynchronously. @@ -10,13 +10,19 @@ import { connect as nativeConnect, initThreadPool, MainWorker } from "./index-de * @returns {Promise} - A promise that resolves to a Database instance. */ async function connect(path: string, opts: DatabaseOpts = {}): Promise { - return await promiseConnect(path, opts, nativeConnect, async () => { + const init = async () => { await initThreadPool(); if (MainWorker == null) { throw new Error("panic: MainWorker is not initialized"); } return MainWorker; - }); + }; + return await promiseConnect( + path, + opts, + connectDbAsync, + init + ); } export { connect, Database, SqliteError } diff --git a/bindings/javascript/packages/browser/promise-turbopack-hack.ts b/bindings/javascript/packages/browser/promise-turbopack-hack.ts index 359e79e40..b6b4bf09b 100644 --- a/bindings/javascript/packages/browser/promise-turbopack-hack.ts +++ b/bindings/javascript/packages/browser/promise-turbopack-hack.ts @@ -1,6 +1,6 @@ import { DatabaseOpts, SqliteError, } from "@tursodatabase/database-common" -import { connect as promiseConnect, Database } from "./promise.js"; -import { connect as nativeConnect, initThreadPool, MainWorker } from "./index-turbopack-hack.js"; +import { Database, connect as promiseConnect } from "./promise.js"; +import { initThreadPool, MainWorker, connectDbAsync } from "./index-turbopack-hack.js"; /** * Creates a new database connection asynchronously. @@ -10,13 +10,19 @@ import { connect as nativeConnect, initThreadPool, MainWorker } from "./index-tu * @returns {Promise} - A promise that resolves to a Database instance. 
*/ async function connect(path: string, opts: DatabaseOpts = {}): Promise { - return await promiseConnect(path, opts, nativeConnect, async () => { + const init = async () => { await initThreadPool(); if (MainWorker == null) { throw new Error("panic: MainWorker is not initialized"); } return MainWorker; - }); + }; + return await promiseConnect( + path, + opts, + connectDbAsync, + init + ); } export { connect, Database, SqliteError } diff --git a/bindings/javascript/packages/browser/promise-vite-dev-hack.ts b/bindings/javascript/packages/browser/promise-vite-dev-hack.ts index 9e3e59e14..5b3c4acda 100644 --- a/bindings/javascript/packages/browser/promise-vite-dev-hack.ts +++ b/bindings/javascript/packages/browser/promise-vite-dev-hack.ts @@ -1,6 +1,6 @@ import { DatabaseOpts, SqliteError, } from "@tursodatabase/database-common" -import { connect as promiseConnect, Database } from "./promise.js"; -import { connect as nativeConnect, initThreadPool, MainWorker } from "./index-vite-dev-hack.js"; +import { Database, connect as promiseConnect } from "./promise.js"; +import { initThreadPool, MainWorker, connectDbAsync } from "./index-vite-dev-hack.js"; /** * Creates a new database connection asynchronously. @@ -10,13 +10,19 @@ import { connect as nativeConnect, initThreadPool, MainWorker } from "./index-vi * @returns {Promise} - A promise that resolves to a Database instance. */ async function connect(path: string, opts: DatabaseOpts = {}): Promise { - return await promiseConnect(path, opts, nativeConnect, async () => { + const init = async () => { await initThreadPool(); if (MainWorker == null) { throw new Error("panic: MainWorker is not initialized"); } return MainWorker; - }); + }; + return await promiseConnect( + path, + opts, + connectDbAsync, + init + ); } export { connect, Database, SqliteError } diff --git a/bindings/javascript/packages/browser/promise.test.ts b/bindings/javascript/packages/browser/promise.test.ts index 7e76ec029..741e77276 100644 --- a/bindings/javascript/packages/browser/promise.test.ts +++ b/bindings/javascript/packages/browser/promise.test.ts @@ -1,4 +1,4 @@ -import { expect, test, afterEach } from 'vitest' +import { expect, test } from 'vitest' import { connect } from './promise-default.js' test('in-memory db', async () => { @@ -10,6 +10,28 @@ test('in-memory db', async () => { expect(rows).toEqual([{ x: 1 }, { x: 3 }]); }) +test('on-disk db large inserts', async () => { + const path = `test-${(Math.random() * 10000) | 0}.db`; + const db1 = await connect(path); + await db1.prepare("CREATE TABLE t(x)").run(); + await db1.prepare("INSERT INTO t VALUES (randomblob(10 * 4096 + 0))").run(); + await db1.prepare("INSERT INTO t VALUES (randomblob(10 * 4096 + 1))").run(); + await db1.prepare("INSERT INTO t VALUES (randomblob(10 * 4096 + 2))").run(); + const stmt1 = db1.prepare("SELECT length(x) as l FROM t"); + expect(stmt1.columns()).toEqual([{ name: "l", column: null, database: null, table: null, type: null }]); + const rows1 = await stmt1.all(); + expect(rows1).toEqual([{ l: 10 * 4096 }, { l: 10 * 4096 + 1 }, { l: 10 * 4096 + 2 }]); + + await db1.exec("BEGIN"); + await db1.exec("INSERT INTO t VALUES (1)"); + await db1.exec("ROLLBACK"); + + const rows2 = await db1.prepare("SELECT length(x) as l FROM t").all(); + expect(rows2).toEqual([{ l: 10 * 4096 }, { l: 10 * 4096 + 1 }, { l: 10 * 4096 + 2 }]); + + await db1.prepare("PRAGMA wal_checkpoint(TRUNCATE)").run(); +}) + test('on-disk db', async () => { const path = `test-${(Math.random() * 10000) | 0}.db`; const db1 = await 
connect(path); @@ -19,8 +41,8 @@ test('on-disk db', async () => { expect(stmt1.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); const rows1 = await stmt1.all([1]); expect(rows1).toEqual([{ x: 1 }, { x: 3 }]); - await db1.close(); stmt1.close(); + await db1.close(); const db2 = await connect(path); const stmt2 = db2.prepare("SELECT * FROM t WHERE x % 2 = ?"); @@ -30,23 +52,23 @@ test('on-disk db', async () => { db2.close(); }) -test('attach', async () => { - const path1 = `test-${(Math.random() * 10000) | 0}.db`; - const path2 = `test-${(Math.random() * 10000) | 0}.db`; - const db1 = await connect(path1); - await db1.exec("CREATE TABLE t(x)"); - await db1.exec("INSERT INTO t VALUES (1), (2), (3)"); - const db2 = await connect(path2); - await db2.exec("CREATE TABLE q(x)"); - await db2.exec("INSERT INTO q VALUES (4), (5), (6)"); +// test('attach', async () => { +// const path1 = `test-${(Math.random() * 10000) | 0}.db`; +// const path2 = `test-${(Math.random() * 10000) | 0}.db`; +// const db1 = await connect(path1); +// await db1.exec("CREATE TABLE t(x)"); +// await db1.exec("INSERT INTO t VALUES (1), (2), (3)"); +// const db2 = await connect(path2); +// await db2.exec("CREATE TABLE q(x)"); +// await db2.exec("INSERT INTO q VALUES (4), (5), (6)"); - await db1.exec(`ATTACH '${path2}' as secondary`); +// await db1.exec(`ATTACH '${path2}' as secondary`); - const stmt = db1.prepare("SELECT * FROM t UNION ALL SELECT * FROM secondary.q"); - expect(stmt.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); - const rows = await stmt.all([1]); - expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }, { x: 4 }, { x: 5 }, { x: 6 }]); -}) +// const stmt = db1.prepare("SELECT * FROM t UNION ALL SELECT * FROM secondary.q"); +// expect(stmt.columns()).toEqual([{ name: "x", column: null, database: null, table: null, type: null }]); +// const rows = await stmt.all([1]); +// expect(rows).toEqual([{ x: 1 }, { x: 2 }, { x: 3 }, { x: 4 }, { x: 5 }, { x: 6 }]); +// }) test('blobs', async () => { const db = await connect(":memory:"); diff --git a/bindings/javascript/packages/common/compat.ts b/bindings/javascript/packages/common/compat.ts index d7bd493bb..be8c46d56 100644 --- a/bindings/javascript/packages/common/compat.ts +++ b/bindings/javascript/packages/common/compat.ts @@ -192,7 +192,12 @@ class Database { } try { - this.db.batchSync(sql); + let stmt = this.prepare(sql); + try { + stmt.run(); + } finally { + stmt.close(); + } } catch (err) { throw convertError(err); } @@ -408,6 +413,10 @@ class Statement { throw convertError(err); } } + + close() { + this.stmt.finalize(); + } } export { Database, Statement } \ No newline at end of file diff --git a/bindings/javascript/packages/common/promise.ts b/bindings/javascript/packages/common/promise.ts index e81795833..f1a22260c 100644 --- a/bindings/javascript/packages/common/promise.ts +++ b/bindings/javascript/packages/common/promise.ts @@ -196,7 +196,12 @@ class Database { } try { - await this.db.batchAsync(sql); + const stmt = this.prepare(sql); + try { + await stmt.run(); + } finally { + stmt.close(); + } } catch (err) { throw convertError(err); } @@ -298,7 +303,7 @@ class Statement { bindParams(this.stmt, bindParameters); while (true) { - const stepResult = await this.stmt.stepAsync(); + const stepResult = this.stmt.stepSync(); if (stepResult === STEP_IO) { await this.db.db.ioLoopAsync(); continue; @@ -328,7 +333,7 @@ class Statement { bindParams(this.stmt, bindParameters); while (true) 
{ - const stepResult = await this.stmt.stepAsync(); + const stepResult = this.stmt.stepSync(); if (stepResult === STEP_IO) { await this.db.db.ioLoopAsync(); continue; @@ -352,7 +357,7 @@ class Statement { bindParams(this.stmt, bindParameters); while (true) { - const stepResult = await this.stmt.stepAsync(); + const stepResult = this.stmt.stepSync(); if (stepResult === STEP_IO) { await this.db.db.ioLoopAsync(); continue; @@ -377,7 +382,7 @@ class Statement { const rows: any[] = []; while (true) { - const stepResult = await this.stmt.stepAsync(); + const stepResult = this.stmt.stepSync(); if (stepResult === STEP_IO) { await this.db.db.ioLoopAsync(); continue; diff --git a/bindings/javascript/packages/native/index.d.ts b/bindings/javascript/packages/native/index.d.ts index 8654b88d2..cfd72609e 100644 --- a/bindings/javascript/packages/native/index.d.ts +++ b/bindings/javascript/packages/native/index.d.ts @@ -15,26 +15,6 @@ export declare class Database { get path(): string /** Returns whether the database connection is open. */ get open(): boolean - /** - * Executes a batch of SQL statements on main thread - * - * # Arguments - * - * * `sql` - The SQL statements to execute. - * - * # Returns - */ - batchSync(sql: string): void - /** - * Executes a batch of SQL statements outside of main thread - * - * # Arguments - * - * * `sql` - The SQL statements to execute. - * - * # Returns - */ - batchAsync(sql: string): Promise /** * Prepares a statement for execution. * diff --git a/bindings/javascript/perf/package-lock.json b/bindings/javascript/perf/package-lock.json index bf737b714..7ec8c6bdf 100644 --- a/bindings/javascript/perf/package-lock.json +++ b/bindings/javascript/perf/package-lock.json @@ -20,10 +20,10 @@ }, "../packages/native": { "name": "@tursodatabase/database", - "version": "0.1.5-pre.3", + "version": "0.2.0-pre.3", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.1.5-pre.3" + "@tursodatabase/database-common": "^0.2.0-pre.3" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", diff --git a/bindings/javascript/perf/perf-turso.js b/bindings/javascript/perf/perf-turso.js index 092730265..91c2b7d2a 100644 --- a/bindings/javascript/perf/perf-turso.js +++ b/bindings/javascript/perf/perf-turso.js @@ -1,26 +1,26 @@ import { run, bench, group, baseline } from 'mitata'; -import { Database } from '@tursodatabase/database/compat'; +import { Database } from '@tursodatabase/database'; const db = new Database(':memory:'); -db.exec("CREATE TABLE users (id INTEGER, name TEXT, email TEXT)"); -db.exec("INSERT INTO users (id, name, email) VALUES (1, 'Alice', 'alice@example.org')"); +await db.exec("CREATE TABLE users (id INTEGER, name TEXT, email TEXT)"); +await db.exec("INSERT INTO users (id, name, email) VALUES (1, 'Alice', 'alice@example.org')"); const stmtSelect = db.prepare("SELECT * FROM users WHERE id = ?"); const rawStmtSelect = db.prepare("SELECT * FROM users WHERE id = ?").raw(); const stmtInsert = db.prepare("INSERT INTO users (id, name, email) VALUES (?, ?, ?)"); -bench('Statement.get() with bind parameters [expanded]', () => { - stmtSelect.get(1); +bench('Statement.get() with bind parameters [expanded]', async () => { + await stmtSelect.get(1); }); -bench('Statement.get() with bind parameters [raw]', () => { - rawStmtSelect.get(1); +bench('Statement.get() with bind parameters [raw]', async () => { + await rawStmtSelect.get(1); }); -bench('Statement.run() with bind parameters', () => { - stmtInsert.run([1, 'foobar', 'foobar@example.com']); +bench('Statement.run() with 
bind parameters', async () => { + await stmtInsert.run([1, 'foobar', 'foobar@example.com']); }); await run({ diff --git a/bindings/javascript/src/browser.rs b/bindings/javascript/src/browser.rs index 92c818b4c..c59d86e7f 100644 --- a/bindings/javascript/src/browser.rs +++ b/bindings/javascript/src/browser.rs @@ -1,10 +1,10 @@ -use std::sync::Arc; +use std::{cell::RefCell, collections::HashMap, sync::Arc}; use napi::bindgen_prelude::*; use napi_derive::napi; -use turso_core::{storage::database::DatabaseFile, Clock, File, Instant, IO}; +use turso_core::{Clock, Completion, File, Instant, MemoryIO, IO}; -use crate::{init_tracing, is_memory, Database, DatabaseOpts}; +use crate::{is_memory, Database, DatabaseOpts}; pub struct NoopTask; @@ -29,11 +29,11 @@ pub fn init_thread_pool() -> napi::Result> { pub struct ConnectTask { path: String, io: Arc, + opts: Option, } pub struct ConnectResult { - db: Arc, - conn: Arc, + db: Database, } unsafe impl Send for ConnectResult {} @@ -43,73 +43,98 @@ impl Task for ConnectTask { type JsValue = Database; fn compute(&mut self) -> Result { - let file = self - .io - .open_file(&self.path, turso_core::OpenFlags::Create, false) - .map_err(|e| Error::new(Status::GenericFailure, format!("Failed to open file: {e}")))?; - - let db_file = Arc::new(DatabaseFile::new(file)); - let db = turso_core::Database::open(self.io.clone(), &self.path, db_file, false, true) - .map_err(|e| { - Error::new( - Status::GenericFailure, - format!("Failed to open database: {e}"), - ) - })?; - - let conn = db - .connect() - .map_err(|e| Error::new(Status::GenericFailure, format!("Failed to connect: {e}")))?; - - Ok(ConnectResult { db, conn }) + let db = Database::new_io(self.path.clone(), self.io.clone(), self.opts.clone())?; + Ok(ConnectResult { db }) } fn resolve(&mut self, _: Env, result: Self::Output) -> Result { - Ok(Database::create( - Some(result.db), - self.io.clone(), - result.conn, - self.path.clone(), - )) + Ok(result.db) } } -#[napi] -// we offload connect to the web-worker because: -// 1. browser main-thread do not support Atomic.wait operations -// 2. turso-db use blocking IO [io.wait_for_completion(c)] in few places during initialization path -// -// so, we offload connect to the worker thread -pub fn connect(path: String, opts: Option) -> Result> { - if let Some(opts) = opts { - init_tracing(opts.tracing); - } - let task = if is_memory(&path) { - ConnectTask { - io: Arc::new(turso_core::MemoryIO::new()), - path, - } - } else { - let io = Arc::new(Opfs::new()?); - ConnectTask { io, path } - }; - Ok(AsyncTask::new(task)) -} #[napi] #[derive(Clone)] -pub struct Opfs; +pub struct Opfs { + inner: Arc, +} + +pub struct OpfsInner { + completion_no: RefCell, + completions: RefCell>, +} + +thread_local! 
{ + static OPFS: Arc = Arc::new(Opfs::new()); +} #[napi] #[derive(Clone)] struct OpfsFile { handle: i32, + opfs: Opfs, +} + +// unsafe impl Send for OpfsFile {} +// unsafe impl Sync for OpfsFile {} + +unsafe impl Send for Opfs {} +unsafe impl Sync for Opfs {} + +#[napi] +// we offload connect to the web-worker because +// turso-db use blocking IO [io.wait_for_completion(c)] in few places during initialization path +pub fn connect_db_async( + path: String, + opts: Option, +) -> Result> { + let io: Arc = if is_memory(&path) { + Arc::new(MemoryIO::new()) + } else { + // we must create OPFS IO on the main thread + opfs() + }; + let task = ConnectTask { path, io, opts }; + Ok(AsyncTask::new(task)) } #[napi] +pub fn complete_opfs(completion_no: u32, result: i32) { + OPFS.with(|opfs| opfs.complete(completion_no, result)) +} + +pub fn opfs() -> Arc { + OPFS.with(|opfs| opfs.clone()) +} + impl Opfs { - #[napi(constructor)] - pub fn new() -> napi::Result { - Ok(Self) + pub fn new() -> Self { + Self { + inner: Arc::new(OpfsInner { + completion_no: RefCell::new(0), + completions: RefCell::new(HashMap::new()), + }), + } + } + + pub fn complete(&self, completion_no: u32, result: i32) { + let completion = { + let mut completions = self.inner.completions.borrow_mut(); + completions.remove(&completion_no).unwrap() + }; + completion.complete(result); + } + + fn register_completion(&self, c: Completion) -> u32 { + let inner = &self.inner; + *inner.completion_no.borrow_mut() += 1; + let completion_no = *inner.completion_no.borrow(); + tracing::debug!( + "register completion: {} {:?}", + completion_no, + Arc::as_ptr(inner) + ); + inner.completions.borrow_mut().insert(completion_no, c); + completion_no } } @@ -127,6 +152,13 @@ extern "C" { fn sync(handle: i32) -> i32; fn truncate(handle: i32, length: usize) -> i32; fn size(handle: i32) -> i32; + + fn write_async(handle: i32, buffer: *const u8, buffer_len: usize, offset: i32, c: u32); + fn sync_async(handle: i32, c: u32); + fn read_async(handle: i32, buffer: *mut u8, buffer_len: usize, offset: i32, c: u32); + fn truncate_async(handle: i32, length: usize, c: u32); + // fn size_async(handle: i32) -> i32; + fn is_web_worker() -> bool; } @@ -144,7 +176,12 @@ impl IO for Opfs { tracing::info!("open_file: {}", path); let result = unsafe { lookup_file(path.as_ptr(), path.len()) }; if result >= 0 { - Ok(Arc::new(OpfsFile { handle: result })) + Ok(Arc::new(OpfsFile { + handle: result, + opfs: Opfs { + inner: self.inner.clone(), + }, + })) } else if result == -404 { Err(turso_core::LimboError::InternalError(format!( "unexpected path {path}: files must be created in advance for OPFS IO" @@ -175,17 +212,32 @@ impl File for OpfsFile { pos: u64, c: turso_core::Completion, ) -> turso_core::Result { - assert!( - is_web_worker_safe(), - "opfs must be used only from web worker for now" + let web_worker = is_web_worker_safe(); + tracing::debug!( + "pread({}, is_web_worker={}): pos={}", + self.handle, + web_worker, + pos ); - tracing::debug!("pread({}): pos={}", self.handle, pos); let handle = self.handle; let read_c = c.as_read(); let buffer = read_c.buf_arc(); let buffer = buffer.as_mut_slice(); - let result = unsafe { read(handle, buffer.as_mut_ptr(), buffer.len(), pos as i32) }; - c.complete(result as i32); + if web_worker { + let result = unsafe { read(handle, buffer.as_mut_ptr(), buffer.len(), pos as i32) }; + c.complete(result as i32); + } else { + let completion_no = self.opfs.register_completion(c.clone()); + unsafe { + read_async( + handle, + buffer.as_mut_ptr(), + 
buffer.len(), + pos as i32, + completion_no, + ) + }; + } Ok(c) } @@ -195,27 +247,44 @@ impl File for OpfsFile { buffer: Arc, c: turso_core::Completion, ) -> turso_core::Result { - assert!( - is_web_worker_safe(), - "opfs must be used only from web worker for now" + let web_worker = is_web_worker_safe(); + tracing::debug!( + "pwrite({}, is_web_worker={}): pos={}", + self.handle, + web_worker, + pos ); - tracing::debug!("pwrite({}): pos={}", self.handle, pos); let handle = self.handle; let buffer = buffer.as_slice(); - let result = unsafe { write(handle, buffer.as_ptr(), buffer.len(), pos as i32) }; - c.complete(result as i32); + if web_worker { + let result = unsafe { write(handle, buffer.as_ptr(), buffer.len(), pos as i32) }; + c.complete(result as i32); + } else { + let completion_no = self.opfs.register_completion(c.clone()); + unsafe { + write_async( + handle, + buffer.as_ptr(), + buffer.len(), + pos as i32, + completion_no, + ) + }; + } Ok(c) } fn sync(&self, c: turso_core::Completion) -> turso_core::Result { - assert!( - is_web_worker_safe(), - "opfs must be used only from web worker for now" - ); - tracing::debug!("sync({})", self.handle); + let web_worker = is_web_worker_safe(); + tracing::debug!("sync({}, is_web_worker={})", self.handle, web_worker); let handle = self.handle; - let result = unsafe { sync(handle) }; - c.complete(result as i32); + if web_worker { + let result = unsafe { sync(handle) }; + c.complete(result as i32); + } else { + let completion_no = self.opfs.register_completion(c.clone()); + unsafe { sync_async(handle, completion_no) }; + } Ok(c) } @@ -224,14 +293,21 @@ impl File for OpfsFile { len: u64, c: turso_core::Completion, ) -> turso_core::Result { - assert!( - is_web_worker_safe(), - "opfs must be used only from web worker for now" + let web_worker = is_web_worker_safe(); + tracing::debug!( + "truncate({}, is_web_worker={}): len={}", + self.handle, + web_worker, + len ); - tracing::debug!("truncate({}): len={}", self.handle, len); let handle = self.handle; - let result = unsafe { truncate(handle, len as usize) }; - c.complete(result as i32); + if web_worker { + let result = unsafe { truncate(handle, len as usize) }; + c.complete(result as i32); + } else { + let completion_no = self.opfs.register_completion(c.clone()); + unsafe { truncate_async(handle, len as usize, completion_no) }; + } Ok(c) } diff --git a/bindings/javascript/src/lib.rs b/bindings/javascript/src/lib.rs index d479f101d..3a9970680 100644 --- a/bindings/javascript/src/lib.rs +++ b/bindings/javascript/src/lib.rs @@ -10,8 +10,10 @@ //! - Iterating through query results //! 
- Managing the I/O event loop -#[cfg(feature = "browser")] +// #[cfg(feature = "browser")] pub mod browser; +// #[cfg(feature = "browser")] +use crate::browser::opfs; use napi::bindgen_prelude::*; use napi::{Env, Task}; @@ -76,10 +78,6 @@ pub(crate) fn init_tracing(level_filter: Option) { } pub enum DbTask { - Batch { - conn: Arc, - sql: String, - }, Step { stmt: Arc>>, }, @@ -93,10 +91,6 @@ impl Task for DbTask { fn compute(&mut self) -> Result { match self { - DbTask::Batch { conn, sql } => { - batch_sync(conn, sql)?; - Ok(0) - } DbTask::Step { stmt } => step_sync(stmt), } } @@ -107,20 +101,11 @@ impl Task for DbTask { } #[napi(object)] +#[derive(Clone)] pub struct DatabaseOpts { pub tracing: Option, } -fn batch_sync(conn: &Arc, sql: &str) -> napi::Result<()> { - conn.prepare_execute_batch(sql).map_err(|e| { - Error::new( - Status::GenericFailure, - format!("Failed to execute batch: {e}"), - ) - })?; - Ok(()) -} - fn step_sync(stmt: &Arc>>) -> napi::Result { let mut stmt_ref = stmt.borrow_mut(); let stmt = stmt_ref @@ -152,21 +137,38 @@ impl Database { /// # Arguments /// * `path` - The path to the database file. #[napi(constructor)] + pub fn new_napi(path: String, opts: Option) -> Result { + Self::new(path, opts) + } + pub fn new(path: String, opts: Option) -> Result { - if let Some(opts) = opts { - init_tracing(opts.tracing); - } let io: Arc = if is_memory(&path) { Arc::new(turso_core::MemoryIO::new()) } else { - Arc::new(turso_core::PlatformIO::new().map_err(|e| { - Error::new(Status::GenericFailure, format!("Failed to create IO: {e}")) - })?) + #[cfg(not(feature = "browser"))] + { + Arc::new(turso_core::PlatformIO::new().map_err(|e| { + Error::new(Status::GenericFailure, format!("Failed to create IO: {e}")) + })?) + } + #[cfg(feature = "browser")] + { + return Err(napi::Error::new( + napi::Status::GenericFailure, + "FS-backed db must be initialized through connectDbAsync function in the browser", + )); + } }; + Self::new_io(path, io, opts) + } - #[cfg(feature = "browser")] - if !is_memory(&path) { - return Err(Error::new(Status::GenericFailure, "sync constructor is not supported for FS-backed databases in the WASM. Use async connect(...) method instead".to_string())); + pub fn new_io( + path: String, + io: Arc, + opts: Option, + ) -> Result { + if let Some(opts) = opts { + init_tracing(opts.tracing); } let file = io @@ -233,33 +235,6 @@ impl Database { self.is_open.get() } - /// Executes a batch of SQL statements on main thread - /// - /// # Arguments - /// - /// * `sql` - The SQL statements to execute. - /// - /// # Returns - #[napi] - pub fn batch_sync(&self, sql: String) -> Result<()> { - batch_sync(&self.conn()?, &sql) - } - - /// Executes a batch of SQL statements outside of main thread - /// - /// # Arguments - /// - /// * `sql` - The SQL statements to execute. - /// - /// # Returns - #[napi(ts_return_type = "Promise")] - pub fn batch_async(&self, sql: String) -> Result> { - Ok(AsyncTask::new(DbTask::Batch { - conn: self.conn()?.clone(), - sql, - })) - } - /// Prepares a statement for execution. 
/// /// # Arguments @@ -325,8 +300,8 @@ impl Database { #[napi] pub fn close(&mut self) -> Result<()> { self.is_open.set(false); - let _ = self._db.take(); let _ = self.conn.take().unwrap(); + let _ = self._db.take(); Ok(()) } diff --git a/bindings/javascript/sync/packages/native/index.d.ts b/bindings/javascript/sync/packages/native/index.d.ts index 3ff5f0390..4ff3c2f91 100644 --- a/bindings/javascript/sync/packages/native/index.d.ts +++ b/bindings/javascript/sync/packages/native/index.d.ts @@ -15,26 +15,6 @@ export declare class Database { get path(): string /** Returns whether the database connection is open. */ get open(): boolean - /** - * Executes a batch of SQL statements on main thread - * - * # Arguments - * - * * `sql` - The SQL statements to execute. - * - * # Returns - */ - batchSync(sql: string): void - /** - * Executes a batch of SQL statements outside of main thread - * - * # Arguments - * - * * `sql` - The SQL statements to execute. - * - * # Returns - */ - batchAsync(sql: string): Promise /** * Prepares a statement for execution. * @@ -93,6 +73,16 @@ export declare class Database { ioLoopAsync(): Promise } +export declare class Opfs { + constructor() + connectDb(path: string, opts?: DatabaseOpts | undefined | null): Promise + complete(completionNo: number, result: number): void +} + +export declare class OpfsFile { + +} + /** A prepared statement. */ export declare class Statement { reset(): void @@ -149,6 +139,12 @@ export declare class Statement { export interface DatabaseOpts { tracing?: string } + +/** + * turso-db in the the browser requires explicit thread pool initialization + * so, we just put no-op task on the thread pool and force emnapi to allocate web worker + */ +export declare function initThreadPool(): Promise export declare class GeneratorHolder { resumeSync(error?: string | undefined | null): GeneratorResponse resumeAsync(error?: string | undefined | null): Promise @@ -220,7 +216,7 @@ export type DatabaseRowTransformResultJs = export type GeneratorResponse = | { type: 'IO' } | { type: 'Done' } - | { type: 'SyncEngineStats', operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime?: number } + | { type: 'SyncEngineStats', operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime?: number, revision?: string } export type JsProtocolRequest = | { type: 'Http', method: string, path: string, body?: Array, headers: Array<[string, string]> } diff --git a/bindings/javascript/sync/packages/native/index.js b/bindings/javascript/sync/packages/native/index.js index 709ca74e4..53bff489f 100644 --- a/bindings/javascript/sync/packages/native/index.js +++ b/bindings/javascript/sync/packages/native/index.js @@ -508,9 +508,12 @@ if (!nativeBinding) { throw new Error(`Failed to load native binding`) } -const { Database, Statement, GeneratorHolder, JsDataCompletion, JsProtocolIo, JsProtocolRequestBytes, SyncEngine, DatabaseChangeTypeJs, SyncEngineProtocolVersion } = nativeBinding +const { Database, Opfs, OpfsFile, Statement, initThreadPool, GeneratorHolder, JsDataCompletion, JsProtocolIo, JsProtocolRequestBytes, SyncEngine, DatabaseChangeTypeJs, SyncEngineProtocolVersion } = nativeBinding export { Database } +export { Opfs } +export { OpfsFile } export { Statement } +export { initThreadPool } export { GeneratorHolder } export { JsDataCompletion } export { JsProtocolIo } From acc536d54276a5ff93e4b92ca2a9557ae347f272 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 17 Sep 2025 23:30:44 
+0400 Subject: [PATCH 05/78] use global opfs IO in the sync --- bindings/javascript/sync/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/javascript/sync/src/lib.rs b/bindings/javascript/sync/src/lib.rs index e92603508..dd5e3f080 100644 --- a/bindings/javascript/sync/src/lib.rs +++ b/bindings/javascript/sync/src/lib.rs @@ -167,7 +167,7 @@ impl SyncEngine { } #[cfg(feature = "browser")] { - Arc::new(turso_node::browser::Opfs::new()?) + turso_node::browser::opfs() } }; Ok(SyncEngine { From b0f60a29baf94071fa36129152e3f2b1baf05ce8 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 16 Sep 2025 12:20:22 +0400 Subject: [PATCH 06/78] guard meta with mutex and make all methods to accept shared reference and offload locking of sync engine methods to the external wrapper --- sync/engine/src/database_sync_engine.rs | 50 ++++++++++++------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/sync/engine/src/database_sync_engine.rs b/sync/engine/src/database_sync_engine.rs index 7176d7ede..e98acb182 100644 --- a/sync/engine/src/database_sync_engine.rs +++ b/sync/engine/src/database_sync_engine.rs @@ -1,5 +1,4 @@ use std::{ - cell::RefCell, collections::{HashMap, HashSet}, sync::{Arc, Mutex}, }; @@ -51,7 +50,7 @@ pub struct DatabaseSyncEngine { meta_path: String, changes_file: Arc>>>, opts: DatabaseSyncEngineOpts, - meta: RefCell, + meta: Mutex, client_unique_id: String, } @@ -147,7 +146,7 @@ impl DatabaseSyncEngine
{ tracing::info!("initialize database tape connection: path={}", main_db_path); let main_tape = DatabaseTape::new_with_opts(main_db, tape_opts); let changes_file = io.open_file(&changes_path, OpenFlags::Create, false)?; - let mut db = Self { + let db = Self { io, protocol, db_file, @@ -158,7 +157,7 @@ impl DatabaseSyncEngine
{ meta_path: format!("{main_db_path}-info"), changes_file: Arc::new(Mutex::new(Some(changes_file))), opts, - meta: RefCell::new(meta.clone()), + meta: Mutex::new(meta.clone()), client_unique_id: meta.client_unique_id.clone(), }; @@ -176,7 +175,7 @@ impl DatabaseSyncEngine
{ Ok(db) } - fn open_revert_db_conn(&mut self) -> Result<Arc<turso_core::Connection>> { + fn open_revert_db_conn(&self) -> Result<Arc<turso_core::Connection>> { let db = turso_core::Database::open_with_flags_bypass_registry( self.io.clone(), &self.main_db_path, @@ -191,10 +190,7 @@ impl DatabaseSyncEngine
{ Ok(conn) } - async fn checkpoint_passive( - &mut self, - coro: &Coro, - ) -> Result<(Option>, u64)> { + async fn checkpoint_passive(&self, coro: &Coro) -> Result<(Option>, u64)> { let watermark = self.meta().revert_since_wal_watermark; tracing::info!( "checkpoint(path={:?}): revert_since_wal_watermark={}", @@ -273,9 +269,13 @@ impl DatabaseSyncEngine
{ }) } - pub async fn checkpoint(&mut self, coro: &Coro) -> Result<()> { + pub async fn checkpoint(&self, coro: &Coro) -> Result<()> { let (main_wal_salt, watermark) = self.checkpoint_passive(coro).await?; + tracing::info!( + "checkpoint(path={:?}): passive checkpoint is done", + self.main_db_path + ); let main_conn = connect_untracked(&self.main_tape)?; let revert_conn = self.open_revert_db_conn()?; @@ -419,10 +419,17 @@ impl DatabaseSyncEngine
{ /// This method will **not** send local changes to the remote /// This method will block writes for the duration of the pull pub async fn apply_changes_from_remote( - &mut self, + &self, coro: &Coro, remote_changes: DbChangesStatus, ) -> Result<()> { + if remote_changes.file_slot.is_none() { + self.update_meta(coro, |m| { + m.last_pull_unix_time = remote_changes.time.secs; + }) + .await?; + return Ok(()); + } assert!(remote_changes.file_slot.is_some(), "file_slot must be set"); let changes_file = remote_changes.file_slot.as_ref().unwrap().value.clone(); let pull_result = self.apply_changes_internal(coro, &changes_file).await; @@ -447,7 +454,7 @@ impl DatabaseSyncEngine
{ Ok(()) } async fn apply_changes_internal( - &mut self, + &self, coro: &Coro, changes_file: &Arc, ) -> Result { @@ -652,7 +659,7 @@ impl DatabaseSyncEngine
{ /// Sync local changes to remote DB and bring new changes from remote to local /// This method will block writes for the duration of the sync - pub async fn sync(&mut self, coro: &Coro) -> Result<()> { + pub async fn sync(&self, coro: &Coro) -> Result<()> { // todo(sivukhin): this is a bit suboptimal as both 'push' and 'pull' will call pull_synced_from_remote // but for now - keep it simple self.push_changes_to_remote(coro).await?; @@ -660,21 +667,14 @@ impl DatabaseSyncEngine
{ Ok(()) } - pub async fn pull_changes_from_remote(&mut self, coro: &Coro) -> Result<()> { + pub async fn pull_changes_from_remote(&self, coro: &Coro) -> Result<()> { let changes = self.wait_changes_from_remote(coro).await?; - if changes.file_slot.is_some() { - self.apply_changes_from_remote(coro, changes).await?; - } else { - self.update_meta(coro, |m| { - m.last_pull_unix_time = changes.time.secs; - }) - .await?; - } + self.apply_changes_from_remote(coro, changes).await?; Ok(()) } - fn meta(&self) -> std::cell::Ref<'_, DatabaseMetadata> { - self.meta.borrow() + fn meta(&self) -> std::sync::MutexGuard<'_, DatabaseMetadata> { + self.meta.lock().unwrap() } async fn update_meta( @@ -688,7 +688,7 @@ impl DatabaseSyncEngine
{ let completion = self.protocol.full_write(&self.meta_path, meta.dump()?)?; // todo: what happens if we actually update the metadata on disk but then fail, so the in-memory state is never updated? wait_all_results(coro, &completion).await?; - self.meta.replace(meta); + *self.meta.lock().unwrap() = meta; Ok(()) } } From 160119b12ef0fa787fe48857c37f4d34e2c608e0 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 16 Sep 2025 12:20:59 +0400 Subject: [PATCH 07/78] propagate long poll timeout --- sync/engine/src/database_sync_engine.rs | 2 ++ sync/engine/src/database_sync_operations.rs | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/sync/engine/src/database_sync_engine.rs b/sync/engine/src/database_sync_engine.rs index e98acb182..0257ca629 100644 --- a/sync/engine/src/database_sync_engine.rs +++ b/sync/engine/src/database_sync_engine.rs @@ -36,6 +36,7 @@ pub struct DatabaseSyncEngineOpts { pub tables_ignore: Vec, pub use_transform: bool, pub wal_pull_batch_size: u64, + pub long_poll_timeout: Option, pub protocol_version_hint: DatabaseSyncEngineProtocolVersion, } @@ -386,6 +387,7 @@ impl DatabaseSyncEngine
{ &file.value, &revision, self.opts.wal_pull_batch_size, + self.opts.long_poll_timeout, ) .await?; diff --git a/sync/engine/src/database_sync_operations.rs b/sync/engine/src/database_sync_operations.rs index daec39bc4..d82ace6a4 100644 --- a/sync/engine/src/database_sync_operations.rs +++ b/sync/engine/src/database_sync_operations.rs @@ -166,6 +166,7 @@ pub async fn wal_pull_to_file( frames_file: &Arc, revision: &DatabasePullRevision, wal_pull_batch_size: u64, + long_poll_timeout: Option, ) -> Result { // truncate file before pulling new data let c = Completion::new_trunc(move |result| { @@ -195,7 +196,7 @@ pub async fn wal_pull_to_file( .await } DatabasePullRevision::V1 { revision } => { - wal_pull_to_file_v1(coro, client, frames_file, revision).await + wal_pull_to_file_v1(coro, client, frames_file, revision, long_poll_timeout).await } } } @@ -206,6 +207,7 @@ pub async fn wal_pull_to_file_v1( client: &C, frames_file: &Arc, revision: &str, + long_poll_timeout: Option, ) -> Result { tracing::info!("wal_pull: revision={revision}"); let mut bytes = BytesMut::new(); @@ -214,7 +216,7 @@ pub async fn wal_pull_to_file_v1( encoding: PageUpdatesEncodingReq::Raw as i32, server_revision: String::new(), client_revision: revision.to_string(), - long_poll_timeout_ms: 0, + long_poll_timeout_ms: long_poll_timeout.map(|x| x.as_secs() as u32).unwrap_or(0), server_pages: BytesMut::new().into(), client_pages: BytesMut::new().into(), }; From 83303b8c5b2e8121f07061733e977d477554f131 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 16 Sep 2025 12:21:17 +0400 Subject: [PATCH 08/78] properly guard access to the sync engine with locks --- bindings/javascript/sync/src/lib.rs | 116 +++++++++++++++++----------- 1 file changed, 72 insertions(+), 44 deletions(-) diff --git a/bindings/javascript/sync/src/lib.rs b/bindings/javascript/sync/src/lib.rs index dd5e3f080..c70932081 100644 --- a/bindings/javascript/sync/src/lib.rs +++ b/bindings/javascript/sync/src/lib.rs @@ -6,7 +6,7 @@ pub mod js_protocol_io; use std::{ collections::HashMap, - sync::{Arc, Mutex, OnceLock, RwLock, RwLockReadGuard, RwLockWriteGuard}, + sync::{Arc, Mutex, MutexGuard, OnceLock, RwLock, RwLockReadGuard}, }; use napi::bindgen_prelude::{AsyncTask, Either5, Null}; @@ -28,17 +28,50 @@ pub struct DatabaseOpts { pub path: String, } +pub struct SyncEngineGuard { + inner: Arc>>>, + wait_lock: Mutex<()>, + push_lock: Mutex<()>, + pull_lock: Mutex<()>, + checkpoint_lock: Mutex<()>, +} + +impl SyncEngineGuard { + fn checkpoint_lock(&self) -> (MutexGuard<'_, ()>, MutexGuard<'_, ()>, MutexGuard<'_, ()>) { + let push = self.push_lock.lock().unwrap(); + let pull = self.pull_lock.lock().unwrap(); + let checkpoint = self.checkpoint_lock.lock().unwrap(); + (push, pull, checkpoint) + } + fn pull_lock(&self) -> (MutexGuard<'_, ()>, MutexGuard<'_, ()>, MutexGuard<'_, ()>) { + let wait = self.wait_lock.lock().unwrap(); + let push = self.push_lock.lock().unwrap(); + let pull = self.pull_lock.lock().unwrap(); + (wait, push, pull) + } + fn push_lock(&self) -> MutexGuard<'_, ()> { + let push = self.push_lock.lock().unwrap(); + push + } + fn wait_lock(&self) -> (MutexGuard<'_, ()>, MutexGuard<'_, ()>) { + let wait = self.wait_lock.lock().unwrap(); + let pull = self.pull_lock.lock().unwrap(); + (wait, pull) + } +} + #[napi] pub struct SyncEngine { path: String, client_name: String, wal_pull_batch_size: u32, + long_poll_timeout: Option, protocol_version: DatabaseSyncEngineProtocolVersion, tables_ignore: Vec, use_transform: bool, io: Option>, protocol: Option>, - 
sync_engine: Arc>>>, + sync_engine: Arc, opened: Arc>>, } @@ -123,6 +156,7 @@ pub struct SyncEngineOpts { pub path: String, pub client_name: Option, pub wal_pull_batch_size: Option, + pub long_poll_timeout_ms: Option, pub tracing: Option, pub tables_ignore: Option>, pub use_transform: bool, @@ -174,10 +208,19 @@ impl SyncEngine { path: opts.path, client_name: opts.client_name.unwrap_or("turso-sync-js".to_string()), wal_pull_batch_size: opts.wal_pull_batch_size.unwrap_or(100), + long_poll_timeout: opts + .long_poll_timeout_ms + .map(|x| std::time::Duration::from_millis(x as u64)), tables_ignore: opts.tables_ignore.unwrap_or_default(), use_transform: opts.use_transform, #[allow(clippy::arc_with_non_send_sync)] - sync_engine: Arc::new(RwLock::new(None)), + sync_engine: Arc::new(SyncEngineGuard { + inner: Arc::new(RwLock::new(None)), + wait_lock: Mutex::new(()), + push_lock: Mutex::new(()), + pull_lock: Mutex::new(()), + checkpoint_lock: Mutex::new(()), + }), io: Some(io), protocol: Some(Arc::new(JsProtocolIo::default())), #[allow(clippy::arc_with_non_send_sync)] @@ -196,6 +239,7 @@ impl SyncEngine { let opts = DatabaseSyncEngineOpts { client_name: self.client_name.clone(), wal_pull_batch_size: self.wal_pull_batch_size as u64, + long_poll_timeout: self.long_poll_timeout, tables_ignore: self.tables_ignore.clone(), use_transform: self.use_transform, protocol_version_hint: self.protocol_version, @@ -213,7 +257,7 @@ impl SyncEngine { let connection = initialized.connect_rw(&coro).await?; let db = turso_node::Database::create(None, io.clone(), connection, path); - *sync_engine.write().unwrap() = Some(initialized); + *sync_engine.inner.write().unwrap() = Some(initialized); *opened.lock().unwrap() = Some(db); Ok(()) }); @@ -246,9 +290,10 @@ impl SyncEngine { #[napi] pub fn sync(&self) -> GeneratorHolder { - self.run(async move |coro, sync_engine| { - let mut sync_engine = try_write(sync_engine)?; - let sync_engine = try_unwrap_mut(&mut sync_engine)?; + self.run(async move |coro, guard| { + let _lock = guard.pull_lock(); + let sync_engine = try_read(&guard.inner)?; + let sync_engine = try_unwrap(&sync_engine)?; sync_engine.sync(coro).await?; Ok(None) }) @@ -256,8 +301,9 @@ impl SyncEngine { #[napi] pub fn push(&self) -> GeneratorHolder { - self.run(async move |coro, sync_engine| { - let sync_engine = try_read(sync_engine)?; + self.run(async move |coro, guard| { + let _lock = guard.push_lock(); + let sync_engine = try_read(&guard.inner)?; let sync_engine = try_unwrap(&sync_engine)?; sync_engine.push_changes_to_remote(coro).await?; Ok(None) @@ -266,8 +312,8 @@ impl SyncEngine { #[napi] pub fn stats(&self) -> GeneratorHolder { - self.run(async move |coro, sync_engine| { - let sync_engine = try_read(sync_engine)?; + self.run(async move |coro, guard| { + let sync_engine = try_read(&guard.inner)?; let sync_engine = try_unwrap(&sync_engine)?; let stats = sync_engine.stats(coro).await?; Ok(Some(GeneratorResponse::SyncEngineStats { @@ -283,19 +329,25 @@ impl SyncEngine { #[napi] pub fn pull(&self) -> GeneratorHolder { - self.run(async move |coro, sync_engine| { - let mut sync_engine = try_write(sync_engine)?; - let sync_engine = try_unwrap_mut(&mut sync_engine)?; - sync_engine.pull_changes_from_remote(coro).await?; + self.run(async move |coro, guard| { + let sync_engine = try_read(&guard.inner)?; + let sync_engine = try_unwrap(&sync_engine)?; + let changes = { + let _lock = guard.wait_lock(); + sync_engine.wait_changes_from_remote(coro).await? 
+ }; + let _lock = guard.pull_lock(); + sync_engine.apply_changes_from_remote(coro, changes).await?; Ok(None) }) } #[napi] pub fn checkpoint(&self) -> GeneratorHolder { - self.run(async move |coro, sync_engine| { - let mut sync_engine = try_write(sync_engine)?; - let sync_engine = try_unwrap_mut(&mut sync_engine)?; + self.run(async move |coro, guard| { + let _lock = guard.checkpoint_lock(); + let sync_engine = try_read(&guard.inner)?; + let sync_engine = try_unwrap(&sync_engine)?; sync_engine.checkpoint(coro).await?; Ok(None) }) @@ -315,7 +367,7 @@ impl SyncEngine { #[napi] pub fn close(&mut self) { - let _ = self.sync_engine.write().unwrap().take(); + let _ = self.sync_engine.inner.write().unwrap().take(); let _ = self.opened.lock().unwrap().take().unwrap(); let _ = self.io.take(); let _ = self.protocol.take(); @@ -344,7 +396,7 @@ impl SyncEngine { &self, f: impl AsyncFnOnce( &Coro<()>, - &Arc>>>, + &Arc, ) -> turso_sync_engine::Result> + 'static, ) -> GeneratorHolder { @@ -378,18 +430,6 @@ fn try_read( Ok(sync_engine) } -fn try_write( - sync_engine: &RwLock>>, -) -> turso_sync_engine::Result>>> { - let Ok(sync_engine) = sync_engine.try_write() else { - let nasty_error = "sync_engine is busy".to_string(); - return Err(turso_sync_engine::errors::Error::DatabaseSyncEngineError( - nasty_error, - )); - }; - Ok(sync_engine) -} - fn try_unwrap<'a>( sync_engine: &'a RwLockReadGuard<'_, Option>>, ) -> turso_sync_engine::Result<&'a DatabaseSyncEngine> { @@ -401,15 +441,3 @@ fn try_unwrap<'a>( }; Ok(sync_engine) } - -fn try_unwrap_mut<'a>( - sync_engine: &'a mut RwLockWriteGuard<'_, Option>>, -) -> turso_sync_engine::Result<&'a mut DatabaseSyncEngine> { - let Some(sync_engine) = sync_engine.as_mut() else { - let error = "sync_engine must be initialized".to_string(); - return Err(turso_sync_engine::errors::Error::DatabaseSyncEngineError( - error, - )); - }; - Ok(sync_engine) -} From e68b642f4f836a731945c7563050b4c3a12f1360 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 16 Sep 2025 12:22:02 +0400 Subject: [PATCH 09/78] set longPollTimeoutMs from js bindings --- bindings/javascript/sync/packages/common/types.ts | 1 + bindings/javascript/sync/packages/native/index.d.ts | 1 + bindings/javascript/sync/packages/native/promise.ts | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bindings/javascript/sync/packages/common/types.ts b/bindings/javascript/sync/packages/common/types.ts index 49b140103..27006de8d 100644 --- a/bindings/javascript/sync/packages/common/types.ts +++ b/bindings/javascript/sync/packages/common/types.ts @@ -36,6 +36,7 @@ export interface SyncOpts { encryptionKey?: string; tablesIgnore?: string[], transform?: Transform, + longPollTimeoutMs?: number, tracing?: string, } diff --git a/bindings/javascript/sync/packages/native/index.d.ts b/bindings/javascript/sync/packages/native/index.d.ts index 4ff3c2f91..02ff2c385 100644 --- a/bindings/javascript/sync/packages/native/index.d.ts +++ b/bindings/javascript/sync/packages/native/index.d.ts @@ -228,6 +228,7 @@ export interface SyncEngineOpts { path: string clientName?: string walPullBatchSize?: number + longPollTimeoutMs?: number tracing?: string tablesIgnore?: Array useTransform: boolean diff --git a/bindings/javascript/sync/packages/native/promise.ts b/bindings/javascript/sync/packages/native/promise.ts index 3d473c8a9..2bff31d29 100644 --- a/bindings/javascript/sync/packages/native/promise.ts +++ b/bindings/javascript/sync/packages/native/promise.ts @@ -83,7 +83,8 @@ async function connect(opts: SyncOpts): Promise 
{ tablesIgnore: opts.tablesIgnore, useTransform: opts.transform != null, tracing: opts.tracing, - protocolVersion: 1 + longPollTimeoutMs: opts.longPollTimeoutMs, + protocolVersion: 1, }); const runOpts: RunOpts = { url: opts.url, From 1185298670d6bfd4b2e05019b792798daa9be44c Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 17 Sep 2025 10:37:26 +0400 Subject: [PATCH 10/78] fix replay generator --- sync/engine/src/database_replay_generator.rs | 9 ++++----- sync/engine/src/database_sync_operations.rs | 13 ++++++------- sync/engine/src/database_tape.rs | 8 ++++---- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/sync/engine/src/database_replay_generator.rs b/sync/engine/src/database_replay_generator.rs index daa3b9f0f..02532d825 100644 --- a/sync/engine/src/database_replay_generator.rs +++ b/sync/engine/src/database_replay_generator.rs @@ -264,13 +264,12 @@ impl DatabaseReplayGenerator { let update = self.update_query(coro, table_name, &columns).await?; Ok(update) } else { - let columns = [true].repeat(after.len()); - let update = self.update_query(coro, table_name, &columns).await?; - Ok(update) + let upsert = self.upsert_query(coro, table_name, after.len()).await?; + Ok(upsert) } } DatabaseTapeRowChangeType::Insert { after } => { - let insert = self.insert_query(coro, table_name, after.len()).await?; + let insert = self.upsert_query(coro, table_name, after.len()).await?; Ok(insert) } } @@ -320,7 +319,7 @@ impl DatabaseReplayGenerator { is_ddl_replay: false, }) } - pub(crate) async fn insert_query( + pub(crate) async fn upsert_query( &self, coro: &Coro, table_name: &str, diff --git a/sync/engine/src/database_sync_operations.rs b/sync/engine/src/database_sync_operations.rs index d82ace6a4..5197e04e0 100644 --- a/sync/engine/src/database_sync_operations.rs +++ b/sync/engine/src/database_sync_operations.rs @@ -216,7 +216,7 @@ pub async fn wal_pull_to_file_v1( encoding: PageUpdatesEncodingReq::Raw as i32, server_revision: String::new(), client_revision: revision.to_string(), - long_poll_timeout_ms: long_poll_timeout.map(|x| x.as_secs() as u32).unwrap_or(0), + long_poll_timeout_ms: long_poll_timeout.map(|x| x.as_millis() as u32).unwrap_or(0), server_pages: BytesMut::new().into(), client_pages: BytesMut::new().into(), }; @@ -807,12 +807,11 @@ pub async fn push_logical_changes( }), DatabaseTapeOperation::RowChange(change) => { let replay_info = generator.replay_info(coro, &change).await?; - let change_type = (&change.change).into(); match change.change { DatabaseTapeRowChangeType::Delete { before } => { let values = generator.replay_values( &replay_info, - change_type, + replay_info.change_type, change.id, before, None, @@ -829,7 +828,7 @@ pub async fn push_logical_changes( DatabaseTapeRowChangeType::Insert { after } => { let values = generator.replay_values( &replay_info, - change_type, + replay_info.change_type, change.id, after, None, @@ -850,7 +849,7 @@ pub async fn push_logical_changes( } => { let values = generator.replay_values( &replay_info, - change_type, + replay_info.change_type, change.id, after, Some(updates), @@ -871,7 +870,7 @@ pub async fn push_logical_changes( } => { let values = generator.replay_values( &replay_info, - change_type, + replay_info.change_type, change.id, after, None, @@ -1361,7 +1360,7 @@ pub async fn wait_proto_message( Error::DatabaseSyncEngineError(format!("unable to deserialize protobuf message: {e}")) })?; let _ = bytes.split_to(message_length + prefix_length); - tracing::debug!( + tracing::trace!( "wait_proto_message: elapsed={:?}", 
std::time::Instant::now().duration_since(start_time) ); diff --git a/sync/engine/src/database_tape.rs b/sync/engine/src/database_tape.rs index b98cd0847..b8dfdb820 100644 --- a/sync/engine/src/database_tape.rs +++ b/sync/engine/src/database_tape.rs @@ -10,7 +10,7 @@ use crate::{ database_sync_operations::WAL_FRAME_HEADER, errors::Error, types::{ - Coro, DatabaseChange, DatabaseTapeOperation, DatabaseTapeRowChange, + Coro, DatabaseChange, DatabaseChangeType, DatabaseTapeOperation, DatabaseTapeRowChange, DatabaseTapeRowChangeType, ProtocolCommand, }, wal_session::WalSession, @@ -584,7 +584,7 @@ impl DatabaseReplaySession { cached.stmt.reset(); let values = self.generator.replay_values( &cached.info, - change_type, + DatabaseChangeType::Delete, change.id, before, None, @@ -600,7 +600,7 @@ impl DatabaseReplaySession { cached.stmt.reset(); let values = self.generator.replay_values( &cached.info, - change_type, + DatabaseChangeType::Insert, change.id, after, None, @@ -643,7 +643,7 @@ impl DatabaseReplaySession { table, columns ); - let info = self.generator.insert_query(coro, table, columns).await?; + let info = self.generator.upsert_query(coro, table, columns).await?; let stmt = self.conn.prepare(&info.query)?; self.cached_insert_stmt .insert(key.clone(), CachedStmt { stmt, info }); From 66de28d84b92757373ff974e9df0fb9282d33eb6 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 17 Sep 2025 10:38:05 +0400 Subject: [PATCH 11/78] wip --- Cargo.lock | 1 + .../examples/browser/package-lock.json | 335 +++ .../examples/drizzle/package-lock.json | 626 ++++++ bindings/javascript/package-lock.json | 1934 ++++++++++++++++- .../packages/browser/promise.test.ts | 1 + .../javascript/packages/common/async-lock.ts | 29 + bindings/javascript/packages/common/index.ts | 3 +- .../javascript/packages/common/promise.ts | 154 +- .../javascript/packages/native/package.json | 3 + .../packages/native/promise.test.ts | 20 + bindings/javascript/sync/Cargo.toml | 3 +- bindings/javascript/sync/packages/browser/a | Bin 0 -> 57344 bytes .../javascript/sync/packages/browser/a-shm | Bin 0 -> 32768 bytes .../sync/packages/browser/package.json | 2 +- .../sync/packages/browser/promise.test.ts | 99 + .../sync/packages/browser/promise.ts | 17 +- .../javascript/sync/packages/common/index.ts | 4 +- .../sync/packages/common/package.json | 3 + .../javascript/sync/packages/common/run.ts | 68 + .../sync/packages/common/tsconfig.json | 3 +- .../javascript/sync/packages/common/types.ts | 2 +- .../sync/packages/native/index.d.ts | 9 +- .../javascript/sync/packages/native/index.js | 3 +- bindings/javascript/sync/packages/native/log | 14 + .../sync/packages/native/package.json | 2 +- .../sync/packages/native/promise.test.ts | 115 +- .../sync/packages/native/promise.ts | 16 +- bindings/javascript/sync/src/generator.rs | 12 +- bindings/javascript/sync/src/lib.rs | 94 +- bindings/javascript/yarn.lock | 793 ++++++- 30 files changed, 4175 insertions(+), 190 deletions(-) create mode 100644 bindings/javascript/examples/browser/package-lock.json create mode 100644 bindings/javascript/examples/drizzle/package-lock.json create mode 100644 bindings/javascript/packages/common/async-lock.ts create mode 100755 bindings/javascript/sync/packages/browser/a create mode 100755 bindings/javascript/sync/packages/browser/a-shm create mode 100644 bindings/javascript/sync/packages/native/log diff --git a/Cargo.lock b/Cargo.lock index 9e2bb330d..f42165992 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4326,6 +4326,7 @@ dependencies = [ "napi", "napi-build", 
"napi-derive", + "tracing", "tracing-subscriber", "turso_core", "turso_node", diff --git a/bindings/javascript/examples/browser/package-lock.json b/bindings/javascript/examples/browser/package-lock.json new file mode 100644 index 000000000..24eade16e --- /dev/null +++ b/bindings/javascript/examples/browser/package-lock.json @@ -0,0 +1,335 @@ +{ + "name": "wasm", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "wasm", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@tursodatabase/database-browser": "../../packages/browser" + }, + "devDependencies": { + "vite": "^7.1.4" + } + }, + "../../packages/browser": { + "name": "@tursodatabase/database-browser", + "version": "0.1.5", + "license": "MIT", + "dependencies": { + "@napi-rs/wasm-runtime": "^1.0.3", + "@tursodatabase/database-browser-common": "^0.1.5", + "@tursodatabase/database-common": "^0.1.5" + }, + "devDependencies": { + "@napi-rs/cli": "^3.1.5", + "@vitest/browser": "^3.2.4", + "playwright": "^1.55.0", + "typescript": "^5.9.2", + "vitest": "^3.2.4" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.9", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.50.0", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.50.0", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@tursodatabase/database-browser": { + "resolved": "../../packages/browser", + "link": true + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "dev": true, + "license": "MIT" + }, + "node_modules/esbuild": { + "version": "0.25.9", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.9", + "@esbuild/android-arm": "0.25.9", + "@esbuild/android-arm64": "0.25.9", + "@esbuild/android-x64": "0.25.9", + "@esbuild/darwin-arm64": "0.25.9", + "@esbuild/darwin-x64": "0.25.9", + "@esbuild/freebsd-arm64": "0.25.9", + "@esbuild/freebsd-x64": "0.25.9", + "@esbuild/linux-arm": "0.25.9", + "@esbuild/linux-arm64": "0.25.9", + "@esbuild/linux-ia32": "0.25.9", + "@esbuild/linux-loong64": "0.25.9", + "@esbuild/linux-mips64el": "0.25.9", + "@esbuild/linux-ppc64": "0.25.9", + "@esbuild/linux-riscv64": "0.25.9", + "@esbuild/linux-s390x": "0.25.9", + "@esbuild/linux-x64": "0.25.9", + "@esbuild/netbsd-arm64": "0.25.9", + "@esbuild/netbsd-x64": "0.25.9", + "@esbuild/openbsd-arm64": "0.25.9", + "@esbuild/openbsd-x64": "0.25.9", + "@esbuild/openharmony-arm64": "0.25.9", + "@esbuild/sunos-x64": "0.25.9", + "@esbuild/win32-arm64": "0.25.9", + "@esbuild/win32-ia32": "0.25.9", + "@esbuild/win32-x64": "0.25.9" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + 
"node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/rollup": { + "version": "4.50.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.50.0", + "@rollup/rollup-android-arm64": "4.50.0", + "@rollup/rollup-darwin-arm64": "4.50.0", + "@rollup/rollup-darwin-x64": "4.50.0", + "@rollup/rollup-freebsd-arm64": "4.50.0", + "@rollup/rollup-freebsd-x64": "4.50.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.50.0", + "@rollup/rollup-linux-arm-musleabihf": "4.50.0", + "@rollup/rollup-linux-arm64-gnu": "4.50.0", + "@rollup/rollup-linux-arm64-musl": "4.50.0", + "@rollup/rollup-linux-loongarch64-gnu": "4.50.0", + "@rollup/rollup-linux-ppc64-gnu": "4.50.0", + "@rollup/rollup-linux-riscv64-gnu": "4.50.0", + "@rollup/rollup-linux-riscv64-musl": "4.50.0", + "@rollup/rollup-linux-s390x-gnu": "4.50.0", + "@rollup/rollup-linux-x64-gnu": "4.50.0", + "@rollup/rollup-linux-x64-musl": "4.50.0", + "@rollup/rollup-openharmony-arm64": "4.50.0", + "@rollup/rollup-win32-arm64-msvc": "4.50.0", + "@rollup/rollup-win32-ia32-msvc": "4.50.0", + "@rollup/rollup-win32-x64-msvc": "4.50.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.14", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.4.4", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/vite": { + "version": "7.1.4", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.14" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + 
"sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + } + } +} diff --git a/bindings/javascript/examples/drizzle/package-lock.json b/bindings/javascript/examples/drizzle/package-lock.json new file mode 100644 index 000000000..7867fc918 --- /dev/null +++ b/bindings/javascript/examples/drizzle/package-lock.json @@ -0,0 +1,626 @@ +{ + "name": "drizzle", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "drizzle", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@tursodatabase/database": "../../packages/native", + "better-sqlite3": "^12.2.0", + "drizzle-orm": "^0.44.3" + } + }, + "../..": { + "version": "0.1.5", + "workspaces": [ + "packages/common", + "packages/native", + "packages/browser", + "packages/browser-common", + "sync/packages/common", + "sync/packages/native", + "sync/packages/browser" + ] + }, + "../../packages/browser": { + "name": "@tursodatabase/database-browser", + "version": "0.1.5", + "extraneous": true, + "license": "MIT", + "dependencies": { + "@napi-rs/wasm-runtime": "^1.0.3", + "@tursodatabase/database-browser-common": "^0.1.5", + "@tursodatabase/database-common": "^0.1.5" + }, + "devDependencies": { + "@napi-rs/cli": "^3.1.5", + "@vitest/browser": "^3.2.4", + "playwright": "^1.55.0", + "typescript": "^5.9.2", + "vitest": "^3.2.4" + } + }, + "../../packages/browser-common": { + "name": "@tursodatabase/database-browser-common", + "version": "0.1.5", + "extraneous": true, + "license": "MIT", + "devDependencies": { + "typescript": "^5.9.2" + } + }, + "../../packages/common": { + "name": "@tursodatabase/database-common", + "version": "0.1.5", + "extraneous": true, + "license": "MIT", + "devDependencies": { + "typescript": "^5.9.2" + } + }, + "../../packages/native": { + "name": "@tursodatabase/database", + "version": "0.1.5", + "license": "MIT", + "dependencies": { + "@tursodatabase/database-common": "^0.1.5" + }, + "devDependencies": { + "@napi-rs/cli": "^3.1.5", + "@types/node": "^24.3.1", + "typescript": "^5.9.2", + "vitest": "^3.2.4" + } + }, + "../../sync/packages/browser": { + "name": "@tursodatabase/sync-browser", + "version": "0.1.5", + "extraneous": true, + "license": "MIT", + "dependencies": { + "@napi-rs/wasm-runtime": "^1.0.3", + "@tursodatabase/database-browser-common": "^0.1.5", + "@tursodatabase/database-common": "^0.1.5", + "@tursodatabase/sync-common": "^0.1.5" + }, + "devDependencies": { + "@napi-rs/cli": "^3.1.5", + "@vitest/browser": "^3.2.4", + "playwright": "^1.55.0", + "typescript": "^5.9.2", + "vitest": "^3.2.4" + } + }, + "../../sync/packages/common": { + "name": "@tursodatabase/sync-common", + "version": "0.1.5", + "extraneous": true, + "license": "MIT", + "devDependencies": { + "typescript": "^5.9.2" + } + }, + "../../sync/packages/native": { + "name": "@tursodatabase/sync", + "version": "0.1.5", + "extraneous": true, + "license": "MIT", + "dependencies": { + "@tursodatabase/database-common": "^0.1.5", + "@tursodatabase/sync-common": "^0.1.5" + }, + "devDependencies": { + "@napi-rs/cli": "^3.1.5", + "@types/node": "^24.3.1", + "typescript": "^5.9.2", + "vitest": "^3.2.4" + } + }, + "node_modules/@tursodatabase/database": { + "resolved": "../../packages/native", + "link": true + }, + "node_modules/base64-js": { + "version": "1.5.1", + "funding": [ + { + "type": "github", + "url": 
"https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/better-sqlite3": { + "version": "12.2.0", + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "bindings": "^1.5.0", + "prebuild-install": "^7.1.1" + }, + "engines": { + "node": "20.x || 22.x || 23.x || 24.x" + } + }, + "node_modules/bindings": { + "version": "1.5.0", + "license": "MIT", + "dependencies": { + "file-uri-to-path": "1.0.0" + } + }, + "node_modules/bl": { + "version": "4.1.0", + "license": "MIT", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/chownr": { + "version": "1.1.4", + "license": "ISC" + }, + "node_modules/decompress-response": { + "version": "6.0.0", + "license": "MIT", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/detect-libc": { + "version": "2.0.4", + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/drizzle-orm": { + "version": "0.44.4", + "license": "Apache-2.0", + "peerDependencies": { + "@aws-sdk/client-rds-data": ">=3", + "@cloudflare/workers-types": ">=4", + "@electric-sql/pglite": ">=0.2.0", + "@libsql/client": ">=0.10.0", + "@libsql/client-wasm": ">=0.10.0", + "@neondatabase/serverless": ">=0.10.0", + "@op-engineering/op-sqlite": ">=2", + "@opentelemetry/api": "^1.4.1", + "@planetscale/database": ">=1.13", + "@prisma/client": "*", + "@tidbcloud/serverless": "*", + "@types/better-sqlite3": "*", + "@types/pg": "*", + "@types/sql.js": "*", + "@upstash/redis": ">=1.34.7", + "@vercel/postgres": ">=0.8.0", + "@xata.io/client": "*", + "better-sqlite3": ">=7", + "bun-types": "*", + "expo-sqlite": ">=14.0.0", + "gel": ">=2", + "knex": "*", + "kysely": "*", + "mysql2": ">=2", + "pg": ">=8", + "postgres": ">=3", + "sql.js": ">=1", + "sqlite3": ">=5" + }, + "peerDependenciesMeta": { + "@aws-sdk/client-rds-data": { + "optional": true + }, + "@cloudflare/workers-types": { + "optional": true + }, + "@electric-sql/pglite": { + "optional": true + }, + "@libsql/client": { + "optional": true + }, + "@libsql/client-wasm": { + "optional": true + }, + "@neondatabase/serverless": { + "optional": true + }, + "@op-engineering/op-sqlite": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@planetscale/database": { + "optional": true + }, + "@prisma/client": { + "optional": true + }, + "@tidbcloud/serverless": { + "optional": true + }, + "@types/better-sqlite3": { + "optional": true + }, + "@types/pg": { + "optional": true + }, + "@types/sql.js": { + "optional": true + }, + "@upstash/redis": { + "optional": true + }, + "@vercel/postgres": { + "optional": true + }, + "@xata.io/client": { + "optional": true + }, + "better-sqlite3": { + "optional": true + }, + "bun-types": { + "optional": 
true + }, + "expo-sqlite": { + "optional": true + }, + "gel": { + "optional": true + }, + "knex": { + "optional": true + }, + "kysely": { + "optional": true + }, + "mysql2": { + "optional": true + }, + "pg": { + "optional": true + }, + "postgres": { + "optional": true + }, + "prisma": { + "optional": true + }, + "sql.js": { + "optional": true + }, + "sqlite3": { + "optional": true + } + } + }, + "node_modules/end-of-stream": { + "version": "1.4.5", + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/expand-template": { + "version": "2.0.3", + "license": "(MIT OR WTFPL)", + "engines": { + "node": ">=6" + } + }, + "node_modules/file-uri-to-path": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/fs-constants": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/github-from-package": { + "version": "0.0.0", + "license": "MIT" + }, + "node_modules/ieee754": { + "version": "1.2.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/inherits": { + "version": "2.0.4", + "license": "ISC" + }, + "node_modules/ini": { + "version": "1.3.8", + "license": "ISC" + }, + "node_modules/mimic-response": { + "version": "3.1.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mkdirp-classic": { + "version": "0.5.3", + "license": "MIT" + }, + "node_modules/napi-build-utils": { + "version": "2.0.0", + "license": "MIT" + }, + "node_modules/node-abi": { + "version": "3.75.0", + "license": "MIT", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/once": { + "version": "1.4.0", + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/prebuild-install": { + "version": "7.1.3", + "license": "MIT", + "dependencies": { + "detect-libc": "^2.0.0", + "expand-template": "^2.0.3", + "github-from-package": "0.0.0", + "minimist": "^1.2.3", + "mkdirp-classic": "^0.5.3", + "napi-build-utils": "^2.0.0", + "node-abi": "^3.3.0", + "pump": "^3.0.0", + "rc": "^1.2.7", + "simple-get": "^4.0.0", + "tar-fs": "^2.0.0", + "tunnel-agent": "^0.6.0" + }, + "bin": { + "prebuild-install": "bin.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/pump": { + "version": "3.0.3", + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + 
} + ], + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.2", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/simple-concat": { + "version": "1.0.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/simple-get": { + "version": "4.0.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "decompress-response": "^6.0.0", + "once": "^1.3.1", + "simple-concat": "^1.0.0" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/strip-json-comments": { + "version": "2.0.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/tar-fs": { + "version": "2.1.3", + "license": "MIT", + "dependencies": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", + "pump": "^3.0.0", + "tar-stream": "^2.1.4" + } + }, + "node_modules/tar-stream": { + "version": "2.2.0", + "license": "MIT", + "dependencies": { + "bl": "^4.0.3", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "license": "MIT" + }, + "node_modules/wrappy": { + "version": "1.0.2", + "license": "ISC" + } + } +} diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json index e0bc3a671..3df161dd7 100644 --- a/bindings/javascript/package-lock.json +++ b/bindings/javascript/package-lock.json @@ -59,6 +59,13 @@ "node": ">=6.9.0" } }, + "node_modules/@drizzle-team/brocli": { + "version": "0.10.2", + "resolved": "https://registry.npmjs.org/@drizzle-team/brocli/-/brocli-0.10.2.tgz", + "integrity": "sha512-z33Il7l5dKjUgGULTqBsQBQwckHh5AbIuxhdsIxDDiZAzBOrZO6q9ogcWC65kU382AfynTfgNumVcNIjuIua6w==", + "dev": true, + "license": "Apache-2.0" + }, "node_modules/@emnapi/core": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.4.5.tgz", @@ -87,6 +94,238 @@ "tslib": "^2.4.0" } }, + "node_modules/@esbuild-kit/core-utils": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/@esbuild-kit/core-utils/-/core-utils-3.3.2.tgz", + "integrity": "sha512-sPRAnw9CdSsRmEtnsl2WXWdyquogVpB3yZ3dgwJfe8zrOzTsV7cJvmwrKVa+0ma5BoiGJ+BoqkMvawbayKUsqQ==", + "deprecated": "Merged into tsx: https://tsx.is", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "~0.18.20", + "source-map-support": "^0.5.21" + } + }, + "node_modules/@esbuild-kit/core-utils/node_modules/@esbuild/linux-arm64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.18.20.tgz", + "integrity": "sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + 
"linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild-kit/core-utils/node_modules/esbuild": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.18.20.tgz", + "integrity": "sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.18.20", + "@esbuild/android-arm64": "0.18.20", + "@esbuild/android-x64": "0.18.20", + "@esbuild/darwin-arm64": "0.18.20", + "@esbuild/darwin-x64": "0.18.20", + "@esbuild/freebsd-arm64": "0.18.20", + "@esbuild/freebsd-x64": "0.18.20", + "@esbuild/linux-arm": "0.18.20", + "@esbuild/linux-arm64": "0.18.20", + "@esbuild/linux-ia32": "0.18.20", + "@esbuild/linux-loong64": "0.18.20", + "@esbuild/linux-mips64el": "0.18.20", + "@esbuild/linux-ppc64": "0.18.20", + "@esbuild/linux-riscv64": "0.18.20", + "@esbuild/linux-s390x": "0.18.20", + "@esbuild/linux-x64": "0.18.20", + "@esbuild/netbsd-x64": "0.18.20", + "@esbuild/openbsd-x64": "0.18.20", + "@esbuild/sunos-x64": "0.18.20", + "@esbuild/win32-arm64": "0.18.20", + "@esbuild/win32-ia32": "0.18.20", + "@esbuild/win32-x64": "0.18.20" + } + }, + "node_modules/@esbuild-kit/esm-loader": { + "version": "2.6.5", + "resolved": "https://registry.npmjs.org/@esbuild-kit/esm-loader/-/esm-loader-2.6.5.tgz", + "integrity": "sha512-FxEMIkJKnodyA1OaCUoEvbYRkoZlLZ4d/eXFu9Fh8CbBBgP5EmZxrfTRyN0qpXZ4vOvqnE5YdRdcrmUUXuU+dA==", + "deprecated": "Merged into tsx: https://tsx.is", + "dev": true, + "license": "MIT", + "dependencies": { + "@esbuild-kit/core-utils": "^3.3.2", + "get-tsconfig": "^4.7.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.9.tgz", + "integrity": "sha512-OaGtL73Jck6pBKjNIe24BnFE6agGl+6KxDtTfHhy1HmhthfKouEcOhqpSL64K4/0WCtbKFLOdzD/44cJ4k9opA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.18.20.tgz", + "integrity": "sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.18.20.tgz", + "integrity": "sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.18.20.tgz", + "integrity": "sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.18.20", + "resolved": 
"https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz", + "integrity": "sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.18.20.tgz", + "integrity": "sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.20.tgz", + "integrity": "sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.18.20.tgz", + "integrity": "sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.18.20.tgz", + "integrity": "sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, "node_modules/@esbuild/linux-arm64": { "version": "0.25.9", "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.9.tgz", @@ -104,6 +343,278 @@ "node": ">=18" } }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.18.20.tgz", + "integrity": "sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.18.20.tgz", + "integrity": "sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.18.20.tgz", + "integrity": "sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.18.20", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.18.20.tgz", + "integrity": "sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.18.20.tgz", + "integrity": "sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.18.20.tgz", + "integrity": "sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.18.20.tgz", + "integrity": "sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.9.tgz", + "integrity": "sha512-9jNJl6FqaUG+COdQMjSCGW4QiMHH88xWbvZ+kRVblZsWrkXlABuGdFJ1E9L7HK+T0Yqd4akKNa/lO0+jDxQD4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.18.20.tgz", + "integrity": "sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.9.tgz", + "integrity": "sha512-YaFBlPGeDasft5IIM+CQAhJAqS3St3nJzDEgsgFixcfZeyGPCd6eJBWzke5piZuZ7CtL656eOSYKk4Ls2C0FRQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.18.20.tgz", + "integrity": "sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.9.tgz", + "integrity": "sha512-4Xd0xNiMVXKh6Fa7HEJQbrpP3m3DDn43jKxMjxLLRjWnRsfxjORYJlXPO4JNcXtOyfajXorRKY9NkOpTHptErg==", + 
"cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.18.20.tgz", + "integrity": "sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.18.20.tgz", + "integrity": "sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.18.20.tgz", + "integrity": "sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.18.20.tgz", + "integrity": "sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, "node_modules/@inquirer/checkbox": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/@inquirer/checkbox/-/checkbox-4.2.0.tgz", @@ -524,6 +1035,209 @@ "dev": true, "license": "MIT" }, + "node_modules/@libsql/client": { + "version": "0.15.15", + "resolved": "https://registry.npmjs.org/@libsql/client/-/client-0.15.15.tgz", + "integrity": "sha512-twC0hQxPNHPKfeOv3sNT6u2pturQjLcI+CnpTM0SjRpocEGgfiZ7DWKXLNnsothjyJmDqEsBQJ5ztq9Wlu470w==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@libsql/core": "^0.15.14", + "@libsql/hrana-client": "^0.7.0", + "js-base64": "^3.7.5", + "libsql": "^0.5.22", + "promise-limit": "^2.7.0" + } + }, + "node_modules/@libsql/core": { + "version": "0.15.15", + "resolved": "https://registry.npmjs.org/@libsql/core/-/core-0.15.15.tgz", + "integrity": "sha512-C88Z6UKl+OyuKKPwz224riz02ih/zHYI3Ho/LAcVOgjsunIRZoBw7fjRfaH9oPMmSNeQfhGklSG2il1URoOIsA==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "js-base64": "^3.7.5" + } + }, + "node_modules/@libsql/darwin-arm64": { + "version": "0.5.22", + "resolved": "https://registry.npmjs.org/@libsql/darwin-arm64/-/darwin-arm64-0.5.22.tgz", + "integrity": "sha512-4B8ZlX3nIDPndfct7GNe0nI3Yw6ibocEicWdC4fvQbSs/jdq/RC2oCsoJxJ4NzXkvktX70C1J4FcmmoBy069UA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "peer": true + }, + "node_modules/@libsql/darwin-x64": { + "version": "0.5.22", + "resolved": "https://registry.npmjs.org/@libsql/darwin-x64/-/darwin-x64-0.5.22.tgz", + "integrity": "sha512-ny2HYWt6lFSIdNFzUFIJ04uiW6finXfMNJ7wypkAD8Pqdm6nAByO+Fdqu8t7sD0sqJGeUCiOg480icjyQ2/8VA==", + "cpu": [ + "x64" + ], + "dev": 
true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "peer": true + }, + "node_modules/@libsql/hrana-client": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/@libsql/hrana-client/-/hrana-client-0.7.0.tgz", + "integrity": "sha512-OF8fFQSkbL7vJY9rfuegK1R7sPgQ6kFMkDamiEccNUvieQ+3urzfDFI616oPl8V7T9zRmnTkSjMOImYCAVRVuw==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@libsql/isomorphic-fetch": "^0.3.1", + "@libsql/isomorphic-ws": "^0.1.5", + "js-base64": "^3.7.5", + "node-fetch": "^3.3.2" + } + }, + "node_modules/@libsql/isomorphic-fetch": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@libsql/isomorphic-fetch/-/isomorphic-fetch-0.3.1.tgz", + "integrity": "sha512-6kK3SUK5Uu56zPq/Las620n5aS9xJq+jMBcNSOmjhNf/MUvdyji4vrMTqD7ptY7/4/CAVEAYDeotUz60LNQHtw==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@libsql/isomorphic-ws": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/@libsql/isomorphic-ws/-/isomorphic-ws-0.1.5.tgz", + "integrity": "sha512-DtLWIH29onUYR00i0GlQ3UdcTRC6EP4u9w/h9LxpUZJWRMARk6dQwZ6Jkd+QdwVpuAOrdxt18v0K2uIYR3fwFg==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@types/ws": "^8.5.4", + "ws": "^8.13.0" + } + }, + "node_modules/@libsql/linux-arm-gnueabihf": { + "version": "0.5.22", + "resolved": "https://registry.npmjs.org/@libsql/linux-arm-gnueabihf/-/linux-arm-gnueabihf-0.5.22.tgz", + "integrity": "sha512-3Uo3SoDPJe/zBnyZKosziRGtszXaEtv57raWrZIahtQDsjxBVjuzYQinCm9LRCJCUT5t2r5Z5nLDPJi2CwZVoA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "peer": true + }, + "node_modules/@libsql/linux-arm-musleabihf": { + "version": "0.5.22", + "resolved": "https://registry.npmjs.org/@libsql/linux-arm-musleabihf/-/linux-arm-musleabihf-0.5.22.tgz", + "integrity": "sha512-LCsXh07jvSojTNJptT9CowOzwITznD+YFGGW+1XxUr7fS+7/ydUrpDfsMX7UqTqjm7xG17eq86VkWJgHJfvpNg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "peer": true + }, + "node_modules/@libsql/linux-arm64-gnu": { + "version": "0.5.22", + "resolved": "https://registry.npmjs.org/@libsql/linux-arm64-gnu/-/linux-arm64-gnu-0.5.22.tgz", + "integrity": "sha512-KSdnOMy88c9mpOFKUEzPskSaF3VLflfSUCBwas/pn1/sV3pEhtMF6H8VUCd2rsedwoukeeCSEONqX7LLnQwRMA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "peer": true + }, + "node_modules/@libsql/linux-arm64-musl": { + "version": "0.5.22", + "resolved": "https://registry.npmjs.org/@libsql/linux-arm64-musl/-/linux-arm64-musl-0.5.22.tgz", + "integrity": "sha512-mCHSMAsDTLK5YH//lcV3eFEgiR23Ym0U9oEvgZA0667gqRZg/2px+7LshDvErEKv2XZ8ixzw3p1IrBzLQHGSsw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "peer": true + }, + "node_modules/@libsql/linux-x64-gnu": { + "version": "0.5.22", + "resolved": "https://registry.npmjs.org/@libsql/linux-x64-gnu/-/linux-x64-gnu-0.5.22.tgz", + "integrity": "sha512-kNBHaIkSg78Y4BqAdgjcR2mBilZXs4HYkAmi58J+4GRwDQZh5fIUWbnQvB9f95DkWUIGVeenqLRFY2pcTmlsew==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "peer": true + }, + "node_modules/@libsql/linux-x64-musl": { + "version": "0.5.22", + "resolved": 
"https://registry.npmjs.org/@libsql/linux-x64-musl/-/linux-x64-musl-0.5.22.tgz", + "integrity": "sha512-UZ4Xdxm4pu3pQXjvfJiyCzZop/9j/eA2JjmhMaAhe3EVLH2g11Fy4fwyUp9sT1QJYR1kpc2JLuybPM0kuXv/Tg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "peer": true + }, + "node_modules/@libsql/win32-x64-msvc": { + "version": "0.5.22", + "resolved": "https://registry.npmjs.org/@libsql/win32-x64-msvc/-/win32-x64-msvc-0.5.22.tgz", + "integrity": "sha512-Fj0j8RnBpo43tVZUVoNK6BV/9AtDUM5S7DF3LB4qTYg1LMSZqi3yeCneUTLJD6XomQJlZzbI4mst89yspVSAnA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "peer": true + }, "node_modules/@napi-rs/cli": { "version": "3.1.5", "resolved": "https://registry.npmjs.org/@napi-rs/cli/-/cli-3.1.5.tgz", @@ -886,6 +1600,15 @@ "node": ">=14.0.0" } }, + "node_modules/@neon-rs/load": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/@neon-rs/load/-/load-0.0.4.tgz", + "integrity": "sha512-kTPhdZyTQxB+2wpiRcFWrDcejc4JI6tkPuS7UZCG4l6Zvc5kU/gGQ/ozvHTh1XR5tS+UlfAfGuPajjzQjCiHCw==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true + }, "node_modules/@octokit/auth-token": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-6.0.0.tgz", @@ -1198,6 +1921,18 @@ "undici-types": "~7.10.0" } }, + "node_modules/@types/ws": { + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", + "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@vitest/browser": { "version": "3.2.4", "resolved": "https://registry.npmjs.org/@vitest/browser/-/browser-3.2.4.tgz", @@ -1446,6 +2181,27 @@ "node": ">=12" } }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/before-after-hook": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-4.0.0.tgz", @@ -1453,14 +2209,74 @@ "dev": true, "license": "Apache-2.0" }, + "node_modules/better-sqlite3": { + "version": "12.2.0", + "resolved": "https://registry.npmjs.org/better-sqlite3/-/better-sqlite3-12.2.0.tgz", + "integrity": "sha512-eGbYq2CT+tos1fBwLQ/tkBt9J5M3JEHjku4hbvQUePCckkvVf14xWj+1m7dGoK81M/fOjFT7yM9UMeKT/+vFLQ==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "bindings": "^1.5.0", + "prebuild-install": "^7.1.1" + }, + "engines": { + "node": "20.x || 22.x || 23.x || 24.x" + } + }, + "node_modules/bindings": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", + "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "file-uri-to-path": "1.0.0" + } + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, "node_modules/buffer-from": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", "dev": true, - "license": "MIT", - "optional": true, - "peer": true + "license": "MIT" }, "node_modules/cac": { "version": "6.7.14", @@ -1506,6 +2322,13 @@ "node": ">= 16" } }, + "node_modules/chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "dev": true, + "license": "ISC" + }, "node_modules/cli-width": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", @@ -1568,6 +2391,18 @@ "optional": true, "peer": true }, + "node_modules/data-uri-to-buffer": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", + "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "engines": { + "node": ">= 12" + } + }, "node_modules/debug": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", @@ -1586,6 +2421,22 @@ } } }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/deep-eql": { "version": "5.0.2", "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", @@ -1596,6 +2447,16 @@ "node": ">=6" } }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, "node_modules/dequal": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", @@ -1606,6 +2467,16 @@ "node": ">=6" } }, + "node_modules/detect-libc": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.0.tgz", + "integrity": 
"sha512-vEtk+OcP7VBRtQZ1EJ3bdgzSfBjgnEalLTp5zjJrS+2Z1w2KZly4SBdac/WDU3hhsNAZ9E8SC96ME4Ey8MZ7cg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, "node_modules/dom-accessibility-api": { "version": "0.5.16", "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", @@ -1613,6 +2484,148 @@ "dev": true, "license": "MIT" }, + "node_modules/drizzle-kit": { + "version": "0.31.4", + "resolved": "https://registry.npmjs.org/drizzle-kit/-/drizzle-kit-0.31.4.tgz", + "integrity": "sha512-tCPWVZWZqWVx2XUsVpJRnH9Mx0ClVOf5YUHerZ5so1OKSlqww4zy1R5ksEdGRcO3tM3zj0PYN6V48TbQCL1RfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@drizzle-team/brocli": "^0.10.2", + "@esbuild-kit/esm-loader": "^2.5.5", + "esbuild": "^0.25.4", + "esbuild-register": "^3.5.0" + }, + "bin": { + "drizzle-kit": "bin.cjs" + } + }, + "node_modules/drizzle-orm": { + "version": "0.44.5", + "resolved": "https://registry.npmjs.org/drizzle-orm/-/drizzle-orm-0.44.5.tgz", + "integrity": "sha512-jBe37K7d8ZSKptdKfakQFdeljtu3P2Cbo7tJoJSVZADzIKOBo9IAJPOmMsH2bZl90bZgh8FQlD8BjxXA/zuBkQ==", + "dev": true, + "license": "Apache-2.0", + "peerDependencies": { + "@aws-sdk/client-rds-data": ">=3", + "@cloudflare/workers-types": ">=4", + "@electric-sql/pglite": ">=0.2.0", + "@libsql/client": ">=0.10.0", + "@libsql/client-wasm": ">=0.10.0", + "@neondatabase/serverless": ">=0.10.0", + "@op-engineering/op-sqlite": ">=2", + "@opentelemetry/api": "^1.4.1", + "@planetscale/database": ">=1.13", + "@prisma/client": "*", + "@tidbcloud/serverless": "*", + "@types/better-sqlite3": "*", + "@types/pg": "*", + "@types/sql.js": "*", + "@upstash/redis": ">=1.34.7", + "@vercel/postgres": ">=0.8.0", + "@xata.io/client": "*", + "better-sqlite3": ">=7", + "bun-types": "*", + "expo-sqlite": ">=14.0.0", + "gel": ">=2", + "knex": "*", + "kysely": "*", + "mysql2": ">=2", + "pg": ">=8", + "postgres": ">=3", + "sql.js": ">=1", + "sqlite3": ">=5" + }, + "peerDependenciesMeta": { + "@aws-sdk/client-rds-data": { + "optional": true + }, + "@cloudflare/workers-types": { + "optional": true + }, + "@electric-sql/pglite": { + "optional": true + }, + "@libsql/client": { + "optional": true + }, + "@libsql/client-wasm": { + "optional": true + }, + "@neondatabase/serverless": { + "optional": true + }, + "@op-engineering/op-sqlite": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@planetscale/database": { + "optional": true + }, + "@prisma/client": { + "optional": true + }, + "@tidbcloud/serverless": { + "optional": true + }, + "@types/better-sqlite3": { + "optional": true + }, + "@types/pg": { + "optional": true + }, + "@types/sql.js": { + "optional": true + }, + "@upstash/redis": { + "optional": true + }, + "@vercel/postgres": { + "optional": true + }, + "@xata.io/client": { + "optional": true + }, + "better-sqlite3": { + "optional": true + }, + "bun-types": { + "optional": true + }, + "expo-sqlite": { + "optional": true + }, + "gel": { + "optional": true + }, + "knex": { + "optional": true + }, + "kysely": { + "optional": true + }, + "mysql2": { + "optional": true + }, + "pg": { + "optional": true + }, + "postgres": { + "optional": true + }, + "prisma": { + "optional": true + }, + "sql.js": { + "optional": true + }, + "sqlite3": { + "optional": true + } + } + }, "node_modules/emnapi": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/emnapi/-/emnapi-1.4.5.tgz", @@ -1635,6 +2648,16 @@ "dev": true, "license": "MIT" }, + "node_modules/end-of-stream": { + "version": 
"1.4.5", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", + "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, "node_modules/es-module-lexer": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", @@ -1695,6 +2718,386 @@ "@esbuild/win32-x64": "0.25.9" } }, + "node_modules/esbuild-register": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/esbuild-register/-/esbuild-register-3.6.0.tgz", + "integrity": "sha512-H2/S7Pm8a9CL1uhp9OvjwrBh5Pvx0H8qVOxNu8Wed9Y7qv56MPtq+GGM8RJpq6glYJn9Wspr8uw7l55uyinNeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.3.4" + }, + "peerDependencies": { + "esbuild": ">=0.12 <1" + } + }, + "node_modules/esbuild/node_modules/@esbuild/android-arm": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.9.tgz", + "integrity": "sha512-5WNI1DaMtxQ7t7B6xa572XMXpHAaI/9Hnhk8lcxF4zVN4xstUgTlvuGDorBguKEnZO70qwEcLpfifMLoxiPqHQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/android-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.9.tgz", + "integrity": "sha512-IDrddSmpSv51ftWslJMvl3Q2ZT98fUSL2/rlUXuVqRXHCs5EUF1/f+jbjF5+NG9UffUDMCiTyh8iec7u8RlTLg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/android-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.9.tgz", + "integrity": "sha512-I853iMZ1hWZdNllhVZKm34f4wErd4lMyeV7BLzEExGEIZYsOzqDWDf+y082izYUE8gtJnYHdeDpN/6tUdwvfiw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/darwin-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.9.tgz", + "integrity": "sha512-XIpIDMAjOELi/9PB30vEbVMs3GV1v2zkkPnuyRRURbhqjyzIINwj+nbQATh4H9GxUgH1kFsEyQMxwiLFKUS6Rg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/darwin-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.9.tgz", + "integrity": "sha512-jhHfBzjYTA1IQu8VyrjCX4ApJDnH+ez+IYVEoJHeqJm9VhG9Dh2BYaJritkYK3vMaXrf7Ogr/0MQ8/MeIefsPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.9.tgz", + "integrity": "sha512-z93DmbnY6fX9+KdD4Ue/H6sYs+bhFQJNCPZsi4XWJoYblUqT06MQUdBCpcSfuiN72AbqeBFu5LVQTjfXDE2A6Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/esbuild/node_modules/@esbuild/freebsd-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.9.tgz", + "integrity": "sha512-mrKX6H/vOyo5v71YfXWJxLVxgy1kyt1MQaD8wZJgJfG4gq4DpQGpgTB74e5yBeQdyMTbgxp0YtNj7NuHN0PoZg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/linux-arm": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.9.tgz", + "integrity": "sha512-HBU2Xv78SMgaydBmdor38lg8YDnFKSARg1Q6AT0/y2ezUAKiZvc211RDFHlEZRFNRVhcMamiToo7bDx3VEOYQw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/linux-ia32": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.9.tgz", + "integrity": "sha512-e7S3MOJPZGp2QW6AK6+Ly81rC7oOSerQ+P8L0ta4FhVi+/j/v2yZzx5CqqDaWjtPFfYz21Vi1S0auHrap3Ma3A==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/linux-loong64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.9.tgz", + "integrity": "sha512-Sbe10Bnn0oUAB2AalYztvGcK+o6YFFA/9829PhOCUS9vkJElXGdphz0A3DbMdP8gmKkqPmPcMJmJOrI3VYB1JQ==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/linux-mips64el": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.9.tgz", + "integrity": "sha512-YcM5br0mVyZw2jcQeLIkhWtKPeVfAerES5PvOzaDxVtIyZ2NUBZKNLjC5z3/fUlDgT6w89VsxP2qzNipOaaDyA==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/linux-ppc64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.9.tgz", + "integrity": "sha512-++0HQvasdo20JytyDpFvQtNrEsAgNG2CY1CLMwGXfFTKGBGQT3bOeLSYE2l1fYdvML5KUuwn9Z8L1EWe2tzs1w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/linux-riscv64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.9.tgz", + "integrity": "sha512-uNIBa279Y3fkjV+2cUjx36xkx7eSjb8IvnL01eXUKXez/CBHNRw5ekCGMPM0BcmqBxBcdgUWuUXmVWwm4CH9kg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/linux-s390x": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.9.tgz", + "integrity": "sha512-Mfiphvp3MjC/lctb+7D287Xw1DGzqJPb/J2aHHcHxflUo+8tmN/6d4k6I2yFR7BVo5/g7x2Monq4+Yew0EHRIA==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/esbuild/node_modules/@esbuild/linux-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.9.tgz", + "integrity": "sha512-iSwByxzRe48YVkmpbgoxVzn76BXjlYFXC7NvLYq+b+kDjyyk30J0JY47DIn8z1MO3K0oSl9fZoRmZPQI4Hklzg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/netbsd-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.9.tgz", + "integrity": "sha512-RLLdkflmqRG8KanPGOU7Rpg829ZHu8nFy5Pqdi9U01VYtG9Y0zOG6Vr2z4/S+/3zIyOxiK6cCeYNWOFR9QP87g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/openbsd-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.9.tgz", + "integrity": "sha512-1MkgTCuvMGWuqVtAvkpkXFmtL8XhWy+j4jaSO2wxfJtilVCi0ZE37b8uOdMItIHz4I6z1bWWtEX4CJwcKYLcuA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/sunos-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.9.tgz", + "integrity": "sha512-WjH4s6hzo00nNezhp3wFIAfmGZ8U7KtrJNlFMRKxiI9mxEK1scOMAaa9i4crUtu+tBr+0IN6JCuAcSBJZfnphw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/win32-arm64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.9.tgz", + "integrity": "sha512-mGFrVJHmZiRqmP8xFOc6b84/7xa5y5YvR1x8djzXpJBSv/UsNK6aqec+6JDjConTgvvQefdGhFDAs2DLAds6gQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/win32-ia32": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.9.tgz", + "integrity": "sha512-b33gLVU2k11nVx1OhX3C8QQP6UHQK4ZtN56oFWvVXvz2VkDoe6fbG8TOgHFxEvqeqohmRnIHe5A1+HADk4OQww==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/esbuild/node_modules/@esbuild/win32-x64": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.9.tgz", + "integrity": "sha512-PPOl1mi6lpLNQxnGoyAfschAodRFYXJ+9fs6WHXz7CSWKbOqiMZsubC+BQsVKuul+3vKLuwTHsS2c2y9EoKwxQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/expand-template": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", + "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", + "dev": true, + "license": "(MIT OR WTFPL)", + "engines": { + "node": ">=6" + } + }, "node_modules/expect-type": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", @@ -1755,6 +3158,39 @@ } } }, + "node_modules/fetch-blob": { + 
"version": "3.2.0", + "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", + "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "paypal", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20 || >= 14.13" + } + }, + "node_modules/file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "dev": true, + "license": "MIT" + }, "node_modules/find-up": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-7.0.0.tgz", @@ -1786,6 +3222,28 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/formdata-polyfill": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", + "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "fetch-blob": "^3.1.2" + }, + "engines": { + "node": ">=12.20.0" + } + }, + "node_modules/fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", + "dev": true, + "license": "MIT" + }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", @@ -1801,6 +3259,26 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, + "node_modules/get-tsconfig": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.10.1.tgz", + "integrity": "sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/github-from-package": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", + "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", + "dev": true, + "license": "MIT" + }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -1814,6 +3292,41 @@ "node": ">=0.10.0" } }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": 
"https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true, + "license": "ISC" + }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -1824,6 +3337,15 @@ "node": ">=8" } }, + "node_modules/js-base64": { + "version": "3.7.8", + "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-3.7.8.tgz", + "integrity": "sha512-hNngCeKxIUQiEUN3GPJOkz4wF/YvdUdbNL9hsBcMQTkKzboD7T/q3OYOuuPZLUE6dBxSGpwhk5mwuDud7JVAow==", + "dev": true, + "license": "BSD-3-Clause", + "optional": true, + "peer": true + }, "node_modules/js-tokens": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", @@ -1831,6 +3353,53 @@ "dev": true, "license": "MIT" }, + "node_modules/libsql": { + "version": "0.5.22", + "resolved": "https://registry.npmjs.org/libsql/-/libsql-0.5.22.tgz", + "integrity": "sha512-NscWthMQt7fpU8lqd7LXMvT9pi+KhhmTHAJWUB/Lj6MWa0MKFv0F2V4C6WKKpjCVZl0VwcDz4nOI3CyaT1DDiA==", + "cpu": [ + "x64", + "arm64", + "wasm32", + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin", + "linux", + "win32" + ], + "peer": true, + "dependencies": { + "@neon-rs/load": "^0.0.4", + "detect-libc": "2.0.2" + }, + "optionalDependencies": { + "@libsql/darwin-arm64": "0.5.22", + "@libsql/darwin-x64": "0.5.22", + "@libsql/linux-arm-gnueabihf": "0.5.22", + "@libsql/linux-arm-musleabihf": "0.5.22", + "@libsql/linux-arm64-gnu": "0.5.22", + "@libsql/linux-arm64-musl": "0.5.22", + "@libsql/linux-x64-gnu": "0.5.22", + "@libsql/linux-x64-musl": "0.5.22", + "@libsql/win32-x64-msvc": "0.5.22" + } + }, + "node_modules/libsql/node_modules/detect-libc": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", + "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", + "dev": true, + "license": "Apache-2.0", + "optional": true, + "peer": true, + "engines": { + "node": ">=8" + } + }, "node_modules/locate-path": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", @@ -1874,6 +3443,36 @@ "@jridgewell/sourcemap-codec": "^1.5.5" } }, + "node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mkdirp-classic": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", + "integrity": 
"sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "dev": true, + "license": "MIT" + }, "node_modules/mrmime": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.1.tgz", @@ -1920,6 +3519,80 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, + "node_modules/napi-build-utils": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-2.0.0.tgz", + "integrity": "sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-abi": { + "version": "3.77.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.77.0.tgz", + "integrity": "sha512-DSmt0OEcLoK4i3NuscSbGjOf3bqiDEutejqENSplMSFA/gmB8mkED9G4pKWnPl7MDU4rSHebKPHeitpDfyH0cQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "deprecated": "Use your platform's native DOMException instead", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "optional": true, + "peer": true, + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", + "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, "node_modules/os-tmpdir": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", @@ -2085,6 +3758,33 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/prebuild-install": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.3.tgz", + "integrity": "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-libc": "^2.0.0", + "expand-template": "^2.0.3", + "github-from-package": "0.0.0", + "minimist": "^1.2.3", + "mkdirp-classic": "^0.5.3", + "napi-build-utils": "^2.0.0", + "node-abi": "^3.3.0", + "pump": "^3.0.0", + "rc": "^1.2.7", + "simple-get": "^4.0.0", + "tar-fs": "^2.0.0", + "tunnel-agent": "^0.6.0" + }, + "bin": { + "prebuild-install": "bin.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/pretty-format": { "version": "27.5.1", "resolved": 
"https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", @@ -2100,6 +3800,42 @@ "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" } }, + "node_modules/promise-limit": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/promise-limit/-/promise-limit-2.7.0.tgz", + "integrity": "sha512-7nJ6v5lnJsXwGprnGXga4wx6d1POjvi5Qmf1ivTRxTjH4Z/9Czja/UCMLVmB9N93GeWOU93XaFaEt6jbuoagNw==", + "dev": true, + "license": "ISC", + "optional": true, + "peer": true + }, + "node_modules/pump": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz", + "integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dev": true, + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, "node_modules/react-is": { "version": "17.0.2", "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", @@ -2107,6 +3843,31 @@ "dev": true, "license": "MIT" }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, "node_modules/rollup": { "version": "4.50.1", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.50.1.tgz", @@ -2148,6 +3909,27 @@ "fsevents": "~2.3.2" } }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", @@ -2188,6 +3970,53 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/simple-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", + "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": 
"patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/simple-get": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", + "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "decompress-response": "^6.0.0", + "once": "^1.3.1", + "simple-concat": "^1.0.0" + } + }, "node_modules/sirv": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/sirv/-/sirv-3.0.2.tgz", @@ -2209,8 +4038,6 @@ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, "license": "BSD-3-Clause", - "optional": true, - "peer": true, "engines": { "node": ">=0.10.0" } @@ -2231,8 +4058,6 @@ "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", "dev": true, "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -2252,6 +4077,16 @@ "dev": true, "license": "MIT" }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, "node_modules/string-width": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", @@ -2280,6 +4115,16 @@ "node": ">=8" } }, + "node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/strip-literal": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.0.0.tgz", @@ -2293,6 +4138,36 @@ "url": "https://github.com/sponsors/antfu" } }, + "node_modules/tar-fs": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz", + "integrity": "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", + "pump": "^3.0.0", + "tar-stream": "^2.1.4" + } + }, + "node_modules/tar-stream": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", + "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "bl": "^4.0.3", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/terser": { "version": "5.44.0", "resolved": "https://registry.npmjs.org/terser/-/terser-5.44.0.tgz", @@ -2404,6 +4279,19 @@ 
"integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, "node_modules/typanion": { "version": "3.14.0", "resolved": "https://registry.npmjs.org/typanion/-/typanion-3.14.0.tgz", @@ -2442,6 +4330,13 @@ "dev": true, "license": "ISC" }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, "node_modules/vite": { "version": "7.1.5", "resolved": "https://registry.npmjs.org/vite/-/vite-7.1.5.tgz", @@ -2613,6 +4508,18 @@ } } }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "engines": { + "node": ">= 8" + } + }, "node_modules/why-is-node-running": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", @@ -2630,6 +4537,13 @@ "node": ">=8" } }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, "node_modules/ws": { "version": "8.18.3", "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", @@ -2724,6 +4638,9 @@ "devDependencies": { "@napi-rs/cli": "^3.1.5", "@types/node": "^24.3.1", + "better-sqlite3": "^12.2.0", + "drizzle-kit": "^0.31.4", + "drizzle-orm": "^0.44.5", "typescript": "^5.9.2", "vitest": "^3.2.4" } @@ -2773,6 +4690,9 @@ "name": "@tursodatabase/sync-common", "version": "0.2.0-pre.3", "license": "MIT", + "dependencies": { + "@tursodatabase/database-common": "^0.2.0-pre.3" + }, "devDependencies": { "typescript": "^5.9.2" } diff --git a/bindings/javascript/packages/browser/promise.test.ts b/bindings/javascript/packages/browser/promise.test.ts index 741e77276..0f9cebff8 100644 --- a/bindings/javascript/packages/browser/promise.test.ts +++ b/bindings/javascript/packages/browser/promise.test.ts @@ -52,6 +52,7 @@ test('on-disk db', async () => { db2.close(); }) +// attach is not supported in browser for now // test('attach', async () => { // const path1 = `test-${(Math.random() * 10000) | 0}.db`; // const path2 = `test-${(Math.random() * 10000) | 0}.db`; diff --git a/bindings/javascript/packages/common/async-lock.ts b/bindings/javascript/packages/common/async-lock.ts new file mode 100644 index 000000000..cf69a8f39 --- /dev/null +++ b/bindings/javascript/packages/common/async-lock.ts @@ -0,0 +1,29 @@ +export class AsyncLock { + locked: boolean; + queue: any[]; + constructor() { + this.locked = false; + this.queue = [] + } + async acquire() { + if (!this.locked) { + this.locked = true; + return Promise.resolve(); + } else { + 
const block = new Promise(resolve => { this.queue.push(resolve) }); + return block; + } + } + release() { + if (this.locked == false) { + throw new Error("invalid state: lock was already unlocked"); + } + const item = this.queue.shift(); + if (item != null) { + this.locked = true; + item(); + } else { + this.locked = false; + } + } +} \ No newline at end of file diff --git a/bindings/javascript/packages/common/index.ts b/bindings/javascript/packages/common/index.ts index 35e092d03..40a44b573 100644 --- a/bindings/javascript/packages/common/index.ts +++ b/bindings/javascript/packages/common/index.ts @@ -2,5 +2,6 @@ import { NativeDatabase, NativeStatement, DatabaseOpts } from "./types.js"; import { Database as DatabaseCompat, Statement as StatementCompat } from "./compat.js"; import { Database as DatabasePromise, Statement as StatementPromise } from "./promise.js"; import { SqliteError } from "./sqlite-error.js"; +import { AsyncLock } from "./async-lock.js"; -export { DatabaseCompat, StatementCompat, DatabasePromise, StatementPromise, NativeDatabase, NativeStatement, SqliteError, DatabaseOpts } +export { DatabaseCompat, StatementCompat, DatabasePromise, StatementPromise, NativeDatabase, NativeStatement, SqliteError, DatabaseOpts, AsyncLock } diff --git a/bindings/javascript/packages/common/promise.ts b/bindings/javascript/packages/common/promise.ts index f1a22260c..8dbcf5058 100644 --- a/bindings/javascript/packages/common/promise.ts +++ b/bindings/javascript/packages/common/promise.ts @@ -1,3 +1,4 @@ +import { AsyncLock } from "./async-lock.js"; import { bindParams } from "./bind.js"; import { SqliteError } from "./sqlite-error.js"; import { NativeDatabase, NativeStatement, STEP_IO, STEP_ROW, STEP_DONE, DatabaseOpts } from "./types.js"; @@ -32,6 +33,7 @@ class Database { db: NativeDatabase; memory: boolean; open: boolean; + execLock: AsyncLock; private _inTransaction: boolean = false; /** * Creates a new database connection. If the database file pointed to by `path` does not exists, it will be created. 
@@ -57,6 +59,7 @@ class Database { initialize(db: NativeDatabase, name, readonly) { this.db = db; this.memory = db.memory; + this.execLock = new AsyncLock(); Object.defineProperties(this, { inTransaction: { get: () => this._inTransaction, @@ -112,17 +115,22 @@ class Database { const db = this; const wrapTxn = (mode) => { return async (...bindParameters) => { - await db.exec("BEGIN " + mode); - db._inTransaction = true; + await this.execLock.acquire(); try { - const result = await fn(...bindParameters); - await db.exec("COMMIT"); - db._inTransaction = false; - return result; - } catch (err) { - await db.exec("ROLLBACK"); - db._inTransaction = false; - throw err; + await db.exec("BEGIN " + mode); + db._inTransaction = true; + try { + const result = await fn(...bindParameters); + await db.exec("COMMIT"); + db._inTransaction = false; + return result; + } catch (err) { + await db.exec("ROLLBACK"); + db._inTransaction = false; + throw err; + } + } finally { + this.execLock.release(); } }; }; @@ -195,6 +203,7 @@ class Database { throw new TypeError("The database connection is not open"); } + await this.execLock.acquire(); try { const stmt = this.prepare(sql); try { @@ -204,6 +213,8 @@ class Database { } } catch (err) { throw convertError(err); + } finally { + this.execLock.release(); } } @@ -302,25 +313,30 @@ class Statement { this.stmt.reset(); bindParams(this.stmt, bindParameters); - while (true) { - const stepResult = this.stmt.stepSync(); - if (stepResult === STEP_IO) { - await this.db.db.ioLoopAsync(); - continue; - } - if (stepResult === STEP_DONE) { - break; - } - if (stepResult === STEP_ROW) { - // For run(), we don't need the row data, just continue - continue; + await this.db.execLock.acquire(); + try { + while (true) { + const stepResult = await this.stmt.stepSync(); + if (stepResult === STEP_IO) { + await this.db.db.ioLoopAsync(); + continue; + } + if (stepResult === STEP_DONE) { + break; + } + if (stepResult === STEP_ROW) { + // For run(), we don't need the row data, just continue + continue; + } } + + const lastInsertRowid = this.db.db.lastInsertRowid(); + const changes = this.db.db.totalChanges() === totalChangesBefore ? 0 : this.db.db.changes(); + + return { changes, lastInsertRowid }; + } finally { + this.db.execLock.release(); } - - const lastInsertRowid = this.db.db.lastInsertRowid(); - const changes = this.db.db.totalChanges() === totalChangesBefore ? 
0 : this.db.db.changes();
-
-    return { changes, lastInsertRowid };
   }
 
   /**
@@ -332,18 +348,23 @@
     this.stmt.reset();
     bindParams(this.stmt, bindParameters);
 
-    while (true) {
-      const stepResult = this.stmt.stepSync();
-      if (stepResult === STEP_IO) {
-        await this.db.db.ioLoopAsync();
-        continue;
-      }
-      if (stepResult === STEP_DONE) {
-        return undefined;
-      }
-      if (stepResult === STEP_ROW) {
-        return this.stmt.row();
+    await this.db.execLock.acquire();
+    try {
+      while (true) {
+        const stepResult = await this.stmt.stepSync();
+        if (stepResult === STEP_IO) {
+          await this.db.db.ioLoopAsync();
+          continue;
+        }
+        if (stepResult === STEP_DONE) {
+          return undefined;
+        }
+        if (stepResult === STEP_ROW) {
+          return this.stmt.row();
+        }
       }
+    } finally {
+      this.db.execLock.release();
     }
   }
 
@@ -356,18 +377,23 @@
     this.stmt.reset();
     bindParams(this.stmt, bindParameters);
 
-    while (true) {
-      const stepResult = this.stmt.stepSync();
-      if (stepResult === STEP_IO) {
-        await this.db.db.ioLoopAsync();
-        continue;
-      }
-      if (stepResult === STEP_DONE) {
-        break;
-      }
-      if (stepResult === STEP_ROW) {
-        yield this.stmt.row();
+    await this.db.execLock.acquire();
+    try {
+      while (true) {
+        const stepResult = await this.stmt.stepSync();
+        if (stepResult === STEP_IO) {
+          await this.db.db.ioLoopAsync();
+          continue;
+        }
+        if (stepResult === STEP_DONE) {
+          break;
+        }
+        if (stepResult === STEP_ROW) {
+          yield this.stmt.row();
+        }
       }
+    } finally {
+      this.db.execLock.release();
     }
   }
 
@@ -381,20 +407,26 @@
     bindParams(this.stmt, bindParameters);
 
     const rows: any[] = [];
-    while (true) {
-      const stepResult = this.stmt.stepSync();
-      if (stepResult === STEP_IO) {
-        await this.db.db.ioLoopAsync();
-        continue;
-      }
-      if (stepResult === STEP_DONE) {
-        break;
-      }
-      if (stepResult === STEP_ROW) {
-        rows.push(this.stmt.row());
+    await this.db.execLock.acquire();
+    try {
+      while (true) {
+        const stepResult = await this.stmt.stepSync();
+        if (stepResult === STEP_IO) {
+          await this.db.db.ioLoopAsync();
+          continue;
+        }
+        if (stepResult === STEP_DONE) {
+          break;
+        }
+        if (stepResult === STEP_ROW) {
+          rows.push(this.stmt.row());
+        }
       }
+      return rows;
+    }
+    finally {
+      this.db.execLock.release();
     }
-    return rows;
   }
 
   /**
diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json
index 81b8e092c..56888f616 100644
--- a/bindings/javascript/packages/native/package.json
+++ b/bindings/javascript/packages/native/package.json
@@ -22,6 +22,9 @@
   "devDependencies": {
     "@napi-rs/cli": "^3.1.5",
     "@types/node": "^24.3.1",
+    "better-sqlite3": "^12.2.0",
+    "drizzle-kit": "^0.31.4",
+    "drizzle-orm": "^0.44.5",
     "typescript": "^5.9.2",
     "vitest": "^3.2.4"
   },
diff --git a/bindings/javascript/packages/native/promise.test.ts b/bindings/javascript/packages/native/promise.test.ts
index d75e3728e..5819c8f39 100644
--- a/bindings/javascript/packages/native/promise.test.ts
+++ b/bindings/javascript/packages/native/promise.test.ts
@@ -1,6 +1,26 @@
 import { unlinkSync } from "node:fs";
 import { expect, test } from 'vitest'
 import { connect } from './promise.js'
+import { sql } from 'drizzle-orm';
+import { drizzle } from 'drizzle-orm/better-sqlite3';
+
+test('drizzle-orm', async () => {
+  const path = `test-${(Math.random() * 10000) | 0}.db`;
+  try {
+    const conn = await connect(path);
+    const db = drizzle(conn);
+    await db.run('CREATE TABLE t(x, y)');
+    let tasks = [];
+    for (let i = 0; i < 1234; i++) {
+      tasks.push(db.run(sql`INSERT INTO t VALUES (${i}, randomblob(${i} * 5))`))
+    }
+    await Promise.all(tasks);
+    
expect(await db.all("SELECT COUNT(*) as cnt FROM t")).toEqual([{ cnt: 1234 }]) + } finally { + unlinkSync(path); + unlinkSync(`${path}-wal`); + } +}) test('in-memory db', async () => { const db = await connect(":memory:"); diff --git a/bindings/javascript/sync/Cargo.toml b/bindings/javascript/sync/Cargo.toml index 2f3f3d177..00749c5d3 100644 --- a/bindings/javascript/sync/Cargo.toml +++ b/bindings/javascript/sync/Cargo.toml @@ -18,9 +18,10 @@ turso_core = { workspace = true } turso_node = { workspace = true } genawaiter = { version = "0.99.1", default-features = false } tracing-subscriber = { workspace = true } +tracing.workspace = true [build-dependencies] napi-build = "2.2.3" [features] -browser = ["turso_node/browser"] \ No newline at end of file +browser = ["turso_node/browser"] diff --git a/bindings/javascript/sync/packages/browser/a b/bindings/javascript/sync/packages/browser/a new file mode 100755 index 0000000000000000000000000000000000000000..8f5630c076b124960766842e96d6e1c9609bce89 GIT binary patch literal 57344 zcmeI*3z$`P**5T5d$02m5dk^KhzO_*409e+&LVy{2~gz1R2keeZWIuJ}F7hGAy@ zaJLuhd7kanV@6DCsLrmLIp@amhV1;T7FmfzR=@1*tgI|qe0Rlnh2Kp+CJMeu%|G}T zRnF=;|Lcx3KTEfdX1Ra!9a$|M-zHaO~jz;|6Ds>px(` z;Ou`qp4}Zko>rATZt%o$*`vo^aaI4Z*JfWi_}U)X_$@BVH`F#m&zG{M)mK$F&Ye9& z{A;SLd|ty$yj}K>->IzkKYnbwS7wdu)JeMWAD>S{bz=j5LG%Ot^Pf%h@Mk}n9@%(I zd|s1mtxcOYl7BXSyLoe~=gbA#XMP(zl-)flWncL9;AiT~Z>+}G+9SKMNA|*tE^^d1 zm$&({FAu+GS#K2GeLqW&{&eP=O`p!-|NQFaMqk-n`7@sm9{TK8^x5MThgdgr&VTt_ zdSow{R@G3~BfGA8T5Vke{{Whf;i8MIfAx9dgEjpL%vHTJO`oUVP14%*?JN1$;}dO| zH)rn5vbhWEE6Zk-&uu8HtShgttrkc1r9YAQ%op^fzbw0Z<&0_7^$lNq>a*s}m{C?+ zU7vl$$Z>;*4jwDMtmY5g{Ln>X5^9_3zF+o-@TDKPtoN7xR8sso;SbAkbFxfliQ?_{ zzUhr~f8(`v?{jON_nalp_WxGY;&+NqAU=Wk1mY8jPar;l_ypn;h)*Csf%pXC6ZnD? zC^6*Ol7xSyesvM8^jGN(GPA}^<)Ca)-`w4k~$ry{?1O+|I@^78!N zwXF^sdP-ZP~h1muDyb z>0_4W<(8CG6%^!DRn`>d6y#UbLa7BB!9FM0|~vd8Ijpl@;Z=m6esbg}K#PcX!cC)X#sZ`4zn@ zYHA8=a!PWms&Wd7^76&|mK5idR_7HKRTWp)6ckl?TB-c`k6Kw&nV(ynSCLaukz1Wp zP$7=Dq<3|3PHtZB+@iv~O7Z2EX06ssl+S4YmQYkF7a<(20Y7ZpUGaCK=;MP5xoPEmF5+}z^g zlFI7R((2;G7rbL}bx}!PesxZHbxo1@qbbeLDXA>a%PFcTFU`#>%`GS>Egw}PiFZtV z_7A2iH@C2`vUgQZab;COPC;IwIGy~8ik$M^g?Tl-t4hlAi+W2(qo%&;yq@JQb6dLG zoY&neybHZ=yYG6Fy%)aVFKql!d;;+a#3vA+Kzsu63B)H5pFn&9@d?By5TC&RdnZto zoc&M#2>mKI2+YsF+gG_GVSN7GzRFDn{qyhkRc;GtU+`{U;f{d%`FHy&|E5>I;N8B$ zT?F|H-t8;g1xS9uyM2Xw0Exon?0>p7^HuHuIO6ZEUN**K>7Oaz1k2cTPCJ zciwakIQyNSInOyicAjt^aW*>ZoHfq3oLii&FLG3&5(z}jc+v36M7tS#0iYn`>)T4^n} zmRbv}nO3b;W=*h0SwpS9RSlGa+E|{YnV*`c&6DOa^RRiq+-L4FcbMDEE#@Y3 zow?dvX)ZUHnhVUCX02IfPB2HAL(RTsvDwS)W_B{$n4YN_pBkr)lg2UQuyMfHXY4U{ z7~703#wKH(vD#Q^EH{=K3yhgYtx;x7Fh&_ejlM>)(aY#&bTZl)o}uZV>ZkRS`Z4{m zen8)+@6mVY+w?8^CVid0T3@Lz*O%%G^qG3CUZzjbN9jZLzIw6VOYf$4(%a~su4$iY zr?r#XG3~H+K-;J7(ROItv@O~uZJoASTd6JAmTC*MnOdz@rcKaBX+yQXTCvtk>!x+m z+Gw7psh_H+)syNm^{{$C-KXwRcc|OcE$Sw9ow{0GsV-NSsteSaYOPwPPEbdwL)E@& zvD!=Rrgl=>sGh1RpDL%7lgcsWuyR1zr|eO7DBF}R$|hx?n&-QZcA=SZc468 zu1>B@E>A8^E=bNy)+Wo66GVE%Yi6V`jHabT?ZSEiYiHK;S+iL?v7X1;k+lPBd)9WW z=d!kCZNu7{^&Hk#tY@>fWIcoC@#tV37_vkqb%$U1>pJM(~ z%zujcPci=~=0C;!r>pJM(~%)ih4`^>-3{QJzm&;0w$zt8;p z%)ih4`^>-3{QJzm&;0w$zt8;p%)ih4`^>-3{QJzm&;0w$zt8;p%)ih4`^>-3{QJzm z&;0w$zt8;p%)ih4`^>-3{QJzm&;0w$zt8;p%)ih4`^>-3{QJzm&;0w$zt8;p%)ih4 z`^>-3{QJzm&;0w$zt8;p%)ih4`^>-3{QJzm&;0w$zt8;p%)ih4`^>-3{QJzm&;0w$ zzt8;p%)ih4`^>-3{QJzm&;0w$zt8;p%)ih4`^>-3{QJzm&;0w$zt8;p%)ih4`^>-3 z{QJzm&;0w$zt8;p%)ih4`^>-3{QJzm&;0w$zt8;p%)ih4`^>-3{QJzm&;0w$zt8;p z%)ih4`^>-3{QJzm&;0w$zt8;p%)ih4`^>-3{QJzm&;0w$zt8;p%s=DL^fUa-KBLd% zGx*FsW6#tx^vpaX&%`tE%sb=Gv@`6?I-|~{Gw94YW6qQ_E-G%}3LBBRJ8GKkC}W5^UTgv=l# 
[base85-encoded GIT binary patch data omitted: remainder of the preceding accidentally committed binary test artifact]
literal 0
HcmV?d00001

diff --git a/bindings/javascript/sync/packages/browser/a-shm b/bindings/javascript/sync/packages/browser/a-shm
new file mode 100755
index 0000000000000000000000000000000000000000..7c251555d016a9c5a07e151b6fff76239b1cde71
GIT binary patch
literal 32768
[base85-encoded GIT binary patch data omitted: 32768-byte a-shm test artifact]
literal 0
HcmV?d00001

diff --git a/bindings/javascript/sync/packages/browser/package.json b/bindings/javascript/sync/packages/browser/package.json
index a7d33c2ce..efddc0a70 100644
--- a/bindings/javascript/sync/packages/browser/package.json
+++ b/bindings/javascript/sync/packages/browser/package.json
@@ -42,7 +42,7 @@
     "tsc-build": "npm exec tsc && cp sync.wasm32-wasi.wasm
./dist/sync.wasm32-wasi.wasm && WASM_FILE=sync.wasm32-wasi.wasm JS_FILE=./dist/wasm-inline.js node ../../../scripts/inline-wasm-base64.js && npm run bundle", "bundle": "vite build", "build": "npm run napi-build && npm run tsc-build", - "test": "VITE_TURSO_DB_URL=http://b--a--a.localhost:10000 CI=1 vitest --browser=chromium --run && VITE_TURSO_DB_URL=http://b--a--a.localhost:10000 CI=1 vitest --browser=firefox --run" + "test": "VITE_TURSO_DB_URL=http://c--a--a.localhost:10000 CI=1 vitest --testTimeout 30000 --browser=chromium --run && VITE_TURSO_DB_URL=http://c--a--a.localhost:10000 CI=1 vitest --testTimeout 30000 --browser=firefox --run" }, "napi": { "binaryName": "sync", diff --git a/bindings/javascript/sync/packages/browser/promise.test.ts b/bindings/javascript/sync/packages/browser/promise.test.ts index e30163af0..fb60ff61b 100644 --- a/bindings/javascript/sync/packages/browser/promise.test.ts +++ b/bindings/javascript/sync/packages/browser/promise.test.ts @@ -260,6 +260,105 @@ test('persistence-pull-push', async () => { expect(rows2.sort(localeCompare)).toEqual(expected.sort(localeCompare)) }) +test('pull-push-concurrent', async () => { + { + const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, longPollTimeoutMs: 5000 }); + await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)"); + await db.exec("DELETE FROM q"); + await db.push(); + await db.close(); + } + let pullResolve = null; + const pullFinish = new Promise(resolve => pullResolve = resolve); + let pushResolve = null; + const pushFinish = new Promise(resolve => pushResolve = resolve); + let stopPull = false; + let stopPush = false; + const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL }); + let pull = async () => { + try { + await db.pull(); + } catch (e) { + console.error('pull', e); + } finally { + if (!stopPull) { + setTimeout(pull, 0); + } else { + pullResolve() + } + } + } + let push = async () => { + try { + if ((await db.stats()).operations > 0) { + await db.push(); + } + } catch (e) { + console.error('push', e); + } finally { + if (!stopPush) { + setTimeout(push, 0); + } else { + pushResolve(); + } + } + } + setTimeout(pull, 0); + setTimeout(push, 0); + for (let i = 0; i < 1000; i++) { + await db.exec(`INSERT INTO q VALUES ('k${i}', 'v${i}')`); + } + await new Promise(resolve => setTimeout(resolve, 1000)); + stopPush = true; + await pushFinish; + stopPull = true; + await pullFinish; + console.info(await db.stats()); +}) + +test('concurrent-updates', async () => { + { + const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, longPollTimeoutMs: 5000 }); + await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)"); + await db.exec("DELETE FROM q"); + await db.push(); + await db.close(); + } + const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL }); + const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL }); + async function pull(db) { + try { + await db.pull(); + } catch (e) { + // ignore + } finally { + setTimeout(async () => await pull(db), 0); + } + } + async function push(db) { + try { + await db.push(); + } catch (e) { + // ignore + } finally { + setTimeout(async () => await push(db), 0); + } + } + setTimeout(async () => await pull(db1), 0) + setTimeout(async () => await pull(db2), 0) + setTimeout(async () => await push(db1), 0) + setTimeout(async () => await push(db2), 0) + for (let i = 0; i < 1000; i++) { + try { + await db1.exec(`INSERT INTO q VALUES ('1', 0) ON 
CONFLICT DO UPDATE SET y = randomblob(128)`); + await db2.exec(`INSERT INTO q VALUES ('2', 0) ON CONFLICT DO UPDATE SET y = randomblob(128)`); + } catch (e) { + // ignore + } + await new Promise(resolve => setTimeout(resolve, 1)); + } +}) + test('transform', async () => { { const db = await connect({ diff --git a/bindings/javascript/sync/packages/browser/promise.ts b/bindings/javascript/sync/packages/browser/promise.ts index 3f43b81b6..b12680598 100644 --- a/bindings/javascript/sync/packages/browser/promise.ts +++ b/bindings/javascript/sync/packages/browser/promise.ts @@ -1,6 +1,6 @@ import { registerFileAtWorker, unregisterFileAtWorker } from "@tursodatabase/database-browser-common" import { DatabasePromise, DatabaseOpts, NativeDatabase } from "@tursodatabase/database-common" -import { ProtocolIo, run, SyncOpts, RunOpts, memoryIO, SyncEngineStats } from "@tursodatabase/sync-common"; +import { ProtocolIo, run, SyncOpts, RunOpts, memoryIO, SyncEngineStats, SyncEngineGuards } from "@tursodatabase/sync-common"; let BrowserIo: ProtocolIo = { async read(path: string): Promise { @@ -24,6 +24,7 @@ class Database extends DatabasePromise { io: ProtocolIo; worker: Worker | null; fsPath: string | null; + guards: SyncEngineGuards; constructor(db: NativeDatabase, io: ProtocolIo, worker: Worker | null, runOpts: RunOpts, engine: any, fsPath: string | null, opts: DatabaseOpts = {}) { super(db, opts) this.io = io; @@ -31,18 +32,21 @@ class Database extends DatabasePromise { this.runOpts = runOpts; this.engine = engine; this.fsPath = fsPath; + this.guards = new SyncEngineGuards(); } async sync() { - await run(this.runOpts, this.io, this.engine, this.engine.sync()); + await this.push(); + await this.pull(); } async pull() { - await run(this.runOpts, this.io, this.engine, this.engine.pull()); + const changes = await this.guards.wait(async () => await run(this.runOpts, this.io, this.engine, this.engine.wait())); + await this.guards.apply(async () => await run(this.runOpts, this.io, this.engine, this.engine.apply(changes))); } async push() { - await run(this.runOpts, this.io, this.engine, this.engine.push()); + await this.guards.push(async () => await run(this.runOpts, this.io, this.engine, this.engine.push())); } async checkpoint() { - await run(this.runOpts, this.io, this.engine, this.engine.checkpoint()); + await this.guards.checkpoint(async () => await run(this.runOpts, this.io, this.engine, this.engine.checkpoint())); } async stats(): Promise { return (await run(this.runOpts, this.io, this.engine, this.engine.stats())); @@ -76,7 +80,8 @@ async function connect(opts: SyncOpts, connect: (any) => any, init: () => Promis tablesIgnore: opts.tablesIgnore, useTransform: opts.transform != null, tracing: opts.tracing, - protocolVersion: 1 + protocolVersion: 1, + longPollTimeoutMs: opts.longPollTimeoutMs }); const runOpts: RunOpts = { url: opts.url, diff --git a/bindings/javascript/sync/packages/common/index.ts b/bindings/javascript/sync/packages/common/index.ts index 822a8c24f..7e9af0bea 100644 --- a/bindings/javascript/sync/packages/common/index.ts +++ b/bindings/javascript/sync/packages/common/index.ts @@ -1,5 +1,5 @@ -import { run, memoryIO } from "./run.js" +import { run, memoryIO, SyncEngineGuards } from "./run.js" import { SyncOpts, ProtocolIo, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult, SyncEngineStats } from "./types.js" -export { run, memoryIO, } +export { run, memoryIO, SyncEngineGuards } export type { SyncOpts, ProtocolIo, RunOpts, DatabaseRowMutation, 
DatabaseRowStatement, DatabaseRowTransformResult, SyncEngineStats } \ No newline at end of file diff --git a/bindings/javascript/sync/packages/common/package.json b/bindings/javascript/sync/packages/common/package.json index 44e04bd38..dc962ad31 100644 --- a/bindings/javascript/sync/packages/common/package.json +++ b/bindings/javascript/sync/packages/common/package.json @@ -21,5 +21,8 @@ "tsc-build": "npm exec tsc", "build": "npm run tsc-build", "test": "echo 'no tests'" + }, + "dependencies": { + "@tursodatabase/database-common": "^0.2.0-pre.3" } } diff --git a/bindings/javascript/sync/packages/common/run.ts b/bindings/javascript/sync/packages/common/run.ts index f26333d4b..f80606db8 100644 --- a/bindings/javascript/sync/packages/common/run.ts +++ b/bindings/javascript/sync/packages/common/run.ts @@ -1,6 +1,7 @@ "use strict"; import { GeneratorResponse, ProtocolIo, RunOpts } from "./types.js"; +import { AsyncLock } from "@tursodatabase/database-common"; const GENERATOR_RESUME_IO = 0; const GENERATOR_RESUME_DONE = 1; @@ -114,6 +115,10 @@ export async function run(opts: RunOpts, io: ProtocolIo, engine: any, generator: if (type == 'SyncEngineStats') { return rest; } + if (type == 'SyncEngineChanges') { + //@ts-ignore + return rest.changes; + } for (let request = engine.protocolIo(); request != null; request = engine.protocolIo()) { tasks.push(trackPromise(process(opts, io, request))); } @@ -124,4 +129,67 @@ export async function run(opts: RunOpts, io: ProtocolIo, engine: any, generator: tasks = tasks.filter(t => !t.finished); } return generator.take(); +} + + + +export class SyncEngineGuards { + waitLock: AsyncLock; + pushLock: AsyncLock; + pullLock: AsyncLock; + checkpointLock: AsyncLock; + constructor() { + this.waitLock = new AsyncLock(); + this.pushLock = new AsyncLock(); + this.pullLock = new AsyncLock(); + this.checkpointLock = new AsyncLock(); + } + async wait(f: () => Promise): Promise { + try { + await this.waitLock.acquire(); + return await f(); + } finally { + this.waitLock.release(); + } + } + async push(f: () => Promise) { + try { + await this.pushLock.acquire(); + await this.pullLock.acquire(); + await this.checkpointLock.acquire(); + return await f(); + } finally { + this.pushLock.release(); + this.pullLock.release(); + this.checkpointLock.release(); + } + } + async apply(f: () => Promise) { + try { + await this.waitLock.acquire(); + await this.pushLock.acquire(); + await this.pullLock.acquire(); + await this.checkpointLock.acquire(); + return await f(); + } finally { + this.waitLock.release(); + this.pushLock.release(); + this.pullLock.release(); + this.checkpointLock.release(); + } + } + async checkpoint(f: () => Promise) { + try { + await this.waitLock.acquire(); + await this.pushLock.acquire(); + await this.pullLock.acquire(); + await this.checkpointLock.acquire(); + return await f(); + } finally { + this.waitLock.release(); + this.pushLock.release(); + this.pullLock.release(); + this.checkpointLock.release(); + } + } } \ No newline at end of file diff --git a/bindings/javascript/sync/packages/common/tsconfig.json b/bindings/javascript/sync/packages/common/tsconfig.json index 9bc14edd3..2e4046e8a 100644 --- a/bindings/javascript/sync/packages/common/tsconfig.json +++ b/bindings/javascript/sync/packages/common/tsconfig.json @@ -3,8 +3,9 @@ "skipLibCheck": true, "declaration": true, "declarationMap": true, - "module": "esnext", + "module": "nodenext", "target": "esnext", + "moduleResolution": "nodenext", "outDir": "dist/", "lib": [ "es2020", diff --git 
a/bindings/javascript/sync/packages/common/types.ts b/bindings/javascript/sync/packages/common/types.ts index 27006de8d..8391825cf 100644 --- a/bindings/javascript/sync/packages/common/types.ts +++ b/bindings/javascript/sync/packages/common/types.ts @@ -54,4 +54,4 @@ export interface SyncEngineStats { revision: string | null; } -export type GeneratorResponse = { type: 'IO' } | { type: 'Done' } | ({ type: 'SyncEngineStats' } & SyncEngineStats) \ No newline at end of file +export type GeneratorResponse = { type: 'IO' } | { type: 'Done' } | ({ type: 'SyncEngineStats' } & SyncEngineStats) | { type: 'SyncEngineChanges', changes: any } \ No newline at end of file diff --git a/bindings/javascript/sync/packages/native/index.d.ts b/bindings/javascript/sync/packages/native/index.d.ts index 02ff2c385..4d1b45fa9 100644 --- a/bindings/javascript/sync/packages/native/index.d.ts +++ b/bindings/javascript/sync/packages/native/index.d.ts @@ -174,15 +174,19 @@ export declare class SyncEngine { /** Runs the I/O loop asynchronously, returning a Promise. */ ioLoopAsync(): Promise protocolIo(): JsProtocolRequestBytes | null - sync(): GeneratorHolder push(): GeneratorHolder stats(): GeneratorHolder - pull(): GeneratorHolder + wait(): GeneratorHolder + apply(changes: SyncEngineChanges): GeneratorHolder checkpoint(): GeneratorHolder open(): Database close(): void } +export declare class SyncEngineChanges { + +} + export declare const enum DatabaseChangeTypeJs { Insert = 0, Update = 1, @@ -217,6 +221,7 @@ export type GeneratorResponse = | { type: 'IO' } | { type: 'Done' } | { type: 'SyncEngineStats', operations: number, mainWal: number, revertWal: number, lastPullUnixTime: number, lastPushUnixTime?: number, revision?: string } + | { type: 'SyncEngineChanges', changes: SyncEngineChanges } export type JsProtocolRequest = | { type: 'Http', method: string, path: string, body?: Array, headers: Array<[string, string]> } diff --git a/bindings/javascript/sync/packages/native/index.js b/bindings/javascript/sync/packages/native/index.js index 53bff489f..12e351d61 100644 --- a/bindings/javascript/sync/packages/native/index.js +++ b/bindings/javascript/sync/packages/native/index.js @@ -508,7 +508,7 @@ if (!nativeBinding) { throw new Error(`Failed to load native binding`) } -const { Database, Opfs, OpfsFile, Statement, initThreadPool, GeneratorHolder, JsDataCompletion, JsProtocolIo, JsProtocolRequestBytes, SyncEngine, DatabaseChangeTypeJs, SyncEngineProtocolVersion } = nativeBinding +const { Database, Statement, GeneratorHolder, JsDataCompletion, JsProtocolIo, JsProtocolRequestBytes, SyncEngine, SyncEngineChanges, DatabaseChangeTypeJs, SyncEngineProtocolVersion } = nativeBinding export { Database } export { Opfs } export { OpfsFile } @@ -519,5 +519,6 @@ export { JsDataCompletion } export { JsProtocolIo } export { JsProtocolRequestBytes } export { SyncEngine } +export { SyncEngineChanges } export { DatabaseChangeTypeJs } export { SyncEngineProtocolVersion } diff --git a/bindings/javascript/sync/packages/native/log b/bindings/javascript/sync/packages/native/log new file mode 100644 index 000000000..b6989da2e --- /dev/null +++ b/bindings/javascript/sync/packages/native/log @@ -0,0 +1,14 @@ + +> @tursodatabase/sync@0.2.0-pre.3 test +> VITE_TURSO_DB_URL=http://c--a--a.localhost:10000 vitest --run -t update + + + RUN v3.2.4 /home/sivukhin/turso/limbo/bindings/javascript/sync/packages/native + + ✓ promise.test.ts (14 tests | 13 skipped) 109ms + + Test Files 1 passed (1) + Tests 1 passed | 13 skipped (14) + Start at 16:40:50 + Duration 
436ms (transform 99ms, setup 0ms, collect 118ms, tests 109ms, environment 0ms, prepare 57ms) + diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json index dcdc39fac..c0c01081c 100644 --- a/bindings/javascript/sync/packages/native/package.json +++ b/bindings/javascript/sync/packages/native/package.json @@ -31,7 +31,7 @@ "napi-artifacts": "napi artifacts --output-dir .", "tsc-build": "npm exec tsc", "build": "npm run napi-build && npm run tsc-build", - "test": "VITE_TURSO_DB_URL=http://b--a--a.localhost:10000 vitest --run", + "test": "VITE_TURSO_DB_URL=http://c--a--a.localhost:10000 vitest --run", "prepublishOnly": "npm run napi-dirs && npm run napi-artifacts && napi prepublish -t npm" }, "napi": { diff --git a/bindings/javascript/sync/packages/native/promise.test.ts b/bindings/javascript/sync/packages/native/promise.test.ts index cae58db11..fea253fef 100644 --- a/bindings/javascript/sync/packages/native/promise.test.ts +++ b/bindings/javascript/sync/packages/native/promise.test.ts @@ -160,7 +160,7 @@ test('checkpoint', async () => { await db1.checkpoint(); expect((await db1.stats()).mainWal).toBe(0); let revertWal = (await db1.stats()).revertWal; - expect(revertWal).toBeLessThan(4096 * 1000 / 100); + expect(revertWal).toBeLessThan(4096 * 1000 / 50); for (let i = 0; i < 1000; i++) { await db1.exec(`UPDATE q SET y = 'u${i}' WHERE x = 'k${i}'`); @@ -284,6 +284,119 @@ test('persistence-pull-push', async () => { } }) +test('update', async () => { + { + const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, longPollTimeoutMs: 5000 }); + await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)"); + await db.exec("DELETE FROM q"); + await db.push(); + await db.close(); + } + const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL }); + await db.exec("INSERT INTO q VALUES ('1', '2')") + await db.push(); + await db.exec("INSERT INTO q VALUES ('1', '2') ON CONFLICT DO UPDATE SET y = '3'") + await db.push(); +}) + +test('concurrent-updates', async () => { + { + const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, longPollTimeoutMs: 5000 }); + await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)"); + await db.exec("DELETE FROM q"); + await db.push(); + await db.close(); + } + const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL }); + async function pull(db) { + try { + await db.pull(); + } catch (e) { + // ignore + } finally { + setTimeout(async () => await pull(db), 0); + } + } + async function push(db) { + try { + await db.push(); + } catch (e) { + // ignore + } finally { + setTimeout(async () => await push(db), 0); + } + } + setTimeout(async () => await pull(db1), 0) + setTimeout(async () => await push(db1), 0) + for (let i = 0; i < 1000; i++) { + try { + await Promise.all([ + db1.exec(`INSERT INTO q VALUES ('1', 0) ON CONFLICT DO UPDATE SET y = ${i + 1}`), + db1.exec(`INSERT INTO q VALUES ('2', 0) ON CONFLICT DO UPDATE SET y = ${i + 1}`) + ]); + } catch (e) { + // ignore + } + await new Promise(resolve => setTimeout(resolve, 1)); + } +}) + +test('pull-push-concurrent', async () => { + { + const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, longPollTimeoutMs: 5000 }); + await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)"); + await db.exec("DELETE FROM q"); + await db.push(); + await db.close(); + } + let pullResolve = null; + const pullFinish = new 
Promise(resolve => pullResolve = resolve); + let pushResolve = null; + const pushFinish = new Promise(resolve => pushResolve = resolve); + let stopPull = false; + let stopPush = false; + const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL }); + let pull = async () => { + try { + await db.pull(); + } catch (e) { + console.error('pull', e); + } finally { + if (!stopPull) { + setTimeout(pull, 0); + } else { + pullResolve() + } + } + } + let push = async () => { + try { + if ((await db.stats()).operations > 0) { + await db.push(); + } + } catch (e) { + console.error('push', e); + } finally { + if (!stopPush) { + setTimeout(push, 0); + } else { + pushResolve(); + } + } + } + setTimeout(pull, 0); + setTimeout(push, 0); + for (let i = 0; i < 1000; i++) { + await db.exec(`INSERT INTO q VALUES ('k${i}', 'v${i}')`); + } + await new Promise(resolve => setTimeout(resolve, 1000)); + stopPush = true; + await pushFinish; + stopPull = true; + await pullFinish; + console.info(await db.stats()); +}) + test('transform', async () => { { const db = await connect({ diff --git a/bindings/javascript/sync/packages/native/promise.ts b/bindings/javascript/sync/packages/native/promise.ts index 2bff31d29..f00656b03 100644 --- a/bindings/javascript/sync/packages/native/promise.ts +++ b/bindings/javascript/sync/packages/native/promise.ts @@ -1,5 +1,5 @@ import { DatabasePromise, DatabaseOpts, NativeDatabase } from "@tursodatabase/database-common" -import { ProtocolIo, run, SyncOpts, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult, SyncEngineStats } from "@tursodatabase/sync-common"; +import { ProtocolIo, run, SyncOpts, RunOpts, DatabaseRowMutation, DatabaseRowStatement, DatabaseRowTransformResult, SyncEngineStats, SyncEngineGuards } from "@tursodatabase/sync-common"; import { Database as NativeDB, SyncEngine } from "#index"; import { promises } from "node:fs"; @@ -43,23 +43,27 @@ class Database extends DatabasePromise { runOpts: RunOpts; engine: any; io: ProtocolIo; + guards: SyncEngineGuards constructor(db: NativeDatabase, io: ProtocolIo, runOpts: RunOpts, engine: any, opts: DatabaseOpts = {}) { super(db, opts) this.runOpts = runOpts; this.engine = engine; this.io = io; + this.guards = new SyncEngineGuards(); } async sync() { - await run(this.runOpts, this.io, this.engine, this.engine.sync()); + await this.push(); + await this.pull(); } async pull() { - await run(this.runOpts, this.io, this.engine, this.engine.pull()); + const changes = await this.guards.wait(async () => await run(this.runOpts, this.io, this.engine, this.engine.wait())); + await this.guards.apply(async () => await run(this.runOpts, this.io, this.engine, this.engine.apply(changes))); } async push() { - await run(this.runOpts, this.io, this.engine, this.engine.push()); + await this.guards.push(async () => await run(this.runOpts, this.io, this.engine, this.engine.push())); } async checkpoint() { - await run(this.runOpts, this.io, this.engine, this.engine.checkpoint()); + await this.guards.checkpoint(async () => await run(this.runOpts, this.io, this.engine, this.engine.checkpoint())); } async stats(): Promise { return (await run(this.runOpts, this.io, this.engine, this.engine.stats())); @@ -83,8 +87,8 @@ async function connect(opts: SyncOpts): Promise { tablesIgnore: opts.tablesIgnore, useTransform: opts.transform != null, tracing: opts.tracing, - longPollTimeoutMs: opts.longPollTimeoutMs, protocolVersion: 1, + longPollTimeoutMs: opts.longPollTimeoutMs, }); const runOpts: RunOpts = { url: opts.url, 
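Note on the locking change in this patch: the per-operation mutexes move from Rust (the removed SyncEngineGuard, whose std::sync::Mutex calls blocked the calling JS thread) to JavaScript (the SyncEngineGuards class added to sync-common/run.ts, built on AsyncLock from @tursodatabase/database-common). pull() is also split into two engine calls, wait() and apply(): wait() long-polls the server while holding only waitLock, and apply() takes all four locks only while mutating the local database, so a push() can proceed while a pull is still waiting on remote changes, which is what the pull-push-concurrent tests above exercise. The sketch below is a minimal promise-chain lock that matches the `await lock.acquire(); ... lock.release();` usage in SyncEngineGuards; it is an illustration only, and the shipped AsyncLock may differ in detail.

// Minimal sketch of an async lock compatible with the acquire()/release()
// pattern used by SyncEngineGuards. Not the shipped AsyncLock.
class AsyncLock {
  // Tail of the wait queue: resolves once the latest acquirer releases.
  private tail: Promise<void> = Promise.resolve();
  private releaseCurrent: (() => void) | null = null;

  async acquire(): Promise<void> {
    const prev = this.tail;
    let release!: () => void;
    this.tail = new Promise<void>((resolve) => { release = resolve; });
    await prev;                     // wait for every earlier holder to release
    this.releaseCurrent = release;  // only the current holder reaches this line
  }

  release(): void {
    const release = this.releaseCurrent;
    this.releaseCurrent = null;
    if (release !== null) release();
  }
}

// Ordering note: every multi-lock method in SyncEngineGuards acquires in the
// same global order (wait -> push -> pull -> checkpoint), which is what keeps
// two concurrent callers from deadlocking on opposite acquisition orders.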
diff --git a/bindings/javascript/sync/src/generator.rs b/bindings/javascript/sync/src/generator.rs
index 2aae4f373..00ae9661e 100644
--- a/bindings/javascript/sync/src/generator.rs
+++ b/bindings/javascript/sync/src/generator.rs
@@ -5,7 +5,7 @@ use std::{
     sync::{Arc, Mutex},
 };
 
-use turso_sync_engine::types::ProtocolCommand;
+use turso_sync_engine::types::{DbChangesStatus, ProtocolCommand};
 
 pub const GENERATOR_RESUME_IO: u32 = 0;
 pub const GENERATOR_RESUME_DONE: u32 = 1;
@@ -35,7 +35,12 @@ impl>> Generator
     }
 }
 
-#[napi(discriminant = "type")]
+#[napi]
+pub struct SyncEngineChanges {
+    pub(crate) status: Box<Option<DbChangesStatus>>,
+}
+
+#[napi(discriminant = "type", object_from_js = false)]
 pub enum GeneratorResponse {
     IO,
     Done,
@@ -47,6 +52,9 @@ pub enum GeneratorResponse {
         last_push_unix_time: Option,
         revision: Option,
     },
+    SyncEngineChanges {
+        changes: SyncEngineChanges,
+    },
 }
 
 #[napi]
diff --git a/bindings/javascript/sync/src/lib.rs b/bindings/javascript/sync/src/lib.rs
index c70932081..13427f501 100644
--- a/bindings/javascript/sync/src/lib.rs
+++ b/bindings/javascript/sync/src/lib.rs
@@ -19,7 +19,7 @@ use turso_sync_engine::{
 };
 
 use crate::{
-    generator::{GeneratorHolder, GeneratorResponse},
+    generator::{GeneratorHolder, GeneratorResponse, SyncEngineChanges},
     js_protocol_io::{JsProtocolIo, JsProtocolRequestBytes},
 };
 
@@ -28,38 +28,6 @@ pub struct DatabaseOpts {
     pub path: String,
 }
 
-pub struct SyncEngineGuard {
-    inner: Arc>>>,
-    wait_lock: Mutex<()>,
-    push_lock: Mutex<()>,
-    pull_lock: Mutex<()>,
-    checkpoint_lock: Mutex<()>,
-}
-
-impl SyncEngineGuard {
-    fn checkpoint_lock(&self) -> (MutexGuard<'_, ()>, MutexGuard<'_, ()>, MutexGuard<'_, ()>) {
-        let push = self.push_lock.lock().unwrap();
-        let pull = self.pull_lock.lock().unwrap();
-        let checkpoint = self.checkpoint_lock.lock().unwrap();
-        (push, pull, checkpoint)
-    }
-    fn pull_lock(&self) -> (MutexGuard<'_, ()>, MutexGuard<'_, ()>, MutexGuard<'_, ()>) {
-        let wait = self.wait_lock.lock().unwrap();
-        let push = self.push_lock.lock().unwrap();
-        let pull = self.pull_lock.lock().unwrap();
-        (wait, push, pull)
-    }
-    fn push_lock(&self) -> MutexGuard<'_, ()> {
-        let push = self.push_lock.lock().unwrap();
-        push
-    }
-    fn wait_lock(&self) -> (MutexGuard<'_, ()>, MutexGuard<'_, ()>) {
-        let wait = self.wait_lock.lock().unwrap();
-        let pull = self.pull_lock.lock().unwrap();
-        (wait, pull)
-    }
-}
-
 #[napi]
 pub struct SyncEngine {
     path: String,
@@ -71,7 +39,7 @@ pub struct SyncEngine {
     use_transform: bool,
     io: Option>,
     protocol: Option>,
-    sync_engine: Arc<SyncEngineGuard>,
+    sync_engine: Arc>>>,
     opened: Arc>>,
 }
 
@@ -214,13 +182,7 @@ impl SyncEngine {
             tables_ignore: opts.tables_ignore.unwrap_or_default(),
             use_transform: opts.use_transform,
             #[allow(clippy::arc_with_non_send_sync)]
-            sync_engine: Arc::new(SyncEngineGuard {
-                inner: Arc::new(RwLock::new(None)),
-                wait_lock: Mutex::new(()),
-                push_lock: Mutex::new(()),
-                pull_lock: Mutex::new(()),
-                checkpoint_lock: Mutex::new(()),
-            }),
+            sync_engine: Arc::new(RwLock::new(None)),
             io: Some(io),
             protocol: Some(Arc::new(JsProtocolIo::default())),
             #[allow(clippy::arc_with_non_send_sync)]
@@ -257,7 +219,7 @@ impl SyncEngine {
             let connection = initialized.connect_rw(&coro).await?;
             let db = turso_node::Database::create(None, io.clone(), connection, path);
 
-            *sync_engine.inner.write().unwrap() = Some(initialized);
+            *sync_engine.write().unwrap() = Some(initialized);
             *opened.lock().unwrap() = Some(db);
             Ok(())
         });
@@ -288,22 +250,10 @@ impl SyncEngine {
         Ok(self.protocol()?.take_request())
     }
 
-    #[napi]
-    pub fn sync(&self) -> GeneratorHolder
{ - self.run(async move |coro, guard| { - let _lock = guard.pull_lock(); - let sync_engine = try_read(&guard.inner)?; - let sync_engine = try_unwrap(&sync_engine)?; - sync_engine.sync(coro).await?; - Ok(None) - }) - } - #[napi] pub fn push(&self) -> GeneratorHolder { self.run(async move |coro, guard| { - let _lock = guard.push_lock(); - let sync_engine = try_read(&guard.inner)?; + let sync_engine = try_read(&guard)?; let sync_engine = try_unwrap(&sync_engine)?; sync_engine.push_changes_to_remote(coro).await?; Ok(None) @@ -313,7 +263,7 @@ impl SyncEngine { #[napi] pub fn stats(&self) -> GeneratorHolder { self.run(async move |coro, guard| { - let sync_engine = try_read(&guard.inner)?; + let sync_engine = try_read(&guard)?; let sync_engine = try_unwrap(&sync_engine)?; let stats = sync_engine.stats(coro).await?; Ok(Some(GeneratorResponse::SyncEngineStats { @@ -328,16 +278,25 @@ impl SyncEngine { } #[napi] - pub fn pull(&self) -> GeneratorHolder { + pub fn wait(&self) -> GeneratorHolder { self.run(async move |coro, guard| { - let sync_engine = try_read(&guard.inner)?; + let sync_engine = try_read(&guard)?; let sync_engine = try_unwrap(&sync_engine)?; - let changes = { - let _lock = guard.wait_lock(); - sync_engine.wait_changes_from_remote(coro).await? - }; - let _lock = guard.pull_lock(); - sync_engine.apply_changes_from_remote(coro, changes).await?; + Ok(Some(GeneratorResponse::SyncEngineChanges { + changes: SyncEngineChanges { + status: Box::new(Some(sync_engine.wait_changes_from_remote(coro).await?)), + }, + })) + }) + } + + #[napi] + pub fn apply(&self, changes: &mut SyncEngineChanges) -> GeneratorHolder { + let status = changes.status.take().unwrap(); + self.run(async move |coro, guard| { + let sync_engine = try_read(&guard)?; + let sync_engine = try_unwrap(&sync_engine)?; + sync_engine.apply_changes_from_remote(coro, status).await?; Ok(None) }) } @@ -345,8 +304,7 @@ impl SyncEngine { #[napi] pub fn checkpoint(&self) -> GeneratorHolder { self.run(async move |coro, guard| { - let _lock = guard.checkpoint_lock(); - let sync_engine = try_read(&guard.inner)?; + let sync_engine = try_read(&guard)?; let sync_engine = try_unwrap(&sync_engine)?; sync_engine.checkpoint(coro).await?; Ok(None) @@ -367,7 +325,7 @@ impl SyncEngine { #[napi] pub fn close(&mut self) { - let _ = self.sync_engine.inner.write().unwrap().take(); + let _ = self.sync_engine.write().unwrap().take(); let _ = self.opened.lock().unwrap().take().unwrap(); let _ = self.io.take(); let _ = self.protocol.take(); @@ -396,7 +354,7 @@ impl SyncEngine { &self, f: impl AsyncFnOnce( &Coro<()>, - &Arc, + &Arc>>>, ) -> turso_sync_engine::Result> + 'static, ) -> GeneratorHolder { diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock index 5dc592472..f79e485bd 100644 --- a/bindings/javascript/yarn.lock +++ b/bindings/javascript/yarn.lock @@ -30,6 +30,13 @@ __metadata: languageName: node linkType: hard +"@drizzle-team/brocli@npm:^0.10.2": + version: 0.10.2 + resolution: "@drizzle-team/brocli@npm:0.10.2" + checksum: 10c0/3d8b99d680f0b14fea32b45c59b938b6665e0840cc67f04801b1aa3c6747da3c7d01c00e321645034fa100abdba7e0c20ce07cf46fc2ca769ee4cafd97562484 + languageName: node + linkType: hard + "@emnapi/core@npm:^1.4.5": version: 1.4.5 resolution: "@emnapi/core@npm:1.4.5" @@ -58,6 +65,26 @@ __metadata: languageName: node linkType: hard +"@esbuild-kit/core-utils@npm:^3.3.2": + version: 3.3.2 + resolution: "@esbuild-kit/core-utils@npm:3.3.2" + dependencies: + esbuild: "npm:~0.18.20" + source-map-support: "npm:^0.5.21" + checksum: 
10c0/d856f5bd720814593f911d781ed7558a3f8ec1a39802f3831d0eea0d1306e0e2dc11b7b2443af621c413ec6557f1f3034a9a4f1472a4cb40e52cd6e3b356aa05 + languageName: node + linkType: hard + +"@esbuild-kit/esm-loader@npm:^2.5.5": + version: 2.6.5 + resolution: "@esbuild-kit/esm-loader@npm:2.6.5" + dependencies: + "@esbuild-kit/core-utils": "npm:^3.3.2" + get-tsconfig: "npm:^4.7.0" + checksum: 10c0/6894b29176eda62bdce0d458d57f32daed5cb8fcff14cb3ddfbc995cfe3e2fa8599f3b0b1af66db446903b30167f57069f27e9cf79a69cf9b41f557115811cde + languageName: node + linkType: hard + "@esbuild/aix-ppc64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/aix-ppc64@npm:0.25.9" @@ -65,6 +92,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/android-arm64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/android-arm64@npm:0.18.20" + conditions: os=android & cpu=arm64 + languageName: node + linkType: hard + "@esbuild/android-arm64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/android-arm64@npm:0.25.9" @@ -72,6 +106,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/android-arm@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/android-arm@npm:0.18.20" + conditions: os=android & cpu=arm + languageName: node + linkType: hard + "@esbuild/android-arm@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/android-arm@npm:0.25.9" @@ -79,6 +120,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/android-x64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/android-x64@npm:0.18.20" + conditions: os=android & cpu=x64 + languageName: node + linkType: hard + "@esbuild/android-x64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/android-x64@npm:0.25.9" @@ -86,6 +134,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/darwin-arm64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/darwin-arm64@npm:0.18.20" + conditions: os=darwin & cpu=arm64 + languageName: node + linkType: hard + "@esbuild/darwin-arm64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/darwin-arm64@npm:0.25.9" @@ -93,6 +148,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/darwin-x64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/darwin-x64@npm:0.18.20" + conditions: os=darwin & cpu=x64 + languageName: node + linkType: hard + "@esbuild/darwin-x64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/darwin-x64@npm:0.25.9" @@ -100,6 +162,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/freebsd-arm64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/freebsd-arm64@npm:0.18.20" + conditions: os=freebsd & cpu=arm64 + languageName: node + linkType: hard + "@esbuild/freebsd-arm64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/freebsd-arm64@npm:0.25.9" @@ -107,6 +176,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/freebsd-x64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/freebsd-x64@npm:0.18.20" + conditions: os=freebsd & cpu=x64 + languageName: node + linkType: hard + "@esbuild/freebsd-x64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/freebsd-x64@npm:0.25.9" @@ -114,6 +190,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-arm64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/linux-arm64@npm:0.18.20" + conditions: os=linux & cpu=arm64 + languageName: node + linkType: hard + "@esbuild/linux-arm64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/linux-arm64@npm:0.25.9" @@ -121,6 +204,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-arm@npm:0.18.20": + version: 0.18.20 + resolution: 
"@esbuild/linux-arm@npm:0.18.20" + conditions: os=linux & cpu=arm + languageName: node + linkType: hard + "@esbuild/linux-arm@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/linux-arm@npm:0.25.9" @@ -128,6 +218,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-ia32@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/linux-ia32@npm:0.18.20" + conditions: os=linux & cpu=ia32 + languageName: node + linkType: hard + "@esbuild/linux-ia32@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/linux-ia32@npm:0.25.9" @@ -135,6 +232,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-loong64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/linux-loong64@npm:0.18.20" + conditions: os=linux & cpu=loong64 + languageName: node + linkType: hard + "@esbuild/linux-loong64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/linux-loong64@npm:0.25.9" @@ -142,6 +246,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-mips64el@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/linux-mips64el@npm:0.18.20" + conditions: os=linux & cpu=mips64el + languageName: node + linkType: hard + "@esbuild/linux-mips64el@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/linux-mips64el@npm:0.25.9" @@ -149,6 +260,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-ppc64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/linux-ppc64@npm:0.18.20" + conditions: os=linux & cpu=ppc64 + languageName: node + linkType: hard + "@esbuild/linux-ppc64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/linux-ppc64@npm:0.25.9" @@ -156,6 +274,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-riscv64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/linux-riscv64@npm:0.18.20" + conditions: os=linux & cpu=riscv64 + languageName: node + linkType: hard + "@esbuild/linux-riscv64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/linux-riscv64@npm:0.25.9" @@ -163,6 +288,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-s390x@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/linux-s390x@npm:0.18.20" + conditions: os=linux & cpu=s390x + languageName: node + linkType: hard + "@esbuild/linux-s390x@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/linux-s390x@npm:0.25.9" @@ -170,6 +302,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-x64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/linux-x64@npm:0.18.20" + conditions: os=linux & cpu=x64 + languageName: node + linkType: hard + "@esbuild/linux-x64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/linux-x64@npm:0.25.9" @@ -184,6 +323,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/netbsd-x64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/netbsd-x64@npm:0.18.20" + conditions: os=netbsd & cpu=x64 + languageName: node + linkType: hard + "@esbuild/netbsd-x64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/netbsd-x64@npm:0.25.9" @@ -198,6 +344,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/openbsd-x64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/openbsd-x64@npm:0.18.20" + conditions: os=openbsd & cpu=x64 + languageName: node + linkType: hard + "@esbuild/openbsd-x64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/openbsd-x64@npm:0.25.9" @@ -212,6 +365,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/sunos-x64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/sunos-x64@npm:0.18.20" + conditions: os=sunos & cpu=x64 + languageName: node + linkType: hard + 
"@esbuild/sunos-x64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/sunos-x64@npm:0.25.9" @@ -219,6 +379,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/win32-arm64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/win32-arm64@npm:0.18.20" + conditions: os=win32 & cpu=arm64 + languageName: node + linkType: hard + "@esbuild/win32-arm64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/win32-arm64@npm:0.25.9" @@ -226,6 +393,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/win32-ia32@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/win32-ia32@npm:0.18.20" + conditions: os=win32 & cpu=ia32 + languageName: node + linkType: hard + "@esbuild/win32-ia32@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/win32-ia32@npm:0.25.9" @@ -233,6 +407,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/win32-x64@npm:0.18.20": + version: 0.18.20 + resolution: "@esbuild/win32-x64@npm:0.18.20" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + "@esbuild/win32-x64@npm:0.25.9": version: 0.25.9 resolution: "@esbuild/win32-x64@npm:0.25.9" @@ -1433,6 +1614,9 @@ __metadata: "@napi-rs/cli": "npm:^3.1.5" "@tursodatabase/database-common": "npm:^0.2.0-pre.3" "@types/node": "npm:^24.3.1" + better-sqlite3: "npm:^12.2.0" + drizzle-kit: "npm:^0.31.4" + drizzle-orm: "npm:^0.44.5" typescript: "npm:^5.9.2" vitest: "npm:^3.2.4" languageName: unknown @@ -1458,6 +1642,7 @@ __metadata: version: 0.0.0-use.local resolution: "@tursodatabase/sync-common@workspace:sync/packages/common" dependencies: + "@tursodatabase/database-common": "npm:^0.2.0-pre.3" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1680,16 +1865,7 @@ __metadata: languageName: node linkType: hard -"ansi-styles@npm:^4.0.0": - version: 4.3.0 - resolution: "ansi-styles@npm:4.3.0" - dependencies: - color-convert: "npm:^2.0.1" - checksum: 10c0/895a23929da416f2bd3de7e9cb4eabd340949328ab85ddd6e484a637d8f6820d485f53933446f5291c3b760cbc488beb8e88573dd0f9c7daf83dccc8fe81b041 - languageName: node - linkType: hard - -"ansi-styles@npm:^5.0.0": +"ansi-styles@npm:^4.0.0, ansi-styles@npm:^5.0.0": version: 5.2.0 resolution: "ansi-styles@npm:5.2.0" checksum: 10c0/9c4ca80eb3c2fb7b33841c210d2f20807f40865d27008d7c3f707b7f95cab7d67462a565e2388ac3285b71cb3d9bb2173de8da37c57692a362885ec34d6e27df @@ -1733,6 +1909,13 @@ __metadata: languageName: node linkType: hard +"base64-js@npm:^1.3.1": + version: 1.5.1 + resolution: "base64-js@npm:1.5.1" + checksum: 10c0/f23823513b63173a001030fae4f2dabe283b99a9d324ade3ad3d148e218134676f1ee8568c877cd79ec1c53158dcf2d2ba527a97c606618928ba99dd930102bf + languageName: node + linkType: hard + "before-after-hook@npm:^4.0.0": version: 4.0.0 resolution: "before-after-hook@npm:4.0.0" @@ -1740,6 +1923,37 @@ __metadata: languageName: node linkType: hard +"better-sqlite3@npm:^12.2.0": + version: 12.2.0 + resolution: "better-sqlite3@npm:12.2.0" + dependencies: + bindings: "npm:^1.5.0" + node-gyp: "npm:latest" + prebuild-install: "npm:^7.1.1" + checksum: 10c0/842247e9bbb775f366ac91f604117112c312497e643bac21648d8b69f479763de0ac049b14b609d6d5ecaee50debcc09a854f682d3dc099a1d933fea92ce68d0 + languageName: node + linkType: hard + +"bindings@npm:^1.5.0": + version: 1.5.0 + resolution: "bindings@npm:1.5.0" + dependencies: + file-uri-to-path: "npm:1.0.0" + checksum: 10c0/3dab2491b4bb24124252a91e656803eac24292473e56554e35bbfe3cc1875332cfa77600c3bac7564049dc95075bf6fcc63a4609920ff2d64d0fe405fcf0d4ba + languageName: node + linkType: hard + +"bl@npm:^4.0.3": + version: 4.1.0 + 
resolution: "bl@npm:4.1.0" + dependencies: + buffer: "npm:^5.5.0" + inherits: "npm:^2.0.4" + readable-stream: "npm:^3.4.0" + checksum: 10c0/02847e1d2cb089c9dc6958add42e3cdeaf07d13f575973963335ac0fdece563a50ac770ac4c8fa06492d2dd276f6cc3b7f08c7cd9c7a7ad0f8d388b2a28def5f + languageName: node + linkType: hard + "brace-expansion@npm:^2.0.1": version: 2.0.2 resolution: "brace-expansion@npm:2.0.2" @@ -1749,6 +1963,23 @@ __metadata: languageName: node linkType: hard +"buffer-from@npm:^1.0.0": + version: 1.1.2 + resolution: "buffer-from@npm:1.1.2" + checksum: 10c0/124fff9d66d691a86d3b062eff4663fe437a9d9ee4b47b1b9e97f5a5d14f6d5399345db80f796827be7c95e70a8e765dd404b7c3ff3b3324f98e9b0c8826cc34 + languageName: node + linkType: hard + +"buffer@npm:^5.5.0": + version: 5.7.1 + resolution: "buffer@npm:5.7.1" + dependencies: + base64-js: "npm:^1.3.1" + ieee754: "npm:^1.1.13" + checksum: 10c0/27cac81cff434ed2876058d72e7c4789d11ff1120ef32c9de48f59eab58179b66710c488987d295ae89a228f835fc66d088652dffeb8e3ba8659f80eb091d55e + languageName: node + linkType: hard + "cac@npm:^6.7.14": version: 6.7.14 resolution: "cac@npm:6.7.14" @@ -1803,6 +2034,13 @@ __metadata: languageName: node linkType: hard +"chownr@npm:^1.1.1": + version: 1.1.4 + resolution: "chownr@npm:1.1.4" + checksum: 10c0/ed57952a84cc0c802af900cf7136de643d3aba2eecb59d29344bc2f3f9bf703a301b9d84cdc71f82c3ffc9ccde831b0d92f5b45f91727d6c9da62f23aef9d9db + languageName: node + linkType: hard + "chownr@npm:^3.0.0": version: 3.0.0 resolution: "chownr@npm:3.0.0" @@ -1828,22 +2066,6 @@ __metadata: languageName: node linkType: hard -"color-convert@npm:^2.0.1": - version: 2.0.1 - resolution: "color-convert@npm:2.0.1" - dependencies: - color-name: "npm:~1.1.4" - checksum: 10c0/37e1150172f2e311fe1b2df62c6293a342ee7380da7b9cfdba67ea539909afbd74da27033208d01d6d5cfc65ee7868a22e18d7e7648e004425441c0f8a15a7d7 - languageName: node - linkType: hard - -"color-name@npm:~1.1.4": - version: 1.1.4 - resolution: "color-name@npm:1.1.4" - checksum: 10c0/a1a3f914156960902f46f7f56bc62effc6c94e84b2cae157a526b1c1f74b677a47ec602bf68a61abfa2b42d15b7c5651c6dbe72a43af720bc588dff885b10f95 - languageName: node - linkType: hard - "colorette@npm:^2.0.20": version: 2.0.20 resolution: "colorette@npm:2.0.20" @@ -1862,7 +2084,19 @@ __metadata: languageName: node linkType: hard -"debug@npm:4, debug@npm:^4.3.4, debug@npm:^4.4.0, debug@npm:^4.4.1": +"debug@npm:4": + version: 4.4.3 + resolution: "debug@npm:4.4.3" + dependencies: + ms: "npm:^2.1.3" + peerDependenciesMeta: + supports-color: + optional: true + checksum: 10c0/d79136ec6c83ecbefd0f6a5593da6a9c91ec4d7ddc4b54c883d6e71ec9accb5f67a1a5e96d00a328196b5b5c86d365e98d8a3a70856aaf16b4e7b1985e67f5a6 + languageName: node + linkType: hard + +"debug@npm:^4.3.4, debug@npm:^4.4.0, debug@npm:^4.4.1": version: 4.4.1 resolution: "debug@npm:4.4.1" dependencies: @@ -1874,6 +2108,15 @@ __metadata: languageName: node linkType: hard +"decompress-response@npm:^6.0.0": + version: 6.0.0 + resolution: "decompress-response@npm:6.0.0" + dependencies: + mimic-response: "npm:^3.1.0" + checksum: 10c0/bd89d23141b96d80577e70c54fb226b2f40e74a6817652b80a116d7befb8758261ad073a8895648a29cc0a5947021ab66705cb542fa9c143c82022b27c5b175e + languageName: node + linkType: hard + "deep-eql@npm:^5.0.1": version: 5.0.2 resolution: "deep-eql@npm:5.0.2" @@ -1881,6 +2124,13 @@ __metadata: languageName: node linkType: hard +"deep-extend@npm:^0.6.0": + version: 0.6.0 + resolution: "deep-extend@npm:0.6.0" + checksum: 
10c0/1c6b0abcdb901e13a44c7d699116d3d4279fdb261983122a3783e7273844d5f2537dc2e1c454a23fcf645917f93fbf8d07101c1d03c015a87faa662755212566 + languageName: node + linkType: hard + "dequal@npm:^2.0.3": version: 2.0.3 resolution: "dequal@npm:2.0.3" @@ -1888,6 +2138,13 @@ __metadata: languageName: node linkType: hard +"detect-libc@npm:^2.0.0": + version: 2.1.0 + resolution: "detect-libc@npm:2.1.0" + checksum: 10c0/4d0d36c77fdcb1d3221779d8dfc7d5808dd52530d49db67193fb3cd8149e2d499a1eeb87bb830ad7c442294929992c12e971f88ae492965549f8f83e5336eba6 + languageName: node + linkType: hard + "dom-accessibility-api@npm:^0.5.9": version: 0.5.16 resolution: "dom-accessibility-api@npm:0.5.16" @@ -1895,6 +2152,115 @@ __metadata: languageName: node linkType: hard +"drizzle-kit@npm:^0.31.4": + version: 0.31.4 + resolution: "drizzle-kit@npm:0.31.4" + dependencies: + "@drizzle-team/brocli": "npm:^0.10.2" + "@esbuild-kit/esm-loader": "npm:^2.5.5" + esbuild: "npm:^0.25.4" + esbuild-register: "npm:^3.5.0" + bin: + drizzle-kit: bin.cjs + checksum: 10c0/5e345cb28b4b8f329ce5f851e47418ac2ee8189aecec85f566f7a6c309f3392613519a39c559618599bd1e63fb99f114b9d9d82fb9e411f1702425678f34d2c2 + languageName: node + linkType: hard + +"drizzle-orm@npm:^0.44.5": + version: 0.44.5 + resolution: "drizzle-orm@npm:0.44.5" + peerDependencies: + "@aws-sdk/client-rds-data": ">=3" + "@cloudflare/workers-types": ">=4" + "@electric-sql/pglite": ">=0.2.0" + "@libsql/client": ">=0.10.0" + "@libsql/client-wasm": ">=0.10.0" + "@neondatabase/serverless": ">=0.10.0" + "@op-engineering/op-sqlite": ">=2" + "@opentelemetry/api": ^1.4.1 + "@planetscale/database": ">=1.13" + "@prisma/client": "*" + "@tidbcloud/serverless": "*" + "@types/better-sqlite3": "*" + "@types/pg": "*" + "@types/sql.js": "*" + "@upstash/redis": ">=1.34.7" + "@vercel/postgres": ">=0.8.0" + "@xata.io/client": "*" + better-sqlite3: ">=7" + bun-types: "*" + expo-sqlite: ">=14.0.0" + gel: ">=2" + knex: "*" + kysely: "*" + mysql2: ">=2" + pg: ">=8" + postgres: ">=3" + sql.js: ">=1" + sqlite3: ">=5" + peerDependenciesMeta: + "@aws-sdk/client-rds-data": + optional: true + "@cloudflare/workers-types": + optional: true + "@electric-sql/pglite": + optional: true + "@libsql/client": + optional: true + "@libsql/client-wasm": + optional: true + "@neondatabase/serverless": + optional: true + "@op-engineering/op-sqlite": + optional: true + "@opentelemetry/api": + optional: true + "@planetscale/database": + optional: true + "@prisma/client": + optional: true + "@tidbcloud/serverless": + optional: true + "@types/better-sqlite3": + optional: true + "@types/pg": + optional: true + "@types/sql.js": + optional: true + "@upstash/redis": + optional: true + "@vercel/postgres": + optional: true + "@xata.io/client": + optional: true + better-sqlite3: + optional: true + bun-types: + optional: true + expo-sqlite: + optional: true + gel: + optional: true + knex: + optional: true + kysely: + optional: true + mysql2: + optional: true + pg: + optional: true + postgres: + optional: true + prisma: + optional: true + sql.js: + optional: true + sqlite3: + optional: true + checksum: 10c0/2f9bd8cc7395b3254574eb9e9c344b7cebd507ac61f1ee8783648ad3bb8a7983875f44c0eabedfd871496d7eae646dbc75111fa21de2c64d0c899fcea091e303 + languageName: node + linkType: hard + "eastasianwidth@npm:^0.2.0": version: 0.2.0 resolution: "eastasianwidth@npm:0.2.0" @@ -1937,6 +2303,15 @@ __metadata: languageName: node linkType: hard +"end-of-stream@npm:^1.1.0, end-of-stream@npm:^1.4.1": + version: 1.4.5 + resolution: "end-of-stream@npm:1.4.5" + 
dependencies: + once: "npm:^1.4.0" + checksum: 10c0/b0701c92a10b89afb1cb45bf54a5292c6f008d744eb4382fa559d54775ff31617d1d7bc3ef617575f552e24fad2c7c1a1835948c66b3f3a4be0a6c1f35c883d8 + languageName: node + linkType: hard + "env-paths@npm:^2.2.0": version: 2.2.1 resolution: "env-paths@npm:2.2.1" @@ -1970,7 +2345,18 @@ __metadata: languageName: node linkType: hard -"esbuild@npm:^0.25.0": +"esbuild-register@npm:^3.5.0": + version: 3.6.0 + resolution: "esbuild-register@npm:3.6.0" + dependencies: + debug: "npm:^4.3.4" + peerDependencies: + esbuild: ">=0.12 <1" + checksum: 10c0/77193b7ca32ba9f81b35ddf3d3d0138efb0b1429d71b39480cfee932e1189dd2e492bd32bf04a4d0bc3adfbc7ec7381ceb5ffd06efe35f3e70904f1f686566d5 + languageName: node + linkType: hard + +"esbuild@npm:^0.25.0, esbuild@npm:^0.25.4": version: 0.25.9 resolution: "esbuild@npm:0.25.9" dependencies: @@ -2059,6 +2445,83 @@ __metadata: languageName: node linkType: hard +"esbuild@npm:~0.18.20": + version: 0.18.20 + resolution: "esbuild@npm:0.18.20" + dependencies: + "@esbuild/android-arm": "npm:0.18.20" + "@esbuild/android-arm64": "npm:0.18.20" + "@esbuild/android-x64": "npm:0.18.20" + "@esbuild/darwin-arm64": "npm:0.18.20" + "@esbuild/darwin-x64": "npm:0.18.20" + "@esbuild/freebsd-arm64": "npm:0.18.20" + "@esbuild/freebsd-x64": "npm:0.18.20" + "@esbuild/linux-arm": "npm:0.18.20" + "@esbuild/linux-arm64": "npm:0.18.20" + "@esbuild/linux-ia32": "npm:0.18.20" + "@esbuild/linux-loong64": "npm:0.18.20" + "@esbuild/linux-mips64el": "npm:0.18.20" + "@esbuild/linux-ppc64": "npm:0.18.20" + "@esbuild/linux-riscv64": "npm:0.18.20" + "@esbuild/linux-s390x": "npm:0.18.20" + "@esbuild/linux-x64": "npm:0.18.20" + "@esbuild/netbsd-x64": "npm:0.18.20" + "@esbuild/openbsd-x64": "npm:0.18.20" + "@esbuild/sunos-x64": "npm:0.18.20" + "@esbuild/win32-arm64": "npm:0.18.20" + "@esbuild/win32-ia32": "npm:0.18.20" + "@esbuild/win32-x64": "npm:0.18.20" + dependenciesMeta: + "@esbuild/android-arm": + optional: true + "@esbuild/android-arm64": + optional: true + "@esbuild/android-x64": + optional: true + "@esbuild/darwin-arm64": + optional: true + "@esbuild/darwin-x64": + optional: true + "@esbuild/freebsd-arm64": + optional: true + "@esbuild/freebsd-x64": + optional: true + "@esbuild/linux-arm": + optional: true + "@esbuild/linux-arm64": + optional: true + "@esbuild/linux-ia32": + optional: true + "@esbuild/linux-loong64": + optional: true + "@esbuild/linux-mips64el": + optional: true + "@esbuild/linux-ppc64": + optional: true + "@esbuild/linux-riscv64": + optional: true + "@esbuild/linux-s390x": + optional: true + "@esbuild/linux-x64": + optional: true + "@esbuild/netbsd-x64": + optional: true + "@esbuild/openbsd-x64": + optional: true + "@esbuild/sunos-x64": + optional: true + "@esbuild/win32-arm64": + optional: true + "@esbuild/win32-ia32": + optional: true + "@esbuild/win32-x64": + optional: true + bin: + esbuild: bin/esbuild + checksum: 10c0/473b1d92842f50a303cf948a11ebd5f69581cd254d599dd9d62f9989858e0533f64e83b723b5e1398a5b488c0f5fd088795b4235f65ecaf4f007d4b79f04bc88 + languageName: node + linkType: hard + "estree-walker@npm:^3.0.3": version: 3.0.3 resolution: "estree-walker@npm:3.0.3" @@ -2068,6 +2531,13 @@ __metadata: languageName: node linkType: hard +"expand-template@npm:^2.0.3": + version: 2.0.3 + resolution: "expand-template@npm:2.0.3" + checksum: 10c0/1c9e7afe9acadf9d373301d27f6a47b34e89b3391b1ef38b7471d381812537ef2457e620ae7f819d2642ce9c43b189b3583813ec395e2938319abe356a9b2f51 + languageName: node + linkType: hard + "expect-type@npm:^1.2.1": version: 1.2.2 
resolution: "expect-type@npm:1.2.2" @@ -2112,6 +2582,13 @@ __metadata: languageName: node linkType: hard +"file-uri-to-path@npm:1.0.0": + version: 1.0.0 + resolution: "file-uri-to-path@npm:1.0.0" + checksum: 10c0/3b545e3a341d322d368e880e1c204ef55f1d45cdea65f7efc6c6ce9e0c4d22d802d5629320eb779d006fe59624ac17b0e848d83cc5af7cd101f206cb704f5519 + languageName: node + linkType: hard + "find-up@npm:^7.0.0": version: 7.0.0 resolution: "find-up@npm:7.0.0" @@ -2133,6 +2610,13 @@ __metadata: languageName: node linkType: hard +"fs-constants@npm:^1.0.0": + version: 1.0.0 + resolution: "fs-constants@npm:1.0.0" + checksum: 10c0/a0cde99085f0872f4d244e83e03a46aa387b74f5a5af750896c6b05e9077fac00e9932fdf5aef84f2f16634cd473c63037d7a512576da7d5c2b9163d1909f3a8 + languageName: node + linkType: hard + "fs-minipass@npm:^3.0.0": version: 3.0.3 resolution: "fs-minipass@npm:3.0.3" @@ -2180,6 +2664,22 @@ __metadata: languageName: node linkType: hard +"get-tsconfig@npm:^4.7.0": + version: 4.10.1 + resolution: "get-tsconfig@npm:4.10.1" + dependencies: + resolve-pkg-maps: "npm:^1.0.0" + checksum: 10c0/7f8e3dabc6a49b747920a800fb88e1952fef871cdf51b79e98db48275a5de6cdaf499c55ee67df5fa6fe7ce65f0063e26de0f2e53049b408c585aa74d39ffa21 + languageName: node + linkType: hard + +"github-from-package@npm:0.0.0": + version: 0.0.0 + resolution: "github-from-package@npm:0.0.0" + checksum: 10c0/737ee3f52d0a27e26332cde85b533c21fcdc0b09fb716c3f8e522cfaa9c600d4a631dec9fcde179ec9d47cca89017b7848ed4d6ae6b6b78f936c06825b1fcc12 + languageName: node + linkType: hard + "glob@npm:^10.2.2": version: 10.4.5 resolution: "glob@npm:10.4.5" @@ -2248,6 +2748,13 @@ __metadata: languageName: node linkType: hard +"ieee754@npm:^1.1.13": + version: 1.2.1 + resolution: "ieee754@npm:1.2.1" + checksum: 10c0/b0782ef5e0935b9f12883a2e2aa37baa75da6e66ce6515c168697b42160807d9330de9a32ec1ed73149aea02e0d822e572bca6f1e22bdcbd2149e13b050b17bb + languageName: node + linkType: hard + "imurmurhash@npm:^0.1.4": version: 0.1.4 resolution: "imurmurhash@npm:0.1.4" @@ -2255,6 +2762,20 @@ __metadata: languageName: node linkType: hard +"inherits@npm:^2.0.3, inherits@npm:^2.0.4": + version: 2.0.4 + resolution: "inherits@npm:2.0.4" + checksum: 10c0/4e531f648b29039fb7426fb94075e6545faa1eb9fe83c29f0b6d9e7263aceb4289d2d4557db0d428188eeb449cc7c5e77b0a0b2c4e248ff2a65933a0dee49ef2 + languageName: node + linkType: hard + +"ini@npm:~1.3.0": + version: 1.3.8 + resolution: "ini@npm:1.3.8" + checksum: 10c0/ec93838d2328b619532e4f1ff05df7909760b6f66d9c9e2ded11e5c1897d6f2f9980c54dd638f88654b00919ce31e827040631eab0a3969e4d1abefa0719516a + languageName: node + linkType: hard + "ip-address@npm:^10.0.1": version: 10.0.1 resolution: "ip-address@npm:10.0.1" @@ -2381,6 +2902,13 @@ __metadata: languageName: node linkType: hard +"mimic-response@npm:^3.1.0": + version: 3.1.0 + resolution: "mimic-response@npm:3.1.0" + checksum: 10c0/0d6f07ce6e03e9e4445bee655202153bdb8a98d67ee8dc965ac140900d7a2688343e6b4c9a72cfc9ef2f7944dfd76eef4ab2482eb7b293a68b84916bac735362 + languageName: node + linkType: hard + "minimatch@npm:^9.0.4": version: 9.0.5 resolution: "minimatch@npm:9.0.5" @@ -2390,6 +2918,13 @@ __metadata: languageName: node linkType: hard +"minimist@npm:^1.2.0, minimist@npm:^1.2.3": + version: 1.2.8 + resolution: "minimist@npm:1.2.8" + checksum: 10c0/19d3fcdca050087b84c2029841a093691a91259a47def2f18222f41e7645a0b7c44ef4b40e88a1e58a40c84d2ef0ee6047c55594d298146d0eb3f6b737c20ce6 + languageName: node + linkType: hard + "minipass-collect@npm:^2.0.1": version: 2.0.1 resolution: "minipass-collect@npm:2.0.1" @@ 
-2466,6 +3001,13 @@ __metadata: languageName: node linkType: hard +"mkdirp-classic@npm:^0.5.2, mkdirp-classic@npm:^0.5.3": + version: 0.5.3 + resolution: "mkdirp-classic@npm:0.5.3" + checksum: 10c0/95371d831d196960ddc3833cc6907e6b8f67ac5501a6582f47dfae5eb0f092e9f8ce88e0d83afcae95d6e2b61a01741ba03714eeafb6f7a6e9dcc158ac85b168 + languageName: node + linkType: hard + "mkdirp@npm:^3.0.1": version: 3.0.1 resolution: "mkdirp@npm:3.0.1" @@ -2505,6 +3047,13 @@ __metadata: languageName: node linkType: hard +"napi-build-utils@npm:^2.0.0": + version: 2.0.0 + resolution: "napi-build-utils@npm:2.0.0" + checksum: 10c0/5833aaeb5cc5c173da47a102efa4680a95842c13e0d9cc70428bd3ee8d96bb2172f8860d2811799b5daa5cbeda779933601492a2028a6a5351c6d0fcf6de83db + languageName: node + linkType: hard + "negotiator@npm:^1.0.0": version: 1.0.0 resolution: "negotiator@npm:1.0.0" @@ -2512,6 +3061,15 @@ __metadata: languageName: node linkType: hard +"node-abi@npm:^3.3.0": + version: 3.77.0 + resolution: "node-abi@npm:3.77.0" + dependencies: + semver: "npm:^7.3.5" + checksum: 10c0/3354289ccca052538f653968ead73d00785e5ab159ce3a575dbff465724dac749821e7c327ae6c4774f29994f94c402fbafc8799b172aabf4aa8a082a070b00a + languageName: node + linkType: hard + "node-gyp@npm:latest": version: 11.4.2 resolution: "node-gyp@npm:11.4.2" @@ -2543,6 +3101,15 @@ __metadata: languageName: node linkType: hard +"once@npm:^1.3.1, once@npm:^1.4.0": + version: 1.4.0 + resolution: "once@npm:1.4.0" + dependencies: + wrappy: "npm:1" + checksum: 10c0/5d48aca287dfefabd756621c5dfce5c91a549a93e9fdb7b8246bc4c4790aa2ec17b34a260530474635147aeb631a2dcc8b32c613df0675f96041cbb8244517d0 + languageName: node + linkType: hard + "os-tmpdir@npm:~1.0.2": version: 1.0.2 resolution: "os-tmpdir@npm:1.0.2" @@ -2669,6 +3236,28 @@ __metadata: languageName: node linkType: hard +"prebuild-install@npm:^7.1.1": + version: 7.1.3 + resolution: "prebuild-install@npm:7.1.3" + dependencies: + detect-libc: "npm:^2.0.0" + expand-template: "npm:^2.0.3" + github-from-package: "npm:0.0.0" + minimist: "npm:^1.2.3" + mkdirp-classic: "npm:^0.5.3" + napi-build-utils: "npm:^2.0.0" + node-abi: "npm:^3.3.0" + pump: "npm:^3.0.0" + rc: "npm:^1.2.7" + simple-get: "npm:^4.0.0" + tar-fs: "npm:^2.0.0" + tunnel-agent: "npm:^0.6.0" + bin: + prebuild-install: bin.js + checksum: 10c0/25919a42b52734606a4036ab492d37cfe8b601273d8dfb1fa3c84e141a0a475e7bad3ab848c741d2f810cef892fcf6059b8c7fe5b29f98d30e0c29ad009bedff + languageName: node + linkType: hard + "pretty-format@npm:^27.0.2": version: 27.5.1 resolution: "pretty-format@npm:27.5.1" @@ -2697,6 +3286,30 @@ __metadata: languageName: node linkType: hard +"pump@npm:^3.0.0": + version: 3.0.3 + resolution: "pump@npm:3.0.3" + dependencies: + end-of-stream: "npm:^1.1.0" + once: "npm:^1.3.1" + checksum: 10c0/ada5cdf1d813065bbc99aa2c393b8f6beee73b5de2890a8754c9f488d7323ffd2ca5f5a0943b48934e3fcbd97637d0337369c3c631aeb9614915db629f1c75c9 + languageName: node + linkType: hard + +"rc@npm:^1.2.7": + version: 1.2.8 + resolution: "rc@npm:1.2.8" + dependencies: + deep-extend: "npm:^0.6.0" + ini: "npm:~1.3.0" + minimist: "npm:^1.2.0" + strip-json-comments: "npm:~2.0.1" + bin: + rc: ./cli.js + checksum: 10c0/24a07653150f0d9ac7168e52943cc3cb4b7a22c0e43c7dff3219977c2fdca5a2760a304a029c20811a0e79d351f57d46c9bde216193a0f73978496afc2b85b15 + languageName: node + linkType: hard + "react-is@npm:^17.0.1": version: 17.0.2 resolution: "react-is@npm:17.0.2" @@ -2704,6 +3317,24 @@ __metadata: languageName: node linkType: hard +"readable-stream@npm:^3.1.1, readable-stream@npm:^3.4.0": + 
version: 3.6.2 + resolution: "readable-stream@npm:3.6.2" + dependencies: + inherits: "npm:^2.0.3" + string_decoder: "npm:^1.1.1" + util-deprecate: "npm:^1.0.1" + checksum: 10c0/e37be5c79c376fdd088a45fa31ea2e423e5d48854be7a22a58869b4e84d25047b193f6acb54f1012331e1bcd667ffb569c01b99d36b0bd59658fb33f513511b7 + languageName: node + linkType: hard + +"resolve-pkg-maps@npm:^1.0.0": + version: 1.0.0 + resolution: "resolve-pkg-maps@npm:1.0.0" + checksum: 10c0/fb8f7bbe2ca281a73b7ef423a1cbc786fb244bd7a95cbe5c3fba25b27d327150beca8ba02f622baea65919a57e061eb5005204daa5f93ed590d9b77463a567ab + languageName: node + linkType: hard + "retry@npm:^0.12.0": version: 0.12.0 resolution: "retry@npm:0.12.0" @@ -2795,6 +3426,13 @@ __metadata: languageName: unknown linkType: soft +"safe-buffer@npm:^5.0.1, safe-buffer@npm:~5.2.0": + version: 5.2.1 + resolution: "safe-buffer@npm:5.2.1" + checksum: 10c0/6501914237c0a86e9675d4e51d89ca3c21ffd6a31642efeba25ad65720bce6921c9e7e974e5be91a786b25aa058b5303285d3c15dbabf983a919f5f630d349f3 + languageName: node + linkType: hard + "safer-buffer@npm:>= 2.1.2 < 3, safer-buffer@npm:>= 2.1.2 < 3.0.0": version: 2.1.2 resolution: "safer-buffer@npm:2.1.2" @@ -2841,6 +3479,24 @@ __metadata: languageName: node linkType: hard +"simple-concat@npm:^1.0.0": + version: 1.0.1 + resolution: "simple-concat@npm:1.0.1" + checksum: 10c0/62f7508e674414008910b5397c1811941d457dfa0db4fd5aa7fa0409eb02c3609608dfcd7508cace75b3a0bf67a2a77990711e32cd213d2c76f4fd12ee86d776 + languageName: node + linkType: hard + +"simple-get@npm:^4.0.0": + version: 4.0.1 + resolution: "simple-get@npm:4.0.1" + dependencies: + decompress-response: "npm:^6.0.0" + once: "npm:^1.3.1" + simple-concat: "npm:^1.0.0" + checksum: 10c0/b0649a581dbca741babb960423248899203165769747142033479a7dc5e77d7b0fced0253c731cd57cf21e31e4d77c9157c3069f4448d558ebc96cf9e1eebcf0 + languageName: node + linkType: hard + "sirv@npm:^3.0.1": version: 3.0.2 resolution: "sirv@npm:3.0.2" @@ -2887,6 +3543,23 @@ __metadata: languageName: node linkType: hard +"source-map-support@npm:^0.5.21": + version: 0.5.21 + resolution: "source-map-support@npm:0.5.21" + dependencies: + buffer-from: "npm:^1.0.0" + source-map: "npm:^0.6.0" + checksum: 10c0/9ee09942f415e0f721d6daad3917ec1516af746a8120bba7bb56278707a37f1eb8642bde456e98454b8a885023af81a16e646869975f06afc1a711fb90484e7d + languageName: node + linkType: hard + +"source-map@npm:^0.6.0": + version: 0.6.1 + resolution: "source-map@npm:0.6.1" + checksum: 10c0/ab55398007c5e5532957cb0beee2368529618ac0ab372d789806f5718123cc4367d57de3904b4e6a4170eb5a0b0f41373066d02ca0735a0c4d75c7d328d3e011 + languageName: node + linkType: hard + "ssri@npm:^12.0.0": version: 12.0.0 resolution: "ssri@npm:12.0.0" @@ -2932,6 +3605,15 @@ __metadata: languageName: node linkType: hard +"string_decoder@npm:^1.1.1": + version: 1.3.0 + resolution: "string_decoder@npm:1.3.0" + dependencies: + safe-buffer: "npm:~5.2.0" + checksum: 10c0/810614ddb030e271cd591935dcd5956b2410dd079d64ff92a1844d6b7588bf992b3e1b69b0f4d34a3e06e0bd73046ac646b5264c1987b20d0601f81ef35d731d + languageName: node + linkType: hard + "strip-ansi-cjs@npm:strip-ansi@^6.0.1, strip-ansi@npm:^6.0.0, strip-ansi@npm:^6.0.1": version: 6.0.1 resolution: "strip-ansi@npm:6.0.1" @@ -2950,6 +3632,13 @@ __metadata: languageName: node linkType: hard +"strip-json-comments@npm:~2.0.1": + version: 2.0.1 + resolution: "strip-json-comments@npm:2.0.1" + checksum: 10c0/b509231cbdee45064ff4f9fd73609e2bcc4e84a4d508e9dd0f31f70356473fde18abfb5838c17d56fb236f5a06b102ef115438de0600b749e818a35fbbc48c43 + 
languageName: node + linkType: hard + "strip-literal@npm:^3.0.0": version: 3.0.0 resolution: "strip-literal@npm:3.0.0" @@ -2959,6 +3648,31 @@ __metadata: languageName: node linkType: hard +"tar-fs@npm:^2.0.0": + version: 2.1.4 + resolution: "tar-fs@npm:2.1.4" + dependencies: + chownr: "npm:^1.1.1" + mkdirp-classic: "npm:^0.5.2" + pump: "npm:^3.0.0" + tar-stream: "npm:^2.1.4" + checksum: 10c0/decb25acdc6839182c06ec83cba6136205bda1db984e120c8ffd0d80182bc5baa1d916f9b6c5c663ea3f9975b4dd49e3c6bb7b1707cbcdaba4e76042f43ec84c + languageName: node + linkType: hard + +"tar-stream@npm:^2.1.4": + version: 2.2.0 + resolution: "tar-stream@npm:2.2.0" + dependencies: + bl: "npm:^4.0.3" + end-of-stream: "npm:^1.4.1" + fs-constants: "npm:^1.0.0" + inherits: "npm:^2.0.3" + readable-stream: "npm:^3.1.1" + checksum: 10c0/2f4c910b3ee7196502e1ff015a7ba321ec6ea837667220d7bcb8d0852d51cb04b87f7ae471008a6fb8f5b1a1b5078f62f3a82d30c706f20ada1238ac797e7692 + languageName: node + linkType: hard + "tar@npm:^7.4.3": version: 7.4.3 resolution: "tar@npm:7.4.3" @@ -3041,6 +3755,15 @@ __metadata: languageName: node linkType: hard +"tunnel-agent@npm:^0.6.0": + version: 0.6.0 + resolution: "tunnel-agent@npm:0.6.0" + dependencies: + safe-buffer: "npm:^5.0.1" + checksum: 10c0/4c7a1b813e7beae66fdbf567a65ec6d46313643753d0beefb3c7973d66fcec3a1e7f39759f0a0b4465883499c6dc8b0750ab8b287399af2e583823e40410a17a + languageName: node + linkType: hard + "typanion@npm:^3.14.0, typanion@npm:^3.8.0": version: 3.14.0 resolution: "typanion@npm:3.14.0" @@ -3114,6 +3837,13 @@ __metadata: languageName: node linkType: hard +"util-deprecate@npm:^1.0.1": + version: 1.0.2 + resolution: "util-deprecate@npm:1.0.2" + checksum: 10c0/41a5bdd214df2f6c3ecf8622745e4a366c4adced864bc3c833739791aeeeb1838119af7daed4ba36428114b5c67dcda034a79c882e97e43c03e66a4dd7389942 + languageName: node + linkType: hard + "vite-node@npm:3.2.4": version: 3.2.4 resolution: "vite-node@npm:3.2.4" @@ -3307,6 +4037,13 @@ __metadata: languageName: node linkType: hard +"wrappy@npm:1": + version: 1.0.2 + resolution: "wrappy@npm:1.0.2" + checksum: 10c0/56fece1a4018c6a6c8e28fbc88c87e0fbf4ea8fd64fc6c63b18f4acc4bd13e0ad2515189786dd2c30d3eec9663d70f4ecf699330002f8ccb547e4a18231fc9f0 + languageName: node + linkType: hard + "ws@npm:^8.18.2": version: 8.18.3 resolution: "ws@npm:8.18.3" From 1d3c823c7b83443144c5234751119aee005b65dc Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 18 Sep 2025 01:35:09 +0400 Subject: [PATCH 12/78] wip --- .../javascript/packages/common/promise.ts | 38 ++++------- .../packages/native/promise.test.ts | 2 +- bindings/javascript/src/lib.rs | 13 +++- bindings/javascript/sync/packages/browser/a | Bin 57344 -> 0 bytes .../javascript/sync/packages/browser/a-shm | Bin 32768 -> 0 bytes .../sync/packages/browser/package.json | 2 +- .../sync/packages/browser/promise.test.ts | 61 ++++++++++++------ .../sync/packages/native/index.d.ts | 16 ----- .../javascript/sync/packages/native/index.js | 3 - .../sync/packages/native/package.json | 2 +- bindings/javascript/sync/src/lib.rs | 4 +- 11 files changed, 72 insertions(+), 69 deletions(-) delete mode 100755 bindings/javascript/sync/packages/browser/a delete mode 100755 bindings/javascript/sync/packages/browser/a-shm diff --git a/bindings/javascript/packages/common/promise.ts b/bindings/javascript/packages/common/promise.ts index 8dbcf5058..88ccb802c 100644 --- a/bindings/javascript/packages/common/promise.ts +++ b/bindings/javascript/packages/common/promise.ts @@ -115,22 +115,17 @@ class Database { const db = this; const 
wrapTxn = (mode) => { return async (...bindParameters) => { - await this.execLock.acquire(); + await db.exec("BEGIN " + mode); + db._inTransaction = true; try { - await db.exec("BEGIN " + mode); - db._inTransaction = true; - try { - const result = await fn(...bindParameters); - await db.exec("COMMIT"); - db._inTransaction = false; - return result; - } catch (err) { - await db.exec("ROLLBACK"); - db._inTransaction = false; - throw err; - } - } finally { - this.execLock.release(); + const result = await fn(...bindParameters); + await db.exec("COMMIT"); + db._inTransaction = false; + return result; + } catch (err) { + await db.exec("ROLLBACK"); + db._inTransaction = false; + throw err; } }; }; @@ -203,18 +198,11 @@ class Database { throw new TypeError("The database connection is not open"); } - await this.execLock.acquire(); + const stmt = this.prepare(sql); try { - const stmt = this.prepare(sql); - try { - await stmt.run(); - } finally { - stmt.close(); - } - } catch (err) { - throw convertError(err); + await stmt.run(); } finally { - this.execLock.release(); + stmt.close(); } } diff --git a/bindings/javascript/packages/native/promise.test.ts b/bindings/javascript/packages/native/promise.test.ts index 5819c8f39..a554154cd 100644 --- a/bindings/javascript/packages/native/promise.test.ts +++ b/bindings/javascript/packages/native/promise.test.ts @@ -22,7 +22,7 @@ test('drizzle-orm', async () => { } }) -test('in-memory db', async () => { +test('in-memory-db-async', async () => { const db = await connect(":memory:"); await db.exec("CREATE TABLE t(x)"); await db.exec("INSERT INTO t VALUES (1), (2), (3)"); diff --git a/bindings/javascript/src/lib.rs b/bindings/javascript/src/lib.rs index 3a9970680..ff910b7f2 100644 --- a/bindings/javascript/src/lib.rs +++ b/bindings/javascript/src/lib.rs @@ -10,9 +10,9 @@ //! - Iterating through query results //! - Managing the I/O event loop -// #[cfg(feature = "browser")] +#[cfg(feature = "browser")] pub mod browser; -// #[cfg(feature = "browser")] +#[cfg(feature = "browser")] use crate::browser::opfs; use napi::bindgen_prelude::*; @@ -62,6 +62,8 @@ pub(crate) fn init_tracing(level_filter: Option) { return; }; let level_filter = match level_filter.as_ref() { + "error" => LevelFilter::ERROR, + "warn" => LevelFilter::WARN, "info" => LevelFilter::INFO, "debug" => LevelFilter::DEBUG, "trace" => LevelFilter::TRACE, @@ -596,7 +598,12 @@ impl Statement { /// Finalizes the statement. 
 #[napi]
 pub fn finalize(&self) -> Result<()> {
-    self.stmt.borrow_mut().take();
+    match self.stmt.try_borrow_mut() {
+        Ok(mut stmt) => {
+            stmt.take();
+        }
+        Err(err) => tracing::error!("borrow error: {:?}", err),
+    }
     Ok(())
 }
 }
diff --git a/bindings/javascript/sync/packages/browser/a b/bindings/javascript/sync/packages/browser/a
deleted file mode 100755
index 8f5630c076b124960766842e96d6e1c9609bce89..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 57344
[57344-byte base85 payload elided: accidentally committed test database file, deleted by this patch]

diff --git a/bindings/javascript/sync/packages/browser/a-shm b/bindings/javascript/sync/packages/browser/a-shm
deleted file mode 100755
index 7c251555d016a9c5a07e151b6fff76239b1cde71..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 32768
[32768-byte base85 payload elided: accidentally committed WAL shared-memory file, deleted by this patch]

diff --git a/bindings/javascript/sync/packages/browser/package.json b/bindings/javascript/sync/packages/browser/package.json
index efddc0a70..e09740154 100644
--- a/bindings/javascript/sync/packages/browser/package.json
+++ b/bindings/javascript/sync/packages/browser/package.json
@@ -42,7 +42,7 @@
     "tsc-build": "npm exec tsc && cp sync.wasm32-wasi.wasm ./dist/sync.wasm32-wasi.wasm && WASM_FILE=sync.wasm32-wasi.wasm JS_FILE=./dist/wasm-inline.js node ../../../scripts/inline-wasm-base64.js && npm run bundle",
     "bundle": "vite build",
     "build": "npm run napi-build && npm run tsc-build",
-    "test": "VITE_TURSO_DB_URL=http://c--a--a.localhost:10000 CI=1 vitest --testTimeout 30000 --browser=chromium --run && VITE_TURSO_DB_URL=http://c--a--a.localhost:10000 CI=1 vitest --testTimeout 30000 --browser=firefox --run"
+    "test": "VITE_TURSO_DB_URL=http://f--a--a.localhost:10000 CI=1 vitest --testTimeout 30000 --browser=chromium --run && VITE_TURSO_DB_URL=http://f--a--a.localhost:10000 CI=1 vitest --testTimeout 30000 --browser=firefox --run"
   },
   "napi": {
     "binaryName": "sync",
diff --git a/bindings/javascript/sync/packages/browser/promise.test.ts b/bindings/javascript/sync/packages/browser/promise.test.ts
index fb60ff61b..ff271d0e6 100644
--- a/bindings/javascript/sync/packages/browser/promise.test.ts
+++ b/bindings/javascript/sync/packages/browser/promise.test.ts
@@ -316,47 +316,72 @@ test('pull-push-concurrent', async () => {
     console.info(await db.stats());
 })
 
-test('concurrent-updates', async () => {
+test('concurrent-updates', { timeout: 60000 }, async () => {
     {
-        const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, longPollTimeoutMs: 5000 });
-        await db.exec("CREATE TABLE IF NOT EXISTS q(x TEXT PRIMARY KEY, y)");
-        await db.exec("DELETE FROM q");
+        const db = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL, longPollTimeoutMs: 10 });
+        await db.exec("CREATE TABLE IF NOT EXISTS three(x TEXT
PRIMARY KEY, y, z)"); + await db.exec("DELETE FROM three"); await db.push(); await db.close(); } - const db1 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL }); - const db2 = await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL }); - async function pull(db) { + let stop = false; + const dbs = []; + for (let i = 0; i < 8; i++) { + dbs.push(await connect({ path: ':memory:', url: process.env.VITE_TURSO_DB_URL })); + } + async function pull(db, i) { try { + console.info('pull', i); await db.pull(); } catch (e) { - // ignore + console.error('pull', i, e); } finally { - setTimeout(async () => await pull(db), 0); + if (!stop) { + setTimeout(async () => await pull(db, i), 0); + } } } - async function push(db) { + async function push(db, i) { try { + console.info('push', i); await db.push(); } catch (e) { - // ignore + console.error('push', i, e); } finally { - setTimeout(async () => await push(db), 0); + if (!stop) { + setTimeout(async () => await push(db, i), 0); + } } } - setTimeout(async () => await pull(db1), 0) - setTimeout(async () => await pull(db2), 0) - setTimeout(async () => await push(db1), 0) - setTimeout(async () => await push(db2), 0) + for (let i = 0; i < dbs.length; i++) { + setTimeout(async () => await pull(dbs[i], i), 0) + setTimeout(async () => await push(dbs[i], i), 0) + } for (let i = 0; i < 1000; i++) { try { - await db1.exec(`INSERT INTO q VALUES ('1', 0) ON CONFLICT DO UPDATE SET y = randomblob(128)`); - await db2.exec(`INSERT INTO q VALUES ('2', 0) ON CONFLICT DO UPDATE SET y = randomblob(128)`); + const tasks = []; + for (let s = 0; s < dbs.length; s++) { + tasks.push(dbs[s].exec(`INSERT INTO three VALUES ('${s}', 0, randomblob(128)) ON CONFLICT DO UPDATE SET y = y + 1, z = randomblob(128)`)); + } + await Promise.all(tasks); } catch (e) { // ignore } await new Promise(resolve => setTimeout(resolve, 1)); } + stop = true; + await Promise.all(dbs.map(db => db.push())); + await Promise.all(dbs.map(db => db.pull())); + let results = []; + for (let i = 0; i < dbs.length; i++) { + results.push(await dbs[i].prepare('SELECT x, y FROM three').all()); + } + for (let i = 0; i < dbs.length; i++) { + expect(results[i]).toEqual(results[0]); + for (let s = 0; s < dbs.length; s++) { + expect(results[i][s].y).toBeGreaterThan(500); + } + } }) test('transform', async () => { diff --git a/bindings/javascript/sync/packages/native/index.d.ts b/bindings/javascript/sync/packages/native/index.d.ts index 4d1b45fa9..af73101f3 100644 --- a/bindings/javascript/sync/packages/native/index.d.ts +++ b/bindings/javascript/sync/packages/native/index.d.ts @@ -73,16 +73,6 @@ export declare class Database { ioLoopAsync(): Promise } -export declare class Opfs { - constructor() - connectDb(path: string, opts?: DatabaseOpts | undefined | null): Promise - complete(completionNo: number, result: number): void -} - -export declare class OpfsFile { - -} - /** A prepared statement. 
*/ export declare class Statement { reset(): void @@ -139,12 +129,6 @@ export declare class Statement { export interface DatabaseOpts { tracing?: string } - -/** - * turso-db in the the browser requires explicit thread pool initialization - * so, we just put no-op task on the thread pool and force emnapi to allocate web worker - */ -export declare function initThreadPool(): Promise export declare class GeneratorHolder { resumeSync(error?: string | undefined | null): GeneratorResponse resumeAsync(error?: string | undefined | null): Promise diff --git a/bindings/javascript/sync/packages/native/index.js b/bindings/javascript/sync/packages/native/index.js index 12e351d61..cd543a959 100644 --- a/bindings/javascript/sync/packages/native/index.js +++ b/bindings/javascript/sync/packages/native/index.js @@ -510,10 +510,7 @@ if (!nativeBinding) { const { Database, Statement, GeneratorHolder, JsDataCompletion, JsProtocolIo, JsProtocolRequestBytes, SyncEngine, SyncEngineChanges, DatabaseChangeTypeJs, SyncEngineProtocolVersion } = nativeBinding export { Database } -export { Opfs } -export { OpfsFile } export { Statement } -export { initThreadPool } export { GeneratorHolder } export { JsDataCompletion } export { JsProtocolIo } diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json index c0c01081c..c7c52414f 100644 --- a/bindings/javascript/sync/packages/native/package.json +++ b/bindings/javascript/sync/packages/native/package.json @@ -31,7 +31,7 @@ "napi-artifacts": "napi artifacts --output-dir .", "tsc-build": "npm exec tsc", "build": "npm run napi-build && npm run tsc-build", - "test": "VITE_TURSO_DB_URL=http://c--a--a.localhost:10000 vitest --run", + "test": "VITE_TURSO_DB_URL=http://d--a--a.localhost:10000 vitest --run", "prepublishOnly": "npm run napi-dirs && npm run napi-artifacts && napi prepublish -t npm" }, "napi": { diff --git a/bindings/javascript/sync/src/lib.rs b/bindings/javascript/sync/src/lib.rs index 13427f501..3223b0795 100644 --- a/bindings/javascript/sync/src/lib.rs +++ b/bindings/javascript/sync/src/lib.rs @@ -6,7 +6,7 @@ pub mod js_protocol_io; use std::{ collections::HashMap, - sync::{Arc, Mutex, MutexGuard, OnceLock, RwLock, RwLockReadGuard}, + sync::{Arc, Mutex, OnceLock, RwLock, RwLockReadGuard}, }; use napi::bindgen_prelude::{AsyncTask, Either5, Null}; @@ -149,6 +149,8 @@ impl SyncEngine { pub fn new(opts: SyncEngineOpts) -> napi::Result { // helpful for local debugging match opts.tracing.as_deref() { + Some("error") => init_tracing(LevelFilter::ERROR), + Some("warn") => init_tracing(LevelFilter::WARN), Some("info") => init_tracing(LevelFilter::INFO), Some("debug") => init_tracing(LevelFilter::DEBUG), Some("trace") => init_tracing(LevelFilter::TRACE), From b1062207435dcc67e2cfd6b0c90cc796b67fe30d Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 18 Sep 2025 01:35:22 +0400 Subject: [PATCH 13/78] main thread in browser can't execute parking - so we use parking lot in spin-lock style for that target --- core/storage/wal.rs | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/core/storage/wal.rs b/core/storage/wal.rs index 3edf3f6d5..d6b111172 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -1639,11 +1639,41 @@ impl WalFile { } fn get_shared_mut(&self) -> parking_lot::RwLockWriteGuard<'_, WalFileShared> { - self.shared.write() + // WASM in browser main thread doesn't have a way to "park" a thread + // so, we spin way here instead of calling 
blocking lock + #[cfg(target_family = "wasm")] + { + loop { + let Some(lock) = self.shared.try_write() else { + std::hint::spin_loop(); + continue; + }; + return lock; + } + } + #[cfg(not(target_family = "wasm"))] + { + self.shared.write() + } } fn get_shared(&self) -> parking_lot::RwLockReadGuard<'_, WalFileShared> { - self.shared.read() + // WASM in browser main thread doesn't have a way to "park" a thread + // so, we spin way here instead of calling blocking lock + #[cfg(target_family = "wasm")] + { + loop { + let Some(lock) = self.shared.try_read() else { + std::hint::spin_loop(); + continue; + }; + return lock; + } + } + #[cfg(not(target_family = "wasm"))] + { + self.shared.read() + } } fn complete_append_frame(&mut self, page_id: u64, frame_id: u64, checksums: (u32, u32)) { From bc2dbe902514a5e6467af4f587520ddbae9febb8 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Fri, 19 Sep 2025 12:56:45 +0400 Subject: [PATCH 14/78] fix bug --- bindings/javascript/packages/common/promise.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/javascript/packages/common/promise.ts b/bindings/javascript/packages/common/promise.ts index 88ccb802c..3009b2fcb 100644 --- a/bindings/javascript/packages/common/promise.ts +++ b/bindings/javascript/packages/common/promise.ts @@ -352,7 +352,7 @@ class Statement { } } } finally { - + this.db.execLock.release(); } } From 7049f3ddae4005e528811444aa6765a535dce23d Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Fri, 19 Sep 2025 13:17:23 +0400 Subject: [PATCH 15/78] fix clock implementation for OPFS IO --- Cargo.lock | 2 +- Cargo.toml | 6 ++---- bindings/javascript/Cargo.toml | 1 + bindings/javascript/src/browser.rs | 6 +++++- bindings/javascript/src/lib.rs | 2 -- bindings/javascript/sync/Cargo.toml | 1 - 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f42165992..79c477a01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4220,6 +4220,7 @@ dependencies = [ name = "turso_node" version = "0.2.0-pre.3" dependencies = [ + "chrono", "napi", "napi-build", "napi-derive", @@ -4322,7 +4323,6 @@ name = "turso_sync_js" version = "0.2.0-pre.3" dependencies = [ "genawaiter", - "http", "napi", "napi-build", "napi-derive", diff --git a/Cargo.toml b/Cargo.toml index 2771c2a31..33352e546 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,9 +33,7 @@ members = [ "perf/throughput/turso", "perf/throughput/rusqlite", ] -exclude = [ - "perf/latency/limbo", -] +exclude = ["perf/latency/limbo"] [workspace.package] version = "0.2.0-pre.3" @@ -60,7 +58,7 @@ limbo_percentile = { path = "extensions/percentile", version = "0.2.0-pre.3" } limbo_regexp = { path = "extensions/regexp", version = "0.2.0-pre.3" } turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.2.0-pre.3" } limbo_uuid = { path = "extensions/uuid", version = "0.2.0-pre.3" } -turso_parser = { path = "parser", version = "0.2.0-pre.3" } +turso_parser = { path = "parser", version = "0.2.0-pre.3" } sql_generation = { path = "sql_generation" } strum = { version = "0.26", features = ["derive"] } strum_macros = "0.26" diff --git a/bindings/javascript/Cargo.toml b/bindings/javascript/Cargo.toml index 1b5001839..dcd2ba441 100644 --- a/bindings/javascript/Cargo.toml +++ b/bindings/javascript/Cargo.toml @@ -16,6 +16,7 @@ napi = { version = "3.1.3", default-features = false, features = ["napi6"] } napi-derive = { version = "3.1.1", default-features = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } tracing.workspace = true 
+chrono = { workspace = true, default-features = false, features = ["clock"] } [features] encryption = ["turso_core/encryption"] diff --git a/bindings/javascript/src/browser.rs b/bindings/javascript/src/browser.rs index c59d86e7f..4ebae0a59 100644 --- a/bindings/javascript/src/browser.rs +++ b/bindings/javascript/src/browser.rs @@ -140,7 +140,11 @@ impl Opfs { impl Clock for Opfs { fn now(&self) -> Instant { - Instant { secs: 0, micros: 0 } // TODO + let now = chrono::Local::now(); + Instant { + secs: now.timestamp(), + micros: now.timestamp_subsec_micros(), + } } } diff --git a/bindings/javascript/src/lib.rs b/bindings/javascript/src/lib.rs index ff910b7f2..df4724e6e 100644 --- a/bindings/javascript/src/lib.rs +++ b/bindings/javascript/src/lib.rs @@ -12,8 +12,6 @@ #[cfg(feature = "browser")] pub mod browser; -#[cfg(feature = "browser")] -use crate::browser::opfs; use napi::bindgen_prelude::*; use napi::{Env, Task}; diff --git a/bindings/javascript/sync/Cargo.toml b/bindings/javascript/sync/Cargo.toml index 00749c5d3..1057c4e8f 100644 --- a/bindings/javascript/sync/Cargo.toml +++ b/bindings/javascript/sync/Cargo.toml @@ -10,7 +10,6 @@ repository.workspace = true crate-type = ["cdylib"] [dependencies] -http = "1.3.1" napi = { version = "3.1.3", default-features = false, features = ["napi6"] } napi-derive = { version = "3.1.1", default-features = true } turso_sync_engine = { workspace = true } From cfc8728774bf914e1156b6907b9bff996eacfa73 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Fri, 19 Sep 2025 13:25:52 +0400 Subject: [PATCH 16/78] fix clippy --- bindings/javascript/src/browser.rs | 26 +++++++++++++------------- bindings/javascript/sync/src/lib.rs | 10 +++++----- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/bindings/javascript/src/browser.rs b/bindings/javascript/src/browser.rs index 4ebae0a59..16b071ec8 100644 --- a/bindings/javascript/src/browser.rs +++ b/bindings/javascript/src/browser.rs @@ -64,7 +64,7 @@ pub struct OpfsInner { } thread_local! 
{ - static OPFS: Arc = Arc::new(Opfs::new()); + static OPFS: Arc = Arc::new(Opfs::default()); } #[napi] @@ -74,9 +74,6 @@ struct OpfsFile { opfs: Opfs, } -// unsafe impl Send for OpfsFile {} -// unsafe impl Sync for OpfsFile {} - unsafe impl Send for Opfs {} unsafe impl Sync for Opfs {} @@ -107,15 +104,6 @@ pub fn opfs() -> Arc { } impl Opfs { - pub fn new() -> Self { - Self { - inner: Arc::new(OpfsInner { - completion_no: RefCell::new(0), - completions: RefCell::new(HashMap::new()), - }), - } - } - pub fn complete(&self, completion_no: u32, result: i32) { let completion = { let mut completions = self.inner.completions.borrow_mut(); @@ -148,6 +136,18 @@ impl Clock for Opfs { } } +impl Default for Opfs { + fn default() -> Self { + Self { + #[allow(clippy::arc_with_non_send_sync)] + inner: Arc::new(OpfsInner { + completion_no: RefCell::new(0), + completions: RefCell::new(HashMap::new()), + }), + } + } +} + #[link(wasm_import_module = "env")] extern "C" { fn lookup_file(path: *const u8, path_len: usize) -> i32; diff --git a/bindings/javascript/sync/src/lib.rs b/bindings/javascript/sync/src/lib.rs index 3223b0795..f851af991 100644 --- a/bindings/javascript/sync/src/lib.rs +++ b/bindings/javascript/sync/src/lib.rs @@ -255,7 +255,7 @@ impl SyncEngine { #[napi] pub fn push(&self) -> GeneratorHolder { self.run(async move |coro, guard| { - let sync_engine = try_read(&guard)?; + let sync_engine = try_read(guard)?; let sync_engine = try_unwrap(&sync_engine)?; sync_engine.push_changes_to_remote(coro).await?; Ok(None) @@ -265,7 +265,7 @@ impl SyncEngine { #[napi] pub fn stats(&self) -> GeneratorHolder { self.run(async move |coro, guard| { - let sync_engine = try_read(&guard)?; + let sync_engine = try_read(guard)?; let sync_engine = try_unwrap(&sync_engine)?; let stats = sync_engine.stats(coro).await?; Ok(Some(GeneratorResponse::SyncEngineStats { @@ -282,7 +282,7 @@ impl SyncEngine { #[napi] pub fn wait(&self) -> GeneratorHolder { self.run(async move |coro, guard| { - let sync_engine = try_read(&guard)?; + let sync_engine = try_read(guard)?; let sync_engine = try_unwrap(&sync_engine)?; Ok(Some(GeneratorResponse::SyncEngineChanges { changes: SyncEngineChanges { @@ -296,7 +296,7 @@ impl SyncEngine { pub fn apply(&self, changes: &mut SyncEngineChanges) -> GeneratorHolder { let status = changes.status.take().unwrap(); self.run(async move |coro, guard| { - let sync_engine = try_read(&guard)?; + let sync_engine = try_read(guard)?; let sync_engine = try_unwrap(&sync_engine)?; sync_engine.apply_changes_from_remote(coro, status).await?; Ok(None) @@ -306,7 +306,7 @@ impl SyncEngine { #[napi] pub fn checkpoint(&self) -> GeneratorHolder { self.run(async move |coro, guard| { - let sync_engine = try_read(&guard)?; + let sync_engine = try_read(guard)?; let sync_engine = try_unwrap(&sync_engine)?; sync_engine.checkpoint(coro).await?; Ok(None) From c24e5219d2665c3875f273e48621b093b2be1a8a Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Fri, 19 Sep 2025 14:24:42 +0400 Subject: [PATCH 17/78] remove log file --- bindings/javascript/sync/packages/native/log | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 bindings/javascript/sync/packages/native/log diff --git a/bindings/javascript/sync/packages/native/log b/bindings/javascript/sync/packages/native/log deleted file mode 100644 index b6989da2e..000000000 --- a/bindings/javascript/sync/packages/native/log +++ /dev/null @@ -1,14 +0,0 @@ - -> @tursodatabase/sync@0.2.0-pre.3 test -> VITE_TURSO_DB_URL=http://c--a--a.localhost:10000 vitest --run -t 
update - - - RUN v3.2.4 /home/sivukhin/turso/limbo/bindings/javascript/sync/packages/native - - ✓ promise.test.ts (14 tests | 13 skipped) 109ms - - Test Files 1 passed (1) - Tests 1 passed | 13 skipped (14) - Start at 16:40:50 - Duration 436ms (transform 99ms, setup 0ms, collect 118ms, tests 109ms, environment 0ms, prepare 57ms) - From d2cd48d9e4219bbb0e397278432f55c6e1c821eb Mon Sep 17 00:00:00 2001 From: Samuel Marks <807580+SamuelMarks@users.noreply.github.com> Date: Fri, 19 Sep 2025 20:40:11 -0500 Subject: [PATCH 18/78] [sqlite3/tests/compat/mod.rs] Use canonical path to fix temp path on macOS ; rename to resolve binding connascence --- sqlite3/tests/compat/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sqlite3/tests/compat/mod.rs b/sqlite3/tests/compat/mod.rs index eb90c35a4..d04b933e8 100644 --- a/sqlite3/tests/compat/mod.rs +++ b/sqlite3/tests/compat/mod.rs @@ -1247,8 +1247,10 @@ mod tests { // Test with "main" database name let filename = sqlite3_db_filename(db, c"main".as_ptr()); assert!(!filename.is_null()); - let filename_str = std::ffi::CStr::from_ptr(filename).to_str().unwrap(); - assert_eq!(filename_str, temp_file.path().to_str().unwrap()); + let filename_pathbuf = + std::fs::canonicalize(std::ffi::CStr::from_ptr(filename).to_str().unwrap()) + .unwrap(); + assert_eq!(filename_pathbuf, temp_file.path().canonicalize().unwrap()); // Test with NULL database name (defaults to main) let filename_default = sqlite3_db_filename(db, ptr::null()); From 30538e789832ee173b125ce83be0aa60455049d6 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 18 Sep 2025 15:21:02 -0300 Subject: [PATCH 19/78] modify Begin struct --- simulator/generation/property.rs | 6 ++++-- sql_generation/model/query/transaction.rs | 13 ++++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 4ab9e9ff1..812ffb781 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1176,8 +1176,10 @@ fn property_insert_values_select( // - [x] The inserted row will not be updated. // - [ ] The table `t` will not be renamed, dropped, or altered. 
(todo: add this constraint once ALTER or DROP is implemented)
     if let Some(ref interactive) = interactive {
-        queries.push(Query::Begin(Begin {
-            immediate: interactive.start_with_immediate,
+        queries.push(Query::Begin(if interactive.start_with_immediate {
+            Begin::Immediate
+        } else {
+            Begin::Deferred
         }));
     }
     for _ in 0..rng.random_range(0..3) {
diff --git a/sql_generation/model/query/transaction.rs b/sql_generation/model/query/transaction.rs
index 1114200a0..40ced59cd 100644
--- a/sql_generation/model/query/transaction.rs
+++ b/sql_generation/model/query/transaction.rs
@@ -3,8 +3,10 @@ use std::fmt::Display;
 use serde::{Deserialize, Serialize};
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Begin {
-    pub immediate: bool,
+pub enum Begin {
+    Deferred,
+    Immediate,
+    Concurrent,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -15,7 +17,12 @@ pub struct Rollback;
 
 impl Display for Begin {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "BEGIN {}", if self.immediate { "IMMEDIATE" } else { "" })
+        let keyword = match self {
+            Begin::Deferred => "",
+            Begin::Immediate => "IMMEDIATE",
+            Begin::Concurrent => "CONCURRENT",
+        };
+        write!(f, "BEGIN {keyword}")
     }
 }

From c4843d6a6ede9f66f6ba560e87fb917ca2c54d4c Mon Sep 17 00:00:00 2001
From: pedrocarlo
Date: Thu, 18 Sep 2025 16:37:22 -0300
Subject: [PATCH 20/78] refactor SimulatorEnv to hold committed tables and
 separate connection tables for snapshot isolation

---
 simulator/model/mod.rs           |  2 ++
 simulator/runner/differential.rs |  4 +--
 simulator/runner/doublecheck.rs  |  4 +--
 simulator/runner/env.rs          | 60 ++++++++++++++++++++++++++++----
 simulator/runner/execution.rs    |  2 +-
 5 files changed, 60 insertions(+), 12 deletions(-)

diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs
index 20adbbe9d..237750683 100644
--- a/simulator/model/mod.rs
+++ b/simulator/model/mod.rs
@@ -358,6 +358,8 @@ impl Shadow for Select {
 impl Shadow for Begin {
     type Result = Vec<Vec<SimValue>>;
     fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result {
+        // FIXME: currently the snapshot is taken eagerly
+        // this is wrong for Deferred transactions
         tables.snapshot = Some(tables.tables.clone());
         vec![]
     }
diff --git a/simulator/runner/differential.rs b/simulator/runner/differential.rs
index 6dd5803ee..c2be34b38 100644
--- a/simulator/runner/differential.rs
+++ b/simulator/runner/differential.rs
@@ -59,8 +59,8 @@ pub(crate) fn execute_interactions(
     let mut env = env.lock().unwrap();
     let mut rusqlite_env = rusqlite_env.lock().unwrap();
 
-    env.tables.clear();
-    rusqlite_env.tables.clear();
+    env.clear_tables();
+    rusqlite_env.clear_tables();
 
     let now = std::time::Instant::now();
diff --git a/simulator/runner/doublecheck.rs b/simulator/runner/doublecheck.rs
index a2c98b424..d90408686 100644
--- a/simulator/runner/doublecheck.rs
+++ b/simulator/runner/doublecheck.rs
@@ -89,8 +89,8 @@ pub(crate) fn execute_plans(
     let mut env = env.lock().unwrap();
     let mut doublecheck_env = doublecheck_env.lock().unwrap();
 
-    env.tables.clear();
-    doublecheck_env.tables.clear();
+    env.clear_tables();
+    doublecheck_env.clear_tables();
 
     let now = std::time::Instant::now();
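The FIXME in simulator/model/mod.rs above points at a real semantic gap: SQLite's
BEGIN DEFERRED does not start the transaction (and therefore fixes no snapshot)
until the first statement touches the database, so cloning the tables eagerly at
BEGIN can make the shadow model diverge from the real database. A minimal sketch
of the lazier timing (the shadow_begin helper is hypothetical, and grouping
CONCURRENT with DEFERRED is an assumption of this sketch, not something the
patch specifies):

    fn shadow_begin(begin: &Begin, tables: &mut SimulatorTables) {
        match begin {
            // IMMEDIATE acquires the write lock at BEGIN time, so an
            // eager snapshot matches its semantics.
            Begin::Immediate => {
                tables.snapshot = Some(tables.tables.clone());
            }
            // DEFERRED (and, under this sketch's assumption, CONCURRENT)
            // leave the snapshot unset here; it should be taken on the
            // transaction's first read or write instead.
            Begin::Deferred | Begin::Concurrent => {}
        }
    }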
diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs
index c5423f97c..37c17966a 100644
--- a/simulator/runner/env.rs
+++ b/simulator/runner/env.rs
@@ -8,6 +8,7 @@ use std::sync::Arc;
 use garde::Validate;
 use rand::{Rng, SeedableRng};
 use rand_chacha::ChaCha8Rng;
+use sql_generation::generation::GenerationContext;
 use sql_generation::model::table::Table;
 use turso_core::Database;
 
@@ -71,8 +72,12 @@ pub(crate) struct SimulatorEnv {
     pub(crate) paths: Paths,
     pub(crate) type_: SimulationType,
     pub(crate) phase: SimulationPhase,
-    pub(crate) tables: SimulatorTables,
     pub memory_io: bool,
+
+    /// If a connection's entry is None, that connection is not in a transaction
+    pub connection_tables: Vec<Option<SimulatorTables>>,
+    /// Table data that is committed into the database or WAL
+    pub committed_tables: SimulatorTables,
 }
 
 impl UnwindSafe for SimulatorEnv {}
@@ -81,10 +86,6 @@ impl SimulatorEnv {
     pub(crate) fn clone_without_connections(&self) -> Self {
         SimulatorEnv {
             opts: self.opts.clone(),
-            tables: self.tables.clone(),
-            connections: (0..self.connections.len())
-                .map(|_| SimConnection::Disconnected)
-                .collect(),
             io: self.io.clone(),
             db: self.db.clone(),
             rng: self.rng.clone(),
@@ -93,11 +94,17 @@
             phase: self.phase,
             memory_io: self.memory_io,
             profile: self.profile.clone(),
+            connections: (0..self.connections.len())
+                .map(|_| SimConnection::Disconnected)
+                .collect(),
+            // TODO: not sure if connection_tables should be recreated instead
+            connection_tables: self.connection_tables.clone(),
+            committed_tables: self.committed_tables.clone(),
         }
     }
 
     pub(crate) fn clear(&mut self) {
-        self.tables.clear();
+        self.clear_tables();
         self.connections.iter_mut().for_each(|c| c.disconnect());
 
         self.rng = ChaCha8Rng::seed_from_u64(self.opts.seed);
@@ -284,7 +291,6 @@
 
         SimulatorEnv {
             opts,
-            tables: SimulatorTables::new(),
             connections,
             paths,
             rng,
@@ -294,6 +300,8 @@
             phase: SimulationPhase::Test,
             memory_io: cli_opts.memory_io,
             profile: profile.clone(),
+            committed_tables: SimulatorTables::new(),
+            connection_tables: vec![None; profile.max_connections],
         }
     }
 
@@ -327,6 +335,44 @@
             }
         };
     }
+
+    /// Clears the committed tables and the connection tables
+    pub fn clear_tables(&mut self) {
+        self.committed_tables.clear();
+        self.connection_tables.iter_mut().for_each(|t| {
+            if let Some(t) = t {
+                t.clear();
+            }
+        });
+    }
+
+    pub fn connection_context(&self, connection_index: usize) -> impl GenerationContext {
+        struct ConnectionGenContext<'a> {
+            tables: &'a Vec<Table>,
+            opts: &'a sql_generation::generation::Opts,
+        }
+
+        impl<'a> GenerationContext for ConnectionGenContext<'a> {
+            fn tables(&self) -> &Vec<Table> {
+                self.tables
+            }
+
+            fn opts(&self) -> &sql_generation::generation::Opts {
+                self.opts
+            }
+        }
+
+        let tables = if let Some(tables) = self.connection_tables.get(connection_index).unwrap() {
+            &tables.tables
+        } else {
+            &self.committed_tables.tables
+        };
+
+        ConnectionGenContext {
+            opts: &self.profile.query.gen_opts,
+            tables,
+        }
+    }
 }
 
 pub trait ConnectionTrait
diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs
index 5c3f81a6b..2ff819f22 100644
--- a/simulator/runner/execution.rs
+++ b/simulator/runner/execution.rs
@@ -65,7 +65,7 @@
     env.clear_poison();
 
     let mut env = env.lock().unwrap();
-    env.tables.clear();
+    env.clear_tables();
 
     for _tick in 0..env.opts.ticks {
         tracing::trace!("Executing tick {}", _tick);
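The split between committed_tables and connection_tables gives each connection
its own view of the schema: a connection inside a transaction generates queries
against its private snapshot, while everyone else keeps generating against the
committed state. A minimal sketch of the visibility rule connection_context
encodes (the table name and the helper function are hypothetical, written only
to illustrate the intent; GenerationContext is assumed to be in scope):

    // Suppose connection 0 created table `t_txn` inside a still-open
    // transaction, while connection 1 has no transaction open.
    fn snapshot_isolation_sketch(env: &SimulatorEnv) {
        // Connection 0 resolves to its transaction snapshot, so the
        // uncommitted table is visible to its generator.
        let ctx0 = env.connection_context(0);
        assert!(ctx0.tables().iter().any(|t| t.name == "t_txn"));

        // Connection 1 falls back to committed_tables, where `t_txn`
        // only appears after connection 0 commits.
        let ctx1 = env.connection_context(1);
        assert!(!ctx1.tables().iter().any(|t| t.name == "t_txn"));
    }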
From 13f36880f870ee2fa31c3d33829a63939c6df81b Mon Sep 17 00:00:00 2001
From: pedrocarlo
Date: Thu, 18 Sep 2025 17:45:07 -0300
Subject: [PATCH 21/78] adjust generation code to use the context from the
 current connection index instead of the whole database

---
 simulator/generation/mod.rs      |  22 ++--
 simulator/generation/plan.rs     | 130 ++++++++++++++---------
 simulator/generation/property.rs | 175 ++++++++++++++++++------------
 simulator/runner/env.rs          |  25 +++--
 simulator/runner/execution.rs    |   4 +-
 5 files changed, 208 insertions(+), 148 deletions(-)

diff --git a/simulator/generation/mod.rs b/simulator/generation/mod.rs
index 88a40d708..140075d32 100644
--- a/simulator/generation/mod.rs
+++ b/simulator/generation/mod.rs
@@ -1,6 +1,6 @@
 use sql_generation::generation::GenerationContext;
 
-use crate::runner::env::{SimulatorEnv, SimulatorTables};
+use crate::runner::env::SimulatorTables;
 
 pub mod plan;
 pub mod property;
@@ -20,22 +20,16 @@ pub(crate) trait Shadow {
     fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result;
 }
 
-impl GenerationContext for SimulatorEnv {
+/// Generation context that will always panic when called
+/// This is meant to be used when we want to ensure that no downstream arbitrary fn will use this context
+pub struct PanicGenerationContext;
+
+impl GenerationContext for PanicGenerationContext {
     fn tables(&self) -> &Vec<Table> {
-        &self.tables.tables
+        unimplemented!("you are not supposed to use this context")
     }
 
     fn opts(&self) -> &sql_generation::generation::Opts {
-        &self.profile.query.gen_opts
-    }
-}
-
-impl GenerationContext for &mut SimulatorEnv {
-    fn tables(&self) -> &Vec<Table> {
-        &self.tables.tables
-    }
-
-    fn opts(&self) -> &sql_generation::generation::Opts {
-        &self.profile.query.gen_opts
+        unimplemented!("you are not supposed to use this context")
     }
 }
diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs
index 5dde1d11e..cb19184f1 100644
--- a/simulator/generation/plan.rs
+++ b/simulator/generation/plan.rs
@@ -21,7 +21,7 @@ use turso_core::{Connection, Result, StepResult};
 
 use crate::{
     SimulatorEnv,
-    generation::Shadow,
+    generation::{PanicGenerationContext, Shadow},
     model::Query,
     runner::env::{SimConnection, SimulationType, SimulatorTables},
 };
@@ -426,8 +426,8 @@
         let num_interactions = env.opts.max_interactions as usize;
 
         // First create at least one table
-        let create_query = Create::arbitrary(rng, env);
-        env.tables.push(create_query.table.clone());
+        let create_query = Create::arbitrary(rng, &env.connection_context(0));
+        env.committed_tables.push(create_query.table.clone());
 
         // initial query starts at 0th connection
         plan.plan.push(Interactions::new(
@@ -441,8 +441,9 @@
             plan.plan.len(),
             num_interactions
         );
-        let interactions = Interactions::arbitrary_from(rng, env, (env, plan.stats()));
-        interactions.shadow(&mut env.tables);
+        let interactions =
+            Interactions::arbitrary_from(rng, &PanicGenerationContext, (env, plan.stats()));
+        interactions.shadow(env.get_conn_tables_mut(interactions.connection_index));
         plan.plan.push(interactions);
     }
 
@@ -834,66 +835,90 @@ fn reopen_database(env: &mut SimulatorEnv) {
     };
 }
 
-fn random_create<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
-    let mut create = Create::arbitrary(rng, env);
-    while env.tables.iter().any(|t| t.name == create.table.name) {
-        create = Create::arbitrary(rng, env);
+fn random_create<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions {
+    let conn_ctx = env.connection_context(conn_index);
+    let mut create = Create::arbitrary(rng, &conn_ctx);
+    while conn_ctx
+        .tables()
+        .iter()
+        .any(|t| t.name == create.table.name)
+    {
+        create = Create::arbitrary(rng, &conn_ctx);
     }
+    Interactions::new(conn_index, InteractionsType::Query(Query::Create(create)))
+}
+
+fn random_read<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions {
     Interactions::new(
-        env.choose_conn(rng),
-        InteractionsType::Query(Query::Create(create)),
+        conn_index,
+        InteractionsType::Query(Query::Select(Select::arbitrary(
+            rng,
+fn random_read<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions {
     Interactions::new(
-        env.choose_conn(rng),
-        InteractionsType::Query(Query::Create(create)),
+        conn_index,
+        InteractionsType::Query(Query::Select(Select::arbitrary(
+            rng,
+            &env.connection_context(conn_index),
+        ))),
     )
 }

-fn random_read<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
+fn random_expr<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions {
     Interactions::new(
-        env.choose_conn(rng),
-        InteractionsType::Query(Query::Select(Select::arbitrary(rng, env))),
+        conn_index,
+        InteractionsType::Query(Query::Select(
+            SelectFree::arbitrary(rng, &env.connection_context(conn_index)).0,
+        )),
     )
 }

-fn random_expr<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
+fn random_write<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions {
     Interactions::new(
-        env.choose_conn(rng),
-        InteractionsType::Query(Query::Select(SelectFree::arbitrary(rng, env).0)),
+        conn_index,
+        InteractionsType::Query(Query::Insert(Insert::arbitrary(
+            rng,
+            &env.connection_context(conn_index),
+        ))),
     )
 }

-fn random_write<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
+fn random_delete<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions {
     Interactions::new(
-        env.choose_conn(rng),
-        InteractionsType::Query(Query::Insert(Insert::arbitrary(rng, env))),
+        conn_index,
+        InteractionsType::Query(Query::Delete(Delete::arbitrary(
+            rng,
+            &env.connection_context(conn_index),
+        ))),
     )
 }

-fn random_delete<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
+fn random_update<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions {
     Interactions::new(
-        env.choose_conn(rng),
-        InteractionsType::Query(Query::Delete(Delete::arbitrary(rng, env))),
+        conn_index,
+        InteractionsType::Query(Query::Update(Update::arbitrary(
+            rng,
+            &env.connection_context(conn_index),
+        ))),
     )
 }

-fn random_update<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
+fn random_drop<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions {
     Interactions::new(
-        env.choose_conn(rng),
-        InteractionsType::Query(Query::Update(Update::arbitrary(rng, env))),
+        conn_index,
+        InteractionsType::Query(Query::Drop(Drop::arbitrary(
+            rng,
+            &env.connection_context(conn_index),
+        ))),
     )
 }

-fn random_drop<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
-    Interactions::new(
-        env.choose_conn(rng),
-        InteractionsType::Query(Query::Drop(Drop::arbitrary(rng, env))),
-    )
-}
-
-fn random_create_index<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Option<Interactions> {
-    if env.tables.is_empty() {
+fn random_create_index<R: rand::Rng>(
+    rng: &mut R,
+    env: &SimulatorEnv,
+    conn_index: usize,
+) -> Option<Interactions> {
+    let conn_ctx = env.connection_context(conn_index);
+    if conn_ctx.tables().is_empty() {
         return None;
     }
-    let mut create_index = CreateIndex::arbitrary(rng, env);
-    while env
-        .tables
+    let mut create_index = CreateIndex::arbitrary(rng, &conn_ctx);
+    while conn_ctx
+        .tables()
         .iter()
         .find(|t| t.name == create_index.table_name)
         .expect("table should exist")
@@ -901,11 +926,11 @@ fn random_create_index<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Option<Interactions>
         .iter()
         .any(|i| i == &create_index.index_name)
     {
-        create_index = CreateIndex::arbitrary(rng, env);
+        create_index = CreateIndex::arbitrary(rng, &conn_ctx);
     }

     Some(Interactions::new(
-        env.choose_conn(rng),
+        conn_index,
         InteractionsType::Query(Query::CreateIndex(create_index)),
     ))
 }

@@ -927,60 +952,61 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats)> for Interactions {
         (env, stats): (&SimulatorEnv, InteractionStats),
     ) -> Self {
         let remaining_ = remaining(env.opts.max_interactions, &env.profile.query, &stats);
+        let conn_index = env.choose_conn(rng);
         frequency(
             vec![
                 (
                     u32::min(remaining_.select, remaining_.insert) + remaining_.create,
                     Box::new(|rng: &mut R| {
                         Interactions::new(
-                            env.choose_conn(rng),
+                            conn_index,
                             InteractionsType::Property(Property::arbitrary_from(
                                 rng,
-                                env,
-                                (env, &stats),
+                                &PanicGenerationContext,
+                                (env, &stats, conn_index),
                             )),
                         )
                     }),
                 ),
                 (
                     remaining_.select,
-                    Box::new(|rng: &mut R| random_read(rng, env)),
+                    Box::new(|rng: &mut R| random_read(rng, env, conn_index)),
                 ),
                 (
                     remaining_.select / 3,
-                    Box::new(|rng: &mut R| random_expr(rng, env)),
+                    Box::new(|rng: &mut R| random_expr(rng, env, conn_index)),
                 ),
                 (
                     remaining_.insert,
-                    Box::new(|rng: &mut R| random_write(rng, env)),
+                    Box::new(|rng: &mut R| random_write(rng, env, conn_index)),
                 ),
                 (
                     remaining_.create,
-                    Box::new(|rng: &mut R| random_create(rng, env)),
+                    Box::new(|rng: &mut R| random_create(rng, env, conn_index)),
                 ),
                 (
                     remaining_.create_index,
                     Box::new(|rng: &mut R| {
-                        if let Some(interaction) = random_create_index(rng, env) {
+                        if let Some(interaction) = random_create_index(rng, env, conn_index) {
                             interaction
                         } else {
                             // if no tables exist, we can't create an index, so fall back to creating a table
-                            random_create(rng, env)
+                            random_create(rng, env, conn_index)
                         }
                     }),
                 ),
                 (
                     remaining_.delete,
-                    Box::new(|rng: &mut R| random_delete(rng, env)),
+                    Box::new(|rng: &mut R| random_delete(rng, env, conn_index)),
                 ),
                 (
                     remaining_.update,
-                    Box::new(|rng: &mut R| random_update(rng, env)),
+                    Box::new(|rng: &mut R| random_update(rng, env, conn_index)),
                 ),
                 (
                     // remaining_.drop,
                     0,
-                    Box::new(|rng: &mut R| random_drop(rng, env)),
+                    Box::new(|rng: &mut R| random_drop(rng, env, conn_index)),
                 ),
                 (
                     remaining_
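A rough sketch of the weighted-choice contract that the `frequency` helper used above provides (simplified signature; the real helper lives in sql_generation and may differ in detail):

    fn frequency_sketch<R: rand::Rng, T>(
        choices: Vec<(u32, Box<dyn FnOnce(&mut R) -> T + '_>)>,
        rng: &mut R,
    ) -> T {
        // Sample a point in [0, total) and walk the weights until it lands;
        // zero-weight entries can never be chosen.
        let total: u32 = choices.iter().map(|(w, _)| *w).sum();
        let mut point = rng.random_range(0..total.max(1));
        for (weight, make) in choices {
            if point < weight {
                return make(rng);
            }
            point -= weight;
        }
        unreachable!("all weights were zero")
    }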
diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs
index 812ffb781..3674ed936 100644
--- a/simulator/generation/property.rs
+++ b/simulator/generation/property.rs
@@ -226,7 +226,8 @@ impl Property {
         let assumption = InteractionType::Assumption(Assertion::new(
             format!("table {} exists", table.clone()),
             move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
-                if env.tables.iter().any(|t| t.name == table_name) {
+                let conn_tables = env.get_conn_tables(connection_index);
+                if conn_tables.iter().any(|t| t.name == table_name) {
                     Ok(Ok(()))
                 } else {
                     Ok(Err(format!("table {table_name} does not exist")))
@@ -246,8 +247,8 @@ impl Property {
             let Ok(rows) = rows else {
                 return Ok(Err(format!("expected rows but got error: {rows:?}")));
             };
-            let sim_table = env
-                .tables
+            let conn_tables = env.get_conn_tables(connection_index);
+            let sim_table = conn_tables
                 .iter()
                 .find(|t| t.name == table)
                 .expect("table should be in environment");
@@ -283,7 +284,8 @@ impl Property {
         let assumption = InteractionType::Assumption(Assertion::new(
             format!("table {} exists", table.clone()),
             move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
-                if env.tables.iter().any(|t| t.name == table.clone()) {
+                let conn_tables = env.get_conn_tables(connection_index);
+                if conn_tables.iter().any(|t| t.name == table.clone()) {
                     Ok(Ok(()))
                 } else {
                     Ok(Err(format!("table {} does not exist", table.clone())))
@@ -360,7 +362,8 @@ impl Property {
             {
                 let table_name = table.clone();
                 move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
-                    if env.tables.iter().any(|t| t.name == table_name) {
+                    let conn_tables = env.get_conn_tables(connection_index);
+                    if conn_tables.iter().any(|t| t.name == table_name) {
                         Ok(Ok(()))
                     } else {
                         Ok(Err(format!("table {table_name} does not exist")))
@@ -429,7 +432,8 @@ impl Property {
         let assumption = InteractionType::Assumption(Assertion::new(
             "Double-Create-Failure should not be called on an existing table".to_string(),
             move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
-                if !env.tables.iter().any(|t| t.name == table_name) {
+                let conn_tables = env.get_conn_tables(connection_index);
+                if !conn_tables.iter().any(|t| t.name == table_name) {
                     Ok(Ok(()))
                 } else {
                     Ok(Err(format!("table {table_name} already exists")))
@@ -484,15 +488,16 @@ impl Property {
             {
                 let table_name = select.dependencies();
                 move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
+                    let conn_tables = env.get_conn_tables(connection_index);
                     if table_name
                         .iter()
-                        .all(|table| env.tables.iter().any(|t| t.name == *table))
+                        .all(|table| conn_tables.iter().any(|t| t.name == *table))
                     {
                         Ok(Ok(()))
                     } else {
                         let missing_tables = table_name
                             .iter()
-                            .filter(|t| !env.tables.iter().any(|t2| t2.name == **t))
+                            .filter(|t| !conn_tables.iter().any(|t2| t2.name == **t))
                             .collect::<Vec<_>>();
                         Ok(Err(format!("missing tables: {missing_tables:?}")))
                     }
@@ -544,12 +549,13 @@ impl Property {
             {
                 let table = table.clone();
                 move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
-                    if env.tables.iter().any(|t| t.name == table) {
+                    let conn_tables = env.get_conn_tables(connection_index);
+                    if conn_tables.iter().any(|t| t.name == table) {
                         Ok(Ok(()))
                     } else {
                         {
                             let available_tables: Vec<String> =
-                                env.tables.iter().map(|t| t.name.clone()).collect();
+                                conn_tables.iter().map(|t| t.name.clone()).collect();
                             Ok(Err(format!(
                                 "table \'{table}\' not found. Available tables: {available_tables:?}"
                             )))
@@ -617,12 +623,13 @@ impl Property {
             {
                 let table = table.clone();
                 move |_, env: &mut SimulatorEnv| {
-                    if env.tables.iter().any(|t| t.name == table) {
+                    let conn_tables = env.get_conn_tables(connection_index);
+                    if conn_tables.iter().any(|t| t.name == table) {
                         Ok(Ok(()))
                     } else {
                         {
                             let available_tables: Vec<String> =
-                                env.tables.iter().map(|t| t.name.clone()).collect();
+                                conn_tables.iter().map(|t| t.name.clone()).collect();
                             Ok(Err(format!(
                                 "table \'{table}\' not found. Available tables: {available_tables:?}"
                             )))
@@ -684,12 +691,13 @@ impl Property {
             {
                 let table = table.clone();
                 move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
-                    if env.tables.iter().any(|t| t.name == table) {
+                    let conn_tables = env.get_conn_tables(connection_index);
+                    if conn_tables.iter().any(|t| t.name == table) {
                         Ok(Ok(()))
                     } else {
                         {
                             let available_tables: Vec<String> =
-                                env.tables.iter().map(|t| t.name.clone()).collect();
+                                conn_tables.iter().map(|t| t.name.clone()).collect();
                             Ok(Err(format!(
                                 "table \'{table}\' not found. Available tables: {available_tables:?}"
                             )))
@@ -788,7 +796,8 @@ impl Property {
                     let last = stack.last().unwrap();
                     match last {
                         Ok(_) => {
-                            let _ = query_clone.shadow(&mut env.tables);
+                            let _ =
+                                query_clone.shadow(env.get_conn_tables_mut(connection_index));
                             Ok(Ok(()))
                         }
                         Err(err) => {
@@ -821,15 +830,16 @@ impl Property {
             {
                 let tables = select.dependencies();
                 move |_: &Vec<ResultSet>, env: &mut SimulatorEnv| {
+                    let conn_tables = env.get_conn_tables(connection_index);
                     if tables
                         .iter()
-                        .all(|table| env.tables.iter().any(|t| t.name == *table))
+                        .all(|table| conn_tables.iter().any(|t| t.name == *table))
                     {
                         Ok(Ok(()))
                     } else {
                         let missing_tables = tables
                             .iter()
-                            .filter(|t| !env.tables.iter().any(|t2| t2.name == **t))
+                            .filter(|t| !conn_tables.iter().any(|t2| t2.name == **t))
                            .collect::<Vec<_>>();
                         Ok(Err(format!("missing tables: {missing_tables:?}")))
                     }
@@ -1030,7 +1040,7 @@ fn assert_all_table_values(
     let assertion = InteractionType::Assertion(Assertion::new(format!("table {table} should contain all of its expected values"), {
         let table = table.clone();
         move |stack: &Vec<ResultSet>, env: &mut SimulatorEnv| {
-            let table = env.tables.iter().find(|t| t.name == table).ok_or_else(|| {
+            let table = env.get_conn_tables(connection_index).iter().find(|t| t.name == table).ok_or_else(|| {
                 LimboError::InternalError(format!(
                     "table {table} should exist in simulator env"
                 ))
@@ -1140,14 +1150,14 @@ pub(crate) fn remaining(
 fn property_insert_values_select<R: rand::Rng>(
     rng: &mut R,
-    env: &SimulatorEnv,
     remaining: &Remaining,
+    ctx: &impl GenerationContext,
 ) -> Property {
     // Get a random table
-    let table = pick(&env.tables, rng);
+    let table = pick(ctx.tables(), rng);
     // Generate rows to insert
     let rows = (0..rng.random_range(1..=5))
-        .map(|_| Vec::<SimValue>::arbitrary_from(rng, env, table))
+        .map(|_| Vec::<SimValue>::arbitrary_from(rng, ctx, table))
        .collect::<Vec<_>>();

     // Pick a random row to select
@@ -1183,7 +1193,7 @@ fn property_insert_values_select(
     }
     for _ in 0..rng.random_range(0..3) {
-        let query = Query::arbitrary_from(rng, env, remaining);
+        let query = Query::arbitrary_from(rng, ctx, remaining);
         match &query {
             Query::Delete(Delete {
                 table: t,
@@ -1226,7 +1236,7 @@ fn property_insert_values_select(
     // Select the row
     let select_query = Select::simple(
         table.name.clone(),
-        Predicate::arbitrary_from(rng, env, (table, &row)),
+        Predicate::arbitrary_from(rng, ctx, (table, &row)),
     );

     Property::InsertValuesSelect {
@@ -1238,9 +1248,12 @@ fn property_insert_values_select(
     }
 }

-fn property_read_your_updates_back<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Property {
+fn property_read_your_updates_back<R: rand::Rng>(
+    rng: &mut R,
+    ctx: &impl GenerationContext,
+) -> Property {
     // e.g. UPDATE t SET a=1, b=2 WHERE c=1;
-    let update = Update::arbitrary(rng, env);
+    let update = Update::arbitrary(rng, ctx);
     // e.g. SELECT a, b FROM t WHERE c=1;
     let select = Select::single(
         update.table().to_string(),
@@ -1257,22 +1270,25 @@ fn property_read_your_updates_back(rng: &mut R, env: &SimulatorEnv)
     Property::ReadYourUpdatesBack { update, select }
 }

-fn property_table_has_expected_content<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Property {
+fn property_table_has_expected_content<R: rand::Rng>(
+    rng: &mut R,
+    ctx: &impl GenerationContext,
+) -> Property {
     // Get a random table
-    let table = pick(&env.tables, rng);
+    let table = pick(ctx.tables(), rng);
     Property::TableHasExpectedContent {
         table: table.name.clone(),
     }
 }

-fn property_select_limit<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Property {
+fn property_select_limit<R: rand::Rng>(rng: &mut R, ctx: &impl GenerationContext) -> Property {
     // Get a random table
-    let table = pick(&env.tables, rng);
+    let table = pick(ctx.tables(), rng);
     // Select the table
     let select = Select::single(
         table.name.clone(),
         vec![ResultColumn::Star],
-        Predicate::arbitrary_from(rng, env, table),
+        Predicate::arbitrary_from(rng, ctx, table),
         Some(rng.random_range(1..=5)),
         Distinctness::All,
     );
     Property::SelectLimit { select }
 }

 fn property_double_create_failure<R: rand::Rng>(
     rng: &mut R,
-    env: &SimulatorEnv,
     remaining: &Remaining,
+    ctx: &impl GenerationContext,
 ) -> Property {
     // Create the table
-    let create_query = Create::arbitrary(rng, env);
+    let create_query = Create::arbitrary(rng, ctx);
     let table = &create_query.table;

     // Create random queries respecting the constraints
@@ -1294,7 +1310,7 @@ fn property_double_create_failure(
     // - [x] There will be no errors in the middle interactions. (best effort)
     // - [ ] Table `t` will not be renamed or dropped. (todo: add this constraint once ALTER or DROP is implemented)
     for _ in 0..rng.random_range(0..3) {
-        let query = Query::arbitrary_from(rng, env, remaining);
+        let query = Query::arbitrary_from(rng, ctx, remaining);
         if let Query::Create(Create { table: t }) = &query {
             // There will be no errors in the middle interactions.
             // - Creating the same table is an error
@@ -1313,13 +1329,13 @@ fn property_double_create_failure(

 fn property_delete_select<R: rand::Rng>(
     rng: &mut R,
-    env: &SimulatorEnv,
     remaining: &Remaining,
+    ctx: &impl GenerationContext,
 ) -> Property {
     // Get a random table
-    let table = pick(&env.tables, rng);
+    let table = pick(ctx.tables(), rng);
     // Generate a random predicate
-    let predicate = Predicate::arbitrary_from(rng, env, table);
+    let predicate = Predicate::arbitrary_from(rng, ctx, table);

     // Create random queries respecting the constraints
     let mut queries = Vec::new();
@@ -1327,7 +1343,7 @@ fn property_delete_select(
     // - [x] A row that holds for the predicate will not be inserted.
     // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented)
     for _ in 0..rng.random_range(0..3) {
-        let query = Query::arbitrary_from(rng, env, remaining);
+        let query = Query::arbitrary_from(rng, ctx, remaining);
         match &query {
             Query::Insert(Insert::Values { table: t, values }) => {
                 // A row that holds for the predicate will not be inserted.
@@ -1371,18 +1387,18 @@ fn property_delete_select(

 fn property_drop_select<R: rand::Rng>(
     rng: &mut R,
-    env: &SimulatorEnv,
     remaining: &Remaining,
+    ctx: &impl GenerationContext,
 ) -> Property {
     // Get a random table
-    let table = pick(&env.tables, rng);
+    let table = pick(ctx.tables(), rng);

     // Create random queries respecting the constraints
     let mut queries = Vec::new();
     // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort)
     // - [-] The table `t` will not be created, no table will be renamed to `t`. (todo: update this constraint once ALTER is implemented)
     for _ in 0..rng.random_range(0..3) {
-        let query = Query::arbitrary_from(rng, env, remaining);
+        let query = Query::arbitrary_from(rng, ctx, remaining);
         if let Query::Create(Create { table: t }) = &query {
             // - The table `t` will not be created
             if t.name == table.name {
@@ -1394,7 +1410,7 @@ fn property_drop_select(

     let select = Select::simple(
         table.name.clone(),
-        Predicate::arbitrary_from(rng, env, table),
+        Predicate::arbitrary_from(rng, ctx, table),
     );

     Property::DropSelect {
@@ -1404,11 +1420,14 @@ fn property_drop_select(
     }
 }

-fn property_select_select_optimizer<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Property {
+fn property_select_select_optimizer<R: rand::Rng>(
+    rng: &mut R,
+    ctx: &impl GenerationContext,
+) -> Property {
     // Get a random table
-    let table = pick(&env.tables, rng);
+    let table = pick(ctx.tables(), rng);
     // Generate a random predicate
-    let predicate = Predicate::arbitrary_from(rng, env, table);
+    let predicate = Predicate::arbitrary_from(rng, ctx, table);
     // Transform into a Binary predicate to force values to be casted to a bool
     let expr = ast::Expr::Binary(
         Box::new(predicate.0),
@@ -1422,12 +1441,15 @@ fn property_select_select_optimizer(
     }
 }

-fn property_where_true_false_null<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Property {
+fn property_where_true_false_null<R: rand::Rng>(
+    rng: &mut R,
+    ctx: &impl GenerationContext,
+) -> Property {
     // Get a random table
-    let table = pick(&env.tables, rng);
+    let table = pick(ctx.tables(), rng);
     // Generate a random predicate
-    let p1 = Predicate::arbitrary_from(rng, env, table);
-    let p2 = Predicate::arbitrary_from(rng, env, table);
+    let p1 = Predicate::arbitrary_from(rng, ctx, table);
+    let p2 = Predicate::arbitrary_from(rng, ctx, table);

     // Create the select query
     let select = Select::simple(table.name.clone(), p1);
@@ -1440,13 +1462,13 @@ fn property_where_true_false_null(

 fn property_union_all_preserves_cardinality<R: rand::Rng>(
     rng: &mut R,
-    env: &SimulatorEnv,
+    ctx: &impl GenerationContext,
 ) -> Property {
     // Get a random table
-    let table = pick(&env.tables, rng);
+    let table = pick(ctx.tables(), rng);
     // Generate a random predicate
-    let p1 = Predicate::arbitrary_from(rng, env, table);
-    let p2 = Predicate::arbitrary_from(rng, env, table);
+    let p1 = Predicate::arbitrary_from(rng, ctx, table);
+    let p2 = Predicate::arbitrary_from(rng, ctx, table);

     // Create the select query
     let select = Select::single(
@@ -1465,33 +1487,34 @@ fn property_union_all_preserves_cardinality(

 fn property_fsync_no_wait<R: rand::Rng>(
     rng: &mut R,
-    env: &SimulatorEnv,
     remaining: &Remaining,
+    ctx: &impl GenerationContext,
 ) -> Property {
     Property::FsyncNoWait {
-        query: Query::arbitrary_from(rng, env, remaining),
-        tables: env.tables.iter().map(|t| t.name.clone()).collect(),
+        query: Query::arbitrary_from(rng, ctx, remaining),
+        tables: ctx.tables().iter().map(|t| t.name.clone()).collect(),
     }
 }

 fn property_faulty_query<R: rand::Rng>(
     rng: &mut R,
-    env: &SimulatorEnv,
     remaining: &Remaining,
+    ctx: &impl GenerationContext,
 ) -> Property {
     Property::FaultyQuery {
-        query: Query::arbitrary_from(rng, env, remaining),
-        tables: env.tables.iter().map(|t| t.name.clone()).collect(),
+        query: Query::arbitrary_from(rng, ctx, remaining),
+        tables: ctx.tables().iter().map(|t| t.name.clone()).collect(),
     }
 }
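Every generator above now draws from a `GenerationContext` rather than the whole `SimulatorEnv`; a hypothetical new property generator would follow the same shape (the name and query are illustrative only, not part of the patch):

    fn property_select_star<R: rand::Rng>(
        rng: &mut R,
        ctx: &impl GenerationContext,
    ) -> Select {
        // Only tables visible to this connection are candidates.
        let table = pick(ctx.tables(), rng);
        Select::simple(
            table.name.clone(),
            Predicate::arbitrary_from(rng, ctx, table),
        )
    }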
-impl ArbitraryFrom<(&SimulatorEnv, &InteractionStats)> for Property {
+impl ArbitraryFrom<(&SimulatorEnv, &InteractionStats, usize)> for Property {
     fn arbitrary_from<R: rand::Rng, C: GenerationContext>(
         rng: &mut R,
-        context: &C,
-        (env, stats): (&SimulatorEnv, &InteractionStats),
+        _context: &C,
+        (env, stats, conn_index): (&SimulatorEnv, &InteractionStats, usize),
     ) -> Self {
-        let opts = context.opts();
+        let conn_ctx = &env.connection_context(conn_index);
+        let opts = conn_ctx.opts();
         let remaining_ = remaining(env.opts.max_interactions, &env.profile.query, stats);

         frequency(
             vec![
                 (
@@ -1502,15 +1525,17 @@ impl ArbitraryFrom<(&SimulatorEnv, &InteractionStats)> for Property {
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_insert_values_select(rng, env, &remaining_)),
+                    Box::new(|rng: &mut R| {
+                        property_insert_values_select(rng, &remaining_, conn_ctx)
+                    }),
                 ),
                 (
                     remaining_.select,
-                    Box::new(|rng: &mut R| property_table_has_expected_content(rng, env)),
+                    Box::new(|rng: &mut R| property_table_has_expected_content(rng, conn_ctx)),
                 ),
                 (
                     u32::min(remaining_.select, remaining_.insert),
-                    Box::new(|rng: &mut R| property_read_your_updates_back(rng, env)),
+                    Box::new(|rng: &mut R| property_read_your_updates_back(rng, conn_ctx)),
                 ),
                 (
                     if !env.opts.disable_double_create_failure {
                         remaining_.create / 2
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_double_create_failure(rng, env, &remaining_)),
+                    Box::new(|rng: &mut R| {
+                        property_double_create_failure(rng, &remaining_, conn_ctx)
+                    }),
                 ),
                 (
                     if !env.opts.disable_select_limit {
                         remaining_.select / 2
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_select_limit(rng, env)),
+                    Box::new(|rng: &mut R| property_select_limit(rng, conn_ctx)),
                 ),
                 (
                     if !env.opts.disable_delete_select {
                         remaining_.delete
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_delete_select(rng, env, &remaining_)),
+                    Box::new(|rng: &mut R| property_delete_select(rng, &remaining_, conn_ctx)),
                ),
                 (
                     if !env.opts.disable_drop_select {
                         remaining_.drop
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_drop_select(rng, env, &remaining_)),
+                    Box::new(|rng: &mut R| property_drop_select(rng, &remaining_, conn_ctx)),
                 ),
                 (
                     if !env.opts.disable_select_optimizer {
                         remaining_.select / 2
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_select_select_optimizer(rng, env)),
+                    Box::new(|rng: &mut R| property_select_select_optimizer(rng, conn_ctx)),
                 ),
                 (
                     if opts.indexes && !env.opts.disable_where_true_false_null {
                         remaining_.select / 2
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_where_true_false_null(rng, env)),
+                    Box::new(|rng: &mut R| property_where_true_false_null(rng, conn_ctx)),
                 ),
                 (
                     if opts.indexes && !env.opts.disable_union_all_preserves_cardinality {
                         remaining_.select / 2
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_union_all_preserves_cardinality(rng, env)),
+                    Box::new(|rng: &mut R| property_union_all_preserves_cardinality(rng, conn_ctx)),
                 ),
                 (
                     if env.profile.io.enable && !env.opts.disable_fsync_no_wait {
                         remaining_.select
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_fsync_no_wait(rng, env, &remaining_)),
+                    Box::new(|rng: &mut R| property_fsync_no_wait(rng, &remaining_, conn_ctx)),
                 ),
                 (
                     if env.profile.io.enable
                         && env.profile.io.fault.enable
                         && !env.opts.disable_faulty_query
                     {
                         remaining_.select
                     } else {
                         0
                     },
-                    Box::new(|rng: &mut R| property_faulty_query(rng, env, &remaining_)),
+                    Box::new(|rng: &mut R| property_faulty_query(rng, &remaining_, conn_ctx)),
                 ),
             ],
             rng,
diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs
index 37c17966a..922bfe736 100644
--- a/simulator/runner/env.rs
+++ b/simulator/runner/env.rs
@@ -346,7 +346,8 @@ impl SimulatorEnv {
         });
     }

-    pub fn connection_context(&self, connection_index: usize) -> impl GenerationContext {
+    // TODO: does not yet create the appropriate context to avoid WriteWriteConflicts
+    pub fn connection_context(&self, conn_index: usize) -> impl GenerationContext {
         struct ConnectionGenContext<'a> {
             tables: &'a Vec<Table>,
             opts: &'a sql_generation::generation::Opts,
         }
@@ -362,17 +363,29 @@ impl SimulatorEnv {
             }
         }

-        let tables = if let Some(tables) = self.connection_tables.get(connection_index).unwrap() {
-            &tables.tables
-        } else {
-            &self.committed_tables.tables
-        };
+        let tables = &self.get_conn_tables(conn_index).tables;

         ConnectionGenContext {
             opts: &self.profile.query.gen_opts,
             tables,
         }
     }
+
+    pub fn get_conn_tables(&self, conn_index: usize) -> &SimulatorTables {
+        self.connection_tables
+            .get(conn_index)
+            .unwrap()
+            .as_ref()
+            .unwrap_or(&self.committed_tables)
+    }
+
+    pub fn get_conn_tables_mut(&mut self, conn_index: usize) -> &mut SimulatorTables {
+        self.connection_tables
+            .get_mut(conn_index)
+            .unwrap()
+            .as_mut()
+            .unwrap_or(&mut self.committed_tables)
+    }
 }

 pub trait ConnectionTrait
diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs
index 2ff819f22..676fc5a2f 100644
--- a/simulator/runner/execution.rs
+++ b/simulator/runner/execution.rs
@@ -230,7 +230,7 @@ pub fn execute_interaction_turso(
             limbo_integrity_check(&conn)?;
         }
     }
-    let _ = interaction.shadow(&mut env.tables);
+    let _ = interaction.shadow(env.get_conn_tables_mut(interaction.connection_index));

     Ok(ExecutionContinuation::NextInteraction)
 }
@@ -323,7 +323,7 @@ fn execute_interaction_rusqlite(
         }
     }

-    let _ = interaction.shadow(&mut env.tables);
+    let _ = interaction.shadow(env.get_conn_tables_mut(interaction.connection_index));

     Ok(ExecutionContinuation::NextInteraction)
 }
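The lookup rule these two accessors implement, in miniature (sketch; assumes the patch-21 field types on SimulatorEnv):

    fn visible_tables(env: &SimulatorEnv, conn_index: usize) -> &SimulatorTables {
        match &env.connection_tables[conn_index] {
            Some(txn_tables) => txn_tables,  // in a transaction: the private copy
            None => &env.committed_tables,   // otherwise: the committed state
        }
    }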
From c3cdb0e0fdc4b64a79f5c5e895f5325058091239 Mon Sep 17 00:00:00 2001
From: pedrocarlo
Date: Thu, 18 Sep 2025 20:14:02 -0300
Subject: [PATCH 22/78] add begin concurrent before each statement with mvcc

---
 Cargo.lock                   |   1 +
 simulator/Cargo.toml         |   1 +
 simulator/generation/plan.rs | 231 ++++++++++++++++++++++-------------
 simulator/main.rs            |   6 +-
 simulator/shrink/plan.rs     |  18 +--
 5 files changed, 159 insertions(+), 98 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 9e2bb330d..10207eb20 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2140,6 +2140,7 @@ dependencies = [
 "chrono",
 "clap",
 "dirs 6.0.0",
+ "either",
 "env_logger 0.11.7",
 "garde",
 "hex",
diff --git a/simulator/Cargo.toml b/simulator/Cargo.toml
index 9ea6d093e..8c37dd8f0 100644
--- a/simulator/Cargo.toml
+++ b/simulator/Cargo.toml
@@ -44,3 +44,4 @@ json5 = { version = "0.4.1" }
 strum = { workspace = true }
 parking_lot = { workspace = true }
 indexmap = { workspace = true }
+either = "1.15.0"
diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs
index cb19184f1..7a5201287 100644
--- a/simulator/generation/plan.rs
+++ b/simulator/generation/plan.rs
@@ -13,7 +13,11 @@
 use serde::{Deserialize, Serialize};
 use sql_generation::{
     generation::{Arbitrary, ArbitraryFrom, GenerationContext, frequency, query::SelectFree},
     model::{
-        query::{Create, CreateIndex, Delete, Drop, Insert, Select, update::Update},
+        query::{
+            Create, CreateIndex, Delete, Drop, Insert, Select,
+            transaction::{Begin, Commit},
+            update::Update,
+        },
         table::SimValue,
     },
 };
@@ -32,10 +36,46 @@
 pub(crate) type ResultSet = Result<Vec<Vec<SimValue>>>;

 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub(crate) struct InteractionPlan {
-    pub(crate) plan: Vec<Interactions>,
+    pub plan: Vec<Interactions>,
+    pub mvcc: bool,
 }

 impl InteractionPlan {
+    pub(crate) fn new(mvcc: bool) -> Self {
+        Self {
+            plan: Vec::new(),
+            mvcc,
+        }
+    }
+
+    pub fn new_with(plan: Vec<Interactions>, mvcc: bool) -> Self {
+        Self { plan, mvcc }
+    }
+
+    #[inline]
+    pub fn plan(&self) -> &[Interactions] {
+        &self.plan
+    }
+
+    // TODO: this is just simplified logic so we can get something rolling with begin concurrent
+    // transactions in the simulator. Ideally, when we generate the plan, we will have begin and
+    // commit statements spanning multiple interactions
+    pub fn push(&mut self, interactions: Interactions) {
+        if self.mvcc {
+            let conn_index = interactions.connection_index;
+            let begin = Interactions::new(
+                conn_index,
+                InteractionsType::Query(Query::Begin(Begin::Concurrent)),
+            );
+            let commit =
+                Interactions::new(conn_index, InteractionsType::Query(Query::Commit(Commit)));
+            self.plan.push(begin);
+            self.plan.push(interactions);
+            self.plan.push(commit);
+        } else {
+            self.plan.push(interactions);
+        }
+    }
+
     /// compute_via_diff computes a plan from a given `.plan` file without the need to parse
     /// SQL. This is possible because there are two versions of the plan file: one that is human
     /// readable and one that is serialized as JSON. Under watch mode, the users will be able to
@@ -121,6 +161,109 @@ impl InteractionPlan {
             })
             .collect()
     }
+
+    pub(crate) fn stats(&self) -> InteractionStats {
+        let mut stats = InteractionStats {
+            select_count: 0,
+            insert_count: 0,
+            delete_count: 0,
+            update_count: 0,
+            create_count: 0,
+            create_index_count: 0,
+            drop_count: 0,
+            begin_count: 0,
+            commit_count: 0,
+            rollback_count: 0,
+        };
+
+        fn query_stat(q: &Query, stats: &mut InteractionStats) {
+            match q {
+                Query::Select(_) => stats.select_count += 1,
+                Query::Insert(_) => stats.insert_count += 1,
+                Query::Delete(_) => stats.delete_count += 1,
+                Query::Create(_) => stats.create_count += 1,
+                Query::Drop(_) => stats.drop_count += 1,
+                Query::Update(_) => stats.update_count += 1,
+                Query::CreateIndex(_) => stats.create_index_count += 1,
+                Query::Begin(_) => stats.begin_count += 1,
+                Query::Commit(_) => stats.commit_count += 1,
+                Query::Rollback(_) => stats.rollback_count += 1,
+            }
+        }
+        for interactions in &self.plan {
+            match &interactions.interactions {
+                InteractionsType::Property(property) => {
+                    for interaction in &property.interactions(interactions.connection_index) {
+                        if let InteractionType::Query(query) = &interaction.interaction {
+                            query_stat(query, &mut stats);
+                        }
+                    }
+                }
+                InteractionsType::Query(query) => {
+                    query_stat(query, &mut stats);
+                }
+                InteractionsType::Fault(_) => {}
+            }
+        }
+
+        stats
+    }
+
+    pub fn generate_plan<R: rand::Rng>(rng: &mut R, env: &mut SimulatorEnv) -> Self {
+        let mut plan = InteractionPlan::new(env.profile.experimental_mvcc);
+
+        let num_interactions = env.opts.max_interactions as usize;
+
+        // First create at least one table
+        let create_query = Create::arbitrary(rng, &env.connection_context(0));
+        env.committed_tables.push(create_query.table.clone());
+
+        // initial query starts at 0th connection
+        plan.plan.push(Interactions::new(
+            0,
+            InteractionsType::Query(Query::Create(create_query)),
+        ));
+
+        while plan.len() < num_interactions {
+            tracing::debug!(
+                "Generating interaction {}/{}",
+                plan.len(),
+                num_interactions
+            );
+            let interactions =
+                Interactions::arbitrary_from(rng, &PanicGenerationContext, (env, plan.stats()));
+            interactions.shadow(env.get_conn_tables_mut(interactions.connection_index));
+            plan.push(interactions);
+        }
+
+        tracing::info!("Generated plan with {} interactions", plan.plan.len());
+
+        plan
+    }
+}
+
+impl Deref for InteractionPlan {
+    type Target = [Interactions];
+
+    fn deref(&self) -> &Self::Target {
+        &self.plan
+    }
+}
+
+impl DerefMut for InteractionPlan {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.plan
+    }
+}
+
+impl IntoIterator for InteractionPlan {
+    type Item = Interactions;
+
+    type IntoIter = <Vec<Interactions> as IntoIterator>::IntoIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.plan.into_iter()
+    }
 }

 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
@@ -368,90 +511,6 @@ impl Display for Fault {
     }
 }

-impl InteractionPlan {
-    pub(crate) fn new() -> Self {
-        Self { plan: Vec::new() }
-    }
-
-    pub(crate) fn stats(&self) -> InteractionStats {
-        let mut stats = InteractionStats {
-            select_count: 0,
-            insert_count: 0,
-            delete_count: 0,
-            update_count: 0,
-            create_count: 0,
-            create_index_count: 0,
-            drop_count: 0,
-            begin_count: 0,
-            commit_count: 0,
-            rollback_count: 0,
-        };
-
-        fn query_stat(q: &Query, stats: &mut InteractionStats) {
-            match q {
-                Query::Select(_) => stats.select_count += 1,
-                Query::Insert(_) => stats.insert_count += 1,
-                Query::Delete(_) => stats.delete_count += 1,
-                Query::Create(_) => stats.create_count += 1,
-                Query::Drop(_) => stats.drop_count += 1,
-                Query::Update(_) => stats.update_count += 1,
-                Query::CreateIndex(_) => stats.create_index_count += 1,
-                Query::Begin(_) => stats.begin_count += 1,
-                Query::Commit(_) => stats.commit_count += 1,
-                Query::Rollback(_) => stats.rollback_count += 1,
-            }
-        }
-        for interactions in &self.plan {
-            match &interactions.interactions {
-                InteractionsType::Property(property) => {
-                    for interaction in &property.interactions(interactions.connection_index) {
-                        if let InteractionType::Query(query) = &interaction.interaction {
-                            query_stat(query, &mut stats);
-                        }
-                    }
-                }
-                InteractionsType::Query(query) => {
-                    query_stat(query, &mut stats);
-                }
-                InteractionsType::Fault(_) => {}
-            }
-        }
-
-        stats
-    }
-
-    pub fn generate_plan<R: rand::Rng>(rng: &mut R, env: &mut SimulatorEnv) -> Self {
-        let mut plan = InteractionPlan::new();
-
-        let num_interactions = env.opts.max_interactions as usize;
-
-        // First create at least one table
-        let create_query = Create::arbitrary(rng, &env.connection_context(0));
-        env.committed_tables.push(create_query.table.clone());
-
-        // initial query starts at 0th connection
-        plan.plan.push(Interactions::new(
-            0,
-            InteractionsType::Query(Query::Create(create_query)),
-        ));
-
-        while plan.plan.len() < num_interactions {
-            tracing::debug!(
-                "Generating interaction {}/{}",
-                plan.plan.len(),
-                num_interactions
-            );
-            let interactions =
-                Interactions::arbitrary_from(rng, &PanicGenerationContext, (env, plan.stats()));
-            interactions.shadow(env.get_conn_tables_mut(interactions.connection_index));
-            plan.plan.push(interactions);
-        }
-
-        tracing::info!("Generated plan with {} interactions", plan.plan.len());
-        plan
-    }
-}
-
 #[derive(Debug, Clone)]
 pub struct Interaction {
     pub connection_index: usize,
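With mvcc enabled, one pushed interaction therefore becomes three plan entries; e.g. (sketch, with `insert` standing in for any previously generated query):

    let mut plan = InteractionPlan::new(true);
    plan.push(Interactions::new(
        0,
        InteractionsType::Query(Query::Insert(insert)),
    ));
    // plan.plan now holds, in order and all on connection 0:
    //   BEGIN CONCURRENT; INSERT ...; COMMIT;
    assert_eq!(plan.len(), 3);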
diff --git a/simulator/main.rs b/simulator/main.rs
index 00f64b5fb..5fdba980e 100644
--- a/simulator/main.rs
+++ b/simulator/main.rs
@@ -330,7 +330,7 @@ fn run_simulator(
         tracing::trace!(
             "adding bug to bugbase, seed: {}, plan: {}, error: {}",
             env.opts.seed,
-            plan.plan.len(),
+            plan.len(),
             error
         );
         bugbase
@@ -361,8 +361,8 @@ fn run_simulator(

     tracing::info!(
         "shrinking succeeded, reduced the plan from {} to {}",
-        plan.plan.len(),
-        final_plan.plan.len()
+        plan.len(),
+        final_plan.len()
     );
     // Save the shrunk database
     if let Some(bugbase) = bugbase.as_deref_mut() {
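These `plan.len()` calls resolve through the Deref impl added above; the pattern in miniature (standalone sketch, not simulator code):

    struct Plan {
        items: Vec<u32>,
    }

    impl std::ops::Deref for Plan {
        type Target = [u32];
        fn deref(&self) -> &Self::Target {
            &self.items
        }
    }

    fn demo() {
        let plan = Plan { items: vec![1, 2, 3] };
        // Slice methods like len() and iter() now work on Plan directly.
        assert_eq!(plan.len(), 3);
    }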
diff --git a/simulator/shrink/plan.rs b/simulator/shrink/plan.rs
index 143c1d0d6..557ca6d83 100644
--- a/simulator/shrink/plan.rs
+++ b/simulator/shrink/plan.rs
@@ -54,7 +54,7 @@ impl InteractionPlan {
         }
     }

-    let before = self.plan.len();
+    let before = self.len();

     // Remove all properties after the failing one
     plan.plan.truncate(secondary_interactions_index + 1);
@@ -124,7 +124,7 @@ impl InteractionPlan {
         retain
     });

-    let after = plan.plan.len();
+    let after = plan.len();

     tracing::info!(
         "Shrinking interaction plan from {} to {} properties",
@@ -184,7 +184,7 @@ impl InteractionPlan {
         }
     }

-    let before = self.plan.len();
+    let before = self.len();

     plan.plan.truncate(secondary_interactions_index + 1);
@@ -196,8 +196,8 @@ impl InteractionPlan {
             | Property::DoubleCreateFailure { queries, .. }
             | Property::DeleteSelect { queries, .. }
             | Property::DropSelect { queries, .. } => {
-                let mut temp_plan = InteractionPlan {
-                    plan: queries
+                let mut temp_plan = InteractionPlan::new_with(
+                    queries
                         .iter()
                         .map(|q| {
                             Interactions::new(
@@ -206,7 +206,8 @@ impl InteractionPlan {
                             )
                         })
                         .collect(),
-                };
+                    self.mvcc,
+                );

                 temp_plan = InteractionPlan::iterative_shrink(
                     temp_plan,
@@ -218,7 +219,6 @@ impl InteractionPlan {
                 //temp_plan = Self::shrink_queries(temp_plan, failing_execution, result, env);

                 *queries = temp_plan
-                    .plan
                     .into_iter()
                     .filter_map(|i| match i.interactions {
                         InteractionsType::Query(q) => Some(q),
@@ -247,7 +247,7 @@ impl InteractionPlan {
         secondary_interactions_index,
     );

-    let after = plan.plan.len();
+    let after = plan.len();

     tracing::info!(
         "Shrinking interaction plan from {} to {} properties",
@@ -266,7 +266,7 @@ impl InteractionPlan {
         env: Arc<Mutex<SimulatorEnv>>,
         secondary_interaction_index: usize,
     ) -> InteractionPlan {
-        for i in (0..plan.plan.len()).rev() {
+        for i in (0..plan.len()).rev() {
             if i == secondary_interaction_index {
                 continue;
             }

From 850dbc75a2cbe28530a0464956bd34bdd0012a63 Mon Sep 17 00:00:00 2001
From: pedrocarlo
Date: Fri, 19 Sep 2025 14:50:14 -0300
Subject: [PATCH 23/78] adjust shrinking to keep previous interactions if
 there are no depending tables

---
 simulator/generation/plan.rs |   6 +-
 simulator/shrink/plan.rs     | 123 ++++++++++++++++++-----------------
 2 files changed, 64 insertions(+), 65 deletions(-)

diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs
index 7a5201287..39982637b 100644
--- a/simulator/generation/plan.rs
+++ b/simulator/generation/plan.rs
@@ -225,11 +225,7 @@ impl InteractionPlan {
         ));

         while plan.len() < num_interactions {
-            tracing::debug!(
-                "Generating interaction {}/{}",
-                plan.len(),
-                num_interactions
-            );
+            tracing::debug!("Generating interaction {}/{}", plan.len(), num_interactions);
             let interactions =
                 Interactions::arbitrary_from(rng, &PanicGenerationContext, (env, plan.stats()));
             interactions.shadow(env.get_conn_tables_mut(interactions.connection_index));
diff --git a/simulator/shrink/plan.rs b/simulator/shrink/plan.rs
index 557ca6d83..1c3c715d6 100644
--- a/simulator/shrink/plan.rs
+++ b/simulator/shrink/plan.rs
@@ -59,70 +59,73 @@ impl InteractionPlan {
     // Remove all properties after the failing one
     plan.plan.truncate(secondary_interactions_index + 1);

-    let mut idx = 0;
-    // Remove all properties that do not use the failing tables
-    plan.plan.retain_mut(|interactions| {
-        let retain = if idx == secondary_interactions_index {
-            if let InteractionsType::Property(
-                Property::FsyncNoWait { tables, .. } | Property::FaultyQuery { tables, .. },
-            ) = &mut interactions.interactions
-            {
-                tables.retain(|table| depending_tables.contains(table));
-            }
-            true
-        } else {
-            let mut has_table = interactions
-                .uses()
-                .iter()
-                .any(|t| depending_tables.contains(t));
-
-            if has_table {
-                // Remove the extensional parts of the properties
-                if let InteractionsType::Property(p) = &mut interactions.interactions {
-                    match p {
-                        Property::InsertValuesSelect { queries, .. }
-                        | Property::DoubleCreateFailure { queries, .. }
-                        | Property::DeleteSelect { queries, .. }
-                        | Property::DropSelect { queries, .. } => {
-                            queries.clear();
-                        }
-                        Property::FsyncNoWait { tables, query }
-                        | Property::FaultyQuery { tables, query } => {
-                            if !query.uses().iter().any(|t| depending_tables.contains(t)) {
-                                tables.clear();
-                            } else {
-                                tables.retain(|table| depending_tables.contains(table));
-                            }
-                        }
-                        Property::SelectLimit { .. }
-                        | Property::SelectSelectOptimizer { .. }
-                        | Property::WhereTrueFalseNull { .. }
-                        | Property::UNIONAllPreservesCardinality { .. }
-                        | Property::ReadYourUpdatesBack { .. }
-                        | Property::TableHasExpectedContent { .. } => {}
-                    }
-                }
-                // Check again after query clear if the interactions still uses the failing table
-                has_table = interactions
-                    .uses()
-                    .iter()
-                    .any(|t| depending_tables.contains(t));
-            }
-            let is_fault = matches!(interactions.interactions, InteractionsType::Fault(..));
-            is_fault
-                || (has_table
-                    && !matches!(
-                        interactions.interactions,
-                        InteractionsType::Query(Query::Select(_))
-                            | InteractionsType::Property(Property::SelectLimit { .. })
-                            | InteractionsType::Property(
-                                Property::SelectSelectOptimizer { .. }
-                            )
-                    ))
-        };
-        idx += 1;
-        retain
-    });
+    // If depending_tables is empty, we errored on a fault or a transaction statement, so just
+    // keep the statements from before the failing one.
+    if !depending_tables.is_empty() {
+        let mut idx = 0;
+        // Remove all properties that do not use the failing tables
+        plan.plan.retain_mut(|interactions| {
+            let retain = if idx == secondary_interactions_index {
+                if let InteractionsType::Property(
+                    Property::FsyncNoWait { tables, .. } | Property::FaultyQuery { tables, .. },
+                ) = &mut interactions.interactions
+                {
+                    tables.retain(|table| depending_tables.contains(table));
+                }
+                true
+            } else {
+                let mut has_table = interactions
+                    .uses()
+                    .iter()
+                    .any(|t| depending_tables.contains(t));
+
+                if has_table {
+                    // Remove the extensional parts of the properties
+                    if let InteractionsType::Property(p) = &mut interactions.interactions {
+                        match p {
+                            Property::InsertValuesSelect { queries, .. }
+                            | Property::DoubleCreateFailure { queries, .. }
+                            | Property::DeleteSelect { queries, .. }
+                            | Property::DropSelect { queries, .. } => {
+                                queries.clear();
+                            }
+                            Property::FsyncNoWait { tables, query }
+                            | Property::FaultyQuery { tables, query } => {
+                                if !query.uses().iter().any(|t| depending_tables.contains(t)) {
+                                    tables.clear();
+                                } else {
+                                    tables.retain(|table| depending_tables.contains(table));
+                                }
+                            }
+                            Property::SelectLimit { .. }
+                            | Property::SelectSelectOptimizer { .. }
+                            | Property::WhereTrueFalseNull { .. }
+                            | Property::UNIONAllPreservesCardinality { .. }
+                            | Property::ReadYourUpdatesBack { .. }
+                            | Property::TableHasExpectedContent { .. } => {}
+                        }
+                    }
+                    // Check again after query clear if the interactions still uses the failing table
+                    has_table = interactions
+                        .uses()
+                        .iter()
+                        .any(|t| depending_tables.contains(t));
+                }
+                let is_fault = matches!(interactions.interactions, InteractionsType::Fault(..));
+                is_fault
+                    || (has_table
+                        && !matches!(
+                            interactions.interactions,
+                            InteractionsType::Query(Query::Select(_))
+                                | InteractionsType::Property(Property::SelectLimit { .. })
+                                | InteractionsType::Property(
+                                    Property::SelectSelectOptimizer { .. }
+                                )
+                        ))
+            };
+            idx += 1;
+            retain
+        });
+    }

     let after = plan.len();
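The guard's effect on a transaction-only failure, as a worked example (hypothetical plan; assumes the fault lands on the COMMIT):

    //   [0] CREATE TABLE t (...)
    //   [1] BEGIN CONCURRENT
    //   [2] COMMIT              <- failing interaction, touches no table
    // depending_tables is empty here, so the table-based retain pass is
    // skipped and interactions [0] and [1] are kept for the reproduction,
    // instead of being dropped for not using a (nonexistent) failing table.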
From 0293c32616997dfd9b6f8a130d85e2734f64f760 Mon Sep 17 00:00:00 2001
From: pedrocarlo
Date: Fri, 19 Sep 2025 14:55:10 -0300
Subject: [PATCH 24/78] disable integrity check and indexes when using mvcc

---
 simulator/generation/plan.rs     |  7 ++++++-
 simulator/generation/property.rs | 15 +++++++++++++--
 simulator/runner/execution.rs    | 10 ++++++++--
 3 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs
index 39982637b..9641b5d7c 100644
--- a/simulator/generation/plan.rs
+++ b/simulator/generation/plan.rs
@@ -1006,7 +1006,12 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats)> for Interactions {
         _context: &C,
         (env, stats): (&SimulatorEnv, InteractionStats),
     ) -> Self {
-        let remaining_ = remaining(env.opts.max_interactions, &env.profile.query, &stats);
+        let remaining_ = remaining(
+            env.opts.max_interactions,
+            &env.profile.query,
+            &stats,
+            env.profile.experimental_mvcc,
+        );
         let conn_index = env.choose_conn(rng);
         frequency(
             vec![
diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs
index 3674ed936..afa3f1b0c 100644
--- a/simulator/generation/property.rs
+++ b/simulator/generation/property.rs
@@ -1100,6 +1100,7 @@ pub(crate) fn remaining(
     max_interactions: u32,
     opts: &QueryProfile,
     stats: &InteractionStats,
+    mvcc: bool,
 ) -> Remaining {
     let total_weight = opts.select_weight
         + opts.create_table_weight
@@ -1126,7 +1127,7 @@ pub(crate) fn remaining(
     let remaining_create = total_create
         .checked_sub(stats.create_count)
         .unwrap_or_default();
-    let remaining_create_index = total_create_index
+    let mut remaining_create_index = total_create_index
         .checked_sub(stats.create_index_count)
         .unwrap_or_default();
     let remaining_delete = total_delete
@@ -1137,6 +1138,11 @@ pub(crate) fn remaining(
         .unwrap_or_default();
     let remaining_drop = total_drop.checked_sub(stats.drop_count).unwrap_or_default();

+    if mvcc {
+        // TODO: indexes not supported yet for mvcc
+        remaining_create_index = 0;
+    }
+
     Remaining {
         select: remaining_select,
         insert: remaining_insert,
@@ -1515,7 +1521,12 @@ impl ArbitraryFrom<(&SimulatorEnv, &InteractionStats, usize)> for Property {
     ) -> Self {
         let conn_ctx = &env.connection_context(conn_index);
         let opts = conn_ctx.opts();
-        let remaining_ = remaining(env.opts.max_interactions, &env.profile.query, stats);
+        let remaining_ = remaining(
+            env.opts.max_interactions,
+            &env.profile.query,
+            stats,
+            env.profile.experimental_mvcc,
+        );

         frequency(
             vec![
diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs
index 676fc5a2f..09c1674fe 100644
--- a/simulator/runner/execution.rs
+++ b/simulator/runner/execution.rs
@@ -186,7 +186,10 @@ pub fn execute_interaction_turso(
                 tracing::error!(?results);
             }
             stack.push(results);
-            limbo_integrity_check(conn)?;
+            // TODO: skip integrity check with mvcc
+            if !env.profile.experimental_mvcc {
+                limbo_integrity_check(conn)?;
+            }
         }
         InteractionType::FsyncQuery(query) => {
             let results = interaction.execute_fsync_query(conn.clone(), env);
@@ -227,7 +230,10 @@ pub fn execute_interaction_turso(
             stack.push(results);
             // Reset fault injection
            env.io.inject_fault(false);
-            limbo_integrity_check(&conn)?;
+            // TODO: skip integrity check with mvcc
+            if !env.profile.experimental_mvcc {
+                limbo_integrity_check(&conn)?;
+            }
         }
     }
     let _ = interaction.shadow(env.get_conn_tables_mut(interaction.connection_index));

     Ok(ExecutionContinuation::NextInteraction)
 }

From 6b0011f47787f2805851485c11e90d65b46b6604 Mon Sep 17 00:00:00 2001
From: pedrocarlo
Date: Fri, 19 Sep 2025 15:17:13 -0300
Subject: [PATCH 25/78] in shrinking, remove unnecessary begin/commit queries

---
 simulator/shrink/plan.rs | 46 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/simulator/shrink/plan.rs b/simulator/shrink/plan.rs
index 1c3c715d6..be5e3159b 100644
--- a/simulator/shrink/plan.rs
+++ b/simulator/shrink/plan.rs
@@ -72,6 +72,13 @@ impl InteractionPlan {
                     tables.retain(|table| depending_tables.contains(table));
                 }
                 true
+            } else if matches!(
+                interactions.interactions,
+                InteractionsType::Query(Query::Begin(..))
+                    | InteractionsType::Query(Query::Commit(..))
+                    | InteractionsType::Query(Query::Rollback(..))
+            ) {
+                true
             } else {
                 let mut has_table = interactions
                     .uses()
                     .iter()
                     .any(|t| depending_tables.contains(t));
@@ -125,6 +132,45 @@ impl InteractionPlan {
             idx += 1;
             retain
         });
+
+        // Indices of Begin interactions
+        let mut begin_idx = Vec::new();
+        // Indices of Commit and Rollback interactions
+        let mut end_tx_idx = Vec::new();
+
+        for (idx, interactions) in plan.plan.iter().enumerate() {
+            match &interactions.interactions {
+                InteractionsType::Query(Query::Begin(..)) => {
+                    begin_idx.push(idx);
+                }
+                InteractionsType::Query(Query::Commit(..))
+                | InteractionsType::Query(Query::Rollback(..)) => {
+                    let last_begin = begin_idx.last().unwrap() + 1;
+                    if last_begin == idx {
+                        end_tx_idx.push(idx);
+                    }
+                }
+                _ => {}
+            }
+        }
+
+        // Remove interactions that are just a Begin followed immediately by a Commit/Rollback,
+        // i.e. transactions with no queries in between
+        let mut range_transactions = end_tx_idx.into_iter().peekable();
+        let mut idx = 0;
+        plan.plan.retain_mut(|_| {
+            let mut retain = true;
+
+            if let Some(txn_interaction_idx) = range_transactions.peek().copied() {
+                if txn_interaction_idx == idx {
+                    range_transactions.next();
+                }
+                if txn_interaction_idx == idx || txn_interaction_idx.saturating_sub(1) == idx {
+                    retain = false;
+                }
+            }
+            idx += 1;
+            retain
+        });
     }

     let after = plan.len();
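Worked example of the bookkeeping above (indices are hypothetical):

    //   plan: [ BEGIN(0), COMMIT(1), BEGIN(2), INSERT(3), COMMIT(4) ]
    //   begin_idx  = [0, 2]
    //   end_tx_idx = [1]        // only COMMIT(1) directly follows a BEGIN
    // The retain pass then drops indices 0 and 1 (the empty BEGIN/COMMIT
    // pair) and keeps 2..=4, because INSERT(3) sits inside that transaction.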
From 021d5d272a90137c82d3dec65e17553d1fb03295 Mon Sep 17 00:00:00 2001
From: pedrocarlo
Date: Fri, 19 Sep 2025 17:14:54 -0300
Subject: [PATCH 26/78] refactor shadowing code to take into account snapshot
 isolation

---
 simulator/generation/mod.rs      |   4 +-
 simulator/generation/plan.rs     |  10 +--
 simulator/generation/property.rs |   7 ++-
 simulator/model/mod.rs           |  51 +++++--------
 simulator/runner/env.rs          | 105 ++++++++++++++++++++++-----
 simulator/runner/execution.rs    |   4 +-
 6 files changed, 124 insertions(+), 57 deletions(-)

diff --git a/simulator/generation/mod.rs b/simulator/generation/mod.rs
index 140075d32..d2ccf4a00 100644
--- a/simulator/generation/mod.rs
+++ b/simulator/generation/mod.rs
@@ -1,6 +1,6 @@
 use sql_generation::generation::GenerationContext;

-use crate::runner::env::SimulatorTables;
+use crate::runner::env::ShadowTablesMut;

 pub mod plan;
 pub mod property;
@@ -17,7 +17,7 @@ pub mod query;
 /// might return a vector of rows that were inserted into the table.
 pub(crate) trait Shadow {
     type Result;

-    fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result;
+    fn shadow(&self, tables: &mut ShadowTablesMut<'_>) -> Self::Result;
 }

 /// Generation context that will always panic when called
diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs
index 9641b5d7c..8b8f97aa8 100644
--- a/simulator/generation/plan.rs
+++ b/simulator/generation/plan.rs
@@ -27,7 +27,7 @@
 use crate::{
     SimulatorEnv,
     generation::{PanicGenerationContext, Shadow},
     model::Query,
-    runner::env::{SimConnection, SimulationType, SimulatorTables},
+    runner::env::{ShadowTablesMut, SimConnection, SimulationType},
 };
@@ -228,7 +228,7 @@ impl InteractionPlan {
             tracing::debug!("Generating interaction {}/{}", plan.len(), num_interactions);
             let interactions =
                 Interactions::arbitrary_from(rng, &PanicGenerationContext, (env, plan.stats()));
-            interactions.shadow(env.get_conn_tables_mut(interactions.connection_index));
+            interactions.shadow(&mut env.get_conn_tables_mut(interactions.connection_index));
             plan.push(interactions);
         }
@@ -311,7 +311,7 @@ pub enum InteractionsType {
 impl Shadow for Interactions {
     type Result = ();

-    fn shadow(&self, tables: &mut SimulatorTables) {
+    fn shadow(&self, tables: &mut ShadowTablesMut) {
         match &self.interactions {
             InteractionsType::Property(property) => {
                 let initial_tables = tables.clone();
                 let res = interaction.shadow(tables);
                 if res.is_err() {
                     // If any interaction fails, we reset the tables to the initial state
-                    *tables = initial_tables.clone();
+                    **tables = initial_tables.clone();
                     break;
                 }
             }
@@ -576,7 +576,7 @@ impl Display for InteractionType {
 impl Shadow for InteractionType {
     type Result = anyhow::Result<Vec<Vec<SimValue>>>;

-    fn shadow(&self, env: &mut SimulatorTables) -> Self::Result {
+    fn shadow(&self, env: &mut ShadowTablesMut) -> Self::Result {
         match self {
             Self::Query(query) => query.shadow(env),
             Self::FsyncQuery(query) => {
diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs
index afa3f1b0c..bb4678872 100644
--- a/simulator/generation/property.rs
+++ b/simulator/generation/property.rs
@@ -796,8 +796,8 @@ impl Property {
                     let last = stack.last().unwrap();
                     match last {
                         Ok(_) => {
-                            let _ =
-                                query_clone.shadow(env.get_conn_tables_mut(connection_index));
+                            let _ = query_clone
+                                .shadow(&mut env.get_conn_tables_mut(connection_index));
                             Ok(Ok(()))
                         }
                         Err(err) => {
@@ -1040,7 +1040,8 @@ fn assert_all_table_values(
     let assertion = InteractionType::Assertion(Assertion::new(format!("table {table} should contain all of its expected values"), {
         let table = table.clone();
         move |stack: &Vec<ResultSet>, env: &mut SimulatorEnv| {
-            let table = env.get_conn_tables(connection_index).iter().find(|t| t.name == table).ok_or_else(|| {
+            let conn_ctx = env.get_conn_tables(connection_index);
+            let table = conn_ctx.iter().find(|t| t.name == table).ok_or_else(|| {
                 LimboError::InternalError(format!(
                     "table {table} should exist in simulator env"
                 ))
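The model impls below all follow the new `ShadowTablesMut` signature; for a hypothetical query type the shape is (sketch only — `Truncate` is not part of the simulator):

    struct Truncate {
        table: String,
    }

    impl Shadow for Truncate {
        type Result = anyhow::Result<Vec<Vec<SimValue>>>;

        fn shadow(&self, tables: &mut ShadowTablesMut<'_>) -> Self::Result {
            // DerefMut lets the impl treat the view as a plain Vec<Table>,
            // writing to the transaction snapshot when one is active.
            match tables.iter_mut().find(|t| t.name == self.table) {
                Some(t) => {
                    t.rows.clear();
                    Ok(vec![])
                }
                None => Err(anyhow::anyhow!("table {} does not exist", self.table)),
            }
        }
    }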
diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs
index 237750683..9a3c81e9f 100644
--- a/simulator/model/mod.rs
+++ b/simulator/model/mod.rs
@@ -15,7 +15,7 @@
 use sql_generation::model::{
 };
 use turso_parser::ast::Distinctness;

-use crate::{generation::Shadow, runner::env::SimulatorTables};
+use crate::{generation::Shadow, runner::env::ShadowTablesMut};

 // This type represents the potential queries on the database.
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -83,7 +83,7 @@ impl Display for Query {
 impl Shadow for Query {
     type Result = anyhow::Result<Vec<Vec<SimValue>>>;

-    fn shadow(&self, env: &mut SimulatorTables) -> Self::Result {
+    fn shadow(&self, env: &mut ShadowTablesMut) -> Self::Result {
         match self {
             Query::Create(create) => create.shadow(env),
             Query::Insert(insert) => insert.shadow(env),
@@ -102,7 +102,7 @@ impl Shadow for Query {
 impl Shadow for Create {
     type Result = anyhow::Result<Vec<Vec<SimValue>>>;

-    fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result {
+    fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result {
         if !tables.iter().any(|t| t.name == self.table.name) {
             tables.push(self.table.clone());
             Ok(vec![])
@@ -117,9 +117,8 @@ impl Shadow for Create {
 impl Shadow for CreateIndex {
     type Result = Vec<Vec<SimValue>>;

-    fn shadow(&self, env: &mut SimulatorTables) -> Vec<Vec<SimValue>> {
-        env.tables
-            .iter_mut()
+    fn shadow(&self, env: &mut ShadowTablesMut) -> Vec<Vec<SimValue>> {
+        env.iter_mut()
             .find(|t| t.name == self.table_name)
             .unwrap()
             .indexes
@@ -131,8 +130,8 @@ impl Shadow for CreateIndex {
 impl Shadow for Delete {
     type Result = anyhow::Result<Vec<Vec<SimValue>>>;

-    fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result {
-        let table = tables.tables.iter_mut().find(|t| t.name == self.table);
+    fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result {
+        let table = tables.iter_mut().find(|t| t.name == self.table);

         if let Some(table) = table {
             // If the table exists, we can delete from it
@@ -153,7 +152,7 @@ impl Shadow for Delete {
 impl Shadow for Drop {
     type Result = anyhow::Result<Vec<Vec<SimValue>>>;

-    fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result {
+    fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result {
         if !tables.iter().any(|t| t.name == self.table) {
             // If the table does not exist, we return an error
             return Err(anyhow::anyhow!(
@@ -162,7 +161,7 @@ impl Shadow for Drop {
             ));
         }

-        tables.tables.retain(|t| t.name != self.table);
+        tables.retain(|t| t.name != self.table);

         Ok(vec![])
     }
@@ -171,10 +170,10 @@ impl Shadow for Drop {
 impl Shadow for Insert {
     type Result = anyhow::Result<Vec<Vec<SimValue>>>;

-    fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result {
+    fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result {
         match self {
             Insert::Values { table, values } => {
-                if let Some(t) = tables.tables.iter_mut().find(|t| &t.name == table) {
+                if let Some(t) = tables.iter_mut().find(|t| &t.name == table) {
                     t.rows.extend(values.clone());
                 } else {
                     return Err(anyhow::anyhow!(
@@ -185,7 +184,7 @@ impl Shadow for Insert {
             }
             Insert::Select { table, select } => {
                 let rows = select.shadow(tables)?;
-                if let Some(t) = tables.tables.iter_mut().find(|t| &t.name == table) {
+                if let Some(t) = tables.iter_mut().find(|t| &t.name == table) {
                     t.rows.extend(rows);
                 } else {
                     return Err(anyhow::anyhow!(
@@ -202,9 +201,7 @@ impl Shadow for Insert {
 impl Shadow for FromClause {
     type Result = anyhow::Result<JoinTable>;

-    fn shadow(&self, env: &mut SimulatorTables) -> Self::Result {
-        let tables = &mut env.tables;
-
+    fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result {
         let first_table = tables
             .iter()
             .find(|t| t.name == self.table)
@@ -259,7 +256,7 @@ impl Shadow for FromClause {
 impl Shadow for SelectInner {
     type Result = anyhow::Result<JoinTable>;

-    fn shadow(&self, env: &mut SimulatorTables) -> Self::Result {
+    fn shadow(&self, env: &mut ShadowTablesMut) -> Self::Result {
         if let Some(from) = &self.from {
             let mut join_table = from.shadow(env)?;
             let col_count = join_table.columns().count();
@@ -327,7 +324,7 @@
Select { type Result = anyhow::Result>>; - fn shadow(&self, env: &mut SimulatorTables) -> Self::Result { + fn shadow(&self, env: &mut ShadowTablesMut) -> Self::Result { let first_result = self.body.select.shadow(env)?; let mut rows = first_result.rows; @@ -357,28 +354,26 @@ impl Shadow for Select { impl Shadow for Begin { type Result = Vec>; - fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result { + fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result { // FIXME: currently the snapshot is taken eagerly // this is wrong for Deffered transactions - tables.snapshot = Some(tables.tables.clone()); + tables.create_snapshot(); vec![] } } impl Shadow for Commit { type Result = Vec>; - fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result { - tables.snapshot = None; + fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result { + tables.apply_snapshot(); vec![] } } impl Shadow for Rollback { type Result = Vec>; - fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result { - if let Some(tables_) = tables.snapshot.take() { - tables.tables = tables_; - } + fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result { + tables.delete_snapshot(); vec![] } } @@ -386,8 +381,8 @@ impl Shadow for Rollback { impl Shadow for Update { type Result = anyhow::Result>>; - fn shadow(&self, tables: &mut SimulatorTables) -> Self::Result { - let table = tables.tables.iter_mut().find(|t| t.name == self.table); + fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result { + let table = tables.iter_mut().find(|t| t.name == self.table); let table = if let Some(table) = table { table diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index 922bfe736..9ef9b612a 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -1,6 +1,6 @@ use std::fmt::Display; use std::mem; -use std::ops::Deref; +use std::ops::{Deref, DerefMut}; use std::panic::UnwindSafe; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -32,6 +32,79 @@ pub(crate) enum SimulationPhase { Shrink, } +#[derive(Debug)] +pub struct ShadowTables<'a> { + commited_tables: &'a Vec, + transaction_tables: Option<&'a Vec
>, +} + +#[derive(Debug)] +pub struct ShadowTablesMut<'a> { + commited_tables: &'a mut Vec
, + transaction_tables: &'a mut Option>, +} + +impl<'a> ShadowTables<'a> { + fn tables(&self) -> &'a Vec
+ self.transaction_tables.map_or(self.commited_tables, |v| v)
+ }
+}
+
+impl<'a> Deref for ShadowTables<'a> {
+ type Target = Vec<Table>;
+
+ fn deref(&self) -> &Self::Target {
+ self.tables()
+ }
+}
+
+impl<'a, 'b> ShadowTablesMut<'a>
+where
+ 'a: 'b,
+{
+ fn tables(&'a self) -> &'a Vec<Table> {
+ self.transaction_tables
+ .as_ref()
+ .unwrap_or(self.commited_tables)
+ }
+
+ fn tables_mut(&'b mut self) -> &'b mut Vec<Table> {
+ self.transaction_tables
+ .as_mut()
+ .unwrap_or(self.commited_tables)
+ }
+
+ pub fn create_snapshot(&mut self) {
+ *self.transaction_tables = Some(self.commited_tables.clone());
+ }
+
+ pub fn apply_snapshot(&mut self) {
+ // TODO: the simulator does not have concurrent transactions yet, so
+ // there are no conflicts to resolve; conflict handling is ignored for now
+ if let Some(transaction_tables) = self.transaction_tables.take() {
+ *self.commited_tables = transaction_tables
+ }
+ }
+
+ pub fn delete_snapshot(&mut self) {
+ *self.transaction_tables = None;
+ }
+}
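Together, `create_snapshot`, `apply_snapshot`, and `delete_snapshot` give the shadow state the same shape as BEGIN/COMMIT/ROLLBACK in the `Shadow` impls earlier in this patch. A standalone sketch of that lifecycle (the `TxnShadow` type and `Vec<String>` payload are illustrative only, not the patch's API):

```rust
struct TxnShadow {
    committed: Vec<String>,
    transaction: Option<Vec<String>>,
}

impl TxnShadow {
    fn begin(&mut self) {
        // create_snapshot: clone committed state into the transaction view
        self.transaction = Some(self.committed.clone());
    }
    fn commit(&mut self) {
        // apply_snapshot: publish the transaction view as committed state
        if let Some(t) = self.transaction.take() {
            self.committed = t;
        }
    }
    fn rollback(&mut self) {
        // delete_snapshot: drop the transaction view, leaving committed state intact
        self.transaction = None;
    }
}

fn main() {
    let mut shadow = TxnShadow { committed: vec!["users".into()], transaction: None };
    shadow.begin();
    shadow.transaction.as_mut().unwrap().push("orders".into()); // shadow a CREATE TABLE
    shadow.rollback();
    assert_eq!(shadow.committed.len(), 1); // rollback: "orders" never became visible
    shadow.begin();
    shadow.transaction.as_mut().unwrap().push("orders".into());
    shadow.commit();
    assert_eq!(shadow.committed.len(), 2); // commit: the change is published
}
```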
+
+impl<'a> Deref for ShadowTablesMut<'a> {
+ type Target = Vec<Table>;
+
+ fn deref(&self) -> &Self::Target {
+ self.tables()
+ }
+}
+
+impl<'a> DerefMut for ShadowTablesMut<'a> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.tables_mut()
+ }
+}
+
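These `Deref`/`DerefMut` impls are what allow the call sites in this patch to replace `tables.tables.iter_mut()` with `tables.iter_mut()`. A tiny sketch of the pattern with a plain wrapper (assumed names, not the simulator's types):

```rust
use std::ops::{Deref, DerefMut};

struct Wrapper(Vec<i32>);

impl Deref for Wrapper {
    type Target = Vec<i32>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Wrapper {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

fn main() {
    let mut w = Wrapper(vec![1, 2, 3]);
    w.retain(|&x| x != 2); // Vec::retain resolves through DerefMut
    assert_eq!(w.len(), 2); // Vec::len resolves through Deref
}
```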
 #[derive(Debug, Clone)]
 pub(crate) struct SimulatorTables {
 pub(crate) tables: Vec<Table>,
@@ -75,9 +148,9 @@ pub(crate) struct SimulatorEnv {
 pub memory_io: bool,
 /// If a connection's entry is None, that connection is not in a transaction
- pub connection_tables: Vec<Option<SimulatorTables>>,
+ pub connection_tables: Vec<Option<Vec<Table>>>,
 // Table data that is committed into the database or WAL
- pub committed_tables: SimulatorTables,
+ pub committed_tables: Vec<Table>
, } impl UnwindSafe for SimulatorEnv {} @@ -300,7 +373,7 @@ impl SimulatorEnv { phase: SimulationPhase::Test, memory_io: cli_opts.memory_io, profile: profile.clone(), - committed_tables: SimulatorTables::new(), + committed_tables: Vec::new(), connection_tables: vec![None; profile.max_connections], } } @@ -363,7 +436,7 @@ impl SimulatorEnv { } } - let tables = &self.get_conn_tables(conn_index).tables; + let tables = self.get_conn_tables(conn_index).tables(); ConnectionGenContext { opts: &self.profile.query.gen_opts, @@ -371,20 +444,18 @@ impl SimulatorEnv { } } - pub fn get_conn_tables(&self, conn_index: usize) -> &SimulatorTables { - self.connection_tables - .get(conn_index) - .unwrap() - .as_ref() - .unwrap_or(&self.committed_tables) + pub fn get_conn_tables<'a>(&'a self, conn_index: usize) -> ShadowTables<'a> { + ShadowTables { + transaction_tables: self.connection_tables.get(conn_index).unwrap().as_ref(), + commited_tables: &self.committed_tables, + } } - pub fn get_conn_tables_mut(&mut self, conn_index: usize) -> &mut SimulatorTables { - self.connection_tables - .get_mut(conn_index) - .unwrap() - .as_mut() - .unwrap_or(&mut self.committed_tables) + pub fn get_conn_tables_mut<'a>(&'a mut self, conn_index: usize) -> ShadowTablesMut<'a> { + ShadowTablesMut { + transaction_tables: self.connection_tables.get_mut(conn_index).unwrap(), + commited_tables: &mut self.committed_tables, + } } } diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs index 09c1674fe..3657b995b 100644 --- a/simulator/runner/execution.rs +++ b/simulator/runner/execution.rs @@ -236,7 +236,7 @@ pub fn execute_interaction_turso( } } } - let _ = interaction.shadow(env.get_conn_tables_mut(interaction.connection_index)); + let _ = interaction.shadow(&mut env.get_conn_tables_mut(interaction.connection_index)); Ok(ExecutionContinuation::NextInteraction) } @@ -329,7 +329,7 @@ fn execute_interaction_rusqlite( } } - let _ = interaction.shadow(env.get_conn_tables_mut(interaction.connection_index)); + let _ = interaction.shadow(&mut env.get_conn_tables_mut(interaction.connection_index)); Ok(ExecutionContinuation::NextInteraction) } From 0e702fbec2609573445db5937d1a211a4f7e6dfb Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sat, 20 Sep 2025 11:36:05 -0300 Subject: [PATCH 27/78] check FaultyQuery depending tables when shrinking --- simulator/shrink/plan.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/simulator/shrink/plan.rs b/simulator/shrink/plan.rs index be5e3159b..58f5eb156 100644 --- a/simulator/shrink/plan.rs +++ b/simulator/shrink/plan.rs @@ -33,7 +33,7 @@ impl InteractionPlan { break; } match &all_interactions[idx].1.interaction { - InteractionType::Query(query) => { + InteractionType::Query(query) | InteractionType::FaultyQuery(query) => { depending_tables = query.dependencies(); break; } From 0597ea722ae0da9914e67f752fabe1510e4eff09 Mon Sep 17 00:00:00 2001 From: Avinash Sajjanshetty Date: Sat, 20 Sep 2025 21:56:58 +0530 Subject: [PATCH 28/78] Add encryption throughput test --- Cargo.lock | 13 + Cargo.toml | 2 + bindings/rust/Cargo.toml | 1 + perf/encryption/Cargo.toml | 17 ++ perf/encryption/README.md | 28 +++ perf/encryption/src/main.rs | 457 ++++++++++++++++++++++++++++++++++++ 6 files changed, 518 insertions(+) create mode 100644 perf/encryption/Cargo.toml create mode 100644 perf/encryption/README.md create mode 100644 perf/encryption/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 9e2bb330d..17a1d956d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1100,6 +1100,19 @@ 
version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +[[package]] +name = "encryption-throughput" +version = "0.1.0" +dependencies = [ + "clap", + "futures", + "hex", + "rand 0.9.2", + "tokio", + "tracing-subscriber", + "turso", +] + [[package]] name = "endian-type" version = "0.1.2" diff --git a/Cargo.toml b/Cargo.toml index 2771c2a31..aff912890 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,9 @@ members = [ "whopper", "perf/throughput/turso", "perf/throughput/rusqlite", + "perf/encryption" ] + exclude = [ "perf/latency/limbo", ] diff --git a/bindings/rust/Cargo.toml b/bindings/rust/Cargo.toml index d799b5320..e50304f01 100644 --- a/bindings/rust/Cargo.toml +++ b/bindings/rust/Cargo.toml @@ -15,6 +15,7 @@ conn_raw_api = ["turso_core/conn_raw_api"] experimental_indexes = [] antithesis = ["turso_core/antithesis"] tracing_release = ["turso_core/tracing_release"] +encryption = ["turso_core/encryption"] [dependencies] turso_core = { workspace = true, features = ["io_uring"] } diff --git a/perf/encryption/Cargo.toml b/perf/encryption/Cargo.toml new file mode 100644 index 000000000..e769c5b0b --- /dev/null +++ b/perf/encryption/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "encryption-throughput" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "encryption-throughput" +path = "src/main.rs" + +[dependencies] +turso = { workspace = true, features = ["encryption"] } +clap = { workspace = true, features = ["derive"] } +tokio = { workspace = true, default-features = true, features = ["full"] } +futures = { workspace = true } +tracing-subscriber = { workspace = true } +rand = { workspace = true, features = ["small_rng"] } +hex = { workspace = true } \ No newline at end of file diff --git a/perf/encryption/README.md b/perf/encryption/README.md new file mode 100644 index 000000000..0ec611258 --- /dev/null +++ b/perf/encryption/README.md @@ -0,0 +1,28 @@ +# Encryption Throughput Benchmarking + +```shell +$ cargo run --release -- --help + +Usage: encryption-throughput [OPTIONS] + +Options: + -t, --threads [default: 1] + -b, --batch-size [default: 100] + -i, --iterations [default: 10] + -r, --read-ratio Percentage of operations that should be reads (0-100) + -w, --write-ratio Percentage of operations that should be writes (0-100) + --encryption Enable database encryption + --cipher Encryption cipher to use (only relevant if --encryption is set) [default: aegis-256] + --think Per transaction think time (ms) [default: 0] + --timeout Busy timeout in milliseconds [default: 30000] + --seed Random seed for reproducible workloads [default: 2167532792061351037] + -h, --help Print help +``` + +```shell +# try these: + +cargo run --release -- -b 100 -i 25000 --read-ratio 75 + +cargo run --release -- -b 100 -i 25000 --read-ratio 75 --encryption +``` \ No newline at end of file diff --git a/perf/encryption/src/main.rs b/perf/encryption/src/main.rs new file mode 100644 index 000000000..7736055c5 --- /dev/null +++ b/perf/encryption/src/main.rs @@ -0,0 +1,457 @@ +use clap::Parser; +use rand::rngs::SmallRng; +use rand::{Rng, RngCore, SeedableRng}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Barrier}; +use std::time::{Duration, Instant}; +use turso::{Builder, Database, Result}; + +#[derive(Debug, Clone)] +struct EncryptionOpts { + cipher: String, + hexkey: String, +} + +#[derive(Parser)] +#[command(name = "encryption-throughput")] +#[command(about = "Encryption throughput 
benchmark on Turso DB")] +struct Args { + /// More than one thread does not work yet + #[arg(short = 't', long = "threads", default_value = "1")] + threads: usize, + + /// the number operations per transaction + #[arg(short = 'b', long = "batch-size", default_value = "100")] + batch_size: usize, + + /// number of transactions per thread + #[arg(short = 'i', long = "iterations", default_value = "10")] + iterations: usize, + + #[arg( + short = 'r', + long = "read-ratio", + help = "Percentage of operations that should be reads (0-100)" + )] + read_ratio: Option, + + #[arg( + short = 'w', + long = "write-ratio", + help = "Percentage of operations that should be writes (0-100)" + )] + write_ratio: Option, + + #[arg( + long = "encryption", + action = clap::ArgAction::SetTrue, + help = "Enable database encryption" + )] + encryption: bool, + + #[arg( + long = "cipher", + default_value = "aegis-256", + help = "Encryption cipher to use (only relevant if --encryption is set)" + )] + cipher: String, + + #[arg( + long = "think", + default_value = "0", + help = "Per transaction think time (ms)" + )] + think: u64, + + #[arg( + long = "timeout", + default_value = "30000", + help = "Busy timeout in milliseconds" + )] + timeout: u64, + + #[arg( + long = "seed", + default_value = "2167532792061351037", + help = "Random seed for reproducible workloads" + )] + seed: u64, +} + +#[derive(Debug)] +struct WorkerStats { + transactions_completed: u64, + reads_completed: u64, + writes_completed: u64, + reads_found: u64, + reads_not_found: u64, + total_transaction_time: Duration, +} + +#[derive(Debug, Clone)] +struct SharedState { + max_inserted_id: Arc, +} + +#[tokio::main] +async fn main() -> Result<()> { + let _ = tracing_subscriber::fmt::try_init(); + let args = Args::parse(); + + let read_ratio = match (args.read_ratio, args.write_ratio) { + (Some(_), Some(_)) => { + eprintln!("Error: Cannot specify both --read-ratio and --write-ratio"); + std::process::exit(1); + } + (Some(r), None) => { + if r > 100 { + eprintln!("Error: read-ratio must be between 0 and 100"); + std::process::exit(1); + } + r + } + (None, Some(w)) => { + if w > 100 { + eprintln!("Error: write-ratio must be between 0 and 100"); + std::process::exit(1); + } + 100 - w + } + // lets default to 0% reads (100% writes) + (None, None) => 0, + }; + + println!( + "Running encryption throughput benchmark with {} threads, {} batch size, {} iterations", + args.threads, args.batch_size, args.iterations + ); + println!( + "Read/Write ratio: {}% reads, {}% writes", + read_ratio, + 100 - read_ratio + ); + println!("Encryption enabled: {}", args.encryption); + println!("Random seed: {}", args.seed); + + let encryption_opts = if args.encryption { + let mut key_rng = SmallRng::seed_from_u64(args.seed); + let key_size = get_key_size_for_cipher(&args.cipher); + let mut key = vec![0u8; key_size]; + key_rng.fill_bytes(&mut key); + + let config = EncryptionOpts { + cipher: args.cipher.clone(), + hexkey: hex::encode(&key), + }; + + println!("Cipher: {}", config.cipher); + println!("Hexkey: {}", config.hexkey); + Some(config) + } else { + None + }; + + let db_path = "encryption_throughput_test.db"; + if std::path::Path::new(db_path).exists() { + std::fs::remove_file(db_path).expect("Failed to remove existing database"); + } + let wal_path = "encryption_throughput_test.db-wal"; + if std::path::Path::new(wal_path).exists() { + std::fs::remove_file(wal_path).expect("Failed to remove existing WAL file"); + } + + let db = setup_database(db_path, &encryption_opts).await?; + + 
+ // create one counter shared between all the worker tasks; we use it to track the
+ // max inserted id so that reads only target ids that already exist
+ let shared_state = SharedState {
+ max_inserted_id: Arc::new(AtomicU64::new(0)),
+ };
+
+ let start_barrier = Arc::new(Barrier::new(args.threads));
+ let mut handles = Vec::new();
+
+ let timeout = Duration::from_millis(args.timeout);
+ let overall_start = Instant::now();
+
+ for thread_id in 0..args.threads {
+ let db_clone = db.clone();
+ let barrier = Arc::clone(&start_barrier);
+ let encryption_opts_clone = encryption_opts.clone();
+ let shared_state_clone = shared_state.clone();
+
+ let handle = tokio::task::spawn(worker_thread(
+ thread_id,
+ db_clone,
+ args.batch_size,
+ args.iterations,
+ barrier,
+ read_ratio,
+ encryption_opts_clone,
+ args.think,
+ timeout,
+ shared_state_clone,
+ args.seed,
+ ));
+
+ handles.push(handle);
+ }
+
+ let mut total_transactions = 0;
+ let mut total_reads = 0;
+ let mut total_writes = 0;
+ let mut total_reads_found = 0;
+ let mut total_reads_not_found = 0;
+
+ for (idx, handle) in handles.into_iter().enumerate() {
+ match handle.await {
+ Ok(Ok(stats)) => {
+ total_transactions += stats.transactions_completed;
+ total_reads += stats.reads_completed;
+ total_writes += stats.writes_completed;
+ total_reads_found += stats.reads_found;
+ total_reads_not_found += stats.reads_not_found;
+ }
+ Ok(Err(e)) => {
+ eprintln!("Thread error {idx}: {e}");
+ return Err(e);
+ }
+ Err(_) => {
+ eprintln!("Thread panicked");
+ std::process::exit(1);
+ }
+ }
+ }
+
+ let overall_elapsed = overall_start.elapsed();
+ let total_operations = total_reads + total_writes;
+
+ let transaction_throughput = (total_transactions as f64) / overall_elapsed.as_secs_f64();
+ let operation_throughput = (total_operations as f64) / overall_elapsed.as_secs_f64();
+ let read_throughput = if total_reads > 0 {
+ (total_reads as f64) / overall_elapsed.as_secs_f64()
+ } else {
+ 0.0
+ };
+ let write_throughput = if total_writes > 0 {
+ (total_writes as f64) / overall_elapsed.as_secs_f64()
+ } else {
+ 0.0
+ };
+ let avg_ops_per_txn = (total_operations as f64) / (total_transactions as f64);
+
+ println!("\n=== BENCHMARK RESULTS ===");
+ println!("Total transactions: {total_transactions}");
+ println!("Total operations: {total_operations}");
+ println!("Operations per transaction: {avg_ops_per_txn:.1}");
+ println!("Total time: {:.2}s", overall_elapsed.as_secs_f64());
+ println!();
+ println!("Transaction throughput: {transaction_throughput:.2} txns/sec");
+ println!("Operation throughput: {operation_throughput:.2} ops/sec");
+
+ // not found should be zero since we track the max inserted id
+ // todo(v): probably handle the not found error and remove max id
+ if total_reads > 0 {
+ println!(
+ " - Read operations: {total_reads} ({total_reads_found} found, {total_reads_not_found} not found)"
+ );
+ println!(" - Read throughput: {read_throughput:.2} reads/sec");
+ }
+ if total_writes > 0 {
+ println!(" - Write operations: {total_writes}");
+ println!(" - Write throughput: {write_throughput:.2} writes/sec");
+ }
+
+ println!("\nConfiguration:");
+ println!("Threads: {}", args.threads);
+ println!("Batch size: {}", args.batch_size);
+ println!("Iterations per thread: {}", args.iterations);
+ println!("Encryption: {}", args.encryption);
+ println!("Seed: {}", args.seed);
+
+ if let Ok(metadata) = std::fs::metadata(db_path) {
+ println!("Database file size: {} bytes", metadata.len());
+ }
+
+ Ok(())
+}
+
+fn get_key_size_for_cipher(cipher: &str) -> usize {
+ match
cipher.to_lowercase().as_str() { + "aes-128-gcm" | "aegis-128l" | "aegis-128x2" | "aegis-128x4" => 16, + "aes-256-gcm" | "aegis-256" | "aegis-256x2" | "aegis-256x4" => 32, + _ => 32, // default to 256-bit key + } +} + +async fn setup_database( + db_path: &str, + encryption_opts: &Option, +) -> Result { + let builder = Builder::new_local(db_path); + let db = builder.build().await?; + let conn = db.connect()?; + + if let Some(config) = encryption_opts { + conn.execute(&format!("PRAGMA cipher='{}'", config.cipher), ()) + .await?; + conn.execute(&format!("PRAGMA hexkey='{}'", config.hexkey), ()) + .await?; + } + + // todo(v): probably store blobs and then have option of randomblob size + conn.execute( + "CREATE TABLE IF NOT EXISTS test_table ( + id INTEGER PRIMARY KEY, + data TEXT NOT NULL + )", + (), + ) + .await?; + + println!("Database created at: {db_path}"); + Ok(db) +} + +#[allow(clippy::too_many_arguments)] +async fn worker_thread( + thread_id: usize, + db: Database, + batch_size: usize, + iterations: usize, + start_barrier: Arc, + read_ratio: u8, + encryption_opts: Option, + think_ms: u64, + timeout: Duration, + shared_state: SharedState, + base_seed: u64, +) -> Result { + start_barrier.wait(); + + let start_time = Instant::now(); + let mut stats = WorkerStats { + transactions_completed: 0, + reads_completed: 0, + writes_completed: 0, + reads_found: 0, + reads_not_found: 0, + total_transaction_time: Duration::ZERO, + }; + + let thread_seed = base_seed.wrapping_add(thread_id as u64); + let mut rng = SmallRng::seed_from_u64(thread_seed); + + for iteration in 0..iterations { + let conn = db.connect()?; + + if let Some(config) = &encryption_opts { + conn.execute(&format!("PRAGMA cipher='{}'", config.cipher), ()) + .await?; + conn.execute(&format!("PRAGMA hexkey='{}'", config.hexkey), ()) + .await?; + } + + conn.busy_timeout(Some(timeout))?; + + let mut insert_stmt = conn + .prepare("INSERT INTO test_table (id, data) VALUES (?, ?)") + .await?; + + let transaction_start = Instant::now(); + conn.execute("BEGIN", ()).await?; + + for i in 0..batch_size { + let should_read = rng.random_range(0..100) < read_ratio; + + if should_read { + // only attempt reads if we have inserted some data + let max_id = shared_state.max_inserted_id.load(Ordering::Relaxed); + if max_id > 0 { + let read_id = rng.random_range(1..=max_id); + let row = conn + .query( + "SELECT data FROM test_table WHERE id = ?", + turso::params::Params::Positional(vec![turso::Value::Integer( + read_id as i64, + )]), + ) + .await; + + match row { + Ok(_) => stats.reads_found += 1, + Err(turso::Error::QueryReturnedNoRows) => stats.reads_not_found += 1, + Err(e) => return Err(e), + }; + stats.reads_completed += 1; + } else { + // if no data inserted yet, convert to a write + let id = thread_id * iterations * batch_size + iteration * batch_size + i + 1; + insert_stmt + .execute(turso::params::Params::Positional(vec![ + turso::Value::Integer(id as i64), + turso::Value::Text(format!("data_{id}")), + ])) + .await?; + + shared_state + .max_inserted_id + .fetch_max(id as u64, Ordering::Relaxed); + stats.writes_completed += 1; + } + } else { + let id = thread_id * iterations * batch_size + iteration * batch_size + i + 1; + insert_stmt + .execute(turso::params::Params::Positional(vec![ + turso::Value::Integer(id as i64), + turso::Value::Text(format!("data_{id}")), + ])) + .await?; + + shared_state + .max_inserted_id + .fetch_max(id as u64, Ordering::Relaxed); + stats.writes_completed += 1; + } + } + + if think_ms > 0 { + 
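+ // Hold the transaction open for the configured think time before COMMIT;
+ // the async sleep yields the task without blocking the runtime thread.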
tokio::time::sleep(Duration::from_millis(think_ms)).await; + } + + conn.execute("COMMIT", ()).await?; + + let transaction_elapsed = transaction_start.elapsed(); + stats.transactions_completed += 1; + stats.total_transaction_time += transaction_elapsed; + } + + let elapsed = start_time.elapsed(); + let total_ops = stats.reads_completed + stats.writes_completed; + let transaction_throughput = (stats.transactions_completed as f64) / elapsed.as_secs_f64(); + let operation_throughput = (total_ops as f64) / elapsed.as_secs_f64(); + let avg_txn_latency = + stats.total_transaction_time.as_secs_f64() * 1000.0 / stats.transactions_completed as f64; + + println!( + "Thread {}: {} txns ({} ops: {} reads, {} writes) in {:.2}s ({:.2} txns/sec, {:.2} ops/sec, {:.2}ms avg latency)", + thread_id, + stats.transactions_completed, + total_ops, + stats.reads_completed, + stats.writes_completed, + elapsed.as_secs_f64(), + transaction_throughput, + operation_throughput, + avg_txn_latency + ); + + if stats.reads_completed > 0 { + println!( + " Thread {} reads: {} found, {} not found", + thread_id, stats.reads_found, stats.reads_not_found + ); + } + + Ok(stats) +} From 864d113037be75773a12a93cc7d04d0063d05cca Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 19 Sep 2025 20:02:07 -0400 Subject: [PATCH 29/78] Remove dumb comment --- core/util.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/core/util.rs b/core/util.rs index faffc72cf..264057556 100644 --- a/core/util.rs +++ b/core/util.rs @@ -333,8 +333,6 @@ pub fn check_literal_equivalency(lhs: &Literal, rhs: &Literal) -> bool { /// This function is used to determine whether two expressions are logically /// equivalent in the context of queries, even if their representations /// differ. e.g.: `SUM(x)` and `sum(x)`, `x + y` and `y + x` -/// -/// *Note*: doesn't attempt to evaluate/compute "constexpr" results pub fn exprs_are_equivalent(expr1: &Expr, expr2: &Expr) -> bool { match (expr1, expr2) { ( From a0f574d279b7bf6b361b4c5184603702e345b9d0 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 19 Sep 2025 20:04:43 -0400 Subject: [PATCH 30/78] Add where_clause expr field to Index --- core/incremental/operator.rs | 1 + core/schema.rs | 17 +++++-- core/translate/compound_select.rs | 1 + core/translate/index.rs | 78 ++++++++++++++++++++++++++++++- 4 files changed, 90 insertions(+), 7 deletions(-) diff --git a/core/incremental/operator.rs b/core/incremental/operator.rs index 2af512504..f7f81e6f2 100644 --- a/core/incremental/operator.rs +++ b/core/incremental/operator.rs @@ -69,6 +69,7 @@ pub fn create_dbsp_state_index(root_page: usize) -> Index { unique: true, ephemeral: false, has_rowid: true, + where_clause: None, } } diff --git a/core/schema.rs b/core/schema.rs index 71cbb4932..49cacd67b 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -447,11 +447,13 @@ impl Schema { )?)); } else { // Add single column unique index - self.add_index(Arc::new(Index::automatic_from_unique( - table.as_ref(), - automatic_indexes.pop().unwrap(), - vec![(pos_in_table, unique_set.columns.first().unwrap().1)], - )?)); + if let Some(autoidx) = automatic_indexes.pop() { + self.add_index(Arc::new(Index::automatic_from_unique( + table.as_ref(), + autoidx, + vec![(pos_in_table, unique_set.columns.first().unwrap().1)], + )?)); + } } } for unique_set in table.unique_sets.iter().filter(|us| us.columns.len() > 1) { @@ -1593,6 +1595,7 @@ pub struct Index { /// For example, WITHOUT ROWID tables (not supported in Limbo yet), /// and SELECT DISTINCT ephemeral indexes will not have a rowid. 
pub has_rowid: bool, + pub where_clause: Option>, } #[allow(dead_code)] @@ -1620,6 +1623,7 @@ impl Index { tbl_name, columns, unique, + where_clause, .. })) => { let index_name = normalize_ident(idx_name.name.as_str()); @@ -1649,6 +1653,7 @@ impl Index { unique, ephemeral: false, has_rowid: table.has_rowid, + where_clause, }) } _ => todo!("Expected create index statement"), @@ -1693,6 +1698,7 @@ impl Index { unique: true, ephemeral: false, has_rowid: table.has_rowid, + where_clause: None, }) } @@ -1729,6 +1735,7 @@ impl Index { unique: true, ephemeral: false, has_rowid: table.has_rowid, + where_clause: None, }) } diff --git a/core/translate/compound_select.rs b/core/translate/compound_select.rs index f96b6ad8a..40eb2ce59 100644 --- a/core/translate/compound_select.rs +++ b/core/translate/compound_select.rs @@ -401,6 +401,7 @@ fn create_dedupe_index( table_name: String::new(), unique: false, has_rowid: false, + where_clause: None, }); let cursor_id = program.alloc_cursor_id(CursorType::BTreeIndex(dedupe_index.clone())); program.emit_insn(Insn::OpenEphemeral { diff --git a/core/translate/index.rs b/core/translate/index.rs index 8d1ea14a2..b25a08882 100644 --- a/core/translate/index.rs +++ b/core/translate/index.rs @@ -1,8 +1,14 @@ use std::sync::Arc; +use crate::schema::Table; use crate::translate::emitter::{ emit_cdc_full_record, emit_cdc_insns, prepare_cdc_if_necessary, OperationMode, Resolver, }; +use crate::translate::expr::{bind_and_rewrite_expr, translate_expr, ParamState}; +use crate::translate::plan::{ + ColumnUsedMask, IterationDirection, JoinedTable, Operation, Scan, TableReferences, +}; +use crate::vdbe::builder::CursorKey; use crate::vdbe::insn::{CmpInsFlags, Cookie}; use crate::SymbolTable; use crate::{ @@ -18,6 +24,7 @@ use turso_parser::ast::{self, Expr, SortOrder, SortedColumn}; use super::schema::{emit_schema_entry, SchemaEntryType, SQLITE_TABLEID}; +#[allow(clippy::too_many_arguments)] pub fn translate_create_index( unique_if_not_exists: (bool, bool), idx_name: &str, @@ -26,6 +33,8 @@ pub fn translate_create_index( schema: &Schema, syms: &SymbolTable, mut program: ProgramBuilder, + connection: &Arc, + mut where_clause: Option>, ) -> crate::Result { if !schema.indexes_enabled() { crate::bail_parse_error!( @@ -75,6 +84,8 @@ pub fn translate_create_index( unique: unique_if_not_exists.0, ephemeral: false, has_rowid: tbl.has_rowid, + where_clause: where_clause.clone(), // store the *original* where clause, because we need to rewrite it + // before translating, and it cannot reference a table alias }); // Allocate the necessary cursors: @@ -87,13 +98,43 @@ pub fn translate_create_index( let sqlite_table = schema.get_btree_table(SQLITE_TABLEID).unwrap(); let sqlite_schema_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(sqlite_table.clone())); + let table_ref = program.table_reference_counter.next(); let btree_cursor_id = program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone())); - let table_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(tbl.clone())); + let table_cursor_id = program.alloc_cursor_id_keyed( + CursorKey::table(table_ref), + CursorType::BTreeTable(tbl.clone()), + ); let sorter_cursor_id = program.alloc_cursor_id(CursorType::Sorter); let pseudo_cursor_id = program.alloc_cursor_id(CursorType::Pseudo(PseudoCursorType { column_count: tbl.columns.len(), })); + let mut table_references = TableReferences::new( + vec![JoinedTable { + op: Operation::Scan(Scan::BTreeTable { + iter_dir: IterationDirection::Forwards, + index: None, + }), + table: 
Table::BTree(tbl.clone()), + identifier: tbl_name.clone(), + internal_id: table_ref, + join_info: None, + col_used_mask: ColumnUsedMask::default(), + database_id: 0, + }], + vec![], + ); + let mut param_state = ParamState::default(); + if let Some(where_clause) = where_clause.as_mut() { + bind_and_rewrite_expr( + where_clause, + Some(&mut table_references), + None, + connection, + &mut param_state, + )?; + } + // Create a new B-Tree and store the root page index in a register let root_page_reg = program.alloc_register(); program.emit_insn(Insn::CreateBtree { @@ -108,7 +149,13 @@ pub fn translate_create_index( root_page: RegisterOrLiteral::Literal(sqlite_table.root_page), db: 0, }); - let sql = create_idx_stmt_to_sql(&tbl_name, &idx_name, unique_if_not_exists, &columns); + let sql = create_idx_stmt_to_sql( + &tbl_name, + &idx_name, + unique_if_not_exists, + &columns, + &idx.where_clause.clone(), + ); let resolver = Resolver::new(schema, syms); let cdc_table = prepare_cdc_if_necessary(&mut program, schema, SQLITE_TABLEID)?; emit_schema_entry( @@ -159,6 +206,25 @@ pub fn translate_create_index( // emit MakeRecord (index key + rowid) into record_reg. // // Then insert the record into the sorter + let mut skip_row_label = None; + if let Some(where_clause) = where_clause { + let reg = program.alloc_register(); + let label = program.allocate_label(); + let pr = translate_expr( + &mut program, + Some(&table_references), + &where_clause, + reg, + &resolver, + )?; + program.emit_insn(Insn::IfNot { + reg: pr, + target_pc: label, + jump_if_null: true, + }); + skip_row_label = Some(label); + } + let start_reg = program.alloc_registers(columns.len() + 1); for (i, (col, _)) in columns.iter().enumerate() { program.emit_column_or_rowid(table_cursor_id, col.0, start_reg + i); @@ -181,6 +247,9 @@ pub fn translate_create_index( record_reg, }); + if let Some(skip_row_label) = skip_row_label { + program.resolve_label(skip_row_label, program.offset()); + } program.emit_insn(Insn::Next { cursor_id: table_cursor_id, pc_if_next: loop_start_label, @@ -285,6 +354,7 @@ fn create_idx_stmt_to_sql( idx_name: &str, unique_if_not_exists: (bool, bool), cols: &[((usize, &Column), SortOrder)], + where_clause: &Option>, ) -> String { let mut sql = String::with_capacity(128); sql.push_str("CREATE "); @@ -309,6 +379,10 @@ fn create_idx_stmt_to_sql( } } sql.push(')'); + if let Some(where_clause) = where_clause { + sql.push_str(" WHERE "); + sql.push_str(&where_clause.to_string()); + } sql } From 51f970a263c8d596c90c39c56c6167f4a3391dd7 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 19 Sep 2025 20:05:06 -0400 Subject: [PATCH 31/78] Support partial indexes in INSERT/UPDATE/DELETE --- core/translate/delete.rs | 2 +- core/translate/emitter.rs | 363 ++++++++++++++++-------- core/translate/expr.rs | 11 + core/translate/insert.rs | 86 +++++- core/translate/main_loop.rs | 2 + core/translate/mod.rs | 25 +- core/translate/optimizer/constraints.rs | 51 ++-- core/translate/optimizer/mod.rs | 1 + core/translate/select.rs | 2 +- core/translate/update.rs | 59 +++- 10 files changed, 436 insertions(+), 166 deletions(-) diff --git a/core/translate/delete.rs b/core/translate/delete.rs index c2a76f9ec..ccec40138 100644 --- a/core/translate/delete.rs +++ b/core/translate/delete.rs @@ -66,7 +66,7 @@ pub fn translate_delete( approx_num_labels: 0, }; program.extend(&opts); - emit_program(&mut program, delete_plan, schema, syms, |_| {})?; + emit_program(connection, &mut program, delete_plan, schema, syms, |_| {})?; Ok(program) } diff --git 
a/core/translate/emitter.rs b/core/translate/emitter.rs index 00a5fc9b8..8e3e2c81e 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -24,16 +24,20 @@ use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY; use crate::function::Func; use crate::schema::{BTreeTable, Column, Schema, Table}; use crate::translate::compound_select::emit_program_for_compound_select; -use crate::translate::expr::{emit_returning_results, ReturningValueRegisters}; +use crate::translate::expr::{ + bind_and_rewrite_expr, emit_returning_results, walk_expr_mut, ParamState, + ReturningValueRegisters, WalkControl, +}; use crate::translate::plan::{DeletePlan, Plan, QueryDestination, Search}; use crate::translate::result_row::try_fold_expr_to_i64; use crate::translate::values::emit_values; use crate::translate::window::{emit_window_results, init_window, WindowMetadata}; -use crate::util::exprs_are_equivalent; +use crate::util::{exprs_are_equivalent, normalize_ident}; use crate::vdbe::builder::{CursorKey, CursorType, ProgramBuilder}; use crate::vdbe::insn::{CmpInsFlags, IdxInsertFlags, InsertFlags, RegisterOrLiteral}; use crate::vdbe::CursorID; use crate::vdbe::{insn::Insn, BranchOffset}; +use crate::Connection; use crate::{bail_parse_error, Result, SymbolTable}; pub struct Resolver<'a> { @@ -201,6 +205,7 @@ pub enum TransactionMode { /// Takes a query plan and generates the corresponding bytecode program #[instrument(skip_all, level = Level::DEBUG)] pub fn emit_program( + connection: &Arc, program: &mut ProgramBuilder, plan: Plan, schema: &Schema, @@ -209,8 +214,10 @@ pub fn emit_program( ) -> Result<()> { match plan { Plan::Select(plan) => emit_program_for_select(program, plan, schema, syms), - Plan::Delete(plan) => emit_program_for_delete(program, plan, schema, syms), - Plan::Update(plan) => emit_program_for_update(program, plan, schema, syms, after), + Plan::Delete(plan) => emit_program_for_delete(connection, program, plan, schema, syms), + Plan::Update(plan) => { + emit_program_for_update(connection, program, plan, schema, syms, after) + } Plan::CompoundSelect { .. 
} => { emit_program_for_compound_select(program, plan, schema, syms) } @@ -407,8 +414,9 @@ pub fn emit_query<'a>( #[instrument(skip_all, level = Level::DEBUG)] fn emit_program_for_delete( + connection: &Arc, program: &mut ProgramBuilder, - plan: DeletePlan, + mut plan: DeletePlan, schema: &Schema, syms: &SymbolTable, ) -> Result<()> { @@ -461,9 +469,10 @@ fn emit_program_for_delete( )?; emit_delete_insns( + connection, program, &mut t_ctx, - &plan.table_references, + &mut plan.table_references, &plan.result_columns, )?; @@ -484,12 +493,13 @@ fn emit_program_for_delete( } fn emit_delete_insns( + connection: &Arc, program: &mut ProgramBuilder, t_ctx: &mut TranslateCtx, - table_references: &TableReferences, + table_references: &mut TableReferences, result_columns: &[super::plan::ResultSetColumn], ) -> Result<()> { - let table_reference = table_references.joined_tables().first().unwrap(); + let table_reference = table_references.joined_tables().first().unwrap().clone(); if table_reference .virtual_table() .is_some_and(|t| t.readonly()) @@ -572,6 +582,35 @@ fn emit_delete_insns( .unwrap_or_default(); for (index, index_cursor_id) in other_indexes { + let skip_delete_label = if let Some(where_clause) = &index.where_clause { + let mut where_copy = where_clause.as_ref().clone(); + let mut param_state = ParamState::disallow(); + bind_and_rewrite_expr( + &mut where_copy, + Some(table_references), + None, + connection, + &mut param_state, + )?; + let where_result_reg = program.alloc_register(); + translate_expr( + program, + Some(table_references), + &where_copy, + where_result_reg, + &t_ctx.resolver, + )?; + + let skip_label = program.allocate_label(); + program.emit_insn(Insn::IfNot { + reg: where_result_reg, + target_pc: skip_label, + jump_if_null: true, + }); + Some(skip_label) + } else { + None + }; let num_regs = index.columns.len() + 1; let start_reg = program.alloc_registers(num_regs); // Emit columns that are part of the index @@ -594,8 +633,11 @@ fn emit_delete_insns( start_reg, num_regs, cursor_id: index_cursor_id, - raise_error_if_no_matching_entry: true, + raise_error_if_no_matching_entry: index.where_clause.is_none(), }); + if let Some(label) = skip_delete_label { + program.resolve_label(label, program.offset()); + } } // Emit update in the CDC table if necessary (before DELETE updated the table) @@ -684,6 +726,7 @@ fn emit_delete_insns( #[instrument(skip_all, level = Level::DEBUG)] fn emit_program_for_update( + connection: &Arc, program: &mut ProgramBuilder, mut plan: UpdatePlan, schema: &Schema, @@ -779,7 +822,14 @@ fn emit_program_for_update( )?; // Emit update instructions - emit_update_insns(&plan, &t_ctx, program, index_cursors, temp_cursor_id)?; + emit_update_insns( + connection, + &mut plan, + &t_ctx, + program, + index_cursors, + temp_cursor_id, + )?; // Close the main loop close_loop( @@ -801,13 +851,19 @@ fn emit_program_for_update( #[instrument(skip_all, level = Level::DEBUG)] fn emit_update_insns( - plan: &UpdatePlan, + connection: &Arc, + plan: &mut UpdatePlan, t_ctx: &TranslateCtx, program: &mut ProgramBuilder, index_cursors: Vec<(usize, usize)>, temp_cursor_id: Option, ) -> crate::Result<()> { - let table_ref = plan.table_references.joined_tables().first().unwrap(); + let table_ref = plan + .table_references + .joined_tables() + .first() + .unwrap() + .clone(); let loop_labels = t_ctx.labels_main_loop.first().unwrap(); let cursor_id = program.resolve_cursor_id(&CursorKey::table(table_ref.internal_id)); let (index, is_virtual) = match &table_ref.op { @@ -1027,47 +1083,123 
@@ fn emit_update_insns( } for (index, (idx_cursor_id, record_reg)) in plan.indexes_to_update.iter().zip(&index_cursors) { + let (old_satisfies_where, new_satisfies_where) = + if let Some(where_clause) = &index.where_clause { + let mut where_copy = where_clause.as_ref().clone(); + let mut param_state = ParamState::disallow(); + bind_and_rewrite_expr( + &mut where_copy, + Some(&mut plan.table_references), + None, + connection, + &mut param_state, + )?; + let old_satisfied_reg = program.alloc_register(); + translate_expr( + program, + Some(&plan.table_references), + &where_copy, + old_satisfied_reg, + &t_ctx.resolver, + )?; + + let mut new_where = where_clause.as_ref().clone(); + rewrite_where_for_update_registers( + &mut new_where, + table_ref.columns(), + start, + rowid_set_clause_reg.unwrap_or(beg), + )?; + + let new_satisfied_reg = program.alloc_register(); + translate_expr( + program, + None, + &new_where, + new_satisfied_reg, + &t_ctx.resolver, + )?; + + (Some(old_satisfied_reg), Some(new_satisfied_reg)) + } else { + (None, None) + }; + + let mut skip_delete_label = None; + let mut skip_insert_label = None; + + // Handle deletion for partial indexes + if let Some(old_satisfied) = old_satisfies_where { + skip_delete_label = Some(program.allocate_label()); + program.emit_insn(Insn::IfNot { + reg: old_satisfied, + target_pc: skip_delete_label.unwrap(), + jump_if_null: true, + }); + } + + // Delete old index entry + let num_regs = index.columns.len() + 1; + let delete_start_reg = program.alloc_registers(num_regs); + for (reg_offset, column_index) in index.columns.iter().enumerate() { + program.emit_column_or_rowid( + cursor_id, + column_index.pos_in_table, + delete_start_reg + reg_offset, + ); + } + program.emit_insn(Insn::RowId { + cursor_id, + dest: delete_start_reg + num_regs - 1, + }); + program.emit_insn(Insn::IdxDelete { + start_reg: delete_start_reg, + num_regs, + cursor_id: *idx_cursor_id, + raise_error_if_no_matching_entry: old_satisfies_where.is_none(), + }); + + // Resolve delete skip label if it exists + if let Some(label) = skip_delete_label { + program.resolve_label(label, program.offset()); + } + + // Check if we should insert into partial index + if let Some(new_satisfied) = new_satisfies_where { + skip_insert_label = Some(program.allocate_label()); + program.emit_insn(Insn::IfNot { + reg: new_satisfied, + target_pc: skip_insert_label.unwrap(), + jump_if_null: true, + }); + } + + // Build new index entry let num_cols = index.columns.len(); - // allocate scratch registers for the index columns plus rowid let idx_start_reg = program.alloc_registers(num_cols + 1); + let rowid_reg = rowid_set_clause_reg.unwrap_or(beg); - // Use the new rowid value (if the UPDATE statement sets the rowid alias), - // otherwise keep using the original rowid. This guarantees that any - // newly inserted/updated index entries point at the correct row after - // the primary key change. - let rowid_reg = if has_user_provided_rowid { - // Safe to unwrap because `has_user_provided_rowid` implies the register was allocated. 
- rowid_set_clause_reg.expect("rowid register must be set when updating rowid alias") - } else { - beg - }; - let idx_cols_start_reg = beg + 1; - - // copy each index column from the table's column registers into these scratch regs for (i, col) in index.columns.iter().enumerate() { let col_in_table = table_ref .columns() .get(col.pos_in_table) .expect("column index out of bounds"); - // copy from the table's column register over to the index's scratch register program.emit_insn(Insn::Copy { src_reg: if col_in_table.is_rowid_alias { rowid_reg } else { - idx_cols_start_reg + col.pos_in_table + start + col.pos_in_table }, dst_reg: idx_start_reg + i, extra_amount: 0, }); } - // last register is the rowid program.emit_insn(Insn::Copy { src_reg: rowid_reg, dst_reg: idx_start_reg + num_cols, extra_amount: 0, }); - // this record will be inserted into the index later program.emit_insn(Insn::MakeRecord { start_reg: idx_start_reg, count: num_cols + 1, @@ -1076,55 +1208,58 @@ fn emit_update_insns( affinity_str: None, }); - if !index.unique { - continue; + // Handle unique constraint + if index.unique { + let constraint_check = program.allocate_label(); + program.emit_insn(Insn::NoConflict { + cursor_id: *idx_cursor_id, + target_pc: constraint_check, + record_reg: idx_start_reg, + num_regs: num_cols, + }); + + let idx_rowid_reg = program.alloc_register(); + program.emit_insn(Insn::IdxRowId { + cursor_id: *idx_cursor_id, + dest: idx_rowid_reg, + }); + + program.emit_insn(Insn::Eq { + lhs: beg, + rhs: idx_rowid_reg, + target_pc: constraint_check, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + + let column_names = index + .columns + .iter() + .map(|c| format!("{}.{}", table_ref.table.get_name(), c.name)) + .collect::>() + .join(", "); + + program.emit_insn(Insn::Halt { + err_code: SQLITE_CONSTRAINT_PRIMARYKEY, + description: column_names, + }); + + program.preassign_label_to_next_insn(constraint_check); } - // check if the record already exists in the index for unique indexes and abort if so - let constraint_check = program.allocate_label(); - program.emit_insn(Insn::NoConflict { + // Insert the index entry + program.emit_insn(Insn::IdxInsert { cursor_id: *idx_cursor_id, - target_pc: constraint_check, - record_reg: idx_start_reg, - num_regs: num_cols, + record_reg: *record_reg, + unpacked_start: Some(idx_start_reg), + unpacked_count: Some((num_cols + 1) as u16), + flags: IdxInsertFlags::new().nchange(true), }); - let column_names = index.columns.iter().enumerate().fold( - String::with_capacity(50), - |mut accum, (idx, col)| { - if idx > 0 { - accum.push_str(", "); - } - accum.push_str(table_ref.table.get_name()); - accum.push('.'); - accum.push_str(&col.name); - - accum - }, - ); - - let idx_rowid_reg = program.alloc_register(); - program.emit_insn(Insn::IdxRowId { - cursor_id: *idx_cursor_id, - dest: idx_rowid_reg, - }); - - // Skip over the UNIQUE constraint failure if the existing row is the one that we are currently changing - let original_rowid_reg = beg; - program.emit_insn(Insn::Eq { - lhs: original_rowid_reg, - rhs: idx_rowid_reg, - target_pc: constraint_check, - flags: CmpInsFlags::default(), // TODO: not sure what type of comparison flag is needed - collation: program.curr_collation(), - }); - - program.emit_insn(Insn::Halt { - err_code: SQLITE_CONSTRAINT_PRIMARYKEY, // TODO: distinct between primary key and unique index for error code - description: column_names, - }); - - program.preassign_label_to_next_insn(constraint_check); + // Resolve insert skip label if 
it exists + if let Some(label) = skip_insert_label { + program.resolve_label(label, program.offset()); + } } if let Some(btree_table) = table_ref.btree() { @@ -1197,47 +1332,6 @@ fn emit_update_insns( }); } - // For each index -> insert - for (index, (idx_cursor_id, record_reg)) in plan.indexes_to_update.iter().zip(index_cursors) - { - let num_regs = index.columns.len() + 1; - let start_reg = program.alloc_registers(num_regs); - - // Delete existing index key - index - .columns - .iter() - .enumerate() - .for_each(|(reg_offset, column_index)| { - program.emit_column_or_rowid( - cursor_id, - column_index.pos_in_table, - start_reg + reg_offset, - ); - }); - - program.emit_insn(Insn::RowId { - cursor_id, - dest: start_reg + num_regs - 1, - }); - - program.emit_insn(Insn::IdxDelete { - start_reg, - num_regs, - cursor_id: idx_cursor_id, - raise_error_if_no_matching_entry: true, - }); - - // Insert new index key (filled further above with values from set_clauses) - program.emit_insn(Insn::IdxInsert { - cursor_id: idx_cursor_id, - record_reg, - unpacked_start: Some(start), - unpacked_count: Some((index.columns.len() + 1) as u16), - flags: IdxInsertFlags::new().nchange(true), - }); - } - // create alias for CDC rowid after the change (will differ from cdc_rowid_before_reg only in case of UPDATE with change in rowid alias) let cdc_rowid_after_reg = rowid_set_clause_reg.unwrap_or(beg); @@ -1671,3 +1765,42 @@ fn init_limit( } } } + +fn rewrite_where_for_update_registers( + expr: &mut Expr, + columns: &[Column], + columns_start_reg: usize, + rowid_reg: usize, +) -> Result { + walk_expr_mut(expr, &mut |e: &mut Expr| -> Result { + match e { + Expr::Qualified(_, col) | Expr::DoublyQualified(_, _, col) => { + let normalized = normalize_ident(col.as_str()); + if let Some((idx, _)) = columns.iter().enumerate().find(|(_, c)| { + c.name + .as_ref() + .map_or(false, |n| n.eq_ignore_ascii_case(&normalized)) + }) { + *e = Expr::Register(columns_start_reg + idx); + } + } + Expr::Id(ast::Name::Ident(name)) | Expr::Id(ast::Name::Quoted(name)) => { + let normalized = normalize_ident(name.as_str()); + if normalized.eq_ignore_ascii_case("rowid") { + *e = Expr::Register(rowid_reg); + } else if let Some((idx, _)) = columns.iter().enumerate().find(|(_, c)| { + c.name + .as_ref() + .map_or(false, |n| n.eq_ignore_ascii_case(&normalized)) + }) { + *e = Expr::Register(columns_start_reg + idx); + } + } + Expr::RowId { .. } => { + *e = Expr::Register(rowid_reg); + } + _ => {} + } + Ok(WalkControl::Continue) + }) +} diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 38eca1ae4..c208710e1 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -3264,6 +3264,14 @@ impl Default for ParamState { Self { next_param_idx: 1 } } } +impl ParamState { + pub fn is_valid(&self) -> bool { + self.next_param_idx > 0 + } + pub fn disallow() -> Self { + Self { next_param_idx: 0 } + } +} /// Rewrite ast::Expr in place, binding Column references/rewriting Expr::Id -> Expr::Column /// using the provided TableReferences, and replacing anonymous parameters with internal named @@ -3287,6 +3295,9 @@ pub fn bind_and_rewrite_expr<'a>( } // Rewrite anonymous variables in encounter order. 
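 // A ParamState created with `disallow` (next_param_idx == 0) marks contexts
 // that cannot bind parameters, e.g. partial-index WHERE clauses rebound
 // during DELETE/UPDATE, so a stray anonymous "?" fails loudly here.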
ast::Expr::Variable(var) if var.is_empty() => { + if !param_state.is_valid() { + crate::bail_parse_error!("Parameters are not allowed in this context"); + } *expr = ast::Expr::Variable(format!( "{}{}", PARAM_PREFIX, param_state.next_param_idx diff --git a/core/translate/insert.rs b/core/translate/insert.rs index ddcf00755..6b21798db 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -10,8 +10,8 @@ use crate::translate::emitter::{ emit_cdc_insns, emit_cdc_patch_record, prepare_cdc_if_necessary, OperationMode, }; use crate::translate::expr::{ - bind_and_rewrite_expr, emit_returning_results, process_returning_clause, ParamState, - ReturningValueRegisters, + bind_and_rewrite_expr, emit_returning_results, process_returning_clause, walk_expr_mut, + ParamState, ReturningValueRegisters, WalkControl, }; use crate::translate::planner::ROWID; use crate::translate::upsert::{ @@ -21,6 +21,7 @@ use crate::util::normalize_ident; use crate::vdbe::builder::ProgramBuilderOpts; use crate::vdbe::insn::{IdxInsertFlags, InsertFlags, RegisterOrLiteral}; use crate::vdbe::BranchOffset; +use crate::{bail_parse_error, Result, SymbolTable, VirtualTable}; use crate::{ schema::{Column, Schema}, vdbe::{ @@ -28,7 +29,6 @@ use crate::{ insn::Insn, }, }; -use crate::{Result, SymbolTable, VirtualTable}; use super::emitter::Resolver; use super::expr::{translate_expr, translate_expr_no_constant_opt, NoConstantOptReason}; @@ -510,6 +510,32 @@ pub fn translate_insert( .map(|(_, _, c_id)| *c_id) .expect("no cursor found for index"); + let skip_index_label = if let Some(where_clause) = &index.where_clause { + // Clone and rewrite WHERE to use insertion registers + let mut where_for_eval = where_clause.as_ref().clone(); + rewrite_partial_index_where(&mut where_for_eval, &insertion)?; + + // Evaluate rewritten WHERE clause + let where_result_reg = program.alloc_register(); + translate_expr( + &mut program, + None, + &where_for_eval, + where_result_reg, + &resolver, + )?; + // Skip index update if WHERE is false/null + let skip_label = program.allocate_label(); + program.emit_insn(Insn::IfNot { + reg: where_result_reg, + target_pc: skip_label, + jump_if_null: true, + }); + Some(skip_label) + } else { + None + }; + let num_cols = index.columns.len(); // allocate scratch registers for the index columns plus rowid let idx_start_reg = program.alloc_registers(num_cols + 1); @@ -623,6 +649,9 @@ pub fn translate_insert( // TODO: figure out how to determine whether or not we need to seek prior to insert. flags: IdxInsertFlags::new().nchange(true), }); + if let Some(skip_label) = skip_index_label { + program.resolve_label(skip_label, program.offset()); + } } for column_mapping in insertion @@ -1186,3 +1215,54 @@ fn translate_virtual_table_insert( Ok(program) } + +/// Rewrite WHERE clause for partial index to reference insertion registers +pub fn rewrite_partial_index_where( + expr: &mut ast::Expr, + insertion: &Insertion, +) -> crate::Result { + walk_expr_mut( + expr, + &mut |e: &mut ast::Expr| -> crate::Result { + match e { + // Unqualified column reference, map to insertion register + Expr::Column { + column, + is_rowid_alias, + .. 
+ } => { + if *is_rowid_alias { + *e = Expr::Register(insertion.key_register()); + } else if let Some(col_mapping) = insertion.col_mappings.get(*column) { + *e = Expr::Register(col_mapping.register); + } else { + bail_parse_error!("Column index {} not found in insertion", column); + } + } + Expr::Id(ast::Name::Ident(name)) | Expr::Id(ast::Name::Quoted(name)) => { + let normalized = normalize_ident(name.as_str()); + if normalized.eq_ignore_ascii_case("rowid") { + *e = Expr::Register(insertion.key_register()); + } else if let Some(col_mapping) = insertion.get_col_mapping_by_name(&normalized) + { + *e = Expr::Register(col_mapping.register); + } + } + Expr::Qualified(_, col) | Expr::DoublyQualified(_, _, col) => { + let normalized = normalize_ident(col.as_str()); + if normalized.eq_ignore_ascii_case("rowid") { + *e = Expr::Register(insertion.key_register()); + } else if let Some(col_mapping) = insertion.get_col_mapping_by_name(&normalized) + { + *e = Expr::Register(col_mapping.register); + } + } + Expr::RowId { .. } => { + *e = Expr::Register(insertion.key_register()); + } + _ => {} + } + Ok(WalkControl::Continue) + }, + ) +} diff --git a/core/translate/main_loop.rs b/core/translate/main_loop.rs index 292b22716..b41a28945 100644 --- a/core/translate/main_loop.rs +++ b/core/translate/main_loop.rs @@ -93,6 +93,7 @@ pub fn init_distinct(program: &mut ProgramBuilder, plan: &SelectPlan) -> Distinc .collect(), unique: false, has_rowid: false, + where_clause: None, }); let cursor_id = program.alloc_cursor_id(CursorType::BTreeIndex(index.clone())); let ctx = DistinctCtx { @@ -166,6 +167,7 @@ pub fn init_loop( }], has_rowid: false, unique: false, + where_clause: None, }); let cursor_id = program.alloc_cursor_id(CursorType::BTreeIndex(index.clone())); if group_by.is_none() { diff --git a/core/translate/mod.rs b/core/translate/mod.rs index 4f31fecc2..1a963b5b3 100644 --- a/core/translate/mod.rs +++ b/core/translate/mod.rs @@ -163,20 +163,17 @@ pub fn translate_inner( tbl_name, columns, where_clause, - } => { - if where_clause.is_some() { - bail_parse_error!("Partial indexes are not supported"); - } - translate_create_index( - (unique, if_not_exists), - idx_name.name.as_str(), - tbl_name.as_str(), - &columns, - schema, - syms, - program, - )? 
- } + } => translate_create_index( + (unique, if_not_exists), + idx_name.name.as_str(), + tbl_name.as_str(), + &columns, + schema, + syms, + program, + connection, + where_clause, + )?, ast::Stmt::CreateTable { temporary, if_not_exists, diff --git a/core/translate/optimizer/constraints.rs b/core/translate/optimizer/constraints.rs index 08cb3c9d2..cdd830b5f 100644 --- a/core/translate/optimizer/constraints.rs +++ b/core/translate/optimizer/constraints.rs @@ -7,6 +7,7 @@ use crate::{ plan::{JoinOrderMember, TableReferences, WhereTerm}, planner::{table_mask_from_expr, TableMask}, }, + util::exprs_are_equivalent, Result, }; use turso_ext::{ConstraintInfo, ConstraintOp}; @@ -319,26 +320,21 @@ pub fn constraints_from_where_clause( if let Some(position_in_index) = index.column_table_pos_to_index_pos(constraint.table_col_pos) { - let index_candidate = cs - .candidates - .iter_mut() - .find_map(|candidate| { - if candidate - .index - .as_ref() - .is_some_and(|i| Arc::ptr_eq(index, i)) - { - Some(candidate) - } else { - None - } - }) - .unwrap(); - index_candidate.refs.push(ConstraintRef { - constraint_vec_pos: i, - index_col_pos: position_in_index, - sort_order: index.columns[position_in_index].order, - }); + if let Some(index_candidate) = cs.candidates.iter_mut().find_map(|candidate| { + if candidate.index.as_ref().is_some_and(|i| { + Arc::ptr_eq(index, i) && can_use_partial_index(index, where_clause) + }) { + Some(candidate) + } else { + None + } + }) { + index_candidate.refs.push(ConstraintRef { + constraint_vec_pos: i, + index_col_pos: position_in_index, + sort_order: index.columns[position_in_index].order, + }); + } } } } @@ -403,6 +399,21 @@ pub fn usable_constraints_for_join_order<'a>( &refs[..usable_until] } +fn can_use_partial_index(index: &Index, query_where_clause: &[WhereTerm]) -> bool { + let Some(index_where) = &index.where_clause else { + // Full index, always usable + return true; + }; + // Check if query WHERE contains the exact same predicate + for term in query_where_clause { + if exprs_are_equivalent(&term.expr, index_where.as_ref()) { + return true; + } + } + // TODO: do better to determine if we should use partial index + false +} + pub fn convert_to_vtab_constraint( constraints: &[Constraint], join_order: &[JoinOrderMember], diff --git a/core/translate/optimizer/mod.rs b/core/translate/optimizer/mod.rs index b9df7c698..eb883b05e 100644 --- a/core/translate/optimizer/mod.rs +++ b/core/translate/optimizer/mod.rs @@ -899,6 +899,7 @@ fn ephemeral_index_build( ephemeral: true, table_name: table_reference.table.get_name().to_string(), root_page: 0, + where_clause: None, has_rowid: table_reference .table .btree() diff --git a/core/translate/select.rs b/core/translate/select.rs index a1ec15abe..9d1cf62a1 100644 --- a/core/translate/select.rs +++ b/core/translate/select.rs @@ -82,7 +82,7 @@ pub fn translate_select( }; program.extend(&opts); - emit_program(&mut program, select_plan, schema, syms, |_| {})?; + emit_program(connection, &mut program, select_plan, schema, syms, |_| {})?; Ok(TranslateSelectResult { program, num_result_cols, diff --git a/core/translate/update.rs b/core/translate/update.rs index feb3d926d..14d5f46d2 100644 --- a/core/translate/update.rs +++ b/core/translate/update.rs @@ -1,8 +1,8 @@ -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use crate::schema::{BTreeTable, Column, Type}; -use crate::translate::expr::{bind_and_rewrite_expr, ParamState}; +use crate::translate::expr::{bind_and_rewrite_expr, walk_expr, 
ParamState, WalkControl}; use crate::translate::optimizer::optimize_select_plan; use crate::translate::plan::{Operation, QueryDestination, Scan, Search, SelectPlan}; use crate::translate::planner::parse_limit; @@ -62,14 +62,13 @@ pub fn translate_update( ) -> crate::Result { let mut plan = prepare_update_plan(&mut program, schema, body, connection, false)?; optimize_plan(&mut plan, schema)?; - // TODO: freestyling these numbers let opts = ProgramBuilderOpts { num_cursors: 1, approx_num_insns: 20, approx_num_labels: 4, }; program.extend(&opts); - emit_program(&mut program, plan, schema, syms, |_| {})?; + emit_program(connection, &mut program, plan, schema, syms, |_| {})?; Ok(program) } @@ -97,7 +96,7 @@ pub fn translate_update_for_schema_change( approx_num_labels: 4, }; program.extend(&opts); - emit_program(&mut program, plan, schema, syms, after)?; + emit_program(connection, &mut program, plan, schema, syms, after)?; Ok(program) } @@ -372,6 +371,7 @@ pub fn prepare_update_plan( // Check what indexes will need to be updated by checking set_clauses and see // if a column is contained in an index. let indexes = schema.get_indices(table_name); + let updated_cols: HashSet = set_clauses.iter().map(|(i, _)| *i).collect(); let rowid_alias_used = set_clauses .iter() .any(|(idx, _)| columns[*idx].is_rowid_alias); @@ -382,14 +382,37 @@ pub fn prepare_update_plan( // otherwise we need to update the indexes whose columns are set in the SET clause indexes .iter() - .filter(|index| { - index.columns.iter().any(|index_column| { - set_clauses - .iter() - .any(|(set_index_column, _)| index_column.pos_in_table == *set_index_column) - }) + .filter_map(|idx| { + let mut needs = idx + .columns + .iter() + .any(|c| updated_cols.contains(&c.pos_in_table)); + + if !needs { + if let Some(w) = &idx.where_clause { + // Bind once so names→positions are resolved exactly like execution. + let mut where_copy = w.as_ref().clone(); + let mut param = ParamState::disallow(); + let mut tr = + TableReferences::new(table_references.joined_tables().to_vec(), vec![]); + bind_and_rewrite_expr( + &mut where_copy, + Some(&mut tr), + None, + connection, + &mut param, + ) + .expect("bind where"); + let cols_used = collect_cols_used_in_expr(&where_copy); + needs = cols_used.iter().any(|c| updated_cols.contains(c)); + } + } + if needs { + Some(idx.clone()) + } else { + None + } }) - .cloned() .collect() }; @@ -422,3 +445,15 @@ fn build_scan_op(table: &Table, iter_dir: IterationDirection) -> Operation { _ => unreachable!(), } } + +fn collect_cols_used_in_expr(expr: &Expr) -> HashSet { + let mut acc = HashSet::new(); + let _ = walk_expr(expr, &mut |expr| match expr { + Expr::Column { column, .. 
} => { + acc.insert(*column); + Ok(WalkControl::Continue) + } + _ => Ok(WalkControl::Continue), + }); + acc +} From 4c6917f84906c7f1945738d3a171620f2e7c8697 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 19 Sep 2025 20:09:07 -0400 Subject: [PATCH 32/78] Add testing for partial indexes --- testing/all.test | 1 + testing/partial_idx.test | 155 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+) create mode 100755 testing/partial_idx.test diff --git a/testing/all.test b/testing/all.test index 2d913afa8..aa89838d7 100755 --- a/testing/all.test +++ b/testing/all.test @@ -43,3 +43,4 @@ source $testdir/views.test source $testdir/vtab.test source $testdir/upsert.test source $testdir/window.test +source $testdir/partial_idx.test diff --git a/testing/partial_idx.test b/testing/partial_idx.test new file mode 100755 index 000000000..c3147a8a9 --- /dev/null +++ b/testing/partial_idx.test @@ -0,0 +1,155 @@ +#!/usr/bin/env tclsh +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/sqlite3/tester.tcl + +do_execsql_test_on_specific_db {:memory:} partial-index-unique-basic { + CREATE TABLE users (id INTEGER PRIMARY KEY, email TEXT, status TEXT); + CREATE UNIQUE INDEX idx_active_email ON users(email) WHERE status = 'active'; + INSERT INTO users VALUES (1, 'user@test.com', 'active'); + INSERT INTO users VALUES (2, 'user@test.com', 'inactive'); + INSERT INTO users VALUES (3, 'user@test.com', 'deleted'); + SELECT id, email, status FROM users ORDER BY id; +} {1|user@test.com|active +2|user@test.com|inactive +3|user@test.com|deleted} + +do_execsql_test_in_memory_error_content partial-index-unique-violation { + CREATE TABLE users (id INTEGER PRIMARY KEY, email TEXT, status TEXT); + CREATE UNIQUE INDEX idx_active_email ON users(email) WHERE status = 'active'; + INSERT INTO users VALUES (1, 'user@test.com', 'active'); + INSERT INTO users VALUES (2, 'user@test.com', 'inactive'); + INSERT INTO users VALUES (3, 'user@test.com', 'deleted'); + INSERT INTO users VALUES (4, 'user@test.com', 'active'); +} {UNIQUE constraint failed: idx_active_email.email (19)} + +do_execsql_test_on_specific_db {:memory:} partial-index-expression-where { + CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); + CREATE UNIQUE INDEX idx_expensive ON products(sku) WHERE price > 100; + INSERT INTO products VALUES (1, 'ABC123', 50); + INSERT INTO products VALUES (2, 'ABC123', 150); + INSERT INTO products VALUES (3, 'XYZ789', 200); + INSERT INTO products VALUES (4, 'ABC123', 75); + SELECT id, sku, price FROM products ORDER BY id; +} {1|ABC123|50 +2|ABC123|150 +3|XYZ789|200 +4|ABC123|75} + +do_execsql_test_in_memory_error_content partial-index-expensive-violation { + CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); + CREATE UNIQUE INDEX idx_expensive ON products(sku) WHERE price > 100; + INSERT INTO products VALUES (1, 'ABC123', 50); + INSERT INTO products VALUES (2, 'ABC123', 150); + INSERT INTO products VALUES (3, 'XYZ789', 200); + INSERT INTO products VALUES (4, 'ABC123', 75); + INSERT INTO products VALUES (5, 'ABC123', 250); + -- should fail with unique sku where price > 100 +} {UNIQUE constraint failed: idx_expensive.sku (19)} + +do_execsql_test_in_memory_error_content partial-index-expensive-violation-update { + CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); + CREATE UNIQUE INDEX idx_expensive ON products(sku) WHERE price > 100; + INSERT INTO products VALUES (1, 'ABC123', 50); + INSERT INTO products VALUES (2, 'ABC123', 
150); + INSERT INTO products VALUES (3, 'XYZ789', 200); + INSERT INTO products VALUES (4, 'ABC123', 75); + UPDATE products SET price = 300 WHERE id = 1; + -- should fail with unique sku where price > 100 +} {UNIQUE constraint failed: products.sku (19)} + +do_execsql_test_on_specific_db {:memory:} partial-index-null-where { + CREATE TABLE items (id INTEGER PRIMARY KEY, code TEXT, category TEXT); + CREATE UNIQUE INDEX idx_categorized ON items(code) WHERE category IS NOT NULL; + INSERT INTO items VALUES (1, 'ITEM1', 'electronics'); + INSERT INTO items VALUES (2, 'ITEM1', NULL); + INSERT INTO items VALUES (3, 'ITEM1', NULL); + INSERT INTO items VALUES (4, 'ITEM2', 'books'); + SELECT id, code, category FROM items ORDER BY id; +} {1|ITEM1|electronics +2|ITEM1| +3|ITEM1| +4|ITEM2|books} + + +do_execsql_test_in_memory_error_content partial-index-function-where { + CREATE TABLE docs (id INTEGER PRIMARY KEY, title TEXT); + CREATE UNIQUE INDEX idx_lower_title ON docs(title) WHERE LOWER(title) = title; + INSERT INTO docs VALUES (1, 'lowercase'); + INSERT INTO docs VALUES (2, 'UPPERCASE'); + INSERT INTO docs VALUES (3, 'lowercase'); +} {UNIQUE constraint failed: idx_lower_title.title (19)} + +do_execsql_test_on_specific_db {:memory:} partial-index-multiple { + CREATE TABLE tasks (id INTEGER PRIMARY KEY, name TEXT, priority INTEGER, status TEXT); + CREATE UNIQUE INDEX idx_urgent ON tasks(name) WHERE priority = 1; + CREATE UNIQUE INDEX idx_completed ON tasks(name) WHERE status = 'done'; + + INSERT INTO tasks VALUES (1, 'task1', 1, 'open'); + INSERT INTO tasks VALUES (2, 'task1', 2, 'open'); + INSERT INTO tasks VALUES (3, 'task1', 3, 'done'); + INSERT INTO tasks VALUES (4, 'task2', 1, 'done'); + + SELECT id, name, priority, status FROM tasks ORDER BY id; +} {1|task1|1|open +2|task1|2|open +3|task1|3|done +4|task2|1|done} + +do_execsql_test_in_memory_error_content partial-index-function-where { + CREATE TABLE tasks (id INTEGER PRIMARY KEY, name TEXT, priority INTEGER, status TEXT); + CREATE UNIQUE INDEX idx_urgent ON tasks(name) WHERE priority = 1; + CREATE UNIQUE INDEX idx_completed ON tasks(name) WHERE status = 'done'; + + INSERT INTO tasks VALUES (1, 'task1', 1, 'open'); + INSERT INTO tasks VALUES (2, 'task1', 2, 'open'); + INSERT INTO tasks VALUES (3, 'task1', 3, 'done'); + INSERT INTO tasks VALUES (4, 'task2', 1, 'done'); + INSERT INTO tasks VALUES (5, 'task1', 1, 'pending'); + -- should fail for unique name where priority = 1 +} {UNIQUE constraint failed: idx_urgent.name (19)} + +do_execsql_test_in_memory_error_content partial-index-function-where-2 { + CREATE TABLE tasks (id INTEGER PRIMARY KEY, name TEXT, priority INTEGER, status TEXT); + CREATE UNIQUE INDEX idx_urgent ON tasks(name) WHERE priority = 1; + CREATE UNIQUE INDEX idx_completed ON tasks(name) WHERE status = 'done'; + INSERT INTO tasks VALUES (1, 'task1', 1, 'open'); + INSERT INTO tasks VALUES (2, 'task1', 2, 'open'); + INSERT INTO tasks VALUES (3, 'task1', 3, 'done'); + INSERT INTO tasks VALUES (4, 'task2', 1, 'done'); + INSERT INTO tasks VALUES (6, 'task1', 2, 'done'); + -- should fail for unique name where status = 'done' +} {UNIQUE constraint failed: idx_completed.name (19)} + +do_execsql_test_on_specific_db {:memory:} partial-index-update-rowid { + CREATE TABLE rowid_test (id INTEGER PRIMARY KEY, val TEXT, flag INTEGER); + CREATE UNIQUE INDEX idx_flagged ON rowid_test(val) WHERE flag = 1; + INSERT INTO rowid_test VALUES (1, 'test', 1); + INSERT INTO rowid_test VALUES (2, 'test', 0); + UPDATE rowid_test SET id = 10 WHERE id = 1; + 
SELECT id, val, flag FROM rowid_test ORDER BY id; +} {2|test|0 +10|test|1} + +do_execsql_test_in_memory_error_content partial-index-update-complex { + CREATE TABLE complex (id INTEGER PRIMARY KEY, a TEXT, b INTEGER, c TEXT); + CREATE UNIQUE INDEX idx_complex ON complex(a) WHERE b > 10 AND c = 'active'; + INSERT INTO complex VALUES (1, 'dup', 5, 'active'); + INSERT INTO complex VALUES (2, 'dup', 15, 'inactive'); + INSERT INTO complex VALUES (3, 'dup', 15, 'active'); + INSERT INTO complex VALUES (4, 'dup', 20, 'active'); +} {UNIQUE constraint failed: idx_complex.a (19)} + +do_execsql_test_on_specific_db {:memory:} partial-index-delete { + CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); + CREATE UNIQUE INDEX idx_expensive ON products(sku) WHERE price > 100; + INSERT INTO products VALUES (1, 'ABC123', 50); + INSERT INTO products VALUES (2, 'ABC123', 150); + INSERT INTO products VALUES (3, 'XYZ789', 200); + INSERT INTO products VALUES (4, 'ABC123', 75); + DELETE FROM products WHERE price > 100; + INSERT INTO products VALUES (5, 'ABC123', 500); + INSERT INTO products VALUES (6, 'XYZ789', 600); + SELECT id, sku, price FROM products WHERE price > 100 ORDER BY id; +} {5|ABC123|500 +6|XYZ789|600} From 635273f782ae2fdcac694dabb6732c9903fd4e27 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 19 Sep 2025 22:33:51 -0400 Subject: [PATCH 33/78] Prevent using a partial index as a scan driver --- core/translate/optimizer/constraints.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/core/translate/optimizer/constraints.rs b/core/translate/optimizer/constraints.rs index cdd830b5f..049408b17 100644 --- a/core/translate/optimizer/constraints.rs +++ b/core/translate/optimizer/constraints.rs @@ -361,6 +361,15 @@ pub fn constraints_from_where_clause( candidate.refs.truncate(first_inequality + 1); } } + cs.candidates.retain(|c| { + if let Some(idx) = &c.index { + if idx.where_clause.is_some() && c.refs.is_empty() { + // prevent a partial index from even being considered as a scan driver. 
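+                    // a partial index only contains rows that satisfy its WHERE
+                    // predicate, so scanning it could silently skip table rows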
+ return false; + } + } + true + }); constraints.push(cs); } From 2d952feae309f726bef9ca6e9ea346b3bd47bdc2 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 19 Sep 2025 22:34:25 -0400 Subject: [PATCH 34/78] Add DELETE behavior tests for partial indexes --- testing/partial_idx.test | 77 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/testing/partial_idx.test b/testing/partial_idx.test index c3147a8a9..5bce6b0c8 100755 --- a/testing/partial_idx.test +++ b/testing/partial_idx.test @@ -153,3 +153,80 @@ do_execsql_test_on_specific_db {:memory:} partial-index-delete { SELECT id, sku, price FROM products WHERE price > 100 ORDER BY id; } {5|ABC123|500 6|XYZ789|600} + +do_execsql_test_on_specific_db {:memory:} partial-index-delete-function-where { + CREATE TABLE func_del (id INTEGER PRIMARY KEY, name TEXT); + CREATE UNIQUE INDEX idx_lower ON func_del(name) WHERE LOWER(name) = name; + + INSERT INTO func_del VALUES (1, 'lowercase'); + INSERT INTO func_del VALUES (2, 'UPPERCASE'); + INSERT INTO func_del VALUES (3, 'MixedCase'); + DELETE FROM func_del WHERE LOWER(name) = name; + + -- Should be able to insert lowercase now + INSERT INTO func_del VALUES (4, 'lowercase'); + INSERT INTO func_del VALUES (5, 'another'); + SELECT id, name FROM func_del ORDER BY id; +} {2|UPPERCASE +3|MixedCase +4|lowercase +5|another} + +do_execsql_test_in_memory_error_content partial-index-delete-all { + CREATE TABLE del_all (id INTEGER PRIMARY KEY, val TEXT, flag INTEGER); + CREATE UNIQUE INDEX idx_all ON del_all(val) WHERE flag = 1; + INSERT INTO del_all VALUES (1, 'test', 1), (2, 'test', 0), (3, 'other', 1); + DELETE FROM del_all; + -- Should be able to insert anything now + INSERT INTO del_all VALUES (4, 'test', 1); + INSERT INTO del_all VALUES (5, 'test', 1); +} {UNIQUE constraint failed: idx_all.val (19)} + +do_execsql_test_on_specific_db {:memory:} partial-index-delete-cascade-scenario { + CREATE TABLE parent_del (id INTEGER PRIMARY KEY, status TEXT); + CREATE TABLE child_del (id INTEGER PRIMARY KEY, parent_id INTEGER, name TEXT, active INTEGER); + CREATE UNIQUE INDEX idx_active_child ON child_del(name) WHERE active = 1; + + INSERT INTO parent_del VALUES (1, 'active'), (2, 'inactive'); + INSERT INTO child_del VALUES (1, 1, 'child1', 1); + INSERT INTO child_del VALUES (2, 1, 'child2', 1); + INSERT INTO child_del VALUES (3, 2, 'child1', 0); + -- Simulate cascade by deleting children of parent 1 + DELETE FROM child_del WHERE parent_id = 1; + -- Should now allow these since active children are gone + INSERT INTO child_del VALUES (4, 2, 'child1', 1); + INSERT INTO child_del VALUES (5, 2, 'child2', 1); + SELECT COUNT(*) FROM child_del WHERE active = 1; +} {2} + +do_execsql_test_on_specific_db {:memory:} partial-index-delete-null-where { + CREATE TABLE null_del (id INTEGER PRIMARY KEY, code TEXT, category TEXT); + CREATE UNIQUE INDEX idx_with_category ON null_del(code) WHERE category IS NOT NULL; + INSERT INTO null_del VALUES (1, 'CODE1', 'cat1'); + INSERT INTO null_del VALUES (2, 'CODE1', NULL); + INSERT INTO null_del VALUES (3, 'CODE2', 'cat2'); + INSERT INTO null_del VALUES (4, 'CODE1', NULL); + -- Delete the one with category + DELETE FROM null_del WHERE code = 'CODE1' AND category IS NOT NULL; + -- Should allow this now + INSERT INTO null_del VALUES (5, 'CODE1', 'cat3'); + + SELECT id, code, category FROM null_del WHERE code = 'CODE1' ORDER BY id; +} {2|CODE1| +4|CODE1| +5|CODE1|cat3} + +do_execsql_test_on_specific_db {:memory:} partial-index-delete-complex-where { + CREATE TABLE 
complex_del (id INTEGER PRIMARY KEY, a INTEGER, b INTEGER, c TEXT); + CREATE UNIQUE INDEX idx_complex ON complex_del(c) WHERE a > 10 AND b < 20; + INSERT INTO complex_del VALUES (1, 15, 10, 'dup'); + INSERT INTO complex_del VALUES (2, 5, 15, 'dup'); + INSERT INTO complex_del VALUES (3, 15, 25, 'dup'); + INSERT INTO complex_del VALUES (4, 20, 10, 'unique'); + -- Delete the one entry that's actually in the partial index + DELETE FROM complex_del WHERE a > 10 AND b < 20; + + -- Should now allow this since we deleted the conflicting entry + INSERT INTO complex_del VALUES (5, 12, 18, 'dup'); + SELECT COUNT(*) FROM complex_del WHERE c = 'dup'; +} {3} From 67cb59d9a7507bb4d47adcea58c3b2dbeadba7b0 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 19 Sep 2025 22:43:10 -0400 Subject: [PATCH 35/78] Add UPDATE tests for partial index behavior --- testing/partial_idx.test | 136 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) diff --git a/testing/partial_idx.test b/testing/partial_idx.test index 5bce6b0c8..c9208ea77 100755 --- a/testing/partial_idx.test +++ b/testing/partial_idx.test @@ -230,3 +230,139 @@ do_execsql_test_on_specific_db {:memory:} partial-index-delete-complex-where { INSERT INTO complex_del VALUES (5, 12, 18, 'dup'); SELECT COUNT(*) FROM complex_del WHERE c = 'dup'; } {3} + + +# Entering predicate via UPDATE should conflict with an existing in-predicate key +do_execsql_test_in_memory_error_content partial-index-update-enter-conflict-1 { + CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); + CREATE UNIQUE INDEX idx_expensive ON products(sku) WHERE price > 100; + INSERT INTO products VALUES (1, 'ABC123', 50); + INSERT INTO products VALUES (2, 'ABC123', 150); + UPDATE products SET price = 200 WHERE id = 1; +} {UNIQUE constraint failed: products.sku (19)} + +# Staying in predicate but changing key to a conflicting key should fail +do_execsql_test_in_memory_error_content partial-index-update-change-key-conflict { + CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); + CREATE UNIQUE INDEX idx_expensive ON products(sku) WHERE price > 100; + INSERT INTO products VALUES (1, 'ABC123', 150); + INSERT INTO products VALUES (2, 'XYZ789', 200); + UPDATE products SET sku = 'XYZ789' WHERE id = 1; +} {UNIQUE constraint failed: products.sku (19)} + +# Exiting predicate via UPDATE should remove index entry; then re-entering later may fail +do_execsql_test_in_memory_error_content partial-index-update-exit-then-reenter { + CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); + CREATE UNIQUE INDEX idx_expensive ON products(sku) WHERE price > 100; + INSERT INTO products VALUES (1, 'ABC123', 150); + UPDATE products SET price = 50 WHERE id = 1; + INSERT INTO products VALUES (2, 'ABC123', 200); + UPDATE products SET price = 300 WHERE id = 1; +} {UNIQUE constraint failed: products.sku (19)} + +# Multi-row UPDATE causing multiple rows to enter predicate together should conflict +do_execsql_test_in_memory_error_content partial-index-update-multirow-conflict { + CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); + CREATE UNIQUE INDEX idx_expensive ON products(sku) WHERE price > 100; + INSERT INTO products VALUES (1, 'ABC123', 50); + INSERT INTO products VALUES (2, 'ABC123', 150); + INSERT INTO products VALUES (3, 'ABC123', 75); + UPDATE products SET price = 150 WHERE sku = 'ABC123'; +} {UNIQUE constraint failed: products.sku (19)} + +# Update of unrelated columns should not affect partial index membership 
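+# (the predicate reads only `status`, so an UPDATE of `note` must leave the index entries untouched)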
+do_execsql_test_on_specific_db {:memory:} partial-index-update-unrelated-column { + CREATE TABLE users (id INTEGER PRIMARY KEY, email TEXT, status TEXT, note TEXT); + CREATE UNIQUE INDEX idx_active_email ON users(email) WHERE status = 'active'; + INSERT INTO users VALUES (1, 'u@test.com', 'active', 'n1'); + INSERT INTO users VALUES (2, 'u@test.com', 'inactive','n2'); + UPDATE users SET note = 'changed' WHERE id = 2; + SELECT id,email,status,note FROM users ORDER BY id; +} {1|u@test.com|active|n1 +2|u@test.com|inactive|changed} + +# NULL -> NOT NULL transition enters predicate and may conflict +do_execsql_test_in_memory_error_content partial-index-update-null-enters-conflict { + CREATE TABLE items (id INTEGER PRIMARY KEY, code TEXT, category TEXT); + CREATE UNIQUE INDEX idx_categorized ON items(code) WHERE category IS NOT NULL; + INSERT INTO items VALUES (1,'CODE1','electronics'); + INSERT INTO items VALUES (2,'CODE1',NULL); + UPDATE items SET category = 'x' WHERE id = 2; +} {UNIQUE constraint failed: items.code (19)} + +# Function predicate: UPDATE causes entry into predicate -> conflict +do_execsql_test_in_memory_error_content partial-index-update-function-enters { + CREATE TABLE docs (id INTEGER PRIMARY KEY, title TEXT); + CREATE UNIQUE INDEX idx_lower_title ON docs(title) WHERE LOWER(title) = title; + INSERT INTO docs VALUES (1, 'lowercase'); + INSERT INTO docs VALUES (2, 'UPPERCASE'); + UPDATE docs SET title = 'lowercase' WHERE id = 2; +} {UNIQUE constraint failed: docs.title (19)} + +# Multi-column unique key with partial predicate: conflict on UPDATE entering predicate +do_execsql_test_in_memory_error_content partial-index-update-multicol-enter-conflict { + CREATE TABLE inv (id INTEGER PRIMARY KEY, sku TEXT, region TEXT, price INT); + CREATE UNIQUE INDEX idx_sr ON inv(sku,region) WHERE price > 100; + INSERT INTO inv VALUES (1,'A','US', 50); + INSERT INTO inv VALUES (2,'A','US',150); + INSERT INTO inv VALUES (3,'A','EU',150); + UPDATE inv SET price = 200 WHERE id = 1; +} {UNIQUE constraint failed: inv.sku, inv.region (19)} + +# Staying in predicate but changing second key part to collide should fail +do_execsql_test_in_memory_error_content partial-index-update-multicol-change-second { + CREATE TABLE inv2 (id INTEGER PRIMARY KEY, sku TEXT, region TEXT, price INT); + CREATE UNIQUE INDEX idx_sr2 ON inv2(sku,region) WHERE price > 100; + INSERT INTO inv2 VALUES (1,'A','US',150); + INSERT INTO inv2 VALUES (2,'A','EU',150); + UPDATE inv2 SET region = 'US' WHERE id = 2; +} {UNIQUE constraint failed: inv2.sku, inv2.region (19)} + +# UPDATE that leaves predicate and then changes key should be allowed, then re-entering may fail +do_execsql_test_in_memory_error_content partial-index-update-exit-change-key-reenter { + CREATE TABLE t (id INTEGER PRIMARY KEY, a TEXT, b INT); + CREATE UNIQUE INDEX idx_a ON t(a) WHERE b > 0; + INSERT INTO t VALUES (1,'K', 10); + INSERT INTO t VALUES (2,'X', 10); + UPDATE t SET b = 0 WHERE id = 1; + UPDATE t SET a = 'X' WHERE id = 1; + UPDATE t SET b = 5 WHERE id = 1; +} {UNIQUE constraint failed: t.a (19)} + +# Rowid (INTEGER PRIMARY KEY) change while in predicate should not self-conflict +do_execsql_test_on_specific_db {:memory:} partial-index-update-rowid-no-self-conflict { + CREATE TABLE rowid_test (id INTEGER PRIMARY KEY, val TEXT, flag INT); + CREATE UNIQUE INDEX idx_flagged ON rowid_test(val) WHERE flag = 1; + INSERT INTO rowid_test VALUES (1,'v',1); + UPDATE rowid_test SET id = 9 WHERE id = 1; + SELECT id,val,flag FROM rowid_test ORDER BY id; +} {9|v|1} + +# 
Batch UPDATE that toggles predicate truth for multiple rows; ensure net uniqueness is enforced +do_execsql_test_in_memory_error_content partial-index-update-batch-crossing { + CREATE TABLE p (id INTEGER PRIMARY KEY, k TEXT, x INT); + CREATE UNIQUE INDEX idx_k ON p(k) WHERE x > 0; + INSERT INTO p VALUES (1,'A', 1); + INSERT INTO p VALUES (2,'A', 0); + INSERT INTO p VALUES (3,'A', 0); + UPDATE p SET x = CASE id WHEN 1 THEN 0 ELSE 1 END; +} {UNIQUE constraint failed: p.k (19)} + +# UPDATE with WHERE predicate true, but changing to a unique new key while staying in predicate +do_execsql_test_on_specific_db {:memory:} partial-index-update-stay-in-predicate-change-to-unique { + CREATE TABLE q (id INTEGER PRIMARY KEY, k TEXT, x INT); + CREATE UNIQUE INDEX idx_kx ON q(k) WHERE x > 0; + INSERT INTO q VALUES (1,'A',1); + INSERT INTO q VALUES (2,'B',1); + UPDATE q SET k='C' WHERE id=1; -- stays in predicate, key now unique + SELECT id,k,x FROM q ORDER BY id; +} {1|C|1 +2|B|1} + +do_execsql_test_in_memory_error_content partial-index-update-only-predicate-col-error { + CREATE TABLE r2 (id INTEGER PRIMARY KEY, k TEXT, x INT); + CREATE UNIQUE INDEX idx_k ON r2(k) WHERE x > 0; + INSERT INTO r2 VALUES (1,'A',0); + INSERT INTO r2 VALUES (2,'A',1); + UPDATE r2 SET x = 1 WHERE id = 1; +} {UNIQUE constraint failed: r2.k (19)} From 6d8bf009f1ae2e2bc43248fbc144d80adccd8823 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 19 Sep 2025 23:04:51 -0400 Subject: [PATCH 36/78] Add some comments --- core/translate/emitter.rs | 23 ++++++++++++++++------- core/translate/update.rs | 11 ++++++----- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 8e3e2c81e..67c8695de 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -1194,6 +1194,7 @@ fn emit_update_insns( extra_amount: 0, }); } + // last register is the rowid program.emit_insn(Insn::Copy { src_reg: rowid_reg, dst_reg: idx_start_reg + num_cols, @@ -1209,8 +1210,9 @@ fn emit_update_insns( }); // Handle unique constraint - if index.unique { + if !index.unique { let constraint_check = program.allocate_label(); + // check if the record already exists in the index for unique indexes and abort if so program.emit_insn(Insn::NoConflict { cursor_id: *idx_cursor_id, target_pc: constraint_check, @@ -1224,6 +1226,7 @@ fn emit_update_insns( dest: idx_rowid_reg, }); + // Skip over the UNIQUE constraint failure if the existing row is the one that we are currently changing program.emit_insn(Insn::Eq { lhs: beg, rhs: idx_rowid_reg, @@ -1232,12 +1235,18 @@ fn emit_update_insns( collation: program.curr_collation(), }); - let column_names = index - .columns - .iter() - .map(|c| format!("{}.{}", table_ref.table.get_name(), c.name)) - .collect::>() - .join(", "); + let column_names = index.columns.iter().enumerate().fold( + String::with_capacity(50), + |mut accum, (idx, col)| { + if idx > 0 { + accum.push_str(", "); + } + accum.push_str(table_ref.table.get_name()); + accum.push('.'); + accum.push_str(&col.name); + accum + }, + ); program.emit_insn(Insn::Halt { err_code: SQLITE_CONSTRAINT_PRIMARYKEY, diff --git a/core/translate/update.rs b/core/translate/update.rs index 14d5f46d2..8f565b630 100644 --- a/core/translate/update.rs +++ b/core/translate/update.rs @@ -379,7 +379,8 @@ pub fn prepare_update_plan( // If the rowid alias is used in the SET clause, we need to update all indexes indexes.to_vec() } else { - // otherwise we need to update the indexes whose columns are set in the SET clause + // 
otherwise we need to update the indexes whose columns are set in the SET clause,
+        // or if the columns used in the partial index WHERE clause are being updated
         indexes
             .iter()
             .filter_map(|idx| {
@@ -390,20 +391,20 @@ pub fn prepare_update_plan(
 
                 if !needs {
                     if let Some(w) = &idx.where_clause {
-                        // Bind once so names→positions are resolved exactly like execution.
                         let mut where_copy = w.as_ref().clone();
                         let mut param = ParamState::disallow();
                         let mut tr =
                             TableReferences::new(table_references.joined_tables().to_vec(), vec![]);
-                        bind_and_rewrite_expr(
+                        let _ = bind_and_rewrite_expr(
                             &mut where_copy,
                             Some(&mut tr),
                             None,
                             connection,
                             &mut param,
-                        )
-                        .expect("bind where");
+                        );
                         let cols_used = collect_cols_used_in_expr(&where_copy);
+                        // if any of the columns used in the partial index WHERE clause is being
+                        // updated, we need to update this index
                         needs = cols_used.iter().any(|c| updated_cols.contains(c));
                     }
                 }

From f4258b8b08ee996608dc250e94c9f170f757665d Mon Sep 17 00:00:00 2001
From: PThorpe92 
Date: Fri, 19 Sep 2025 23:34:40 -0400
Subject: [PATCH 37/78] Just use raw pointer instead of cloning JoinedTable in
 emitter

---
 core/translate/emitter.rs | 140 ++++++++++++++++++--------------------
 1 file changed, 65 insertions(+), 75 deletions(-)

diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs
index 67c8695de..4c7e412e2 100644
--- a/core/translate/emitter.rs
+++ b/core/translate/emitter.rs
@@ -28,7 +28,7 @@ use crate::translate::expr::{
     bind_and_rewrite_expr, emit_returning_results, walk_expr_mut, ParamState,
     ReturningValueRegisters, WalkControl,
 };
-use crate::translate::plan::{DeletePlan, Plan, QueryDestination, Search};
+use crate::translate::plan::{DeletePlan, JoinedTable, Plan, QueryDestination, Search};
 use crate::translate::result_row::try_fold_expr_to_i64;
 use crate::translate::values::emit_values;
 use crate::translate::window::{emit_window_results, init_window, WindowMetadata};
@@ -499,32 +499,29 @@ fn emit_delete_insns(
     table_references: &mut TableReferences,
     result_columns: &[super::plan::ResultSetColumn],
 ) -> Result<()> {
-    let table_reference = table_references.joined_tables().first().unwrap().clone();
-    if table_reference
+    // we can either use this obviously safe raw pointer or we can clone it
+    let table_reference: *const JoinedTable = table_references.joined_tables().first().unwrap();
+    if unsafe { &*table_reference }
         .virtual_table()
         .is_some_and(|t| t.readonly())
     {
         return Err(crate::LimboError::ReadOnly);
     }
+    let internal_id = unsafe { (*table_reference).internal_id };
 
-    let cursor_id = match &table_reference.op {
-        Operation::Scan { .. } => {
-            program.resolve_cursor_id(&CursorKey::table(table_reference.internal_id))
-        }
+    let table_name = unsafe { &*table_reference }.table.get_name();
+    let cursor_id = match unsafe { &(*table_reference).op } {
+        Operation::Scan { .. } => program.resolve_cursor_id(&CursorKey::table(internal_id)),
         Operation::Search(search) => match search {
             Search::RowidEq { .. } | Search::Seek { index: None, .. } => {
-                program.resolve_cursor_id(&CursorKey::table(table_reference.internal_id))
+                program.resolve_cursor_id(&CursorKey::table(internal_id))
             }
             Search::Seek {
                 index: Some(index), ..
- } => program.resolve_cursor_id(&CursorKey::index( - table_reference.internal_id, - index.clone(), - )), + } => program.resolve_cursor_id(&CursorKey::index(internal_id, index.clone())), }, }; - let main_table_cursor_id = - program.resolve_cursor_id(&CursorKey::table(table_reference.internal_id)); + let main_table_cursor_id = program.resolve_cursor_id(&CursorKey::table(internal_id)); // Emit the instructions to delete the row let key_reg = program.alloc_register(); @@ -533,7 +530,7 @@ fn emit_delete_insns( dest: key_reg, }); - if table_reference.virtual_table().is_some() { + if unsafe { &*table_reference }.virtual_table().is_some() { let conflict_action = 0u16; let start_reg = key_reg; @@ -550,14 +547,10 @@ fn emit_delete_insns( }); } else { // Delete from all indexes before deleting from the main table. - let indexes = t_ctx - .resolver - .schema - .indexes - .get(table_reference.table.get_name()); + let indexes = t_ctx.resolver.schema.indexes.get(table_name); // Get the index that is being used to iterate the deletion loop, if there is one. - let iteration_index = table_reference.op.index(); + let iteration_index = unsafe { &*table_reference }.op.index(); // Get all indexes that are not the iteration index. let other_indexes = indexes .map(|indexes| { @@ -571,10 +564,8 @@ fn emit_delete_insns( .map(|index| { ( index.clone(), - program.resolve_cursor_id(&CursorKey::index( - table_reference.internal_id, - index.clone(), - )), + program + .resolve_cursor_id(&CursorKey::index(internal_id, index.clone())), ) }) .collect::>() @@ -651,7 +642,7 @@ fn emit_delete_insns( let before_record_reg = if cdc_has_before { Some(emit_cdc_full_record( program, - table_reference.table.columns(), + unsafe { &*table_reference }.table.columns(), main_table_cursor_id, rowid_reg, )) @@ -667,7 +658,7 @@ fn emit_delete_insns( before_record_reg, None, None, - table_reference.table.get_name(), + table_name, )?; } @@ -679,12 +670,13 @@ fn emit_delete_insns( cursor_id: main_table_cursor_id, dest: rowid_reg, }); + let cols_len = unsafe { &*table_reference }.columns().len(); // Allocate registers for column values - let columns_start_reg = program.alloc_registers(table_reference.columns().len()); + let columns_start_reg = program.alloc_registers(cols_len); // Read all column values from the row to be deleted - for (i, _column) in table_reference.columns().iter().enumerate() { + for (i, _column) in unsafe { &*table_reference }.columns().iter().enumerate() { program.emit_column_or_rowid(main_table_cursor_id, i, columns_start_reg + i); } @@ -692,7 +684,7 @@ fn emit_delete_insns( let value_registers = ReturningValueRegisters { rowid_register: rowid_reg, columns_start_register: columns_start_reg, - num_columns: table_reference.columns().len(), + num_columns: cols_len, }; emit_returning_results(program, result_columns, &value_registers)?; @@ -700,14 +692,12 @@ fn emit_delete_insns( program.emit_insn(Insn::Delete { cursor_id: main_table_cursor_id, - table_name: table_reference.table.get_name().to_string(), + table_name: table_name.to_string(), }); if let Some(index) = iteration_index { - let iteration_index_cursor = program.resolve_cursor_id(&CursorKey::index( - table_reference.internal_id, - index.clone(), - )); + let iteration_index_cursor = + program.resolve_cursor_id(&CursorKey::index(internal_id, index.clone())); program.emit_insn(Insn::Delete { cursor_id: iteration_index_cursor, table_name: index.name.clone(), @@ -858,26 +848,22 @@ fn emit_update_insns( index_cursors: Vec<(usize, usize)>, temp_cursor_id: Option, ) -> 
crate::Result<()> { - let table_ref = plan - .table_references - .joined_tables() - .first() - .unwrap() - .clone(); + // we can either use this obviously safe raw pointer or we can clone it + let table_ref: *const JoinedTable = plan.table_references.joined_tables().first().unwrap(); + let internal_id = unsafe { (*table_ref).internal_id }; let loop_labels = t_ctx.labels_main_loop.first().unwrap(); - let cursor_id = program.resolve_cursor_id(&CursorKey::table(table_ref.internal_id)); - let (index, is_virtual) = match &table_ref.op { + let cursor_id = program.resolve_cursor_id(&CursorKey::table(internal_id)); + let (index, is_virtual) = match &unsafe { &*table_ref }.op { Operation::Scan(Scan::BTreeTable { index, .. }) => ( index.as_ref().map(|index| { ( index.clone(), - program - .resolve_cursor_id(&CursorKey::index(table_ref.internal_id, index.clone())), + program.resolve_cursor_id(&CursorKey::index(internal_id, index.clone())), ) }), false, ), - Operation::Scan(_) => (None, table_ref.virtual_table().is_some()), + Operation::Scan(_) => (None, unsafe { &*table_ref }.virtual_table().is_some()), Operation::Search(search) => match search { &Search::RowidEq { .. } | Search::Seek { index: None, .. } => (None, false), Search::Seek { @@ -885,8 +871,7 @@ fn emit_update_insns( } => ( Some(( index.clone(), - program - .resolve_cursor_id(&CursorKey::index(table_ref.internal_id, index.clone())), + program.resolve_cursor_id(&CursorKey::index(internal_id, index.clone())), )), false, ), @@ -894,7 +879,7 @@ fn emit_update_insns( }; let beg = program.alloc_registers( - table_ref.table.columns().len() + unsafe { &*table_ref }.table.columns().len() + if is_virtual { 2 // two args before the relevant columns for VUpdate } else { @@ -907,7 +892,10 @@ fn emit_update_insns( }); // Check if rowid was provided (through INTEGER PRIMARY KEY as a rowid alias) - let rowid_alias_index = table_ref.columns().iter().position(|c| c.is_rowid_alias); + let rowid_alias_index = unsafe { &*table_ref } + .columns() + .iter() + .position(|c| c.is_rowid_alias); let has_user_provided_rowid = if let Some(index) = rowid_alias_index { plan.set_clauses.iter().position(|(idx, _)| *idx == index) @@ -957,6 +945,7 @@ fn emit_update_insns( decrement_by: 1, }); } + let col_len = unsafe { &*table_ref }.columns().len(); // we scan a column at a time, loading either the column's values, or the new value // from the Set expression, into registers so we can emit a MakeRecord and update the row. 
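    // Register layout, for reference: with a regular table the rowid lands in `beg`
    // and the C column values in `beg + 1 ..= beg + C`; with a virtual table, VUpdate
    // takes two leading argument registers, so the column values start at `beg + 2`.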
@@ -964,13 +953,14 @@ fn emit_update_insns( // we allocate 2C registers for "updates" as the structure of this column for CDC table is following: // [C boolean values where true set for changed columns] [C values with updates where NULL is set for not-changed columns] let cdc_updates_register = if program.capture_data_changes_mode().has_updates() { - Some(program.alloc_registers(2 * table_ref.columns().len())) + Some(program.alloc_registers(2 * col_len)) } else { None }; + let table_name = unsafe { &*table_ref }.table.get_name(); let start = if is_virtual { beg + 2 } else { beg + 1 }; - for (idx, table_column) in table_ref.columns().iter().enumerate() { + for (idx, table_column) in unsafe { &*table_ref }.columns().iter().enumerate() { let target_reg = start + idx; if let Some((_, expr)) = plan.set_clauses.iter().find(|(i, _)| *i == idx) { if has_user_provided_rowid @@ -1006,7 +996,7 @@ fn emit_update_insns( err_code: SQLITE_CONSTRAINT_NOTNULL, description: format!( "{}.{}", - table_ref.table.get_name(), + table_name, table_column .name .as_ref() @@ -1018,7 +1008,7 @@ fn emit_update_insns( if let Some(cdc_updates_register) = cdc_updates_register { let change_reg = cdc_updates_register + idx; - let value_reg = cdc_updates_register + table_ref.columns().len() + idx; + let value_reg = cdc_updates_register + col_len + idx; program.emit_bool(true, change_reg); program.mark_last_insn_constant(); let mut updated = false; @@ -1073,7 +1063,7 @@ fn emit_update_insns( if let Some(cdc_updates_register) = cdc_updates_register { let change_bit_reg = cdc_updates_register + idx; - let value_reg = cdc_updates_register + table_ref.columns().len() + idx; + let value_reg = cdc_updates_register + col_len + idx; program.emit_bool(false, change_bit_reg); program.mark_last_insn_constant(); program.emit_null(value_reg, None); @@ -1106,7 +1096,7 @@ fn emit_update_insns( let mut new_where = where_clause.as_ref().clone(); rewrite_where_for_update_registers( &mut new_where, - table_ref.columns(), + unsafe { &*table_ref }.columns(), start, rowid_set_clause_reg.unwrap_or(beg), )?; @@ -1180,7 +1170,7 @@ fn emit_update_insns( let rowid_reg = rowid_set_clause_reg.unwrap_or(beg); for (i, col) in index.columns.iter().enumerate() { - let col_in_table = table_ref + let col_in_table = unsafe { &*table_ref } .columns() .get(col.pos_in_table) .expect("column index out of bounds"); @@ -1241,7 +1231,7 @@ fn emit_update_insns( if idx > 0 { accum.push_str(", "); } - accum.push_str(table_ref.table.get_name()); + accum.push_str(table_name); accum.push('.'); accum.push_str(&col.name); accum @@ -1271,11 +1261,11 @@ fn emit_update_insns( } } - if let Some(btree_table) = table_ref.btree() { + if let Some(btree_table) = unsafe { &*table_ref }.btree() { if btree_table.is_strict { program.emit_insn(Insn::TypeCheck { start_reg: start, - count: table_ref.columns().len(), + count: col_len, check_generated: true, table_reference: Arc::clone(&btree_table), }); @@ -1303,8 +1293,8 @@ fn emit_update_insns( err_code: SQLITE_CONSTRAINT_PRIMARYKEY, description: format!( "{}.{}", - table_ref.table.get_name(), - &table_ref + table_name, + unsafe { &*table_ref } .columns() .get(idx) .unwrap() @@ -1319,7 +1309,7 @@ fn emit_update_insns( let record_reg = program.alloc_register(); - let affinity_str = table_ref + let affinity_str = unsafe { &*table_ref } .columns() .iter() .map(|col| col.affinity().aff_mask()) @@ -1327,7 +1317,7 @@ fn emit_update_insns( program.emit_insn(Insn::MakeRecord { start_reg: start, - count: table_ref.columns().len(), + count: col_len, 
dest_reg: record_reg, index_name: None, affinity_str: Some(affinity_str), @@ -1364,7 +1354,7 @@ fn emit_update_insns( let cdc_before_reg = if program.capture_data_changes_mode().has_before() { Some(emit_cdc_full_record( program, - table_ref.table.columns(), + unsafe { &*table_ref }.table.columns(), cursor_id, cdc_rowid_before_reg.expect("cdc_rowid_before_reg must be set"), )) @@ -1378,7 +1368,7 @@ fn emit_update_insns( if has_user_provided_rowid { program.emit_insn(Insn::Delete { cursor_id, - table_name: table_ref.table.get_name().to_string(), + table_name: table_name.to_string(), }); } @@ -1393,7 +1383,7 @@ fn emit_update_insns( } else { InsertFlags::new() }, - table_name: table_ref.identifier.clone(), + table_name: unsafe { &*table_ref }.identifier.clone(), }); // Emit RETURNING results if specified @@ -1402,7 +1392,7 @@ fn emit_update_insns( let value_registers = ReturningValueRegisters { rowid_register: rowid_set_clause_reg.unwrap_or(beg), columns_start_register: start, - num_columns: table_ref.columns().len(), + num_columns: col_len, }; emit_returning_results(program, returning_columns, &value_registers)?; @@ -1413,7 +1403,7 @@ fn emit_update_insns( let cdc_after_reg = if program.capture_data_changes_mode().has_after() { Some(emit_cdc_patch_record( program, - &table_ref.table, + &unsafe { &*table_ref }.table, start, record_reg, cdc_rowid_after_reg, @@ -1426,7 +1416,7 @@ fn emit_update_insns( let record_reg = program.alloc_register(); program.emit_insn(Insn::MakeRecord { start_reg: cdc_updates_register, - count: 2 * table_ref.columns().len(), + count: 2 * col_len, dest_reg: record_reg, index_name: None, affinity_str: None, @@ -1450,7 +1440,7 @@ fn emit_update_insns( cdc_before_reg, None, None, - table_ref.table.get_name(), + table_name, )?; emit_cdc_insns( program, @@ -1461,7 +1451,7 @@ fn emit_update_insns( cdc_after_reg, None, None, - table_ref.table.get_name(), + table_name, )?; } else { emit_cdc_insns( @@ -1473,12 +1463,12 @@ fn emit_update_insns( cdc_before_reg, cdc_after_reg, cdc_updates_record, - table_ref.table.get_name(), + table_name, )?; } } - } else if table_ref.virtual_table().is_some() { - let arg_count = table_ref.columns().len() + 2; + } else if unsafe { &*table_ref }.virtual_table().is_some() { + let arg_count = col_len + 2; program.emit_insn(Insn::VUpdate { cursor_id, arg_count, From 21f6455190a09516aa6e6a19841b8ff0f5317459 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 19 Sep 2025 23:40:38 -0400 Subject: [PATCH 38/78] Fix clippy warnings and tests --- core/storage/btree.rs | 2 ++ core/translate/emitter.rs | 4 ++-- core/translate/optimizer/join.rs | 8 ++++++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 21806cb27..67a6d0dfa 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -8565,6 +8565,7 @@ mod tests { .unwrap() as usize; let index_def = Index { name: "testindex".to_string(), + where_clause: None, columns: (0..10) .map(|i| IndexColumn { name: format!("test{i}"), @@ -8726,6 +8727,7 @@ mod tests { .unwrap() as usize; let index_def = Index { name: "testindex".to_string(), + where_clause: None, columns: vec![IndexColumn { name: "testcol".to_string(), order: SortOrder::Asc, diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 4c7e412e2..1a30f3913 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -1778,7 +1778,7 @@ fn rewrite_where_for_update_registers( if let Some((idx, _)) = columns.iter().enumerate().find(|(_, c)| { c.name .as_ref() - 
.map_or(false, |n| n.eq_ignore_ascii_case(&normalized)) + .is_some_and(|n| n.eq_ignore_ascii_case(&normalized)) }) { *e = Expr::Register(columns_start_reg + idx); } @@ -1790,7 +1790,7 @@ fn rewrite_where_for_update_registers( } else if let Some((idx, _)) = columns.iter().enumerate().find(|(_, c)| { c.name .as_ref() - .map_or(false, |n| n.eq_ignore_ascii_case(&normalized)) + .is_some_and(|n| n.eq_ignore_ascii_case(&normalized)) }) { *e = Expr::Register(columns_start_reg + idx); } diff --git a/core/translate/optimizer/join.rs b/core/translate/optimizer/join.rs index b4e85b7d1..81b6e93ef 100644 --- a/core/translate/optimizer/join.rs +++ b/core/translate/optimizer/join.rs @@ -664,6 +664,7 @@ mod tests { let index = Arc::new(Index { name: "sqlite_autoindex_test_table_1".to_string(), table_name: "test_table".to_string(), + where_clause: None, columns: vec![IndexColumn { name: "id".to_string(), order: SortOrder::Asc, @@ -733,6 +734,7 @@ mod tests { let index1 = Arc::new(Index { name: "index1".to_string(), table_name: "table1".to_string(), + where_clause: None, columns: vec![IndexColumn { name: "id".to_string(), order: SortOrder::Asc, @@ -849,6 +851,7 @@ mod tests { let index_name = format!("sqlite_autoindex_{table_name}_1"); let index = Arc::new(Index { name: index_name, + where_clause: None, table_name: table_name.to_string(), columns: vec![IndexColumn { name: "id".to_string(), @@ -867,6 +870,7 @@ mod tests { let customer_id_idx = Arc::new(Index { name: "orders_customer_id_idx".to_string(), table_name: "orders".to_string(), + where_clause: None, columns: vec![IndexColumn { name: "customer_id".to_string(), order: SortOrder::Asc, @@ -882,6 +886,7 @@ mod tests { let order_id_idx = Arc::new(Index { name: "order_items_order_id_idx".to_string(), table_name: "order_items".to_string(), + where_clause: None, columns: vec![IndexColumn { name: "order_id".to_string(), order: SortOrder::Asc, @@ -1295,6 +1300,7 @@ mod tests { let index = Arc::new(Index { name: "idx_xy".to_string(), table_name: "t1".to_string(), + where_clause: None, columns: vec![ IndexColumn { name: "x".to_string(), @@ -1381,6 +1387,7 @@ mod tests { let index = Arc::new(Index { name: "idx1".to_string(), table_name: "t1".to_string(), + where_clause: None, columns: vec![ IndexColumn { name: "c1".to_string(), @@ -1492,6 +1499,7 @@ mod tests { let index = Arc::new(Index { name: "idx1".to_string(), table_name: "t1".to_string(), + where_clause: None, columns: vec![ IndexColumn { name: "c1".to_string(), From 340b95aa8b3fd37d534b16b988839cfd1bf02853 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 20 Sep 2025 14:34:48 -0400 Subject: [PATCH 39/78] Apply PR review suggestions, add comments to partial indexes --- core/translate/emitter.rs | 22 ++++++++++++++++++++-- core/translate/index.rs | 21 +++++++++++---------- core/translate/update.rs | 7 +++++-- 3 files changed, 36 insertions(+), 14 deletions(-) diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 1a30f3913..51b480daf 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -1073,10 +1073,16 @@ fn emit_update_insns( } for (index, (idx_cursor_id, record_reg)) in plan.indexes_to_update.iter().zip(&index_cursors) { + // We need to know whether or not the OLD values satisfied the predicate on the + // partial index, so we can know whether or not to delete the old index entry, + // as well as whether or not the NEW values satisfy the predicate, to determine whether + // or not to insert a new index entry for a partial index let (old_satisfies_where, 
new_satisfies_where) = if let Some(where_clause) = &index.where_clause { let mut where_copy = where_clause.as_ref().clone(); let mut param_state = ParamState::disallow(); + // This means that we need to bind the column references to a copy of the index Expr, + // so we can emit Insn::Column instructions and refer to the old values. bind_and_rewrite_expr( &mut where_copy, Some(&mut plan.table_references), @@ -1094,6 +1100,9 @@ fn emit_update_insns( )?; let mut new_where = where_clause.as_ref().clone(); + // Now we need to rewrite the Expr::Id and Expr::Qualified/Expr::RowID (from a copy of the original, un-bound `where` expr), + // to refer to the new values, which are already loaded into registers starting at + // `start`. rewrite_where_for_update_registers( &mut new_where, unsafe { &*table_ref }.columns(), @@ -1110,6 +1119,9 @@ fn emit_update_insns( &t_ctx.resolver, )?; + // now we have two registers that tell us whether or not the old and new values satisfy + // the partial index predicate, and we can use those to decide whether or not to + // delete/insert a new index entry for this partial index. (Some(old_satisfied_reg), Some(new_satisfied_reg)) } else { (None, None) @@ -1121,6 +1133,7 @@ fn emit_update_insns( // Handle deletion for partial indexes if let Some(old_satisfied) = old_satisfies_where { skip_delete_label = Some(program.allocate_label()); + // If the old values don't satisfy the WHERE clause, skip the delete program.emit_insn(Insn::IfNot { reg: old_satisfied, target_pc: skip_delete_label.unwrap(), @@ -1146,7 +1159,7 @@ fn emit_update_insns( start_reg: delete_start_reg, num_regs, cursor_id: *idx_cursor_id, - raise_error_if_no_matching_entry: old_satisfies_where.is_none(), + raise_error_if_no_matching_entry: true, }); // Resolve delete skip label if it exists @@ -1157,6 +1170,7 @@ fn emit_update_insns( // Check if we should insert into partial index if let Some(new_satisfied) = new_satisfies_where { skip_insert_label = Some(program.allocate_label()); + // If the new values don't satisfy the WHERE clause, skip the idx insert program.emit_insn(Insn::IfNot { reg: new_satisfied, target_pc: skip_insert_label.unwrap(), @@ -1200,7 +1214,7 @@ fn emit_update_insns( }); // Handle unique constraint - if !index.unique { + if index.unique { let constraint_check = program.allocate_label(); // check if the record already exists in the index for unique indexes and abort if so program.emit_insn(Insn::NoConflict { @@ -1765,6 +1779,10 @@ fn init_limit( } } +/// We have `Expr`s which have *not* had column references bound to them, +/// so they are in the state of Expr::Id/Expr::Qualified, etc, and instead of binding Expr::Column +/// we need to bind Expr::Register, as we have already loaded the *new* column values from the +/// UPDATE statement into registers starting at `columns_start_reg`, which we want to reference. 
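+/// E.g. with an index predicate `price > 100` and `UPDATE products SET price = 300`,
+/// the `price` reference becomes `Expr::Register(columns_start_reg + i)`, where `i`
+/// is the column's position in the table, so the predicate sees the new value.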
fn rewrite_where_for_update_registers( expr: &mut Expr, columns: &[Column], diff --git a/core/translate/index.rs b/core/translate/index.rs index b25a08882..03e4ac41c 100644 --- a/core/translate/index.rs +++ b/core/translate/index.rs @@ -4,12 +4,15 @@ use crate::schema::Table; use crate::translate::emitter::{ emit_cdc_full_record, emit_cdc_insns, prepare_cdc_if_necessary, OperationMode, Resolver, }; -use crate::translate::expr::{bind_and_rewrite_expr, translate_expr, ParamState}; +use crate::translate::expr::{ + bind_and_rewrite_expr, translate_condition_expr, translate_expr, ConditionMetadata, ParamState, +}; use crate::translate::plan::{ ColumnUsedMask, IterationDirection, JoinedTable, Operation, Scan, TableReferences, }; use crate::vdbe::builder::CursorKey; use crate::vdbe::insn::{CmpInsFlags, Cookie}; +use crate::vdbe::BranchOffset; use crate::SymbolTable; use crate::{ schema::{BTreeTable, Column, Index, IndexColumn, PseudoCursorType, Schema}, @@ -208,20 +211,18 @@ pub fn translate_create_index( // Then insert the record into the sorter let mut skip_row_label = None; if let Some(where_clause) = where_clause { - let reg = program.alloc_register(); let label = program.allocate_label(); - let pr = translate_expr( + translate_condition_expr( &mut program, - Some(&table_references), + &table_references, &where_clause, - reg, + ConditionMetadata { + jump_if_condition_is_true: false, + jump_target_when_false: label, + jump_target_when_true: BranchOffset::Placeholder, + }, &resolver, )?; - program.emit_insn(Insn::IfNot { - reg: pr, - target_pc: label, - jump_if_null: true, - }); skip_row_label = Some(label); } diff --git a/core/translate/update.rs b/core/translate/update.rs index 8f565b630..388e9e3df 100644 --- a/core/translate/update.rs +++ b/core/translate/update.rs @@ -395,13 +395,14 @@ pub fn prepare_update_plan( let mut param = ParamState::disallow(); let mut tr = TableReferences::new(table_references.joined_tables().to_vec(), vec![]); - let _ = bind_and_rewrite_expr( + bind_and_rewrite_expr( &mut where_copy, Some(&mut tr), None, connection, &mut param, - ); + ) + .ok()?; let cols_used = collect_cols_used_in_expr(&where_copy); // if any of the columns used in the partial index WHERE clause is being // updated, we need to update this index @@ -447,6 +448,8 @@ fn build_scan_op(table: &Table, iter_dir: IterationDirection) -> Operation { } } +/// Returns a set of column indices used in the expression. 
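+/// (specifically, the positions carried by bound `Expr::Column` nodes).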
+/// *Must* be used on an Expr already processed by `bind_and_rewrite_expr`
 fn collect_cols_used_in_expr(expr: &Expr) -> HashSet<usize> {
     let mut acc = HashSet::new();
     let _ = walk_expr(expr, &mut |expr| match expr {
         Expr::Column { column, ..

From 281344434fbc5db91c754ac13a47f35bf591b135 Mon Sep 17 00:00:00 2001
From: PThorpe92 
Date: Sat, 20 Sep 2025 14:38:42 -0400
Subject: [PATCH 40/78] Remove unused import

---
 core/translate/index.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/translate/index.rs b/core/translate/index.rs
index 03e4ac41c..875cedf3f 100644
--- a/core/translate/index.rs
+++ b/core/translate/index.rs
@@ -5,7 +5,7 @@ use crate::translate::emitter::{
     emit_cdc_full_record, emit_cdc_insns, prepare_cdc_if_necessary, OperationMode, Resolver,
 };
 use crate::translate::expr::{
-    bind_and_rewrite_expr, translate_condition_expr, translate_expr, ConditionMetadata, ParamState,
+    bind_and_rewrite_expr, translate_condition_expr, ConditionMetadata, ParamState,
 };
 use crate::translate::plan::{
     ColumnUsedMask, IterationDirection, JoinedTable, Operation, Scan, TableReferences,

From 6dc7d04c5ae7425883b036b7a8392a5cfc1a064a Mon Sep 17 00:00:00 2001
From: PThorpe92 
Date: Sat, 20 Sep 2025 15:02:06 -0400
Subject: [PATCH 41/78] Replace translate_expr with translate_condition_expr
 and fix constraint error

---
 core/translate/insert.rs | 52 ++++++++++++++++++++--------------------
 core/translate/plan.rs   |  6 +++++
 2 files changed, 32 insertions(+), 26 deletions(-)

diff --git a/core/translate/insert.rs b/core/translate/insert.rs
index 6b21798db..a8d6fd379 100644
--- a/core/translate/insert.rs
+++ b/core/translate/insert.rs
@@ -10,9 +10,11 @@ use crate::translate::emitter::{
     emit_cdc_insns, emit_cdc_patch_record, prepare_cdc_if_necessary, OperationMode,
 };
 use crate::translate::expr::{
-    bind_and_rewrite_expr, emit_returning_results, process_returning_clause, walk_expr_mut,
-    ParamState, ReturningValueRegisters, WalkControl,
+    bind_and_rewrite_expr, emit_returning_results, process_returning_clause,
+    translate_condition_expr, walk_expr_mut, ConditionMetadata, ParamState,
+    ReturningValueRegisters, WalkControl,
 };
+use crate::translate::plan::TableReferences;
 use crate::translate::planner::ROWID;
 use crate::translate::upsert::{
     collect_set_clauses_for_upsert, emit_upsert, upsert_matches_index, upsert_matches_pk,
@@ -422,17 +424,6 @@ pub fn translate_insert(
         });
     }
 
-    let emit_halt_with_constraint = |program: &mut ProgramBuilder, col_name: &str| {
-        let mut description = String::with_capacity(table_name.as_str().len() + col_name.len() + 2);
-        description.push_str(table_name.as_str());
-        description.push('.');
-        description.push_str(col_name);
-        program.emit_insn(Insn::Halt {
-            err_code: SQLITE_CONSTRAINT_PRIMARYKEY,
-            description,
-        });
-    };
-
     // Check uniqueness constraint for rowid if it was provided by user.
    // When the DB allocates it there is no need for separate uniqueness checks.
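    // (a freshly allocated rowid is chosen not to collide with any existing row)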
    if has_user_provided_rowid {
@@ -481,7 +472,15 @@ pub fn translate_insert(
                 break 'emit_halt;
             }
         }
-        emit_halt_with_constraint(&mut program, rowid_column_name);
+        let mut description =
+            String::with_capacity(table_name.as_str().len() + rowid_column_name.len() + 2);
+        description.push_str(table_name.as_str());
+        description.push('.');
+        description.push_str(rowid_column_name);
+        program.emit_insn(Insn::Halt {
+            err_code: SQLITE_CONSTRAINT_PRIMARYKEY,
+            description,
+        });
     }
     program.preassign_label_to_next_insn(make_record_label);
 }
@@ -516,21 +515,22 @@ pub fn translate_insert(
             rewrite_partial_index_where(&mut where_for_eval, &insertion)?;
 
             // Evaluate rewritten WHERE clause
-            let where_result_reg = program.alloc_register();
-            translate_expr(
+            let skip_label = program.allocate_label();
+            // We can use an empty TableReferences here because we shouldn't have any
+            // Expr::Column's in the partial index WHERE clause after rewriting it to use
+            // registers
+            let table_references = TableReferences::new_empty();
+            translate_condition_expr(
                 &mut program,
-                None,
+                &table_references,
                 &where_for_eval,
-                where_result_reg,
+                ConditionMetadata {
+                    jump_if_condition_is_true: false,
+                    jump_target_when_false: skip_label,
+                    jump_target_when_true: BranchOffset::Placeholder,
+                },
                 &resolver,
             )?;
-            // Skip index update if WHERE is false/null
-            let skip_label = program.allocate_label();
-            program.emit_insn(Insn::IfNot {
-                reg: where_result_reg,
-                target_pc: skip_label,
-                jump_if_null: true,
-            });
             Some(skip_label)
         } else {
             None
@@ -584,7 +584,7 @@ pub fn translate_insert(
                         if idx > 0 {
                             accum.push_str(", ");
                         }
-                        accum.push_str(&index.name);
+                        accum.push_str(table_name.as_str());
                         accum.push('.');
                         accum.push_str(&column.name);
                         accum
diff --git a/core/translate/plan.rs b/core/translate/plan.rs
index 861c87de9..5b309207d 100644
--- a/core/translate/plan.rs
+++ b/core/translate/plan.rs
@@ -577,6 +577,12 @@ impl TableReferences {
             outer_query_refs,
         }
     }
+    pub fn new_empty() -> Self {
+        Self {
+            joined_tables: Vec::new(),
+            outer_query_refs: Vec::new(),
+        }
+    }
 
     pub fn is_empty(&self) -> bool {
         self.joined_tables.is_empty() && self.outer_query_refs.is_empty()

From 0f771ecb5dd6c29fa240aba9bd4a88b9a2ef3579 Mon Sep 17 00:00:00 2001
From: PThorpe92 
Date: Sat, 20 Sep 2025 15:02:38 -0400
Subject: [PATCH 42/78] Fix tests to assert for UNIQUE constraint failed:
 table.col_name instead of idx name

---
 testing/partial_idx.test | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/testing/partial_idx.test b/testing/partial_idx.test
index c9208ea77..350717e69 100755
--- a/testing/partial_idx.test
+++ b/testing/partial_idx.test
@@ -21,7 +21,7 @@ do_execsql_test_in_memory_error_content partial-index-unique-violation {
     INSERT INTO users VALUES (2, 'user@test.com', 'inactive');
    INSERT INTO users VALUES (3, 'user@test.com', 'deleted');
    INSERT INTO users VALUES (4, 'user@test.com', 'active');
-} {UNIQUE constraint failed: idx_active_email.email (19)}
+} {UNIQUE constraint failed: users.email (19)}
 
 do_execsql_test_on_specific_db {:memory:} partial-index-expression-where {
    CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER);
@@ -45,7 +45,7 @@ do_execsql_test_in_memory_error_content partial-index-expensive-violation {
    INSERT INTO products VALUES (4, 'ABC123', 75);
    INSERT INTO products VALUES (5, 'ABC123', 250);
    -- should fail with unique sku where price > 100
-} {UNIQUE constraint failed: idx_expensive.sku (19)}
+} {UNIQUE constraint failed: products.sku (19)}
 
 do_execsql_test_in_memory_error_content
partial-index-expensive-violation-update { CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); @@ -78,7 +78,7 @@ do_execsql_test_in_memory_error_content partial-index-function-where { INSERT INTO docs VALUES (1, 'lowercase'); INSERT INTO docs VALUES (2, 'UPPERCASE'); INSERT INTO docs VALUES (3, 'lowercase'); -} {UNIQUE constraint failed: idx_lower_title.title (19)} +} {UNIQUE constraint failed: docs.title (19)} do_execsql_test_on_specific_db {:memory:} partial-index-multiple { CREATE TABLE tasks (id INTEGER PRIMARY KEY, name TEXT, priority INTEGER, status TEXT); @@ -107,7 +107,7 @@ do_execsql_test_in_memory_error_content partial-index-function-where { INSERT INTO tasks VALUES (4, 'task2', 1, 'done'); INSERT INTO tasks VALUES (5, 'task1', 1, 'pending'); -- should fail for unique name where priority = 1 -} {UNIQUE constraint failed: idx_urgent.name (19)} +} {UNIQUE constraint failed: tasks.name (19)} do_execsql_test_in_memory_error_content partial-index-function-where-2 { CREATE TABLE tasks (id INTEGER PRIMARY KEY, name TEXT, priority INTEGER, status TEXT); @@ -119,7 +119,7 @@ do_execsql_test_in_memory_error_content partial-index-function-where-2 { INSERT INTO tasks VALUES (4, 'task2', 1, 'done'); INSERT INTO tasks VALUES (6, 'task1', 2, 'done'); -- should fail for unique name where status = 'done' -} {UNIQUE constraint failed: idx_completed.name (19)} +} {UNIQUE constraint failed: tasks.name (19)} do_execsql_test_on_specific_db {:memory:} partial-index-update-rowid { CREATE TABLE rowid_test (id INTEGER PRIMARY KEY, val TEXT, flag INTEGER); @@ -138,7 +138,7 @@ do_execsql_test_in_memory_error_content partial-index-update-complex { INSERT INTO complex VALUES (2, 'dup', 15, 'inactive'); INSERT INTO complex VALUES (3, 'dup', 15, 'active'); INSERT INTO complex VALUES (4, 'dup', 20, 'active'); -} {UNIQUE constraint failed: idx_complex.a (19)} +} {UNIQUE constraint failed: complex.a (19)} do_execsql_test_on_specific_db {:memory:} partial-index-delete { CREATE TABLE products (id INTEGER PRIMARY KEY, sku TEXT, price INTEGER); @@ -180,7 +180,7 @@ do_execsql_test_in_memory_error_content partial-index-delete-all { -- Should be able to insert anything now INSERT INTO del_all VALUES (4, 'test', 1); INSERT INTO del_all VALUES (5, 'test', 1); -} {UNIQUE constraint failed: idx_all.val (19)} +} {UNIQUE constraint failed: del_all.val (19)} do_execsql_test_on_specific_db {:memory:} partial-index-delete-cascade-scenario { CREATE TABLE parent_del (id INTEGER PRIMARY KEY, status TEXT); From 1ed3fc52f7cdace43e676554f4a627bbc4e8ead6 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 20 Sep 2025 17:21:04 -0400 Subject: [PATCH 43/78] Add method to validate the Where Expr from a partial index --- core/schema.rs | 108 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 104 insertions(+), 4 deletions(-) diff --git a/core/schema.rs b/core/schema.rs index 49cacd67b..7e7fc953e 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1,4 +1,8 @@ +use crate::function::Func; use crate::incremental::view::IncrementalView; +use crate::translate::emitter::Resolver; +use crate::translate::expr::{bind_and_rewrite_expr, walk_expr, ParamState, WalkControl}; +use crate::translate::optimizer::Optimizable; use parking_lot::RwLock; /// Simple view structure for non-materialized views @@ -15,13 +19,13 @@ pub type ViewsMap = HashMap; use crate::storage::btree::BTreeCursor; use crate::translate::collate::CollationSeq; -use crate::translate::plan::SelectPlan; +use crate::translate::plan::{SelectPlan, 
TableReferences}; use crate::util::{ module_args_from_sql, module_name_from_sql, type_from_name, IOExt, UnparsedFromSqlIndex, }; use crate::{ - contains_ignore_ascii_case, eq_ignore_ascii_case, match_ignore_ascii_case, LimboError, - MvCursor, MvStore, Pager, RefValue, SymbolTable, VirtualTable, + contains_ignore_ascii_case, eq_ignore_ascii_case, match_ignore_ascii_case, Connection, + LimboError, MvCursor, MvStore, Pager, RefValue, SymbolTable, VirtualTable, }; use crate::{util::normalize_ident, Result}; use core::fmt; @@ -32,7 +36,7 @@ use std::sync::Mutex; use tracing::trace; use turso_parser::ast::{self, ColumnDefinition, Expr, Literal, SortOrder, TableOptions}; use turso_parser::{ - ast::{Cmd, CreateTableBody, ResultColumn, Stmt}, + ast::{Cmd, CreateTableBody, Name, ResultColumn, Stmt}, parser::Parser, }; @@ -1750,6 +1754,102 @@ impl Index { .iter() .position(|c| c.pos_in_table == table_pos) } + + /// Walk the where_clause Expr of a partial index and validate that it doesn't reference any other + /// tables or use any disallowed constructs. + pub fn validate_where_expr(&self, table: &Table) -> bool { + let Some(where_clause) = &self.where_clause else { + return true; + }; + + let tbl_norm = normalize_ident(self.table_name.as_str()); + let has_col = |name: &str| { + let n = normalize_ident(name); + table + .columns() + .iter() + .any(|c| c.name.as_ref().is_some_and(|cn| normalize_ident(cn) == n)) + }; + let is_tbl = |ns: &str| normalize_ident(ns).eq_ignore_ascii_case(&tbl_norm); + let is_deterministic_fn = |name: &str, argc: usize| { + let n = normalize_ident(name); + Func::resolve_function(&n, argc).is_ok_and(|f| f.is_deterministic()) + }; + + let mut ok = true; + let _ = walk_expr(where_clause.as_ref(), &mut |e: &Expr| -> crate::Result< + WalkControl, + > { + if !ok { + return Ok(WalkControl::SkipChildren); + } + match e { + Expr::Literal(_) | Expr::RowId { .. } => {} + // Unqualified identifier: must be a column of the target table or ROWID + Expr::Id(Name::Ident(n)) | Expr::Id(Name::Quoted(n)) => { + let n = n.as_str(); + if !n.eq_ignore_ascii_case("rowid") && !has_col(n) { + ok = false; + } + } + // Qualified: qualifier must match this index's table; column must exist + Expr::Qualified(ns, col) | Expr::DoublyQualified(_, ns, col) => { + if !is_tbl(ns.as_str()) || !has_col(col.as_str()) { + ok = false; + } + } + Expr::FunctionCall { + name, filter_over, .. + } + | Expr::FunctionCallStar { + name, filter_over, .. + } => { + // reject windowed + if filter_over.over_clause.is_some() { + ok = false; + } else { + let argc = match e { + Expr::FunctionCall { args, .. } => args.len(), + Expr::FunctionCallStar { .. } => 0, + _ => unreachable!(), + }; + if !is_deterministic_fn(name.as_str(), argc) { + ok = false; + } + } + } + // Explicitly disallowed constructs + Expr::Exists(_) + | Expr::InSelect { .. } + | Expr::Subquery(_) + | Expr::Raise { .. 
} + | Expr::Variable(_) => { + ok = false; + } + _ => {} + } + Ok(if ok { + WalkControl::Continue + } else { + WalkControl::SkipChildren + }) + }); + ok + } + + pub fn bind_where_expr( + &self, + table_refs: Option<&mut TableReferences>, + connection: &Arc, + ) -> Option { + let Some(where_clause) = &self.where_clause else { + return None; + }; + let mut params = ParamState::disallow(); + let mut expr = where_clause.clone(); + bind_and_rewrite_expr(&mut expr, table_refs, None, connection, &mut params).ok()?; + Some(*expr) + } } #[cfg(test)] From 421b5b7baed06c7ac84360ccbae121b0755ad255 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 20 Sep 2025 17:42:50 -0400 Subject: [PATCH 44/78] Use new `index.validate_where_expr` and `bind_where_expr` methods on index create --- core/translate/index.rs | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/core/translate/index.rs b/core/translate/index.rs index 875cedf3f..88e90f92b 100644 --- a/core/translate/index.rs +++ b/core/translate/index.rs @@ -4,9 +4,7 @@ use crate::schema::Table; use crate::translate::emitter::{ emit_cdc_full_record, emit_cdc_insns, prepare_cdc_if_necessary, OperationMode, Resolver, }; -use crate::translate::expr::{ - bind_and_rewrite_expr, translate_condition_expr, ConditionMetadata, ParamState, -}; +use crate::translate::expr::{translate_condition_expr, ConditionMetadata}; use crate::translate::plan::{ ColumnUsedMask, IterationDirection, JoinedTable, Operation, Scan, TableReferences, }; @@ -37,7 +35,7 @@ pub fn translate_create_index( syms: &SymbolTable, mut program: ProgramBuilder, connection: &Arc, - mut where_clause: Option>, + where_clause: Option>, ) -> crate::Result { if !schema.indexes_enabled() { crate::bail_parse_error!( @@ -62,10 +60,10 @@ pub fn translate_create_index( } crate::bail_parse_error!("Error: index with name '{idx_name}' already exists."); } - let Some(tbl) = schema.tables.get(&tbl_name) else { + let Some(table) = schema.tables.get(&tbl_name) else { crate::bail_parse_error!("Error: table '{tbl_name}' does not exist."); }; - let Some(tbl) = tbl.btree() else { + let Some(tbl) = table.btree() else { crate::bail_parse_error!("Error: table '{tbl_name}' is not a b-tree table."); }; let columns = resolve_sorted_columns(&tbl, columns)?; @@ -87,10 +85,20 @@ pub fn translate_create_index( unique: unique_if_not_exists.0, ephemeral: false, has_rowid: tbl.has_rowid, - where_clause: where_clause.clone(), // store the *original* where clause, because we need to rewrite it - // before translating, and it cannot reference a table alias + // store the *original* where clause, because we need to rewrite it + // before translating, and it cannot reference a table alias + where_clause: where_clause.clone(), }); + if !idx.validate_where_expr(table) { + crate::bail_parse_error!( + "Error: cannot use aggregate, window functions or reference other tables in WHERE clause of CREATE INDEX:\n {}", + where_clause + .expect("where expr has to exist in order to fail") + .to_string() + ); + } + // Allocate the necessary cursors: // // 1. 
sqlite_schema_cursor_id - sqlite_schema table @@ -127,16 +135,7 @@ pub fn translate_create_index( }], vec![], ); - let mut param_state = ParamState::default(); - if let Some(where_clause) = where_clause.as_mut() { - bind_and_rewrite_expr( - where_clause, - Some(&mut table_references), - None, - connection, - &mut param_state, - )?; - } + let where_clause = idx.bind_where_expr(Some(&mut table_references), connection); // Create a new B-Tree and store the root page index in a register let root_page_reg = program.alloc_register(); From 93d24d2b50b050a0034fa5207eb8d310dd8ae44d Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 20 Sep 2025 17:43:50 -0400 Subject: [PATCH 45/78] Use new `bind_where_expr` helper method in emitter --- core/translate/emitter.rs | 126 ++++++++++++++++++-------------------- 1 file changed, 58 insertions(+), 68 deletions(-) diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 51b480daf..16106870f 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -25,8 +25,8 @@ use crate::function::Func; use crate::schema::{BTreeTable, Column, Schema, Table}; use crate::translate::compound_select::emit_program_for_compound_select; use crate::translate::expr::{ - bind_and_rewrite_expr, emit_returning_results, walk_expr_mut, ParamState, - ReturningValueRegisters, WalkControl, + emit_returning_results, translate_condition_expr, translate_expr_no_constant_opt, + walk_expr_mut, ConditionMetadata, NoConstantOptReason, ReturningValueRegisters, WalkControl, }; use crate::translate::plan::{DeletePlan, JoinedTable, Plan, QueryDestination, Search}; use crate::translate::result_row::try_fold_expr_to_i64; @@ -573,31 +573,23 @@ fn emit_delete_insns( .unwrap_or_default(); for (index, index_cursor_id) in other_indexes { - let skip_delete_label = if let Some(where_clause) = &index.where_clause { - let mut where_copy = where_clause.as_ref().clone(); - let mut param_state = ParamState::disallow(); - bind_and_rewrite_expr( - &mut where_copy, - Some(table_references), - None, - connection, - &mut param_state, - )?; + let skip_delete_label = if index.where_clause.is_some() { + let where_copy = index + .bind_where_expr(Some(table_references), connection) + .expect("where clause to exist"); let where_result_reg = program.alloc_register(); - translate_expr( + let skip_label = program.allocate_label(); + translate_condition_expr( program, - Some(table_references), + table_references, &where_copy, - where_result_reg, + ConditionMetadata { + jump_if_condition_is_true: false, + jump_target_when_false: skip_label, + jump_target_when_true: BranchOffset::Placeholder, + }, &t_ctx.resolver, )?; - - let skip_label = program.allocate_label(); - program.emit_insn(Insn::IfNot { - reg: where_result_reg, - target_pc: skip_label, - jump_if_null: true, - }); Some(skip_label) } else { None @@ -1077,55 +1069,53 @@ fn emit_update_insns( // partial index, so we can know whether or not to delete the old index entry, // as well as whether or not the NEW values satisfy the predicate, to determine whether // or not to insert a new index entry for a partial index - let (old_satisfies_where, new_satisfies_where) = - if let Some(where_clause) = &index.where_clause { - let mut where_copy = where_clause.as_ref().clone(); - let mut param_state = ParamState::disallow(); - // This means that we need to bind the column references to a copy of the index Expr, - // so we can emit Insn::Column instructions and refer to the old values. 
- bind_and_rewrite_expr( - &mut where_copy, - Some(&mut plan.table_references), - None, - connection, - &mut param_state, - )?; - let old_satisfied_reg = program.alloc_register(); - translate_expr( - program, - Some(&plan.table_references), - &where_copy, - old_satisfied_reg, - &t_ctx.resolver, - )?; + let (old_satisfies_where, new_satisfies_where) = if index.where_clause.is_some() { + // This means that we need to bind the column references to a copy of the index Expr, + // so we can emit Insn::Column instructions and refer to the old values. + let where_clause = index + .bind_where_expr(Some(&mut plan.table_references), connection) + .expect("where clause to exist"); + let old_satisfied_reg = program.alloc_register(); + let old_satisfied_reg = translate_expr_no_constant_opt( + program, + Some(&plan.table_references), + &where_clause, + old_satisfied_reg, + &t_ctx.resolver, + NoConstantOptReason::RegisterReuse, + )?; - let mut new_where = where_clause.as_ref().clone(); - // Now we need to rewrite the Expr::Id and Expr::Qualified/Expr::RowID (from a copy of the original, un-bound `where` expr), - // to refer to the new values, which are already loaded into registers starting at - // `start`. - rewrite_where_for_update_registers( - &mut new_where, - unsafe { &*table_ref }.columns(), - start, - rowid_set_clause_reg.unwrap_or(beg), - )?; + // grab a new copy of the original where clause from the index + let mut new_where = index + .where_clause + .as_ref() + .expect("checked where clause to exist") + .clone(); + // Now we need to rewrite the Expr::Id and Expr::Qualified/Expr::RowID (from a copy of the original, un-bound `where` expr), + // to refer to the new values, which are already loaded into registers starting at `start`. + rewrite_where_for_update_registers( + &mut new_where, + unsafe { &*table_ref }.columns(), + start, + rowid_set_clause_reg.unwrap_or(beg), + )?; - let new_satisfied_reg = program.alloc_register(); - translate_expr( - program, - None, - &new_where, - new_satisfied_reg, - &t_ctx.resolver, - )?; + let new_satisfied_reg = program.alloc_register(); + translate_expr( + program, + None, + &new_where, + new_satisfied_reg, + &t_ctx.resolver, + )?; - // now we have two registers that tell us whether or not the old and new values satisfy - // the partial index predicate, and we can use those to decide whether or not to - // delete/insert a new index entry for this partial index. - (Some(old_satisfied_reg), Some(new_satisfied_reg)) - } else { - (None, None) - }; + // now we have two registers that tell us whether or not the old and new values satisfy + // the partial index predicate, and we can use those to decide whether or not to + // delete/insert a new index entry for this partial index. 
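+            // Decision table these two registers enable (a sketch of the maintenance
+            // logic below, not emitted bytecode):
+            //   old=true,  new=false -> delete the stale index entry only
+            //   old=false, new=true  -> insert a fresh index entry only
+            //   old=true,  new=true  -> delete then re-insert (the key may have changed)
+            //   old=false, new=false -> no index maintenance needed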
+ (Some(old_satisfied_reg), Some(new_satisfied_reg)) + } else { + (None, None) + }; let mut skip_delete_label = None; let mut skip_insert_label = None; From 51fb801d872f76c88b3e76a98b4cc25d85c07b49 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 20 Sep 2025 17:44:28 -0400 Subject: [PATCH 46/78] Fix partial index handling in insert to properly map rowid to insertion key --- core/translate/insert.rs | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index a8d6fd379..6339c2788 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -23,7 +23,6 @@ use crate::util::normalize_ident; use crate::vdbe::builder::ProgramBuilderOpts; use crate::vdbe::insn::{IdxInsertFlags, InsertFlags, RegisterOrLiteral}; use crate::vdbe::BranchOffset; -use crate::{bail_parse_error, Result, SymbolTable, VirtualTable}; use crate::{ schema::{Column, Schema}, vdbe::{ @@ -31,6 +30,7 @@ use crate::{ insn::Insn, }, }; +use crate::{Result, SymbolTable, VirtualTable}; use super::emitter::Resolver; use super::expr::{translate_expr, translate_expr_no_constant_opt, NoConstantOptReason}; @@ -1225,27 +1225,18 @@ pub fn rewrite_partial_index_where( expr, &mut |e: &mut ast::Expr| -> crate::Result { match e { - // Unqualified column reference, map to insertion register - Expr::Column { - column, - is_rowid_alias, - .. - } => { - if *is_rowid_alias { - *e = Expr::Register(insertion.key_register()); - } else if let Some(col_mapping) = insertion.col_mappings.get(*column) { - *e = Expr::Register(col_mapping.register); - } else { - bail_parse_error!("Column index {} not found in insertion", column); - } - } + // NOTE: should not have ANY Expr::Columns bound to the expr Expr::Id(ast::Name::Ident(name)) | Expr::Id(ast::Name::Quoted(name)) => { let normalized = normalize_ident(name.as_str()); if normalized.eq_ignore_ascii_case("rowid") { *e = Expr::Register(insertion.key_register()); } else if let Some(col_mapping) = insertion.get_col_mapping_by_name(&normalized) { - *e = Expr::Register(col_mapping.register); + if col_mapping.column.is_rowid_alias { + *e = Expr::Register(insertion.key_register()); + } else { + *e = Expr::Register(col_mapping.register); + } } } Expr::Qualified(_, col) | Expr::DoublyQualified(_, _, col) => { @@ -1254,7 +1245,11 @@ pub fn rewrite_partial_index_where( *e = Expr::Register(insertion.key_register()); } else if let Some(col_mapping) = insertion.get_col_mapping_by_name(&normalized) { - *e = Expr::Register(col_mapping.register); + if col_mapping.column.is_rowid_alias { + *e = Expr::Register(insertion.key_register()); + } else { + *e = Expr::Register(col_mapping.register); + } } } Expr::RowId { .. 
} => { From 62ee68e4dd50ec637a0fb7da5ff8e0ad75b42fb2 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 20 Sep 2025 18:32:03 -0400 Subject: [PATCH 47/78] Fix INSERT/UPSERT to properly handle and/or reject partial indexes --- core/translate/insert.rs | 29 ++++-- core/translate/upsert.rs | 200 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 222 insertions(+), 7 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 6339c2788..9c942d83e 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -17,7 +17,7 @@ use crate::translate::expr::{ use crate::translate::plan::TableReferences; use crate::translate::planner::ROWID; use crate::translate::upsert::{ - collect_set_clauses_for_upsert, emit_upsert, upsert_matches_index, upsert_matches_pk, + collect_set_clauses_for_upsert, emit_upsert, resolve_upsert_target, ResolvedUpsertTarget, }; use crate::util::normalize_ident; use crate::vdbe::builder::ProgramBuilderOpts; @@ -168,6 +168,12 @@ pub fn translate_insert( } upsert_opt = upsert.as_deref().cloned(); } + // resolve the constrained target for UPSERT if specified + let resolved_upsert = if let Some(upsert) = &upsert_opt { + Some(resolve_upsert_target(schema, &table, upsert)?) + } else { + None + }; let halt_label = program.allocate_label(); let loop_start_label = program.allocate_label(); @@ -438,8 +444,13 @@ pub fn translate_insert( // Conflict on rowid: attempt to route through UPSERT if it targets the PK, otherwise raise constraint. // emit Halt for every case *except* when upsert handles the conflict 'emit_halt: { - if let Some(ref mut upsert) = upsert_opt.as_mut() { - if upsert_matches_pk(upsert, &table) { + if let (Some(ref mut upsert), Some(ref target)) = + (upsert_opt.as_mut(), resolved_upsert.as_ref()) + { + if matches!( + target, + ResolvedUpsertTarget::CatchAll | ResolvedUpsertTarget::PrimaryKey + ) { match upsert.do_clause { UpsertDo::Nothing => { program.emit_insn(Insn::Goto { @@ -451,7 +462,6 @@ pub fn translate_insert( ref mut where_clause, } => { let mut rewritten_sets = collect_set_clauses_for_upsert(&table, sets)?; - emit_upsert( &mut program, schema, @@ -590,11 +600,16 @@ pub fn translate_insert( accum }, ); - // again, emit halt for every case *except* when upsert handles the conflict 'emit_halt: { - if let Some(ref mut upsert) = upsert_opt.as_mut() { - if upsert_matches_index(upsert, index, &table) { + if let (Some(ref mut upsert), Some(ref target)) = + (upsert_opt.as_mut(), resolved_upsert.as_ref()) + { + if match target { + ResolvedUpsertTarget::CatchAll => true, + ResolvedUpsertTarget::Index(tgt) => Arc::ptr_eq(tgt, index), + ResolvedUpsertTarget::PrimaryKey => false, + } { match upsert.do_clause { UpsertDo::Nothing => { program.emit_insn(Insn::Goto { diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs index b10a24b45..6cf520ad8 100644 --- a/core/translate/upsert.rs +++ b/core/translate/upsert.rs @@ -2,7 +2,9 @@ use std::{collections::HashMap, sync::Arc}; use turso_parser::ast::{self, Upsert}; +use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY; use crate::translate::expr::WalkControl; +use crate::vdbe::insn::CmpInsFlags; use crate::{ bail_parse_error, error::SQLITE_CONSTRAINT_NOTNULL, @@ -185,6 +187,42 @@ pub fn upsert_matches_index(upsert: &Upsert, index: &Index, table: &Table) -> bo need.is_empty() } +#[derive(Clone)] +pub enum ResolvedUpsertTarget { + // ON CONFLICT DO + CatchAll, + // ON CONFLICT(pk) DO + PrimaryKey, + // matched this non-partial UNIQUE index + Index(Arc), +} + +pub fn resolve_upsert_target( + 
schema: &Schema, + table: &Table, + upsert: &Upsert, +) -> crate::Result { + // Omitted target, catch-all + if upsert.index.is_none() { + return Ok(ResolvedUpsertTarget::CatchAll); + } + + // Targeted: must match PK or a non-partial UNIQUE index. + if upsert_matches_pk(upsert, table) { + return Ok(ResolvedUpsertTarget::PrimaryKey); + } + + for idx in schema.get_indices(table.get_name()) { + if idx.unique && idx.where_clause.is_none() && upsert_matches_index(upsert, idx, table) { + return Ok(ResolvedUpsertTarget::Index(Arc::clone(idx))); + } + } + // Match SQLite’s error text: + crate::bail_parse_error!( + "ON CONFLICT clause does not match any PRIMARY KEY or UNIQUE constraint" + ); +} + #[allow(clippy::too_many_arguments)] /// Emit the bytecode to implement the `DO UPDATE` arm of an UPSERT. /// @@ -336,6 +374,51 @@ pub fn emit_upsert( .expect("index exists"); let k = idx_meta.columns.len(); + let (before_pred_reg, new_pred_reg) = if let Some(where_clause) = &idx_meta.where_clause + { + // BEFORE image predicate + let mut before_where = where_clause.as_ref().clone(); + rewrite_partial_index_where_for_image( + &mut before_where, + table, + before_start.expect("before_start must exist for index maintenance"), + conflict_rowid_reg, + )?; + let before_reg = program.alloc_register(); + translate_expr_no_constant_opt( + program, + None, + &before_where, + before_reg, + resolver, + NoConstantOptReason::RegisterReuse, + )?; + + // NEW image predicate + let mut new_where = where_clause.as_ref().clone(); + rewrite_partial_index_where_for_image( + &mut new_where, + table, + new_start, + conflict_rowid_reg, + )?; + let new_reg = program.alloc_register(); + translate_expr(program, None, &new_where, new_reg, resolver)?; + + (Some(before_reg), Some(new_reg)) + } else { + (None, None) + }; + let maybe_skip_del = before_pred_reg.map(|r| { + let lbl = program.allocate_label(); + program.emit_insn(Insn::IfNot { + reg: r, + target_pc: lbl, + jump_if_null: true, + }); + lbl + }); + let del = program.alloc_registers(k + 1); for (i, ic) in idx_meta.columns.iter().enumerate() { let (ci, _) = table.get_column_by_name(&ic.name).unwrap(); @@ -357,6 +440,22 @@ pub fn emit_upsert( raise_error_if_no_matching_entry: false, }); + // resolve skipping the delete if it was false/NULL + if let Some(label) = maybe_skip_del { + program.resolve_label(label, program.offset()); + } + + // if NEW does not satisfy partial index, skip the insert + let maybe_skip_ins = new_pred_reg.map(|r| { + let lbl = program.allocate_label(); + program.emit_insn(Insn::IfNot { + reg: r, + target_pc: lbl, + jump_if_null: true, + }); + lbl + }); + let ins = program.alloc_registers(k + 1); for (i, ic) in idx_meta.columns.iter().enumerate() { let (ci, _) = table.get_column_by_name(&ic.name).unwrap(); @@ -380,6 +479,55 @@ pub fn emit_upsert( index_name: Some((*idx_name).clone()), affinity_str: None, }); + + // If unique, perform NoConflict + self-check before IdxInsert + if idx_meta.unique { + let ok_lbl = program.allocate_label(); + program.emit_insn(Insn::NoConflict { + cursor_id: *idx_cid, + target_pc: ok_lbl, + record_reg: ins, + num_regs: k, + }); + + // If there’s a hit, skip it if it’s self, otherwise raise constraint + let hit_rowid = program.alloc_register(); + program.emit_insn(Insn::IdxRowId { + cursor_id: *idx_cid, + dest: hit_rowid, + }); + program.emit_insn(Insn::Eq { + lhs: conflict_rowid_reg, + rhs: hit_rowid, + target_pc: ok_lbl, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + let mut description = 
String::with_capacity( + table.get_name().len() + + idx_meta + .columns + .iter() + .map(|c| c.name.len() + 2) + .sum::(), + ); + description.push_str(table.get_name()); + description.push_str(".("); + description.push_str( + &idx_meta + .columns + .iter() + .map(|c| c.name.as_str()) + .collect::>() + .join(", "), + ); + description.push(')'); + program.emit_insn(Insn::Halt { + err_code: SQLITE_CONSTRAINT_PRIMARYKEY, + description, + }); + program.preassign_label_to_next_insn(ok_lbl); + } program.emit_insn(Insn::IdxInsert { cursor_id: *idx_cid, record_reg: rec, @@ -387,6 +535,9 @@ pub fn emit_upsert( unpacked_count: Some((k + 1) as u16), flags: IdxInsertFlags::new().nchange(true), }); + if let Some(lbl) = maybe_skip_ins { + program.resolve_label(lbl, program.offset()); + } } } @@ -569,3 +720,52 @@ fn rewrite_upsert_expr_in_place( }, ) } + +/// Rewrite partial-index WHERE to read from a contiguous row image starting at `base_start`. +/// Maps rowid (and the rowid-alias column) to `rowid_reg`... Very similar to the above method +/// but simpler because there is no EXCLUDED or table name to consider. +fn rewrite_partial_index_where_for_image( + expr: &mut ast::Expr, + table: &Table, + base_start: usize, + rowid_reg: usize, +) -> crate::Result { + walk_expr_mut( + expr, + &mut |e: &mut ast::Expr| -> crate::Result { + match e { + ast::Expr::Id(n) => { + let nm = normalize_ident(n.as_str()); + if nm.eq_ignore_ascii_case("rowid") { + *e = ast::Expr::Register(rowid_reg); + } else if let Some((col_idx, _)) = table.get_column_by_name(&nm) { + let col = &table.columns()[col_idx]; + *e = ast::Expr::Register(if col.is_rowid_alias { + rowid_reg + } else { + base_start + col_idx + }); + } + } + ast::Expr::Qualified(_, cn) | ast::Expr::DoublyQualified(_, _, cn) => { + let nm = normalize_ident(cn.as_str()); + if nm.eq_ignore_ascii_case("rowid") { + *e = ast::Expr::Register(rowid_reg); + } else if let Some((col_idx, _)) = table.get_column_by_name(&nm) { + let col = &table.columns()[col_idx]; + *e = ast::Expr::Register(if col.is_rowid_alias { + rowid_reg + } else { + base_start + col_idx + }); + } + } + ast::Expr::RowId { .. 
} => {
+                    *e = ast::Expr::Register(rowid_reg);
+                }
+                _ => {}
+            }
+            Ok(WalkControl::Continue)
+        },
+    )
+}

From 03149bc92d0d6312d8f54e060ea4f2ee3c277b66 Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Sat, 20 Sep 2025 18:32:03 -0400
Subject: [PATCH 48/78] Remove unused imports

---
 core/schema.rs            | 2 --
 core/translate/emitter.rs | 1 -
 2 files changed, 3 deletions(-)

diff --git a/core/schema.rs b/core/schema.rs
index 7e7fc953e..546af4f00 100644
--- a/core/schema.rs
+++ b/core/schema.rs
@@ -1,8 +1,6 @@
 use crate::function::Func;
 use crate::incremental::view::IncrementalView;
-use crate::translate::emitter::Resolver;
 use crate::translate::expr::{bind_and_rewrite_expr, walk_expr, ParamState, WalkControl};
-use crate::translate::optimizer::Optimizable;
 use parking_lot::RwLock;
 
 /// Simple view structure for non-materialized views
diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs
index 16106870f..aff649cf8 100644
--- a/core/translate/emitter.rs
+++ b/core/translate/emitter.rs
@@ -577,7 +577,6 @@ fn emit_delete_insns(
             let where_copy = index
                 .bind_where_expr(Some(table_references), connection)
                 .expect("where clause to exist");
-            let where_result_reg = program.alloc_register();
             let skip_label = program.allocate_label();
             translate_condition_expr(
                 program,

From 33538a1ebfae8b07a72477aab910a570efbf3bfa Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Sat, 20 Sep 2025 18:32:50 -0400
Subject: [PATCH 49/78] Add some tests for UPSERT with partial indexes

---
 testing/partial_idx.test | 204 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 204 insertions(+)

diff --git a/testing/partial_idx.test b/testing/partial_idx.test
index 350717e69..1bfbc5b16 100755
--- a/testing/partial_idx.test
+++ b/testing/partial_idx.test
@@ -366,3 +366,207 @@ do_execsql_test_in_memory_error_content partial-index-update-only-predicate-col-
    INSERT INTO r2 VALUES (2,'A',1);
    UPDATE r2 SET x = 1 WHERE id = 1;
 } {UNIQUE constraint failed: r2.k (19)}
+
+
+do_execsql_test_on_specific_db {:memory:} partial-index-multi-predicate-references {
+    CREATE TABLE r2 (id INTEGER PRIMARY KEY, k TEXT, x INT);
+    CREATE UNIQUE INDEX idx_k ON r2(k) WHERE x < 10 AND id > 10;
+    INSERT INTO r2 (k,x) VALUES ('A',1), ('A',2), ('A',3), ('A',4), ('A',5), ('A',6), ('A',7), ('A',8), ('A', 9), ('A', 10), ('A', 10);
+    -- now `id` will be greater than 10, so anything added with k='A' and x<10 should conflict
+    INSERT INTO r2 (k,x) VALUES ('A',11);
+    INSERT INTO r2 (k,x) VALUES ('A',12);
+    SELECT id FROM r2 ORDER BY id DESC LIMIT 1;
+} {13}
+
+do_execsql_test_in_memory_error_content partial-index-multi-predicate-references-rowid-alias {
+    CREATE TABLE r2 (id INTEGER PRIMARY KEY, k TEXT, x INT);
+    CREATE UNIQUE INDEX idx_k ON r2(k) WHERE x < 10 AND id > 10;
+    INSERT INTO r2 (k,x) VALUES ('A',1), ('A',2), ('A',3), ('A',4), ('A',5), ('A',6), ('A',7), ('A',8), ('A', 9), ('A', 10), ('A', 10);
+    -- now `id` will be greater than 10, so anything added with k='A' and x<10 should conflict
+    INSERT INTO r2 (k,x) VALUES ('A',11);
+    INSERT INTO r2 (k,x) VALUES ('A',12);
+    INSERT INTO r2 (k,x) VALUES ('A', 3);
+    INSERT INTO r2 (k,x) VALUES ('A', 9);
+    -- should fail now
+} {UNIQUE constraint failed: r2.k (19)}
+
+
+do_execsql_test_in_memory_any_error upsert-partial-donothing-basic {
+    CREATE TABLE u1(id INTEGER PRIMARY KEY, email TEXT, status TEXT, note TEXT);
+    CREATE UNIQUE INDEX idx_active_email ON u1(email) WHERE status='active';
+    INSERT INTO u1(email,status,note)
+    VALUES('a@test','active','n3')
+    ON CONFLICT(email) DO NOTHING;
+}
+
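+# (note: the targeted ON CONFLICT(email) above cannot be satisfied by the partial
+# index idx_active_email: resolve_upsert_target only accepts the PRIMARY KEY or a
+# non-partial UNIQUE index, so the statement fails with "ON CONFLICT clause does
+# not match any PRIMARY KEY or UNIQUE constraint" -- hence the any_error assertion.)
+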
+do_execsql_test_on_specific_db {:memory:} upsert-partial-doupdate-basic { + CREATE TABLE u2(id INTEGER PRIMARY KEY, email TEXT, status TEXT, note TEXT); + CREATE UNIQUE INDEX idx_active_email ON u2(email) WHERE status='active'; + + INSERT INTO u2 VALUES (1,'a@test','active','n1'); + + INSERT INTO u2(email,status,note) + VALUES('a@test','active','nNEW') + ON CONFLICT DO UPDATE SET note=excluded.note; + + SELECT id,email,status,note FROM u2; +} {1|a@test|active|nNEW} + +do_execsql_test_on_specific_db {:memory:} upsert-partial-doupdate-leave-predicate { + CREATE TABLE u3(id INTEGER PRIMARY KEY, email TEXT, status TEXT); + CREATE UNIQUE INDEX idx_active_email ON u3(email) WHERE status='active'; + + INSERT INTO u3 VALUES (1,'a@test','active'); + + INSERT INTO u3(email,status) + VALUES('a@test','active') + ON CONFLICT DO UPDATE SET status='inactive'; + + -- After update, the conflicting row no longer participates in idx predicate. + -- Insert should now succeed for active variant. + INSERT INTO u3 VALUES (2,'a@test','active'); + + SELECT id,email,status FROM u3 ORDER BY id; +} {1|a@test|inactive 2|a@test|active} + +do_execsql_test_on_specific_db {:memory:} upsert-partial-doupdate-where-skip { + CREATE TABLE u4(id INTEGER PRIMARY KEY, email TEXT, status TEXT, hits INT DEFAULT 0); + CREATE UNIQUE INDEX idx_active_email ON u4(email) WHERE status='active'; + + INSERT INTO u4 VALUES(1,'a@test','active',5); + + INSERT INTO u4(email,status) + VALUES('a@test','active') + ON CONFLICT DO UPDATE SET hits=hits+1 WHERE excluded.status='inactive'; + + -- filter false => no UPDATE; constraint remains => INSERT must be suppressed, + -- SQLite semantics: when WHERE is false, the UPSERT does nothing (no row added). + SELECT id,email,status,hits FROM u4 ORDER BY id; +} {1|a@test|active|5} + +do_execsql_test_on_specific_db {:memory:} upsert-partial-omitted-target-matches { + CREATE TABLE u6(id INTEGER PRIMARY KEY, email TEXT, status TEXT, n INT); + CREATE UNIQUE INDEX idx_active_email ON u6(email) WHERE status='active'; + INSERT INTO u6 VALUES (1,'a@test','active',0); + + INSERT INTO u6(email,status,n) + VALUES('a@test','active',10) + ON CONFLICT DO UPDATE SET n = excluded.n; + + SELECT id,email,status,n FROM u6; +} {1|a@test|active|10} + +do_execsql_test_on_specific_db {:memory:} upsert-partial-multicol-leave-predicate { + CREATE TABLE m2(id INTEGER PRIMARY KEY, sku TEXT, region TEXT, price INT); + CREATE UNIQUE INDEX idx_sr ON m2(sku,region) WHERE price > 100; + + INSERT INTO m2 VALUES(1,'A','US',150); + + INSERT INTO m2(sku,region,price) + VALUES('A','US',150) + ON CONFLICT DO UPDATE SET price=50; + + -- Now predicate false; insert another high-price duplicate should succeed + INSERT INTO m2 VALUES(2,'A','US',200); + + SELECT id,sku,region,price FROM m2 ORDER BY id; +} {1|A|US|50 2|A|US|200} + +do_execsql_test_on_specific_db {:memory:} upsert-partial-func-predicate { + CREATE TABLE d1(id INTEGER PRIMARY KEY, title TEXT, n INT DEFAULT 0); + CREATE UNIQUE INDEX idx_lower_title ON d1(title) WHERE LOWER(title)=title; + + INSERT INTO d1 VALUES(1,'lower',0); + + INSERT INTO d1(title) + VALUES('lower') + ON CONFLICT DO UPDATE SET n = n+1; + + SELECT id,title,n FROM d1; +} {1|lower|1} + +do_execsql_test_on_specific_db {:memory:} upsert-partial-rowid-predicate { + CREATE TABLE r1(id INTEGER PRIMARY KEY, k TEXT, x INT, hits INT DEFAULT 0); + CREATE UNIQUE INDEX idx_k ON r1(k) WHERE x < 10 AND id > 10; + + -- create ids 1..12, with ('A', >=10) rows to push rowid>10 + INSERT INTO r1(k,x) 
VALUES('A',10),('A',10),('A',10),('A',10),('A',10),
+        ('A',10),('A',10),('A',10),('A',10),('A',10),('A',11),('A',12);
+
+    -- Now conflict for ('A', 5) is against partial index (id>10 & x<10)
+    INSERT INTO r1(k,x,hits)
+    VALUES('A',5,1)
+    ON CONFLICT DO UPDATE SET hits = hits + excluded.hits;
+
+    SELECT k, SUM(hits) FROM r1 GROUP BY k;
+} {A|1}
+
+# EXCLUDED usage inside DO UPDATE stays within predicate and changes key
+do_execsql_test_on_specific_db {:memory:} upsert-partial-excluded-rewrite {
+    CREATE TABLE ex1(id INTEGER PRIMARY KEY, a TEXT, b INT, c TEXT);
+    CREATE UNIQUE INDEX idx_a ON ex1(a) WHERE b>0;
+
+    INSERT INTO ex1 VALUES(1,'X',1,'old');
+
+    INSERT INTO ex1(a,b,c)
+    VALUES('X',1,'new')
+    ON CONFLICT DO UPDATE SET c = excluded.c, b = excluded.b;
+
+    SELECT id,a,b,c FROM ex1;
+} {1|X|1|new}
+
+do_execsql_test_on_specific_db {:memory:} upsert-partial-stay-change-to-unique {
+    CREATE TABLE s1(id INTEGER PRIMARY KEY, a TEXT, flag INT);
+    CREATE UNIQUE INDEX idx_a ON s1(a) WHERE flag=1;
+
+    INSERT INTO s1 VALUES(1,'K',1);
+
+    INSERT INTO s1(a,flag)
+    VALUES('K',1)
+    ON CONFLICT DO UPDATE SET a='K2';
+
+    SELECT id,a,flag FROM s1;
+} {1|K2|1}
+
+do_execsql_test_on_specific_db {:memory:} upsert-partial-toggle-predicate {
+    CREATE TABLE tgl(id INTEGER PRIMARY KEY, k TEXT, x INT);
+    CREATE UNIQUE INDEX idx_k ON tgl(k) WHERE x>0;
+
+    INSERT INTO tgl VALUES(1,'A',1);
+
+    -- Conflicts on 'A', flips x to 0 (leaves predicate)
+    INSERT INTO tgl(k,x)
+    VALUES('A',1)
+    ON CONFLICT DO UPDATE SET x=0;
+
+    -- Now another 'A' with x>0 should insert
+    INSERT INTO tgl VALUES(2,'A',5);
+
+    SELECT id,k,x FROM tgl ORDER BY id;
+} {1|A|0 2|A|5}
+
+do_execsql_test_in_memory_error_content upsert-partial-target-pk-only {
+    CREATE TABLE pko(id INTEGER PRIMARY KEY, k TEXT, x INT);
+    CREATE UNIQUE INDEX idx_k ON pko(k) WHERE x>0;
+
+    INSERT INTO pko VALUES(1,'A',1);
+
+    -- Target PK only; conflict is on idx_k, so DO UPDATE must NOT fire and error is raised
+    INSERT INTO pko(id,k,x)
+    VALUES(2,'A',1)
+    ON CONFLICT(id) DO UPDATE SET x=99;
+} {UNIQUE constraint failed: pko.k (19)}
+
+do_execsql_test_on_specific_db {:memory:} upsert-partial-omitted-no-conflict {
+    CREATE TABLE insfree(id INTEGER PRIMARY KEY, k TEXT, x INT);
+    CREATE UNIQUE INDEX idx_k ON insfree(k) WHERE x>0;
+
+    INSERT INTO insfree VALUES(1,'A',1);
+
+    -- x=0 => not in predicate, so no conflict; row must be inserted
+    INSERT INTO insfree(k,x)
+    VALUES('A',0)
+    ON CONFLICT DO NOTHING;
+
+    SELECT COUNT(*) FROM insfree WHERE k='A';
+} {2}

From 13260349b08c2b0f625d0c9db414de3bc4a734be Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Sat, 20 Sep 2025 20:34:24 -0300
Subject: [PATCH 50/78] Return a parse error for a non-equality join

We currently don't handle non-equality join conditions, but end up just
returning a bogus result. Let's return a parse error instead.
---
 core/incremental/compiler.rs | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs
index 8c8189261..7c215f2f1 100644
--- a/core/incremental/compiler.rs
+++ b/core/incremental/compiler.rs
@@ -1204,6 +1204,20 @@ impl DbspCompiler {
                     .map(|col| col.name.clone())
                     .collect();
 
+                // Check if there are any non-equijoin conditions in the filter
+                if join.filter.is_some() {
+                    return Err(LimboError::ParseError(
+                        "Non-equijoin conditions are not supported in materialized views.
Only equality joins (=) are allowed.".to_string() + )); + } + + // Check if we have at least one equijoin condition + if join.on.is_empty() { + return Err(LimboError::ParseError( + "Joins in materialized views must have at least one equality condition.".to_string() + )); + } + // Extract join key indices from join conditions // For now, we only support equijoin conditions let mut left_key_indices = Vec::new(); From e5dfc942b12f1c943f97061af3c274797fd287f5 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 21 Sep 2025 13:05:46 -0300 Subject: [PATCH 51/78] remove some unnecessary unsafe impls --- core/fast_lock.rs | 1 - core/io/memory.rs | 3 +-- core/io/unix.rs | 5 ----- core/storage/database.rs | 5 ----- 4 files changed, 1 insertion(+), 13 deletions(-) diff --git a/core/fast_lock.rs b/core/fast_lock.rs index 8abda6a17..a02d617ba 100644 --- a/core/fast_lock.rs +++ b/core/fast_lock.rs @@ -34,7 +34,6 @@ impl DerefMut for SpinLockGuard<'_, T> { } } -unsafe impl Send for SpinLock {} unsafe impl Sync for SpinLock {} impl SpinLock { diff --git a/core/io/memory.rs b/core/io/memory.rs index c69d87dcf..fc0549ca7 100644 --- a/core/io/memory.rs +++ b/core/io/memory.rs @@ -12,7 +12,6 @@ use tracing::debug; pub struct MemoryIO { files: Arc>>>, } -unsafe impl Send for MemoryIO {} // TODO: page size flag const PAGE_SIZE: usize = 4096; @@ -76,7 +75,7 @@ pub struct MemoryFile { pages: UnsafeCell>, size: Cell, } -unsafe impl Send for MemoryFile {} + unsafe impl Sync for MemoryFile {} impl File for MemoryFile { diff --git a/core/io/unix.rs b/core/io/unix.rs index a3cfd6f2f..b0d47f30f 100644 --- a/core/io/unix.rs +++ b/core/io/unix.rs @@ -17,9 +17,6 @@ use tracing::{instrument, trace, Level}; pub struct UnixIO {} -unsafe impl Send for UnixIO {} -unsafe impl Sync for UnixIO {} - impl UnixIO { #[cfg(feature = "fs")] pub fn new() -> Result { @@ -128,8 +125,6 @@ impl IO for UnixIO { pub struct UnixFile { file: Arc>, } -unsafe impl Send for UnixFile {} -unsafe impl Sync for UnixFile {} impl File for UnixFile { fn lock_file(&self, exclusive: bool) -> Result<()> { diff --git a/core/storage/database.rs b/core/storage/database.rs index e7aceebbf..3cbc42b9f 100644 --- a/core/storage/database.rs +++ b/core/storage/database.rs @@ -88,11 +88,6 @@ pub struct DatabaseFile { file: Arc, } -#[cfg(feature = "fs")] -unsafe impl Send for DatabaseFile {} -#[cfg(feature = "fs")] -unsafe impl Sync for DatabaseFile {} - #[cfg(feature = "fs")] impl DatabaseStorage for DatabaseFile { #[instrument(skip_all, level = Level::DEBUG)] From 6e2b0c901eeec4046d7a56393a49933c4e34a279 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 21 Sep 2025 14:02:23 -0300 Subject: [PATCH 52/78] remove PanicGenerationContext and instead just pass the connection context directly --- simulator/generation/mod.rs | 16 ---------------- simulator/generation/plan.rs | 21 ++++++++++++--------- simulator/generation/property.rs | 7 +++---- 3 files changed, 15 insertions(+), 29 deletions(-) diff --git a/simulator/generation/mod.rs b/simulator/generation/mod.rs index d2ccf4a00..80a2d0cff 100644 --- a/simulator/generation/mod.rs +++ b/simulator/generation/mod.rs @@ -1,5 +1,3 @@ -use sql_generation::generation::GenerationContext; - use crate::runner::env::ShadowTablesMut; pub mod plan; @@ -19,17 +17,3 @@ pub(crate) trait Shadow { type Result; fn shadow(&self, tables: &mut ShadowTablesMut<'_>) -> Self::Result; } - -/// Generation context that will always panic when called -/// This is meant to be used when want to ensure that no downstream arbitrary fn will use this 
context -pub struct PanicGenerationContext; - -impl GenerationContext for PanicGenerationContext { - fn tables(&self) -> &Vec { - unimplemented!("you are not supposed to use this context") - } - - fn opts(&self) -> &sql_generation::generation::Opts { - unimplemented!("you are not supposed to use this context") - } -} diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 8b8f97aa8..64a6574b8 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -25,7 +25,7 @@ use turso_core::{Connection, Result, StepResult}; use crate::{ SimulatorEnv, - generation::{PanicGenerationContext, Shadow}, + generation::Shadow, model::Query, runner::env::{ShadowTablesMut, SimConnection, SimulationType}, }; @@ -226,8 +226,12 @@ impl InteractionPlan { while plan.len() < num_interactions { tracing::debug!("Generating interaction {}/{}", plan.len(), num_interactions); - let interactions = - Interactions::arbitrary_from(rng, &PanicGenerationContext, (env, plan.stats())); + let interactions = { + let conn_index = env.choose_conn(rng); + let conn_ctx = &env.connection_context(conn_index); + Interactions::arbitrary_from(rng, conn_ctx, (env, plan.stats(), conn_index)) + }; + interactions.shadow(&mut env.get_conn_tables_mut(interactions.connection_index)); plan.push(interactions); } @@ -1000,11 +1004,11 @@ fn random_fault(rng: &mut R, env: &SimulatorEnv) -> Interactions { Interactions::new(env.choose_conn(rng), InteractionsType::Fault(fault)) } -impl ArbitraryFrom<(&SimulatorEnv, InteractionStats)> for Interactions { +impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { fn arbitrary_from( rng: &mut R, - _context: &C, - (env, stats): (&SimulatorEnv, InteractionStats), + conn_ctx: &C, + (env, stats, conn_index): (&SimulatorEnv, InteractionStats, usize), ) -> Self { let remaining_ = remaining( env.opts.max_interactions, @@ -1012,7 +1016,6 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats)> for Interactions { &stats, env.profile.experimental_mvcc, ); - let conn_index = env.choose_conn(rng); frequency( vec![ ( @@ -1022,8 +1025,8 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats)> for Interactions { conn_index, InteractionsType::Property(Property::arbitrary_from( rng, - &PanicGenerationContext, - (env, &stats, conn_index), + conn_ctx, + (env, &stats), )), ) }), diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index bb4678872..847c30593 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1514,13 +1514,12 @@ fn property_faulty_query( } } -impl ArbitraryFrom<(&SimulatorEnv, &InteractionStats, usize)> for Property { +impl ArbitraryFrom<(&SimulatorEnv, &InteractionStats)> for Property { fn arbitrary_from( rng: &mut R, - _context: &C, - (env, stats, conn_index): (&SimulatorEnv, &InteractionStats, usize), + conn_ctx: &C, + (env, stats): (&SimulatorEnv, &InteractionStats), ) -> Self { - let conn_ctx = &env.connection_context(conn_index); let opts = conn_ctx.opts(); let remaining_ = remaining( env.opts.max_interactions, From 63177c42e4da3aa80cc148de2f4281f0744d1901 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sun, 21 Sep 2025 13:22:31 -0400 Subject: [PATCH 53/78] Add SQLITE_CONSTRAINT_UNIQUE error constant --- core/error.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/core/error.rs b/core/error.rs index 759c524cb..5eae56b6b 100644 --- a/core/error.rs +++ b/core/error.rs @@ -164,3 +164,4 @@ impl From for LimboError { pub const SQLITE_CONSTRAINT: usize = 19; pub const 
SQLITE_CONSTRAINT_PRIMARYKEY: usize = SQLITE_CONSTRAINT | (6 << 8); pub const SQLITE_CONSTRAINT_NOTNULL: usize = SQLITE_CONSTRAINT | (5 << 8); +pub const SQLITE_CONSTRAINT_UNIQUE: usize = 2067; From e545e75e31029519f521dc5dda2ce216d8edac3e Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sun, 21 Sep 2025 13:24:48 -0400 Subject: [PATCH 54/78] Emit Affinity instruction for unique index, and use no_constant_opt --- core/translate/emitter.rs | 57 ++++++++++++++++++++++++++++----------- 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index aff649cf8..721df5ff7 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -1,6 +1,7 @@ // This module contains code for emitting bytecode instructions for SQL query execution. // It handles translating high-level SQL operations into low-level bytecode that can be executed by the virtual machine. +use std::num::NonZeroUsize; use std::sync::Arc; use tracing::{instrument, Level}; @@ -25,8 +26,8 @@ use crate::function::Func; use crate::schema::{BTreeTable, Column, Schema, Table}; use crate::translate::compound_select::emit_program_for_compound_select; use crate::translate::expr::{ - emit_returning_results, translate_condition_expr, translate_expr_no_constant_opt, - walk_expr_mut, ConditionMetadata, NoConstantOptReason, ReturningValueRegisters, WalkControl, + emit_returning_results, translate_expr_no_constant_opt, walk_expr_mut, NoConstantOptReason, + ReturningValueRegisters, WalkControl, }; use crate::translate::plan::{DeletePlan, JoinedTable, Plan, QueryDestination, Search}; use crate::translate::result_row::try_fold_expr_to_i64; @@ -578,17 +579,20 @@ fn emit_delete_insns( .bind_where_expr(Some(table_references), connection) .expect("where clause to exist"); let skip_label = program.allocate_label(); - translate_condition_expr( + let reg = program.alloc_register(); + translate_expr_no_constant_opt( program, - table_references, + Some(table_references), &where_copy, - ConditionMetadata { - jump_if_condition_is_true: false, - jump_target_when_false: skip_label, - jump_target_when_true: BranchOffset::Placeholder, - }, + reg, &t_ctx.resolver, + NoConstantOptReason::RegisterReuse, )?; + program.emit_insn(Insn::IfNot { + reg, + jump_if_null: true, + target_pc: skip_label, + }); Some(skip_label) } else { None @@ -1075,7 +1079,7 @@ fn emit_update_insns( .bind_where_expr(Some(&mut plan.table_references), connection) .expect("where clause to exist"); let old_satisfied_reg = program.alloc_register(); - let old_satisfied_reg = translate_expr_no_constant_opt( + translate_expr_no_constant_opt( program, Some(&plan.table_references), &where_clause, @@ -1100,12 +1104,13 @@ fn emit_update_insns( )?; let new_satisfied_reg = program.alloc_register(); - translate_expr( + translate_expr_no_constant_opt( program, None, &new_where, new_satisfied_reg, &t_ctx.resolver, + NoConstantOptReason::RegisterReuse, )?; // now we have two registers that tell us whether or not the old and new values satisfy @@ -1204,6 +1209,20 @@ fn emit_update_insns( // Handle unique constraint if index.unique { + let aff = index + .columns + .iter() + .map(|ic| { + unsafe { &*table_ref }.columns()[ic.pos_in_table] + .affinity() + .aff_mask() + }) + .collect::(); + program.emit_insn(Insn::Affinity { + start_reg: idx_start_reg, + count: NonZeroUsize::new(num_cols).expect("nonzero col count"), + affinities: aff, + }); let constraint_check = program.allocate_label(); // check if the record already exists in the index for unique 
indexes and abort if so program.emit_insn(Insn::NoConflict { @@ -1782,24 +1801,32 @@ fn rewrite_where_for_update_registers( match e { Expr::Qualified(_, col) | Expr::DoublyQualified(_, _, col) => { let normalized = normalize_ident(col.as_str()); - if let Some((idx, _)) = columns.iter().enumerate().find(|(_, c)| { + if let Some((idx, c)) = columns.iter().enumerate().find(|(_, c)| { c.name .as_ref() .is_some_and(|n| n.eq_ignore_ascii_case(&normalized)) }) { - *e = Expr::Register(columns_start_reg + idx); + if c.is_rowid_alias { + *e = Expr::Register(rowid_reg); + } else { + *e = Expr::Register(columns_start_reg + idx); + } } } Expr::Id(ast::Name::Ident(name)) | Expr::Id(ast::Name::Quoted(name)) => { let normalized = normalize_ident(name.as_str()); if normalized.eq_ignore_ascii_case("rowid") { *e = Expr::Register(rowid_reg); - } else if let Some((idx, _)) = columns.iter().enumerate().find(|(_, c)| { + } else if let Some((idx, c)) = columns.iter().enumerate().find(|(_, c)| { c.name .as_ref() .is_some_and(|n| n.eq_ignore_ascii_case(&normalized)) }) { - *e = Expr::Register(columns_start_reg + idx); + if c.is_rowid_alias { + *e = Expr::Register(rowid_reg); + } else { + *e = Expr::Register(columns_start_reg + idx); + } } } Expr::RowId { .. } => { From 0ea6e5714dec061b3077b159bb8ca927a32d9d1d Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sun, 21 Sep 2025 13:27:50 -0400 Subject: [PATCH 55/78] Separate UPSERT behavior into preflight and commit state to prevent inserting idx before violating unique constraint --- core/translate/insert.rs | 482 ++++++++++++++++++++++++++------------- 1 file changed, 324 insertions(+), 158 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 9c942d83e..509eda6ce 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -1,18 +1,20 @@ +use std::num::NonZeroUsize; use std::sync::Arc; use turso_parser::ast::{ self, Expr, InsertBody, OneSelect, QualifiedName, ResolveType, ResultColumn, Upsert, UpsertDo, With, }; -use crate::error::{SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PRIMARYKEY}; -use crate::schema::{self, Table}; +use crate::error::{ + SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PRIMARYKEY, SQLITE_CONSTRAINT_UNIQUE, +}; +use crate::schema::{self, Index, Table}; use crate::translate::emitter::{ emit_cdc_insns, emit_cdc_patch_record, prepare_cdc_if_necessary, OperationMode, }; use crate::translate::expr::{ - bind_and_rewrite_expr, emit_returning_results, process_returning_clause, - translate_condition_expr, walk_expr_mut, ConditionMetadata, ParamState, - ReturningValueRegisters, WalkControl, + bind_and_rewrite_expr, emit_returning_results, process_returning_clause, walk_expr_mut, + ParamState, ReturningValueRegisters, WalkControl, }; use crate::translate::plan::TableReferences; use crate::translate::planner::ROWID; @@ -351,6 +353,7 @@ pub fn translate_insert( program.alloc_cursor_id(CursorType::BTreeTable(btree_table.clone())), ), }; + let has_upsert = upsert_opt.is_some(); // Set up the program to return result columns if RETURNING is specified if !result_columns.is_empty() { @@ -373,6 +376,9 @@ pub fn translate_insert( let insertion = build_insertion(&mut program, &table, &columns, num_values)?; + let upsert_entry = program.allocate_label(); + let conflict_rowid_reg = program.alloc_register(); + if inserting_multiple_rows { translate_rows_multiple( &mut program, @@ -444,41 +450,20 @@ pub fn translate_insert( // Conflict on rowid: attempt to route through UPSERT if it targets the PK, otherwise raise constraint. 
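+        // (routing sketch: for a CatchAll or PrimaryKey target the attempted key is
+        // copied into conflict_rowid_reg and control jumps to upsert_entry; any other
+        // target falls through so the constraint Halt below still fires.)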
// emit Halt for every case *except* when upsert handles the conflict 'emit_halt: { - if let (Some(ref mut upsert), Some(ref target)) = - (upsert_opt.as_mut(), resolved_upsert.as_ref()) - { + if let (Some(_), Some(ref target)) = (upsert_opt.as_mut(), resolved_upsert.as_ref()) { if matches!( target, ResolvedUpsertTarget::CatchAll | ResolvedUpsertTarget::PrimaryKey ) { - match upsert.do_clause { - UpsertDo::Nothing => { - program.emit_insn(Insn::Goto { - target_pc: row_done_label, - }); - } - UpsertDo::Set { - ref mut sets, - ref mut where_clause, - } => { - let mut rewritten_sets = collect_set_clauses_for_upsert(&table, sets)?; - emit_upsert( - &mut program, - schema, - &table, - &insertion, - cursor_id, - insertion.key_register(), - &mut rewritten_sets, - where_clause, - &resolver, - &idx_cursors, - &mut result_columns, - cdc_table.as_ref().map(|c| c.0), - row_done_label, - )?; - } - } + // PK conflict: the conflicting rowid is exactly the attempted key + program.emit_insn(Insn::Copy { + src_reg: insertion.key_register(), + dst_reg: conflict_rowid_reg, + extra_amount: 0, + }); + program.emit_insn(Insn::Goto { + target_pc: upsert_entry, + }); break 'emit_halt; } } @@ -507,6 +492,18 @@ pub fn translate_insert( _ => (), } + // We need to separate index handling and insertion into a `preflight` and a + // `commit` phase, because in UPSERT mode we might need to skip the actual insertion, as we can + // have a naked ON CONFLICT DO NOTHING, so if we eagerly insert any indexes, we could insert + // invalid index entries before we hit a conflict down the line. + // + // Preflight phase: evaluate each applicable UNIQUE constraint and probe with NoConflict. + // If any probe hits: + // DO NOTHING -> jump to row_done_label. + // + // DO UPDATE (matching target) -> fetch conflicting rowid and jump to `upsert_entry`. + // + // otherwise, raise SQLITE_CONSTRAINT_UNIQUE for index in schema.get_indices(table_name.as_str()) { let column_mappings = index .columns @@ -519,29 +516,25 @@ pub fn translate_insert( .map(|(_, _, c_id)| *c_id) .expect("no cursor found for index"); - let skip_index_label = if let Some(where_clause) = &index.where_clause { - // Clone and rewrite WHERE to use insertion registers + let maybe_skip_probe_label = if let Some(where_clause) = &index.where_clause { let mut where_for_eval = where_clause.as_ref().clone(); rewrite_partial_index_where(&mut where_for_eval, &insertion)?; - - // Evaluate rewritten WHERE clause - let skip_label = program.allocate_label(); - // We can use an empty TableReferences here because we shouldn't have any - // Expr::Column's in the partial index WHERE clause after rewriting it to use - // regsisters - let table_references = TableReferences::new_empty(); - translate_condition_expr( + let reg = program.alloc_register(); + translate_expr_no_constant_opt( &mut program, - &table_references, + Some(&TableReferences::new_empty()), &where_for_eval, - ConditionMetadata { - jump_if_condition_is_true: false, - jump_target_when_false: skip_label, - jump_target_when_true: BranchOffset::Placeholder, - }, + reg, &resolver, + NoConstantOptReason::RegisterReuse, )?; - Some(skip_label) + let lbl = program.allocate_label(); + program.emit_insn(Insn::IfNot { + reg, + target_pc: lbl, + jump_if_null: true, + }); + Some(lbl) } else { None }; @@ -550,6 +543,7 @@ pub fn translate_insert( // allocate scratch registers for the index columns plus rowid let idx_start_reg = program.alloc_registers(num_cols + 1); + // build unpacked key [idx_start_reg .. 
idx_start_reg+num_cols-1], and rowid in last reg, // copy each index column from the table's column registers into these scratch regs for (i, column_mapping) in column_mappings.clone().enumerate() { // copy from the table's column register over to the index's scratch register @@ -571,104 +565,131 @@ pub fn translate_insert( extra_amount: 0, }); - let record_reg = program.alloc_register(); - program.emit_insn(Insn::MakeRecord { - start_reg: idx_start_reg, - count: num_cols + 1, - dest_reg: record_reg, - index_name: Some(index.name.clone()), - affinity_str: None, - }); - if index.unique { - let label_idx_insert = program.allocate_label(); - program.emit_insn(Insn::NoConflict { - cursor_id: idx_cursor_id, - target_pc: label_idx_insert, - record_reg: idx_start_reg, - num_regs: num_cols, + let aff = index + .columns + .iter() + .map(|ic| table.columns()[ic.pos_in_table].affinity().aff_mask()) + .collect::(); + program.emit_insn(Insn::Affinity { + start_reg: idx_start_reg, + count: NonZeroUsize::new(num_cols).expect("nonzero col count"), + affinities: aff, }); - let column_names = index.columns.iter().enumerate().fold( - String::with_capacity(50), - |mut accum, (idx, column)| { - if idx > 0 { - accum.push_str(", "); - } - accum.push_str(table_name.as_str()); - accum.push('.'); - accum.push_str(&column.name); - accum - }, - ); - // again, emit halt for every case *except* when upsert handles the conflict - 'emit_halt: { - if let (Some(ref mut upsert), Some(ref target)) = - (upsert_opt.as_mut(), resolved_upsert.as_ref()) + + if has_upsert { + let next_check = program.allocate_label(); + program.emit_insn(Insn::NoConflict { + cursor_id: idx_cursor_id, + target_pc: next_check, + record_reg: idx_start_reg, + num_regs: num_cols, + }); + + // Conflict detected, figure out if this UPSERT handles the conflict + let upsert_matches_this_index = if let (Some(_u), Some(ref target)) = + (upsert_opt.as_ref(), resolved_upsert.as_ref()) { - if match target { + match target { ResolvedUpsertTarget::CatchAll => true, ResolvedUpsertTarget::Index(tgt) => Arc::ptr_eq(tgt, index), + // note: PK handled earlier by rowid path; this is a secondary index ResolvedUpsertTarget::PrimaryKey => false, - } { - match upsert.do_clause { - UpsertDo::Nothing => { - program.emit_insn(Insn::Goto { - target_pc: row_done_label, - }); - } - UpsertDo::Set { - ref mut sets, - ref mut where_clause, - } => { - let mut rewritten_sets = - collect_set_clauses_for_upsert(&table, sets)?; - let conflict_rowid_reg = program.alloc_register(); - program.emit_insn(Insn::IdxRowId { - cursor_id: idx_cursor_id, - dest: conflict_rowid_reg, - }); - emit_upsert( - &mut program, - schema, - &table, - &insertion, - cursor_id, - conflict_rowid_reg, - &mut rewritten_sets, - where_clause, - &resolver, - &idx_cursors, - &mut result_columns, - cdc_table.as_ref().map(|c| c.0), - row_done_label, - )?; - } - } - break 'emit_halt; } + } else { + false + }; + + if upsert_matches_this_index { + // Distinguish DO NOTHING vs DO UPDATE + match upsert_opt.as_ref().unwrap().do_clause { + UpsertDo::Nothing => { + // Bail out without writing anything + program.emit_insn(Insn::Goto { + target_pc: row_done_label, + }); + } + UpsertDo::Set { .. 
} => { + // Route to DO UPDATE: capture conflicting rowid then jump + program.emit_insn(Insn::IdxRowId { + cursor_id: idx_cursor_id, + dest: conflict_rowid_reg, + }); + program.emit_insn(Insn::Goto { + target_pc: upsert_entry, + }); + } + } + } else { + // No matching UPSERT handler so we emit constraint error + program.emit_insn(Insn::Halt { + err_code: SQLITE_CONSTRAINT_UNIQUE, + description: format_unique_violation_desc(table_name.as_str(), index), + }); } - // No matching UPSERT rule: unique constraint violation. + + // continue preflight with next constraint + program.preassign_label_to_next_insn(next_check); + } else { + // No UPSERT fast-path: probe and immediately insert + let ok = program.allocate_label(); + program.emit_insn(Insn::NoConflict { + cursor_id: idx_cursor_id, + target_pc: ok, + record_reg: idx_start_reg, + num_regs: num_cols, + }); + // Unique violation without ON CONFLICT clause -> error program.emit_insn(Insn::Halt { - err_code: SQLITE_CONSTRAINT_PRIMARYKEY, - description: column_names, + err_code: SQLITE_CONSTRAINT_UNIQUE, + description: format_unique_violation_desc(table_name.as_str(), index), + }); + program.preassign_label_to_next_insn(ok); + + // In the non-UPSERT case, we insert the index + let record_reg = program.alloc_register(); + program.emit_insn(Insn::MakeRecord { + start_reg: idx_start_reg, + count: num_cols + 1, + dest_reg: record_reg, + index_name: Some(index.name.clone()), + affinity_str: None, + }); + program.emit_insn(Insn::IdxInsert { + cursor_id: idx_cursor_id, + record_reg, + unpacked_start: Some(idx_start_reg), + unpacked_count: Some((num_cols + 1) as u16), + flags: IdxInsertFlags::new().nchange(true), + }); + } + } else { + // Non-unique index: in UPSERT mode we postpone writes to commit phase. + if !has_upsert { + // eager insert for non-unique, no UPSERT + let record_reg = program.alloc_register(); + program.emit_insn(Insn::MakeRecord { + start_reg: idx_start_reg, + count: num_cols + 1, + dest_reg: record_reg, + index_name: Some(index.name.clone()), + affinity_str: None, + }); + program.emit_insn(Insn::IdxInsert { + cursor_id: idx_cursor_id, + record_reg, + unpacked_start: Some(idx_start_reg), + unpacked_count: Some((num_cols + 1) as u16), + flags: IdxInsertFlags::new().nchange(true), }); } - program.resolve_label(label_idx_insert, program.offset()); } - // now do the actual index insertion using the unpacked registers - program.emit_insn(Insn::IdxInsert { - cursor_id: idx_cursor_id, - record_reg, - unpacked_start: Some(idx_start_reg), // TODO: enable optimization - unpacked_count: Some((num_cols + 1) as u16), - // TODO: figure out how to determine whether or not we need to seek prior to insert. 
- flags: IdxInsertFlags::new().nchange(true), - }); - if let Some(skip_label) = skip_index_label { - program.resolve_label(skip_label, program.offset()); + + // Close the partial-index skip (preflight) + if let Some(lbl) = maybe_skip_probe_label { + program.resolve_label(lbl, program.offset()); } } - for column_mapping in insertion .col_mappings .iter() @@ -705,6 +726,7 @@ pub fn translate_insert( }, }); } + // Create and insert the record let affinity_str = insertion .col_mappings @@ -719,6 +741,87 @@ pub fn translate_insert( index_name: None, affinity_str: Some(affinity_str), }); + + if has_upsert { + // COMMIT PHASE: no preflight jumps happened; emit the actual index writes now + // We re-check partial-index predicates against the NEW image, produce packed records, + // and insert into all applicable indexes, we do not re-probe uniqueness here, as preflight + // already guaranteed non-conflict. + for index in schema.get_indices(table_name.as_str()) { + let idx_cursor_id = idx_cursors + .iter() + .find(|(name, _, _)| *name == &index.name) + .map(|(_, _, c_id)| *c_id) + .expect("no cursor found for index"); + + // Re-evaluate partial predicate on the would-be inserted image + let commit_skip_label = if let Some(where_clause) = &index.where_clause { + let mut where_for_eval = where_clause.as_ref().clone(); + rewrite_partial_index_where(&mut where_for_eval, &insertion)?; + let reg = program.alloc_register(); + translate_expr_no_constant_opt( + &mut program, + Some(&TableReferences::new_empty()), + &where_for_eval, + reg, + &resolver, + NoConstantOptReason::RegisterReuse, + )?; + let lbl = program.allocate_label(); + program.emit_insn(Insn::IfNot { + reg, + target_pc: lbl, + jump_if_null: true, + }); + Some(lbl) + } else { + None + }; + + let num_cols = index.columns.len(); + let idx_start_reg = program.alloc_registers(num_cols + 1); + + // Build [key cols..., rowid] from insertion registers + for (i, idx_col) in index.columns.iter().enumerate() { + let Some(cm) = insertion.get_col_mapping_by_name(&idx_col.name) else { + return Err(crate::LimboError::PlanningError( + "Column not found in INSERT (commit phase)".to_string(), + )); + }; + program.emit_insn(Insn::Copy { + src_reg: cm.register, + dst_reg: idx_start_reg + i, + extra_amount: 0, + }); + } + program.emit_insn(Insn::Copy { + src_reg: insertion.key_register(), + dst_reg: idx_start_reg + num_cols, + extra_amount: 0, + }); + + let record_reg = program.alloc_register(); + program.emit_insn(Insn::MakeRecord { + start_reg: idx_start_reg, + count: num_cols + 1, + dest_reg: record_reg, + index_name: Some(index.name.clone()), + affinity_str: None, + }); + program.emit_insn(Insn::IdxInsert { + cursor_id: idx_cursor_id, + record_reg, + unpacked_start: Some(idx_start_reg), + unpacked_count: Some((num_cols + 1) as u16), + flags: IdxInsertFlags::new().nchange(true), + }); + + if let Some(lbl) = commit_skip_label { + program.resolve_label(lbl, program.offset()); + } + } + } + program.emit_insn(Insn::Insert { cursor: cursor_id, key_reg: insertion.key_register(), @@ -764,6 +867,45 @@ pub fn translate_insert( emit_returning_results(&mut program, &result_columns, &value_registers)?; } + program.emit_insn(Insn::Goto { + target_pc: row_done_label, + }); + + // Normal INSERT path is done above + // Any conflict routed to UPSERT jumps past all that to here: + program.preassign_label_to_next_insn(upsert_entry); + if let (Some(mut upsert), Some(_)) = (upsert_opt.take(), resolved_upsert.clone()) { + // Only DO UPDATE (SET ...); DO NOTHING should have already 
jumped to row_done_label earlier. + if let UpsertDo::Set { + ref mut sets, + ref mut where_clause, + } = upsert.do_clause + { + // Normalize SET pairs once + let mut rewritten_sets = collect_set_clauses_for_upsert(&table, sets)?; + + emit_upsert( + &mut program, + schema, + &table, + &insertion, + cursor_id, + conflict_rowid_reg, + &mut rewritten_sets, + where_clause, + &resolver, + &idx_cursors, + &mut result_columns, + cdc_table.as_ref().map(|c| c.0), + row_done_label, + )?; + } else { + // UpsertDo::Nothing case + program.emit_insn(Insn::Goto { + target_pc: row_done_label, + }); + } + } if inserting_multiple_rows { if let Some(temp_table_ctx) = temp_table_ctx { @@ -1231,11 +1373,52 @@ fn translate_virtual_table_insert( Ok(program) } +#[inline] +/// Build the UNIQUE constraint error description to match sqlite +/// single column: `t.c1` +/// multi-column: `t.(k, c1)` +pub fn format_unique_violation_desc(table_name: &str, index: &Index) -> String { + if index.columns.len() == 1 { + let mut s = String::with_capacity(table_name.len() + 1 + index.columns[0].name.len()); + s.push_str(table_name); + s.push('.'); + s.push_str(&index.columns[0].name); + s + } else { + let mut s = String::with_capacity(table_name.len() + 3 + 4 * index.columns.len()); + s.push_str(table_name); + s.push_str(".("); + s.push_str( + &index + .columns + .iter() + .map(|c| c.name.as_str()) + .collect::>() + .join(", "), + ); + s.push(')'); + s + } +} + /// Rewrite WHERE clause for partial index to reference insertion registers pub fn rewrite_partial_index_where( expr: &mut ast::Expr, insertion: &Insertion, ) -> crate::Result { + let col_reg = |name: &str| -> Option { + if name.eq_ignore_ascii_case("rowid") { + Some(insertion.key_register()) + } else if let Some(c) = insertion.get_col_mapping_by_name(name) { + if c.column.is_rowid_alias { + Some(insertion.key_register()) + } else { + Some(c.register) + } + } else { + None + } + }; walk_expr_mut( expr, &mut |e: &mut ast::Expr| -> crate::Result { @@ -1243,33 +1426,16 @@ pub fn rewrite_partial_index_where( // NOTE: should not have ANY Expr::Columns bound to the expr Expr::Id(ast::Name::Ident(name)) | Expr::Id(ast::Name::Quoted(name)) => { let normalized = normalize_ident(name.as_str()); - if normalized.eq_ignore_ascii_case("rowid") { - *e = Expr::Register(insertion.key_register()); - } else if let Some(col_mapping) = insertion.get_col_mapping_by_name(&normalized) - { - if col_mapping.column.is_rowid_alias { - *e = Expr::Register(insertion.key_register()); - } else { - *e = Expr::Register(col_mapping.register); - } + if let Some(reg) = col_reg(&normalized) { + *e = Expr::Register(reg); } } Expr::Qualified(_, col) | Expr::DoublyQualified(_, _, col) => { let normalized = normalize_ident(col.as_str()); - if normalized.eq_ignore_ascii_case("rowid") { - *e = Expr::Register(insertion.key_register()); - } else if let Some(col_mapping) = insertion.get_col_mapping_by_name(&normalized) - { - if col_mapping.column.is_rowid_alias { - *e = Expr::Register(insertion.key_register()); - } else { - *e = Expr::Register(col_mapping.register); - } + if let Some(reg) = col_reg(&normalized) { + *e = Expr::Register(reg); } } - Expr::RowId { .. 
} => {
-                *e = Expr::Register(insertion.key_register());
-            }
            _ => {}
        }
        Ok(WalkControl::Continue)

From 6fb4b038015d0ccd8317f859f67577dd9e2d019f Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Sun, 21 Sep 2025 13:28:36 -0400
Subject: [PATCH 56/78] Fix UPSERT handling, properly rebuild indexes only
 based on what columns they touch

---
 core/translate/upsert.rs | 685 ++++++++++++++++++++++++---------------
 1 file changed, 417 insertions(+), 268 deletions(-)

diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs
index 6cf520ad8..ce3c8666c 100644
--- a/core/translate/upsert.rs
+++ b/core/translate/upsert.rs
@@ -1,9 +1,12 @@
+use std::collections::HashSet;
+use std::num::NonZeroUsize;
 use std::{collections::HashMap, sync::Arc};

 use turso_parser::ast::{self, Upsert};

 use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY;
-use crate::translate::expr::WalkControl;
+use crate::translate::expr::{walk_expr, WalkControl};
+use crate::translate::insert::format_unique_violation_desc;
 use crate::vdbe::insn::CmpInsFlags;
 use crate::{
     bail_parse_error,
@@ -109,82 +112,139 @@ pub fn upsert_matches_pk(upsert: &Upsert, table: &Table) -> bool {
     })
 }

-#[derive(Hash, Debug, Eq, PartialEq, Clone)]
-/// A hashable descriptor of a single index key term used when
-/// matching an `ON CONFLICT` target against a UNIQUE index.
-/// captures only the attributes (name and effective collation) that
-/// determine whether two key terms are equivalent for conflict detection.
-pub struct KeySig {
-    /// column name, normalized to lowercase
-    name: String,
-    /// defaults to "binary" if not specified on the target or col
-    coll: String,
+/// Returns the set of changed column indices and whether the rowid was changed.
+fn collect_changed_cols(
+    table: &Table,
+    set_pairs: &[(usize, Box<ast::Expr>)],
+) -> (HashSet<usize>, bool) {
+    let mut cols_changed = HashSet::with_capacity(table.columns().len());
+    let mut rowid_changed = false;
+    for (col_idx, _) in set_pairs {
+        if let Some(c) = table.columns().get(*col_idx) {
+            if c.is_rowid_alias {
+                rowid_changed = true;
+            } else {
+                cols_changed.insert(*col_idx);
+            }
+        }
+    }
+    (cols_changed, rowid_changed)
 }

-/// Match ON CONFLICT target to a UNIQUE index, ignoring order, requiring exact
-/// coverage, and honoring collations. `table` is used to derive effective collation.
+#[inline]
+fn upsert_index_is_affected(
+    table: &Table,
+    idx: &Index,
+    changed_cols: &HashSet<usize>,
+    rowid_changed: bool,
+) -> bool {
+    if rowid_changed {
+        return true;
+    }
+    let km = index_keys(idx);
+    let pm = partial_index_cols(idx, table);
+    for c in km.iter().chain(pm.iter()) {
+        if changed_cols.contains(c) {
+            return true;
+        }
+    }
+    false
+}
+
+/// Columns used by the index key
+#[inline]
+fn index_keys(idx: &Index) -> Vec<usize> {
+    idx.columns.iter().map(|ic| ic.pos_in_table).collect()
+}
+
+/// Columns referenced by the partial WHERE (empty if none).
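// Worked example for the helper below (hypothetical schema): given `t(id, k, c0)`
// and a partial index `CREATE UNIQUE INDEX i ON t(k) WHERE c0 > 5 AND lower(k) = k`,
// walking the WHERE expression collects the table positions of every column it
// references, so the result here would be {2, 1} (c0 and k).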
+fn partial_index_cols(idx: &Index, table: &Table) -> HashSet { + use ast::{Expr, Name}; + let Some(expr) = &idx.where_clause else { + return HashSet::new(); + }; + let mut out = HashSet::new(); + let _ = walk_expr(expr, &mut |e: &ast::Expr| -> crate::Result { + match e { + Expr::Id(Name::Ident(n) | Name::Quoted(n)) => { + if let Some((i, _)) = table.get_column_by_name(&normalize_ident(n.as_str())) { + out.insert(i); + } + } + Expr::Qualified(ns, Name::Ident(c) | Name::Quoted(c)) + | Expr::DoublyQualified(_, ns, Name::Ident(c) | Name::Quoted(c)) => { + // Only count columns that belong to this table + let nsn = normalize_ident(ns.as_str()); + let tname = normalize_ident(table.get_name()); + if nsn.eq_ignore_ascii_case(&tname) { + if let Some((i, _)) = table.get_column_by_name(&normalize_ident(c.as_str())) { + out.insert(i); + } + } + } + _ => {} + } + Ok(WalkControl::Continue) + }); + out +} + +/// Match ON CONFLICT target to a UNIQUE index, *ignoring order* but requiring +/// exact coverage (same column multiset). If the target specifies a COLLATED +/// column, the collation must match the index column's effective collation. +/// If the target omits collation, any index collation is accepted. +/// Partial (WHERE) indexes never match. pub fn upsert_matches_index(upsert: &Upsert, index: &Index, table: &Table) -> bool { let Some(target) = upsert.index.as_ref() else { - // catch-all return true; }; - // if not unique or column count differs, no match - if !index.unique || target.targets.len() != index.columns.len() { + // must be a non-partial UNIQUE index with identical arity + if !index.unique || index.where_clause.is_some() || target.targets.len() != index.columns.len() + { return false; } - let mut need: HashMap = HashMap::new(); - for ic in &index.columns { - let sig = KeySig { - name: normalize_ident(&ic.name).to_string(), - coll: effective_collation_for_index_col(ic, table), - }; - *need.entry(sig).or_insert(0) += 1; - } - - // Consume from the multiset using target entries, order-insensitive + // Build a multiset of index columns: (normalized name, effective collation) + // effective collation = index collation if set, else table column default, else "binary" + let mut idx_cols: Vec<(String, String)> = index + .columns + .iter() + .map(|ic| { + ( + normalize_ident(&ic.name), + effective_collation_for_index_col(ic, table), + ) + }) + .collect(); + // For each target key, locate a matching index column (name equal ignoring case, + // and collation equal iff the target specifies one). Consume each match once. for te in &target.targets { - let tk = match extract_target_key(&te.expr) { - Some(x) => x, - None => return false, // not a simple column ref + let Some(tk) = extract_target_key(&te.expr) else { + return false; }; + let tname = tk.col_name; + let mut found = None; - // Candidate signatures for this target: - // If target specifies COLLATE, require exact match on (name, coll). - // Otherwise, accept any collation currently present for that name. 
- let mut matched = false; - if let Some(ref coll) = tk.collate { - let sig = KeySig { - name: tk.col_name.to_string(), - coll: coll.clone(), - }; - if let Some(cnt) = need.get_mut(&sig) { - *cnt -= 1; - if *cnt == 0 { - need.remove(&sig); + for (i, (iname, icoll)) in idx_cols.iter().enumerate() { + if tname.eq_ignore_ascii_case(iname) + && match tk.collate.as_ref() { + Some(c) => c.eq_ignore_ascii_case(icoll), + None => true, // unspecified collation -> accept any } - matched = true; - } - } else { - // Try any available collation for this column name - if let Some((sig, cnt)) = need - .iter_mut() - .find(|(k, _)| k.name.eq_ignore_ascii_case(&tk.col_name)) { - *cnt -= 1; - if *cnt == 0 { - let key = sig.clone(); - need.remove(&key); - } - matched = true; + found = Some(i); + break; } } - if !matched { + if let Some(i) = found { + // consume this index column once (multiset match) + idx_cols.swap_remove(i); + } else { return false; } } - // All targets matched exactly. - need.is_empty() + // All target columns matched exactly once + idx_cols.is_empty() } #[derive(Clone)] @@ -213,7 +273,7 @@ pub fn resolve_upsert_target( } for idx in schema.get_indices(table.get_name()) { - if idx.unique && idx.where_clause.is_none() && upsert_matches_index(upsert, idx, table) { + if idx.unique && upsert_matches_index(upsert, idx, table) { return Ok(ResolvedUpsertTarget::Index(Arc::clone(idx))); } } @@ -261,7 +321,7 @@ pub fn emit_upsert( cdc_cursor_id: Option, row_done_label: BranchOffset, ) -> crate::Result<()> { - // Seek and snapshot current row + // Seek & snapshot CURRENT program.emit_insn(Insn::SeekRowid { cursor_id: tbl_cursor_id, src_reg: conflict_rowid_reg, @@ -285,7 +345,7 @@ pub fn emit_upsert( } } - // Keep BEFORE snapshot if needed + // BEFORE for index maintenance / CDC let before_start = if cdc_cursor_id.is_some() || !idx_cursors.is_empty() { let s = program.alloc_registers(num_cols); program.emit_insn(Insn::Copy { @@ -298,8 +358,7 @@ pub fn emit_upsert( None }; - // NEW snapshot starts as a copy of CURRENT, then SET expressions overwrite - // the assigned columns. matching SQLite semantics of UPDATE reading the old row. + // NEW = CURRENT, then apply SET let new_start = program.alloc_registers(num_cols); program.emit_insn(Insn::Copy { src_reg: current_start, @@ -307,15 +366,16 @@ pub fn emit_upsert( extra_amount: num_cols - 1, }); - // WHERE predicate on the target row. If false or NULL, skip the UPDATE. 
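// Register model for the DO UPDATE arm, e.g. for
//   INSERT INTO t(k, v) VALUES ('a', 2)
//   ON CONFLICT(k) DO UPDATE SET v = excluded.v WHERE t.v < excluded.v;
// CURRENT holds the conflicting row as stored, NEW starts as a copy of
// CURRENT and is then overwritten by the SET expressions, and EXCLUDED.*
// reads the would-be inserted values from the insertion registers.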
+    // WHERE on target row
     if let Some(pred) = where_clause.as_mut() {
-        rewrite_upsert_expr_in_place(
+        rewrite_expr_to_registers(
             pred,
             table,
-            table.get_name(),
             current_start,
             conflict_rowid_reg,
-            insertion,
+            Some(table.get_name()),
+            Some(insertion),
+            true,
         )?;
         let pr = program.alloc_register();
         translate_expr(program, None, pred, pr, resolver)?;
@@ -326,15 +386,17 @@
         });
     }

-    // Evaluate each SET expression into the NEW row img
+    // Apply SET; capture rowid change if any
+    let mut new_rowid_reg: Option<usize> = None;
     for (col_idx, expr) in set_pairs.iter_mut() {
-        rewrite_upsert_expr_in_place(
+        rewrite_expr_to_registers(
             expr,
             table,
-            table.get_name(),
             current_start,
             conflict_rowid_reg,
-            insertion,
+            Some(table.get_name()),
+            Some(insertion),
+            true,
         )?;
         translate_expr_no_constant_opt(
             program,
@@ -349,12 +411,22 @@
             program.emit_insn(Insn::HaltIfNull {
                 target_reg: new_start + *col_idx,
                 err_code: SQLITE_CONSTRAINT_NOTNULL,
-                description: format!("{}.{}", table.get_name(), col.name.as_ref().unwrap()),
+                description: String::from(table.get_name()) + "." + col.name.as_ref().unwrap(),
             });
         }
+        if col.is_rowid_alias {
+            // Must be integer; remember the NEW rowid value
+            let r = program.alloc_register();
+            program.emit_insn(Insn::Copy {
+                src_reg: new_start + *col_idx,
+                dst_reg: r,
+                extra_amount: 0,
+            });
+            program.emit_insn(Insn::MustBeInt { reg: r });
+            new_rowid_reg = Some(r);
+        }
     }

-    // If STRICT, perform type checks on the NEW image
     if let Some(bt) = table.btree() {
         if bt.is_strict {
             program.emit_insn(Insn::TypeCheck {
@@ -366,49 +438,34 @@
         }
     }

-    // Rebuild indexes: remove keys corresponding to BEFORE and insert keys for NEW.
+    // Index rebuild (DELETE old, INSERT new), honoring partial-index WHEREs
    if let Some(before) = before_start {
+        let (changed_cols, rowid_changed) = collect_changed_cols(table, set_pairs);
+
        for (idx_name, _root, idx_cid) in idx_cursors {
            let idx_meta = schema
                .get_index(table.get_name(), idx_name)
                .expect("index exists");
+
+            if !upsert_index_is_affected(table, idx_meta, &changed_cols, rowid_changed) {
+                continue; // skip untouched index completely
+            }
            let k = idx_meta.columns.len();

-            let (before_pred_reg, new_pred_reg) = if let Some(where_clause) = &idx_meta.where_clause
-            {
-                // BEFORE image predicate
-                let mut before_where = where_clause.as_ref().clone();
-                rewrite_partial_index_where_for_image(
-                    &mut before_where,
-                    table,
-                    before_start.expect("before_start must exist for index maintenance"),
-                    conflict_rowid_reg,
-                )?;
-                let before_reg = program.alloc_register();
-                translate_expr_no_constant_opt(
-                    program,
-                    None,
-                    &before_where,
-                    before_reg,
-                    resolver,
-                    NoConstantOptReason::RegisterReuse,
-                )?;
+            let before_pred_reg = eval_partial_pred_for_row_image(
+                program,
+                table,
+                idx_meta,
+                before,
+                conflict_rowid_reg,
+                resolver,
+            );
+            let new_rowid = new_rowid_reg.unwrap_or(conflict_rowid_reg);
+            let new_pred_reg = eval_partial_pred_for_row_image(
+                program, table, idx_meta, new_start, new_rowid, resolver,
+            );

-                // NEW image predicate
-                let mut new_where = where_clause.as_ref().clone();
-                rewrite_partial_index_where_for_image(
-                    &mut new_where,
-                    table,
-                    new_start,
-                    conflict_rowid_reg,
-                )?;
-                let new_reg = program.alloc_register();
-                translate_expr(program, None, &new_where, new_reg, resolver)?;
-
-                (Some(before_reg), Some(new_reg))
-            } else {
-                (None, None)
-            };
+            // Skip delete if BEFORE predicate false/NULL
            let maybe_skip_del = before_pred_reg.map(|r| {
                let lbl = program.allocate_label();
program.emit_insn(Insn::IfNot { @@ -419,6 +476,7 @@ pub fn emit_upsert( lbl }); + // DELETE old key let del = program.alloc_registers(k + 1); for (i, ic) in idx_meta.columns.iter().enumerate() { let (ci, _) = table.get_column_by_name(&ic.name).unwrap(); @@ -439,13 +497,11 @@ pub fn emit_upsert( cursor_id: *idx_cid, raise_error_if_no_matching_entry: false, }); - - // resolve skipping the delete if it was false/NULL if let Some(label) = maybe_skip_del { program.resolve_label(label, program.offset()); } - // if NEW does not satisfy partial index, skip the insert + // Skip insert if NEW predicate false/NULL let maybe_skip_ins = new_pred_reg.map(|r| { let lbl = program.allocate_label(); program.emit_insn(Insn::IfNot { @@ -456,6 +512,7 @@ pub fn emit_upsert( lbl }); + // INSERT new key (use NEW rowid if present) let ins = program.alloc_registers(k + 1); for (i, ic) in idx_meta.columns.iter().enumerate() { let (ci, _) = table.get_column_by_name(&ic.name).unwrap(); @@ -466,7 +523,7 @@ pub fn emit_upsert( }); } program.emit_insn(Insn::Copy { - src_reg: conflict_rowid_reg, + src_reg: new_rowid, dst_reg: ins + k, extra_amount: 0, }); @@ -480,54 +537,51 @@ pub fn emit_upsert( affinity_str: None, }); - // If unique, perform NoConflict + self-check before IdxInsert if idx_meta.unique { - let ok_lbl = program.allocate_label(); + // Affinity on the key columns for the NoConflict probe + let ok = program.allocate_label(); + let aff: String = idx_meta + .columns + .iter() + .map(|c| { + table + .get_column_by_name(&c.name) + .map(|(_, col)| col.affinity().aff_mask()) + .unwrap_or('B') + }) + .collect(); + + program.emit_insn(Insn::Affinity { + start_reg: ins, + count: NonZeroUsize::new(k).unwrap(), + affinities: aff, + }); program.emit_insn(Insn::NoConflict { cursor_id: *idx_cid, - target_pc: ok_lbl, + target_pc: ok, record_reg: ins, num_regs: k, }); - - // If there’s a hit, skip it if it’s self, otherwise raise constraint - let hit_rowid = program.alloc_register(); + let hit = program.alloc_register(); program.emit_insn(Insn::IdxRowId { cursor_id: *idx_cid, - dest: hit_rowid, + dest: hit, }); program.emit_insn(Insn::Eq { - lhs: conflict_rowid_reg, - rhs: hit_rowid, - target_pc: ok_lbl, + lhs: new_rowid, + rhs: hit, + target_pc: ok, flags: CmpInsFlags::default(), collation: program.curr_collation(), }); - let mut description = String::with_capacity( - table.get_name().len() - + idx_meta - .columns - .iter() - .map(|c| c.name.len() + 2) - .sum::(), - ); - description.push_str(table.get_name()); - description.push_str(".("); - description.push_str( - &idx_meta - .columns - .iter() - .map(|c| c.name.as_str()) - .collect::>() - .join(", "), - ); - description.push(')'); + let description = format_unique_violation_desc(table.get_name(), idx_meta); program.emit_insn(Insn::Halt { err_code: SQLITE_CONSTRAINT_PRIMARYKEY, description, }); - program.preassign_label_to_next_insn(ok_lbl); + program.preassign_label_to_next_insn(ok); } + program.emit_insn(Insn::IdxInsert { cursor_id: *idx_cid, record_reg: rec, @@ -535,21 +589,20 @@ pub fn emit_upsert( unpacked_count: Some((k + 1) as u16), flags: IdxInsertFlags::new().nchange(true), }); + if let Some(lbl) = maybe_skip_ins { program.resolve_label(lbl, program.offset()); } } } - // Write table row (same rowid, new payload) + // Build NEW table payload let rec = program.alloc_register(); - let affinity_str = table .columns() .iter() - .map(|col| col.affinity().aff_mask()) + .map(|c| c.affinity().aff_mask()) .collect::(); - program.emit_insn(Insn::MakeRecord { start_reg: 
new_start, count: num_cols, @@ -557,59 +610,155 @@ pub fn emit_upsert( index_name: None, affinity_str: Some(affinity_str), }); - program.emit_insn(Insn::Insert { - cursor: tbl_cursor_id, - key_reg: conflict_rowid_reg, - record_reg: rec, - flag: InsertFlags::new(), - table_name: table.get_name().to_string(), - }); - if let Some(cdc_id) = cdc_cursor_id { - let after_rec = if program.capture_data_changes_mode().has_after() { - Some(emit_cdc_patch_record( - program, - table, - new_start, - rec, - conflict_rowid_reg, - )) - } else { - None - }; - // Build BEFORE if needed - let before_rec = if program.capture_data_changes_mode().has_before() { - Some(emit_cdc_full_record( - program, - table.columns(), - tbl_cursor_id, - conflict_rowid_reg, - )) - } else { - None - }; - emit_cdc_insns( - program, - resolver, - OperationMode::UPDATE, - cdc_id, - conflict_rowid_reg, - before_rec, - after_rec, - None, - table.get_name(), - )?; + // If rowid changed, first ensure no other row owns it, then delete+insert + if let Some(rnew) = new_rowid_reg { + let ok = program.allocate_label(); + + // If equal to old rowid, skip uniqueness probe + program.emit_insn(Insn::Eq { + lhs: rnew, + rhs: conflict_rowid_reg, + target_pc: ok, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + + // If another row already has rnew -> constraint + program.emit_insn(Insn::NotExists { + cursor: tbl_cursor_id, + rowid_reg: rnew, + target_pc: ok, + }); + program.emit_insn(Insn::Halt { + err_code: SQLITE_CONSTRAINT_PRIMARYKEY, + description: format!( + "{}.{}", + table.get_name(), + table + .columns() + .iter() + .find(|c| c.is_rowid_alias) + .and_then(|c| c.name.as_ref()) + .unwrap_or(&"rowid".to_string()) + ), + }); + program.preassign_label_to_next_insn(ok); + + // Now replace the row + program.emit_insn(Insn::Delete { + cursor_id: tbl_cursor_id, + table_name: table.get_name().to_string(), + }); + program.emit_insn(Insn::Insert { + cursor: tbl_cursor_id, + key_reg: rnew, + record_reg: rec, + flag: InsertFlags::new().require_seek().update_rowid_change(), + table_name: table.get_name().to_string(), + }); + } else { + program.emit_insn(Insn::Insert { + cursor: tbl_cursor_id, + key_reg: conflict_rowid_reg, + record_reg: rec, + flag: InsertFlags::new(), + table_name: table.get_name().to_string(), + }); } + // emit CDC instructions + if let Some(cdc_id) = cdc_cursor_id { + let new_rowid = new_rowid_reg.unwrap_or(conflict_rowid_reg); + if new_rowid_reg.is_some() { + // DELETE (before) + let before_rec = if program.capture_data_changes_mode().has_before() { + Some(emit_cdc_full_record( + program, + table.columns(), + tbl_cursor_id, + conflict_rowid_reg, + )) + } else { + None + }; + emit_cdc_insns( + program, + resolver, + OperationMode::DELETE, + cdc_id, + conflict_rowid_reg, + before_rec, + None, + None, + table.get_name(), + )?; + + // INSERT (after) + let after_rec = if program.capture_data_changes_mode().has_after() { + Some(emit_cdc_patch_record( + program, table, new_start, rec, new_rowid, + )) + } else { + None + }; + emit_cdc_insns( + program, + resolver, + OperationMode::INSERT, + cdc_id, + new_rowid, + None, + after_rec, + None, + table.get_name(), + )?; + } else { + let after_rec = if program.capture_data_changes_mode().has_after() { + Some(emit_cdc_patch_record( + program, + table, + new_start, + rec, + conflict_rowid_reg, + )) + } else { + None + }; + let before_rec = if program.capture_data_changes_mode().has_before() { + Some(emit_cdc_full_record( + program, + table.columns(), + tbl_cursor_id, + 
conflict_rowid_reg, + )) + } else { + None + }; + emit_cdc_insns( + program, + resolver, + OperationMode::UPDATE, + cdc_id, + conflict_rowid_reg, + before_rec, + after_rec, + None, + table.get_name(), + )?; + } + } + + // RETURNING from NEW image + final rowid if !returning.is_empty() { let regs = ReturningValueRegisters { - rowid_register: conflict_rowid_reg, + rowid_register: new_rowid_reg.unwrap_or(conflict_rowid_reg), columns_start_register: new_start, num_columns: num_cols, }; - emit_returning_results(program, returning, ®s)?; } + program.emit_insn(Insn::Goto { target_pc: row_done_label, }); @@ -620,7 +769,6 @@ pub fn emit_upsert( /// /// Supports multi-target row-value SETs: `SET (a, b) = (expr1, expr2)`. /// Enforces same number of column names and RHS values. -/// Rewrites `EXCLUDED.*` references to direct `Register` reads from the insertion registers /// If the same column is assigned multiple times, the last assignment wins. pub fn collect_set_clauses_for_upsert( table: &Table, @@ -661,108 +809,109 @@ pub fn collect_set_clauses_for_upsert( Ok(out) } -/// Rewrite an UPSERT expression so that: -/// EXCLUDED.x -> Register(insertion.x) -/// t.x / x -> Register(CURRENT.x) when t == target table or unqualified -/// rowid -> Register(conflict_rowid_reg) +fn eval_partial_pred_for_row_image( + prg: &mut ProgramBuilder, + table: &Table, + idx: &Index, + row_start: usize, // base of CURRENT or NEW image + rowid_reg: usize, // rowid for that image + resolver: &Resolver, +) -> Option { + let Some(where_expr) = &idx.where_clause else { + return None; + }; + let mut e = where_expr.as_ref().clone(); + rewrite_expr_to_registers( + &mut e, table, row_start, rowid_reg, None, // table_name + None, // insertion + false, // dont allow EXCLUDED + ) + .ok()?; + let r = prg.alloc_register(); + translate_expr_no_constant_opt( + prg, + None, + &e, + r, + resolver, + NoConstantOptReason::RegisterReuse, + ) + .ok()?; + Some(r) +} + +/// Generic rewriter that maps column references to registers for a given row image. /// -/// Only rewrites names in the current expression scope, does not enter subqueries. -fn rewrite_upsert_expr_in_place( +/// - Id/Qualified refs to the *target table* (when `table_name` is provided) resolve +/// to the CURRENT/NEW row image starting at `base_start`, with `rowid` (or the +/// rowid-alias) mapped to `rowid_reg`. +/// - If `allow_excluded` and `insertion` are provided, `EXCLUDED.x` resolves to the +/// insertion registers (and `EXCLUDED.rowid` resolves to `insertion.key_register()`). +/// - If `table_name` is `None`, qualified refs never match +/// - Leaves names from other tables/namespaces untouched. +fn rewrite_expr_to_registers( e: &mut ast::Expr, table: &Table, - table_name: &str, - current_start: usize, - conflict_rowid_reg: usize, - insertion: &Insertion, + base_start: usize, + rowid_reg: usize, + table_name: Option<&str>, + insertion: Option<&Insertion>, + allow_excluded: bool, ) -> crate::Result { use ast::{Expr, Name}; + let table_name_norm = table_name.map(normalize_ident); - let col_reg = |name: &str| -> Option { + // Map a column name to a register within the row image at `base_start`. 
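    // e.g., for `t(id INTEGER PRIMARY KEY, a, b)` with base_start = 10 and
    // rowid_reg = 9: `a` resolves to register 11, `b` to register 12, while
    // `rowid` and the rowid alias `id` both resolve to register 9.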
+ let col_reg_from_row_image = |name: &str| -> Option { if name.eq_ignore_ascii_case("rowid") { - return Some(conflict_rowid_reg); + return Some(rowid_reg); + } + let (idx, c) = table.get_column_by_name(name)?; + if c.is_rowid_alias { + Some(rowid_reg) + } else { + Some(base_start + idx) } - let (idx, _) = table.get_column_by_name(name)?; - Some(current_start + idx) }; walk_expr_mut( e, &mut |expr: &mut ast::Expr| -> crate::Result { match expr { - // EXCLUDED.x or t.x (t may be quoted) Expr::Qualified(ns, Name::Ident(c) | Name::Quoted(c)) | Expr::DoublyQualified(_, ns, Name::Ident(c) | Name::Quoted(c)) => { let ns = normalize_ident(ns.as_str()); let c = normalize_ident(c.as_str()); - if ns.eq_ignore_ascii_case("excluded") { - let Some(reg) = insertion.get_col_mapping_by_name(&c) else { - bail_parse_error!("no such column in EXCLUDED: {}", c); - }; - *expr = Expr::Register(reg.register); - } else if ns.eq_ignore_ascii_case(table_name) { - if let Some(reg) = col_reg(c.as_str()) { - *expr = Expr::Register(reg); + // Handle EXCLUDED.* if enabled + if allow_excluded && ns.eq_ignore_ascii_case("excluded") { + if let Some(ins) = insertion { + if c.eq_ignore_ascii_case("rowid") { + *expr = Expr::Register(ins.key_register()); + } else if let Some(cm) = ins.get_col_mapping_by_name(&c) { + *expr = Expr::Register(cm.register); + } else { + bail_parse_error!("no such column in EXCLUDED: {}", c); + } + } + // If insertion is None, leave EXCLUDED.* untouched. + return Ok(WalkControl::Continue); + } + + // Match the target table namespace if provided + if let Some(ref tn) = table_name_norm { + if ns.eq_ignore_ascii_case(tn) { + if let Some(r) = col_reg_from_row_image(&c) { + *expr = Expr::Register(r); + } } } } - // Unqualified column id -> CURRENT + // Unqualified id -> row image (CURRENT/NEW depending on caller) Expr::Id(Name::Ident(name)) | Expr::Id(Name::Quoted(name)) => { - if let Some(reg) = col_reg(&normalize_ident(name.as_str())) { - *expr = Expr::Register(reg); + if let Some(r) = col_reg_from_row_image(&normalize_ident(name.as_str())) { + *expr = Expr::Register(r); } } - Expr::RowId { .. } => { - *expr = Expr::Register(conflict_rowid_reg); - } - _ => {} - } - Ok(WalkControl::Continue) - }, - ) -} - -/// Rewrite partial-index WHERE to read from a contiguous row image starting at `base_start`. -/// Maps rowid (and the rowid-alias column) to `rowid_reg`... Very similar to the above method -/// but simpler because there is no EXCLUDED or table name to consider. -fn rewrite_partial_index_where_for_image( - expr: &mut ast::Expr, - table: &Table, - base_start: usize, - rowid_reg: usize, -) -> crate::Result { - walk_expr_mut( - expr, - &mut |e: &mut ast::Expr| -> crate::Result { - match e { - ast::Expr::Id(n) => { - let nm = normalize_ident(n.as_str()); - if nm.eq_ignore_ascii_case("rowid") { - *e = ast::Expr::Register(rowid_reg); - } else if let Some((col_idx, _)) = table.get_column_by_name(&nm) { - let col = &table.columns()[col_idx]; - *e = ast::Expr::Register(if col.is_rowid_alias { - rowid_reg - } else { - base_start + col_idx - }); - } - } - ast::Expr::Qualified(_, cn) | ast::Expr::DoublyQualified(_, _, cn) => { - let nm = normalize_ident(cn.as_str()); - if nm.eq_ignore_ascii_case("rowid") { - *e = ast::Expr::Register(rowid_reg); - } else if let Some((col_idx, _)) = table.get_column_by_name(&nm) { - let col = &table.columns()[col_idx]; - *e = ast::Expr::Register(if col.is_rowid_alias { - rowid_reg - } else { - base_start + col_idx - }); - } - } - ast::Expr::RowId { .. 
} => { - *e = ast::Expr::Register(rowid_reg); - } _ => {} } Ok(WalkControl::Continue) From a1ca56620a24499b8ab242e2da28004be20b45d7 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sun, 21 Sep 2025 13:29:01 -0400 Subject: [PATCH 57/78] Add SQLITE_CONSTRAINT_UNIQUE constraint to op_halt handling --- core/vdbe/execute.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 5099f7831..ec1c9b526 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -1,4 +1,5 @@ #![allow(unused_variables)] +use crate::error::SQLITE_CONSTRAINT_UNIQUE; use crate::function::AlterTableFunc; use crate::numeric::{NullableInteger, Numeric}; use crate::schema::Table; @@ -2094,6 +2095,11 @@ pub fn halt( "NOT NULL constraint failed: {description} (19)" ))); } + SQLITE_CONSTRAINT_UNIQUE => { + return Err(LimboError::Constraint(format!( + "UNIQUE constraint failed: {description} (19)" + ))); + } _ => { return Err(LimboError::Constraint(format!( "undocumented halt error code {description}" From 48679993817215b7df3f202dcc045e9f15636e13 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sun, 21 Sep 2025 13:29:30 -0400 Subject: [PATCH 58/78] Add partial_index_mutation_and_upsert_fuzz test --- tests/integration/fuzz/mod.rs | 285 ++++++++++++++++++++++++++++++++++ 1 file changed, 285 insertions(+) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 7c950035f..4317bbb91 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -656,6 +656,291 @@ mod tests { } } + #[test] + pub fn partial_index_mutation_and_upsert_fuzz() { + let _ = env_logger::try_init(); + const OUTER_ITERS: usize = 5; + const INNER_ITERS: usize = 200; + + let (mut rng, seed) = if std::env::var("SEED").is_ok() { + let seed = std::env::var("SEED").unwrap().parse::().unwrap(); + (ChaCha8Rng::seed_from_u64(seed), seed) + } else { + rng_from_time() + }; + println!("partial_index_mutation_and_upsert_fuzz seed: {seed}"); + // we want to hit unique constraints fairly often so limit the insert values + const K_POOL: [&str; 35] = [ + "a", "aa", "abc", "A", "B", "zzz", "foo", "bar", "baz", "fizz", "buzz", "bb", "cc", + "dd", "ee", "ff", "gg", "hh", "jj", "kk", "ll", "mm", "nn", "oo", "pp", "qq", "rr", + "ss", "tt", "uu", "vv", "ww", "xx", "yy", "zz", + ]; + for outer in 0..OUTER_ITERS { + println!( + "partial_index_mutation_and_upsert_fuzz iteration {}/{}", + outer + 1, + OUTER_ITERS + ); + + // Columns: id (rowid PK), plus a few data columns we can reference in predicates/keys. + let limbo_db = TempDatabase::new_empty(true); + let sqlite_db = TempDatabase::new_empty(true); + let limbo_conn = limbo_db.connect_limbo(); + let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap(); + + let num_cols = rng.random_range(2..=4); + // We'll always include a TEXT "k" and a couple INT columns to give predicates variety. + // Build: id INTEGER PRIMARY KEY, k TEXT, c0 INT, c1 INT, ... 
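            // e.g., with num_cols = 3 this yields:
            //   CREATE TABLE t (id INTEGER PRIMARY KEY, k TEXT, c0 INT, c1 INT)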
+ let mut cols: Vec = vec!["id INTEGER PRIMARY KEY".into(), "k TEXT".into()]; + for i in 0..(num_cols - 1) { + cols.push(format!("c{i} INT")); + } + let create = format!("CREATE TABLE t ({})", cols.join(", ")); + limbo_exec_rows(&limbo_db, &limbo_conn, &create); + sqlite.execute(&create, rusqlite::params![]).unwrap(); + + // Helper to list usable columns for keys/predicates + let int_cols: Vec = (0..(num_cols - 1)).map(|i| format!("c{i}")).collect(); + let functions = ["lower", "upper", "length"]; + + let num_pidx = rng.random_range(0..=3); + let mut idx_ddls: Vec = Vec::new(); + for i in 0..num_pidx { + // Pick 1 or 2 key columns; always include "k" sometimes to get frequent conflicts. + let mut key_cols = Vec::new(); + if rng.random_bool(0.7) { + key_cols.push("k".to_string()); + } + if key_cols.is_empty() || rng.random_bool(0.5) { + // Add one INT col to make compound keys common + if !int_cols.is_empty() { + let c = int_cols[rng.random_range(0..int_cols.len())].clone(); + if !key_cols.contains(&c) { + key_cols.push(c); + } + } + } + // Ensure at least one key column + if key_cols.is_empty() { + key_cols.push("k".to_string()); + } + // Build a simple deterministic partial predicate: + // Examples: + // c0 > 10 AND c1 < 50 + // c0 IS NOT NULL + // id > 5 AND c0 >= 0 + // lower(k) = k + let pred = { + // parts we can AND/OR (we’ll only AND for stability) + let mut parts: Vec = Vec::new(); + + // Maybe include rowid (id) bound + if rng.random_bool(0.4) { + let n = rng.random_range(0..20); + let op = *["<", "<=", ">", ">="].choose(&mut rng).unwrap(); + parts.push(format!("id {op} {n}")); + } + + // Maybe include int column comparison + if !int_cols.is_empty() && rng.random_bool(0.8) { + let c = &int_cols[rng.random_range(0..int_cols.len())]; + match rng.random_range(0..3) { + 0 => parts.push(format!("{c} IS NOT NULL")), + 1 => { + let n = rng.random_range(-10..=20); + let op = *["<", "<=", "=", ">=", ">"].choose(&mut rng).unwrap(); + parts.push(format!("{c} {op} {n}")); + } + _ => { + let n = rng.random_range(0..=1); + parts.push(format!( + "{c} IS {}", + if n == 0 { "NULL" } else { "NOT NULL" } + )); + } + } + } + + if rng.random_bool(0.2) { + parts.push(format!("{}(k) = k", functions.choose(&mut rng).unwrap())); + } + // Guarantee at least one part + if parts.is_empty() { + parts.push("1".to_string()); + } + parts.join(" AND ") + }; + + let ddl = format!( + "CREATE UNIQUE INDEX idx_p{}_{} ON t({}) WHERE {}", + outer, + i, + key_cols.join(","), + pred + ); + idx_ddls.push(ddl.clone()); + // Create in both engines + limbo_exec_rows(&limbo_db, &limbo_conn, &ddl); + sqlite.execute(&ddl, rusqlite::params![]).unwrap(); + } + + let seed_rows = rng.random_range(10..=80); + for _ in 0..seed_rows { + let k = *K_POOL.choose(&mut rng).unwrap(); + let mut vals: Vec = vec!["NULL".into(), format!("'{k}'")]; // id NULL -> auto + for _ in 0..(num_cols - 1) { + // bias a bit toward small ints & NULL to make predicate flipping common + let v = match rng.random_range(0..6) { + 0 => "NULL".into(), + _ => rng.random_range(-5..=15).to_string(), + }; + vals.push(v); + } + let ins = format!("INSERT INTO t VALUES ({})", vals.join(", ")); + // Execute on both; ignore errors due to partial unique conflicts (keep seeding going) + let _ = sqlite.execute(&ins, rusqlite::params![]); + let _ = limbo_exec_rows_fallible(&limbo_db, &limbo_conn, &ins); + } + + for _ in 0..INNER_ITERS { + let action = rng.random_range(0..4); // 0: INSERT, 1: UPDATE, 2: DELETE, 3: UPSERT (catch-all) + let stmt = match action { + // INSERT + 0 
=> { + let k = *K_POOL.choose(&mut rng).unwrap(); + let mut cols_list = vec!["k".to_string()]; + let mut vals_list = vec![format!("'{k}'")]; + for i in 0..(num_cols - 1) { + if rng.random_bool(0.8) { + cols_list.push(format!("c{i}")); + vals_list.push(if rng.random_bool(0.15) { + "NULL".into() + } else { + rng.random_range(-5..=15).to_string() + }); + } + } + format!( + "INSERT INTO t({}) VALUES({})", + cols_list.join(","), + vals_list.join(",") + ) + } + + // UPDATE (randomly touch either key or predicate column) + 1 => { + // choose a column + let col_pick = if rng.random_bool(0.5) { + "k".to_string() + } else { + format!("c{}", rng.random_range(0..(num_cols - 1))) + }; + let new_val = if col_pick == "k" { + format!("'{}'", K_POOL.choose(&mut rng).unwrap()) + } else if rng.random_bool(0.2) { + "NULL".into() + } else { + rng.random_range(-5..=15).to_string() + }; + // predicate to affect some rows + let wc = if rng.random_bool(0.6) { + let pred_col = format!("c{}", rng.random_range(0..(num_cols - 1))); + let op = *["<", "<=", "=", ">=", ">"].choose(&mut rng).unwrap(); + let n = rng.random_range(-5..=15); + format!("WHERE {pred_col} {op} {n}") + } else { + // toggle rows by id parity + "WHERE (id % 2) = 0".into() + }; + format!("UPDATE t SET {col_pick} = {new_val} {wc}") + } + + // DELETE + 2 => { + let wc = if rng.random_bool(0.5) { + // delete rows inside partial predicate zones + match int_cols.len() { + 0 => "WHERE lower(k) = k".to_string(), + _ => { + let c = &int_cols[rng.random_range(0..int_cols.len())]; + let n = rng.random_range(-5..=15); + let op = *["<", "<=", "=", ">=", ">"].choose(&mut rng).unwrap(); + format!("WHERE {c} {op} {n}") + } + } + } else { + "WHERE id % 3 = 1".to_string() + }; + format!("DELETE FROM t {wc}") + } + + // UPSERT catch-all is allowed even if only partial unique constraints exist + 3 => { + let k = *K_POOL.choose(&mut rng).unwrap(); + let mut cols_list = vec!["k".to_string()]; + let mut vals_list = vec![format!("'{k}'")]; + for i in 0..(num_cols - 1) { + if rng.random_bool(0.8) { + cols_list.push(format!("c{i}")); + vals_list.push(if rng.random_bool(0.2) { + "NULL".into() + } else { + rng.random_range(-5..=15).to_string() + }); + } + } + format!( + "INSERT INTO t({}) VALUES({}) ON CONFLICT DO NOTHING", + cols_list.join(","), + vals_list.join(",") + ) + } + _ => unreachable!(), + }; + + // Execute on SQLite first; capture success/error, then run on turso and demand same outcome. 
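                // Differential oracle: both engines must agree on success vs.
                // failure, and on success the canonical table contents
                // (compared below) must match row-for-row.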
+ let sqlite_res = sqlite.execute(&stmt, rusqlite::params![]); + let limbo_res = limbo_exec_rows_fallible(&limbo_db, &limbo_conn, &stmt); + + match (sqlite_res, limbo_res) { + (Ok(_), Ok(_)) => { + // Compare canonical table state + let verify = format!( + "SELECT id, k{} FROM t ORDER BY id, k{}", + (0..(num_cols - 1)) + .map(|i| format!(", c{i}")) + .collect::(), + (0..(num_cols - 1)) + .map(|i| format!(", c{i}")) + .collect::(), + ); + let s = sqlite_exec_rows(&sqlite, &verify); + let l = limbo_exec_rows(&limbo_db, &limbo_conn, &verify); + assert_eq!( + l, s, + "stmt: {stmt}, seed: {seed}, create: {create}, idx: {idx_ddls:?}" + ); + } + (Err(_), Err(_)) => { + // Both errored + continue; + } + // Mismatch: dump context + (ok_sqlite, ok_turso) => { + eprintln!("Schema: {create};"); + for d in idx_ddls.iter() { + eprintln!("{d};"); + } + panic!( + "DML outcome mismatch (sqlite: {ok_sqlite:?}, turso ok: {ok_turso:?}) \n + stmt: {stmt}, seed: {seed}" + ); + } + } + } + } + } + #[test] pub fn compound_select_fuzz() { let _ = env_logger::try_init(); From ffeb26b24a9d8de85d010d3d418073823068b894 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 21 Sep 2025 13:50:36 -0300 Subject: [PATCH 59/78] only ever call callbacks once --- core/io/mod.rs | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/core/io/mod.rs b/core/io/mod.rs index 1b8d8a8ad..e537c393d 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -3,6 +3,7 @@ use crate::storage::sqlite3_ondisk::WAL_FRAME_HEADER_SIZE; use crate::{BufferPool, CompletionError, Result}; use bitflags::bitflags; use cfg_block::cfg_block; +use parking_lot::Once; use std::cell::RefCell; use std::fmt; use std::ptr::NonNull; @@ -142,6 +143,8 @@ struct CompletionInner { // Thread safe with OnceLock result: std::sync::OnceLock>, needs_link: bool, + /// before calling callback we check if done is true + done: Once, } impl Debug for CompletionType { @@ -169,6 +172,7 @@ impl Completion { completion_type, result: OnceLock::new(), needs_link: false, + done: Once::new(), }), } } @@ -179,6 +183,7 @@ impl Completion { completion_type, result: OnceLock::new(), needs_link: true, + done: Once::new(), }), } } @@ -258,36 +263,33 @@ impl Completion { pub fn complete(&self, result: i32) { let result = Ok(result); - match &self.inner.completion_type { - CompletionType::Read(r) => r.callback(result), - CompletionType::Write(w) => w.callback(result), - CompletionType::Sync(s) => s.callback(result), // fix - CompletionType::Truncate(t) => t.callback(result), - }; - self.inner - .result - .set(None) - .expect("result must be set only once"); + self.callback(result); } pub fn error(&self, err: CompletionError) { let result = Err(err); - match &self.inner.completion_type { - CompletionType::Read(r) => r.callback(result), - CompletionType::Write(w) => w.callback(result), - CompletionType::Sync(s) => s.callback(result), // fix - CompletionType::Truncate(t) => t.callback(result), - }; - self.inner - .result - .set(Some(err)) - .expect("result must be set only once"); + self.callback(result); } pub fn abort(&self) { self.error(CompletionError::Aborted); } + fn callback(&self, result: Result) { + self.inner.done.call_once(|| { + match &self.inner.completion_type { + CompletionType::Read(r) => r.callback(result), + CompletionType::Write(w) => w.callback(result), + CompletionType::Sync(s) => s.callback(result), // fix + CompletionType::Truncate(t) => t.callback(result), + }; + self.inner + .result + .set(result.err()) + .expect("result 
must be set only once");
        });
    }
+
    /// only call this method if you are sure that the completion is
    /// a ReadCompletion, panics otherwise
    pub fn as_read(&self) -> &ReadCompletion {

From 7def22ef3c0c4c270eceaa97971c8b5c1d300004 Mon Sep 17 00:00:00 2001
From: PThorpe92
Date: Sun, 21 Sep 2025 14:47:59 -0400
Subject: [PATCH 60/78] Add DO UPDATE SET case to partial index/upsert fuzzing

---
 tests/integration/fuzz/mod.rs | 38 +++++++++++++++++++++++++++++------
 1 file changed, 32 insertions(+), 6 deletions(-)

diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs
index 4317bbb91..807c0eee5 100644
--- a/tests/integration/fuzz/mod.rs
+++ b/tests/integration/fuzz/mod.rs
@@ -660,7 +660,7 @@ mod tests {
     pub fn partial_index_mutation_and_upsert_fuzz() {
         let _ = env_logger::try_init();
         const OUTER_ITERS: usize = 5;
-        const INNER_ITERS: usize = 200;
+        const INNER_ITERS: usize = 500;
@@ -889,11 +889,37 @@ mod tests {
                        });
                    }
                }
-                format!(
-                    "INSERT INTO t({}) VALUES({}) ON CONFLICT DO NOTHING",
-                    cols_list.join(","),
-                    vals_list.join(",")
-                )
+                if rng.random_bool(0.3) {
+                    // 30% chance ON CONFLICT DO UPDATE SET ...
+                    let mut set_list = Vec::new();
+                    let num_set = rng.random_range(1..=cols_list.len());
+                    let set_cols = cols_list
+                        .choose_multiple(&mut rng, num_set)
+                        .cloned()
+                        .collect::<Vec<_>>();
+                    for c in set_cols.iter() {
+                        let v = if c == "k" {
+                            format!("'{}'", K_POOL.choose(&mut rng).unwrap())
+                        } else if rng.random_bool(0.2) {
+                            "NULL".into()
+                        } else {
+                            rng.random_range(-5..=15).to_string()
+                        };
+                        set_list.push(format!("{c} = {v}"));
+                    }
+                    format!(
+                        "INSERT INTO t({}) VALUES({}) ON CONFLICT DO UPDATE SET {}",
+                        cols_list.join(","),
+                        vals_list.join(","),
+                        set_list.join(", ")
+                    )
+                } else {
+                    format!(
+                        "INSERT INTO t({}) VALUES({}) ON CONFLICT DO NOTHING",
+                        cols_list.join(","),
+                        vals_list.join(",")
+                    )
+                }
            }
            _ => unreachable!(),
        };

From 9f54f60d458be90678756d946872494cb95f0bc7 Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Sat, 20 Sep 2025 20:06:22 -0300
Subject: [PATCH 61/78] make sure that complex select statements are captured
 by MV populate

The population code extracts table information from the select statement
so it can populate the materialized view. But the code, as written today,
is naive. It doesn't capture table information correctly if there is more
than one select statement (such as in the case of a union query).
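A minimal illustration (hypothetical schema, and assuming a
CREATE MATERIALIZED VIEW ... AS SELECT syntax): a definition such as

    CREATE MATERIALIZED VIEW mv AS
        SELECT a FROM t1 WHERE x > 0
        UNION ALL
        SELECT a FROM t2;

references both t1 and t2, but extracting tables only from the first
SELECT's FROM clause misses t2, so its rows would never be scanned when
the view is populated.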
--- core/incremental/view.rs | 1761 ++++++++++++++++++++++++++------------ 1 file changed, 1237 insertions(+), 524 deletions(-) diff --git a/core/incremental/view.rs b/core/incremental/view.rs index fd7b3988a..65fa5e2bb 100644 --- a/core/incremental/view.rs +++ b/core/incremental/view.rs @@ -8,7 +8,7 @@ use crate::types::{IOResult, Value}; use crate::util::{extract_view_columns, ViewColumnSchema}; use crate::{return_if_io, LimboError, Pager, Result, Statement}; use std::cell::RefCell; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::fmt; use std::rc::Rc; use std::sync::{Arc, Mutex}; @@ -195,6 +195,9 @@ pub struct IncrementalView { // Mapping from table name to fully qualified name (e.g., "customers" -> "main.customers") // This preserves database qualification from the original query qualified_table_names: HashMap, + // WHERE conditions for each table (accumulated from all occurrences) + // Multiple conditions from UNION branches or duplicate references are stored as a vector + table_conditions: HashMap>>, // The view's column schema with table relationships pub column_schema: ViewColumnSchema, // State machine for population @@ -312,9 +315,18 @@ impl IncrementalView { // Extract output columns using the shared function let column_schema = extract_view_columns(&select, schema)?; - // Get all tables from FROM clause and JOINs, along with their aliases - let (referenced_tables, table_aliases, qualified_table_names) = - Self::extract_all_tables(&select, schema)?; + let mut referenced_tables = Vec::new(); + let mut table_aliases = HashMap::new(); + let mut qualified_table_names = HashMap::new(); + let mut table_conditions = HashMap::new(); + Self::extract_all_tables( + &select, + schema, + &mut referenced_tables, + &mut table_aliases, + &mut qualified_table_names, + &mut table_conditions, + )?; Self::new( name, @@ -322,6 +334,7 @@ impl IncrementalView { referenced_tables, table_aliases, qualified_table_names, + table_conditions, column_schema, schema, main_data_root, @@ -337,6 +350,7 @@ impl IncrementalView { referenced_tables: Vec>, table_aliases: HashMap, qualified_table_names: HashMap, + table_conditions: HashMap>>, column_schema: ViewColumnSchema, schema: &Schema, main_data_root: usize, @@ -362,6 +376,7 @@ impl IncrementalView { referenced_tables, table_aliases, qualified_table_names, + table_conditions, column_schema, populate_state: PopulateState::Start, tracker, @@ -405,97 +420,249 @@ impl IncrementalView { self.referenced_tables.clone() } - /// Extract all tables and their aliases from the SELECT statement - /// Returns a tuple of (tables, alias_map, qualified_names) - /// where alias_map is alias -> table_name - /// and qualified_names is table_name -> fully_qualified_name - #[allow(clippy::type_complexity)] - fn extract_all_tables( - select: &ast::Select, + /// Process a single table reference from a FROM or JOIN clause + fn process_table_reference( + name: &ast::QualifiedName, + alias: &Option, schema: &Schema, - ) -> Result<( - Vec>, - HashMap, - HashMap, - )> { - let mut tables = Vec::new(); - let mut aliases = HashMap::new(); - let mut qualified_names = HashMap::new(); + table_map: &mut HashMap>, + aliases: &mut HashMap, + qualified_names: &mut HashMap, + cte_names: &HashSet, + ) -> Result<()> { + let table_name = name.name.as_str(); + // Build the fully qualified name + let qualified_name = if let Some(ref db) = name.db_name { + format!("{db}.{table_name}") + } else { + table_name.to_string() + }; + + // Skip CTEs - they're not real tables + if 
!cte_names.contains(table_name) { + if let Some(table) = schema.get_btree_table(table_name) { + table_map.insert(table_name.to_string(), table.clone()); + qualified_names.insert(table_name.to_string(), qualified_name); + + // Store the alias mapping if there is an alias + if let Some(alias_enum) = alias { + let alias_name = match alias_enum { + ast::As::As(name) | ast::As::Elided(name) => match name { + ast::Name::Ident(s) | ast::Name::Quoted(s) => s, + }, + }; + aliases.insert(alias_name.to_string(), table_name.to_string()); + } + } else { + return Err(LimboError::ParseError(format!( + "Table '{table_name}' not found in schema" + ))); + } + } + Ok(()) + } + + fn extract_one_statement( + select: &ast::OneSelect, + schema: &Schema, + table_map: &mut HashMap>, + aliases: &mut HashMap, + qualified_names: &mut HashMap, + table_conditions: &mut HashMap>>, + cte_names: &HashSet, + ) -> Result<()> { if let ast::OneSelect::Select { from: Some(ref from), .. - } = select.body.select + } = select { // Get the main table from FROM clause if let ast::SelectTable::Table(name, alias, _) = from.select.as_ref() { - let table_name = name.name.as_str(); - - // Build the fully qualified name - let qualified_name = if let Some(ref db) = name.db_name { - format!("{db}.{table_name}") - } else { - table_name.to_string() - }; - - if let Some(table) = schema.get_btree_table(table_name) { - tables.push(table.clone()); - qualified_names.insert(table_name.to_string(), qualified_name); - - // Store the alias mapping if there is an alias - if let Some(alias_name) = alias { - aliases.insert(alias_name.to_string(), table_name.to_string()); - } - } else { - return Err(LimboError::ParseError(format!( - "Table '{table_name}' not found in schema" - ))); - } + Self::process_table_reference( + name, + alias, + schema, + table_map, + aliases, + qualified_names, + cte_names, + )?; } // Get all tables from JOIN clauses for join in &from.joins { if let ast::SelectTable::Table(name, alias, _) = join.table.as_ref() { - let table_name = name.name.as_str(); - - // Build the fully qualified name - let qualified_name = if let Some(ref db) = name.db_name { - format!("{db}.{table_name}") - } else { - table_name.to_string() - }; - - if let Some(table) = schema.get_btree_table(table_name) { - tables.push(table.clone()); - qualified_names.insert(table_name.to_string(), qualified_name); - - // Store the alias mapping if there is an alias - if let Some(alias_name) = alias { - aliases.insert(alias_name.to_string(), table_name.to_string()); - } - } else { - return Err(LimboError::ParseError(format!( - "Table '{table_name}' not found in schema" - ))); - } + Self::process_table_reference( + name, + alias, + schema, + table_map, + aliases, + qualified_names, + cte_names, + )?; } } } + // Extract WHERE conditions for this SELECT + let where_expr = if let ast::OneSelect::Select { + where_clause: Some(ref where_expr), + .. 
+ } = select + { + Some(where_expr.as_ref().clone()) + } else { + None + }; - if tables.is_empty() { - return Err(LimboError::ParseError( - "No tables found in SELECT statement".to_string(), - )); + // Ensure all tables have an entry in table_conditions (even if empty) + for table_name in table_map.keys() { + table_conditions.entry(table_name.clone()).or_default(); } - Ok((tables, aliases, qualified_names)) + // Extract and store table-specific conditions from the WHERE clause + if let Some(ref where_expr) = where_expr { + for table_name in table_map.keys() { + let all_tables: Vec = table_map.keys().cloned().collect(); + let table_specific_condition = Self::extract_conditions_for_table( + where_expr, + table_name, + aliases, + &all_tables, + schema, + ); + // Only add if there's actually a condition for this table + if let Some(condition) = table_specific_condition { + let conditions = table_conditions.get_mut(table_name).unwrap(); + conditions.push(Some(condition)); + } + } + } else { + // No WHERE clause - push None for all tables in this SELECT. It is a way + // of signaling that we need all rows in the table. It is important we signal this + // explicitly, because the same table may appear in many conditions - some of which + // have filters that would otherwise be applied. + for table_name in table_map.keys() { + let conditions = table_conditions.get_mut(table_name).unwrap(); + conditions.push(None); + } + } + + Ok(()) + } + + /// Extract all tables and their aliases from the SELECT statement, handling CTEs + /// Deduplicates tables and accumulates WHERE conditions + fn extract_all_tables( + select: &ast::Select, + schema: &Schema, + tables: &mut Vec>, + aliases: &mut HashMap, + qualified_names: &mut HashMap, + table_conditions: &mut HashMap>>, + ) -> Result<()> { + let mut table_map = HashMap::new(); + Self::extract_all_tables_inner( + select, + schema, + &mut table_map, + aliases, + qualified_names, + table_conditions, + &HashSet::new(), + )?; + + // Convert deduplicated table map to vector + for (_name, table) in table_map { + tables.push(table); + } + + Ok(()) + } + + fn extract_all_tables_inner( + select: &ast::Select, + schema: &Schema, + table_map: &mut HashMap>, + aliases: &mut HashMap, + qualified_names: &mut HashMap, + table_conditions: &mut HashMap>>, + parent_cte_names: &HashSet, + ) -> Result<()> { + let mut cte_names = parent_cte_names.clone(); + + // First, collect CTE names and process any CTEs (WITH clauses) + if let Some(ref with) = select.with { + // First pass: collect all CTE names (needed for recursive CTEs) + for cte in &with.ctes { + cte_names.insert(cte.tbl_name.as_str().to_string()); + } + + // Second pass: extract tables from each CTE's SELECT statement + for cte in &with.ctes { + // Recursively extract tables from each CTE's SELECT statement + Self::extract_all_tables_inner( + &cte.select, + schema, + table_map, + aliases, + qualified_names, + table_conditions, + &cte_names, + )?; + } + } + + // Then process the main SELECT body + Self::extract_one_statement( + &select.body.select, + schema, + table_map, + aliases, + qualified_names, + table_conditions, + &cte_names, + )?; + + // Process any compound selects (UNION, etc.) + for c in &select.body.compounds { + let ast::CompoundSelect { select, .. 
} = c; + Self::extract_one_statement( + select, + schema, + table_map, + aliases, + qualified_names, + table_conditions, + &cte_names, + )?; + } + + Ok(()) } /// Generate SQL queries for populating the view from each source table /// Returns a vector of SQL statements, one for each referenced table - /// Each query includes only the WHERE conditions relevant to that specific table + /// Each query includes the WHERE conditions accumulated from all occurrences fn sql_for_populate(&self) -> crate::Result> { - if self.referenced_tables.is_empty() { + Self::generate_populate_queries( + &self.select_stmt, + &self.referenced_tables, + &self.table_aliases, + &self.qualified_table_names, + &self.table_conditions, + ) + } + + pub fn generate_populate_queries( + select_stmt: &ast::Select, + referenced_tables: &[Arc], + table_aliases: &HashMap, + qualified_table_names: &HashMap, + table_conditions: &HashMap>>, + ) -> crate::Result> { + if referenced_tables.is_empty() { return Err(LimboError::ParseError( "No tables to populate from".to_string(), )); @@ -503,12 +670,11 @@ impl IncrementalView { let mut queries = Vec::new(); - for table in &self.referenced_tables { + for table in referenced_tables { // Check if the table has a rowid alias (INTEGER PRIMARY KEY column) let has_rowid_alias = table.columns.iter().any(|col| col.is_rowid_alias); - // For now, select all columns since we don't have the static operators - // The circuit will handle filtering and projection + // Select all columns. The circuit will handle filtering and projection // If there's a rowid alias, we don't need to select rowid separately let select_clause = if has_rowid_alias { "*".to_string() @@ -516,12 +682,22 @@ impl IncrementalView { "*, rowid".to_string() }; - // Extract WHERE conditions for this specific table - let where_clause = self.extract_where_clause_for_table(&table.name)?; + // Get accumulated WHERE conditions for this table + let where_clause = if let Some(conditions) = table_conditions.get(&table.name) { + // Combine multiple conditions with OR if there are multiple occurrences + Self::combine_conditions( + select_stmt, + conditions, + &table.name, + referenced_tables, + table_aliases, + )? + } else { + String::new() + }; // Use the qualified table name if available, otherwise just the table name - let table_name = self - .qualified_table_names + let table_name = qualified_table_names .get(&table.name) .cloned() .unwrap_or_else(|| table.name.clone()); @@ -532,347 +708,405 @@ impl IncrementalView { } else { format!("SELECT {select_clause} FROM {table_name} WHERE {where_clause}") }; + tracing::debug!("populating materialized view with `{query}`"); queries.push(query); } Ok(queries) } - /// Extract WHERE conditions that apply to a specific table - /// This analyzes the WHERE clause in the SELECT statement and returns - /// only the conditions that reference the given table - fn extract_where_clause_for_table(&self, table_name: &str) -> crate::Result { - // For single table queries, return the entire WHERE clause (already unqualified) - if self.referenced_tables.len() == 1 { - if let ast::OneSelect::Select { - where_clause: Some(ref where_expr), - .. 
- } = self.select_stmt.body.select
-        {
-            // For single table, the expression should already be unqualified or qualified with the single table
-            // We need to unqualify it for the single-table query
-            let unqualified = self.unqualify_expression(where_expr, table_name);
-            return Ok(unqualified.to_string());
-        }
+    fn combine_conditions(
+        _select_stmt: &ast::Select,
+        conditions: &[Option<ast::Expr>],
+        table_name: &str,
+        _referenced_tables: &[Arc<BTreeTable>],
+        table_aliases: &HashMap<String, String>,
+    ) -> crate::Result<String> {
+        // Check if any conditions are None (SELECTs without WHERE)
+        let has_none = conditions.iter().any(|c| c.is_none());
+        let non_empty: Vec<_> = conditions.iter().filter_map(|c| c.as_ref()).collect();
+
+        // If we have both Some and None conditions, then in some of the SELECTs in which
+        // this table appears we want all rows, so we must fetch all rows.
+        if has_none && !non_empty.is_empty() {
             return Ok(String::new());
         }
 
-        // For multi-table queries (JOINs), extract conditions for the specific table
-        if let ast::OneSelect::Select {
-            where_clause: Some(ref where_expr),
-            ..
-        } = self.select_stmt.body.select
-        {
-            // Extract conditions that reference only the specified table
-            let table_conditions = self.extract_table_conditions(where_expr, table_name)?;
-            if let Some(conditions) = table_conditions {
-                // Unqualify the expression for single-table query
-                let unqualified = self.unqualify_expression(&conditions, table_name);
-                return Ok(unqualified.to_string());
-            }
+        if non_empty.is_empty() {
+            return Ok(String::new());
         }
 
-        Ok(String::new())
+        if non_empty.len() == 1 {
+            // Unqualify the expression before converting to string
+            let unqualified = Self::unqualify_expression(non_empty[0], table_name, table_aliases);
+            return Ok(unqualified.to_string());
+        }
+
+        // Multiple conditions - combine with OR
+        // This happens in UNION / UNION ALL when the same table appears multiple times
+        let mut combined_parts = Vec::new();
+        for condition in non_empty {
+            let unqualified = Self::unqualify_expression(condition, table_name, table_aliases);
+            // Wrap each condition in parentheses to preserve precedence
+            combined_parts.push(format!("({unqualified})"));
+        }
+
+        // Join all conditions with OR
+        Ok(combined_parts.join(" OR "))
+    }
+
+    /// Check if an expression is a simple comparison that can be safely extracted
+    /// This excludes subqueries, CASE expressions, function calls, etc.
+    fn is_simple_comparison(expr: &ast::Expr) -> bool {
+        match expr {
+            // Simple column references and literals are OK
+            ast::Expr::Column { .. 
} | ast::Expr::Literal(_) => true, + + // Simple binary operations between simple expressions are OK + ast::Expr::Binary(left, op, right) => { + match op { + // Logical operators + ast::Operator::And | ast::Operator::Or => { + Self::is_simple_comparison(left) && Self::is_simple_comparison(right) + } + // Comparison operators + ast::Operator::Equals + | ast::Operator::NotEquals + | ast::Operator::Less + | ast::Operator::LessEquals + | ast::Operator::Greater + | ast::Operator::GreaterEquals + | ast::Operator::Is + | ast::Operator::IsNot => { + Self::is_simple_comparison(left) && Self::is_simple_comparison(right) + } + // String concatenation and other operations are NOT simple + ast::Operator::Concat => false, + // Arithmetic might be OK if operands are simple + ast::Operator::Add + | ast::Operator::Subtract + | ast::Operator::Multiply + | ast::Operator::Divide + | ast::Operator::Modulus => { + Self::is_simple_comparison(left) && Self::is_simple_comparison(right) + } + _ => false, + } + } + + // Unary operations might be OK + ast::Expr::Unary( + ast::UnaryOperator::Not + | ast::UnaryOperator::Negative + | ast::UnaryOperator::Positive, + inner, + ) => Self::is_simple_comparison(inner), + ast::Expr::Unary(_, _) => false, + + // Complex expressions are NOT simple + ast::Expr::Case { .. } => false, + ast::Expr::Cast { .. } => false, + ast::Expr::Collate { .. } => false, + ast::Expr::Exists(_) => false, + ast::Expr::FunctionCall { .. } => false, + ast::Expr::InList { .. } => false, + ast::Expr::InSelect { .. } => false, + ast::Expr::Like { .. } => false, + ast::Expr::NotNull(_) => true, // IS NOT NULL is simple enough + ast::Expr::Parenthesized(exprs) => { + // Parenthesized expression can contain multiple expressions + // Only consider it simple if it has exactly one simple expression + exprs.len() == 1 && Self::is_simple_comparison(&exprs[0]) + } + ast::Expr::Subquery(_) => false, + + // BETWEEN might be OK if all operands are simple + ast::Expr::Between { .. } => { + // BETWEEN has a different structure, for safety just exclude it + false + } + + // Qualified references are simple + ast::Expr::DoublyQualified(..) 
=> true, + ast::Expr::Qualified(_, _) => true, + + // These are simple + ast::Expr::Id(_) => true, + ast::Expr::Name(_) => true, + + // Anything else is not simple + _ => false, + } } - /// Extract conditions from an expression that reference only the specified table - fn extract_table_conditions( - &self, + /// Extract conditions from a WHERE clause that apply to a specific table + fn extract_conditions_for_table( expr: &ast::Expr, table_name: &str, - ) -> crate::Result> { + aliases: &HashMap, + all_tables: &[String], + schema: &Schema, + ) -> Option { match expr { ast::Expr::Binary(left, op, right) => { match op { ast::Operator::And => { // For AND, we can extract conditions independently - let left_cond = self.extract_table_conditions(left, table_name)?; - let right_cond = self.extract_table_conditions(right, table_name)?; + let left_cond = Self::extract_conditions_for_table( + left, table_name, aliases, all_tables, schema, + ); + let right_cond = Self::extract_conditions_for_table( + right, table_name, aliases, all_tables, schema, + ); match (left_cond, right_cond) { - (Some(l), Some(r)) => { - // Both conditions apply to this table - Ok(Some(ast::Expr::Binary( - Box::new(l), - ast::Operator::And, - Box::new(r), - ))) - } - (Some(l), None) => Ok(Some(l)), - (None, Some(r)) => Ok(Some(r)), - (None, None) => Ok(None), + (Some(l), Some(r)) => Some(ast::Expr::Binary( + Box::new(l), + ast::Operator::And, + Box::new(r), + )), + (Some(l), None) => Some(l), + (None, Some(r)) => Some(r), + (None, None) => None, } } ast::Operator::Or => { - // For OR, both sides must reference the same table(s) - // If either side references multiple tables, we can't extract it - let left_tables = self.get_referenced_tables_in_expr(left)?; - let right_tables = self.get_referenced_tables_in_expr(right)?; + // For OR, both sides must reference only our table + let left_tables = + Self::get_tables_in_expr(left, aliases, all_tables, schema); + let right_tables = + Self::get_tables_in_expr(right, aliases, all_tables, schema); - // If both sides only reference our table, include the whole OR if left_tables.len() == 1 && left_tables.contains(&table_name.to_string()) && right_tables.len() == 1 && right_tables.contains(&table_name.to_string()) + && Self::is_simple_comparison(expr) { - Ok(Some(expr.clone())) + Some(expr.clone()) } else { - // OR condition involves multiple tables, can't extract - Ok(None) + None } } _ => { - // For comparison operators, check if this condition references only our table - // AND is simple enough to be pushed down (no complex expressions) - let referenced_tables = self.get_referenced_tables_in_expr(expr)?; + // For comparison operators, check if this condition only references our table + let referenced_tables = + Self::get_tables_in_expr(expr, aliases, all_tables, schema); if referenced_tables.len() == 1 && referenced_tables.contains(&table_name.to_string()) + && Self::is_simple_comparison(expr) { - // Check if this is a simple comparison that can be pushed down - // Complex expressions like (a * b) >= c should be handled by the circuit - if self.is_simple_comparison(expr) { - Ok(Some(expr.clone())) - } else { - // Complex expression - let the circuit handle it - Ok(None) - } + Some(expr.clone()) } else { - Ok(None) + None } } } } - ast::Expr::Parenthesized(exprs) => { - if exprs.len() == 1 { - self.extract_table_conditions(&exprs[0], table_name) - } else { - Ok(None) - } - } _ => { - // For other expressions, check if they reference only our table - // AND are simple enough to be pushed 
down - let referenced_tables = self.get_referenced_tables_in_expr(expr)?; + // For other expressions, check if they only reference our table + let referenced_tables = Self::get_tables_in_expr(expr, aliases, all_tables, schema); if referenced_tables.len() == 1 && referenced_tables.contains(&table_name.to_string()) - && self.is_simple_comparison(expr) + && Self::is_simple_comparison(expr) { - Ok(Some(expr.clone())) + Some(expr.clone()) } else { - Ok(None) + None } } } } - /// Check if an expression is a simple comparison that can be pushed down to table scan - /// Returns true for simple comparisons like "column = value" or "column > value" - /// Returns false for complex expressions like "(a * b) > value" - fn is_simple_comparison(&self, expr: &ast::Expr) -> bool { - match expr { - ast::Expr::Binary(left, op, right) => { - // Check if it's a comparison operator - matches!( - op, - ast::Operator::Equals - | ast::Operator::NotEquals - | ast::Operator::Greater - | ast::Operator::GreaterEquals - | ast::Operator::Less - | ast::Operator::LessEquals - ) && self.is_simple_operand(left) - && self.is_simple_operand(right) - } - _ => false, - } - } - - /// Check if an operand is simple (column reference or literal) - fn is_simple_operand(&self, expr: &ast::Expr) -> bool { - matches!( - expr, - ast::Expr::Id(_) - | ast::Expr::Qualified(_, _) - | ast::Expr::DoublyQualified(_, _, _) - | ast::Expr::Literal(_) - ) - } - - /// Get the set of table names referenced in an expression - fn get_referenced_tables_in_expr(&self, expr: &ast::Expr) -> crate::Result> { - let mut tables = Vec::new(); - self.collect_referenced_tables(expr, &mut tables)?; - // Deduplicate - tables.sort(); - tables.dedup(); - Ok(tables) - } - - /// Recursively collect table references from an expression - fn collect_referenced_tables( - &self, + /// Unqualify column references in an expression + /// Removes table/alias prefixes from qualified column names + fn unqualify_expression( expr: &ast::Expr, - tables: &mut Vec, - ) -> crate::Result<()> { + table_name: &str, + aliases: &HashMap, + ) -> ast::Expr { match expr { - ast::Expr::Binary(left, _, right) => { - self.collect_referenced_tables(left, tables)?; - self.collect_referenced_tables(right, tables)?; - } - ast::Expr::Qualified(table, _) => { - // This is a qualified column reference (table.column or alias.column) - // We need to resolve aliases to actual table names - let actual_table = self.resolve_table_alias(table.as_str()); - tables.push(actual_table); - } - ast::Expr::Id(column) => { - // Unqualified column reference - if self.referenced_tables.len() > 1 { - // In a JOIN context, check which tables have this column - let mut tables_with_column = Vec::new(); - for table in &self.referenced_tables { - if table - .columns - .iter() - .any(|c| c.name.as_ref() == Some(&column.to_string())) - { - tables_with_column.push(table.name.clone()); - } - } - - if tables_with_column.len() > 1 { - // Ambiguous column - this should have been caught earlier - // Return error to be safe - return Err(crate::LimboError::ParseError(format!( - "Ambiguous column name '{}' in WHERE clause - exists in tables: {}", - column, - tables_with_column.join(", ") - ))); - } else if tables_with_column.len() == 1 { - // Unambiguous - only one table has this column - // This is allowed by SQLite - tables.push(tables_with_column[0].clone()); - } else { - // Column doesn't exist in any table - this is an error - // but should be caught during compilation - return Err(crate::LimboError::ParseError(format!( - "Column 
'{column}' not found in any table" - ))); - } - } else { - // Single table context - unqualified columns belong to that table - if let Some(table) = self.referenced_tables.first() { - tables.push(table.name.clone()); - } - } - } - ast::Expr::DoublyQualified(_database, table, _column) => { - // For database.table.column, resolve the table name - let table_str = table.as_str(); - let actual_table = self.resolve_table_alias(table_str); - tables.push(actual_table); - } - ast::Expr::Parenthesized(exprs) => { - for e in exprs { - self.collect_referenced_tables(e, tables)?; - } - } - _ => { - // Literals and other expressions don't reference tables - } - } - Ok(()) - } - - /// Convert a qualified expression to unqualified for single-table queries - /// This removes table prefixes from column references since they're not needed - /// when querying a single table - fn unqualify_expression(&self, expr: &ast::Expr, table_name: &str) -> ast::Expr { - match expr { - ast::Expr::Binary(left, op, right) => { - // Recursively unqualify both sides - ast::Expr::Binary( - Box::new(self.unqualify_expression(left, table_name)), - *op, - Box::new(self.unqualify_expression(right, table_name)), - ) - } - ast::Expr::Qualified(table, column) => { - // Convert qualified column to unqualified if it's for our table - // Handle both "table.column" and "database.table.column" cases - let table_str = table.as_str(); - - // Check if this is a database.table reference - let actual_table = if table_str.contains('.') { - // Split on '.' and take the last part as the table name + ast::Expr::Binary(left, op, right) => ast::Expr::Binary( + Box::new(Self::unqualify_expression(left, table_name, aliases)), + *op, + Box::new(Self::unqualify_expression(right, table_name, aliases)), + ), + ast::Expr::Qualified(table_or_alias, column) => { + // Check if this qualification refers to our table + let table_str = table_or_alias.as_str(); + let actual_table = if let Some(actual) = aliases.get(table_str) { + actual.clone() + } else if table_str.contains('.') { + // Handle database.table format table_str .split('.') .next_back() .unwrap_or(table_str) .to_string() } else { - // Could be an alias or direct table name - self.resolve_table_alias(table_str) + table_str.to_string() }; if actual_table == table_name { - // Just return the column name without qualification + // Remove the qualification ast::Expr::Id(column.clone()) } else { - // This shouldn't happen if extract_table_conditions worked correctly - // but keep it qualified just in case + // Keep the qualification (shouldn't happen if extraction worked correctly) expr.clone() } } ast::Expr::DoublyQualified(_database, table, column) => { - // This is database.table.column format - // Check if the table matches our target table - let table_str = table.as_str(); - let actual_table = self.resolve_table_alias(table_str); - - if actual_table == table_name { - // Just return the column name without qualification + // Check if this refers to our table + if table.as_str() == table_name { + // Remove the qualification, keep just the column ast::Expr::Id(column.clone()) } else { - // Keep it qualified if it's for a different table + // Keep the qualification (shouldn't happen if extraction worked correctly) expr.clone() } } - ast::Expr::Parenthesized(exprs) => { - // Recursively unqualify expressions in parentheses - let unqualified_exprs: Vec> = exprs + ast::Expr::Unary(op, inner) => ast::Expr::Unary( + *op, + Box::new(Self::unqualify_expression(inner, table_name, aliases)), + ), + 
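+            // The remaining arms rebuild compound expressions, recursively
+            // unqualifying their operand expressions (function arguments,
+            // IN-list items, BETWEEN bounds) while cloning all other fields
+            // unchanged.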
ast::Expr::FunctionCall { + name, + args, + distinctness, + filter_over, + order_by, + } => ast::Expr::FunctionCall { + name: name.clone(), + args: args .iter() - .map(|e| Box::new(self.unqualify_expression(e, table_name))) - .collect(); - ast::Expr::Parenthesized(unqualified_exprs) + .map(|arg| Box::new(Self::unqualify_expression(arg, table_name, aliases))) + .collect(), + distinctness: *distinctness, + filter_over: filter_over.clone(), + order_by: order_by.clone(), + }, + ast::Expr::InList { lhs, not, rhs } => ast::Expr::InList { + lhs: Box::new(Self::unqualify_expression(lhs, table_name, aliases)), + not: *not, + rhs: rhs + .iter() + .map(|item| Box::new(Self::unqualify_expression(item, table_name, aliases))) + .collect(), + }, + ast::Expr::Between { + lhs, + not, + start, + end, + } => ast::Expr::Between { + lhs: Box::new(Self::unqualify_expression(lhs, table_name, aliases)), + not: *not, + start: Box::new(Self::unqualify_expression(start, table_name, aliases)), + end: Box::new(Self::unqualify_expression(end, table_name, aliases)), + }, + _ => expr.clone(), + } + } + + /// Get all tables referenced in an expression + fn get_tables_in_expr( + expr: &ast::Expr, + aliases: &HashMap, + all_tables: &[String], + schema: &Schema, + ) -> Vec { + let mut tables = Vec::new(); + Self::collect_tables_in_expr(expr, aliases, all_tables, schema, &mut tables); + tables.sort(); + tables.dedup(); + tables + } + + /// Recursively collect table references from an expression + fn collect_tables_in_expr( + expr: &ast::Expr, + aliases: &HashMap, + all_tables: &[String], + schema: &Schema, + tables: &mut Vec, + ) { + match expr { + ast::Expr::Binary(left, _, right) => { + Self::collect_tables_in_expr(left, aliases, all_tables, schema, tables); + Self::collect_tables_in_expr(right, aliases, all_tables, schema, tables); + } + ast::Expr::Qualified(table_or_alias, _) => { + // Handle database.table or just table/alias + let table_str = table_or_alias.as_str(); + let table_name = if let Some(actual_table) = aliases.get(table_str) { + // It's an alias + actual_table.clone() + } else if table_str.contains('.') { + // It might be database.table format, extract just the table name + table_str + .split('.') + .next_back() + .unwrap_or(table_str) + .to_string() + } else { + // It's a direct table name + table_str.to_string() + }; + tables.push(table_name); + } + ast::Expr::DoublyQualified(_database, table, _column) => { + // For database.table.column, extract the table name + tables.push(table.to_string()); + } + ast::Expr::Id(column) => { + // Unqualified column - try to find which table has this column + if all_tables.len() == 1 { + tables.push(all_tables[0].clone()); + } else { + // Check which table has this column + for table_name in all_tables { + if let Some(table) = schema.get_btree_table(table_name) { + if table + .columns + .iter() + .any(|col| col.name.as_deref() == Some(column.as_str())) + { + tables.push(table_name.clone()); + break; // Found the table, stop looking + } + } + } + } + } + ast::Expr::FunctionCall { args, .. } => { + for arg in args { + Self::collect_tables_in_expr(arg, aliases, all_tables, schema, tables); + } + } + ast::Expr::InList { lhs, rhs, .. } => { + Self::collect_tables_in_expr(lhs, aliases, all_tables, schema, tables); + for item in rhs { + Self::collect_tables_in_expr(item, aliases, all_tables, schema, tables); + } + } + ast::Expr::InSelect { lhs, .. } => { + Self::collect_tables_in_expr(lhs, aliases, all_tables, schema, tables); + } + ast::Expr::Between { + lhs, start, end, .. 
+ } => {
+                Self::collect_tables_in_expr(lhs, aliases, all_tables, schema, tables);
+                Self::collect_tables_in_expr(start, aliases, all_tables, schema, tables);
+                Self::collect_tables_in_expr(end, aliases, all_tables, schema, tables);
+            }
+            ast::Expr::Unary(_, expr) => {
+                Self::collect_tables_in_expr(expr, aliases, all_tables, schema, tables);
             }
             _ => {
-                // Other expression types (literals, unqualified columns, etc.) stay as-is
-                expr.clone()
+                // Literals, etc. don't reference tables
             }
         }
     }
-
-    /// Resolve a table alias to the actual table name
-    fn resolve_table_alias(&self, alias: &str) -> String {
-        // Check if there's an alias mapping in the FROM/JOIN clauses
-        // For now, we'll do a simple check - if the alias matches a table name, use it
-        // Otherwise, try to find it in the FROM clause
-
-        // First check if it's an actual table name
-        if self.referenced_tables.iter().any(|t| t.name == alias) {
-            return alias.to_string();
-        }
-
-        // Check if it's an alias that maps to a table
-        if let Some(table_name) = self.table_aliases.get(alias) {
-            return table_name.clone();
-        }
-
-        // If we can't resolve it, return as-is (it might be a table name we don't know about)
-        alias.to_string()
-    }
-
     /// Populate the view by scanning the source table using a state machine
     /// This can be called multiple times and will resume from where it left off
     /// This method is only for materialized views and will persist data to the btree
@@ -1342,17 +1576,58 @@ mod tests {
         }
     }
 
+    // Type alias for the complex return type of extract_all_tables
+    type ExtractedTableInfo = (
+        Vec<Arc<BTreeTable>>,
+        HashMap<String, String>,
+        HashMap<String, String>,
+        HashMap<String, Vec<Option<ast::Expr>>>,
+    );
+
+    fn extract_all_tables(select: &ast::Select, schema: &Schema) -> Result<ExtractedTableInfo> {
+        let mut referenced_tables = Vec::new();
+        let mut table_aliases = HashMap::new();
+        let mut qualified_table_names = HashMap::new();
+        let mut table_conditions = HashMap::new();
+        IncrementalView::extract_all_tables(
+            select,
+            schema,
+            &mut referenced_tables,
+            &mut table_aliases,
+            &mut qualified_table_names,
+            &mut table_conditions,
+        )?;
+        Ok((
+            referenced_tables,
+            table_aliases,
+            qualified_table_names,
+            table_conditions,
+        ))
+    }
+
     #[test]
     fn test_extract_single_table() {
         let schema = create_test_schema();
         let select = parse_select("SELECT * FROM customers");
 
-        let (tables, _, _) = IncrementalView::extract_all_tables(&select, &schema).unwrap();
+        let (tables, _, _, _table_conditions) = extract_all_tables(&select, &schema).unwrap();
 
         assert_eq!(tables.len(), 1);
         assert_eq!(tables[0].name, "customers");
     }
 
+    #[test]
+    fn test_tables_from_union() {
+        let schema = create_test_schema();
+        let select = parse_select("SELECT name FROM customers union SELECT name from products");
+
+        let (tables, _, _, table_conditions) = extract_all_tables(&select, &schema).unwrap();
+
+        assert_eq!(tables.len(), 2);
+        assert!(table_conditions.contains_key("customers"));
+        assert!(table_conditions.contains_key("products"));
+    }
+
     #[test]
     fn test_extract_tables_from_inner_join() {
         let schema = create_test_schema();
         let select = parse_select(
             "SELECT * FROM customers INNER JOIN orders ON customers.id = orders.customer_id",
         );
 
-        let (tables, _, _) = IncrementalView::extract_all_tables(&select, &schema).unwrap();
+        let (tables, _, _, table_conditions) = extract_all_tables(&select, &schema).unwrap();
 
         assert_eq!(tables.len(), 2);
-        assert_eq!(tables[0].name, "customers");
-        assert_eq!(tables[1].name, "orders");
+        assert!(table_conditions.contains_key("customers"));
+        assert!(table_conditions.contains_key("orders"));
     }
 
     #[test]
@@ 
-1376,12 +1651,12 @@ mod tests { INNER JOIN products ON orders.id = products.id", ); - let (tables, _, _) = IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, _, _, table_conditions) = extract_all_tables(&select, &schema).unwrap(); assert_eq!(tables.len(), 3); - assert_eq!(tables[0].name, "customers"); - assert_eq!(tables[1].name, "orders"); - assert_eq!(tables[2].name, "products"); + assert!(table_conditions.contains_key("customers")); + assert!(table_conditions.contains_key("orders")); + assert!(table_conditions.contains_key("products")); } #[test] @@ -1391,11 +1666,11 @@ mod tests { "SELECT * FROM customers LEFT JOIN orders ON customers.id = orders.customer_id", ); - let (tables, _, _) = IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, _, _, table_conditions) = extract_all_tables(&select, &schema).unwrap(); assert_eq!(tables.len(), 2); - assert_eq!(tables[0].name, "customers"); - assert_eq!(tables[1].name, "orders"); + assert!(table_conditions.contains_key("customers")); + assert!(table_conditions.contains_key("orders")); } #[test] @@ -1403,11 +1678,11 @@ mod tests { let schema = create_test_schema(); let select = parse_select("SELECT * FROM customers CROSS JOIN orders"); - let (tables, _, _) = IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, _, _, table_conditions) = extract_all_tables(&select, &schema).unwrap(); assert_eq!(tables.len(), 2); - assert_eq!(tables[0].name, "customers"); - assert_eq!(tables[1].name, "orders"); + assert!(table_conditions.contains_key("customers")); + assert!(table_conditions.contains_key("orders")); } #[test] @@ -1416,12 +1691,17 @@ mod tests { let select = parse_select("SELECT * FROM customers c INNER JOIN orders o ON c.id = o.customer_id"); - let (tables, _, _) = IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, _, _table_conditions) = extract_all_tables(&select, &schema).unwrap(); // Should still extract the actual table names, not aliases assert_eq!(tables.len(), 2); - assert_eq!(tables[0].name, "customers"); - assert_eq!(tables[1].name, "orders"); + let table_names: Vec<&str> = tables.iter().map(|t| t.name.as_str()).collect(); + assert!(table_names.contains(&"customers")); + assert!(table_names.contains(&"orders")); + + // Check that aliases are correctly mapped + assert_eq!(aliases.get("c"), Some(&"customers".to_string())); + assert_eq!(aliases.get("o"), Some(&"orders".to_string())); } #[test] @@ -1429,8 +1709,7 @@ mod tests { let schema = create_test_schema(); let select = parse_select("SELECT * FROM nonexistent"); - let result = - IncrementalView::extract_all_tables(&select, &schema).map(|(tables, _, _)| tables); + let result = extract_all_tables(&select, &schema).map(|(tables, _, _, _)| tables); assert!(result.is_err()); assert!(result @@ -1446,8 +1725,7 @@ mod tests { "SELECT * FROM customers INNER JOIN nonexistent ON customers.id = nonexistent.id", ); - let result = - IncrementalView::extract_all_tables(&select, &schema).map(|(tables, _, _)| tables); + let result = extract_all_tables(&select, &schema).map(|(tables, _, _, _)| tables); assert!(result.is_err()); assert!(result @@ -1462,14 +1740,15 @@ mod tests { let schema = create_test_schema(); let select = parse_select("SELECT * FROM customers"); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); let view = 
IncrementalView::new( "test_view".to_string(), select.clone(), tables, aliases, qualified_names, + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1491,14 +1770,15 @@ mod tests { let schema = create_test_schema(); let select = parse_select("SELECT * FROM customers WHERE id > 10"); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); let view = IncrementalView::new( "test_view".to_string(), select.clone(), tables, aliases, qualified_names, + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1524,14 +1804,15 @@ mod tests { WHERE c.id > 10 AND o.total > 100", ); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); let view = IncrementalView::new( "test_view".to_string(), select.clone(), tables, aliases, qualified_names, + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1547,8 +1828,12 @@ mod tests { // With per-table WHERE extraction: // - customers table gets: c.id > 10 // - orders table gets: o.total > 100 - assert_eq!(queries[0], "SELECT * FROM customers WHERE id > 10"); - assert_eq!(queries[1], "SELECT * FROM orders WHERE total > 100"); + assert!(queries + .iter() + .any(|q| q == "SELECT * FROM customers WHERE id > 10")); + assert!(queries + .iter() + .any(|q| q == "SELECT * FROM orders WHERE total > 100")); } #[test] @@ -1562,14 +1847,15 @@ mod tests { AND o.customer_id = 5 AND (c.id = 15 OR o.total = 200)", ); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); let view = IncrementalView::new( "test_view".to_string(), select.clone(), tables, aliases, qualified_names, + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1587,152 +1873,27 @@ mod tests { // - orders gets: o.total > 100 AND o.customer_id = 5 // Note: The OR condition (c.id = 15 OR o.total = 200) involves both tables, // so it cannot be extracted to either table individually - assert_eq!( - queries[0], - "SELECT * FROM customers WHERE id > 10 AND name = 'John'" - ); - assert_eq!( - queries[1], - "SELECT * FROM orders WHERE total > 100 AND customer_id = 5" - ); - } - - #[test] - fn test_where_extraction_for_three_tables() { - // Test that WHERE clause extraction correctly separates conditions for 3+ tables - // This addresses the concern about conditions "piling up" as joins increase - - // Simulate a three-table scenario - let schema = create_test_schema(); - - // Parse a WHERE clause with conditions for three different tables - let select = parse_select( - "SELECT * FROM customers WHERE c.id > 10 AND o.total > 100 AND p.price > 50", - ); - - // Get the WHERE expression - if let ast::OneSelect::Select { - where_clause: Some(ref where_expr), - .. 
- } = select.body.select - { - // Create a view with three tables to test extraction - let tables = vec![ - schema.get_btree_table("customers").unwrap(), - schema.get_btree_table("orders").unwrap(), - schema.get_btree_table("products").unwrap(), - ]; - - let mut aliases = HashMap::new(); - aliases.insert("c".to_string(), "customers".to_string()); - aliases.insert("o".to_string(), "orders".to_string()); - aliases.insert("p".to_string(), "products".to_string()); - - // Create a minimal view just to test extraction logic - let view = IncrementalView { - name: "test".to_string(), - select_stmt: select.clone(), - circuit: DbspCircuit::new(1, 2, 3), - referenced_tables: tables, - table_aliases: aliases, - qualified_table_names: HashMap::new(), - column_schema: ViewColumnSchema { - columns: vec![], - tables: vec![], - }, - populate_state: PopulateState::Start, - tracker: Arc::new(Mutex::new(ComputationTracker::new())), - root_page: 0, - }; - - // Test extraction for each table - let customers_conds = view - .extract_table_conditions(where_expr, "customers") - .unwrap(); - let orders_conds = view.extract_table_conditions(where_expr, "orders").unwrap(); - let products_conds = view - .extract_table_conditions(where_expr, "products") - .unwrap(); - - // Verify each table only gets its conditions - if let Some(cond) = customers_conds { - let sql = cond.to_string(); - assert!(sql.contains("id > 10")); - assert!(!sql.contains("total")); - assert!(!sql.contains("price")); - } - - if let Some(cond) = orders_conds { - let sql = cond.to_string(); - assert!(sql.contains("total > 100")); - assert!(!sql.contains("id > 10")); // From customers - assert!(!sql.contains("price")); - } - - if let Some(cond) = products_conds { - let sql = cond.to_string(); - assert!(sql.contains("price > 50")); - assert!(!sql.contains("id > 10")); // From customers - assert!(!sql.contains("total")); - } - } else { - panic!("Failed to parse WHERE clause"); - } - } - - #[test] - fn test_alias_resolution_works_correctly() { - // Test that alias resolution properly maps aliases to table names - let schema = create_test_schema(); - let select = parse_select( - "SELECT * FROM customers c \ - JOIN orders o ON c.id = o.customer_id \ - WHERE c.id > 10 AND o.total > 100", - ); - - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); - let view = IncrementalView::new( - "test_view".to_string(), - select.clone(), - tables, - aliases, - qualified_names, - extract_view_columns(&select, &schema).unwrap(), - &schema, - 1, // main_data_root - 2, // internal_state_root - 3, // internal_state_index_root - ) - .unwrap(); - - // Verify that alias mappings were extracted correctly - assert_eq!(view.table_aliases.get("c"), Some(&"customers".to_string())); - assert_eq!(view.table_aliases.get("o"), Some(&"orders".to_string())); - - // Verify that SQL generation uses the aliases correctly - let queries = view.sql_for_populate().unwrap(); - assert_eq!(queries.len(), 2); - - // Each query should use the actual table name, not the alias - assert!(queries[0].contains("FROM customers") || queries[1].contains("FROM customers")); - assert!(queries[0].contains("FROM orders") || queries[1].contains("FROM orders")); + // Check both queries exist (order doesn't matter) + assert!(queries + .contains(&"SELECT * FROM customers WHERE id > 10 AND name = 'John'".to_string())); + assert!(queries + .contains(&"SELECT * FROM orders WHERE total > 100 AND customer_id = 5".to_string())); } #[test] fn 
test_sql_for_populate_table_without_rowid_alias() { - // Test that tables without a rowid alias properly include rowid in SELECT let schema = create_test_schema(); let select = parse_select("SELECT * FROM logs WHERE level > 2"); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); let view = IncrementalView::new( "test_view".to_string(), select.clone(), tables, aliases, qualified_names, + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1758,14 +1919,15 @@ mod tests { WHERE c.id > 10 AND l.level > 2", ); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); let view = IncrementalView::new( "test_view".to_string(), select.clone(), tables, aliases, qualified_names, + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1778,8 +1940,8 @@ mod tests { assert_eq!(queries.len(), 2); // customers has rowid alias (id), logs doesn't - assert_eq!(queries[0], "SELECT * FROM customers WHERE id > 10"); - assert_eq!(queries[1], "SELECT *, rowid FROM logs WHERE level > 2"); + assert!(queries.contains(&"SELECT * FROM customers WHERE id > 10".to_string())); + assert!(queries.contains(&"SELECT *, rowid FROM logs WHERE level > 2".to_string())); } #[test] @@ -1792,14 +1954,15 @@ mod tests { // Test with single table using database qualification let select = parse_select("SELECT * FROM main.customers WHERE main.customers.id > 10"); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); let view = IncrementalView::new( "test_view".to_string(), select.clone(), tables, aliases, qualified_names, + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1827,14 +1990,15 @@ mod tests { WHERE main.customers.id > 10 AND main.orders.total > 100", ); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); let view = IncrementalView::new( "test_view".to_string(), select.clone(), tables, aliases, qualified_names, + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1848,8 +2012,93 @@ mod tests { assert_eq!(queries.len(), 2); // The FROM clauses should preserve database qualification, // but WHERE clauses should have unqualified column names - assert_eq!(queries[0], "SELECT * FROM main.customers WHERE id > 10"); - assert_eq!(queries[1], "SELECT * FROM main.orders WHERE total > 100"); + assert!(queries.contains(&"SELECT * FROM main.customers WHERE id > 10".to_string())); + assert!(queries.contains(&"SELECT * FROM main.orders WHERE total > 100".to_string())); + } + + #[test] + fn test_where_extraction_for_three_tables_with_aliases() { + // Test that WHERE clause extraction correctly separates conditions for 3+ tables + // This addresses the concern about conditions "piling up" as joins increase + let schema = create_test_schema(); + let select = parse_select( + "SELECT * FROM customers c + 
JOIN orders o ON c.id = o.customer_id + JOIN products p ON p.id = o.product_id + WHERE c.id > 10 AND o.total > 100 AND p.price > 50", + ); + + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); + + // Verify we extracted all three tables + assert_eq!(tables.len(), 3); + let table_names: Vec<&str> = tables.iter().map(|t| t.name.as_str()).collect(); + assert!(table_names.contains(&"customers")); + assert!(table_names.contains(&"orders")); + assert!(table_names.contains(&"products")); + + // Verify aliases are correctly mapped + assert_eq!(aliases.get("c"), Some(&"customers".to_string())); + assert_eq!(aliases.get("o"), Some(&"orders".to_string())); + assert_eq!(aliases.get("p"), Some(&"products".to_string())); + + // Generate populate queries to verify each table gets its own conditions + let queries = IncrementalView::generate_populate_queries( + &select, + &tables, + &aliases, + &qualified_names, + &table_conditions, + ) + .unwrap(); + + assert_eq!(queries.len(), 3); + + // Verify the exact queries generated for each table + // The order might vary, so check all possibilities + let expected_queries = vec![ + "SELECT * FROM customers WHERE id > 10", + "SELECT * FROM orders WHERE total > 100", + "SELECT * FROM products WHERE price > 50", + ]; + + for expected in &expected_queries { + assert!( + queries.contains(&expected.to_string()), + "Missing expected query: {expected}. Got: {queries:?}" + ); + } + } + + #[test] + fn test_sql_for_populate_complex_expressions_not_included() { + // Test that complex expressions (subqueries, CASE, string concat) are NOT included in populate queries + let schema = create_test_schema(); + let select = parse_select( + "SELECT * FROM customers + WHERE id > (SELECT MAX(customer_id) FROM orders) + AND name || ' Customer' = 'John Customer' + AND CASE WHEN id > 10 THEN 1 ELSE 0 END = 1 + AND EXISTS (SELECT 1 FROM orders WHERE customer_id = customers.id)", + ); + + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); + + let queries = IncrementalView::generate_populate_queries( + &select, + &tables, + &aliases, + &qualified_names, + &table_conditions, + ) + .unwrap(); + + assert_eq!(queries.len(), 1); + // Since customers table has an INTEGER PRIMARY KEY (id), we should get SELECT * + // without rowid and without WHERE clause (all conditions are complex) + assert_eq!(queries[0], "SELECT * FROM customers"); } #[test] @@ -1862,14 +2111,15 @@ mod tests { WHERE total > 100", // 'total' only exists in orders table ); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); let view = IncrementalView::new( "test_view".to_string(), select.clone(), tables, aliases, qualified_names, + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1883,8 +2133,8 @@ mod tests { assert_eq!(queries.len(), 2); // 'total' is unambiguous (only in orders), so it should be extracted - assert_eq!(queries[0], "SELECT * FROM customers"); - assert_eq!(queries[1], "SELECT * FROM orders WHERE total > 100"); + assert!(queries.contains(&"SELECT * FROM customers".to_string())); + assert!(queries.contains(&"SELECT * FROM orders WHERE total > 100".to_string())); } #[test] @@ -1899,8 +2149,8 @@ mod tests { WHERE c.id > 10", ); - let (tables, aliases, qualified_names) = - 
IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); // Check that qualified names are preserved assert!(qualified_names.contains_key("customers")); @@ -1914,6 +2164,7 @@ mod tests { tables, aliases, qualified_names.clone(), + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1928,8 +2179,8 @@ mod tests { // The FROM clause should contain the database-qualified name // But the WHERE clause should use unqualified column names - assert_eq!(queries[0], "SELECT * FROM main.customers WHERE id > 10"); - assert_eq!(queries[1], "SELECT * FROM main.orders"); + assert!(queries.contains(&"SELECT * FROM main.customers WHERE id > 10".to_string())); + assert!(queries.contains(&"SELECT * FROM main.orders".to_string())); } #[test] @@ -1944,8 +2195,8 @@ mod tests { WHERE c.id > 10 AND o.total < 1000", ); - let (tables, aliases, qualified_names) = - IncrementalView::extract_all_tables(&select, &schema).unwrap(); + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); // Check that qualified names are preserved where specified assert_eq!(qualified_names.get("customers").unwrap(), "main.customers"); @@ -1961,6 +2212,7 @@ mod tests { tables, aliases, qualified_names.clone(), + table_conditions, extract_view_columns(&select, &schema).unwrap(), &schema, 1, // main_data_root @@ -1974,7 +2226,468 @@ mod tests { assert_eq!(queries.len(), 2); // The FROM clause should preserve qualification where specified - assert_eq!(queries[0], "SELECT * FROM main.customers WHERE id > 10"); - assert_eq!(queries[1], "SELECT * FROM orders WHERE total < 1000"); + assert!(queries.contains(&"SELECT * FROM main.customers WHERE id > 10".to_string())); + assert!(queries.contains(&"SELECT * FROM orders WHERE total < 1000".to_string())); + } + + #[test] + fn test_extract_tables_with_simple_cte() { + let schema = create_test_schema(); + let select = parse_select( + "WITH customer_totals AS ( + SELECT c.id, c.name, SUM(o.total) as total_spent + FROM customers c + JOIN orders o ON c.id = o.customer_id + GROUP BY c.id, c.name + ) + SELECT * FROM customer_totals WHERE total_spent > 1000", + ); + + let (tables, aliases, _qualified_names, _table_conditions) = + extract_all_tables(&select, &schema).unwrap(); + + // Check that we found both tables from the CTE + assert_eq!(tables.len(), 2); + let table_names: Vec<&str> = tables.iter().map(|t| t.name.as_str()).collect(); + assert!(table_names.contains(&"customers")); + assert!(table_names.contains(&"orders")); + + // Check aliases from the CTE + assert_eq!(aliases.get("c"), Some(&"customers".to_string())); + assert_eq!(aliases.get("o"), Some(&"orders".to_string())); + } + + #[test] + fn test_extract_tables_with_multiple_ctes() { + let schema = create_test_schema(); + let select = parse_select( + "WITH + high_value_customers AS ( + SELECT id, name + FROM customers + WHERE id IN (SELECT customer_id FROM orders WHERE total > 500) + ), + recent_orders AS ( + SELECT id, customer_id, total + FROM orders + WHERE id > 100 + ) + SELECT hvc.name, ro.total + FROM high_value_customers hvc + JOIN recent_orders ro ON hvc.id = ro.customer_id", + ); + + let (tables, _aliases, _qualified_names, _table_conditions) = + extract_all_tables(&select, &schema).unwrap(); + + // Check that we found both tables from both CTEs + assert_eq!(tables.len(), 2); + let table_names: Vec<&str> = 
tables.iter().map(|t| t.name.as_str()).collect(); + assert!(table_names.contains(&"customers")); + assert!(table_names.contains(&"orders")); + } + + #[test] + fn test_sql_for_populate_union_mixed_conditions() { + // Test UNION where same table appears with and without WHERE clause + // This should drop ALL conditions to ensure we get all rows + let schema = create_test_schema(); + + let select = parse_select( + "SELECT * FROM customers WHERE id > 10 + UNION ALL + SELECT * FROM customers", + ); + + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); + + let view = IncrementalView::new( + "union_view".to_string(), + select.clone(), + tables, + aliases, + qualified_names, + table_conditions, + extract_view_columns(&select, &schema).unwrap(), + &schema, + 1, // main_data_root + 2, // internal_state_root + 3, // internal_state_index_root + ) + .unwrap(); + + let queries = view.sql_for_populate().unwrap(); + + assert_eq!(queries.len(), 1); + // When the same table appears with and without WHERE conditions in a UNION, + // we must fetch ALL rows (no WHERE clause) because the conditions are incompatible + assert_eq!( + queries[0], "SELECT * FROM customers", + "UNION with mixed conditions (some with WHERE, some without) should fetch ALL rows" + ); + } + + #[test] + fn test_extract_tables_with_nested_cte() { + let schema = create_test_schema(); + let select = parse_select( + "WITH RECURSIVE customer_hierarchy AS ( + SELECT id, name, 0 as level + FROM customers + WHERE id = 1 + UNION ALL + SELECT c.id, c.name, ch.level + 1 + FROM customers c + JOIN orders o ON c.id = o.customer_id + JOIN customer_hierarchy ch ON o.customer_id = ch.id + WHERE ch.level < 3 + ) + SELECT * FROM customer_hierarchy", + ); + + let (tables, _aliases, _qualified_names, _table_conditions) = + extract_all_tables(&select, &schema).unwrap(); + + // Check that we found the tables referenced in the recursive CTE + let table_names: Vec<&str> = tables.iter().map(|t| t.name.as_str()).collect(); + + // We're finding duplicates because "customers" appears twice in the recursive CTE + // Let's deduplicate + let unique_tables: std::collections::HashSet<&str> = table_names.iter().cloned().collect(); + assert_eq!(unique_tables.len(), 2); + assert!(unique_tables.contains("customers")); + assert!(unique_tables.contains("orders")); + } + + #[test] + fn test_extract_tables_with_cte_and_main_query() { + let schema = create_test_schema(); + let select = parse_select( + "WITH customer_stats AS ( + SELECT customer_id, COUNT(*) as order_count + FROM orders + GROUP BY customer_id + ) + SELECT c.name, cs.order_count, p.name as product_name + FROM customers c + JOIN customer_stats cs ON c.id = cs.customer_id + JOIN products p ON p.id = 1", + ); + + let (tables, aliases, _qualified_names, _table_conditions) = + extract_all_tables(&select, &schema).unwrap(); + + // Check that we found tables from both the CTE and the main query + assert_eq!(tables.len(), 3); + let table_names: Vec<&str> = tables.iter().map(|t| t.name.as_str()).collect(); + assert!(table_names.contains(&"customers")); + assert!(table_names.contains(&"orders")); + assert!(table_names.contains(&"products")); + + // Check aliases from main query + assert_eq!(aliases.get("c"), Some(&"customers".to_string())); + assert_eq!(aliases.get("p"), Some(&"products".to_string())); + } + + #[test] + fn test_sql_for_populate_simple_union() { + let schema = create_test_schema(); + let select = parse_select( + "SELECT * FROM orders WHERE total > 1000 
+ UNION ALL + SELECT * FROM orders WHERE total < 100", + ); + + let (tables, aliases, qualified_names, table_conditions) = + extract_all_tables(&select, &schema).unwrap(); + + // Generate populate queries + let queries = IncrementalView::generate_populate_queries( + &select, + &tables, + &aliases, + &qualified_names, + &table_conditions, + ) + .unwrap(); + + // We should have deduplicated to a single table + assert_eq!(tables.len(), 1, "Should have one unique table"); + assert_eq!(tables[0].name, "orders"); // Single table, order doesn't matter + + // Should have collected two conditions + assert_eq!(table_conditions.get("orders").unwrap().len(), 2); + + // Should combine multiple conditions with OR + assert_eq!(queries.len(), 1); + // Conditions are combined with OR + assert_eq!( + queries[0], + "SELECT * FROM orders WHERE (total > 1000) OR (total < 100)" + ); + } + + #[test] + fn test_sql_for_populate_with_union_and_filters() { + let schema = create_test_schema(); + + // Test UNION with different WHERE conditions on the same table + let select = parse_select( + "SELECT * FROM orders WHERE total > 1000 + UNION ALL + SELECT * FROM orders WHERE total < 100", + ); + + let view = IncrementalView::from_stmt( + ast::QualifiedName { + db_name: None, + name: ast::Name::Ident("test_view".to_string()), + alias: None, + }, + select, + &schema, + 1, + 2, + 3, + ) + .unwrap(); + + let queries = view.sql_for_populate().unwrap(); + + // We deduplicate tables, so we get 1 query for orders + assert_eq!(queries.len(), 1); + + // Multiple conditions on the same table are combined with OR + assert_eq!( + queries[0], + "SELECT * FROM orders WHERE (total > 1000) OR (total < 100)" + ); + } + + #[test] + fn test_sql_for_populate_with_union_mixed_tables() { + let schema = create_test_schema(); + + // Test UNION with different tables + let select = parse_select( + "SELECT id, name FROM customers WHERE id > 10 + UNION ALL + SELECT customer_id as id, 'Order' as name FROM orders WHERE total > 500", + ); + + let view = IncrementalView::from_stmt( + ast::QualifiedName { + db_name: None, + name: ast::Name::Ident("test_view".to_string()), + alias: None, + }, + select, + &schema, + 1, + 2, + 3, + ) + .unwrap(); + + let queries = view.sql_for_populate().unwrap(); + + assert_eq!(queries.len(), 2, "Should have one query per table"); + + // Check that each table gets its appropriate WHERE clause + let customers_query = queries + .iter() + .find(|q| q.contains("FROM customers")) + .unwrap(); + let orders_query = queries.iter().find(|q| q.contains("FROM orders")).unwrap(); + + assert!(customers_query.contains("WHERE id > 10")); + assert!(orders_query.contains("WHERE total > 500")); + } + + #[test] + fn test_sql_for_populate_duplicate_tables_conflicting_filters() { + // This tests what happens when we have duplicate table references with different filters + // We need to manually construct a view to simulate what would happen with CTEs + let schema = create_test_schema(); + + // Get the orders table twice (simulating what would happen with CTEs) + let orders_table = schema.get_btree_table("orders").unwrap(); + + let referenced_tables = vec![orders_table.clone(), orders_table.clone()]; + + // Create a SELECT that would have conflicting WHERE conditions + let select = parse_select( + "SELECT * FROM orders WHERE total > 1000", // This is just for the AST + ); + + let view = IncrementalView::new( + "test_view".to_string(), + select.clone(), + referenced_tables, + HashMap::new(), + HashMap::new(), + HashMap::new(), + 
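+            // The three empty maps above stand in for the aliases, qualified
+            // table names, and per-table WHERE conditions; with no recorded
+            // conditions, the generated populate queries fall back to
+            // unfiltered scans of the table.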
extract_view_columns(&select, &schema).unwrap(), + &schema, + 1, + 2, + 3, + ) + .unwrap(); + + let queries = view.sql_for_populate().unwrap(); + + // With duplicates, we should get 2 identical queries + assert_eq!(queries.len(), 2); + + // Both should be the same since they're from the same table reference + assert_eq!(queries[0], queries[1]); + } + + #[test] + fn test_table_extraction_with_nested_ctes_complex_conditions() { + let schema = create_test_schema(); + let select = parse_select( + "WITH + customer_orders AS ( + SELECT c.*, o.total + FROM customers c + JOIN orders o ON c.id = o.customer_id + WHERE c.name LIKE 'A%' AND o.total > 100 + ), + top_customers AS ( + SELECT * FROM customer_orders WHERE total > 500 + ) + SELECT * FROM top_customers", + ); + + // Test table extraction directly without creating a view + let mut tables = Vec::new(); + let mut aliases = HashMap::new(); + let mut qualified_names = HashMap::new(); + let mut table_conditions = HashMap::new(); + + IncrementalView::extract_all_tables( + &select, + &schema, + &mut tables, + &mut aliases, + &mut qualified_names, + &mut table_conditions, + ) + .unwrap(); + + let table_names: Vec<&str> = tables.iter().map(|t| t.name.as_str()).collect(); + + // Should have one reference to each table + assert_eq!(table_names.len(), 2, "Should have 2 table references"); + assert!(table_names.contains(&"customers")); + assert!(table_names.contains(&"orders")); + + // Check aliases + assert_eq!(aliases.get("c"), Some(&"customers".to_string())); + assert_eq!(aliases.get("o"), Some(&"orders".to_string())); + } + + #[test] + fn test_union_all_populate_queries() { + // Test that UNION ALL generates correct populate queries + let schema = create_test_schema(); + + // Create a UNION ALL query that references the same table twice with different WHERE conditions + let sql = " + SELECT id, name FROM customers WHERE id < 5 + UNION ALL + SELECT id, name FROM customers WHERE id > 10 + "; + + let mut parser = Parser::new(sql.as_bytes()); + let cmd = parser.next_cmd().unwrap(); + let select_stmt = match cmd.unwrap() { + turso_parser::ast::Cmd::Stmt(ast::Stmt::Select(select)) => select, + _ => panic!("Expected SELECT statement"), + }; + + // Extract tables and conditions + let (tables, aliases, qualified_names, conditions) = + extract_all_tables(&select_stmt, &schema).unwrap(); + + // Generate populate queries + let queries = IncrementalView::generate_populate_queries( + &select_stmt, + &tables, + &aliases, + &qualified_names, + &conditions, + ) + .unwrap(); + + // Expected query - assuming customers table has INTEGER PRIMARY KEY + // so we don't need to select rowid separately + let expected = "SELECT * FROM customers WHERE (id < 5) OR (id > 10)"; + + assert_eq!( + queries.len(), + 1, + "Should generate exactly 1 query for UNION ALL with same table" + ); + assert_eq!(queries[0], expected, "Query should match expected format"); + } + + #[test] + fn test_union_all_different_tables_populate_queries() { + // Test UNION ALL with different tables + let schema = create_test_schema(); + + let sql = " + SELECT id, name FROM customers WHERE id < 5 + UNION ALL + SELECT id, product_name FROM orders WHERE amount > 100 + "; + + let mut parser = Parser::new(sql.as_bytes()); + let cmd = parser.next_cmd().unwrap(); + let select_stmt = match cmd.unwrap() { + turso_parser::ast::Cmd::Stmt(ast::Stmt::Select(select)) => select, + _ => panic!("Expected SELECT statement"), + }; + + // Extract tables and conditions + let (tables, aliases, qualified_names, conditions) = + 
extract_all_tables(&select_stmt, &schema).unwrap();
+
+        // Generate populate queries
+        let queries = IncrementalView::generate_populate_queries(
+            &select_stmt,
+            &tables,
+            &aliases,
+            &qualified_names,
+            &conditions,
+        )
+        .unwrap();
+
+        // Should generate separate queries for each table
+        assert_eq!(
+            queries.len(),
+            2,
+            "Should generate 2 queries for different tables"
+        );
+
+        // Check we have queries for both tables
+        let has_customers = queries.iter().any(|q| q.contains("customers"));
+        let has_orders = queries.iter().any(|q| q.contains("orders"));
+        assert!(has_customers, "Should have a query for customers table");
+        assert!(has_orders, "Should have a query for orders table");
+
+        // Verify the customers query has its WHERE clause
+        let customers_query = queries
+            .iter()
+            .find(|q| q.contains("customers"))
+            .expect("Should have customers query");
+        assert!(
+            customers_query.contains("WHERE"),
+            "Customers query should have WHERE clause"
+        );
+    }
 }

From b419db489a0ca32646d3704b7220a26f9ec18950 Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Fri, 19 Sep 2025 05:23:10 -0500
Subject: [PATCH 62/78] Implement the DBSP merge operator

The Merge operator is a stateless operator that merges two deltas.
There are two modes: Distinct, where we merge values that are the same
together, and All, where we preserve all values. For example, a view
defined with UNION compiles to Distinct mode, while UNION ALL compiles
to All mode.

We use the rowid of the hashable row to guarantee this: in Distinct
mode, the rowid is set to 0 on both sides, so if the values are the
same, they will hash to the same thing. For All, the rowids are kept
different.

The merge operator is used for the UNION statement, which is a
cornerstone of recursive CTEs.
---
 core/incremental/merge_operator.rs | 187 ++++++++++++++++
 core/incremental/mod.rs            |   1 +
 core/incremental/operator.rs       | 336 +++++++++++++++++++++++++++++
 3 files changed, 524 insertions(+)
 create mode 100644 core/incremental/merge_operator.rs

diff --git a/core/incremental/merge_operator.rs b/core/incremental/merge_operator.rs
new file mode 100644
index 000000000..c8547028f
--- /dev/null
+++ b/core/incremental/merge_operator.rs
@@ -0,0 +1,187 @@
+// Merge operator for DBSP - combines two delta streams
+// Used in recursive CTEs and UNION operations
+
+use crate::incremental::dbsp::{Delta, DeltaPair, HashableRow};
+use crate::incremental::operator::{
+    ComputationTracker, DbspStateCursors, EvalState, IncrementalOperator,
+};
+use crate::types::IOResult;
+use crate::Result;
+use std::collections::{hash_map::DefaultHasher, HashMap};
+use std::fmt::{self, Display};
+use std::hash::{Hash, Hasher};
+use std::sync::{Arc, Mutex};
+
+/// How the merge operator should handle rowids when combining deltas
+#[derive(Debug, Clone)]
+pub enum UnionMode {
+    /// For UNION (distinct) - hash values only to merge duplicates
+    Distinct,
+    /// For UNION ALL - include source table name in hash to keep duplicates separate
+    All {
+        left_table: String,
+        right_table: String,
+    },
+}
+
+/// Merge operator that combines two input deltas into one output delta
+/// Handles both recursive CTEs and UNION/UNION ALL operations
+#[derive(Debug)]
+pub struct MergeOperator {
+    operator_id: usize,
+    union_mode: UnionMode,
+    /// For UNION: tracks seen value hashes with their assigned rowids
+    /// For UNION ALL: tracks (source_id, original_rowid) -> assigned_rowid mappings
+    seen_rows: HashMap<u64, i64>, // hash -> assigned_rowid
+    /// Next rowid to assign for new rows
+    next_rowid: i64,
+}
+
+impl MergeOperator {
+    /// Create a new merge operator with specified union mode
+    pub fn new(operator_id: usize, mode: 
UnionMode) -> Self { + Self { + operator_id, + union_mode: mode, + seen_rows: HashMap::new(), + next_rowid: 1, + } + } + + /// Transform a delta's rowids based on the union mode with state tracking + fn transform_delta(&mut self, delta: Delta, is_left: bool) -> Delta { + match &self.union_mode { + UnionMode::Distinct => { + // For UNION distinct, track seen values and deduplicate + let mut output = Delta::new(); + for (row, weight) in delta.changes { + // Hash only the values (not rowid) for deduplication + let temp_row = HashableRow::new(0, row.values.clone()); + let value_hash = temp_row.cached_hash(); + + // Check if we've seen this value before + let assigned_rowid = + if let Some(&existing_rowid) = self.seen_rows.get(&value_hash) { + // Value already seen - use existing rowid + existing_rowid + } else { + // New value - assign new rowid and remember it + let new_rowid = self.next_rowid; + self.next_rowid += 1; + self.seen_rows.insert(value_hash, new_rowid); + new_rowid + }; + + // Output the row with the assigned rowid + let final_row = HashableRow::new(assigned_rowid, temp_row.values); + output.changes.push((final_row, weight)); + } + output + } + UnionMode::All { + left_table, + right_table, + } => { + // For UNION ALL, maintain consistent rowid mapping per source + let table = if is_left { left_table } else { right_table }; + let mut source_hasher = DefaultHasher::new(); + table.hash(&mut source_hasher); + let source_id = source_hasher.finish(); + + let mut output = Delta::new(); + for (row, weight) in delta.changes { + // Create a unique key for this (source, rowid) pair + let mut key_hasher = DefaultHasher::new(); + source_id.hash(&mut key_hasher); + row.rowid.hash(&mut key_hasher); + let key_hash = key_hasher.finish(); + + // Check if we've seen this (source, rowid) before + let assigned_rowid = + if let Some(&existing_rowid) = self.seen_rows.get(&key_hash) { + // Use existing rowid for this (source, rowid) pair + existing_rowid + } else { + // New row - assign new rowid + let new_rowid = self.next_rowid; + self.next_rowid += 1; + self.seen_rows.insert(key_hash, new_rowid); + new_rowid + }; + + // Create output row with consistent rowid + let final_row = HashableRow::new(assigned_rowid, row.values.clone()); + output.changes.push((final_row, weight)); + } + output + } + } + } +} + +impl Display for MergeOperator { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match &self.union_mode { + UnionMode::Distinct => write!(f, "MergeOperator({}, UNION)", self.operator_id), + UnionMode::All { .. 
} => write!(f, "MergeOperator({}, UNION ALL)", self.operator_id), + } + } +} + +impl IncrementalOperator for MergeOperator { + fn eval( + &mut self, + input: &mut EvalState, + _cursors: &mut DbspStateCursors, + ) -> Result> { + match input { + EvalState::Init { deltas } => { + // Extract deltas from the evaluation state + let delta_pair = std::mem::take(deltas); + + // Transform deltas based on union mode (with state tracking) + let left_transformed = self.transform_delta(delta_pair.left, true); + let right_transformed = self.transform_delta(delta_pair.right, false); + + // Merge the transformed deltas + let mut output = Delta::new(); + output.merge(&left_transformed); + output.merge(&right_transformed); + + // Move to Done state + *input = EvalState::Done; + + Ok(IOResult::Done(output)) + } + EvalState::Aggregate(_) | EvalState::Join(_) | EvalState::Uninitialized => { + // Merge operator only handles Init state + unreachable!("MergeOperator only handles Init state") + } + EvalState::Done => { + // Already evaluated + Ok(IOResult::Done(Delta::new())) + } + } + } + + fn commit( + &mut self, + deltas: DeltaPair, + _cursors: &mut DbspStateCursors, + ) -> Result> { + // Transform deltas based on union mode + let left_transformed = self.transform_delta(deltas.left, true); + let right_transformed = self.transform_delta(deltas.right, false); + + // Merge the transformed deltas + let mut output = Delta::new(); + output.merge(&left_transformed); + output.merge(&right_transformed); + + Ok(IOResult::Done(output)) + } + + fn set_tracker(&mut self, _tracker: Arc>) { + // Merge operator doesn't need tracking for now + } +} diff --git a/core/incremental/mod.rs b/core/incremental/mod.rs index 67eed60e2..5ac635cce 100644 --- a/core/incremental/mod.rs +++ b/core/incremental/mod.rs @@ -6,6 +6,7 @@ pub mod expr_compiler; pub mod filter_operator; pub mod input_operator; pub mod join_operator; +pub mod merge_operator; pub mod operator; pub mod persistence; pub mod project_operator; diff --git a/core/incremental/operator.rs b/core/incremental/operator.rs index 2af512504..53a5b1949 100644 --- a/core/incremental/operator.rs +++ b/core/incremental/operator.rs @@ -3674,4 +3674,340 @@ mod tests { assert!(was_new, "Duplicate rowid found: {}. 
This would cause rows to overwrite each other in btree storage!", row.rowid);
         }
     }
+
+    // Merge operator tests
+    use crate::incremental::merge_operator::{MergeOperator, UnionMode};
+
+    #[test]
+    fn test_merge_operator_basic() {
+        let (_pager, table_root_page_id, index_root_page_id) = create_test_pager();
+        let table_cursor = BTreeCursor::new_table(None, _pager.clone(), table_root_page_id, 5);
+        let index_def = create_dbsp_state_index(index_root_page_id);
+        let index_cursor =
+            BTreeCursor::new_index(None, _pager.clone(), index_root_page_id, &index_def, 4);
+        let mut cursors = DbspStateCursors::new(table_cursor, index_cursor);
+
+        let mut merge_op = MergeOperator::new(
+            1,
+            UnionMode::All {
+                left_table: "table1".to_string(),
+                right_table: "table2".to_string(),
+            },
+        );
+
+        // Create two deltas
+        let mut left_delta = Delta::new();
+        left_delta.insert(1, vec![Value::Integer(1)]);
+        left_delta.insert(2, vec![Value::Integer(2)]);
+
+        let mut right_delta = Delta::new();
+        right_delta.insert(3, vec![Value::Integer(3)]);
+        right_delta.insert(4, vec![Value::Integer(4)]);
+
+        let delta_pair = DeltaPair::new(left_delta, right_delta);
+
+        // Evaluate merge
+        let result = merge_op.commit(delta_pair, &mut cursors).unwrap();
+
+        if let IOResult::Done(merged) = result {
+            // Should have all 4 entries
+            assert_eq!(merged.len(), 4);
+
+            // Check that all values are present
+            let values: Vec<i64> = merged
+                .changes
+                .iter()
+                .filter_map(|(row, weight)| {
+                    if *weight > 0 && !row.values.is_empty() {
+                        if let Value::Integer(n) = &row.values[0] {
+                            Some(*n)
+                        } else {
+                            None
+                        }
+                    } else {
+                        None
+                    }
+                })
+                .collect();
+
+            assert!(values.contains(&1));
+            assert!(values.contains(&2));
+            assert!(values.contains(&3));
+            assert!(values.contains(&4));
+        } else {
+            panic!("Expected Done result");
+        }
+    }
+
+    #[test]
+    fn test_merge_operator_stateful_distinct() {
+        let (_pager, table_root_page_id, index_root_page_id) = create_test_pager();
+        let table_cursor = BTreeCursor::new_table(None, _pager.clone(), table_root_page_id, 5);
+        let index_def = create_dbsp_state_index(index_root_page_id);
+        let index_cursor =
+            BTreeCursor::new_index(None, _pager.clone(), index_root_page_id, &index_def, 4);
+        let mut cursors = DbspStateCursors::new(table_cursor, index_cursor);
+
+        // Test that UNION (distinct) properly deduplicates across multiple operations
+        let mut merge_op = MergeOperator::new(7, UnionMode::Distinct);
+
+        // First operation: insert values 1, 2, 3 from left and 2, 3, 4 from right
+        let mut left_delta1 = Delta::new();
+        left_delta1.insert(1, vec![Value::Integer(1)]);
+        left_delta1.insert(2, vec![Value::Integer(2)]);
+        left_delta1.insert(3, vec![Value::Integer(3)]);
+
+        let mut right_delta1 = Delta::new();
+        right_delta1.insert(4, vec![Value::Integer(2)]); // Duplicate value 2
+        right_delta1.insert(5, vec![Value::Integer(3)]); // Duplicate value 3
+        right_delta1.insert(6, vec![Value::Integer(4)]);
+
+        let result1 = merge_op
+            .commit(DeltaPair::new(left_delta1, right_delta1), &mut cursors)
+            .unwrap();
+        if let IOResult::Done(merged1) = result1 {
+            // Should have 4 unique values (1, 2, 3, 4)
+            // But 6 total entries (3 from left + 3 from right)
+            assert_eq!(merged1.len(), 6);
+
+            // Collect unique rowids - should be 4
+            let unique_rowids: std::collections::HashSet<i64> =
+                merged1.changes.iter().map(|(row, _)| row.rowid).collect();
+            assert_eq!(
+                unique_rowids.len(),
+                4,
+                "Should have 4 unique rowids for 4 unique values"
+            );
+        } else {
+            panic!("Expected Done result");
+        }
+
+        // Second operation: insert value 2 
again from left, and value 5 from right
+        let mut left_delta2 = Delta::new();
+        left_delta2.insert(7, vec![Value::Integer(2)]); // Duplicate of existing value
+
+        let mut right_delta2 = Delta::new();
+        right_delta2.insert(8, vec![Value::Integer(5)]); // New value
+
+        let result2 = merge_op
+            .commit(DeltaPair::new(left_delta2, right_delta2), &mut cursors)
+            .unwrap();
+        if let IOResult::Done(merged2) = result2 {
+            assert_eq!(merged2.len(), 2, "Should have 2 entries in delta");
+
+            // Check that value 2 got the same rowid as before
+            let has_existing_rowid = merged2
+                .changes
+                .iter()
+                .any(|(row, _)| row.values == vec![Value::Integer(2)] && row.rowid <= 4);
+            assert!(has_existing_rowid, "Value 2 should reuse existing rowid");
+
+            // Check that value 5 got a new rowid
+            let has_new_rowid = merged2
+                .changes
+                .iter()
+                .any(|(row, _)| row.values == vec![Value::Integer(5)] && row.rowid > 4);
+            assert!(has_new_rowid, "Value 5 should get a new rowid");
+        } else {
+            panic!("Expected Done result");
+        }
+    }
+
+    #[test]
+    fn test_merge_operator_single_sided_inputs_union_all() {
+        let (_pager, table_root_page_id, index_root_page_id) = create_test_pager();
+        let table_cursor = BTreeCursor::new_table(None, _pager.clone(), table_root_page_id, 5);
+        let index_def = create_dbsp_state_index(index_root_page_id);
+        let index_cursor =
+            BTreeCursor::new_index(None, _pager.clone(), index_root_page_id, &index_def, 4);
+        let mut cursors = DbspStateCursors::new(table_cursor, index_cursor);
+
+        // Test UNION ALL with inputs coming from only one side at a time
+        let mut merge_op = MergeOperator::new(
+            10,
+            UnionMode::All {
+                left_table: "orders".to_string(),
+                right_table: "archived_orders".to_string(),
+            },
+        );
+
+        // First: only left side (orders) has data
+        let mut left_delta1 = Delta::new();
+        left_delta1.insert(100, vec![Value::Integer(1001)]);
+        left_delta1.insert(101, vec![Value::Integer(1002)]);
+
+        let right_delta1 = Delta::new(); // Empty right side
+
+        let result1 = merge_op
+            .commit(DeltaPair::new(left_delta1, right_delta1), &mut cursors)
+            .unwrap();
+
+        let first_rowids = if let IOResult::Done(ref merged1) = result1 {
+            assert_eq!(merged1.len(), 2, "Should have 2 entries from left only");
+            merged1
+                .changes
+                .iter()
+                .map(|(row, _)| row.rowid)
+                .collect::<Vec<i64>>()
+        } else {
+            panic!("Expected Done result");
+        };
+
+        // Second: only right side (archived_orders) has data
+        let left_delta2 = Delta::new(); // Empty left side
+
+        let mut right_delta2 = Delta::new();
+        right_delta2.insert(100, vec![Value::Integer(2001)]); // Same rowid as left, different table
+        right_delta2.insert(102, vec![Value::Integer(2002)]);
+
+        let result2 = merge_op
+            .commit(DeltaPair::new(left_delta2, right_delta2), &mut cursors)
+            .unwrap();
+        let second_result_rowid_100 = if let IOResult::Done(ref merged2) = result2 {
+            assert_eq!(merged2.len(), 2, "Should have 2 entries from right only");
+
+            // Rowids should be different from the left side even though original rowid 100 is the same
+            let second_rowids: Vec<i64> =
+                merged2.changes.iter().map(|(row, _)| row.rowid).collect();
+            for rowid in &second_rowids {
+                assert!(
+                    !first_rowids.contains(rowid),
+                    "Right side rowids should be different from left side rowids"
+                );
+            }
+
+            // Save rowid for archived_orders.100
+            merged2
+                .changes
+                .iter()
+                .find(|(row, _)| row.values == vec![Value::Integer(2001)])
+                .map(|(row, _)| row.rowid)
+                .unwrap()
+        } else {
+            panic!("Expected Done result");
+        };
+
+        // Third: left side again with same rowids as before
+        let mut left_delta3 = 
Delta::new();
+        left_delta3.insert(100, vec![Value::Integer(1003)]); // Same rowid 100 from orders
+        left_delta3.insert(101, vec![Value::Integer(1004)]); // Same rowid 101 from orders
+
+        let right_delta3 = Delta::new(); // Empty right side
+
+        let result3 = merge_op
+            .commit(DeltaPair::new(left_delta3, right_delta3), &mut cursors)
+            .unwrap();
+        if let IOResult::Done(merged3) = result3 {
+            assert_eq!(merged3.len(), 2, "Should have 2 entries from left");
+
+            // Should get the same assigned rowids as the first operation
+            let third_rowids: Vec<i64> = merged3.changes.iter().map(|(row, _)| row.rowid).collect();
+            assert_eq!(
+                first_rowids, third_rowids,
+                "Same (table, rowid) pairs should get same assigned rowids"
+            );
+        } else {
+            panic!("Expected Done result");
+        }
+
+        // Fourth: right side again with rowid 100
+        let left_delta4 = Delta::new(); // Empty left side
+
+        let mut right_delta4 = Delta::new();
+        right_delta4.insert(100, vec![Value::Integer(2003)]); // Same rowid 100 from archived_orders
+
+        let result4 = merge_op
+            .commit(DeltaPair::new(left_delta4, right_delta4), &mut cursors)
+            .unwrap();
+        if let IOResult::Done(merged4) = result4 {
+            assert_eq!(merged4.len(), 1, "Should have 1 entry from right");
+
+            // Should get same assigned rowid as second operation for archived_orders.100
+            let fourth_rowid = merged4.changes[0].0.rowid;
+            assert_eq!(
+                fourth_rowid, second_result_rowid_100,
+                "archived_orders rowid 100 should consistently map to same assigned rowid"
+            );
+        } else {
+            panic!("Expected Done result");
+        }
+    }
+
+    #[test]
+    fn test_merge_operator_both_sides_empty() {
+        let (_pager, table_root_page_id, index_root_page_id) = create_test_pager();
+        let table_cursor = BTreeCursor::new_table(None, _pager.clone(), table_root_page_id, 5);
+        let index_def = create_dbsp_state_index(index_root_page_id);
+        let index_cursor =
+            BTreeCursor::new_index(None, _pager.clone(), index_root_page_id, &index_def, 4);
+        let mut cursors = DbspStateCursors::new(table_cursor, index_cursor);
+
+        // Test that both sides being empty works correctly
+        let mut merge_op = MergeOperator::new(
+            12,
+            UnionMode::All {
+                left_table: "t1".to_string(),
+                right_table: "t2".to_string(),
+            },
+        );
+
+        // First: insert some data to establish state
+        let mut left_delta1 = Delta::new();
+        left_delta1.insert(1, vec![Value::Integer(100)]);
+        let mut right_delta1 = Delta::new();
+        right_delta1.insert(1, vec![Value::Integer(200)]);
+
+        let result1 = merge_op
+            .commit(DeltaPair::new(left_delta1, right_delta1), &mut cursors)
+            .unwrap();
+        let original_t1_rowid = if let IOResult::Done(ref merged1) = result1 {
+            assert_eq!(merged1.len(), 2, "Should have 2 entries initially");
+            // Save the rowid for t1.rowid=1
+            merged1
+                .changes
+                .iter()
+                .find(|(row, _)| row.values == vec![Value::Integer(100)])
+                .map(|(row, _)| row.rowid)
+                .unwrap()
+        } else {
+            panic!("Expected Done result");
+        };
+
+        // Second: both sides empty - should produce empty output
+        let empty_left = Delta::new();
+        let empty_right = Delta::new();
+
+        let result2 = merge_op
+            .commit(DeltaPair::new(empty_left, empty_right), &mut cursors)
+            .unwrap();
+        if let IOResult::Done(merged2) = result2 {
+            assert_eq!(
+                merged2.len(),
+                0,
+                "Both empty sides should produce empty output"
+            );
+        } else {
+            panic!("Expected Done result");
+        }
+
+        // Third: add more data to verify state is still intact
+        let mut left_delta3 = Delta::new();
+        left_delta3.insert(1, vec![Value::Integer(101)]); // Same rowid as before
+        let right_delta3 = Delta::new();
+
+        let result3 = 
merge_op
+            .commit(DeltaPair::new(left_delta3, right_delta3), &mut cursors)
+            .unwrap();
+        if let IOResult::Done(merged3) = result3 {
+            assert_eq!(merged3.len(), 1, "Should have 1 entry");
+            // Should reuse the same assigned rowid for t1.rowid=1
+            let rowid = merged3.changes[0].0.rowid;
+            assert_eq!(
+                rowid, original_t1_rowid,
+                "Should maintain consistent rowid mapping after empty operation"
+            );
+        } else {
+            panic!("Expected Done result");
+        }
+    }
 }

From 2627ad44de1cd23dc96a7e00d6bb17afbd1ab3f4 Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Fri, 19 Sep 2025 05:18:44 -0500
Subject: [PATCH 63/78] support union statements in the DBSP circuit compiler

---
 core/incremental/compiler.rs    | 125 +++++++++++-
 testing/materialized_views.test | 337 ++++++++++++++++++++++++++++++++
 2 files changed, 461 insertions(+), 1 deletion(-)

diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs
index 8c8189261..cec950f35 100644
--- a/core/incremental/compiler.rs
+++ b/core/incremental/compiler.rs
@@ -298,6 +298,8 @@ pub enum DbspOperator {
     },
     /// Input operator - source of data
     Input { name: String, schema: SchemaRef },
+    /// Merge operator for combining streams (used in recursive CTEs and UNION)
+    Merge { schema: SchemaRef },
 }

 /// Represents an expression in DBSP
@@ -807,6 +809,13 @@ impl DbspCircuit {
             DbspOperator::Input { name, .. } => {
                 writeln!(f, "{indent}Input[{node_id}]: {name}")?;
             }
+            DbspOperator::Merge { schema } => {
+                writeln!(
+                    f,
+                    "{indent}Merge[{node_id}]: UNION/Recursive (schema: {} columns)",
+                    schema.columns.len()
+                )?;
+            }
         }

         for input_id in &node.inputs {
@@ -1286,8 +1295,12 @@
                 );
                 Ok(node_id)
             }
+            LogicalPlan::Union(union) => {
+                // Handle UNION and UNION ALL
+                self.compile_union(union)
+            }
             _ => Err(LimboError::ParseError(
-                format!("Unsupported operator in DBSP compiler: only Filter, Projection, Join and Aggregate are supported, got: {:?}",
+                format!("Unsupported operator in DBSP compiler: only Filter, Projection, Join, Aggregate, and Union are supported, got: {:?}",
                     match plan {
                         LogicalPlan::Sort(_) => "Sort",
                         LogicalPlan::Limit(_) => "Limit",
@@ -1304,6 +1317,116 @@ impl DbspCompiler {
         }
     }

+    /// Extract a representative table name from a logical plan (for UNION ALL identification)
+    /// Returns a string that uniquely identifies the source of the data
+    fn extract_source_identifier(plan: &LogicalPlan) -> String {
+        match plan {
+            LogicalPlan::TableScan(scan) => {
+                // Direct table scan - use the table name
+                scan.table_name.clone()
+            }
+            LogicalPlan::Projection(proj) => {
+                // Pass through to input
+                Self::extract_source_identifier(&proj.input)
+            }
+            LogicalPlan::Filter(filter) => {
+                // Pass through to input
+                Self::extract_source_identifier(&filter.input)
+            }
+            LogicalPlan::Aggregate(agg) => {
+                // Aggregate of a table
+                format!("agg_{}", Self::extract_source_identifier(&agg.input))
+            }
+            LogicalPlan::Sort(sort) => {
+                // Pass through to input
+                Self::extract_source_identifier(&sort.input)
+            }
+            LogicalPlan::Limit(limit) => {
+                // Pass through to input
+                Self::extract_source_identifier(&limit.input)
+            }
+            LogicalPlan::Join(join) => {
+                // Join of two sources - combine their identifiers
+                let left_id = Self::extract_source_identifier(&join.left);
+                let right_id = Self::extract_source_identifier(&join.right);
+                format!("join_{left_id}_{right_id}")
+            }
+            LogicalPlan::Union(union) => {
+                // Union of multiple sources
+                if union.inputs.is_empty() {
+                    "union_empty".to_string()
+                } else {
+                    let identifiers: Vec<String> = union
+                        .inputs
+                        .iter()
+                        
.map(|input| Self::extract_source_identifier(input))
+                        .collect();
+                    format!("union_{}", identifiers.join("_"))
+                }
+            }
+            LogicalPlan::Distinct(distinct) => {
+                // Distinct of a source
+                format!(
+                    "distinct_{}",
+                    Self::extract_source_identifier(&distinct.input)
+                )
+            }
+            LogicalPlan::WithCTE(with_cte) => {
+                // CTE body
+                Self::extract_source_identifier(&with_cte.body)
+            }
+            LogicalPlan::CTERef(cte_ref) => {
+                // CTE reference - use the CTE name
+                format!("cte_{}", cte_ref.name)
+            }
+            LogicalPlan::EmptyRelation(_) => "empty".to_string(),
+            LogicalPlan::Values(_) => "values".to_string(),
+        }
+    }
+
+    /// Compile a UNION operator
+    fn compile_union(&mut self, union: &crate::translate::logical::Union) -> Result<usize> {
+        if union.inputs.len() != 2 {
+            return Err(LimboError::ParseError(format!(
+                "UNION requires exactly 2 inputs, got {}",
+                union.inputs.len()
+            )));
+        }
+
+        // Extract source identifiers from each input (for UNION ALL)
+        let left_source = Self::extract_source_identifier(&union.inputs[0]);
+        let right_source = Self::extract_source_identifier(&union.inputs[1]);
+
+        // Compile left and right inputs
+        let left_id = self.compile_plan(&union.inputs[0])?;
+        let right_id = self.compile_plan(&union.inputs[1])?;
+
+        use crate::incremental::merge_operator::{MergeOperator, UnionMode};
+
+        // Create a merge operator that handles the rowid transformation
+        let operator_id = self.circuit.next_id;
+        let mode = if union.all {
+            // For UNION ALL, pass the source identifiers
+            UnionMode::All {
+                left_table: left_source,
+                right_table: right_source,
+            }
+        } else {
+            UnionMode::Distinct
+        };
+        let merge_operator = Box::new(MergeOperator::new(operator_id, mode));
+
+        let merge_id = self.circuit.add_node(
+            DbspOperator::Merge {
+                schema: union.schema.clone(),
+            },
+            vec![left_id, right_id],
+            merge_operator,
+        );
+
+        Ok(merge_id)
+    }
+
     /// Convert a logical expression to a DBSP expression
     fn compile_expr(expr: &LogicalExpr) -> Result<DbspExpr> {
         match expr {
diff --git a/testing/materialized_views.test b/testing/materialized_views.test
index 15229a48c..354f65d39 100755
--- a/testing/materialized_views.test
+++ b/testing/materialized_views.test
@@ -1091,3 +1091,340 @@ do_execsql_test_on_specific_db {:memory:} matview-join-complex-where {
 } {Charlie|10|100|1000
 Alice|5|100|500
 Charlie|6|75|450}
+
+# Test UNION queries in materialized views
+do_execsql_test_on_specific_db {:memory:} matview-union-simple {
+    CREATE TABLE sales_online(id INTEGER, product TEXT, amount INTEGER);
+    CREATE TABLE sales_store(id INTEGER, product TEXT, amount INTEGER);
+
+    INSERT INTO sales_online VALUES
+        (1, 'Laptop', 1200),
+        (2, 'Mouse', 25),
+        (3, 'Monitor', 400);
+
+    INSERT INTO sales_store VALUES
+        (1, 'Keyboard', 75),
+        (2, 'Chair', 150),
+        (3, 'Desk', 350);
+
+    -- Create a view that combines both sources
+    CREATE MATERIALIZED VIEW all_sales AS
+    SELECT product, amount FROM sales_online
+    UNION ALL
+    SELECT product, amount FROM sales_store;
+
+    SELECT * FROM all_sales ORDER BY product;
+} {Chair|150
+Desk|350
+Keyboard|75
+Laptop|1200
+Monitor|400
+Mouse|25}
+
+do_execsql_test_on_specific_db {:memory:} matview-union-with-where {
+    CREATE TABLE employees(id INTEGER, name TEXT, dept TEXT, salary INTEGER);
+    CREATE TABLE contractors(id INTEGER, name TEXT, dept TEXT, rate INTEGER);
+
+    INSERT INTO employees VALUES
+        (1, 'Alice', 'Engineering', 90000),
+        (2, 'Bob', 'Sales', 60000),
+        (3, 'Charlie', 'Engineering', 85000);
+
+    INSERT INTO contractors VALUES
+        (1, 'David', 'Engineering', 150),
+        (2, 'Eve', 'Marketing', 120),
+        (3, 'Frank', 
'Engineering', 180); + + -- High-earning staff from both categories + CREATE MATERIALIZED VIEW high_earners AS + SELECT name, dept, salary as compensation FROM employees WHERE salary > 80000 + UNION ALL + SELECT name, dept, rate * 2000 as compensation FROM contractors WHERE rate > 140; + + SELECT * FROM high_earners ORDER BY name; +} {Alice|Engineering|90000 +Charlie|Engineering|85000 +David|Engineering|300000 +Frank|Engineering|360000} + +do_execsql_test_on_specific_db {:memory:} matview-union-same-table-different-filters { + CREATE TABLE orders(id INTEGER, customer_id INTEGER, product TEXT, amount INTEGER, status TEXT); + + INSERT INTO orders VALUES + (1, 1, 'Laptop', 1200, 'completed'), + (2, 2, 'Mouse', 25, 'pending'), + (3, 1, 'Monitor', 400, 'completed'), + (4, 3, 'Keyboard', 75, 'cancelled'), + (5, 2, 'Desk', 350, 'completed'), + (6, 3, 'Chair', 150, 'pending'); + + -- View showing priority orders: high-value OR pending status + CREATE MATERIALIZED VIEW priority_orders AS + SELECT id, customer_id, product, amount FROM orders WHERE amount > 300 + UNION ALL + SELECT id, customer_id, product, amount FROM orders WHERE status = 'pending'; + + SELECT * FROM priority_orders ORDER BY id; +} {1|1|Laptop|1200 +2|2|Mouse|25 +3|1|Monitor|400 +5|2|Desk|350 +6|3|Chair|150} + +do_execsql_test_on_specific_db {:memory:} matview-union-with-aggregation { + CREATE TABLE q1_sales(product TEXT, quantity INTEGER, revenue INTEGER); + CREATE TABLE q2_sales(product TEXT, quantity INTEGER, revenue INTEGER); + + INSERT INTO q1_sales VALUES + ('Laptop', 10, 12000), + ('Mouse', 50, 1250), + ('Monitor', 8, 3200); + + INSERT INTO q2_sales VALUES + ('Laptop', 15, 18000), + ('Mouse', 60, 1500), + ('Keyboard', 30, 2250); + + -- Combined quarterly summary + CREATE MATERIALIZED VIEW half_year_summary AS + SELECT 'Q1' as quarter, SUM(quantity) as total_units, SUM(revenue) as total_revenue + FROM q1_sales + UNION ALL + SELECT 'Q2' as quarter, SUM(quantity) as total_units, SUM(revenue) as total_revenue + FROM q2_sales; + + SELECT * FROM half_year_summary ORDER BY quarter; +} {Q1|68|16450 +Q2|105|21750} + +do_execsql_test_on_specific_db {:memory:} matview-union-with-join { + CREATE TABLE customers(id INTEGER PRIMARY KEY, name TEXT, type TEXT); + CREATE TABLE orders(id INTEGER PRIMARY KEY, customer_id INTEGER, amount INTEGER); + CREATE TABLE quotes(id INTEGER PRIMARY KEY, customer_id INTEGER, amount INTEGER); + + INSERT INTO customers VALUES + (1, 'Alice', 'premium'), + (2, 'Bob', 'regular'), + (3, 'Charlie', 'premium'); + + INSERT INTO orders VALUES + (1, 1, 1000), + (2, 2, 500), + (3, 3, 1500); + + INSERT INTO quotes VALUES + (1, 1, 800), + (2, 2, 300), + (3, 3, 2000); + + -- All premium customer transactions (orders and quotes) + CREATE MATERIALIZED VIEW premium_transactions AS + SELECT c.name, 'order' as type, o.amount + FROM customers c + JOIN orders o ON c.id = o.customer_id + WHERE c.type = 'premium' + UNION ALL + SELECT c.name, 'quote' as type, q.amount + FROM customers c + JOIN quotes q ON c.id = q.customer_id + WHERE c.type = 'premium'; + + SELECT * FROM premium_transactions ORDER BY name, type, amount; +} {Alice|order|1000 +Alice|quote|800 +Charlie|order|1500 +Charlie|quote|2000} + +do_execsql_test_on_specific_db {:memory:} matview-union-distinct { + CREATE TABLE active_users(id INTEGER, name TEXT, email TEXT); + CREATE TABLE inactive_users(id INTEGER, name TEXT, email TEXT); + + INSERT INTO active_users VALUES + (1, 'Alice', 'alice@example.com'), + (2, 'Bob', 'bob@example.com'), + (3, 'Charlie', 
'charlie@example.com'); + + INSERT INTO inactive_users VALUES + (4, 'David', 'david@example.com'), + (2, 'Bob', 'bob@example.com'), -- Bob appears in both + (5, 'Eve', 'eve@example.com'); + + -- All unique users (using UNION to deduplicate) + CREATE MATERIALIZED VIEW all_users AS + SELECT id, name, email FROM active_users + UNION + SELECT id, name, email FROM inactive_users; + + SELECT * FROM all_users ORDER BY id; +} {1|Alice|alice@example.com +2|Bob|bob@example.com +3|Charlie|charlie@example.com +4|David|david@example.com +5|Eve|eve@example.com} + +do_execsql_test_on_specific_db {:memory:} matview-union-complex-multiple-branches { + CREATE TABLE products(id INTEGER, name TEXT, category TEXT, price INTEGER); + + INSERT INTO products VALUES + (1, 'Laptop', 'Electronics', 1200), + (2, 'Mouse', 'Electronics', 25), + (3, 'Desk', 'Furniture', 350), + (4, 'Chair', 'Furniture', 150), + (5, 'Monitor', 'Electronics', 400), + (6, 'Keyboard', 'Electronics', 75), + (7, 'Bookshelf', 'Furniture', 200), + (8, 'Tablet', 'Electronics', 600); + + -- Products of interest: expensive electronics, all furniture, or very cheap items + CREATE MATERIALIZED VIEW featured_products AS + SELECT name, category, price, 'PremiumElectronic' as tag + FROM products + WHERE category = 'Electronics' AND price > 500 + UNION ALL + SELECT name, category, price, 'Furniture' as tag + FROM products + WHERE category = 'Furniture' + UNION ALL + SELECT name, category, price, 'Budget' as tag + FROM products + WHERE price < 50; + + SELECT * FROM featured_products ORDER BY tag, name; +} {Mouse|Electronics|25|Budget +Bookshelf|Furniture|200|Furniture +Chair|Furniture|150|Furniture +Desk|Furniture|350|Furniture +Laptop|Electronics|1200|PremiumElectronic +Tablet|Electronics|600|PremiumElectronic} + +do_execsql_test_on_specific_db {:memory:} matview-union-maintenance-insert { + CREATE TABLE t1(id INTEGER, value INTEGER); + CREATE TABLE t2(id INTEGER, value INTEGER); + + INSERT INTO t1 VALUES (1, 100), (2, 200); + INSERT INTO t2 VALUES (3, 300), (4, 400); + + CREATE MATERIALIZED VIEW combined AS + SELECT id, value FROM t1 WHERE value > 150 + UNION ALL + SELECT id, value FROM t2 WHERE value > 350; + + SELECT * FROM combined ORDER BY id; + + -- Insert into t1 + INSERT INTO t1 VALUES (5, 500); + SELECT * FROM combined ORDER BY id; + + -- Insert into t2 + INSERT INTO t2 VALUES (6, 600); + SELECT * FROM combined ORDER BY id; +} {2|200 +4|400 +2|200 +4|400 +5|500 +2|200 +4|400 +5|500 +6|600} + +do_execsql_test_on_specific_db {:memory:} matview-union-maintenance-delete { + CREATE TABLE source1(id INTEGER PRIMARY KEY, data TEXT); + CREATE TABLE source2(id INTEGER PRIMARY KEY, data TEXT); + + INSERT INTO source1 VALUES (1, 'A'), (2, 'B'), (3, 'C'); + INSERT INTO source2 VALUES (4, 'D'), (5, 'E'), (6, 'F'); + + CREATE MATERIALIZED VIEW merged AS + SELECT id, data FROM source1 + UNION ALL + SELECT id, data FROM source2; + + SELECT COUNT(*) FROM merged; + + DELETE FROM source1 WHERE id = 2; + SELECT COUNT(*) FROM merged; + + DELETE FROM source2 WHERE id > 4; + SELECT COUNT(*) FROM merged; +} {6 +5 +3} + +do_execsql_test_on_specific_db {:memory:} matview-union-maintenance-update { + CREATE TABLE high_priority(id INTEGER PRIMARY KEY, task TEXT, priority INTEGER); + CREATE TABLE normal_priority(id INTEGER PRIMARY KEY, task TEXT, priority INTEGER); + + INSERT INTO high_priority VALUES (1, 'Task A', 10), (2, 'Task B', 9); + INSERT INTO normal_priority VALUES (3, 'Task C', 5), (4, 'Task D', 6); + + CREATE MATERIALIZED VIEW active_tasks AS + SELECT id, task, 
priority FROM high_priority WHERE priority >= 9 + UNION ALL + SELECT id, task, priority FROM normal_priority WHERE priority >= 5; + + SELECT COUNT(*) FROM active_tasks; + + -- Update drops a high priority task below threshold + UPDATE high_priority SET priority = 8 WHERE id = 2; + SELECT COUNT(*) FROM active_tasks; + + -- Update brings a normal task above threshold + UPDATE normal_priority SET priority = 3 WHERE id = 3; + SELECT COUNT(*) FROM active_tasks; +} {4 +3 +2} + +# Test UNION ALL with same table and different WHERE conditions +do_execsql_test_on_specific_db {:memory:} matview-union-all-same-table { + CREATE TABLE test(id INTEGER PRIMARY KEY, value INTEGER); + INSERT INTO test VALUES (1, 10), (2, 20); + + -- This UNION ALL should return both rows + CREATE MATERIALIZED VIEW union_view AS + SELECT id, value FROM test WHERE value < 15 + UNION ALL + SELECT id, value FROM test WHERE value > 15; + + -- Should return 2 rows: (1,10) and (2,20) + SELECT * FROM union_view ORDER BY id; +} {1|10 +2|20} + +# Test UNION ALL preserves all rows in count +do_execsql_test_on_specific_db {:memory:} matview-union-all-row-count { + CREATE TABLE data(id INTEGER PRIMARY KEY, num INTEGER); + INSERT INTO data VALUES (1, 5), (2, 15), (3, 25); + + CREATE MATERIALIZED VIEW split_view AS + SELECT id, num FROM data WHERE num <= 10 + UNION ALL + SELECT id, num FROM data WHERE num > 10; + + -- Should return count of 3 + SELECT COUNT(*) FROM split_view; +} {3} + +# Test UNION ALL with text columns and filtering +do_execsql_test_on_specific_db {:memory:} matview-union-all-text-filter { + CREATE TABLE items(id INTEGER PRIMARY KEY, category TEXT, price INTEGER); + INSERT INTO items VALUES + (1, 'cheap', 10), + (2, 'expensive', 100), + (3, 'cheap', 20), + (4, 'expensive', 200); + + CREATE MATERIALIZED VIEW price_categories AS + SELECT id, category, price FROM items WHERE category = 'cheap' + UNION ALL + SELECT id, category, price FROM items WHERE category = 'expensive'; + + -- Should return all 4 items + SELECT COUNT(*) FROM price_categories; + SELECT id FROM price_categories ORDER BY id; +} {4 +1 +2 +3 +4} From fbad158213f1a945c11977e01a2632ab5e8ee80a Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 22 Sep 2025 09:15:55 +0300 Subject: [PATCH 64/78] Turso 0.2.0-pre.4 --- Cargo.lock | 54 ++++++------- Cargo.toml | 34 ++++---- bindings/javascript/package-lock.json | 40 ++++----- bindings/javascript/package.json | 2 +- .../packages/browser-common/package.json | 4 +- .../javascript/packages/browser/package.json | 6 +- .../javascript/packages/common/package.json | 2 +- .../javascript/packages/native/package.json | 4 +- .../packages/wasm-runtime/package.json | 2 +- .../sync/packages/browser/package.json | 8 +- .../sync/packages/common/package.json | 4 +- .../sync/packages/native/package.json | 6 +- bindings/javascript/yarn.lock | 81 +++++++++++-------- 13 files changed, 131 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f774018b0..d0df1bbab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -654,7 +654,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_tester" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "anyhow", "assert_cmd", @@ -2090,7 +2090,7 @@ dependencies = [ [[package]] name = "limbo_completion" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "mimalloc", "turso_ext", @@ -2098,7 +2098,7 @@ dependencies = [ [[package]] name = "limbo_crypto" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" 
dependencies = [ "blake3", "data-encoding", @@ -2111,7 +2111,7 @@ dependencies = [ [[package]] name = "limbo_csv" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "csv", "mimalloc", @@ -2121,7 +2121,7 @@ dependencies = [ [[package]] name = "limbo_ipaddr" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "ipnetwork", "mimalloc", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "limbo_percentile" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "mimalloc", "turso_ext", @@ -2138,7 +2138,7 @@ dependencies = [ [[package]] name = "limbo_regexp" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "mimalloc", "regex", @@ -2147,7 +2147,7 @@ dependencies = [ [[package]] name = "limbo_sim" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "anyhow", "chrono", @@ -2180,7 +2180,7 @@ dependencies = [ [[package]] name = "limbo_sqlite_test_ext" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "cc", ] @@ -2901,7 +2901,7 @@ dependencies = [ [[package]] name = "py-turso" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "anyhow", "pyo3", @@ -3581,7 +3581,7 @@ checksum = "d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d" [[package]] name = "sql_generation" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "anarchist-readable-name-generator-lib 0.2.0", "anyhow", @@ -4082,7 +4082,7 @@ dependencies = [ [[package]] name = "turso" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "rand 0.9.2", "rand_chacha 0.9.0", @@ -4094,7 +4094,7 @@ dependencies = [ [[package]] name = "turso-java" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "jni", "thiserror 2.0.16", @@ -4103,7 +4103,7 @@ dependencies = [ [[package]] name = "turso_cli" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "anyhow", "cfg-if", @@ -4136,7 +4136,7 @@ dependencies = [ [[package]] name = "turso_core" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "aegis", "aes", @@ -4194,7 +4194,7 @@ dependencies = [ [[package]] name = "turso_dart" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "flutter_rust_bridge", "turso_core", @@ -4202,7 +4202,7 @@ dependencies = [ [[package]] name = "turso_ext" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "chrono", "getrandom 0.3.2", @@ -4211,7 +4211,7 @@ dependencies = [ [[package]] name = "turso_ext_tests" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "env_logger 0.11.7", "lazy_static", @@ -4222,7 +4222,7 @@ dependencies = [ [[package]] name = "turso_macros" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "proc-macro2", "quote", @@ -4231,7 +4231,7 @@ dependencies = [ [[package]] name = "turso_node" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "chrono", "napi", @@ -4244,7 +4244,7 @@ dependencies = [ [[package]] name = "turso_parser" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "bitflags 2.9.4", "criterion", @@ -4260,7 +4260,7 @@ dependencies = [ [[package]] name = "turso_sqlite3" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "env_logger 0.11.7", "libc", @@ -4273,7 +4273,7 @@ dependencies = [ [[package]] name = "turso_sqlite3_parser" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "bitflags 2.9.4", "cc", @@ -4291,7 +4291,7 @@ dependencies = [ [[package]] name = "turso_stress" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" 
dependencies = [ "anarchist-readable-name-generator-lib 0.1.2", "antithesis_sdk", @@ -4307,7 +4307,7 @@ dependencies = [ [[package]] name = "turso_sync_engine" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "base64", "bytes", @@ -4333,7 +4333,7 @@ dependencies = [ [[package]] name = "turso_sync_js" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "genawaiter", "napi", @@ -4348,7 +4348,7 @@ dependencies = [ [[package]] name = "turso_whopper" -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" dependencies = [ "anyhow", "clap", diff --git a/Cargo.toml b/Cargo.toml index cad11bee3..5615eceea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,29 +39,29 @@ exclude = [ ] [workspace.package] -version = "0.2.0-pre.3" +version = "0.2.0-pre.4" authors = ["the Limbo authors"] edition = "2021" license = "MIT" repository = "https://github.com/tursodatabase/turso" [workspace.dependencies] -turso = { path = "bindings/rust", version = "0.2.0-pre.3" } -turso_node = { path = "bindings/javascript", version = "0.2.0-pre.3" } -limbo_completion = { path = "extensions/completion", version = "0.2.0-pre.3" } -turso_core = { path = "core", version = "0.2.0-pre.3" } -turso_sync_engine = { path = "sync/engine", version = "0.2.0-pre.3" } -limbo_crypto = { path = "extensions/crypto", version = "0.2.0-pre.3" } -limbo_csv = { path = "extensions/csv", version = "0.2.0-pre.3" } -turso_ext = { path = "extensions/core", version = "0.2.0-pre.3" } -turso_ext_tests = { path = "extensions/tests", version = "0.2.0-pre.3" } -limbo_ipaddr = { path = "extensions/ipaddr", version = "0.2.0-pre.3" } -turso_macros = { path = "macros", version = "0.2.0-pre.3" } -limbo_percentile = { path = "extensions/percentile", version = "0.2.0-pre.3" } -limbo_regexp = { path = "extensions/regexp", version = "0.2.0-pre.3" } -turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.2.0-pre.3" } -limbo_uuid = { path = "extensions/uuid", version = "0.2.0-pre.3" } -turso_parser = { path = "parser", version = "0.2.0-pre.3" } +turso = { path = "bindings/rust", version = "0.2.0-pre.4" } +turso_node = { path = "bindings/javascript", version = "0.2.0-pre.4" } +limbo_completion = { path = "extensions/completion", version = "0.2.0-pre.4" } +turso_core = { path = "core", version = "0.2.0-pre.4" } +turso_sync_engine = { path = "sync/engine", version = "0.2.0-pre.4" } +limbo_crypto = { path = "extensions/crypto", version = "0.2.0-pre.4" } +limbo_csv = { path = "extensions/csv", version = "0.2.0-pre.4" } +turso_ext = { path = "extensions/core", version = "0.2.0-pre.4" } +turso_ext_tests = { path = "extensions/tests", version = "0.2.0-pre.4" } +limbo_ipaddr = { path = "extensions/ipaddr", version = "0.2.0-pre.4" } +turso_macros = { path = "macros", version = "0.2.0-pre.4" } +limbo_percentile = { path = "extensions/percentile", version = "0.2.0-pre.4" } +limbo_regexp = { path = "extensions/regexp", version = "0.2.0-pre.4" } +turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.2.0-pre.4" } +limbo_uuid = { path = "extensions/uuid", version = "0.2.0-pre.4" } +turso_parser = { path = "parser", version = "0.2.0-pre.4" } sql_generation = { path = "sql_generation" } strum = { version = "0.26", features = ["derive"] } strum_macros = "0.26" diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json index 3df161dd7..cb93e0ce1 100644 --- a/bindings/javascript/package-lock.json +++ b/bindings/javascript/package-lock.json @@ -1,11 +1,11 @@ { "name": "javascript", - "version": 
"0.2.0-pre.3", + "version": "0.2.0-pre.4", "lockfileVersion": 3, "requires": true, "packages": { "": { - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "workspaces": [ "packages/wasm-runtime", "packages/common", @@ -4594,11 +4594,11 @@ }, "packages/browser": { "name": "@tursodatabase/database-browser", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.3", - "@tursodatabase/database-common": "^0.2.0-pre.3" + "@tursodatabase/database-browser-common": "^0.2.0-pre.4", + "@tursodatabase/database-common": "^0.2.0-pre.4" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -4611,10 +4611,10 @@ }, "packages/browser-common": { "name": "@tursodatabase/database-browser-common", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/wasm-runtime": "^0.2.0-pre.3" + "@tursodatabase/wasm-runtime": "^0.2.0-pre.4" }, "devDependencies": { "typescript": "^5.9.2" @@ -4622,7 +4622,7 @@ }, "packages/common": { "name": "@tursodatabase/database-common", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "license": "MIT", "devDependencies": { "typescript": "^5.9.2" @@ -4630,10 +4630,10 @@ }, "packages/native": { "name": "@tursodatabase/database", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.3" + "@tursodatabase/database-common": "^0.2.0-pre.4" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -4647,7 +4647,7 @@ }, "packages/wasm-runtime": { "name": "@tursodatabase/wasm-runtime", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "license": "MIT", "dependencies": { "@emnapi/core": "^1.4.5", @@ -4670,12 +4670,12 @@ }, "sync/packages/browser": { "name": "@tursodatabase/sync-browser", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.3", - "@tursodatabase/database-common": "^0.2.0-pre.3", - "@tursodatabase/sync-common": "^0.2.0-pre.3" + "@tursodatabase/database-browser-common": "^0.2.0-pre.4", + "@tursodatabase/database-common": "^0.2.0-pre.4", + "@tursodatabase/sync-common": "^0.2.0-pre.4" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -4688,10 +4688,10 @@ }, "sync/packages/common": { "name": "@tursodatabase/sync-common", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.3" + "@tursodatabase/database-common": "^0.2.0-pre.4" }, "devDependencies": { "typescript": "^5.9.2" @@ -4699,11 +4699,11 @@ }, "sync/packages/native": { "name": "@tursodatabase/sync", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.3", - "@tursodatabase/sync-common": "^0.2.0-pre.3" + "@tursodatabase/database-common": "^0.2.0-pre.4", + "@tursodatabase/sync-common": "^0.2.0-pre.4" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json index 709f035e3..1beb1f34f 100644 --- a/bindings/javascript/package.json +++ b/bindings/javascript/package.json @@ -15,5 +15,5 @@ "sync/packages/native", "sync/packages/browser" ], - "version": "0.2.0-pre.3" + "version": "0.2.0-pre.4" } diff --git a/bindings/javascript/packages/browser-common/package.json b/bindings/javascript/packages/browser-common/package.json index 6881b0c74..97ee47624 100644 --- 
a/bindings/javascript/packages/browser-common/package.json +++ b/bindings/javascript/packages/browser-common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-browser-common", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/wasm-runtime": "^0.2.0-pre.3" + "@tursodatabase/wasm-runtime": "^0.2.0-pre.4" } } diff --git a/bindings/javascript/packages/browser/package.json b/bindings/javascript/packages/browser/package.json index c3f1e9acf..904f50610 100644 --- a/bindings/javascript/packages/browser/package.json +++ b/bindings/javascript/packages/browser/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-browser", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -51,7 +51,7 @@ ] }, "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.3", - "@tursodatabase/database-common": "^0.2.0-pre.3" + "@tursodatabase/database-browser-common": "^0.2.0-pre.4", + "@tursodatabase/database-common": "^0.2.0-pre.4" } } diff --git a/bindings/javascript/packages/common/package.json b/bindings/javascript/packages/common/package.json index 460810ea0..94b4e08f4 100644 --- a/bindings/javascript/packages/common/package.json +++ b/bindings/javascript/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-common", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json index 56888f616..b2457dfc6 100644 --- a/bindings/javascript/packages/native/package.json +++ b/bindings/javascript/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -47,7 +47,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.3" + "@tursodatabase/database-common": "^0.2.0-pre.4" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/packages/wasm-runtime/package.json b/bindings/javascript/packages/wasm-runtime/package.json index d03f8b927..f78d0b17c 100644 --- a/bindings/javascript/packages/wasm-runtime/package.json +++ b/bindings/javascript/packages/wasm-runtime/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/wasm-runtime", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "type": "module", "description": "Runtime and polyfill for wasm targets", "author": { diff --git a/bindings/javascript/sync/packages/browser/package.json b/bindings/javascript/sync/packages/browser/package.json index e09740154..9121d6d80 100644 --- a/bindings/javascript/sync/packages/browser/package.json +++ b/bindings/javascript/sync/packages/browser/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-browser", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -54,8 +54,8 @@ "#index": "./index.js" }, "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.3", - "@tursodatabase/database-common": "^0.2.0-pre.3", - "@tursodatabase/sync-common": "^0.2.0-pre.3" + "@tursodatabase/database-browser-common": "^0.2.0-pre.4", + 
"@tursodatabase/database-common": "^0.2.0-pre.4", + "@tursodatabase/sync-common": "^0.2.0-pre.4" } } diff --git a/bindings/javascript/sync/packages/common/package.json b/bindings/javascript/sync/packages/common/package.json index dc962ad31..81cd1afa3 100644 --- a/bindings/javascript/sync/packages/common/package.json +++ b/bindings/javascript/sync/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-common", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.3" + "@tursodatabase/database-common": "^0.2.0-pre.4" } } diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json index c7c52414f..9f3c40135 100644 --- a/bindings/javascript/sync/packages/native/package.json +++ b/bindings/javascript/sync/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync", - "version": "0.2.0-pre.3", + "version": "0.2.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -44,8 +44,8 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.3", - "@tursodatabase/sync-common": "^0.2.0-pre.3" + "@tursodatabase/database-common": "^0.2.0-pre.4", + "@tursodatabase/sync-common": "^0.2.0-pre.4" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock index f79e485bd..35e17e013 100644 --- a/bindings/javascript/yarn.lock +++ b/bindings/javascript/yarn.lock @@ -1575,11 +1575,11 @@ __metadata: languageName: node linkType: hard -"@tursodatabase/database-browser-common@npm:^0.2.0-pre.3, @tursodatabase/database-browser-common@workspace:packages/browser-common": +"@tursodatabase/database-browser-common@npm:^0.2.0-pre.4, @tursodatabase/database-browser-common@workspace:packages/browser-common": version: 0.0.0-use.local resolution: "@tursodatabase/database-browser-common@workspace:packages/browser-common" dependencies: - "@tursodatabase/wasm-runtime": "npm:^0.2.0-pre.3" + "@tursodatabase/wasm-runtime": "npm:^0.2.0-pre.4" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1589,8 +1589,8 @@ __metadata: resolution: "@tursodatabase/database-browser@workspace:packages/browser" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.3" - "@tursodatabase/database-common": "npm:^0.2.0-pre.3" + "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.4" + "@tursodatabase/database-common": "npm:^0.2.0-pre.4" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1599,7 +1599,7 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/database-common@npm:^0.2.0-pre.3, @tursodatabase/database-common@workspace:packages/common": +"@tursodatabase/database-common@npm:^0.2.0-pre.4, @tursodatabase/database-common@workspace:packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/database-common@workspace:packages/common" dependencies: @@ -1612,7 +1612,7 @@ __metadata: resolution: "@tursodatabase/database@workspace:packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0-pre.3" + "@tursodatabase/database-common": "npm:^0.2.0-pre.4" "@types/node": "npm:^24.3.1" better-sqlite3: "npm:^12.2.0" drizzle-kit: "npm:^0.31.4" @@ -1627,9 +1627,9 @@ __metadata: resolution: 
"@tursodatabase/sync-browser@workspace:sync/packages/browser" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.3" - "@tursodatabase/database-common": "npm:^0.2.0-pre.3" - "@tursodatabase/sync-common": "npm:^0.2.0-pre.3" + "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.4" + "@tursodatabase/database-common": "npm:^0.2.0-pre.4" + "@tursodatabase/sync-common": "npm:^0.2.0-pre.4" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1638,11 +1638,11 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/sync-common@npm:^0.2.0-pre.3, @tursodatabase/sync-common@workspace:sync/packages/common": +"@tursodatabase/sync-common@npm:^0.2.0-pre.4, @tursodatabase/sync-common@workspace:sync/packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/sync-common@workspace:sync/packages/common" dependencies: - "@tursodatabase/database-common": "npm:^0.2.0-pre.3" + "@tursodatabase/database-common": "npm:^0.2.0-pre.4" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1652,15 +1652,15 @@ __metadata: resolution: "@tursodatabase/sync@workspace:sync/packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0-pre.3" - "@tursodatabase/sync-common": "npm:^0.2.0-pre.3" + "@tursodatabase/database-common": "npm:^0.2.0-pre.4" + "@tursodatabase/sync-common": "npm:^0.2.0-pre.4" "@types/node": "npm:^24.3.1" typescript: "npm:^5.9.2" vitest: "npm:^3.2.4" languageName: unknown linkType: soft -"@tursodatabase/wasm-runtime@npm:^0.2.0-pre.3, @tursodatabase/wasm-runtime@workspace:packages/wasm-runtime": +"@tursodatabase/wasm-runtime@npm:^0.2.0-pre.4, @tursodatabase/wasm-runtime@workspace:packages/wasm-runtime": version: 0.0.0-use.local resolution: "@tursodatabase/wasm-runtime@workspace:packages/wasm-runtime" dependencies: @@ -1865,7 +1865,16 @@ __metadata: languageName: node linkType: hard -"ansi-styles@npm:^4.0.0, ansi-styles@npm:^5.0.0": +"ansi-styles@npm:^4.0.0": + version: 4.3.0 + resolution: "ansi-styles@npm:4.3.0" + dependencies: + color-convert: "npm:^2.0.1" + checksum: 10c0/895a23929da416f2bd3de7e9cb4eabd340949328ab85ddd6e484a637d8f6820d485f53933446f5291c3b760cbc488beb8e88573dd0f9c7daf83dccc8fe81b041 + languageName: node + linkType: hard + +"ansi-styles@npm:^5.0.0": version: 5.2.0 resolution: "ansi-styles@npm:5.2.0" checksum: 10c0/9c4ca80eb3c2fb7b33841c210d2f20807f40865d27008d7c3f707b7f95cab7d67462a565e2388ac3285b71cb3d9bb2173de8da37c57692a362885ec34d6e27df @@ -2066,6 +2075,22 @@ __metadata: languageName: node linkType: hard +"color-convert@npm:^2.0.1": + version: 2.0.1 + resolution: "color-convert@npm:2.0.1" + dependencies: + color-name: "npm:~1.1.4" + checksum: 10c0/37e1150172f2e311fe1b2df62c6293a342ee7380da7b9cfdba67ea539909afbd74da27033208d01d6d5cfc65ee7868a22e18d7e7648e004425441c0f8a15a7d7 + languageName: node + linkType: hard + +"color-name@npm:~1.1.4": + version: 1.1.4 + resolution: "color-name@npm:1.1.4" + checksum: 10c0/a1a3f914156960902f46f7f56bc62effc6c94e84b2cae157a526b1c1f74b677a47ec602bf68a61abfa2b42d15b7c5651c6dbe72a43af720bc588dff885b10f95 + languageName: node + linkType: hard + "colorette@npm:^2.0.20": version: 2.0.20 resolution: "colorette@npm:2.0.20" @@ -2992,12 +3017,12 @@ __metadata: languageName: node linkType: hard -"minizlib@npm:^3.0.1": - version: 3.0.2 - resolution: "minizlib@npm:3.0.2" +"minizlib@npm:^3.0.1, minizlib@npm:^3.1.0": + version: 3.1.0 + resolution: "minizlib@npm:3.1.0" dependencies: minipass: 
"npm:^7.1.2" - checksum: 10c0/9f3bd35e41d40d02469cb30470c55ccc21cae0db40e08d1d0b1dff01cc8cc89a6f78e9c5d2b7c844e485ec0a8abc2238111213fdc5b2038e6d1012eacf316f78 + checksum: 10c0/5aad75ab0090b8266069c9aabe582c021ae53eb33c6c691054a13a45db3b4f91a7fb1bd79151e6b4e9e9a86727b522527c0a06ec7d45206b745d54cd3097bcec languageName: node linkType: hard @@ -3008,15 +3033,6 @@ __metadata: languageName: node linkType: hard -"mkdirp@npm:^3.0.1": - version: 3.0.1 - resolution: "mkdirp@npm:3.0.1" - bin: - mkdirp: dist/cjs/src/bin.js - checksum: 10c0/9f2b975e9246351f5e3a40dcfac99fcd0baa31fbfab615fe059fb11e51f10e4803c63de1f384c54d656e4db31d000e4767e9ef076a22e12a641357602e31d57d - languageName: node - linkType: hard - "mrmime@npm:^2.0.0": version: 2.0.1 resolution: "mrmime@npm:2.0.1" @@ -3674,16 +3690,15 @@ __metadata: linkType: hard "tar@npm:^7.4.3": - version: 7.4.3 - resolution: "tar@npm:7.4.3" + version: 7.4.4 + resolution: "tar@npm:7.4.4" dependencies: "@isaacs/fs-minipass": "npm:^4.0.0" chownr: "npm:^3.0.0" minipass: "npm:^7.1.2" - minizlib: "npm:^3.0.1" - mkdirp: "npm:^3.0.1" + minizlib: "npm:^3.1.0" yallist: "npm:^5.0.0" - checksum: 10c0/d4679609bb2a9b48eeaf84632b6d844128d2412b95b6de07d53d8ee8baf4ca0857c9331dfa510390a0727b550fd543d4d1a10995ad86cdf078423fbb8d99831d + checksum: 10c0/2db46a140095488ed3244ac748f8e4f9362223b212bcae7859840dd9fd9891bc713f243d122906ce2f28eb64b49fa8cefc13cbdda24e66e8f2a5936a7c392b06 languageName: node linkType: hard From f053b76518fdc248cc7689b05e8d0c28bccecc14 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 22 Sep 2025 08:08:55 +0300 Subject: [PATCH 65/78] core/storage: Move vacuum state machines to VacuumState --- core/storage/pager.rs | 45 +++++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 657a3ccb3..942090a10 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -527,14 +527,17 @@ pub struct Pager { max_page_count: AtomicU32, header_ref_state: RwLock, #[cfg(not(feature = "omit_autovacuum"))] + vacuum_state: VacuumState, + pub(crate) io_ctx: RefCell, +} + +#[cfg(not(feature = "omit_autovacuum"))] +pub struct VacuumState { /// State machine for [Pager::ptrmap_get] ptrmap_get_state: RefCell, - #[cfg(not(feature = "omit_autovacuum"))] /// State machine for [Pager::ptrmap_put] ptrmap_put_state: RefCell, - #[cfg(not(feature = "omit_autovacuum"))] btree_create_vacuum_full_state: Cell, - pub(crate) io_ctx: RefCell, } #[derive(Debug, Clone)] @@ -628,13 +631,13 @@ impl Pager { free_page_state: RwLock::new(FreePageState::Start), allocate_page_state: RwLock::new(AllocatePageState::Start), max_page_count: AtomicU32::new(DEFAULT_MAX_PAGE_COUNT), - #[cfg(not(feature = "omit_autovacuum"))] - ptrmap_get_state: RefCell::new(PtrMapGetState::Start), - #[cfg(not(feature = "omit_autovacuum"))] - ptrmap_put_state: RefCell::new(PtrMapPutState::Start), header_ref_state: RwLock::new(HeaderRefState::Start), #[cfg(not(feature = "omit_autovacuum"))] - btree_create_vacuum_full_state: Cell::new(BtreeCreateVacuumFullState::Start), + vacuum_state: VacuumState { + ptrmap_get_state: RefCell::new(PtrMapGetState::Start), + ptrmap_put_state: RefCell::new(PtrMapPutState::Start), + btree_create_vacuum_full_state: Cell::new(BtreeCreateVacuumFullState::Start), + }, io_ctx: RefCell::new(IOContext::default()), }) } @@ -675,7 +678,7 @@ impl Pager { #[cfg(not(feature = "omit_autovacuum"))] pub fn ptrmap_get(&self, target_page_num: u32) -> Result>> { loop { - let ptrmap_get_state = 
self.ptrmap_get_state.borrow().clone(); + let ptrmap_get_state = self.vacuum_state.ptrmap_get_state.borrow().clone(); match ptrmap_get_state { PtrMapGetState::Start => { tracing::trace!("ptrmap_get(page_idx = {})", target_page_num); @@ -702,7 +705,7 @@ impl Pager { ); let (ptrmap_page, c) = self.read_page(ptrmap_pg_no as usize)?; - self.ptrmap_get_state.replace(PtrMapGetState::Deserialize { + self.vacuum_state.ptrmap_get_state.replace(PtrMapGetState::Deserialize { ptrmap_page, offset_in_ptrmap_page, }); @@ -749,7 +752,7 @@ impl Pager { let entry_slice = &ptrmap_page_data_slice [offset_in_ptrmap_page..offset_in_ptrmap_page + PTRMAP_ENTRY_SIZE]; - self.ptrmap_get_state.replace(PtrMapGetState::Start); + self.vacuum_state.ptrmap_get_state.replace(PtrMapGetState::Start); break match PtrmapEntry::deserialize(entry_slice) { Some(entry) => Ok(IOResult::Done(Some(entry))), None => Err(LimboError::Corrupt(format!( @@ -778,7 +781,7 @@ impl Pager { parent_page_no ); loop { - let ptrmap_put_state = self.ptrmap_put_state.borrow().clone(); + let ptrmap_put_state = self.vacuum_state.ptrmap_put_state.borrow().clone(); match ptrmap_put_state { PtrMapPutState::Start => { let page_size = @@ -806,7 +809,7 @@ impl Pager { ); let (ptrmap_page, c) = self.read_page(ptrmap_pg_no as usize)?; - self.ptrmap_put_state.replace(PtrMapPutState::Deserialize { + self.vacuum_state.ptrmap_put_state.replace(PtrMapPutState::Deserialize { ptrmap_page, offset_in_ptrmap_page, }); @@ -857,7 +860,7 @@ impl Pager { "ptrmap page has unexpected number" ); self.add_dirty(&ptrmap_page); - self.ptrmap_put_state.replace(PtrMapPutState::Start); + self.vacuum_state.ptrmap_put_state.replace(PtrMapPutState::Start); break Ok(IOResult::Done(())); } } @@ -892,7 +895,7 @@ impl Pager { } AutoVacuumMode::Full => { loop { - match self.btree_create_vacuum_full_state.get() { + match self.vacuum_state.btree_create_vacuum_full_state.get() { BtreeCreateVacuumFullState::Start => { let (mut root_page_num, page_size) = return_if_io!(self .with_header(|header| { @@ -910,7 +913,7 @@ impl Pager { root_page_num += 1; } assert!(root_page_num >= 3); // the very first root page is page 3 - self.btree_create_vacuum_full_state.set( + self.vacuum_state.btree_create_vacuum_full_state.set( BtreeCreateVacuumFullState::AllocatePage { root_page_num }, ); } @@ -927,7 +930,7 @@ impl Pager { } // TODO(Zaid): Update the header metadata to reflect the new root page number - self.btree_create_vacuum_full_state.set( + self.vacuum_state.btree_create_vacuum_full_state.set( BtreeCreateVacuumFullState::PtrMapPut { allocated_page_id }, ); } @@ -938,7 +941,7 @@ impl Pager { PtrmapType::RootPage, 0, )); - self.btree_create_vacuum_full_state + self.vacuum_state.btree_create_vacuum_full_state .set(BtreeCreateVacuumFullState::Start); return Ok(IOResult::Done(allocated_page_id)); } @@ -2333,9 +2336,9 @@ impl Pager { *self.free_page_state.write() = FreePageState::Start; #[cfg(not(feature = "omit_autovacuum"))] { - self.ptrmap_get_state.replace(PtrMapGetState::Start); - self.ptrmap_put_state.replace(PtrMapPutState::Start); - self.btree_create_vacuum_full_state + self.vacuum_state.ptrmap_get_state.replace(PtrMapGetState::Start); + self.vacuum_state.ptrmap_put_state.replace(PtrMapPutState::Start); + self.vacuum_state.btree_create_vacuum_full_state .replace(BtreeCreateVacuumFullState::Start); } From 751261566eab9636314bf70a3a1f4f2fa1f5031b Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 22 Sep 2025 10:02:01 +0300 Subject: [PATCH 66/78] Fix Antithesis and simulator Dockerfiles --- 
Dockerfile.antithesis | 2 ++ simulator-docker-runner/Dockerfile.simulator | 2 ++ 2 files changed, 4 insertions(+) diff --git a/Dockerfile.antithesis b/Dockerfile.antithesis index cd683240c..a277c2128 100644 --- a/Dockerfile.antithesis +++ b/Dockerfile.antithesis @@ -23,6 +23,7 @@ COPY ./extensions ./extensions/ COPY ./macros ./macros/ COPY ./packages ./packages/ COPY ./parser ./parser/ +COPY ./perf/encryption ./perf/encryption COPY ./perf/throughput/rusqlite ./perf/throughput/rusqlite/ COPY ./perf/throughput/turso ./perf/throughput/turso/ COPY ./simulator ./simulator/ @@ -65,6 +66,7 @@ COPY --from=planner /app/extensions ./extensions/ COPY --from=planner /app/macros ./macros/ COPY --from=planner /app/packages ./packages/ COPY --from=planner /app/parser ./parser/ +COPY --from=planner /app/perf/encryption ./perf/encryption COPY --from=planner /app/perf/throughput/rusqlite ./perf/throughput/rusqlite/ COPY --from=planner /app/perf/throughput/turso ./perf/throughput/turso/ COPY --from=planner /app/simulator ./simulator/ diff --git a/simulator-docker-runner/Dockerfile.simulator b/simulator-docker-runner/Dockerfile.simulator index 611c6ba01..5ad11acd9 100644 --- a/simulator-docker-runner/Dockerfile.simulator +++ b/simulator-docker-runner/Dockerfile.simulator @@ -19,6 +19,7 @@ COPY extensions ./extensions/ COPY macros ./macros/ COPY sync ./sync COPY parser ./parser/ +COPY perf/encryption ./perf/encryption COPY perf/throughput/rusqlite ./perf/throughput/rusqlite COPY perf/throughput/turso ./perf/throughput/turso COPY vendored ./vendored/ @@ -45,6 +46,7 @@ COPY --from=planner /app/vendored ./vendored/ COPY --from=planner /app/extensions ./extensions/ COPY --from=planner /app/macros ./macros/ COPY --from=planner /app/parser ./parser/ +COPY --from=planner /app/perf/encryption ./perf/encryption COPY --from=planner /app/perf/throughput/rusqlite ./perf/throughput/rusqlite COPY --from=planner /app/perf/throughput/turso ./perf/throughput/turso COPY --from=planner /app/simulator ./simulator/ From ef9f2f9a338cf5f888f7ebf0ff385d903f62b2bd Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 22 Sep 2025 10:11:42 +0300 Subject: [PATCH 67/78] test/fuzz: add prints to get exact executed statements for debugging --- tests/integration/fuzz/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 807c0eee5..10a663e71 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -676,6 +676,7 @@ mod tests { "ss", "tt", "uu", "vv", "ww", "xx", "yy", "zz", ]; for outer in 0..OUTER_ITERS { + println!(""); println!( "partial_index_mutation_and_upsert_fuzz iteration {}/{}", outer + 1, @@ -696,6 +697,7 @@ mod tests { cols.push(format!("c{i} INT")); } let create = format!("CREATE TABLE t ({})", cols.join(", ")); + println!("{create};"); limbo_exec_rows(&limbo_db, &limbo_conn, &create); sqlite.execute(&create, rusqlite::params![]).unwrap(); @@ -780,6 +782,7 @@ mod tests { ); idx_ddls.push(ddl.clone()); // Create in both engines + println!("{ddl};"); limbo_exec_rows(&limbo_db, &limbo_conn, &ddl); sqlite.execute(&ddl, rusqlite::params![]).unwrap(); } @@ -930,6 +933,7 @@ mod tests { match (sqlite_res, limbo_res) { (Ok(_), Ok(_)) => { + println!("{stmt};"); // Compare canonical table state let verify = format!( "SELECT id, k{} FROM t ORDER BY id, k{}", @@ -953,6 +957,7 @@ mod tests { } // Mismatch: dump context (ok_sqlite, ok_turso) => { + println!("{stmt};"); eprintln!("Schema: {create};"); for d in idx_ddls.iter() { 
eprintln!("{d};"); From eada24b508f5f1664153fe76b03e2c8abc96492d Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 22 Sep 2025 10:11:50 +0300 Subject: [PATCH 68/78] Store in-memory index definitions most-recently-seen-first This solves an issue where an INSERT statement conflicts with multiple indices. In that case, sqlite iterates the linked list `pTab->pIndex` in order and handles the first conflict encountered. The newest parsed index is always added to the head of the list. To be compatible with this behavior, we also need to put the most recently parsed index definition first in our indexes list for a given table. --- core/schema.rs | 18 +++++++++----- core/translate/delete.rs | 2 +- core/translate/insert.rs | 1 - core/translate/optimizer/constraints.rs | 10 +++++--- core/translate/optimizer/mod.rs | 9 +++++-- core/translate/update.rs | 3 +-- core/translate/upsert.rs | 32 +++++++++++++++++++++++++ 7 files changed, 60 insertions(+), 15 deletions(-) diff --git a/core/schema.rs b/core/schema.rs index 546af4f00..911b1d814 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -27,7 +27,7 @@ use crate::{ }; use crate::{util::normalize_ident, Result}; use core::fmt; -use std::collections::{HashMap, HashSet}; +use std::collections::{HashMap, HashSet, VecDeque}; use std::ops::Deref; use std::sync::Arc; use std::sync::Mutex; @@ -64,7 +64,7 @@ pub struct Schema { pub views: ViewsMap, /// table_name to list of indexes for the table - pub indexes: HashMap>>, + pub indexes: HashMap>>, pub has_indexes: std::collections::HashSet, pub indexes_enabled: bool, pub schema_version: u32, @@ -77,7 +77,7 @@ impl Schema { pub fn new(indexes_enabled: bool) -> Self { let mut tables: HashMap> = HashMap::new(); let has_indexes = std::collections::HashSet::new(); - let indexes: HashMap>> = HashMap::new(); + let indexes: HashMap>> = HashMap::new(); #[allow(clippy::arc_with_non_send_sync)] tables.insert( SCHEMA_TABLE_NAME.to_string(), @@ -244,17 +244,23 @@ impl Schema { pub fn add_index(&mut self, index: Arc) { let table_name = normalize_ident(&index.table_name); + // We must add the new index to the front of the deque, because SQLite stores index definitions as a linked list + // where the newest parsed index entry is at the head of list. If we would add it to the back of a regular Vec for example, + // then we would evaluate ON CONFLICT DO UPDATE clauses in the wrong index iteration order and UPDATE the wrong row. One might + // argue that this is an implementation detail and we should not care about this, but it makes e.g. the fuzz test 'partial_index_mutation_and_upsert_fuzz' + // fail, so let's just be compatible. 
         self.indexes
             .entry(table_name)
             .or_default()
-            .push(index.clone())
+            .push_front(index.clone())
     }

-    pub fn get_indices(&self, table_name: &str) -> &[Arc<Index>] {
+    pub fn get_indices(&self, table_name: &str) -> impl Iterator<Item = &Arc<Index>> {
         let name = normalize_ident(table_name);
         self.indexes
             .get(&name)
-            .map_or_else(|| &[] as &[Arc<Index>], |v| v.as_slice())
+            .map(|v| v.iter())
+            .unwrap_or_default()
     }

     pub fn get_index(&self, table_name: &str, index_name: &str) -> Option<&Arc<Index>> {
diff --git a/core/translate/delete.rs b/core/translate/delete.rs
index ccec40138..8e706d693 100644
--- a/core/translate/delete.rs
+++ b/core/translate/delete.rs
@@ -96,7 +96,7 @@ pub fn prepare_delete_plan(
     } else {
         crate::bail_parse_error!("Table is neither a virtual table nor a btree table");
     };
-    let indexes = schema.get_indices(table.get_name()).to_vec();
+    let indexes = schema.get_indices(table.get_name()).cloned().collect();
     let joined_tables = vec![JoinedTable {
         op: Operation::default_scan_for(&table),
         table,
diff --git a/core/translate/insert.rs b/core/translate/insert.rs
index 509eda6ce..f760a8106 100644
--- a/core/translate/insert.rs
+++ b/core/translate/insert.rs
@@ -364,7 +364,6 @@ pub fn translate_insert(
     // (idx name, root_page, idx cursor id)
     let idx_cursors = schema
         .get_indices(table_name.as_str())
-        .iter()
         .map(|idx| {
             (
                 &idx.name,
diff --git a/core/translate/optimizer/constraints.rs b/core/translate/optimizer/constraints.rs
index 049408b17..d2dff657c 100644
--- a/core/translate/optimizer/constraints.rs
+++ b/core/translate/optimizer/constraints.rs
@@ -1,4 +1,8 @@
-use std::{cmp::Ordering, collections::HashMap, sync::Arc};
+use std::{
+    cmp::Ordering,
+    collections::{HashMap, VecDeque},
+    sync::Arc,
+};

 use crate::{
     schema::{Column, Index},
@@ -175,7 +179,7 @@ fn estimate_selectivity(column: &Column, op: ast::Operator) -> f64 {
 pub fn constraints_from_where_clause(
     where_clause: &[WhereTerm],
     table_references: &TableReferences,
-    available_indexes: &HashMap<String, Vec<Arc<Index>>>,
+    available_indexes: &HashMap<String, VecDeque<Arc<Index>>>,
 ) -> Result<Vec<Constraint>> {
     let mut constraints = Vec::new();

@@ -315,7 +319,7 @@ pub fn constraints_from_where_clause(
         }
         for index in available_indexes
             .get(table_reference.table.get_name())
-            .unwrap_or(&Vec::new())
+            .unwrap_or(&VecDeque::new())
         {
             if let Some(position_in_index) =
                 index.column_table_pos_to_index_pos(constraint.table_col_pos)
diff --git a/core/translate/optimizer/mod.rs b/core/translate/optimizer/mod.rs
index eb883b05e..9273212d7 100644
--- a/core/translate/optimizer/mod.rs
+++ b/core/translate/optimizer/mod.rs
@@ -1,4 +1,9 @@
-use std::{cell::RefCell, cmp::Ordering, collections::HashMap, sync::Arc};
+use std::{
+    cell::RefCell,
+    cmp::Ordering,
+    collections::{HashMap, VecDeque},
+    sync::Arc,
+};

 use constraints::{
     constraints_from_where_clause, usable_constraints_for_join_order, Constraint, ConstraintRef,
@@ -178,7 +183,7 @@ fn optimize_subqueries(plan: &mut SelectPlan, schema: &Schema) -> Result<()> {
 fn optimize_table_access(
     schema: &Schema,
     table_references: &mut TableReferences,
-    available_indexes: &HashMap<String, Vec<Arc<Index>>>,
+    available_indexes: &HashMap<String, VecDeque<Arc<Index>>>,
     where_clause: &mut [WhereTerm],
     order_by: &mut Vec<(Box<ast::Expr>, SortOrder)>,
     group_by: &mut Option<GroupBy>,
diff --git a/core/translate/update.rs b/core/translate/update.rs
index 388e9e3df..679dcef68 100644
--- a/core/translate/update.rs
+++ b/core/translate/update.rs
@@ -377,12 +377,11 @@ pub fn prepare_update_plan(
         .any(|(idx, _)| columns[*idx].is_rowid_alias);
     let indexes_to_update = if rowid_alias_used {
         // If the rowid alias is used in the SET clause, we need to update all indexes
-        indexes.to_vec()
+        indexes.cloned().collect()
     } else {
         // otherwise we need to update the indexes whose columns are set in the SET clause,
         // or if the columns used in the partial index WHERE clause are being updated
         indexes
-            .iter()
             .filter_map(|idx| {
                 let mut needs = idx
                     .columns
diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs
index ce3c8666c..53493c4b9 100644
--- a/core/translate/upsert.rs
+++ b/core/translate/upsert.rs
@@ -31,6 +31,38 @@ use crate::{
     },
 };

+// The following comment is copied directly from SQLite source and should be used as a guiding light
+// whenever we encounter compatibility bugs related to conflict clause handling:
+
+/* UNIQUE and PRIMARY KEY constraints should be handled in the following
+** order:
+**
+**  (1)  OE_Update
+**  (2)  OE_Abort, OE_Fail, OE_Rollback, OE_Ignore
+**  (3)  OE_Replace
+**
+** OE_Fail and OE_Ignore must happen before any changes are made.
+** OE_Update guarantees that only a single row will change, so it
+** must happen before OE_Replace. Technically, OE_Abort and OE_Rollback
+** could happen in any order, but they are grouped up front for
+** convenience.
+**
+** 2018-08-14: Ticket https://www.sqlite.org/src/info/908f001483982c43
+** The order of constraints used to have OE_Update as (2) and OE_Abort
+** and so forth as (1). But apparently PostgreSQL checks the OE_Update
+** constraint before any others, so it had to be moved.
+**
+** Constraint checking code is generated in this order:
+**  (A)  The rowid constraint
+**  (B)  Unique index constraints that do not have OE_Replace as their
+**       default conflict resolution strategy
+**  (C)  Unique index that do use OE_Replace by default.
+**
+** The ordering of (2) and (3) is accomplished by making sure the linked
+** list of indexes attached to a table puts all OE_Replace indexes last
+** in the list. See sqlite3CreateIndex() for where that happens.
+*/
+
 /// A ConflictTarget is extracted from each ON CONFLICT target,
 // e.g.
INSERT INTO x(a) ON CONFLICT *(a COLLATE nocase)* #[derive(Debug, Clone)] From c0fc2ad234a0c0077b317520ead85b4a51ffa2bd Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 22 Sep 2025 10:18:03 +0300 Subject: [PATCH 69/78] fix optimizer tests --- core/translate/optimizer/join.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/core/translate/optimizer/join.rs b/core/translate/optimizer/join.rs index 81b6e93ef..617bc94dd 100644 --- a/core/translate/optimizer/join.rs +++ b/core/translate/optimizer/join.rs @@ -501,7 +501,7 @@ fn generate_join_bitmasks(table_number_max_exclusive: usize, how_many: usize) -> #[cfg(test)] mod tests { - use std::sync::Arc; + use std::{collections::VecDeque, sync::Arc}; use turso_parser::ast::{self, Expr, Operator, SortOrder, TableInternalId}; @@ -677,7 +677,7 @@ mod tests { root_page: 1, has_rowid: true, }); - available_indexes.insert("test_table".to_string(), vec![index]); + available_indexes.insert("test_table".to_string(), VecDeque::from([index])); let table_constraints = constraints_from_where_clause(&where_clause, &table_references, &available_indexes) @@ -747,7 +747,7 @@ mod tests { root_page: 1, has_rowid: true, }); - available_indexes.insert("table1".to_string(), vec![index1]); + available_indexes.insert("table1".to_string(), VecDeque::from([index1])); // SELECT * FROM table1 JOIN table2 WHERE table1.id = table2.id // expecting table2 to be chosen first due to the index on table1.id @@ -865,7 +865,7 @@ mod tests { root_page: 1, has_rowid: true, }); - available_indexes.insert(table_name.to_string(), vec![index]); + available_indexes.insert(table_name.to_string(), VecDeque::from([index])); }); let customer_id_idx = Arc::new(Index { name: "orders_customer_id_idx".to_string(), @@ -902,10 +902,10 @@ mod tests { available_indexes .entry("orders".to_string()) - .and_modify(|v| v.push(customer_id_idx)); + .and_modify(|v| v.push_front(customer_id_idx)); available_indexes .entry("order_items".to_string()) - .and_modify(|v| v.push(order_id_idx)); + .and_modify(|v| v.push_front(order_id_idx)); // SELECT * FROM orders JOIN customers JOIN order_items // WHERE orders.customer_id = customers.id AND orders.id = order_items.order_id AND customers.id = 42 @@ -1324,7 +1324,7 @@ mod tests { }); let mut available_indexes = HashMap::new(); - available_indexes.insert("t1".to_string(), vec![index]); + available_indexes.insert("t1".to_string(), VecDeque::from([index])); let table = Table::BTree(table); joined_tables.push(JoinedTable { @@ -1416,7 +1416,7 @@ mod tests { ephemeral: false, has_rowid: true, }); - available_indexes.insert("t1".to_string(), vec![index]); + available_indexes.insert("t1".to_string(), VecDeque::from([index])); let table = Table::BTree(table); joined_tables.push(JoinedTable { @@ -1528,7 +1528,7 @@ mod tests { has_rowid: true, unique: false, }); - available_indexes.insert("t1".to_string(), vec![index]); + available_indexes.insert("t1".to_string(), VecDeque::from([index])); let table = Table::BTree(table); joined_tables.push(JoinedTable { From e5a3512f79bf9a7a87de429c7ee8843ee0ec11c6 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 22 Sep 2025 10:19:46 +0300 Subject: [PATCH 70/78] Upgrade dist to 0.30.0 --- .github/workflows/release.yml | 2 +- dist-workspace.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3ad18584e..a1ae31232 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -66,7 +66,7 @@ 
jobs:
       # we specify bash to get pipefail; it guards against the `curl` command
       # failing. otherwise `sh` won't catch that `curl` returned non-0
         shell: bash
-        run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.29.0/cargo-dist-installer.sh | sh"
+        run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.30.0/cargo-dist-installer.sh | sh"
       - name: Cache dist
         uses: actions/upload-artifact@v4
         with:
diff --git a/dist-workspace.toml b/dist-workspace.toml
index 49a205ab6..71bd31d3f 100644
--- a/dist-workspace.toml
+++ b/dist-workspace.toml
@@ -4,7 +4,7 @@ members = ["cargo:."]

 # Config for 'dist'
 [dist]
 # The preferred dist version to use in CI (Cargo.toml SemVer syntax)
-cargo-dist-version = "0.29.0"
+cargo-dist-version = "0.30.0"
 # CI backends to support
 ci = "github"
 # The installers to generate for each app

From f1b0ffc0533a30f218db4f3d5ee2ed06c1514655 Mon Sep 17 00:00:00 2001
From: Jussi Saurio
Date: Mon, 22 Sep 2025 10:21:16 +0300
Subject: [PATCH 71/78] clippy is angry about printing an empty string

---
 tests/integration/fuzz/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs
index 10a663e71..b0a6bc1c1 100644
--- a/tests/integration/fuzz/mod.rs
+++ b/tests/integration/fuzz/mod.rs
@@ -676,7 +676,7 @@ mod tests {
         "ss", "tt", "uu", "vv", "ww", "xx", "yy", "zz",
     ];
     for outer in 0..OUTER_ITERS {
-        println!("");
+        println!(" ");
         println!(
             "partial_index_mutation_and_upsert_fuzz iteration {}/{}",
             outer + 1,

From 6f258b37d96e417d51b17ca5311cfe7252c7ff79 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Mon, 22 Sep 2025 09:12:54 +0300
Subject: [PATCH 72/78] core/storage: Wrap Pager vacuum state in RwLock

---
 core/storage/pager.rs | 68 ++++++++++++++++++++++++-------------------
 1 file changed, 38 insertions(+), 30 deletions(-)

diff --git a/core/storage/pager.rs b/core/storage/pager.rs
index 942090a10..846487dbe 100644
--- a/core/storage/pager.rs
+++ b/core/storage/pager.rs
@@ -527,17 +527,17 @@ pub struct Pager {
     max_page_count: AtomicU32,
     header_ref_state: RwLock<HeaderRefState>,
     #[cfg(not(feature = "omit_autovacuum"))]
-    vacuum_state: VacuumState,
+    vacuum_state: RwLock<VacuumState>,
     pub(crate) io_ctx: RefCell<IOContext>,
 }

 #[cfg(not(feature = "omit_autovacuum"))]
 pub struct VacuumState {
     /// State machine for [Pager::ptrmap_get]
-    ptrmap_get_state: RefCell<PtrMapGetState>,
+    ptrmap_get_state: PtrMapGetState,
     /// State machine for [Pager::ptrmap_put]
-    ptrmap_put_state: RefCell<PtrMapPutState>,
-    btree_create_vacuum_full_state: Cell<BtreeCreateVacuumFullState>,
+    ptrmap_put_state: PtrMapPutState,
+    btree_create_vacuum_full_state: BtreeCreateVacuumFullState,
 }

 #[derive(Debug, Clone)]
@@ -633,11 +633,11 @@ impl Pager {
             max_page_count: AtomicU32::new(DEFAULT_MAX_PAGE_COUNT),
             header_ref_state: RwLock::new(HeaderRefState::Start),
             #[cfg(not(feature = "omit_autovacuum"))]
-            vacuum_state: VacuumState {
-                ptrmap_get_state: RefCell::new(PtrMapGetState::Start),
-                ptrmap_put_state: RefCell::new(PtrMapPutState::Start),
-                btree_create_vacuum_full_state: Cell::new(BtreeCreateVacuumFullState::Start),
-            },
+            vacuum_state: RwLock::new(VacuumState {
+                ptrmap_get_state: PtrMapGetState::Start,
+                ptrmap_put_state: PtrMapPutState::Start,
+                btree_create_vacuum_full_state: BtreeCreateVacuumFullState::Start,
+            }),
             io_ctx: RefCell::new(IOContext::default()),
         })
     }
@@ -678,7 +678,10 @@ impl Pager {
     #[cfg(not(feature = "omit_autovacuum"))]
     pub fn ptrmap_get(&self, target_page_num: u32) -> Result<IOResult<Option<PtrmapEntry>>> {
         loop {
-            let ptrmap_get_state =
self.vacuum_state.ptrmap_get_state.borrow().clone(); + let ptrmap_get_state = { + let vacuum_state = self.vacuum_state.read(); + vacuum_state.ptrmap_get_state.clone() + }; match ptrmap_get_state { PtrMapGetState::Start => { tracing::trace!("ptrmap_get(page_idx = {})", target_page_num); @@ -705,10 +708,10 @@ impl Pager { ); let (ptrmap_page, c) = self.read_page(ptrmap_pg_no as usize)?; - self.vacuum_state.ptrmap_get_state.replace(PtrMapGetState::Deserialize { + self.vacuum_state.write().ptrmap_get_state = PtrMapGetState::Deserialize { ptrmap_page, offset_in_ptrmap_page, - }); + }; if let Some(c) = c { io_yield_one!(c); } @@ -752,7 +755,7 @@ impl Pager { let entry_slice = &ptrmap_page_data_slice [offset_in_ptrmap_page..offset_in_ptrmap_page + PTRMAP_ENTRY_SIZE]; - self.vacuum_state.ptrmap_get_state.replace(PtrMapGetState::Start); + self.vacuum_state.write().ptrmap_get_state = PtrMapGetState::Start; break match PtrmapEntry::deserialize(entry_slice) { Some(entry) => Ok(IOResult::Done(Some(entry))), None => Err(LimboError::Corrupt(format!( @@ -781,7 +784,10 @@ impl Pager { parent_page_no ); loop { - let ptrmap_put_state = self.vacuum_state.ptrmap_put_state.borrow().clone(); + let ptrmap_put_state = { + let vacuum_state = self.vacuum_state.read(); + vacuum_state.ptrmap_put_state.clone() + }; match ptrmap_put_state { PtrMapPutState::Start => { let page_size = @@ -809,10 +815,10 @@ impl Pager { ); let (ptrmap_page, c) = self.read_page(ptrmap_pg_no as usize)?; - self.vacuum_state.ptrmap_put_state.replace(PtrMapPutState::Deserialize { + self.vacuum_state.write().ptrmap_put_state = PtrMapPutState::Deserialize { ptrmap_page, offset_in_ptrmap_page, - }); + }; if let Some(c) = c { io_yield_one!(c); } @@ -860,7 +866,7 @@ impl Pager { "ptrmap page has unexpected number" ); self.add_dirty(&ptrmap_page); - self.vacuum_state.ptrmap_put_state.replace(PtrMapPutState::Start); + self.vacuum_state.write().ptrmap_put_state = PtrMapPutState::Start; break Ok(IOResult::Done(())); } } @@ -895,7 +901,11 @@ impl Pager { } AutoVacuumMode::Full => { loop { - match self.vacuum_state.btree_create_vacuum_full_state.get() { + let btree_create_vacuum_full_state = { + let vacuum_state = self.vacuum_state.read(); + vacuum_state.btree_create_vacuum_full_state + }; + match btree_create_vacuum_full_state { BtreeCreateVacuumFullState::Start => { let (mut root_page_num, page_size) = return_if_io!(self .with_header(|header| { @@ -913,9 +923,8 @@ impl Pager { root_page_num += 1; } assert!(root_page_num >= 3); // the very first root page is page 3 - self.vacuum_state.btree_create_vacuum_full_state.set( - BtreeCreateVacuumFullState::AllocatePage { root_page_num }, - ); + self.vacuum_state.write().btree_create_vacuum_full_state = + BtreeCreateVacuumFullState::AllocatePage { root_page_num }; } BtreeCreateVacuumFullState::AllocatePage { root_page_num } => { // root_page_num here is the desired root page @@ -930,9 +939,8 @@ impl Pager { } // TODO(Zaid): Update the header metadata to reflect the new root page number - self.vacuum_state.btree_create_vacuum_full_state.set( - BtreeCreateVacuumFullState::PtrMapPut { allocated_page_id }, - ); + self.vacuum_state.write().btree_create_vacuum_full_state = + BtreeCreateVacuumFullState::PtrMapPut { allocated_page_id }; } BtreeCreateVacuumFullState::PtrMapPut { allocated_page_id } => { // For now map allocated_page_id since we are not swapping it with root_page_num @@ -941,8 +949,8 @@ impl Pager { PtrmapType::RootPage, 0, )); - self.vacuum_state.btree_create_vacuum_full_state - 
.set(BtreeCreateVacuumFullState::Start); + self.vacuum_state.write().btree_create_vacuum_full_state = + BtreeCreateVacuumFullState::Start; return Ok(IOResult::Done(allocated_page_id)); } } @@ -2336,10 +2344,10 @@ impl Pager { *self.free_page_state.write() = FreePageState::Start; #[cfg(not(feature = "omit_autovacuum"))] { - self.vacuum_state.ptrmap_get_state.replace(PtrMapGetState::Start); - self.vacuum_state.ptrmap_put_state.replace(PtrMapPutState::Start); - self.vacuum_state.btree_create_vacuum_full_state - .replace(BtreeCreateVacuumFullState::Start); + let mut vacuum_state = self.vacuum_state.write(); + vacuum_state.ptrmap_get_state = PtrMapGetState::Start; + vacuum_state.ptrmap_put_state = PtrMapPutState::Start; + vacuum_state.btree_create_vacuum_full_state = BtreeCreateVacuumFullState::Start; } *self.header_ref_state.write() = HeaderRefState::Start; From f4b0fb17f7893d2f0f31052d4fbe5c0f1a6db68a Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 22 Sep 2025 11:34:22 +0300 Subject: [PATCH 73/78] Turso 0.2.0-pre.5 --- Cargo.lock | 54 +++++++++---------- Cargo.toml | 34 ++++++------ bindings/javascript/package-lock.json | 40 +++++++------- bindings/javascript/package.json | 2 +- .../packages/browser-common/package.json | 4 +- .../javascript/packages/browser/package.json | 6 +-- .../javascript/packages/common/package.json | 2 +- .../javascript/packages/native/package.json | 4 +- .../packages/wasm-runtime/package.json | 2 +- .../sync/packages/browser/package.json | 8 +-- .../sync/packages/common/package.json | 4 +- .../sync/packages/native/package.json | 6 +-- bindings/javascript/yarn.lock | 28 +++++----- 13 files changed, 97 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d0df1bbab..5bd0cf8f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -654,7 +654,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_tester" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "anyhow", "assert_cmd", @@ -2090,7 +2090,7 @@ dependencies = [ [[package]] name = "limbo_completion" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "mimalloc", "turso_ext", @@ -2098,7 +2098,7 @@ dependencies = [ [[package]] name = "limbo_crypto" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "blake3", "data-encoding", @@ -2111,7 +2111,7 @@ dependencies = [ [[package]] name = "limbo_csv" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "csv", "mimalloc", @@ -2121,7 +2121,7 @@ dependencies = [ [[package]] name = "limbo_ipaddr" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "ipnetwork", "mimalloc", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "limbo_percentile" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "mimalloc", "turso_ext", @@ -2138,7 +2138,7 @@ dependencies = [ [[package]] name = "limbo_regexp" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "mimalloc", "regex", @@ -2147,7 +2147,7 @@ dependencies = [ [[package]] name = "limbo_sim" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "anyhow", "chrono", @@ -2180,7 +2180,7 @@ dependencies = [ [[package]] name = "limbo_sqlite_test_ext" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "cc", ] @@ -2901,7 +2901,7 @@ dependencies = [ [[package]] name = "py-turso" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "anyhow", "pyo3", @@ -3581,7 +3581,7 @@ checksum = 
"d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d" [[package]] name = "sql_generation" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "anarchist-readable-name-generator-lib 0.2.0", "anyhow", @@ -4082,7 +4082,7 @@ dependencies = [ [[package]] name = "turso" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "rand 0.9.2", "rand_chacha 0.9.0", @@ -4094,7 +4094,7 @@ dependencies = [ [[package]] name = "turso-java" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "jni", "thiserror 2.0.16", @@ -4103,7 +4103,7 @@ dependencies = [ [[package]] name = "turso_cli" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "anyhow", "cfg-if", @@ -4136,7 +4136,7 @@ dependencies = [ [[package]] name = "turso_core" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "aegis", "aes", @@ -4194,7 +4194,7 @@ dependencies = [ [[package]] name = "turso_dart" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "flutter_rust_bridge", "turso_core", @@ -4202,7 +4202,7 @@ dependencies = [ [[package]] name = "turso_ext" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "chrono", "getrandom 0.3.2", @@ -4211,7 +4211,7 @@ dependencies = [ [[package]] name = "turso_ext_tests" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "env_logger 0.11.7", "lazy_static", @@ -4222,7 +4222,7 @@ dependencies = [ [[package]] name = "turso_macros" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "proc-macro2", "quote", @@ -4231,7 +4231,7 @@ dependencies = [ [[package]] name = "turso_node" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "chrono", "napi", @@ -4244,7 +4244,7 @@ dependencies = [ [[package]] name = "turso_parser" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "bitflags 2.9.4", "criterion", @@ -4260,7 +4260,7 @@ dependencies = [ [[package]] name = "turso_sqlite3" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "env_logger 0.11.7", "libc", @@ -4273,7 +4273,7 @@ dependencies = [ [[package]] name = "turso_sqlite3_parser" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "bitflags 2.9.4", "cc", @@ -4291,7 +4291,7 @@ dependencies = [ [[package]] name = "turso_stress" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "anarchist-readable-name-generator-lib 0.1.2", "antithesis_sdk", @@ -4307,7 +4307,7 @@ dependencies = [ [[package]] name = "turso_sync_engine" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "base64", "bytes", @@ -4333,7 +4333,7 @@ dependencies = [ [[package]] name = "turso_sync_js" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "genawaiter", "napi", @@ -4348,7 +4348,7 @@ dependencies = [ [[package]] name = "turso_whopper" -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" dependencies = [ "anyhow", "clap", diff --git a/Cargo.toml b/Cargo.toml index 5615eceea..46e63fdcc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,29 +39,29 @@ exclude = [ ] [workspace.package] -version = "0.2.0-pre.4" +version = "0.2.0-pre.5" authors = ["the Limbo authors"] edition = "2021" license = "MIT" repository = "https://github.com/tursodatabase/turso" [workspace.dependencies] -turso = { path = "bindings/rust", version = "0.2.0-pre.4" } -turso_node = { path = "bindings/javascript", version = "0.2.0-pre.4" } -limbo_completion = { path = "extensions/completion", version = "0.2.0-pre.4" } -turso_core = { path = "core", version = "0.2.0-pre.4" } 
-turso_sync_engine = { path = "sync/engine", version = "0.2.0-pre.4" } -limbo_crypto = { path = "extensions/crypto", version = "0.2.0-pre.4" } -limbo_csv = { path = "extensions/csv", version = "0.2.0-pre.4" } -turso_ext = { path = "extensions/core", version = "0.2.0-pre.4" } -turso_ext_tests = { path = "extensions/tests", version = "0.2.0-pre.4" } -limbo_ipaddr = { path = "extensions/ipaddr", version = "0.2.0-pre.4" } -turso_macros = { path = "macros", version = "0.2.0-pre.4" } -limbo_percentile = { path = "extensions/percentile", version = "0.2.0-pre.4" } -limbo_regexp = { path = "extensions/regexp", version = "0.2.0-pre.4" } -turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.2.0-pre.4" } -limbo_uuid = { path = "extensions/uuid", version = "0.2.0-pre.4" } -turso_parser = { path = "parser", version = "0.2.0-pre.4" } +turso = { path = "bindings/rust", version = "0.2.0-pre.5" } +turso_node = { path = "bindings/javascript", version = "0.2.0-pre.5" } +limbo_completion = { path = "extensions/completion", version = "0.2.0-pre.5" } +turso_core = { path = "core", version = "0.2.0-pre.5" } +turso_sync_engine = { path = "sync/engine", version = "0.2.0-pre.5" } +limbo_crypto = { path = "extensions/crypto", version = "0.2.0-pre.5" } +limbo_csv = { path = "extensions/csv", version = "0.2.0-pre.5" } +turso_ext = { path = "extensions/core", version = "0.2.0-pre.5" } +turso_ext_tests = { path = "extensions/tests", version = "0.2.0-pre.5" } +limbo_ipaddr = { path = "extensions/ipaddr", version = "0.2.0-pre.5" } +turso_macros = { path = "macros", version = "0.2.0-pre.5" } +limbo_percentile = { path = "extensions/percentile", version = "0.2.0-pre.5" } +limbo_regexp = { path = "extensions/regexp", version = "0.2.0-pre.5" } +turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.2.0-pre.5" } +limbo_uuid = { path = "extensions/uuid", version = "0.2.0-pre.5" } +turso_parser = { path = "parser", version = "0.2.0-pre.5" } sql_generation = { path = "sql_generation" } strum = { version = "0.26", features = ["derive"] } strum_macros = "0.26" diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json index cb93e0ce1..a6e15d4e3 100644 --- a/bindings/javascript/package-lock.json +++ b/bindings/javascript/package-lock.json @@ -1,11 +1,11 @@ { "name": "javascript", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "lockfileVersion": 3, "requires": true, "packages": { "": { - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "workspaces": [ "packages/wasm-runtime", "packages/common", @@ -4594,11 +4594,11 @@ }, "packages/browser": { "name": "@tursodatabase/database-browser", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "license": "MIT", "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.4", - "@tursodatabase/database-common": "^0.2.0-pre.4" + "@tursodatabase/database-browser-common": "^0.2.0-pre.5", + "@tursodatabase/database-common": "^0.2.0-pre.5" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -4611,10 +4611,10 @@ }, "packages/browser-common": { "name": "@tursodatabase/database-browser-common", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "license": "MIT", "dependencies": { - "@tursodatabase/wasm-runtime": "^0.2.0-pre.4" + "@tursodatabase/wasm-runtime": "^0.2.0-pre.5" }, "devDependencies": { "typescript": "^5.9.2" @@ -4622,7 +4622,7 @@ }, "packages/common": { "name": "@tursodatabase/database-common", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "license": "MIT", "devDependencies": 
{ "typescript": "^5.9.2" @@ -4630,10 +4630,10 @@ }, "packages/native": { "name": "@tursodatabase/database", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.4" + "@tursodatabase/database-common": "^0.2.0-pre.5" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -4647,7 +4647,7 @@ }, "packages/wasm-runtime": { "name": "@tursodatabase/wasm-runtime", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "license": "MIT", "dependencies": { "@emnapi/core": "^1.4.5", @@ -4670,12 +4670,12 @@ }, "sync/packages/browser": { "name": "@tursodatabase/sync-browser", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "license": "MIT", "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.4", - "@tursodatabase/database-common": "^0.2.0-pre.4", - "@tursodatabase/sync-common": "^0.2.0-pre.4" + "@tursodatabase/database-browser-common": "^0.2.0-pre.5", + "@tursodatabase/database-common": "^0.2.0-pre.5", + "@tursodatabase/sync-common": "^0.2.0-pre.5" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -4688,10 +4688,10 @@ }, "sync/packages/common": { "name": "@tursodatabase/sync-common", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.4" + "@tursodatabase/database-common": "^0.2.0-pre.5" }, "devDependencies": { "typescript": "^5.9.2" @@ -4699,11 +4699,11 @@ }, "sync/packages/native": { "name": "@tursodatabase/sync", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.4", - "@tursodatabase/sync-common": "^0.2.0-pre.4" + "@tursodatabase/database-common": "^0.2.0-pre.5", + "@tursodatabase/sync-common": "^0.2.0-pre.5" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json index 1beb1f34f..30c32b537 100644 --- a/bindings/javascript/package.json +++ b/bindings/javascript/package.json @@ -15,5 +15,5 @@ "sync/packages/native", "sync/packages/browser" ], - "version": "0.2.0-pre.4" + "version": "0.2.0-pre.5" } diff --git a/bindings/javascript/packages/browser-common/package.json b/bindings/javascript/packages/browser-common/package.json index 97ee47624..3c9e9b1ce 100644 --- a/bindings/javascript/packages/browser-common/package.json +++ b/bindings/javascript/packages/browser-common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-browser-common", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/wasm-runtime": "^0.2.0-pre.4" + "@tursodatabase/wasm-runtime": "^0.2.0-pre.5" } } diff --git a/bindings/javascript/packages/browser/package.json b/bindings/javascript/packages/browser/package.json index 904f50610..9b828b8d8 100644 --- a/bindings/javascript/packages/browser/package.json +++ b/bindings/javascript/packages/browser/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-browser", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -51,7 +51,7 @@ ] }, "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.4", - "@tursodatabase/database-common": "^0.2.0-pre.4" + "@tursodatabase/database-browser-common": "^0.2.0-pre.5", + "@tursodatabase/database-common": 
"^0.2.0-pre.5" } } diff --git a/bindings/javascript/packages/common/package.json b/bindings/javascript/packages/common/package.json index 94b4e08f4..45fd8421c 100644 --- a/bindings/javascript/packages/common/package.json +++ b/bindings/javascript/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-common", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json index b2457dfc6..4ec6b80a7 100644 --- a/bindings/javascript/packages/native/package.json +++ b/bindings/javascript/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -47,7 +47,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.4" + "@tursodatabase/database-common": "^0.2.0-pre.5" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/packages/wasm-runtime/package.json b/bindings/javascript/packages/wasm-runtime/package.json index f78d0b17c..cf9c0bbed 100644 --- a/bindings/javascript/packages/wasm-runtime/package.json +++ b/bindings/javascript/packages/wasm-runtime/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/wasm-runtime", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "type": "module", "description": "Runtime and polyfill for wasm targets", "author": { diff --git a/bindings/javascript/sync/packages/browser/package.json b/bindings/javascript/sync/packages/browser/package.json index 9121d6d80..dafc7bd6c 100644 --- a/bindings/javascript/sync/packages/browser/package.json +++ b/bindings/javascript/sync/packages/browser/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-browser", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -54,8 +54,8 @@ "#index": "./index.js" }, "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.4", - "@tursodatabase/database-common": "^0.2.0-pre.4", - "@tursodatabase/sync-common": "^0.2.0-pre.4" + "@tursodatabase/database-browser-common": "^0.2.0-pre.5", + "@tursodatabase/database-common": "^0.2.0-pre.5", + "@tursodatabase/sync-common": "^0.2.0-pre.5" } } diff --git a/bindings/javascript/sync/packages/common/package.json b/bindings/javascript/sync/packages/common/package.json index 81cd1afa3..e4a19e761 100644 --- a/bindings/javascript/sync/packages/common/package.json +++ b/bindings/javascript/sync/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-common", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.4" + "@tursodatabase/database-common": "^0.2.0-pre.5" } } diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json index 9f3c40135..c85cc2e86 100644 --- a/bindings/javascript/sync/packages/native/package.json +++ b/bindings/javascript/sync/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync", - "version": "0.2.0-pre.4", + "version": "0.2.0-pre.5", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -44,8 +44,8 @@ ] 
}, "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.4", - "@tursodatabase/sync-common": "^0.2.0-pre.4" + "@tursodatabase/database-common": "^0.2.0-pre.5", + "@tursodatabase/sync-common": "^0.2.0-pre.5" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock index 35e17e013..7b83ede31 100644 --- a/bindings/javascript/yarn.lock +++ b/bindings/javascript/yarn.lock @@ -1575,11 +1575,11 @@ __metadata: languageName: node linkType: hard -"@tursodatabase/database-browser-common@npm:^0.2.0-pre.4, @tursodatabase/database-browser-common@workspace:packages/browser-common": +"@tursodatabase/database-browser-common@npm:^0.2.0-pre.5, @tursodatabase/database-browser-common@workspace:packages/browser-common": version: 0.0.0-use.local resolution: "@tursodatabase/database-browser-common@workspace:packages/browser-common" dependencies: - "@tursodatabase/wasm-runtime": "npm:^0.2.0-pre.4" + "@tursodatabase/wasm-runtime": "npm:^0.2.0-pre.5" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1589,8 +1589,8 @@ __metadata: resolution: "@tursodatabase/database-browser@workspace:packages/browser" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.4" - "@tursodatabase/database-common": "npm:^0.2.0-pre.4" + "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.5" + "@tursodatabase/database-common": "npm:^0.2.0-pre.5" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1599,7 +1599,7 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/database-common@npm:^0.2.0-pre.4, @tursodatabase/database-common@workspace:packages/common": +"@tursodatabase/database-common@npm:^0.2.0-pre.5, @tursodatabase/database-common@workspace:packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/database-common@workspace:packages/common" dependencies: @@ -1612,7 +1612,7 @@ __metadata: resolution: "@tursodatabase/database@workspace:packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0-pre.4" + "@tursodatabase/database-common": "npm:^0.2.0-pre.5" "@types/node": "npm:^24.3.1" better-sqlite3: "npm:^12.2.0" drizzle-kit: "npm:^0.31.4" @@ -1627,9 +1627,9 @@ __metadata: resolution: "@tursodatabase/sync-browser@workspace:sync/packages/browser" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.4" - "@tursodatabase/database-common": "npm:^0.2.0-pre.4" - "@tursodatabase/sync-common": "npm:^0.2.0-pre.4" + "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.5" + "@tursodatabase/database-common": "npm:^0.2.0-pre.5" + "@tursodatabase/sync-common": "npm:^0.2.0-pre.5" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1638,11 +1638,11 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/sync-common@npm:^0.2.0-pre.4, @tursodatabase/sync-common@workspace:sync/packages/common": +"@tursodatabase/sync-common@npm:^0.2.0-pre.5, @tursodatabase/sync-common@workspace:sync/packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/sync-common@workspace:sync/packages/common" dependencies: - "@tursodatabase/database-common": "npm:^0.2.0-pre.4" + "@tursodatabase/database-common": "npm:^0.2.0-pre.5" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1652,15 +1652,15 @@ __metadata: resolution: "@tursodatabase/sync@workspace:sync/packages/native" dependencies: "@napi-rs/cli": 
"npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0-pre.4" - "@tursodatabase/sync-common": "npm:^0.2.0-pre.4" + "@tursodatabase/database-common": "npm:^0.2.0-pre.5" + "@tursodatabase/sync-common": "npm:^0.2.0-pre.5" "@types/node": "npm:^24.3.1" typescript: "npm:^5.9.2" vitest: "npm:^3.2.4" languageName: unknown linkType: soft -"@tursodatabase/wasm-runtime@npm:^0.2.0-pre.4, @tursodatabase/wasm-runtime@workspace:packages/wasm-runtime": +"@tursodatabase/wasm-runtime@npm:^0.2.0-pre.5, @tursodatabase/wasm-runtime@workspace:packages/wasm-runtime": version: 0.0.0-use.local resolution: "@tursodatabase/wasm-runtime@workspace:packages/wasm-runtime" dependencies: From 26f90257a6844b02dfdca43605b22609c1b8015d Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 22 Sep 2025 11:44:21 +0300 Subject: [PATCH 74/78] Turso 0.2.0-pre.6 --- Cargo.lock | 54 +++++++++---------- Cargo.toml | 34 ++++++------ bindings/javascript/package-lock.json | 40 +++++++------- bindings/javascript/package.json | 2 +- .../packages/browser-common/package.json | 4 +- .../javascript/packages/browser/package.json | 6 +-- .../javascript/packages/common/package.json | 2 +- .../javascript/packages/native/package.json | 4 +- .../packages/wasm-runtime/package.json | 2 +- .../sync/packages/browser/package.json | 8 +-- .../sync/packages/common/package.json | 4 +- .../sync/packages/native/package.json | 6 +-- bindings/javascript/yarn.lock | 28 +++++----- 13 files changed, 97 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5bd0cf8f8..0a0345ac8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -654,7 +654,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_tester" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "anyhow", "assert_cmd", @@ -2090,7 +2090,7 @@ dependencies = [ [[package]] name = "limbo_completion" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "mimalloc", "turso_ext", @@ -2098,7 +2098,7 @@ dependencies = [ [[package]] name = "limbo_crypto" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "blake3", "data-encoding", @@ -2111,7 +2111,7 @@ dependencies = [ [[package]] name = "limbo_csv" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "csv", "mimalloc", @@ -2121,7 +2121,7 @@ dependencies = [ [[package]] name = "limbo_ipaddr" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "ipnetwork", "mimalloc", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "limbo_percentile" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "mimalloc", "turso_ext", @@ -2138,7 +2138,7 @@ dependencies = [ [[package]] name = "limbo_regexp" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "mimalloc", "regex", @@ -2147,7 +2147,7 @@ dependencies = [ [[package]] name = "limbo_sim" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "anyhow", "chrono", @@ -2180,7 +2180,7 @@ dependencies = [ [[package]] name = "limbo_sqlite_test_ext" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "cc", ] @@ -2901,7 +2901,7 @@ dependencies = [ [[package]] name = "py-turso" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "anyhow", "pyo3", @@ -3581,7 +3581,7 @@ checksum = "d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d" [[package]] name = "sql_generation" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "anarchist-readable-name-generator-lib 0.2.0", "anyhow", @@ -4082,7 +4082,7 @@ 
dependencies = [ [[package]] name = "turso" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "rand 0.9.2", "rand_chacha 0.9.0", @@ -4094,7 +4094,7 @@ dependencies = [ [[package]] name = "turso-java" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "jni", "thiserror 2.0.16", @@ -4103,7 +4103,7 @@ dependencies = [ [[package]] name = "turso_cli" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "anyhow", "cfg-if", @@ -4136,7 +4136,7 @@ dependencies = [ [[package]] name = "turso_core" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "aegis", "aes", @@ -4194,7 +4194,7 @@ dependencies = [ [[package]] name = "turso_dart" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "flutter_rust_bridge", "turso_core", @@ -4202,7 +4202,7 @@ dependencies = [ [[package]] name = "turso_ext" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "chrono", "getrandom 0.3.2", @@ -4211,7 +4211,7 @@ dependencies = [ [[package]] name = "turso_ext_tests" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "env_logger 0.11.7", "lazy_static", @@ -4222,7 +4222,7 @@ dependencies = [ [[package]] name = "turso_macros" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "proc-macro2", "quote", @@ -4231,7 +4231,7 @@ dependencies = [ [[package]] name = "turso_node" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "chrono", "napi", @@ -4244,7 +4244,7 @@ dependencies = [ [[package]] name = "turso_parser" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "bitflags 2.9.4", "criterion", @@ -4260,7 +4260,7 @@ dependencies = [ [[package]] name = "turso_sqlite3" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "env_logger 0.11.7", "libc", @@ -4273,7 +4273,7 @@ dependencies = [ [[package]] name = "turso_sqlite3_parser" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "bitflags 2.9.4", "cc", @@ -4291,7 +4291,7 @@ dependencies = [ [[package]] name = "turso_stress" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "anarchist-readable-name-generator-lib 0.1.2", "antithesis_sdk", @@ -4307,7 +4307,7 @@ dependencies = [ [[package]] name = "turso_sync_engine" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "base64", "bytes", @@ -4333,7 +4333,7 @@ dependencies = [ [[package]] name = "turso_sync_js" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "genawaiter", "napi", @@ -4348,7 +4348,7 @@ dependencies = [ [[package]] name = "turso_whopper" -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" dependencies = [ "anyhow", "clap", diff --git a/Cargo.toml b/Cargo.toml index 46e63fdcc..96d3af1a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,29 +39,29 @@ exclude = [ ] [workspace.package] -version = "0.2.0-pre.5" +version = "0.2.0-pre.6" authors = ["the Limbo authors"] edition = "2021" license = "MIT" repository = "https://github.com/tursodatabase/turso" [workspace.dependencies] -turso = { path = "bindings/rust", version = "0.2.0-pre.5" } -turso_node = { path = "bindings/javascript", version = "0.2.0-pre.5" } -limbo_completion = { path = "extensions/completion", version = "0.2.0-pre.5" } -turso_core = { path = "core", version = "0.2.0-pre.5" } -turso_sync_engine = { path = "sync/engine", version = "0.2.0-pre.5" } -limbo_crypto = { path = "extensions/crypto", version = "0.2.0-pre.5" } -limbo_csv = { path = "extensions/csv", version = "0.2.0-pre.5" } -turso_ext = { path = "extensions/core", version = 
"0.2.0-pre.5" } -turso_ext_tests = { path = "extensions/tests", version = "0.2.0-pre.5" } -limbo_ipaddr = { path = "extensions/ipaddr", version = "0.2.0-pre.5" } -turso_macros = { path = "macros", version = "0.2.0-pre.5" } -limbo_percentile = { path = "extensions/percentile", version = "0.2.0-pre.5" } -limbo_regexp = { path = "extensions/regexp", version = "0.2.0-pre.5" } -turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.2.0-pre.5" } -limbo_uuid = { path = "extensions/uuid", version = "0.2.0-pre.5" } -turso_parser = { path = "parser", version = "0.2.0-pre.5" } +turso = { path = "bindings/rust", version = "0.2.0-pre.6" } +turso_node = { path = "bindings/javascript", version = "0.2.0-pre.6" } +limbo_completion = { path = "extensions/completion", version = "0.2.0-pre.6" } +turso_core = { path = "core", version = "0.2.0-pre.6" } +turso_sync_engine = { path = "sync/engine", version = "0.2.0-pre.6" } +limbo_crypto = { path = "extensions/crypto", version = "0.2.0-pre.6" } +limbo_csv = { path = "extensions/csv", version = "0.2.0-pre.6" } +turso_ext = { path = "extensions/core", version = "0.2.0-pre.6" } +turso_ext_tests = { path = "extensions/tests", version = "0.2.0-pre.6" } +limbo_ipaddr = { path = "extensions/ipaddr", version = "0.2.0-pre.6" } +turso_macros = { path = "macros", version = "0.2.0-pre.6" } +limbo_percentile = { path = "extensions/percentile", version = "0.2.0-pre.6" } +limbo_regexp = { path = "extensions/regexp", version = "0.2.0-pre.6" } +turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.2.0-pre.6" } +limbo_uuid = { path = "extensions/uuid", version = "0.2.0-pre.6" } +turso_parser = { path = "parser", version = "0.2.0-pre.6" } sql_generation = { path = "sql_generation" } strum = { version = "0.26", features = ["derive"] } strum_macros = "0.26" diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json index a6e15d4e3..f853a3b20 100644 --- a/bindings/javascript/package-lock.json +++ b/bindings/javascript/package-lock.json @@ -1,11 +1,11 @@ { "name": "javascript", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "lockfileVersion": 3, "requires": true, "packages": { "": { - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "workspaces": [ "packages/wasm-runtime", "packages/common", @@ -4594,11 +4594,11 @@ }, "packages/browser": { "name": "@tursodatabase/database-browser", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "license": "MIT", "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.5", - "@tursodatabase/database-common": "^0.2.0-pre.5" + "@tursodatabase/database-browser-common": "^0.2.0-pre.6", + "@tursodatabase/database-common": "^0.2.0-pre.6" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -4611,10 +4611,10 @@ }, "packages/browser-common": { "name": "@tursodatabase/database-browser-common", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "license": "MIT", "dependencies": { - "@tursodatabase/wasm-runtime": "^0.2.0-pre.5" + "@tursodatabase/wasm-runtime": "^0.2.0-pre.6" }, "devDependencies": { "typescript": "^5.9.2" @@ -4622,7 +4622,7 @@ }, "packages/common": { "name": "@tursodatabase/database-common", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "license": "MIT", "devDependencies": { "typescript": "^5.9.2" @@ -4630,10 +4630,10 @@ }, "packages/native": { "name": "@tursodatabase/database", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.5" + 
"@tursodatabase/database-common": "^0.2.0-pre.6" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -4647,7 +4647,7 @@ }, "packages/wasm-runtime": { "name": "@tursodatabase/wasm-runtime", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "license": "MIT", "dependencies": { "@emnapi/core": "^1.4.5", @@ -4670,12 +4670,12 @@ }, "sync/packages/browser": { "name": "@tursodatabase/sync-browser", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "license": "MIT", "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.5", - "@tursodatabase/database-common": "^0.2.0-pre.5", - "@tursodatabase/sync-common": "^0.2.0-pre.5" + "@tursodatabase/database-browser-common": "^0.2.0-pre.6", + "@tursodatabase/database-common": "^0.2.0-pre.6", + "@tursodatabase/sync-common": "^0.2.0-pre.6" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -4688,10 +4688,10 @@ }, "sync/packages/common": { "name": "@tursodatabase/sync-common", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.5" + "@tursodatabase/database-common": "^0.2.0-pre.6" }, "devDependencies": { "typescript": "^5.9.2" @@ -4699,11 +4699,11 @@ }, "sync/packages/native": { "name": "@tursodatabase/sync", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.5", - "@tursodatabase/sync-common": "^0.2.0-pre.5" + "@tursodatabase/database-common": "^0.2.0-pre.6", + "@tursodatabase/sync-common": "^0.2.0-pre.6" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json index 30c32b537..199026e0d 100644 --- a/bindings/javascript/package.json +++ b/bindings/javascript/package.json @@ -15,5 +15,5 @@ "sync/packages/native", "sync/packages/browser" ], - "version": "0.2.0-pre.5" + "version": "0.2.0-pre.6" } diff --git a/bindings/javascript/packages/browser-common/package.json b/bindings/javascript/packages/browser-common/package.json index 3c9e9b1ce..b610d922c 100644 --- a/bindings/javascript/packages/browser-common/package.json +++ b/bindings/javascript/packages/browser-common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-browser-common", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/wasm-runtime": "^0.2.0-pre.5" + "@tursodatabase/wasm-runtime": "^0.2.0-pre.6" } } diff --git a/bindings/javascript/packages/browser/package.json b/bindings/javascript/packages/browser/package.json index 9b828b8d8..bc7f944d7 100644 --- a/bindings/javascript/packages/browser/package.json +++ b/bindings/javascript/packages/browser/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-browser", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -51,7 +51,7 @@ ] }, "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.5", - "@tursodatabase/database-common": "^0.2.0-pre.5" + "@tursodatabase/database-browser-common": "^0.2.0-pre.6", + "@tursodatabase/database-common": "^0.2.0-pre.6" } } diff --git a/bindings/javascript/packages/common/package.json b/bindings/javascript/packages/common/package.json index 45fd8421c..159e8dd99 100644 --- a/bindings/javascript/packages/common/package.json +++ 
b/bindings/javascript/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-common", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json index 4ec6b80a7..a996e6e96 100644 --- a/bindings/javascript/packages/native/package.json +++ b/bindings/javascript/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -47,7 +47,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.5" + "@tursodatabase/database-common": "^0.2.0-pre.6" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/packages/wasm-runtime/package.json b/bindings/javascript/packages/wasm-runtime/package.json index cf9c0bbed..18350432b 100644 --- a/bindings/javascript/packages/wasm-runtime/package.json +++ b/bindings/javascript/packages/wasm-runtime/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/wasm-runtime", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "type": "module", "description": "Runtime and polyfill for wasm targets", "author": { diff --git a/bindings/javascript/sync/packages/browser/package.json b/bindings/javascript/sync/packages/browser/package.json index dafc7bd6c..4b137956e 100644 --- a/bindings/javascript/sync/packages/browser/package.json +++ b/bindings/javascript/sync/packages/browser/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-browser", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -54,8 +54,8 @@ "#index": "./index.js" }, "dependencies": { - "@tursodatabase/database-browser-common": "^0.2.0-pre.5", - "@tursodatabase/database-common": "^0.2.0-pre.5", - "@tursodatabase/sync-common": "^0.2.0-pre.5" + "@tursodatabase/database-browser-common": "^0.2.0-pre.6", + "@tursodatabase/database-common": "^0.2.0-pre.6", + "@tursodatabase/sync-common": "^0.2.0-pre.6" } } diff --git a/bindings/javascript/sync/packages/common/package.json b/bindings/javascript/sync/packages/common/package.json index e4a19e761..643741919 100644 --- a/bindings/javascript/sync/packages/common/package.json +++ b/bindings/javascript/sync/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-common", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.5" + "@tursodatabase/database-common": "^0.2.0-pre.6" } } diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json index c85cc2e86..5462bcb07 100644 --- a/bindings/javascript/sync/packages/native/package.json +++ b/bindings/javascript/sync/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync", - "version": "0.2.0-pre.5", + "version": "0.2.0-pre.6", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -44,8 +44,8 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0-pre.5", - "@tursodatabase/sync-common": "^0.2.0-pre.5" + "@tursodatabase/database-common": "^0.2.0-pre.6", + "@tursodatabase/sync-common": "^0.2.0-pre.6" }, 
"imports": { "#index": "./index.js" diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock index 7b83ede31..1724b9112 100644 --- a/bindings/javascript/yarn.lock +++ b/bindings/javascript/yarn.lock @@ -1575,11 +1575,11 @@ __metadata: languageName: node linkType: hard -"@tursodatabase/database-browser-common@npm:^0.2.0-pre.5, @tursodatabase/database-browser-common@workspace:packages/browser-common": +"@tursodatabase/database-browser-common@npm:^0.2.0-pre.6, @tursodatabase/database-browser-common@workspace:packages/browser-common": version: 0.0.0-use.local resolution: "@tursodatabase/database-browser-common@workspace:packages/browser-common" dependencies: - "@tursodatabase/wasm-runtime": "npm:^0.2.0-pre.5" + "@tursodatabase/wasm-runtime": "npm:^0.2.0-pre.6" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1589,8 +1589,8 @@ __metadata: resolution: "@tursodatabase/database-browser@workspace:packages/browser" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.5" - "@tursodatabase/database-common": "npm:^0.2.0-pre.5" + "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.6" + "@tursodatabase/database-common": "npm:^0.2.0-pre.6" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1599,7 +1599,7 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/database-common@npm:^0.2.0-pre.5, @tursodatabase/database-common@workspace:packages/common": +"@tursodatabase/database-common@npm:^0.2.0-pre.6, @tursodatabase/database-common@workspace:packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/database-common@workspace:packages/common" dependencies: @@ -1612,7 +1612,7 @@ __metadata: resolution: "@tursodatabase/database@workspace:packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0-pre.5" + "@tursodatabase/database-common": "npm:^0.2.0-pre.6" "@types/node": "npm:^24.3.1" better-sqlite3: "npm:^12.2.0" drizzle-kit: "npm:^0.31.4" @@ -1627,9 +1627,9 @@ __metadata: resolution: "@tursodatabase/sync-browser@workspace:sync/packages/browser" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.5" - "@tursodatabase/database-common": "npm:^0.2.0-pre.5" - "@tursodatabase/sync-common": "npm:^0.2.0-pre.5" + "@tursodatabase/database-browser-common": "npm:^0.2.0-pre.6" + "@tursodatabase/database-common": "npm:^0.2.0-pre.6" + "@tursodatabase/sync-common": "npm:^0.2.0-pre.6" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1638,11 +1638,11 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/sync-common@npm:^0.2.0-pre.5, @tursodatabase/sync-common@workspace:sync/packages/common": +"@tursodatabase/sync-common@npm:^0.2.0-pre.6, @tursodatabase/sync-common@workspace:sync/packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/sync-common@workspace:sync/packages/common" dependencies: - "@tursodatabase/database-common": "npm:^0.2.0-pre.5" + "@tursodatabase/database-common": "npm:^0.2.0-pre.6" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1652,15 +1652,15 @@ __metadata: resolution: "@tursodatabase/sync@workspace:sync/packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0-pre.5" - "@tursodatabase/sync-common": "npm:^0.2.0-pre.5" + "@tursodatabase/database-common": "npm:^0.2.0-pre.6" + "@tursodatabase/sync-common": "npm:^0.2.0-pre.6" 
"@types/node": "npm:^24.3.1" typescript: "npm:^5.9.2" vitest: "npm:^3.2.4" languageName: unknown linkType: soft -"@tursodatabase/wasm-runtime@npm:^0.2.0-pre.5, @tursodatabase/wasm-runtime@workspace:packages/wasm-runtime": +"@tursodatabase/wasm-runtime@npm:^0.2.0-pre.6, @tursodatabase/wasm-runtime@workspace:packages/wasm-runtime": version: 0.0.0-use.local resolution: "@tursodatabase/wasm-runtime@workspace:packages/wasm-runtime" dependencies: From 76f2e4e2170f80509ca967ca74f3c3a12ba9d3cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kacper=20Ko=C5=82odziej?= Date: Mon, 22 Sep 2025 10:45:38 +0200 Subject: [PATCH 75/78] Enable checksum tests if checksum feature is on These tests fail if checksum feature is turned off. --- core/storage/checksum.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/storage/checksum.rs b/core/storage/checksum.rs index 74cb2d518..8aa16fadc 100644 --- a/core/storage/checksum.rs +++ b/core/storage/checksum.rs @@ -107,6 +107,7 @@ mod tests { } #[test] + #[cfg(feature = "checksum")] fn test_add_checksum_to_page() { let ctx = ChecksumContext::new(); let mut page = get_random_page(); @@ -135,6 +136,7 @@ mod tests { } #[test] + #[cfg(feature = "checksum")] fn test_verify_and_strip_checksum_mismatch() { let ctx = ChecksumContext::new(); let mut page = get_random_page(); @@ -160,6 +162,7 @@ mod tests { } #[test] + #[cfg(feature = "checksum")] fn test_verify_and_strip_checksum_corrupted_checksum() { let ctx = ChecksumContext::new(); let mut page = get_random_page(); From 6a20735fe0057d82aa50044e210b569855a97d0d Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 22 Sep 2025 12:40:19 +0300 Subject: [PATCH 76/78] mvcc: add blocking checkpoint lock MVCC checkpoints are always TRUNCATE, plus they block all other transactions. This guarantees that never need to let transactions read from the SQLite WAL. In MVCC, the checkpoint procedure is roughly as follows: - Take the blocking_checkpoint_lock - Write everything in the logical log to the pager, and from there commit to the SQLite WAL. - Immediately TRUNCATE checkpoint the WAL into the database file. - Release the blocking_checkpoint_lock. --- core/mvcc/database/mod.rs | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 547cd9e95..da64dda13 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -900,7 +900,7 @@ impl StateTransition for CommitStateMachine { // But that's a problem for another day. // FIXME: it actually just become a problem for today!!! // TODO: test that reproduces this failure, and then a fix - mvcc_store.txs.remove(&self.tx_id); + mvcc_store.remove_tx(self.tx_id); if mvcc_store.is_exclusive_tx(&self.tx_id) { mvcc_store.release_exclusive_tx(&self.tx_id); @@ -1109,6 +1109,14 @@ pub struct MvStore { exclusive_tx: RwLock>, commit_coordinator: Arc, global_header: Arc>>, + /// MVCC checkpoints are always TRUNCATE, plus they block all other transactions. + /// This guarantees that never need to let transactions read from the SQLite WAL. + /// In MVCC, the checkpoint procedure is roughly as follows: + /// - Take the blocking_checkpoint_lock + /// - Write everything in the logical log to the pager, and from there commit to the SQLite WAL. + /// - Immediately TRUNCATE checkpoint the WAL into the database file. + /// - Release the blocking_checkpoint_lock. 
+ blocking_checkpoint_lock: Arc, } impl MvStore { @@ -1128,6 +1136,7 @@ impl MvStore { commits_waiting: Arc::new(AtomicU64::new(0)), }), global_header: Arc::new(RwLock::new(None)), + blocking_checkpoint_lock: Arc::new(TursoRwLock::new()), } } @@ -1407,6 +1416,11 @@ impl MvStore { pager: Arc, maybe_existing_tx_id: Option, ) -> Result> { + if !self.blocking_checkpoint_lock.read() { + // If there is a stop-the-world checkpoint in progress, we cannot begin any transaction at all. + return Err(LimboError::Busy); + } + let unlock = || self.blocking_checkpoint_lock.unlock(); let tx_id = maybe_existing_tx_id.unwrap_or_else(|| self.get_tx_id()); let begin_ts = if let Some(tx_id) = maybe_existing_tx_id { self.txs.get(&tx_id).unwrap().value().begin_ts @@ -1414,7 +1428,8 @@ impl MvStore { self.get_timestamp() }; - self.acquire_exclusive_tx(&tx_id)?; + self.acquire_exclusive_tx(&tx_id) + .inspect_err(|_| unlock())?; let locked = self.commit_coordinator.pager_commit_lock.write(); if !locked { @@ -1423,6 +1438,7 @@ impl MvStore { tx_id ); self.release_exclusive_tx(&tx_id); + unlock(); return Err(LimboError::Busy); } @@ -1444,6 +1460,10 @@ impl MvStore { /// that you can use to perform operations within the transaction. All changes made within the /// transaction are isolated from other transactions until you commit the transaction. pub fn begin_tx(&self, pager: Arc) -> Result { + if !self.blocking_checkpoint_lock.read() { + // If there is a stop-the-world checkpoint in progress, we cannot begin any transaction at all. + return Err(LimboError::Busy); + } let tx_id = self.get_tx_id(); let begin_ts = self.get_timestamp(); @@ -1456,6 +1476,11 @@ impl MvStore { Ok(tx_id) } + pub fn remove_tx(&self, tx_id: TxID) { + self.txs.remove(&tx_id); + self.blocking_checkpoint_lock.unlock(); + } + fn get_new_transaction_database_header(&self, pager: &Arc) -> DatabaseHeader { if self.global_header.read().is_none() { pager.io.block(|| pager.maybe_allocate_page1()).unwrap(); @@ -1599,7 +1624,7 @@ impl MvStore { tracing::trace!("terminate(tx_id={})", tx_id); // FIXME: verify that we can already remove the transaction here! // Maybe it's fine for snapshot isolation, but too early for serializable? 
- self.txs.remove(&tx_id); + self.remove_tx(tx_id); Ok(()) } From 372daef656f1369cbaa748308b607eb1798c7c64 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 22 Sep 2025 09:18:17 +0300 Subject: [PATCH 77/78] core: Wrap Pager::io_ctx in RwLock --- core/storage/pager.rs | 24 +++++++++++------------- core/storage/sqlite3_ondisk.rs | 8 ++++---- core/storage/wal.rs | 17 ++++++++--------- 3 files changed, 23 insertions(+), 26 deletions(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 846487dbe..ce47f2c5d 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -528,7 +528,7 @@ pub struct Pager { header_ref_state: RwLock, #[cfg(not(feature = "omit_autovacuum"))] vacuum_state: RwLock, - pub(crate) io_ctx: RefCell, + pub(crate) io_ctx: RwLock, } #[cfg(not(feature = "omit_autovacuum"))] @@ -638,7 +638,7 @@ impl Pager { ptrmap_put_state: PtrMapPutState::Start, btree_create_vacuum_full_state: BtreeCreateVacuumFullState::Start, }), - io_ctx: RefCell::new(IOContext::default()), + io_ctx: RwLock::new(IOContext::default()), }) } @@ -1178,7 +1178,7 @@ impl Pager { ) -> Result<(PageRef, Completion)> { tracing::trace!("read_page_no_cache(page_idx = {})", page_idx); let page = Arc::new(Page::new(page_idx)); - let io_ctx = &self.io_ctx.borrow(); + let io_ctx = self.io_ctx.read(); let Some(wal) = self.wal.as_ref() else { turso_assert!( matches!(frame_watermark, Some(0) | None), @@ -1186,7 +1186,7 @@ impl Pager { ); page.set_locked(); - let c = self.begin_read_disk_page(page_idx, page.clone(), allow_empty_read, io_ctx)?; + let c = self.begin_read_disk_page(page_idx, page.clone(), allow_empty_read, &io_ctx)?; return Ok((page, c)); }; @@ -1199,7 +1199,7 @@ impl Pager { return Ok((page, c)); } - let c = self.begin_read_disk_page(page_idx, page.clone(), allow_empty_read, io_ctx)?; + let c = self.begin_read_disk_page(page_idx, page.clone(), allow_empty_read, &io_ctx)?; Ok((page, c)) } @@ -1993,7 +1993,7 @@ impl Pager { // based on the IOContext set, we will set the reserved space bytes as required by // either the encryption or checksum, or None if they are not set. 
let reserved_space_bytes = { - let io_ctx = self.io_ctx.borrow(); + let io_ctx = self.io_ctx.read(); io_ctx.get_reserved_space_bytes() }; default_header.reserved_space = reserved_space_bytes; @@ -2366,7 +2366,7 @@ impl Pager { } pub fn is_encryption_ctx_set(&self) -> bool { - self.io_ctx.borrow_mut().encryption_context().is_some() + self.io_ctx.write().encryption_context().is_some() } pub fn set_encryption_context( @@ -2377,25 +2377,23 @@ impl Pager { let page_size = self.get_page_size_unchecked().get() as usize; let encryption_ctx = EncryptionContext::new(cipher_mode, key, page_size)?; { - let mut io_ctx = self.io_ctx.borrow_mut(); + let mut io_ctx = self.io_ctx.write(); io_ctx.set_encryption(encryption_ctx); } let Some(wal) = self.wal.as_ref() else { return Ok(()); }; - wal.borrow_mut() - .set_io_context(self.io_ctx.borrow().clone()); + wal.borrow_mut().set_io_context(self.io_ctx.read().clone()); Ok(()) } pub fn reset_checksum_context(&self) { { - let mut io_ctx = self.io_ctx.borrow_mut(); + let mut io_ctx = self.io_ctx.write(); io_ctx.reset_checksum(); } let Some(wal) = self.wal.as_ref() else { return }; - wal.borrow_mut() - .set_io_context(self.io_ctx.borrow().clone()) + wal.borrow_mut().set_io_context(self.io_ctx.read().clone()) } pub fn set_reserved_space_bytes(&self, value: u8) { diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs index 126561058..0988f8eb4 100644 --- a/core/storage/sqlite3_ondisk.rs +++ b/core/storage/sqlite3_ondisk.rs @@ -975,8 +975,8 @@ pub fn begin_write_btree_page(pager: &Pager, page: &PageRef) -> Result completions.push(c), diff --git a/core/storage/wal.rs b/core/storage/wal.rs index d6b111172..9e58f9361 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -2,7 +2,6 @@ use std::array; use std::borrow::Cow; -use std::cell::RefCell; use std::collections::{BTreeMap, HashMap, HashSet}; use strum::EnumString; use tracing::{instrument, Level}; @@ -588,7 +587,7 @@ pub struct WalFile { /// Manages locks needed for checkpointing checkpoint_guard: Option, - io_ctx: RefCell, + io_ctx: RwLock, } impl fmt::Debug for WalFile { @@ -1124,7 +1123,7 @@ impl Wal for WalFile { buffer_pool, complete, page_idx, - &self.io_ctx.borrow(), + &self.io_ctx.read(), ) } @@ -1135,7 +1134,7 @@ impl Wal for WalFile { let (frame_ptr, frame_len) = (frame.as_mut_ptr(), frame.len()); let encryption_ctx = { - let io_ctx = self.io_ctx.borrow(); + let io_ctx = self.io_ctx.read(); io_ctx.encryption_context().cloned() }; let complete = Box::new(move |res: Result<(Arc, i32), CompletionError>| { @@ -1243,7 +1242,7 @@ impl Wal for WalFile { buffer_pool, complete, page_id as usize, - &self.io_ctx.borrow(), + &self.io_ctx.read(), )?; self.io.wait_for_completion(c)?; return if conflict.get() { @@ -1493,7 +1492,7 @@ impl Wal for WalFile { let plain = page.get_contents().as_ptr(); let data_to_write: std::borrow::Cow<[u8]> = { - let io_ctx = self.io_ctx.borrow(); + let io_ctx = self.io_ctx.read(); match &io_ctx.encryption_or_checksum() { EncryptionOrChecksum::Encryption(ctx) => { Cow::Owned(ctx.encrypt_page(plain, page_id)?) 
@@ -1573,7 +1572,7 @@ impl Wal for WalFile { } fn set_io_context(&mut self, ctx: IOContext) { - self.io_ctx.replace(ctx); + *self.io_ctx.write() = ctx; } fn update_max_frame(&mut self) { @@ -1624,7 +1623,7 @@ impl WalFile { prev_checkpoint: CheckpointResult::default(), checkpoint_guard: None, header, - io_ctx: RefCell::new(IOContext::default()), + io_ctx: RwLock::new(IOContext::default()), } } @@ -2233,7 +2232,7 @@ impl WalFile { self.buffer_pool.clone(), complete, page_id, - &self.io_ctx.borrow(), + &self.io_ctx.read(), )?; Ok(InflightRead { From 4af49ef98c9de4fa0f61849aa02472a8b5181c44 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 22 Sep 2025 16:28:54 +0300 Subject: [PATCH 78/78] mvcc: simplify StateMachine TransitionResult is an internal implementation detail that tells an invocation of StateMachine::step() to continue looping, but it is of no use to other callers. For this reason, just return an IOResult from StateMachine::step() which simplifies the result handling. --- core/benches/mvcc_benchmark.rs | 18 +++++++----------- core/mvcc/database/mod.rs | 14 ++++---------- core/mvcc/database/tests.rs | 10 ++++------ core/state_machine.rs | 20 +++++++++----------- core/storage/checksum.rs | 1 - core/vdbe/mod.rs | 14 ++------------ 6 files changed, 26 insertions(+), 51 deletions(-) diff --git a/core/benches/mvcc_benchmark.rs b/core/benches/mvcc_benchmark.rs index de8d4bdff..547a473b4 100644 --- a/core/benches/mvcc_benchmark.rs +++ b/core/benches/mvcc_benchmark.rs @@ -5,8 +5,7 @@ use criterion::{criterion_group, criterion_main, Criterion, Throughput}; use pprof::criterion::{Output, PProfProfiler}; use turso_core::mvcc::clock::LocalClock; use turso_core::mvcc::database::{MvStore, Row, RowID}; -use turso_core::state_machine::{StateTransition, TransitionResult}; -use turso_core::types::{ImmutableRecord, Text}; +use turso_core::types::{IOResult, ImmutableRecord, Text}; use turso_core::{Connection, Database, MemoryIO, Value}; struct BenchDb { @@ -55,9 +54,8 @@ fn bench(c: &mut Criterion) { loop { let res = sm.step(mv_store).unwrap(); match res { - TransitionResult::Io(io) => io.wait(db._db.io.as_ref()).unwrap(), - TransitionResult::Continue => continue, - TransitionResult::Done(_) => break, + IOResult::IO(io) => io.wait(db._db.io.as_ref()).unwrap(), + IOResult::Done(_) => break, } } }) @@ -85,9 +83,8 @@ fn bench(c: &mut Criterion) { loop { let res = sm.step(mv_store).unwrap(); match res { - TransitionResult::Io(io) => io.wait(db._db.io.as_ref()).unwrap(), - TransitionResult::Continue => continue, - TransitionResult::Done(_) => break, + IOResult::IO(io) => io.wait(db._db.io.as_ref()).unwrap(), + IOResult::Done(_) => break, } } }) @@ -121,9 +118,8 @@ fn bench(c: &mut Criterion) { loop { let res = sm.step(mv_store).unwrap(); match res { - TransitionResult::Io(io) => io.wait(db._db.io.as_ref()).unwrap(), - TransitionResult::Continue => continue, - TransitionResult::Done(_) => break, + IOResult::IO(io) => io.wait(db._db.io.as_ref()).unwrap(), + IOResult::Done(_) => break, } } }) diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index da64dda13..1b2df0ebb 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -708,11 +708,8 @@ impl StateTransition for CommitStateMachine { } => { let write_row_state_machine = self.write_row_state_machine.as_mut().unwrap(); match write_row_state_machine.step(&())? 
{ - TransitionResult::Io(io) => return Ok(TransitionResult::Io(io)), - TransitionResult::Continue => { - return Ok(TransitionResult::Continue); - } - TransitionResult::Done(_) => { + IOResult::IO(io) => return Ok(TransitionResult::Io(io)), + IOResult::Done(_) => { let requires_seek = { if let Some(next_id) = self.write_set.get(*write_set_index + 1) { let current_id = &self.write_set[*write_set_index]; @@ -744,11 +741,8 @@ impl StateTransition for CommitStateMachine { } => { let delete_row_state_machine = self.delete_row_state_machine.as_mut().unwrap(); match delete_row_state_machine.step(&())? { - TransitionResult::Io(io) => return Ok(TransitionResult::Io(io)), - TransitionResult::Continue => { - return Ok(TransitionResult::Continue); - } - TransitionResult::Done(_) => { + IOResult::IO(io) => return Ok(TransitionResult::Io(io)), + IOResult::Done(_) => { self.state = CommitState::WriteRow { end_ts: *end_ts, write_set_index: *write_set_index + 1, diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs index 2a7a2e5cc..e88f2d0ab 100644 --- a/core/mvcc/database/tests.rs +++ b/core/mvcc/database/tests.rs @@ -803,11 +803,10 @@ pub(crate) fn commit_tx( loop { let res = sm.step(&mv_store)?; match res { - crate::state_machine::TransitionResult::Io(io) => { + IOResult::IO(io) => { io.wait(conn.db.io.as_ref())?; } - crate::state_machine::TransitionResult::Continue => continue, - crate::state_machine::TransitionResult::Done(_) => break, + IOResult::Done(_) => break, } } assert!(sm.is_finalized()); @@ -827,11 +826,10 @@ pub(crate) fn commit_tx_no_conn( loop { let res = sm.step(&mv_store)?; match res { - crate::state_machine::TransitionResult::Io(io) => { + IOResult::IO(io) => { io.wait(conn.db.io.as_ref())?; } - crate::state_machine::TransitionResult::Continue => continue, - crate::state_machine::TransitionResult::Done(_) => break, + IOResult::Done(_) => break, } } assert!(sm.is_finalized()); diff --git a/core/state_machine.rs b/core/state_machine.rs index 0d776df10..b06371cd3 100644 --- a/core/state_machine.rs +++ b/core/state_machine.rs @@ -1,4 +1,7 @@ -use crate::{types::IOCompletions, Result}; +use crate::{ + types::{IOCompletions, IOResult}, + Result, +}; pub enum TransitionResult { Io(IOCompletions), @@ -41,20 +44,15 @@ impl StateMachine { is_finalized: false, } } -} -impl StateTransition for StateMachine { - type Context = State::Context; - type SMResult = State::SMResult; - - fn step<'a>(&mut self, context: &Self::Context) -> Result> { + pub fn step(&mut self, context: &State::Context) -> Result> { loop { if self.is_finalized { unreachable!("StateMachine::transition: state machine is finalized"); } match self.state.step(context)? 
{ TransitionResult::Io(io) => { - return Ok(TransitionResult::Io(io)); + return Ok(IOResult::IO(io)); } TransitionResult::Continue => { continue; @@ -62,19 +60,19 @@ impl StateTransition for StateMachine { TransitionResult::Done(result) => { assert!(self.state.is_finalized()); self.is_finalized = true; - return Ok(TransitionResult::Done(result)); + return Ok(IOResult::Done(result)); } } } } - fn finalize(&mut self, context: &Self::Context) -> Result<()> { + pub fn finalize(&mut self, context: &State::Context) -> Result<()> { self.state.finalize(context)?; self.is_finalized = true; Ok(()) } - fn is_finalized(&self) -> bool { + pub fn is_finalized(&self) -> bool { self.is_finalized } } diff --git a/core/storage/checksum.rs b/core/storage/checksum.rs index 8aa16fadc..e7ba78975 100644 --- a/core/storage/checksum.rs +++ b/core/storage/checksum.rs @@ -92,7 +92,6 @@ impl Default for ChecksumContext { #[cfg(test)] mod tests { use super::*; - use crate::CompletionError; fn get_random_page() -> [u8; CHECKSUM_PAGE_SIZE] { let mut page = [0u8; CHECKSUM_PAGE_SIZE]; diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 0b4fd6c9c..203fb1d55 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -29,7 +29,7 @@ use crate::{ error::LimboError, function::{AggFunc, FuncCtx}, mvcc::{database::CommitStateMachine, LocalClock}, - state_machine::{StateMachine, StateTransition, TransitionResult}, + state_machine::StateMachine, storage::sqlite3_ondisk::SmallVec, translate::{collate::CollationSeq, plan::TableReferences}, types::{IOCompletions, IOResult, RawSlice, TextRef}, @@ -932,17 +932,7 @@ impl Program { commit_state: &mut StateMachine>, mv_store: &Arc, ) -> Result> { - loop { - match commit_state.step(mv_store)? { - TransitionResult::Continue => {} - TransitionResult::Io(iocompletions) => { - return Ok(IOResult::IO(iocompletions)); - } - TransitionResult::Done(_) => { - return Ok(IOResult::Done(())); - } - } - } + commit_state.step(mv_store) } }
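
Note on [PATCH 76/78] (mvcc: add blocking checkpoint lock): the gating pattern is easiest to see in isolation. The following is a minimal sketch, assuming std::sync::RwLock as a stand-in for the internal TursoRwLock; Store, SketchError, begin_tx, and checkpoint are illustrative names, not Turso's actual API. Transactions take the read half and a TRUNCATE checkpoint takes the write half, so each side observes Busy while the other is active.

// Minimal sketch of the blocking-checkpoint gating from PATCH 76/78.
// Assumptions: std::sync::RwLock stands in for the internal TursoRwLock,
// and Store/SketchError are illustrative names, not Turso's API.
use std::sync::{RwLock, RwLockReadGuard};

#[derive(Debug, PartialEq)]
enum SketchError {
    Busy,
}

struct Store {
    // Transactions hold the read half; a TRUNCATE checkpoint holds the write half.
    blocking_checkpoint_lock: RwLock<()>,
}

impl Store {
    // A transaction may begin only while no blocking checkpoint is running.
    fn begin_tx(&self) -> Result<RwLockReadGuard<'_, ()>, SketchError> {
        self.blocking_checkpoint_lock
            .try_read()
            .map_err(|_| SketchError::Busy)
    }

    // The checkpoint excludes all new transactions until its guard drops.
    fn checkpoint(&self) -> Result<(), SketchError> {
        let _guard = self
            .blocking_checkpoint_lock
            .try_write()
            .map_err(|_| SketchError::Busy)?;
        // 1. Flush the logical log through the pager into the SQLite WAL.
        // 2. TRUNCATE-checkpoint the WAL into the database file.
        Ok(())
    }
}

fn main() {
    let store = Store {
        blocking_checkpoint_lock: RwLock::new(()),
    };
    let tx = store.begin_tx().expect("no checkpoint in progress");
    // An open transaction forces the blocking checkpoint to report Busy.
    assert_eq!(store.checkpoint(), Err(SketchError::Busy));
    drop(tx); // ending the transaction releases the read half
    assert_eq!(store.checkpoint(), Ok(()));
}

In the real patch the read half is released in remove_tx() rather than by a guard, which is why both the commit path and terminate() now funnel through that helper.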
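
Note on [PATCH 78/78] (mvcc: simplify StateMachine): the commit message's point, that TransitionResult::Continue only matters inside StateMachine::step(), can be shown with a toy state machine. This is a sketch with invented stand-in types: the ticks-driven transition is purely illustrative, and turso_core's IOResult::IO carries completions that are omitted here. The public step() consumes Continue in its own loop and surfaces only the two outcomes a caller can act on.

// Toy illustration of the PATCH 78/78 simplification: Continue stays an
// internal detail of step(), so callers match two arms instead of three.
// All types here are stand-ins, not turso_core's.
enum IOResult<T> {
    IO, // caller must run pending I/O, then call step() again
    Done(T),
}

enum TransitionResult<T> {
    Io,
    Continue,
    Done(T),
}

struct StateMachine {
    ticks: u32,
}

impl StateMachine {
    // Internal transition: may ask its driver to keep looping.
    fn transition(&mut self) -> TransitionResult<u32> {
        self.ticks += 1;
        match self.ticks {
            1 => TransitionResult::Continue, // internal-only outcome
            2 => TransitionResult::Io,
            _ => TransitionResult::Done(self.ticks),
        }
    }

    // Public step: loops over Continue so it never escapes to callers.
    fn step(&mut self) -> IOResult<u32> {
        loop {
            match self.transition() {
                TransitionResult::Continue => continue,
                TransitionResult::Io => return IOResult::IO,
                TransitionResult::Done(v) => return IOResult::Done(v),
            }
        }
    }
}

fn main() {
    let mut sm = StateMachine { ticks: 0 };
    loop {
        match sm.step() {
            IOResult::IO => { /* perform pending I/O, then retry */ }
            IOResult::Done(t) => {
                println!("done after {t} transitions");
                break;
            }
        }
    }
}

After the change, call sites like the benchmark loops in the diff above shrink from three match arms to two, which is exactly the patch's mechanical effect.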