From e9b2c41c39ba797c143cbc44af76d294059729e4 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Thu, 18 Sep 2025 02:23:35 -0300 Subject: [PATCH 001/428] Return better syntax error messages --- parser/src/error.rs | 96 ++++++++++++++++++----- parser/src/lexer.rs | 180 ++++++++++++++++++++++++++++++++++--------- parser/src/parser.rs | 22 +++++- parser/src/token.rs | 43 +++++++++++ 4 files changed, 285 insertions(+), 56 deletions(-) diff --git a/parser/src/error.rs b/parser/src/error.rs index 63d4aef78..26353ffb6 100644 --- a/parser/src/error.rs +++ b/parser/src/error.rs @@ -6,45 +6,101 @@ use crate::token::TokenType; #[diagnostic()] pub enum Error { /// Lexer error - #[error("unrecognized token at {0:?}")] - UnrecognizedToken(#[label("here")] miette::SourceSpan), + #[error("unrecognized token '{token_text}' at offset {offset}")] + UnrecognizedToken { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, /// Missing quote or double-quote or backtick - #[error("non-terminated literal at {0:?}")] - UnterminatedLiteral(#[label("here")] miette::SourceSpan), + #[error("non-terminated literal '{token_text}' at offset {offset}")] + UnterminatedLiteral { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, /// Missing `]` - #[error("non-terminated bracket at {0:?}")] - UnterminatedBracket(#[label("here")] miette::SourceSpan), + #[error("non-terminated bracket '{token_text}' at offset {offset}")] + UnterminatedBracket { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, + /// Missing `*/` + #[error("non-terminated block comment '{token_text}' at offset {offset}")] + UnterminatedBlockComment { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, /// Invalid parameter name - #[error("bad variable name at {0:?}")] - BadVariableName(#[label("here")] miette::SourceSpan), + #[error("bad variable name '{token_text}' at offset {offset}")] + BadVariableName { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, /// Invalid number format - #[error("bad number at {0:?}")] - BadNumber(#[label("here")] miette::SourceSpan), + #[error("bad number '{token_text}' at offset {offset}")] + BadNumber { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, // Bad fractional part of a number - #[error("bad fractional part at {0:?}")] - BadFractionalPart(#[label("here")] miette::SourceSpan), + #[error("bad fractional part '{token_text}' at offset {offset}")] + BadFractionalPart { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, // Bad exponent part of a number - #[error("bad exponent part at {0:?}")] - BadExponentPart(#[label("here")] miette::SourceSpan), + #[error("bad exponent part '{token_text}' at offset {offset}")] + BadExponentPart { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, /// Invalid or missing sign after `!` - #[error("expected = sign at {0:?}")] - ExpectedEqualsSign(#[label("here")] miette::SourceSpan), + #[error("expected = sign '{token_text}' at offset {offset}")] + ExpectedEqualsSign { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, /// Hexadecimal integer literals follow the C-language notation of "0x" or "0X" followed by hexadecimal digits. - #[error("malformed hex integer at {0:?}")] - MalformedHexInteger(#[label("here")] miette::SourceSpan), + #[error("malformed hex integer '{token_text}' at offset {offset}")] + MalformedHexInteger { + #[label("here")] + span: miette::SourceSpan, + token_text: String, + offset: usize, + }, // parse errors // Unexpected end of file #[error("unexpected end of file")] ParseUnexpectedEOF, // Unexpected token - #[error("unexpected token at {parsed_offset:?}")] - #[diagnostic(help("expected {expected:?} but found {got:?}"))] + #[error("unexpected token '{token_text}' at offset {offset}")] + #[diagnostic(help("expected {expected_display} but found '{token_text}'"))] ParseUnexpectedToken { #[label("here")] parsed_offset: miette::SourceSpan, got: TokenType, expected: &'static [TokenType], + token_text: String, + offset: usize, + expected_display: String, }, // Custom error message #[error("{0}")] diff --git a/parser/src/lexer.rs b/parser/src/lexer.rs index 0876e4103..3621da888 100644 --- a/parser/src/lexer.rs +++ b/parser/src/lexer.rs @@ -297,14 +297,27 @@ impl<'a> Lexer<'a> { if start == self.offset { // before the underscore, there was no digit - return Err(Error::BadNumber((start, self.offset - start).into())); + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + return Err(Error::BadNumber { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } match self.peek() { Some(b) if b.is_ascii_digit() => continue, // Continue if next is a digit _ => { // after the underscore, there is no digit - return Err(Error::BadNumber((start, self.offset - start).into())); + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]) + .to_string(); + return Err(Error::BadNumber { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } } } @@ -321,7 +334,13 @@ impl<'a> Lexer<'a> { Some(b'_') => { if start == self.offset { // before the underscore, there was no digit - return Err(Error::BadNumber((start, self.offset - start).into())); + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + return Err(Error::BadNumber { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } self.eat_and_assert(|b| b == b'_'); @@ -329,7 +348,14 @@ impl<'a> Lexer<'a> { Some(b) if b.is_ascii_hexdigit() => continue, // Continue if next is a digit _ => { // after the underscore, there is no digit - return Err(Error::BadNumber((start, self.offset - start).into())); + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]) + .to_string(); + return Err(Error::BadNumber { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } } } @@ -415,11 +441,29 @@ impl<'a> Lexer<'a> { self.eat_and_assert(|b| b == b'/'); break; // End of block comment } - None => break, + None => { + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]) + .to_string(); + return Err(Error::UnterminatedBlockComment { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); + } _ => {} } } - None => break, + None => { + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]) + .to_string(); + return Err(Error::UnterminatedBlockComment { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); + } _ => unreachable!(), // We should not reach here } } @@ -514,9 +558,13 @@ impl<'a> Lexer<'a> { self.eat_and_assert(|b| b == b'='); } _ => { - return Err(Error::ExpectedEqualsSign( - (start, self.offset - start).into(), - )) + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + return Err(Error::ExpectedEqualsSign { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } } @@ -567,9 +615,13 @@ impl<'a> Lexer<'a> { } } None => { - return Err(Error::UnterminatedLiteral( - (start, self.offset - start).into(), - )) + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + return Err(Error::UnterminatedLiteral { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } _ => unreachable!(), }; @@ -598,9 +650,15 @@ impl<'a> Lexer<'a> { token_type: Some(TokenType::TK_FLOAT), }) } - Some(b) if is_identifier_start(b) => Err(Error::BadFractionalPart( - (start, self.offset - start).into(), - )), + Some(b) if is_identifier_start(b) => { + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + Err(Error::BadFractionalPart { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }) + } _ => Ok(Token { value: &self.input[start..self.offset], token_type: Some(TokenType::TK_FLOAT), @@ -627,11 +685,21 @@ impl<'a> Lexer<'a> { let start_num = self.offset; self.eat_while_number_digit()?; if start_num == self.offset { - return Err(Error::BadExponentPart((start, self.offset - start).into())); + let token_text = String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + return Err(Error::BadExponentPart { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } if self.peek().is_some() && is_identifier_start(self.peek().unwrap()) { - return Err(Error::BadExponentPart((start, self.offset - start).into())); + let token_text = String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + return Err(Error::BadExponentPart { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } Ok(Token { @@ -654,13 +722,23 @@ impl<'a> Lexer<'a> { self.eat_while_number_hexdigit()?; if start_hex == self.offset { - return Err(Error::MalformedHexInteger( - (start, self.offset - start).into(), - )); + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + return Err(Error::MalformedHexInteger { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } if self.peek().is_some() && is_identifier_start(self.peek().unwrap()) { - return Err(Error::BadNumber((start, self.offset - start).into())); + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + return Err(Error::BadNumber { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } return Ok(Token { @@ -689,7 +767,13 @@ impl<'a> Lexer<'a> { }) } Some(b) if is_identifier_start(b) => { - Err(Error::BadNumber((start, self.offset - start).into())) + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + Err(Error::BadNumber { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }) } _ => Ok(Token { value: &self.input[start..self.offset], @@ -710,9 +794,15 @@ impl<'a> Lexer<'a> { token_type: Some(TokenType::TK_ID), }) } - None => Err(Error::UnterminatedBracket( - (start, self.offset - start).into(), - )), + None => { + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + Err(Error::UnterminatedBracket { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }) + } _ => unreachable!(), // We should not reach here } } @@ -737,7 +827,13 @@ impl<'a> Lexer<'a> { // empty variable name if start_id == self.offset { - return Err(Error::BadVariableName((start, self.offset - start).into())); + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + return Err(Error::BadVariableName { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } Ok(Token { @@ -767,9 +863,14 @@ impl<'a> Lexer<'a> { self.eat_and_assert(|b| b == b'\''); if (end_hex - start_hex) % 2 != 0 { - return Err(Error::UnrecognizedToken( - (start, self.offset - start).into(), - )); + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]) + .to_string(); + return Err(Error::UnrecognizedToken { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }); } Ok(Token { @@ -777,9 +878,15 @@ impl<'a> Lexer<'a> { token_type: Some(TokenType::TK_BLOB), }) } - _ => Err(Error::UnterminatedLiteral( - (start, self.offset - start).into(), - )), + _ => { + let token_text = + String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + Err(Error::UnterminatedLiteral { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }) + } } } _ => { @@ -796,9 +903,12 @@ impl<'a> Lexer<'a> { fn eat_unrecognized(&mut self) -> Result> { let start = self.offset; self.eat_while(|b| b.is_some() && !b.unwrap().is_ascii_whitespace()); - Err(Error::UnrecognizedToken( - (start, self.offset - start).into(), - )) + let token_text = String::from_utf8_lossy(&self.input[start..self.offset]).to_string(); + Err(Error::UnrecognizedToken { + span: (start, self.offset - start).into(), + token_text, + offset: start, + }) } } diff --git a/parser/src/parser.rs b/parser/src/parser.rs index 87ed5cabf..4655944d1 100644 --- a/parser/src/parser.rs +++ b/parser/src/parser.rs @@ -28,12 +28,17 @@ macro_rules! peek_expect { match (TK_ID, tt.fallback_id_if_ok()) { $(($x, TK_ID) => token,)* _ => { + let token_text = String::from_utf8_lossy(token.value).to_string(); + let offset = $parser.offset(); return Err(Error::ParseUnexpectedToken { parsed_offset: ($parser.offset(), token_len).into(), expected: &[ $($x,)* ], got: tt, + token_text: token_text.clone(), + offset, + expected_display: crate::token::TokenType::format_expected_tokens(&[$($x,)*]), }) } } @@ -219,10 +224,17 @@ impl<'a> Parser<'a> { Some(token) => { if !found_semi { let tt = token.token_type.unwrap(); + let token_text = String::from_utf8_lossy(token.value).to_string(); + let offset = self.offset(); return Err(Error::ParseUnexpectedToken { - parsed_offset: (self.offset(), 1).into(), + parsed_offset: (offset, 1).into(), expected: &[TK_SEMI], got: tt, + token_text: token_text.clone(), + offset, + expected_display: crate::token::TokenType::format_expected_tokens(&[ + TK_SEMI, + ]), }); } @@ -1472,10 +1484,18 @@ impl<'a> Parser<'a> { Some(self.parse_nm()?) } else if tok.token_type == Some(TK_LP) { if can_be_lit_str { + let token = self.peek_no_eof()?; + let token_text = String::from_utf8_lossy(token.value).to_string(); + let offset = self.offset(); return Err(Error::ParseUnexpectedToken { parsed_offset: (self.offset() - name.len(), name.len()).into(), got: TK_STRING, expected: &[TK_ID, TK_INDEXED, TK_JOIN_KW], + token_text: token_text.clone(), + offset, + expected_display: crate::token::TokenType::format_expected_tokens( + &[TK_ID, TK_INDEXED, TK_JOIN_KW], + ), }); } // can not be literal string in function name diff --git a/parser/src/token.rs b/parser/src/token.rs index ed8f416c5..0f0719741 100644 --- a/parser/src/token.rs +++ b/parser/src/token.rs @@ -548,4 +548,47 @@ impl TokenType { _ => self, } } + + /// Get user-friendly display name for error messages + pub fn user_friendly_name(&self) -> &'static str { + match self.as_str() { + Some(s) => s, + None => match self { + TokenType::TK_ID => "identifier", + TokenType::TK_STRING => "string", + TokenType::TK_INTEGER => "integer", + TokenType::TK_FLOAT => "float", + TokenType::TK_BLOB => "blob", + TokenType::TK_VARIABLE => "variable", + TokenType::TK_ILLEGAL => "illegal token", + TokenType::TK_EOF => "end of file", + TokenType::TK_LIKE_KW => "LIKE", + TokenType::TK_JOIN_KW => "JOIN", + TokenType::TK_CTIME_KW => "datetime function", + TokenType::TK_ISNOT => "IS NOT", + TokenType::TK_ISNULL => "ISNULL", + TokenType::TK_NOTNULL => "NOTNULL", + TokenType::TK_PTR => "->", + _ => "unknown token", + }, + } + } + + /// Format multiple tokens for error messages + pub fn format_expected_tokens(tokens: &[TokenType]) -> String { + if tokens.is_empty() { + return "nothing".to_string(); + } + if tokens.len() == 1 { + return tokens[0].user_friendly_name().to_string(); + } + + let names: Vec<&str> = tokens.iter().map(|t| t.user_friendly_name()).collect(); + if names.len() == 2 { + format!("{} or {}", names[0], names[1]) + } else { + let (last, rest) = names.split_last().unwrap(); + format!("{}, or {}", rest.join(", "), last) + } + } } From ff11ba08c7375248da632c29f627504719648830 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Thu, 18 Sep 2025 11:49:25 -0300 Subject: [PATCH 002/428] Makes tests pass --- parser/src/lexer.rs | 22 ++-------------------- testing/cli_tests/extensions.py | 4 ++-- 2 files changed, 4 insertions(+), 22 deletions(-) diff --git a/parser/src/lexer.rs b/parser/src/lexer.rs index 3621da888..33429917a 100644 --- a/parser/src/lexer.rs +++ b/parser/src/lexer.rs @@ -441,29 +441,11 @@ impl<'a> Lexer<'a> { self.eat_and_assert(|b| b == b'/'); break; // End of block comment } - None => { - let token_text = - String::from_utf8_lossy(&self.input[start..self.offset]) - .to_string(); - return Err(Error::UnterminatedBlockComment { - span: (start, self.offset - start).into(), - token_text, - offset: start, - }); - } + None => break, _ => {} } } - None => { - let token_text = - String::from_utf8_lossy(&self.input[start..self.offset]) - .to_string(); - return Err(Error::UnterminatedBlockComment { - span: (start, self.offset - start).into(), - token_text, - offset: start, - }); - } + None => break, _ => unreachable!(), // We should not reach here } } diff --git a/testing/cli_tests/extensions.py b/testing/cli_tests/extensions.py index 5f90014a9..ad6e99687 100755 --- a/testing/cli_tests/extensions.py +++ b/testing/cli_tests/extensions.py @@ -792,12 +792,12 @@ def test_csv(): ) turso.run_test_fn( "create virtual table t1 using csv(data='1'\\'2');", - lambda res: "unrecognized token at" in res, + lambda res: "unrecognized token " in res, "Create CSV table with malformed escape sequence", ) turso.run_test_fn( "create virtual table t1 using csv(data=\"12');", - lambda res: "non-terminated literal at" in res, + lambda res: "non-terminated literal " in res, "Create CSV table with unterminated quoted string", ) From c57567d77667fd2c7b8cb71b0de3f6c389ef64ab Mon Sep 17 00:00:00 2001 From: Duy Dang <55247256+ddwalias@users.noreply.github.com> Date: Sun, 5 Oct 2025 23:51:50 +0700 Subject: [PATCH 003/428] Adopt Hekaton solution for rollback tx --- .../mvcc/database/checkpoint_state_machine.rs | 2 +- core/mvcc/database/mod.rs | 48 +++++++++++-------- core/mvcc/database/tests.rs | 32 +++++++------ 3 files changed, 45 insertions(+), 37 deletions(-) diff --git a/core/mvcc/database/checkpoint_state_machine.rs b/core/mvcc/database/checkpoint_state_machine.rs index a207d5ad2..bfe8f51d2 100644 --- a/core/mvcc/database/checkpoint_state_machine.rs +++ b/core/mvcc/database/checkpoint_state_machine.rs @@ -168,7 +168,7 @@ impl CheckpointStateMachine { let mut exists_in_db_file = false; for (i, version) in row_versions.iter().enumerate() { let is_last = i == row_versions.len() - 1; - if let TxTimestampOrID::Timestamp(ts) = &version.begin { + if let Some(TxTimestampOrID::Timestamp(ts)) = &version.begin { if *ts <= self.checkpointed_txid_max_old { exists_in_db_file = true; } diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index f00690aff..5fa28c19c 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -115,9 +115,10 @@ impl Row { } /// A row version. +/// TODO: we can optimize this by using bitpacking for the begin and end fields. #[derive(Clone, Debug, PartialEq)] pub struct RowVersion { - pub begin: TxTimestampOrID, + pub begin: Option, pub end: Option, pub row: Row, } @@ -572,11 +573,11 @@ impl StateTransition for CommitStateMachine { if let Some(row_versions) = mvcc_store.rows.get(id) { let mut row_versions = row_versions.value().write(); for row_version in row_versions.iter_mut() { - if let TxTimestampOrID::TxID(id) = row_version.begin { + if let Some(TxTimestampOrID::TxID(id)) = row_version.begin { if id == self.tx_id { // New version is valid STARTING FROM committing transaction's end timestamp // See diagram on page 299: https://www.cs.cmu.edu/~15721-f24/papers/Hekaton.pdf - row_version.begin = TxTimestampOrID::Timestamp(*end_ts); + row_version.begin = Some(TxTimestampOrID::Timestamp(*end_ts)); mvcc_store.insert_version_raw( &mut log_record.row_versions, row_version.clone(), @@ -1091,7 +1092,7 @@ impl MvStore { assert_eq!(tx.state, TransactionState::Active); let id = row.id; let row_version = RowVersion { - begin: TxTimestampOrID::TxID(tx.tx_id), + begin: Some(TxTimestampOrID::TxID(tx.tx_id)), end: None, row, }; @@ -1535,14 +1536,14 @@ impl MvStore { // Hekaton uses oldest-to-newest order for row versions, so we reverse iterate to find the newest one // this transaction changed. for row_version in row_versions.iter_mut().rev() { - if let TxTimestampOrID::TxID(id) = row_version.begin { + if let Some(TxTimestampOrID::TxID(id)) = row_version.begin { turso_assert!( id == tx_id, "only one tx(0) should exist on loading logical log" ); // New version is valid STARTING FROM committing transaction's end timestamp // See diagram on page 299: https://www.cs.cmu.edu/~15721-f24/papers/Hekaton.pdf - row_version.begin = TxTimestampOrID::Timestamp(end_ts); + row_version.begin = Some(TxTimestampOrID::Timestamp(end_ts)); } if let Some(TxTimestampOrID::TxID(id)) = row_version.end { turso_assert!( @@ -1578,27 +1579,30 @@ impl MvStore { assert!(tx.state == TransactionState::Active || tx.state == TransactionState::Preparing); tx.state.store(TransactionState::Aborted); tracing::trace!("abort(tx_id={})", tx_id); - let write_set: Vec = tx.write_set.iter().map(|v| *v.value()).collect(); if self.is_exclusive_tx(&tx_id) { self.commit_coordinator.pager_commit_lock.unlock(); self.release_exclusive_tx(&tx_id); } - for ref id in write_set { - if let Some(row_versions) = self.rows.get(id) { + for rowid in &tx.write_set { + let rowid = rowid.value(); + if let Some(row_versions) = self.rows.get(rowid) { let mut row_versions = row_versions.value().write(); for rv in row_versions.iter_mut() { - if rv.end == Some(TxTimestampOrID::TxID(tx_id)) { + if let Some(TxTimestampOrID::TxID(id)) = rv.begin { + assert_eq!(id, tx_id); + // If the transaction has aborted, + // it marks all its new versions as garbage and sets their Begin + // and End timestamps to infinity to make them invisible + // See section 2.4: https://www.cs.cmu.edu/~15721-f24/papers/Hekaton.pdf + rv.begin = None; + rv.end = None; + } else if rv.end == Some(TxTimestampOrID::TxID(tx_id)) { // undo deletions by this transaction rv.end = None; } } - // remove insertions by this transaction - row_versions.retain(|rv| rv.begin != TxTimestampOrID::TxID(tx_id)); - if row_versions.is_empty() { - self.rows.remove(id); - } } } @@ -1747,10 +1751,11 @@ impl MvStore { // Extracts the begin timestamp from a transaction #[inline] - fn get_begin_timestamp(&self, ts_or_id: &TxTimestampOrID) -> u64 { + fn get_begin_timestamp(&self, ts_or_id: &Option) -> u64 { match ts_or_id { - TxTimestampOrID::Timestamp(ts) => *ts, - TxTimestampOrID::TxID(tx_id) => self.txs.get(tx_id).unwrap().value().begin_ts, + Some(TxTimestampOrID::Timestamp(ts)) => *ts, + Some(TxTimestampOrID::TxID(tx_id)) => self.txs.get(tx_id).unwrap().value().begin_ts, + None => 0, } } @@ -1902,7 +1907,7 @@ impl MvStore { self.insert_version( id, RowVersion { - begin: TxTimestampOrID::Timestamp(0), + begin: Some(TxTimestampOrID::Timestamp(0)), end: None, row: Row::new(id, record.get_payload().to_vec(), column_count), }, @@ -2068,8 +2073,8 @@ impl RowVersion { fn is_begin_visible(txs: &SkipMap, tx: &Transaction, rv: &RowVersion) -> bool { match rv.begin { - TxTimestampOrID::Timestamp(rv_begin_ts) => tx.begin_ts >= rv_begin_ts, - TxTimestampOrID::TxID(rv_begin) => { + Some(TxTimestampOrID::Timestamp(rv_begin_ts)) => tx.begin_ts >= rv_begin_ts, + Some(TxTimestampOrID::TxID(rv_begin)) => { let tb = txs.get(&rv_begin).unwrap(); let tb = tb.value(); let visible = match tb.state.load() { @@ -2089,6 +2094,7 @@ fn is_begin_visible(txs: &SkipMap, tx: &Transaction, rv: &Row ); visible } + None => false, } } diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs index 35a45e728..c8a95e0c7 100644 --- a/core/mvcc/database/tests.rs +++ b/core/mvcc/database/tests.rs @@ -1076,7 +1076,7 @@ fn test_snapshot_isolation_tx_visible1() { let current_tx = new_tx(4, 4, TransactionState::Preparing); - let rv_visible = |begin: TxTimestampOrID, end: Option| { + let rv_visible = |begin: Option, end: Option| { let row_version = RowVersion { begin, end, @@ -1088,60 +1088,60 @@ fn test_snapshot_isolation_tx_visible1() { // begin visible: transaction committed with ts < current_tx.begin_ts // end visible: inf - assert!(rv_visible(TxTimestampOrID::TxID(1), None)); + assert!(rv_visible(Some(TxTimestampOrID::TxID(1)), None)); // begin invisible: transaction committed with ts > current_tx.begin_ts - assert!(!rv_visible(TxTimestampOrID::TxID(2), None)); + assert!(!rv_visible(Some(TxTimestampOrID::TxID(2)), None)); // begin invisible: transaction aborted - assert!(!rv_visible(TxTimestampOrID::TxID(3), None)); + assert!(!rv_visible(Some(TxTimestampOrID::TxID(3)), None)); // begin visible: timestamp < current_tx.begin_ts // end invisible: transaction committed with ts > current_tx.begin_ts assert!(!rv_visible( - TxTimestampOrID::Timestamp(0), + Some(TxTimestampOrID::Timestamp(0)), Some(TxTimestampOrID::TxID(1)) )); // begin visible: timestamp < current_tx.begin_ts // end visible: transaction committed with ts < current_tx.begin_ts assert!(rv_visible( - TxTimestampOrID::Timestamp(0), + Some(TxTimestampOrID::Timestamp(0)), Some(TxTimestampOrID::TxID(2)) )); // begin visible: timestamp < current_tx.begin_ts // end invisible: transaction aborted assert!(!rv_visible( - TxTimestampOrID::Timestamp(0), + Some(TxTimestampOrID::Timestamp(0)), Some(TxTimestampOrID::TxID(3)) )); // begin invisible: transaction preparing - assert!(!rv_visible(TxTimestampOrID::TxID(5), None)); + assert!(!rv_visible(Some(TxTimestampOrID::TxID(5)), None)); // begin invisible: transaction committed with ts > current_tx.begin_ts - assert!(!rv_visible(TxTimestampOrID::TxID(6), None)); + assert!(!rv_visible(Some(TxTimestampOrID::TxID(6)), None)); // begin invisible: transaction active - assert!(!rv_visible(TxTimestampOrID::TxID(7), None)); + assert!(!rv_visible(Some(TxTimestampOrID::TxID(7)), None)); // begin invisible: transaction committed with ts > current_tx.begin_ts - assert!(!rv_visible(TxTimestampOrID::TxID(6), None)); + assert!(!rv_visible(Some(TxTimestampOrID::TxID(6)), None)); // begin invisible: transaction active - assert!(!rv_visible(TxTimestampOrID::TxID(7), None)); + assert!(!rv_visible(Some(TxTimestampOrID::TxID(7)), None)); // begin visible: timestamp < current_tx.begin_ts // end invisible: transaction preparing assert!(!rv_visible( - TxTimestampOrID::Timestamp(0), + Some(TxTimestampOrID::Timestamp(0)), Some(TxTimestampOrID::TxID(5)) )); // begin invisible: timestamp > current_tx.begin_ts assert!(!rv_visible( - TxTimestampOrID::Timestamp(6), + Some(TxTimestampOrID::Timestamp(6)), Some(TxTimestampOrID::TxID(6)) )); @@ -1150,9 +1150,11 @@ fn test_snapshot_isolation_tx_visible1() { // but that hasn't happened // (this is the https://avi.im/blag/2023/hekaton-paper-typo/ case, I believe!) assert!(rv_visible( - TxTimestampOrID::Timestamp(0), + Some(TxTimestampOrID::Timestamp(0)), Some(TxTimestampOrID::TxID(7)) )); + + assert!(!rv_visible(None, None)); } #[test] From 911b6791b90b516fb36ef42e47e0c2e5e913e70d Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 18 Sep 2025 01:23:26 -0300 Subject: [PATCH 004/428] when pwritev fails, clear the dirty pages add flag to `clear_page_cache` --- core/lib.rs | 11 +++-------- core/storage/page_cache.rs | 10 +++++----- core/storage/pager.rs | 14 ++++++++------ core/vdbe/execute.rs | 2 +- 4 files changed, 17 insertions(+), 20 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 5e0288670..198371ad9 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -488,7 +488,7 @@ impl Database { conn.pragma_update("cipher", format!("'{}'", encryption_opts.cipher))?; conn.pragma_update("hexkey", format!("'{}'", encryption_opts.hexkey))?; // Clear page cache so the header page can be reread from disk and decrypted using the encryption context. - pager.clear_page_cache(); + pager.clear_page_cache(false); } db.with_schema_mut(|schema| { let header_schema_cookie = pager @@ -1515,7 +1515,7 @@ impl Connection { let pager = conn.pager.read(); if db.db_state.is_initialized() { // Clear page cache so the header page can be reread from disk and decrypted using the encryption context. - pager.clear_page_cache(); + pager.clear_page_cache(false); } } Ok((io, conn)) @@ -1737,11 +1737,6 @@ impl Connection { self.pager.read().cacheflush() } - pub fn clear_page_cache(&self) -> Result<()> { - self.pager.read().clear_page_cache(); - Ok(()) - } - pub fn checkpoint(&self, mode: CheckpointMode) -> Result { if self.is_closed() { return Err(LimboError::InternalError("Connection closed".to_string())); @@ -1890,7 +1885,7 @@ impl Connection { shared_wal.enabled.store(false, Ordering::SeqCst); shared_wal.file = None; } - self.pager.write().clear_page_cache(); + self.pager.write().clear_page_cache(false); let pager = self.db.init_pager(Some(size.get() as usize))?; pager.enable_encryption(self.db.opts.enable_encryption); *self.pager.write() = Arc::new(pager); diff --git a/core/storage/page_cache.rs b/core/storage/page_cache.rs index 396332eb2..25bfe357a 100644 --- a/core/storage/page_cache.rs +++ b/core/storage/page_cache.rs @@ -425,11 +425,11 @@ impl PageCache { Err(CacheError::Full) } - pub fn clear(&mut self) -> Result<(), CacheError> { + pub fn clear(&mut self, clear_dirty: bool) -> Result<(), CacheError> { // Check all pages are clean for &entry_ptr in self.map.values() { let entry = unsafe { &*entry_ptr }; - if entry.page.is_dirty() { + if entry.page.is_dirty() && !clear_dirty { return Err(CacheError::Dirty { pgno: entry.page.get().id, }); @@ -852,7 +852,7 @@ mod tests { let key1 = insert_page(&mut cache, 1); let key2 = insert_page(&mut cache, 2); - assert!(cache.clear().is_ok()); + assert!(cache.clear(false).is_ok()); assert!(cache.get(&key1).unwrap().is_none()); assert!(cache.get(&key2).unwrap().is_none()); assert_eq!(cache.len(), 0); @@ -1141,7 +1141,7 @@ mod tests { cache.insert(key, page).unwrap(); } - cache.clear().unwrap(); + cache.clear(false).unwrap(); drop(cache); } @@ -1231,7 +1231,7 @@ mod tests { for i in 1..=3 { let _ = insert_page(&mut c, i); } - c.clear().unwrap(); + c.clear(false).unwrap(); // No elements; insert should not rely on stale hand let _ = insert_page(&mut c, 10); diff --git a/core/storage/pager.rs b/core/storage/pager.rs index bf11ef1d3..cb4e4d006 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1119,7 +1119,7 @@ impl Pager { let changed = wal.borrow_mut().begin_read_tx()?; if changed { // Someone else changed the database -> assume our page cache is invalid (this is default SQLite behavior, we can probably do better with more granular invalidation) - self.clear_page_cache(); + self.clear_page_cache(false); } Ok(()) } @@ -1800,7 +1800,7 @@ impl Pager { /// Invalidates entire page cache by removing all dirty and clean pages. Usually used in case /// of a rollback or in case we want to invalidate page cache after starting a read transaction /// right after new writes happened which would invalidate current page cache. - pub fn clear_page_cache(&self) { + pub fn clear_page_cache(&self, clear_dirty: bool) { let dirty_pages = self.dirty_pages.read(); let mut cache = self.page_cache.write(); for page_id in dirty_pages.iter() { @@ -1809,7 +1809,9 @@ impl Pager { page.clear_dirty(); } } - cache.clear().expect("Failed to clear page cache"); + cache + .clear(clear_dirty) + .expect("Failed to clear page cache"); } /// Checkpoint in Truncate mode and delete the WAL file. This method is _only_ to be called @@ -1914,7 +1916,7 @@ impl Pager { // TODO: only clear cache of things that are really invalidated self.page_cache .write() - .clear() + .clear(false) .map_err(|e| LimboError::InternalError(format!("Failed to clear page cache: {e:?}")))?; Ok(IOResult::Done(())) } @@ -2400,7 +2402,7 @@ impl Pager { is_write: bool, ) -> Result<(), LimboError> { tracing::debug!(schema_did_change); - self.clear_page_cache(); + self.clear_page_cache(is_write); if is_write { self.dirty_pages.write().clear(); } else { @@ -2483,7 +2485,7 @@ impl Pager { // might have been loaded with page 1 to initialise the connection. During initialisation, // we only read the header which is unencrypted, but the rest of the page is. If so, lets // clear the cache. - self.clear_page_cache(); + self.clear_page_cache(false); Ok(()) } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 87fbaec0a..03b6fe96a 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2138,7 +2138,7 @@ pub fn halt( ) -> Result { if err_code > 0 { // invalidate page cache in case of error - pager.clear_page_cache(); + pager.clear_page_cache(false); } match err_code { 0 => {} From 38d263096915636cd942f6f81935233e3be07d29 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Mon, 6 Oct 2025 12:15:15 +0400 Subject: [PATCH 005/428] remove unnecessary SchemaLocked error - lock() return error in case when another thread panicked while holding the same lock - we better to just panic too in any such case --- core/error.rs | 2 -- core/lib.rs | 50 +++++++++++++-------------------------- core/mvcc/database/mod.rs | 4 ++-- core/storage/pager.rs | 4 ++-- 4 files changed, 21 insertions(+), 39 deletions(-) diff --git a/core/error.rs b/core/error.rs index 3dd4841ad..76bac45f0 100644 --- a/core/error.rs +++ b/core/error.rs @@ -49,8 +49,6 @@ pub enum LimboError { ExtensionError(String), #[error("Runtime error: integer overflow")] IntegerOverflow, - #[error("Schema is locked for write")] - SchemaLocked, #[error("Runtime error: database table is locked")] TableLocked, #[error("Error: Resource is read-only")] diff --git a/core/lib.rs b/core/lib.rs index 11b85be81..29450c471 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -558,12 +558,7 @@ impl Database { let conn = Arc::new(Connection { db: self.clone(), pager: RwLock::new(pager), - schema: RwLock::new( - self.schema - .lock() - .map_err(|_| LimboError::SchemaLocked)? - .clone(), - ), + schema: RwLock::new(self.schema.lock().unwrap().clone()), database_schemas: RwLock::new(std::collections::HashMap::new()), auto_commit: AtomicBool::new(true), transaction_state: RwLock::new(TransactionState::None), @@ -835,17 +830,17 @@ impl Database { #[inline] pub(crate) fn with_schema_mut(&self, f: impl FnOnce(&mut Schema) -> Result) -> Result { - let mut schema_ref = self.schema.lock().map_err(|_| LimboError::SchemaLocked)?; + let mut schema_ref = self.schema.lock().unwrap(); let schema = Arc::make_mut(&mut *schema_ref); f(schema) } - pub(crate) fn clone_schema(&self) -> Result> { - let schema = self.schema.lock().map_err(|_| LimboError::SchemaLocked)?; - Ok(schema.clone()) + pub(crate) fn clone_schema(&self) -> Arc { + let schema = self.schema.lock().unwrap(); + schema.clone() } - pub(crate) fn update_schema_if_newer(&self, another: Arc) -> Result<()> { - let mut schema = self.schema.lock().map_err(|_| LimboError::SchemaLocked)?; + pub(crate) fn update_schema_if_newer(&self, another: Arc) { + let mut schema = self.schema.lock().unwrap(); if schema.schema_version < another.schema_version { tracing::debug!( "DB schema is outdated: {} < {}", @@ -860,7 +855,6 @@ impl Database { another.schema_version ); } - Ok(()) } pub fn get_mv_store(&self) -> Option<&Arc> { @@ -1154,7 +1148,7 @@ impl Connection { let input = str::from_utf8(&sql.as_bytes()[..byte_offset_end]) .unwrap() .trim(); - self.maybe_update_schema()?; + self.maybe_update_schema(); let pager = self.pager.read().clone(); let mode = QueryMode::new(&cmd); let (Cmd::Stmt(stmt) | Cmd::Explain(stmt) | Cmd::ExplainQueryPlan(stmt)) = cmd; @@ -1248,7 +1242,8 @@ impl Connection { reparse_result?; let schema = self.schema.read().clone(); - self.db.update_schema_if_newer(schema) + self.db.update_schema_if_newer(schema); + Ok(()) } fn reparse_schema(self: &Arc) -> Result<()> { @@ -1303,7 +1298,7 @@ impl Connection { "The supplied SQL string contains no statements".to_string(), )); } - self.maybe_update_schema()?; + self.maybe_update_schema(); let sql = sql.as_ref(); tracing::trace!("Preparing and executing batch: {}", sql); let mut parser = Parser::new(sql.as_bytes()); @@ -1337,7 +1332,7 @@ impl Connection { return Err(LimboError::InternalError("Connection closed".to_string())); } let sql = sql.as_ref(); - self.maybe_update_schema()?; + self.maybe_update_schema(); tracing::trace!("Querying: {}", sql); let mut parser = Parser::new(sql.as_bytes()); let cmd = parser.next_cmd()?; @@ -1389,7 +1384,7 @@ impl Connection { return Err(LimboError::InternalError("Connection closed".to_string())); } let sql = sql.as_ref(); - self.maybe_update_schema()?; + self.maybe_update_schema(); let mut parser = Parser::new(sql.as_bytes()); while let Some(cmd) = parser.next_cmd()? { let syms = self.syms.read(); @@ -1540,20 +1535,14 @@ impl Connection { Ok(db) } - pub fn maybe_update_schema(&self) -> Result<()> { + pub fn maybe_update_schema(&self) { let current_schema_version = self.schema.read().schema_version; - let schema = self - .db - .schema - .lock() - .map_err(|_| LimboError::SchemaLocked)?; + let schema = self.db.schema.lock().unwrap(); if matches!(self.get_tx_state(), TransactionState::None) && current_schema_version != schema.schema_version { *self.schema.write() = schema.clone(); } - - Ok(()) } /// Read schema version at current transaction @@ -2075,12 +2064,7 @@ impl Connection { ))); } - let use_indexes = self - .db - .schema - .lock() - .map_err(|_| LimboError::SchemaLocked)? - .indexes_enabled(); + let use_indexes = self.db.schema.lock().unwrap().indexes_enabled(); let use_mvcc = self.db.mv_store.is_some(); let use_views = self.db.experimental_views_enabled(); let use_strict = self.db.experimental_strict_enabled(); @@ -2598,7 +2582,7 @@ impl Statement { fn reprepare(&mut self) -> Result<()> { tracing::trace!("repreparing statement"); let conn = self.program.connection.clone(); - *conn.schema.write() = conn.db.clone_schema()?; + *conn.schema.write() = conn.db.clone_schema(); self.program = { let mut parser = Parser::new(self.program.sql.as_bytes()); let cmd = parser.next_cmd()?; diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index f00690aff..45b7cb7e8 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -654,7 +654,7 @@ impl StateTransition for CommitStateMachine { let schema_did_change = self.did_commit_schema_change; if schema_did_change { let schema = connection.schema.read().clone(); - connection.db.update_schema_if_newer(schema)?; + connection.db.update_schema_if_newer(schema); } let tx = mvcc_store.txs.get(&self.tx_id).unwrap(); let tx_unlocked = tx.value(); @@ -1606,7 +1606,7 @@ impl MvStore { > connection.db.schema.lock().unwrap().schema_version { // Connection made schema changes during tx and rolled back -> revert connection-local schema. - *connection.schema.write() = connection.db.clone_schema()?; + *connection.schema.write() = connection.db.clone_schema(); } let tx = tx_unlocked.value(); diff --git a/core/storage/pager.rs b/core/storage/pager.rs index bf11ef1d3..f2bde3d04 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1198,7 +1198,7 @@ impl Pager { if schema_did_change { let schema = connection.schema.read().clone(); - connection.db.update_schema_if_newer(schema)?; + connection.db.update_schema_if_newer(schema); } Ok(IOResult::Done(commit_status)) } @@ -2411,7 +2411,7 @@ impl Pager { } self.reset_internal_states(); if schema_did_change { - *connection.schema.write() = connection.db.clone_schema()?; + *connection.schema.write() = connection.db.clone_schema(); } if is_write { if let Some(wal) = self.wal.as_ref() { From 8dae601fac879cd28a5fc04326cae7db7db17fc5 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Mon, 6 Oct 2025 13:21:45 +0400 Subject: [PATCH 006/428] make rollback non-failing method --- core/benches/mvcc_benchmark.rs | 3 +- core/lib.rs | 33 ++++------- .../mvcc/database/checkpoint_state_machine.rs | 10 +--- core/mvcc/database/mod.rs | 9 +-- core/mvcc/database/tests.rs | 6 +- core/schema.rs | 2 +- core/storage/btree.rs | 18 +++--- core/storage/pager.rs | 59 ++++++++++--------- core/storage/wal.rs | 9 ++- core/vdbe/execute.rs | 6 +- core/vdbe/mod.rs | 19 +++--- 11 files changed, 77 insertions(+), 97 deletions(-) diff --git a/core/benches/mvcc_benchmark.rs b/core/benches/mvcc_benchmark.rs index 0ebd33fa5..7d316707d 100644 --- a/core/benches/mvcc_benchmark.rs +++ b/core/benches/mvcc_benchmark.rs @@ -36,8 +36,7 @@ fn bench(c: &mut Criterion) { let conn = db.conn.clone(); let tx_id = db.mvcc_store.begin_tx(conn.get_pager().clone()).unwrap(); db.mvcc_store - .rollback_tx(tx_id, conn.get_pager().clone(), &conn) - .unwrap(); + .rollback_tx(tx_id, conn.get_pager().clone(), &conn); }) }); diff --git a/core/lib.rs b/core/lib.rs index 29450c471..f45715dc2 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -498,7 +498,7 @@ impl Database { let result = schema .make_from_btree(None, pager.clone(), &syms) .or_else(|e| { - pager.end_read_tx()?; + pager.end_read_tx(); Err(e) }); if let Err(LimboError::ExtensionError(e)) = result { @@ -1195,11 +1195,11 @@ impl Connection { 0 } Err(err) => { - pager.end_read_tx().expect("read txn must be finished"); + pager.end_read_tx(); return Err(err); } }; - pager.end_read_tx().expect("read txn must be finished"); + pager.end_read_tx(); let db_schema_version = self.db.schema.lock().unwrap().schema_version; tracing::debug!( @@ -1236,7 +1236,7 @@ impl Connection { // close opened transaction if it was kept open // (in most cases, it will be automatically closed if stmt was executed properly) if previous == TransactionState::Read { - pager.end_read_tx().expect("read txn must be finished"); + pager.end_read_tx(); } reparse_result?; @@ -1654,7 +1654,7 @@ impl Connection { let pager = self.pager.read(); pager.begin_read_tx()?; pager.io.block(|| pager.begin_write_tx()).inspect_err(|_| { - pager.end_read_tx().expect("read txn must be closed"); + pager.end_read_tx(); })?; // start write transaction and disable auto-commit mode as SQL can be executed within WAL session (at caller own risk) @@ -1702,13 +1702,11 @@ impl Connection { wal.end_read_tx(); } - let rollback_err = if !force_commit { + if !force_commit { // remove all non-commited changes in case if WAL session left some suffix without commit frame - pager.rollback(false, self, true).err() - } else { - None - }; - if let Some(err) = commit_err.or(rollback_err) { + pager.rollback(false, self, true); + } + if let Some(err) = commit_err { return Err(err); } } @@ -1752,12 +1750,7 @@ impl Connection { _ => { if !self.mvcc_enabled() { let pager = self.pager.read(); - pager.io.block(|| { - pager.end_tx( - true, // rollback = true for close - self, - ) - })?; + pager.rollback_tx(self); } self.set_tx_state(TransactionState::None); } @@ -2632,12 +2625,8 @@ impl Statement { } let state = self.program.connection.get_tx_state(); if let TransactionState::Write { .. } = state { - let end_tx_res = self.pager.end_tx(true, &self.program.connection)?; + self.pager.rollback_tx(&self.program.connection); self.program.connection.set_tx_state(TransactionState::None); - assert!( - matches!(end_tx_res, IOResult::Done(_)), - "end_tx should not return IO as it should just end txn without flushing anything. Got {end_tx_res:?}" - ); } } res diff --git a/core/mvcc/database/checkpoint_state_machine.rs b/core/mvcc/database/checkpoint_state_machine.rs index a207d5ad2..fee93c2d8 100644 --- a/core/mvcc/database/checkpoint_state_machine.rs +++ b/core/mvcc/database/checkpoint_state_machine.rs @@ -548,7 +548,7 @@ impl CheckpointStateMachine { CheckpointState::CommitPagerTxn => { tracing::debug!("Committing pager transaction"); - let result = self.pager.end_tx(false, &self.connection)?; + let result = self.pager.commit_tx(&self.connection)?; match result { IOResult::Done(_) => { self.state = CheckpointState::TruncateLogicalLog; @@ -642,16 +642,12 @@ impl StateTransition for CheckpointStateMachine { Err(err) => { tracing::info!("Error in checkpoint state machine: {err}"); if self.lock_states.pager_write_tx { - let rollback = true; - self.pager - .io - .block(|| self.pager.end_tx(rollback, self.connection.as_ref())) - .expect("failed to end pager write tx"); + self.pager.rollback_tx(self.connection.as_ref()); if self.update_transaction_state { *self.connection.transaction_state.write() = TransactionState::None; } } else if self.lock_states.pager_read_tx { - self.pager.end_read_tx().unwrap(); + self.pager.end_read_tx(); if self.update_transaction_state { *self.connection.transaction_state.write() = TransactionState::None; } diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 45b7cb7e8..a03fba7ba 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -1566,12 +1566,7 @@ impl MvStore { /// # Arguments /// /// * `tx_id` - The ID of the transaction to abort. - pub fn rollback_tx( - &self, - tx_id: TxID, - _pager: Arc, - connection: &Connection, - ) -> Result<()> { + pub fn rollback_tx(&self, tx_id: TxID, _pager: Arc, connection: &Connection) { let tx_unlocked = self.txs.get(&tx_id).unwrap(); let tx = tx_unlocked.value(); *connection.mv_tx.write() = None; @@ -1615,8 +1610,6 @@ impl MvStore { // FIXME: verify that we can already remove the transaction here! // Maybe it's fine for snapshot isolation, but too early for serializable? self.remove_tx(tx_id); - - Ok(()) } /// Returns true if the given transaction is the exclusive transaction. diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs index 35a45e728..e559bda52 100644 --- a/core/mvcc/database/tests.rs +++ b/core/mvcc/database/tests.rs @@ -347,8 +347,7 @@ fn test_rollback() { .unwrap(); assert_eq!(row3, row4); db.mvcc_store - .rollback_tx(tx1, db.conn.pager.read().clone(), &db.conn) - .unwrap(); + .rollback_tx(tx1, db.conn.pager.read().clone(), &db.conn); let tx2 = db .mvcc_store .begin_tx(db.conn.pager.read().clone()) @@ -592,8 +591,7 @@ fn test_lost_update() { )); // hack: in the actual tursodb database we rollback the mvcc tx ourselves, so manually roll it back here db.mvcc_store - .rollback_tx(tx3, conn3.pager.read().clone(), &conn3) - .unwrap(); + .rollback_tx(tx3, conn3.pager.read().clone(), &conn3); commit_tx(db.mvcc_store.clone(), &conn2, tx2).unwrap(); assert!(matches!( diff --git a/core/schema.rs b/core/schema.rs index 5188f962f..9208c5d1d 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -472,7 +472,7 @@ impl Schema { pager.io.block(|| cursor.next())?; } - pager.end_read_tx()?; + pager.end_read_tx(); self.populate_indices(from_sql_indexes, automatic_indices)?; diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 0dfaff8c1..9ab4c689e 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -8183,7 +8183,7 @@ mod tests { // force allocate page1 with a transaction pager.begin_read_tx().unwrap(); run_until_done(|| pager.begin_write_tx(), &pager).unwrap(); - run_until_done(|| pager.end_tx(false, &conn), &pager).unwrap(); + run_until_done(|| pager.commit_tx(&conn), &pager).unwrap(); let page2 = run_until_done(|| pager.allocate_page(), &pager).unwrap(); btree_init_page(&page2, PageType::TableLeaf, 0, pager.usable_space()); @@ -8495,7 +8495,7 @@ mod tests { pager.deref(), ) .unwrap(); - pager.io.block(|| pager.end_tx(false, &conn)).unwrap(); + pager.io.block(|| pager.commit_tx(&conn)).unwrap(); pager.begin_read_tx().unwrap(); // FIXME: add sorted vector instead, should be okay for small amounts of keys for now :P, too lazy to fix right now let _c = cursor.move_to_root().unwrap(); @@ -8524,7 +8524,7 @@ mod tests { println!("btree after:\n{btree_after}"); panic!("invalid btree"); } - pager.end_read_tx().unwrap(); + pager.end_read_tx(); } pager.begin_read_tx().unwrap(); tracing::info!( @@ -8546,7 +8546,7 @@ mod tests { "key {key} is not found, got {cursor_rowid}" ); } - pager.end_read_tx().unwrap(); + pager.end_read_tx(); } } @@ -8641,7 +8641,7 @@ mod tests { if let Some(c) = c { pager.io.wait_for_completion(c).unwrap(); } - pager.io.block(|| pager.end_tx(false, &conn)).unwrap(); + pager.io.block(|| pager.commit_tx(&conn)).unwrap(); } // Check that all keys can be found by seeking @@ -8702,7 +8702,7 @@ mod tests { } prev = Some(cur); } - pager.end_read_tx().unwrap(); + pager.end_read_tx(); } } @@ -8848,7 +8848,7 @@ mod tests { if let Some(c) = c { pager.io.wait_for_completion(c).unwrap(); } - pager.io.block(|| pager.end_tx(false, &conn)).unwrap(); + pager.io.block(|| pager.commit_tx(&conn)).unwrap(); } // Final validation @@ -8856,7 +8856,7 @@ mod tests { sorted_keys.sort(); validate_expected_keys(&pager, &mut cursor, &sorted_keys, seed); - pager.end_read_tx().unwrap(); + pager.end_read_tx(); } } @@ -8939,7 +8939,7 @@ mod tests { "key {key:?} is not found, seed: {seed}" ); } - pager.end_read_tx().unwrap(); + pager.end_read_tx(); } #[test] diff --git a/core/storage/pager.rs b/core/storage/pager.rs index f2bde3d04..32f9e834e 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1161,33 +1161,20 @@ impl Pager { } #[instrument(skip_all, level = Level::DEBUG)] - pub fn end_tx( - &self, - rollback: bool, - connection: &Connection, - ) -> Result> { + pub fn commit_tx(&self, connection: &Connection) -> Result> { if connection.is_nested_stmt.load(Ordering::SeqCst) { // Parent statement will handle the transaction rollback. return Ok(IOResult::Done(PagerCommitResult::Rollback)); } - tracing::trace!("end_tx(rollback={})", rollback); let Some(wal) = self.wal.as_ref() else { // TODO: Unsure what the semantics of "end_tx" is for in-memory databases, ephemeral tables and ephemeral indexes. return Ok(IOResult::Done(PagerCommitResult::Rollback)); }; - let (is_write, schema_did_change) = match connection.get_tx_state() { + let (_, schema_did_change) = match connection.get_tx_state() { TransactionState::Write { schema_did_change } => (true, schema_did_change), _ => (false, false), }; - tracing::trace!("end_tx(schema_did_change={})", schema_did_change); - if rollback { - if is_write { - wal.borrow().end_write_tx(); - } - wal.borrow().end_read_tx(); - self.rollback(schema_did_change, connection, is_write)?; - return Ok(IOResult::Done(PagerCommitResult::Rollback)); - } + tracing::trace!("commit_tx(schema_did_change={})", schema_did_change); let commit_status = return_if_io!(self.commit_dirty_pages( connection.is_wal_auto_checkpoint_disabled(), connection.get_sync_mode(), @@ -1204,12 +1191,33 @@ impl Pager { } #[instrument(skip_all, level = Level::DEBUG)] - pub fn end_read_tx(&self) -> Result<()> { + pub fn rollback_tx(&self, connection: &Connection) { + if connection.is_nested_stmt.load(Ordering::SeqCst) { + // Parent statement will handle the transaction rollback. + return; + } let Some(wal) = self.wal.as_ref() else { - return Ok(()); + // TODO: Unsure what the semantics of "end_tx" is for in-memory databases, ephemeral tables and ephemeral indexes. + return; + }; + let (is_write, schema_did_change) = match connection.get_tx_state() { + TransactionState::Write { schema_did_change } => (true, schema_did_change), + _ => (false, false), + }; + tracing::trace!("rollback_tx(schema_did_change={})", schema_did_change); + if is_write { + wal.borrow().end_write_tx(); + } + wal.borrow().end_read_tx(); + self.rollback(schema_did_change, connection, is_write); + } + + #[instrument(skip_all, level = Level::DEBUG)] + pub fn end_read_tx(&self) { + let Some(wal) = self.wal.as_ref() else { + return; }; wal.borrow().end_read_tx(); - Ok(()) } /// Reads a page from disk (either WAL or DB file) bypassing page-cache @@ -2393,12 +2401,7 @@ impl Pager { } #[instrument(skip_all, level = Level::DEBUG)] - pub fn rollback( - &self, - schema_did_change: bool, - connection: &Connection, - is_write: bool, - ) -> Result<(), LimboError> { + pub fn rollback(&self, schema_did_change: bool, connection: &Connection, is_write: bool) { tracing::debug!(schema_did_change); self.clear_page_cache(); if is_write { @@ -2415,11 +2418,9 @@ impl Pager { } if is_write { if let Some(wal) = self.wal.as_ref() { - wal.borrow_mut().rollback()?; + wal.borrow_mut().rollback(); } } - - Ok(()) } fn reset_internal_states(&self) { @@ -2764,7 +2765,7 @@ mod ptrmap_tests { use super::*; use crate::io::{MemoryIO, OpenFlags, IO}; use crate::storage::buffer_pool::BufferPool; - use crate::storage::database::{DatabaseFile, DatabaseStorage}; + use crate::storage::database::DatabaseFile; use crate::storage::page_cache::PageCache; use crate::storage::pager::Pager; use crate::storage::sqlite3_ondisk::PageSize; diff --git a/core/storage/wal.rs b/core/storage/wal.rs index c590219bd..1f55e1cc0 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -302,7 +302,7 @@ pub trait Wal: Debug { fn get_checkpoint_seq(&self) -> u32; fn get_max_frame(&self) -> u64; fn get_min_frame(&self) -> u64; - fn rollback(&mut self) -> Result<()>; + fn rollback(&mut self); /// Return unique set of pages changed **after** frame_watermark position and until current WAL session max_frame_no fn changed_pages_after(&self, frame_watermark: u64) -> Result>; @@ -1351,8 +1351,8 @@ impl Wal for WalFile { self.min_frame.load(Ordering::Acquire) } - #[instrument(err, skip_all, level = Level::DEBUG)] - fn rollback(&mut self) -> Result<()> { + #[instrument(skip_all, level = Level::DEBUG)] + fn rollback(&mut self) { let (max_frame, last_checksum) = { let shared = self.get_shared(); let max_frame = shared.max_frame.load(Ordering::Acquire); @@ -1369,7 +1369,6 @@ impl Wal for WalFile { self.last_checksum = last_checksum; self.max_frame.store(max_frame, Ordering::Release); self.reset_internal_states(); - Ok(()) } #[instrument(skip_all, level = Level::DEBUG)] @@ -2825,7 +2824,7 @@ pub mod test { } } drop(w); - conn2.pager.write().end_read_tx().unwrap(); + conn2.pager.write().end_read_tx(); conn1 .execute("create table test(id integer primary key, value text)") diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 87fbaec0a..ec5d6f3f9 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2372,7 +2372,7 @@ pub fn op_transaction_inner( // That is, if the transaction had not started, end the read transaction so that next time we // start a new one. if matches!(current_state, TransactionState::None) { - pager.end_read_tx()?; + pager.end_read_tx(); conn.set_tx_state(TransactionState::None); } assert_eq!(conn.get_tx_state(), current_state); @@ -2456,10 +2456,10 @@ pub fn op_auto_commit( // TODO(pere): add rollback I/O logic once we implement rollback journal if let Some(mv_store) = mv_store { if let Some(tx_id) = conn.get_mv_tx_id() { - mv_store.rollback_tx(tx_id, pager.clone(), &conn)?; + mv_store.rollback_tx(tx_id, pager.clone(), &conn); } } else { - return_if_io!(pager.end_tx(true, &conn)); + pager.rollback_tx(&conn); } conn.set_tx_state(TransactionState::None); conn.auto_commit.store(true, Ordering::SeqCst); diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index fa1a88df8..ad3c7ad3b 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -30,7 +30,7 @@ use crate::{ function::{AggFunc, FuncCtx}, mvcc::{database::CommitStateMachine, LocalClock}, state_machine::StateMachine, - storage::sqlite3_ondisk::SmallVec, + storage::{pager::PagerCommitResult, sqlite3_ondisk::SmallVec}, translate::{collate::CollationSeq, plan::TableReferences}, types::{IOCompletions, IOResult, RawSlice, TextRef}, vdbe::{ @@ -41,7 +41,7 @@ use crate::{ }, metrics::StatementMetrics, }, - IOExt, RefValue, + RefValue, }; use crate::{ @@ -533,7 +533,7 @@ impl Program { // Connection is closed for whatever reason, rollback the transaction. let state = self.connection.get_tx_state(); if let TransactionState::Write { .. } = state { - pager.io.block(|| pager.end_tx(true, &self.connection))?; + pager.rollback_tx(&self.connection); } return Err(LimboError::InternalError("Connection closed".to_string())); } @@ -588,7 +588,7 @@ impl Program { // Connection is closed for whatever reason, rollback the transaction. let state = self.connection.get_tx_state(); if let TransactionState::Write { .. } = state { - pager.io.block(|| pager.end_tx(true, &self.connection))?; + pager.rollback_tx(&self.connection); } return Err(LimboError::InternalError("Connection closed".to_string())); } @@ -636,7 +636,7 @@ impl Program { // Connection is closed for whatever reason, rollback the transaction. let state = self.connection.get_tx_state(); if let TransactionState::Write { .. } = state { - pager.io.block(|| pager.end_tx(true, &self.connection))?; + pager.rollback_tx(&self.connection); } return Err(LimboError::InternalError("Connection closed".to_string())); } @@ -888,7 +888,7 @@ impl Program { ), TransactionState::Read => { connection.set_tx_state(TransactionState::None); - pager.end_read_tx()?; + pager.end_read_tx(); Ok(IOResult::Done(())) } TransactionState::None => Ok(IOResult::Done(())), @@ -914,7 +914,12 @@ impl Program { connection: &Connection, rollback: bool, ) -> Result> { - let cacheflush_status = pager.end_tx(rollback, connection)?; + let cacheflush_status = if !rollback { + pager.commit_tx(connection)? + } else { + pager.rollback_tx(connection); + IOResult::Done(PagerCommitResult::Rollback) + }; match cacheflush_status { IOResult::Done(_) => { if self.change_cnt_on { From 48ca3864b86da611afac297f1c71e489d00bdb5d Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Mon, 6 Oct 2025 13:22:26 +0400 Subject: [PATCH 007/428] properly abort statement in case of reset (when statement wasn't executed till completion) and interrupt --- core/lib.rs | 2 + core/vdbe/builder.rs | 2 + core/vdbe/mod.rs | 101 +++++++++++++++++++++++++------------------ 3 files changed, 63 insertions(+), 42 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index f45715dc2..afbcfa09b 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -2722,6 +2722,8 @@ impl Statement { pub fn _reset(&mut self, max_registers: Option, max_cursors: Option) { self.state.reset(max_registers, max_cursors); + self.program + .abort(self.mv_store.as_ref(), &self.pager, None); self.busy = false; self.busy_timeout = None; } diff --git a/core/vdbe/builder.rs b/core/vdbe/builder.rs index 3d1a333ec..c2b6741aa 100644 --- a/core/vdbe/builder.rs +++ b/core/vdbe/builder.rs @@ -16,6 +16,7 @@ use crate::{ expr::ParamState, plan::{ResultSetColumn, TableReferences}, }, + vdbe::PROGRAM_STATE_ACTIVE, CaptureDataChangesMode, Connection, Value, VirtualTable, }; @@ -1019,6 +1020,7 @@ impl ProgramBuilder { table_references: self.table_references, sql: sql.to_string(), accesses_db: !matches!(self.txn_mode, TransactionMode::None), + program_state: PROGRAM_STATE_ACTIVE.into(), } } } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index ad3c7ad3b..ed7fa4a12 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -66,7 +66,7 @@ use std::{ collections::HashMap, num::NonZero, sync::{ - atomic::{AtomicI64, Ordering}, + atomic::{AtomicI64, AtomicU32, Ordering}, Arc, }, }; @@ -483,6 +483,10 @@ macro_rules! get_cursor { }; } +const PROGRAM_STATE_ACTIVE: u32 = 0; +const PROGRAM_STATE_ABORTED: u32 = 1; +const PROGRAM_STATE_DONE: u32 = 2; + pub struct Program { pub max_registers: usize, // we store original indices because we don't want to create new vec from @@ -501,6 +505,9 @@ pub struct Program { /// Used to determine whether we need to check for schema changes when /// starting a transaction. pub accesses_db: bool, + /// Current state of the program + /// Used to execute abort only once + pub program_state: AtomicU32, } impl Program { @@ -641,6 +648,7 @@ impl Program { return Err(LimboError::InternalError("Connection closed".to_string())); } if state.is_interrupted() { + self.abort(mv_store, &pager, None); return Ok(StepResult::Interrupt); } if let Some(io) = &state.io_completions { @@ -649,7 +657,7 @@ impl Program { } if let Some(err) = io.get_error() { let err = err.into(); - handle_program_error(&pager, &self.connection, &err, mv_store)?; + self.abort(mv_store, &pager, Some(&err)); return Err(err); } state.io_completions = None; @@ -672,6 +680,8 @@ impl Program { Ok(InsnFunctionStepResult::Done) => { // Instruction completed execution state.metrics.insn_executed = state.metrics.insn_executed.saturating_add(1); + self.program_state + .store(PROGRAM_STATE_DONE, Ordering::SeqCst); return Ok(StepResult::Done); } Ok(InsnFunctionStepResult::IO(io)) => { @@ -693,7 +703,7 @@ impl Program { return Ok(StepResult::Busy); } Err(err) => { - handle_program_error(&pager, &self.connection, &err, mv_store)?; + self.abort(mv_store, &pager, Some(&err)); return Err(err); } } @@ -946,6 +956,52 @@ impl Program { ) -> Result> { commit_state.step(mv_store) } + + /// Aborts the program due to various conditions (explicit error, interrupt or reset of unfinished statement) by rolling back the transaction + /// This method is no-op if program was already finished (either aborted or executed to completion) + pub fn abort( + &self, + mv_store: Option<&Arc>, + pager: &Arc, + err: Option<&LimboError>, + ) { + let Ok(..) = self.program_state.compare_exchange( + PROGRAM_STATE_ACTIVE, + PROGRAM_STATE_ABORTED, + Ordering::SeqCst, + Ordering::SeqCst, + ) else { + // no need to abort: program was either already aborted or executed to completion successfully + return; + }; + + if self.connection.is_nested_stmt.load(Ordering::SeqCst) { + // Errors from nested statements are handled by the parent statement. + return; + } + if self.connection.get_tx_state() == TransactionState::None { + return; + } + match err { + // Transaction errors, e.g. trying to start a nested transaction, do not cause a rollback. + Some(LimboError::TxError(_)) => {} + // Table locked errors, e.g. trying to checkpoint in an interactive transaction, do not cause a rollback. + Some(LimboError::TableLocked) => {} + // Busy errors do not cause a rollback. + Some(LimboError::Busy) => {} + _ => { + if let Some(mv_store) = mv_store { + if let Some(tx_id) = self.connection.get_mv_tx_id() { + self.connection.auto_commit.store(true, Ordering::SeqCst); + mv_store.rollback_tx(tx_id, pager.clone(), &self.connection); + } + } else { + pager.rollback_tx(&self.connection); + } + self.connection.set_tx_state(TransactionState::None); + } + } + } } fn make_record(registers: &[Register], start_reg: &usize, count: &usize) -> ImmutableRecord { @@ -1068,42 +1124,3 @@ impl Row { self.count } } - -/// Handle a program error by rolling back the transaction -pub fn handle_program_error( - pager: &Arc, - connection: &Connection, - err: &LimboError, - mv_store: Option<&Arc>, -) -> Result<()> { - if connection.is_nested_stmt.load(Ordering::SeqCst) { - // Errors from nested statements are handled by the parent statement. - return Ok(()); - } - match err { - // Transaction errors, e.g. trying to start a nested transaction, do not cause a rollback. - LimboError::TxError(_) => {} - // Table locked errors, e.g. trying to checkpoint in an interactive transaction, do not cause a rollback. - LimboError::TableLocked => {} - // Busy errors do not cause a rollback. - LimboError::Busy => {} - _ => { - if let Some(mv_store) = mv_store { - if let Some(tx_id) = connection.get_mv_tx_id() { - connection.set_tx_state(TransactionState::None); - connection.auto_commit.store(true, Ordering::SeqCst); - mv_store.rollback_tx(tx_id, pager.clone(), connection)?; - } - } else { - pager - .io - .block(|| pager.end_tx(true, connection)) - .inspect_err(|e| { - tracing::error!("end_tx failed: {e}"); - })?; - } - connection.set_tx_state(TransactionState::None); - } - } - Ok(()) -} From a3ca5f6bf26c22f9adc94e1bfe5382dce03a6fa7 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Mon, 6 Oct 2025 13:27:42 +0400 Subject: [PATCH 008/428] implement Drop for Statement --- core/lib.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/lib.rs b/core/lib.rs index afbcfa09b..d4818f3d7 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -2741,6 +2741,12 @@ impl Statement { } } +impl Drop for Statement { + fn drop(&mut self) { + self.reset(); + } +} + pub type Row = vdbe::Row; pub type StepResult = vdbe::StepResult; From afe9a1949641934bb1bcf8898c195ecc9fac1fd4 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Mon, 6 Oct 2025 13:30:05 +0400 Subject: [PATCH 009/428] add simple integration test --- .../query_processing/test_read_path.rs | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/tests/integration/query_processing/test_read_path.rs b/tests/integration/query_processing/test_read_path.rs index a0b72cd60..98874283b 100644 --- a/tests/integration/query_processing/test_read_path.rs +++ b/tests/integration/query_processing/test_read_path.rs @@ -1,4 +1,4 @@ -use crate::common::TempDatabase; +use crate::common::{limbo_exec_rows, TempDatabase}; use turso_core::{StepResult, Value}; #[test] @@ -876,3 +876,24 @@ fn test_upsert_parameters_order() -> anyhow::Result<()> { ); Ok(()) } + +#[test] +fn test_multiple_connections_visibility() -> anyhow::Result<()> { + let tmp_db = TempDatabase::new_with_rusqlite( + "CREATE TABLE test (k INTEGER PRIMARY KEY, v INTEGER);", + false, + ); + let conn1 = tmp_db.connect_limbo(); + let conn2 = tmp_db.connect_limbo(); + conn1.execute("BEGIN")?; + conn1.execute("INSERT INTO test VALUES (1, 2), (3, 4)")?; + let mut stmt = conn2.prepare("SELECT COUNT(*) FROM test").unwrap(); + let _ = stmt.step().unwrap(); + // intentionally drop not-fully-consumed statement in order to check that on Drop statement will execute reset with proper cleanup + drop(stmt); + conn1.execute("COMMIT")?; + + let rows = limbo_exec_rows(&tmp_db, &conn2, "SELECT COUNT(*) FROM test"); + assert_eq!(rows, vec![vec![rusqlite::types::Value::Integer(2)]]); + Ok(()) +} From 4877180784acfad61dde30bd0325406f16497765 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Mon, 6 Oct 2025 13:34:16 +0400 Subject: [PATCH 010/428] fix clippy --- core/lib.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index d4818f3d7..75b85849d 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -497,10 +497,7 @@ impl Database { schema.schema_version = header_schema_cookie; let result = schema .make_from_btree(None, pager.clone(), &syms) - .or_else(|e| { - pager.end_read_tx(); - Err(e) - }); + .inspect_err(|_| pager.end_read_tx()); if let Err(LimboError::ExtensionError(e)) = result { // this means that a vtab exists and we no longer have the module loaded. we print // a warning to the user to load the module From 0ace1f9d903edfbb4f08199c7d81bb6b92d414fc Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Mon, 6 Oct 2025 15:10:37 +0400 Subject: [PATCH 011/428] fix code in order to not reset internal prepared statements created during DDL execution --- core/lib.rs | 14 +++++++++++--- core/vdbe/execute.rs | 10 ++++++++-- core/vdbe/mod.rs | 6 +++--- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 75b85849d..f331a8952 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -47,6 +47,7 @@ use crate::types::{WalFrameInfo, WalState}; #[cfg(feature = "fs")] use crate::util::{OpenMode, OpenOptions}; use crate::vdbe::metrics::ConnectionMetrics; +use crate::vdbe::PROGRAM_STATE_DONE; use crate::vtab::VirtualTable; use crate::{incremental::view::AllViewsTxState, translate::emitter::TransactionMode}; use core::str; @@ -2572,6 +2573,7 @@ impl Statement { fn reprepare(&mut self) -> Result<()> { tracing::trace!("repreparing statement"); let conn = self.program.connection.clone(); + *conn.schema.write() = conn.db.clone_schema(); self.program = { let mut parser = Parser::new(self.program.sql.as_bytes()); @@ -2600,7 +2602,7 @@ impl Statement { QueryMode::Explain => (EXPLAIN_COLUMNS.len(), 0), QueryMode::ExplainQueryPlan => (EXPLAIN_QUERY_PLAN_COLUMNS.len(), 0), }; - self._reset(Some(max_registers), Some(cursor_count)); + self.reset_internal(Some(max_registers), Some(cursor_count)); // Load the parameters back into the state self.state.parameters = parameters; Ok(()) @@ -2714,10 +2716,16 @@ impl Statement { } pub fn reset(&mut self) { - self._reset(None, None); + self.reset_internal(None, None); } - pub fn _reset(&mut self, max_registers: Option, max_cursors: Option) { + pub(crate) fn mark_as_done(&self) { + self.program + .program_state + .store(PROGRAM_STATE_DONE, Ordering::SeqCst); + } + + fn reset_internal(&mut self, max_registers: Option, max_cursors: Option) { self.state.reset(max_registers, max_cursors); self.program .abort(self.mv_store.as_ref(), &self.pager, None); diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index ec5d6f3f9..d3ef412f1 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -8018,12 +8018,15 @@ pub fn op_drop_column( let schema = conn.schema.read(); for (view_name, view) in schema.views.iter() { let view_select_sql = format!("SELECT * FROM {view_name}"); - conn.prepare(view_select_sql.as_str()).map_err(|e| { + let stmt = conn.prepare(view_select_sql.as_str()).map_err(|e| { LimboError::ParseError(format!( "cannot drop column \"{}\": referenced in VIEW {view_name}: {}", column_name, view.sql, )) })?; + // this is internal statement running during active Program execution + // so, we must not interact with transaction state and explicitly mark this statement as done avoiding cleanup on reset + stmt.mark_as_done(); } } @@ -8149,12 +8152,15 @@ pub fn op_alter_column( for (view_name, view) in schema.views.iter() { let view_select_sql = format!("SELECT * FROM {view_name}"); // FIXME: this should rewrite the view to reference the new column name - conn.prepare(view_select_sql.as_str()).map_err(|e| { + let stmt = conn.prepare(view_select_sql.as_str()).map_err(|e| { LimboError::ParseError(format!( "cannot rename column \"{}\": referenced in VIEW {view_name}: {}", old_column_name, view.sql, )) })?; + // this is internal statement running during active Program execution + // so, we must not interact with transaction state and explicitly mark this statement as done avoiding cleanup on reset + stmt.mark_as_done(); } } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index ed7fa4a12..ed09762e1 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -483,9 +483,9 @@ macro_rules! get_cursor { }; } -const PROGRAM_STATE_ACTIVE: u32 = 0; -const PROGRAM_STATE_ABORTED: u32 = 1; -const PROGRAM_STATE_DONE: u32 = 2; +pub(crate) const PROGRAM_STATE_ACTIVE: u32 = 1; +pub(crate) const PROGRAM_STATE_ABORTED: u32 = 2; +pub(crate) const PROGRAM_STATE_DONE: u32 = 3; pub struct Program { pub max_registers: usize, From e2f73106177aa7f52fd7bea84f562edb37daa8e0 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Mon, 6 Oct 2025 17:51:43 +0400 Subject: [PATCH 012/428] add explicit tracker for Txn cleanup necessary for statement --- core/incremental/expr_compiler.rs | 5 -- core/lib.rs | 28 +++--- core/mvcc/database/mod.rs | 4 +- core/vdbe/builder.rs | 2 - core/vdbe/execute.rs | 19 ++-- core/vdbe/mod.rs | 88 ++++++++----------- .../query_processing/test_transactions.rs | 1 + 7 files changed, 62 insertions(+), 85 deletions(-) diff --git a/core/incremental/expr_compiler.rs b/core/incremental/expr_compiler.rs index 44b2cef49..f823c870d 100644 --- a/core/incremental/expr_compiler.rs +++ b/core/incremental/expr_compiler.rs @@ -458,11 +458,6 @@ impl CompiledExpression { "Expression evaluation produced unexpected row".to_string(), )); } - crate::vdbe::execute::InsnFunctionStepResult::Interrupt => { - return Err(crate::LimboError::InternalError( - "Expression evaluation was interrupted".to_string(), - )); - } crate::vdbe::execute::InsnFunctionStepResult::Step => { pc = state.pc as usize; } diff --git a/core/lib.rs b/core/lib.rs index f331a8952..44297643b 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -47,7 +47,6 @@ use crate::types::{WalFrameInfo, WalState}; #[cfg(feature = "fs")] use crate::util::{OpenMode, OpenOptions}; use crate::vdbe::metrics::ConnectionMetrics; -use crate::vdbe::PROGRAM_STATE_DONE; use crate::vtab::VirtualTable; use crate::{incremental::view::AllViewsTxState, translate::emitter::TransactionMode}; use core::str; @@ -2424,6 +2423,12 @@ pub struct Statement { busy_timeout: Option, } +impl Drop for Statement { + fn drop(&mut self) { + self.reset(); + } +} + impl Statement { pub fn new( program: vdbe::Program, @@ -2719,16 +2724,15 @@ impl Statement { self.reset_internal(None, None); } - pub(crate) fn mark_as_done(&self) { - self.program - .program_state - .store(PROGRAM_STATE_DONE, Ordering::SeqCst); - } - fn reset_internal(&mut self, max_registers: Option, max_cursors: Option) { + // as abort uses auto_txn_cleanup value - it needs to be called before state.reset + self.program.abort( + self.mv_store.as_ref(), + &self.pager, + None, + &mut self.state.auto_txn_cleanup, + ); self.state.reset(max_registers, max_cursors); - self.program - .abort(self.mv_store.as_ref(), &self.pager, None); self.busy = false; self.busy_timeout = None; } @@ -2746,12 +2750,6 @@ impl Statement { } } -impl Drop for Statement { - fn drop(&mut self) { - self.reset(); - } -} - pub type Row = vdbe::Row; pub type StepResult = vdbe::StepResult; diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index a03fba7ba..d9fd32f4b 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -1342,7 +1342,7 @@ impl MvStore { &self, pager: Arc, maybe_existing_tx_id: Option, - ) -> Result> { + ) -> Result { if !self.blocking_checkpoint_lock.read() { // If there is a stop-the-world checkpoint in progress, we cannot begin any transaction at all. return Err(LimboError::Busy); @@ -1378,7 +1378,7 @@ impl MvStore { ); tracing::debug!("begin_exclusive_tx: tx_id={} succeeded", tx_id); self.txs.insert(tx_id, tx); - Ok(IOResult::Done(tx_id)) + Ok(tx_id) } /// Begins a new transaction in the database. diff --git a/core/vdbe/builder.rs b/core/vdbe/builder.rs index c2b6741aa..3d1a333ec 100644 --- a/core/vdbe/builder.rs +++ b/core/vdbe/builder.rs @@ -16,7 +16,6 @@ use crate::{ expr::ParamState, plan::{ResultSetColumn, TableReferences}, }, - vdbe::PROGRAM_STATE_ACTIVE, CaptureDataChangesMode, Connection, Value, VirtualTable, }; @@ -1020,7 +1019,6 @@ impl ProgramBuilder { table_references: self.table_references, sql: sql.to_string(), accesses_db: !matches!(self.txn_mode, TransactionMode::None), - program_state: PROGRAM_STATE_ACTIVE.into(), } } } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index d3ef412f1..01def8029 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -19,7 +19,7 @@ use crate::types::{ }; use crate::util::normalize_ident; use crate::vdbe::insn::InsertFlags; -use crate::vdbe::registers_to_ref_values; +use crate::vdbe::{registers_to_ref_values, TxnCleanup}; use crate::vector::{vector_concat, vector_slice}; use crate::{ error::{ @@ -157,7 +157,6 @@ pub enum InsnFunctionStepResult { Done, IO(IOCompletions), Row, - Interrupt, Step, } @@ -2328,7 +2327,7 @@ pub fn op_transaction_inner( | TransactionMode::Read | TransactionMode::Concurrent => mv_store.begin_tx(pager.clone())?, TransactionMode::Write => { - return_if_io!(mv_store.begin_exclusive_tx(pager.clone(), None)) + mv_store.begin_exclusive_tx(pager.clone(), None)? } }; *program.connection.mv_tx.write() = Some((tx_id, *tx_mode)); @@ -2343,7 +2342,7 @@ pub fn op_transaction_inner( if matches!(new_transaction_state, TransactionState::Write { .. }) && matches!(actual_tx_mode, TransactionMode::Write) { - return_if_io!(mv_store.begin_exclusive_tx(pager.clone(), Some(tx_id))); + mv_store.begin_exclusive_tx(pager.clone(), Some(tx_id))?; } } } else { @@ -2359,6 +2358,7 @@ pub fn op_transaction_inner( "nested stmt should not begin a new read transaction" ); pager.begin_read_tx()?; + state.auto_txn_cleanup = TxnCleanup::RollbackTxn; } if updated && matches!(new_transaction_state, TransactionState::Write { .. }) { @@ -2374,6 +2374,7 @@ pub fn op_transaction_inner( if matches!(current_state, TransactionState::None) { pager.end_read_tx(); conn.set_tx_state(TransactionState::None); + state.auto_txn_cleanup = TxnCleanup::None; } assert_eq!(conn.get_tx_state(), current_state); return Err(LimboError::Busy); @@ -8018,15 +8019,12 @@ pub fn op_drop_column( let schema = conn.schema.read(); for (view_name, view) in schema.views.iter() { let view_select_sql = format!("SELECT * FROM {view_name}"); - let stmt = conn.prepare(view_select_sql.as_str()).map_err(|e| { + let _ = conn.prepare(view_select_sql.as_str()).map_err(|e| { LimboError::ParseError(format!( "cannot drop column \"{}\": referenced in VIEW {view_name}: {}", column_name, view.sql, )) })?; - // this is internal statement running during active Program execution - // so, we must not interact with transaction state and explicitly mark this statement as done avoiding cleanup on reset - stmt.mark_as_done(); } } @@ -8152,15 +8150,12 @@ pub fn op_alter_column( for (view_name, view) in schema.views.iter() { let view_select_sql = format!("SELECT * FROM {view_name}"); // FIXME: this should rewrite the view to reference the new column name - let stmt = conn.prepare(view_select_sql.as_str()).map_err(|e| { + let _ = conn.prepare(view_select_sql.as_str()).map_err(|e| { LimboError::ParseError(format!( "cannot rename column \"{}\": referenced in VIEW {view_name}: {}", old_column_name, view.sql, )) })?; - // this is internal statement running during active Program execution - // so, we must not interact with transaction state and explicitly mark this statement as done avoiding cleanup on reset - stmt.mark_as_done(); } } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index ed09762e1..4b11c8c1d 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -66,7 +66,7 @@ use std::{ collections::HashMap, num::NonZero, sync::{ - atomic::{AtomicI64, AtomicU32, Ordering}, + atomic::{AtomicI64, Ordering}, Arc, }, }; @@ -265,6 +265,12 @@ pub struct Row { count: usize, } +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum TxnCleanup { + None, + RollbackTxn, +} + /// The program state describes the environment in which the program executes. pub struct ProgramState { pub io_completions: Option, @@ -302,6 +308,10 @@ pub struct ProgramState { op_checkpoint_state: OpCheckpointState, /// State machine for committing view deltas with I/O handling view_delta_state: ViewDeltaCommitState, + /// Marker which tells about auto transaction cleanup necessary for that connection in case of reset + /// This is used when statement in auto-commit mode reseted after previous uncomplete execution - in which case we may need to rollback transaction started on previous attempt + /// Note, that MVCC transactions are always explicit - so they do not update auto_txn_cleanup marker + pub(crate) auto_txn_cleanup: TxnCleanup, } impl ProgramState { @@ -346,6 +356,7 @@ impl ProgramState { op_transaction_state: OpTransactionState::Start, op_checkpoint_state: OpCheckpointState::StartCheckpoint, view_delta_state: ViewDeltaCommitState::NotStarted, + auto_txn_cleanup: TxnCleanup::None, } } @@ -428,6 +439,7 @@ impl ProgramState { self.op_column_state = OpColumnState::Start; self.op_row_id_state = OpRowIdState::Start; self.view_delta_state = ViewDeltaCommitState::NotStarted; + self.auto_txn_cleanup = TxnCleanup::None; } pub fn get_cursor(&mut self, cursor_id: CursorID) -> &mut Cursor { @@ -483,10 +495,6 @@ macro_rules! get_cursor { }; } -pub(crate) const PROGRAM_STATE_ACTIVE: u32 = 1; -pub(crate) const PROGRAM_STATE_ABORTED: u32 = 2; -pub(crate) const PROGRAM_STATE_DONE: u32 = 3; - pub struct Program { pub max_registers: usize, // we store original indices because we don't want to create new vec from @@ -505,9 +513,6 @@ pub struct Program { /// Used to determine whether we need to check for schema changes when /// starting a transaction. pub accesses_db: bool, - /// Current state of the program - /// Used to execute abort only once - pub program_state: AtomicU32, } impl Program { @@ -648,7 +653,7 @@ impl Program { return Err(LimboError::InternalError("Connection closed".to_string())); } if state.is_interrupted() { - self.abort(mv_store, &pager, None); + self.abort(mv_store, &pager, None, &mut state.auto_txn_cleanup); return Ok(StepResult::Interrupt); } if let Some(io) = &state.io_completions { @@ -657,7 +662,7 @@ impl Program { } if let Some(err) = io.get_error() { let err = err.into(); - self.abort(mv_store, &pager, Some(&err)); + self.abort(mv_store, &pager, Some(&err), &mut state.auto_txn_cleanup); return Err(err); } state.io_completions = None; @@ -680,8 +685,7 @@ impl Program { Ok(InsnFunctionStepResult::Done) => { // Instruction completed execution state.metrics.insn_executed = state.metrics.insn_executed.saturating_add(1); - self.program_state - .store(PROGRAM_STATE_DONE, Ordering::SeqCst); + state.auto_txn_cleanup = TxnCleanup::None; return Ok(StepResult::Done); } Ok(InsnFunctionStepResult::IO(io)) => { @@ -694,16 +698,12 @@ impl Program { state.metrics.insn_executed = state.metrics.insn_executed.saturating_add(1); return Ok(StepResult::Row); } - Ok(InsnFunctionStepResult::Interrupt) => { - // Instruction interrupted - may resume at same PC - return Ok(StepResult::Interrupt); - } Err(LimboError::Busy) => { // Instruction blocked - will retry at same PC return Ok(StepResult::Busy); } Err(err) => { - self.abort(mv_store, &pager, Some(&err)); + self.abort(mv_store, &pager, Some(&err), &mut state.auto_txn_cleanup); return Err(err); } } @@ -964,43 +964,33 @@ impl Program { mv_store: Option<&Arc>, pager: &Arc, err: Option<&LimboError>, + cleanup: &mut TxnCleanup, ) { - let Ok(..) = self.program_state.compare_exchange( - PROGRAM_STATE_ACTIVE, - PROGRAM_STATE_ABORTED, - Ordering::SeqCst, - Ordering::SeqCst, - ) else { - // no need to abort: program was either already aborted or executed to completion successfully - return; - }; - - if self.connection.is_nested_stmt.load(Ordering::SeqCst) { - // Errors from nested statements are handled by the parent statement. - return; - } - if self.connection.get_tx_state() == TransactionState::None { - return; - } - match err { - // Transaction errors, e.g. trying to start a nested transaction, do not cause a rollback. - Some(LimboError::TxError(_)) => {} - // Table locked errors, e.g. trying to checkpoint in an interactive transaction, do not cause a rollback. - Some(LimboError::TableLocked) => {} - // Busy errors do not cause a rollback. - Some(LimboError::Busy) => {} - _ => { - if let Some(mv_store) = mv_store { - if let Some(tx_id) = self.connection.get_mv_tx_id() { - self.connection.auto_commit.store(true, Ordering::SeqCst); - mv_store.rollback_tx(tx_id, pager.clone(), &self.connection); + // Errors from nested statements are handled by the parent statement. + if !self.connection.is_nested_stmt.load(Ordering::SeqCst) { + match err { + // Transaction errors, e.g. trying to start a nested transaction, do not cause a rollback. + Some(LimboError::TxError(_)) => {} + // Table locked errors, e.g. trying to checkpoint in an interactive transaction, do not cause a rollback. + Some(LimboError::TableLocked) => {} + // Busy errors do not cause a rollback. + Some(LimboError::Busy) => {} + _ => { + if *cleanup != TxnCleanup::None || err.is_some() { + if let Some(mv_store) = mv_store { + if let Some(tx_id) = self.connection.get_mv_tx_id() { + self.connection.auto_commit.store(true, Ordering::SeqCst); + mv_store.rollback_tx(tx_id, pager.clone(), &self.connection); + } + } else { + pager.rollback_tx(&self.connection); + } + self.connection.set_tx_state(TransactionState::None); } - } else { - pager.rollback_tx(&self.connection); } - self.connection.set_tx_state(TransactionState::None); } } + *cleanup = TxnCleanup::None; } } diff --git a/tests/integration/query_processing/test_transactions.rs b/tests/integration/query_processing/test_transactions.rs index 53ab7f00e..a96235153 100644 --- a/tests/integration/query_processing/test_transactions.rs +++ b/tests/integration/query_processing/test_transactions.rs @@ -95,6 +95,7 @@ fn test_deferred_transaction_no_restart() { .execute("INSERT INTO test (id, value) VALUES (2, 'second')") .unwrap(); conn2.execute("COMMIT").unwrap(); + drop(stmt); let mut stmt = conn1.query("SELECT COUNT(*) FROM test").unwrap().unwrap(); if let StepResult::Row = stmt.step().unwrap() { From 26a3e069fbf7854e09f4848a837b093ee48774c3 Mon Sep 17 00:00:00 2001 From: Duy Dang <55247256+ddwalias@users.noreply.github.com> Date: Tue, 7 Oct 2025 01:11:46 +0700 Subject: [PATCH 013/428] Fix missing row versions to checkpoint --- .../mvcc/database/checkpoint_state_machine.rs | 117 +++++++----------- 1 file changed, 46 insertions(+), 71 deletions(-) diff --git a/core/mvcc/database/checkpoint_state_machine.rs b/core/mvcc/database/checkpoint_state_machine.rs index bfe8f51d2..85f09b2a7 100644 --- a/core/mvcc/database/checkpoint_state_machine.rs +++ b/core/mvcc/database/checkpoint_state_machine.rs @@ -164,92 +164,67 @@ impl CheckpointStateMachine { // 2. A checkpointed table that was destroyed in the logical log. We need to destroy the btree in the pager/btree layer. continue; } + let row_versions = entry.value().read(); + + let mut version_to_checkpoint = None; let mut exists_in_db_file = false; - for (i, version) in row_versions.iter().enumerate() { - let is_last = i == row_versions.len() - 1; - if let Some(TxTimestampOrID::Timestamp(ts)) = &version.begin { - if *ts <= self.checkpointed_txid_max_old { + for version in row_versions.iter() { + if let Some(TxTimestampOrID::Timestamp(ts)) = version.begin { + //TODO: garbage collect row versions after checkpointing. + if ts > self.checkpointed_txid_max_old { + version_to_checkpoint = Some(version); + } else { exists_in_db_file = true; } + } + } - let current_version_ts = - if let Some(TxTimestampOrID::Timestamp(ts_end)) = version.end { - ts_end.max(*ts) - } else { - *ts - }; - if current_version_ts <= self.checkpointed_txid_max_old { - // already checkpointed. TODO: garbage collect row versions after checkpointing. - continue; - } + if let Some(version) = version_to_checkpoint { + let is_delete = version.end.is_some(); + if let Some(TxTimestampOrID::Timestamp(ts)) = version.begin { + max_timestamp = max_timestamp.max(ts); + } - // Row versions in sqlite_schema are temporarily assigned a negative root page that is equal to the table id, - // because the root page is not known until it's actually allocated during the checkpoint. - // However, existing tables have a real root page. - let get_table_id_or_root_page_from_sqlite_schema = |row_data: &Vec| { - let row_data = ImmutableRecord::from_bin_record(row_data.clone()); + // Only write the row to the B-tree if it is not a delete, or if it is a delete and it exists in + // the database file. + if !is_delete || exists_in_db_file { + let mut special_write = None; + + if version.row.id.table_id == SQLITE_SCHEMA_MVCC_TABLE_ID { + let row_data = ImmutableRecord::from_bin_record(version.row.data.clone()); let mut record_cursor = RecordCursor::new(); record_cursor.parse_full_header(&row_data).unwrap(); - let RefValue::Integer(root_page) = + + if let RefValue::Integer(root_page) = record_cursor.get_value(&row_data, 3).unwrap() - else { - panic!( - "Expected integer value for root page, got {:?}", - record_cursor.get_value(&row_data, 3) - ); - }; - root_page - }; + { + if is_delete { + let table_id = self + .mvstore + .table_id_to_rootpage + .iter() + .find(|entry| { + entry.value().is_some_and(|r| r == root_page as u64) + }) + .map(|entry| *entry.key()) + .unwrap(); // This assumes a valid mapping exists. + self.destroyed_tables.insert(table_id); - max_timestamp = max_timestamp.max(current_version_ts); - if is_last { - let is_delete = version.end.is_some(); - let is_delete_of_table = - is_delete && version.row.id.table_id == SQLITE_SCHEMA_MVCC_TABLE_ID; - let is_create_of_table = !exists_in_db_file - && !is_delete - && version.row.id.table_id == SQLITE_SCHEMA_MVCC_TABLE_ID; - - // We might need to create or destroy a B-tree in the pager during checkpoint if a row in root page 1 is deleted or created. - let special_write = if is_delete_of_table { - let root_page = - get_table_id_or_root_page_from_sqlite_schema(&version.row.data); - assert!(root_page > 0, "rootpage is positive integer"); - let root_page = root_page as u64; - let table_id = *self - .mvstore - .table_id_to_rootpage - .iter() - .find(|entry| entry.value().is_some_and(|r| r == root_page)) - .unwrap() - .key(); - self.destroyed_tables.insert(table_id); - - if exists_in_db_file { - Some(SpecialWrite::BTreeDestroy { + // We might need to create or destroy a B-tree in the pager during checkpoint if a row in root page 1 is deleted or created. + special_write = Some(SpecialWrite::BTreeDestroy { table_id, - root_page, + root_page: root_page as u64, num_columns: version.row.column_count, - }) - } else { - None + }); + } else if !exists_in_db_file { + let table_id = MVTableId::from(root_page); + special_write = Some(SpecialWrite::BTreeCreate { table_id }); } - } else if is_create_of_table { - let table_id = - get_table_id_or_root_page_from_sqlite_schema(&version.row.data); - let table_id = MVTableId::from(table_id); - Some(SpecialWrite::BTreeCreate { table_id }) - } else { - None - }; - - // Only write the row to the B-tree if it is not a delete, or if it is a delete and it exists in the database file. - let should_be_deleted_from_db_file = is_delete && exists_in_db_file; - if !is_delete || should_be_deleted_from_db_file { - self.write_set.push((version.clone(), special_write)); } } + + self.write_set.push((version.clone(), special_write)); } } } From beb44e8e8cc1deeb655ae5f6b459b5b22e4e8ac9 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 6 Oct 2025 20:12:49 -0500 Subject: [PATCH 014/428] fix mviews with re-insertion of data with the same key There is currently a bug found in our materialized view implementation that happens when we delete a row, and then re-insert another row with the same primary key. Our insert code needs to detect updates and generate a DELETE + INSERT. But in this case, after the initial DELETE, the fresh insert generates another delete. We ended up with the wrong response for aggregations (and I am pretty sure even filter-only views would manifest the bug as well), where groups that should still be present just disappeared because of the extra delete. A new test case is added that fails without the fix. --- core/vdbe/execute.rs | 49 +++++++++++++++++++++++---------- testing/materialized_views.test | 29 +++++++++++++++++++ 2 files changed, 64 insertions(+), 14 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 87fbaec0a..7df18008e 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -5695,31 +5695,52 @@ pub fn op_insert( turso_assert!(!flag.has(InsertFlags::REQUIRE_SEEK), "to capture old record accurately, we must be located at the correct position in the table"); + // Get the key we're going to insert + let insert_key = match &state.registers[*key_reg].get_value() { + Value::Integer(i) => *i, + _ => { + // If key is not an integer, we can't check - assume no old record + state.op_insert_state.old_record = None; + state.op_insert_state.sub_state = if flag.has(InsertFlags::REQUIRE_SEEK) { + OpInsertSubState::Seek + } else { + OpInsertSubState::Insert + }; + continue; + } + }; + let old_record = { let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_btree_mut(); // Get the current key - for INSERT operations, there may not be a current row let maybe_key = return_if_io!(cursor.rowid()); if let Some(key) = maybe_key { - // Get the current record before deletion and extract values - let maybe_record = return_if_io!(cursor.record()); - if let Some(record) = maybe_record { - let mut values = record - .get_values() - .into_iter() - .map(|v| v.to_owned()) - .collect::>(); + // Only capture as old record if the cursor is at the position we're inserting to + if key == insert_key { + // Get the current record before deletion and extract values + let maybe_record = return_if_io!(cursor.record()); + if let Some(record) = maybe_record { + let mut values = record + .get_values() + .into_iter() + .map(|v| v.to_owned()) + .collect::>(); - // Fix rowid alias columns: replace Null with actual rowid value - if let Some(table) = schema.get_table(table_name) { - for (i, col) in table.columns().iter().enumerate() { - if col.is_rowid_alias && i < values.len() { - values[i] = Value::Integer(key); + // Fix rowid alias columns: replace Null with actual rowid value + if let Some(table) = schema.get_table(table_name) { + for (i, col) in table.columns().iter().enumerate() { + if col.is_rowid_alias && i < values.len() { + values[i] = Value::Integer(key); + } } } + Some((key, values)) + } else { + None } - Some((key, values)) } else { + // Cursor is at wrong position - this is a fresh INSERT, not a replacement None } } else { diff --git a/testing/materialized_views.test b/testing/materialized_views.test index dd2652d7d..a2a8eb5c4 100755 --- a/testing/materialized_views.test +++ b/testing/materialized_views.test @@ -3,6 +3,35 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +# Test that INSERT with reused primary keys maintains correct aggregate counts +# When a row is deleted and a new row is inserted with the same primary key +# but different group value, all groups should maintain correct counts +do_execsql_test_on_specific_db {:memory:} matview-insert-reused-key-maintains-all-groups { + CREATE TABLE t(id INTEGER PRIMARY KEY, val TEXT); + INSERT INTO t VALUES (1, 'A'), (2, 'B'); + + CREATE MATERIALIZED VIEW v AS + SELECT val, COUNT(*) as cnt + FROM t + GROUP BY val; + + -- Initial state: A=1, B=1 + SELECT * FROM v ORDER BY val; + + -- Delete id=1 (which has 'A') + DELETE FROM t WHERE id = 1; + SELECT * FROM v ORDER BY val; + + -- Insert id=1 with different value 'C' + -- This should NOT affect group 'B' + INSERT INTO t VALUES (1, 'C'); + SELECT * FROM v ORDER BY val; +} {A|1 +B|1 +B|1 +B|1 +C|1} + do_execsql_test_on_specific_db {:memory:} matview-basic-filter-population { CREATE TABLE products(id INTEGER, name TEXT, price INTEGER, category TEXT); INSERT INTO products VALUES From 17da71ee3c4b0992ac98f487d4b5c1866b2daff6 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Mon, 6 Oct 2025 21:33:20 -0400 Subject: [PATCH 015/428] Open db with proper IO when attaching database to fix #3540 --- core/lib.rs | 23 ++++++++++++++++------- core/vdbe/execute.rs | 2 +- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 11b85be81..fb31a557d 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -62,6 +62,7 @@ pub use io::{ }; use parking_lot::RwLock; use schema::Schema; +use std::cell::Cell; use std::{ borrow::Cow, cell::RefCell, @@ -217,7 +218,7 @@ pub struct Database { shared_wal: Arc>, db_state: Arc, init_lock: Arc>, - open_flags: OpenFlags, + open_flags: Cell, builtin_syms: RwLock, opts: DatabaseOpts, n_connections: AtomicUsize, @@ -231,7 +232,7 @@ impl fmt::Debug for Database { let mut debug_struct = f.debug_struct("Database"); debug_struct .field("path", &self.path) - .field("open_flags", &self.open_flags); + .field("open_flags", &self.open_flags.get()); // Database state information let db_state_value = match self.db_state.get() { @@ -468,7 +469,7 @@ impl Database { db_file, builtin_syms: syms.into(), io: io.clone(), - open_flags: flags, + open_flags: flags.into(), db_state: Arc::new(AtomicDbState::new(db_state)), init_lock: Arc::new(Mutex::new(())), opts, @@ -599,7 +600,7 @@ impl Database { } pub fn is_readonly(&self) -> bool { - self.open_flags.contains(OpenFlags::ReadOnly) + self.open_flags.get().contains(OpenFlags::ReadOnly) } /// If we do not have a physical WAL file, but we know the database file is initialized on disk, @@ -1529,7 +1530,6 @@ impl Connection { ) -> Result> { let mut opts = OpenOptions::parse(uri)?; // FIXME: for now, only support read only attach - opts.mode = OpenMode::ReadOnly; let flags = opts.get_flags()?; let io = opts.vfs.map(Database::io_for_vfs).unwrap_or(Ok(io))?; let db = Database::open_file_with_flags(io.clone(), &opts.path, flags, db_opts, None)?; @@ -2090,9 +2090,18 @@ impl Connection { .with_indexes(use_indexes) .with_views(use_views) .with_strict(use_strict); - let db = Self::from_uri_attached(path, db_opts, self.db.io.clone())?; + let is_memory = path.contains(":memory:"); + let io: Arc = if is_memory { + Arc::new(MemoryIO::new()) + } else { + Arc::new(PlatformIO::new()?) + }; + let db = Self::from_uri_attached(path, db_opts, io).expect("FAILURE"); let pager = Arc::new(db.init_pager(None)?); - + // set back to read-only now that we have + if is_memory { + db.open_flags.set(OpenFlags::ReadOnly); + } self.attached_databases.write().insert(alias, (db, pager)); Ok(()) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 87fbaec0a..a911cd527 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2258,7 +2258,7 @@ pub fn op_transaction_inner( OpTransactionState::Start => { let conn = program.connection.clone(); let write = matches!(tx_mode, TransactionMode::Write); - if write && conn.db.open_flags.contains(OpenFlags::ReadOnly) { + if write && conn.db.open_flags.get().contains(OpenFlags::ReadOnly) { return Err(LimboError::ReadOnly); } From addb9ef65bda2952915336099d29e64b7ba3c0e4 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Mon, 6 Oct 2025 21:33:42 -0400 Subject: [PATCH 016/428] Add regression test for #3540 attach issue --- testing/attach.test | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/testing/attach.test b/testing/attach.test index 0d799a927..58d71eea1 100755 --- a/testing/attach.test +++ b/testing/attach.test @@ -73,3 +73,11 @@ do_execsql_test_error query-after-detach { DETACH DATABASE small; select * from small.sqlite_schema; } {(.*no such.*)} + +# regression test for https://github.com/tursodatabase/turso/issues/3540 +do_execsql_test_on_specifc_db {:memory:} attach-from-memory-db { + CREATE TABLE t(a); + INSERT INTO t SELECT value from generate_series(1,10); + ATTACH DATABASE 'testing/testing.db' as a; + SELECT * from a.products, t LIMIT 1; +} {1|hat|79.0|1} From 20d2ca55fe5a623c048332e8e9d5e54c0b91a8fa Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Mon, 6 Oct 2025 21:43:48 -0400 Subject: [PATCH 017/428] fix clippy warning --- core/lib.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index fb31a557d..d5f9281b3 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -1528,8 +1528,7 @@ impl Connection { db_opts: DatabaseOpts, io: Arc, ) -> Result> { - let mut opts = OpenOptions::parse(uri)?; - // FIXME: for now, only support read only attach + let opts = OpenOptions::parse(uri)?; let flags = opts.get_flags()?; let io = opts.vfs.map(Database::io_for_vfs).unwrap_or(Ok(io))?; let db = Database::open_file_with_flags(io.clone(), &opts.path, flags, db_opts, None)?; @@ -2090,18 +2089,15 @@ impl Connection { .with_indexes(use_indexes) .with_views(use_views) .with_strict(use_strict); - let is_memory = path.contains(":memory:"); - let io: Arc = if is_memory { + let io: Arc = if path.contains(":memory:") { Arc::new(MemoryIO::new()) } else { Arc::new(PlatformIO::new()?) }; - let db = Self::from_uri_attached(path, db_opts, io).expect("FAILURE"); + let db = Self::from_uri_attached(path, db_opts, io)?; let pager = Arc::new(db.init_pager(None)?); - // set back to read-only now that we have - if is_memory { - db.open_flags.set(OpenFlags::ReadOnly); - } + // FIXME: for now, only support read only attach + db.open_flags.set(OpenFlags::ReadOnly); self.attached_databases.write().insert(alias, (db, pager)); Ok(()) From e2694ff88b9e68276f1619d4a6e67325b13d33c6 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 6 Oct 2025 21:21:53 -0500 Subject: [PATCH 018/428] implement is null / is not null tests for mview filter Just overlook on our side that they were not generated before. --- core/incremental/compiler.rs | 25 ++++ core/incremental/filter_operator.rs | 216 ++++++++++++++++++++++++++++ 2 files changed, 241 insertions(+) diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index 40a8ca2af..f87792e1a 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -2149,6 +2149,31 @@ impl DbspCompiler { )) } } + LogicalExpr::IsNull { expr, negated } => { + // Extract column index from the inner expression + if let LogicalExpr::Column(col) = expr.as_ref() { + let column_idx = schema + .columns + .iter() + .position(|c| c.name == col.name) + .ok_or_else(|| { + LimboError::ParseError(format!( + "Column '{}' not found in schema for IS NULL filter", + col.name + )) + })?; + + if *negated { + Ok(FilterPredicate::IsNotNull { column_idx }) + } else { + Ok(FilterPredicate::IsNull { column_idx }) + } + } else { + Err(LimboError::ParseError( + "IS NULL/IS NOT NULL expects a column reference".to_string(), + )) + } + } _ => Err(LimboError::ParseError(format!( "Unsupported filter expression: {expr:?}" ))), diff --git a/core/incremental/filter_operator.rs b/core/incremental/filter_operator.rs index 84a3c53ce..5b9c7e5d9 100644 --- a/core/incremental/filter_operator.rs +++ b/core/incremental/filter_operator.rs @@ -39,6 +39,11 @@ pub enum FilterPredicate { /// Column <= Column comparisons ColumnLessThanOrEqual { left_idx: usize, right_idx: usize }, + /// Column IS NULL check + IsNull { column_idx: usize }, + /// Column IS NOT NULL check + IsNotNull { column_idx: usize }, + /// Logical AND of two predicates And(Box, Box), /// Logical OR of two predicates @@ -214,6 +219,18 @@ impl FilterOperator { } false } + FilterPredicate::IsNull { column_idx } => { + if let Some(v) = values.get(*column_idx) { + return matches!(v, Value::Null); + } + false + } + FilterPredicate::IsNotNull { column_idx } => { + if let Some(v) = values.get(*column_idx) { + return !matches!(v, Value::Null); + } + false + } } } } @@ -293,3 +310,202 @@ impl IncrementalOperator for FilterOperator { self.tracker = Some(tracker); } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::Text; + + #[test] + fn test_is_null_predicate() { + let predicate = FilterPredicate::IsNull { column_idx: 1 }; + let filter = FilterOperator::new(predicate); + + // Test with NULL value + let values_with_null = vec![ + Value::Integer(1), + Value::Null, + Value::Text(Text::from("test")), + ]; + assert!(filter.evaluate_predicate(&values_with_null)); + + // Test with non-NULL value + let values_without_null = vec![ + Value::Integer(1), + Value::Integer(42), + Value::Text(Text::from("test")), + ]; + assert!(!filter.evaluate_predicate(&values_without_null)); + + // Test with different non-NULL types + let values_with_text = vec![ + Value::Integer(1), + Value::Text(Text::from("not null")), + Value::Text(Text::from("test")), + ]; + assert!(!filter.evaluate_predicate(&values_with_text)); + + let values_with_blob = vec![ + Value::Integer(1), + Value::Blob(vec![1, 2, 3]), + Value::Text(Text::from("test")), + ]; + assert!(!filter.evaluate_predicate(&values_with_blob)); + } + + #[test] + fn test_is_not_null_predicate() { + let predicate = FilterPredicate::IsNotNull { column_idx: 1 }; + let filter = FilterOperator::new(predicate); + + // Test with NULL value + let values_with_null = vec![ + Value::Integer(1), + Value::Null, + Value::Text(Text::from("test")), + ]; + assert!(!filter.evaluate_predicate(&values_with_null)); + + // Test with non-NULL value (Integer) + let values_with_integer = vec![ + Value::Integer(1), + Value::Integer(42), + Value::Text(Text::from("test")), + ]; + assert!(filter.evaluate_predicate(&values_with_integer)); + + // Test with non-NULL value (Text) + let values_with_text = vec![ + Value::Integer(1), + Value::Text(Text::from("not null")), + Value::Text(Text::from("test")), + ]; + assert!(filter.evaluate_predicate(&values_with_text)); + + // Test with non-NULL value (Blob) + let values_with_blob = vec![ + Value::Integer(1), + Value::Blob(vec![1, 2, 3]), + Value::Text(Text::from("test")), + ]; + assert!(filter.evaluate_predicate(&values_with_blob)); + } + + #[test] + fn test_is_null_with_and() { + // Test: column_0 = 1 AND column_1 IS NULL + let predicate = FilterPredicate::And( + Box::new(FilterPredicate::Equals { + column_idx: 0, + value: Value::Integer(1), + }), + Box::new(FilterPredicate::IsNull { column_idx: 1 }), + ); + let filter = FilterOperator::new(predicate); + + // Should match: column_0 = 1 AND column_1 IS NULL + let values_match = vec![ + Value::Integer(1), + Value::Null, + Value::Text(Text::from("test")), + ]; + assert!(filter.evaluate_predicate(&values_match)); + + // Should not match: column_0 = 2 AND column_1 IS NULL + let values_wrong_first = vec![ + Value::Integer(2), + Value::Null, + Value::Text(Text::from("test")), + ]; + assert!(!filter.evaluate_predicate(&values_wrong_first)); + + // Should not match: column_0 = 1 AND column_1 IS NOT NULL + let values_not_null = vec![ + Value::Integer(1), + Value::Integer(42), + Value::Text(Text::from("test")), + ]; + assert!(!filter.evaluate_predicate(&values_not_null)); + } + + #[test] + fn test_is_not_null_with_or() { + // Test: column_0 = 1 OR column_1 IS NOT NULL + let predicate = FilterPredicate::Or( + Box::new(FilterPredicate::Equals { + column_idx: 0, + value: Value::Integer(1), + }), + Box::new(FilterPredicate::IsNotNull { column_idx: 1 }), + ); + let filter = FilterOperator::new(predicate); + + // Should match: column_0 = 1 (regardless of column_1) + let values_first_matches = vec![ + Value::Integer(1), + Value::Null, + Value::Text(Text::from("test")), + ]; + assert!(filter.evaluate_predicate(&values_first_matches)); + + // Should match: column_1 IS NOT NULL (regardless of column_0) + let values_second_matches = vec![ + Value::Integer(2), + Value::Integer(42), + Value::Text(Text::from("test")), + ]; + assert!(filter.evaluate_predicate(&values_second_matches)); + + // Should not match: column_0 != 1 AND column_1 IS NULL + let values_no_match = vec![ + Value::Integer(2), + Value::Null, + Value::Text(Text::from("test")), + ]; + assert!(!filter.evaluate_predicate(&values_no_match)); + } + + #[test] + fn test_complex_null_predicates() { + // Test: (column_0 IS NULL OR column_1 IS NOT NULL) AND column_2 = 'test' + let predicate = FilterPredicate::And( + Box::new(FilterPredicate::Or( + Box::new(FilterPredicate::IsNull { column_idx: 0 }), + Box::new(FilterPredicate::IsNotNull { column_idx: 1 }), + )), + Box::new(FilterPredicate::Equals { + column_idx: 2, + value: Value::Text(Text::from("test")), + }), + ); + let filter = FilterOperator::new(predicate); + + // Should match: column_0 IS NULL, column_2 = 'test' + let values1 = vec![Value::Null, Value::Null, Value::Text(Text::from("test"))]; + assert!(filter.evaluate_predicate(&values1)); + + // Should match: column_1 IS NOT NULL, column_2 = 'test' + let values2 = vec![ + Value::Integer(1), + Value::Integer(42), + Value::Text(Text::from("test")), + ]; + assert!(filter.evaluate_predicate(&values2)); + + // Should not match: column_2 != 'test' + let values3 = vec![ + Value::Null, + Value::Integer(42), + Value::Text(Text::from("other")), + ]; + assert!(!filter.evaluate_predicate(&values3)); + + // Should not match: column_0 IS NOT NULL AND column_1 IS NULL AND column_2 = 'test' + let values4 = vec![ + Value::Integer(1), + Value::Null, + Value::Text(Text::from("test")), + ]; + assert!(!filter.evaluate_predicate(&values4)); + } +} From 66c69461fba0638e46f382d0b474c595ecdb141c Mon Sep 17 00:00:00 2001 From: bit-aloo Date: Tue, 7 Oct 2025 10:09:39 +0530 Subject: [PATCH 019/428] Add getter/setter for checkpoint threshold in LogicalLog Wire threshold access through Storage Add checkpoint threshold accessors to MvStore --- core/mvcc/database/mod.rs | 4 ++++ core/mvcc/persistent_storage/logical_log.rs | 4 ++++ core/mvcc/persistent_storage/mod.rs | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 3c3b4aaff..5425adead 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -2035,6 +2035,10 @@ impl MvStore { pub fn set_checkpoint_threshold(&self, threshold: u64) { self.storage.set_checkpoint_threshold(threshold) } + + pub fn checkpoint_threshold(&self) -> u64 { + self.storage.checkpoint_threshold() + } } /// A write-write conflict happens when transaction T_current attempts to update a diff --git a/core/mvcc/persistent_storage/logical_log.rs b/core/mvcc/persistent_storage/logical_log.rs index ee572f225..a902bac98 100644 --- a/core/mvcc/persistent_storage/logical_log.rs +++ b/core/mvcc/persistent_storage/logical_log.rs @@ -235,6 +235,10 @@ impl LogicalLog { pub fn set_checkpoint_threshold(&mut self, threshold: u64) { self.checkpoint_threshold = threshold; } + + pub fn checkpoint_threshold(&self) -> u64 { + self.checkpoint_threshold + } } pub enum StreamingResult { diff --git a/core/mvcc/persistent_storage/mod.rs b/core/mvcc/persistent_storage/mod.rs index 1cc8d0c2b..ede456bc3 100644 --- a/core/mvcc/persistent_storage/mod.rs +++ b/core/mvcc/persistent_storage/mod.rs @@ -49,6 +49,10 @@ impl Storage { .unwrap() .set_checkpoint_threshold(threshold) } + + pub fn checkpoint_threshold(&self) -> u64 { + self.logical_log.read().unwrap().checkpoint_threshold() + } } impl Debug for Storage { From fb5f5d9a9010d0d720534597cf7077d7cf2afaf3 Mon Sep 17 00:00:00 2001 From: bit-aloo Date: Tue, 7 Oct 2025 10:10:47 +0530 Subject: [PATCH 020/428] Add MVCC checkpoint threshold APIs to Connection --- core/lib.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/core/lib.rs b/core/lib.rs index 11b85be81..9327d7447 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -2360,6 +2360,23 @@ impl Connection { pub(crate) fn get_mv_tx(&self) -> Option<(u64, TransactionMode)> { *self.mv_tx.read() } + + pub(crate) fn set_mvcc_checkpoint_threshold(&self, threshold: u64) -> Result<()> { + match self.db.mv_store.as_ref() { + Some(mv_store) => { + mv_store.set_checkpoint_threshold(threshold); + Ok(()) + } + None => Err(LimboError::InternalError("MVCC not enabled".into())), + } + } + + pub(crate) fn mvcc_checkpoint_threshold(&self) -> Result { + match self.db.mv_store.as_ref() { + Some(mv_store) => Ok(mv_store.checkpoint_threshold()), + None => Err(LimboError::InternalError("MVCC not enabled".into())), + } + } } #[derive(Debug)] From 551dbf518e2f8abf5c819eab7af5469b7196a772 Mon Sep 17 00:00:00 2001 From: bit-aloo Date: Tue, 7 Oct 2025 10:11:20 +0530 Subject: [PATCH 021/428] Add new mvcc_checkpoint_threshold pragma name --- core/pragma.rs | 4 ++++ parser/src/ast.rs | 2 ++ 2 files changed, 6 insertions(+) diff --git a/core/pragma.rs b/core/pragma.rs index c83509a69..c238134e4 100644 --- a/core/pragma.rs +++ b/core/pragma.rs @@ -127,6 +127,10 @@ pub fn pragma_for(pragma: &PragmaName) -> Pragma { PragmaFlags::Result0 | PragmaFlags::SchemaReq | PragmaFlags::NoColumns1, &["cipher"], ), + PragmaName::MvccCheckpointThreshold => Pragma::new( + PragmaFlags::NoColumns1 | PragmaFlags::Result0, + &["mvcc_checkpoint_threshold"], + ), } } diff --git a/parser/src/ast.rs b/parser/src/ast.rs index ced93b722..6b69682f0 100644 --- a/parser/src/ast.rs +++ b/parser/src/ast.rs @@ -1449,6 +1449,8 @@ pub enum PragmaName { UserVersion, /// trigger a checkpoint to run on database(s) if WAL is enabled WalCheckpoint, + /// Sets or queries the threshold (in bytes) at which MVCC triggers an automatic checkpoint. + MvccCheckpointThreshold, } /// `CREATE TRIGGER` time From 68b6ffe57cac29cb208baaaa46edd74a98d5c051 Mon Sep 17 00:00:00 2001 From: bit-aloo Date: Tue, 7 Oct 2025 10:11:53 +0530 Subject: [PATCH 022/428] Implement mvcc_checkpoint_threshold pragma --- core/translate/pragma.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/core/translate/pragma.rs b/core/translate/pragma.rs index d8b26143a..37707533e 100644 --- a/core/translate/pragma.rs +++ b/core/translate/pragma.rs @@ -378,6 +378,15 @@ fn update_pragma( connection.set_data_sync_retry(retry_enabled); Ok((program, TransactionMode::None)) } + PragmaName::MvccCheckpointThreshold => { + let threshold = match parse_signed_number(&value)? { + Value::Integer(size) if size > 0 => size as u64, + _ => bail_parse_error!("mvcc_checkpoint_threshold must be a positive integer"), + }; + + connection.set_mvcc_checkpoint_threshold(threshold)?; + Ok((program, TransactionMode::None)) + } } } @@ -687,6 +696,14 @@ fn query_pragma( program.add_pragma_result_column(pragma.to_string()); Ok((program, TransactionMode::None)) } + PragmaName::MvccCheckpointThreshold => { + let threshold = connection.mvcc_checkpoint_threshold()?; + let register = program.alloc_register(); + program.emit_int(threshold as i64, register); + program.emit_result_row(register, 1); + program.add_pragma_result_column(pragma.to_string()); + Ok((program, TransactionMode::None)) + }, } } From afadb32c4ce69a4ded180c91f02d9d5765a8866e Mon Sep 17 00:00:00 2001 From: bit-aloo Date: Tue, 7 Oct 2025 10:20:13 +0530 Subject: [PATCH 023/428] fmt fixes --- core/translate/pragma.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/translate/pragma.rs b/core/translate/pragma.rs index 37707533e..19542adad 100644 --- a/core/translate/pragma.rs +++ b/core/translate/pragma.rs @@ -703,7 +703,7 @@ fn query_pragma( program.emit_result_row(register, 1); program.add_pragma_result_column(pragma.to_string()); Ok((program, TransactionMode::None)) - }, + } } } From 1d1b09dc171a111af8cfac0dc73309d2197898f2 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Fri, 3 Oct 2025 18:13:56 -0300 Subject: [PATCH 024/428] modify query generation to always sample from valid queries --- simulator/generation/query.rs | 167 ++++++++++++++++++++++++++++------ simulator/model/mod.rs | 2 +- 2 files changed, 139 insertions(+), 30 deletions(-) diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index 72541c4d7..f7d5ac478 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -1,42 +1,151 @@ -use crate::model::Query; -use rand::Rng; +use crate::model::{Query, QueryDiscriminants}; +use rand::{ + Rng, + distr::{Distribution, weighted::WeightedIndex}, +}; use sql_generation::{ - generation::{Arbitrary, ArbitraryFrom, GenerationContext, frequency}, - model::query::{Create, Delete, Insert, Select, update::Update}, + generation::{Arbitrary, ArbitraryFrom, GenerationContext, query::SelectFree}, + model::{ + query::{Create, CreateIndex, Delete, Insert, Select, update::Update}, + table::Table, + }, }; use super::property::Remaining; +fn random_create(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { + let mut create = Create::arbitrary(rng, conn_ctx); + while conn_ctx + .tables() + .iter() + .any(|t| t.name == create.table.name) + { + create = Create::arbitrary(rng, conn_ctx); + } + Query::Create(create) +} + +fn random_select(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { + if rng.random_bool(0.7) { + Query::Select(Select::arbitrary(rng, conn_ctx)) + } else { + // Random expression + Query::Select(SelectFree::arbitrary(rng, conn_ctx).0) + } +} + +fn random_insert(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { + assert!(!conn_ctx.tables().is_empty()); + Query::Insert(Insert::arbitrary(rng, conn_ctx)) +} + +fn random_delete(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { + assert!(!conn_ctx.tables().is_empty()); + Query::Delete(Delete::arbitrary(rng, conn_ctx)) +} + +fn random_update(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { + assert!(!conn_ctx.tables().is_empty()); + Query::Update(Update::arbitrary(rng, conn_ctx)) +} + +fn random_drop(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { + assert!(!conn_ctx.tables().is_empty()); + Query::Drop(sql_generation::model::query::Drop::arbitrary(rng, conn_ctx)) +} + +fn random_create_index(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { + assert!(!conn_ctx.tables().is_empty()); + + let mut create_index = CreateIndex::arbitrary(rng, conn_ctx); + while conn_ctx + .tables() + .iter() + .find(|t| t.name == create_index.table_name) + .expect("table should exist") + .indexes + .iter() + .any(|i| i == &create_index.index_name) + { + create_index = CreateIndex::arbitrary(rng, conn_ctx); + } + + Query::CreateIndex(create_index) +} + +/// Possible queries that can be generated given the table state +/// +/// Does not take into account transactional statements +pub fn possible_queries(tables: &[Table]) -> Vec { + let mut queries = vec![QueryDiscriminants::Select, QueryDiscriminants::Create]; + if !tables.is_empty() { + queries.extend([ + QueryDiscriminants::Insert, + QueryDiscriminants::Update, + QueryDiscriminants::Delete, + QueryDiscriminants::Drop, + QueryDiscriminants::CreateIndex, + ]); + } + queries +} + +type QueryGenFunc = fn(&mut R, &G) -> Query; + +impl QueryDiscriminants { + pub fn gen_function(&self) -> QueryGenFunc + where + R: rand::Rng, + G: GenerationContext, + { + match self { + QueryDiscriminants::Create => random_create, + QueryDiscriminants::Select => random_select, + QueryDiscriminants::Insert => random_insert, + QueryDiscriminants::Delete => random_delete, + QueryDiscriminants::Update => random_update, + QueryDiscriminants::Drop => random_drop, + QueryDiscriminants::CreateIndex => random_create_index, + QueryDiscriminants::Begin + | QueryDiscriminants::Commit + | QueryDiscriminants::Rollback => { + unreachable!("transactional queries should not be generated") + } + } + } + + pub fn weight(&self, remaining: &Remaining) -> u32 { + match self { + QueryDiscriminants::Create => remaining.create, + QueryDiscriminants::Select => remaining.select + remaining.select / 3, // remaining.select / 3 is for the random_expr generation + QueryDiscriminants::Insert => remaining.insert, + QueryDiscriminants::Delete => remaining.delete, + QueryDiscriminants::Update => remaining.update, + QueryDiscriminants::Drop => 0, + QueryDiscriminants::CreateIndex => remaining.create_index, + QueryDiscriminants::Begin + | QueryDiscriminants::Commit + | QueryDiscriminants::Rollback => { + unreachable!("transactional queries should not be generated") + } + } + } +} + impl ArbitraryFrom<&Remaining> for Query { fn arbitrary_from( rng: &mut R, context: &C, remaining: &Remaining, ) -> Self { - frequency( - vec![ - ( - remaining.create, - Box::new(|rng| Self::Create(Create::arbitrary(rng, context))), - ), - ( - remaining.select, - Box::new(|rng| Self::Select(Select::arbitrary(rng, context))), - ), - ( - remaining.insert, - Box::new(|rng| Self::Insert(Insert::arbitrary(rng, context))), - ), - ( - remaining.update, - Box::new(|rng| Self::Update(Update::arbitrary(rng, context))), - ), - ( - remaining.insert.min(remaining.delete), - Box::new(|rng| Self::Delete(Delete::arbitrary(rng, context))), - ), - ], - rng, - ) + let queries = possible_queries(context.tables()); + let weights = + WeightedIndex::new(queries.iter().map(|query| query.weight(remaining))).unwrap(); + + let idx = weights.sample(rng); + let query_fn = queries[idx].gen_function(); + let query = (query_fn)(rng, context); + + query } } diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 551c08b1d..807e0f2a1 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -18,7 +18,7 @@ use turso_parser::ast::Distinctness; use crate::{generation::Shadow, runner::env::ShadowTablesMut}; // This type represents the potential queries on the database. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, strum::EnumDiscriminants)] pub enum Query { Create(Create), Select(Select), From bb9c8dea4ff411f43ba49e054a2c000b98c5a00a Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Fri, 3 Oct 2025 18:13:56 -0300 Subject: [PATCH 025/428] rework interaction generation to only generate possible queries + do less allocations --- Cargo.lock | 1 + simulator/Cargo.toml | 1 + simulator/generation/plan.rs | 107 +++++------ simulator/generation/property.rs | 305 ++++++++++++++++++++----------- simulator/generation/query.rs | 16 +- simulator/model/mod.rs | 70 +++++++ 6 files changed, 321 insertions(+), 179 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ed065f587..2ddd72015 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2346,6 +2346,7 @@ name = "limbo_sim" version = "0.2.0" dependencies = [ "anyhow", + "bitflags 2.9.4", "bitmaps", "chrono", "clap", diff --git a/simulator/Cargo.toml b/simulator/Cargo.toml index 09401f2cc..7fd5dbeff 100644 --- a/simulator/Cargo.toml +++ b/simulator/Cargo.toml @@ -46,3 +46,4 @@ either = "1.15.0" similar = { workspace = true } similar-asserts = { workspace = true } bitmaps = { workspace = true } +bitflags.workspace = true diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 3c07d73e3..fb03ee42a 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -8,6 +8,7 @@ use std::{ }; use indexmap::IndexSet; +use rand::distr::weighted::WeightedIndex; use serde::{Deserialize, Serialize}; use sql_generation::{ @@ -26,7 +27,7 @@ use turso_core::{Connection, Result, StepResult}; use crate::{ SimulatorEnv, - generation::Shadow, + generation::{Shadow, property::possiple_properties, query::possible_queries}, model::Query, runner::env::{ShadowTablesMut, SimConnection, SimulationType}, }; @@ -1077,26 +1078,27 @@ fn random_create(rng: &mut R, env: &SimulatorEnv, conn_index: usiz Interactions::new(conn_index, InteractionsType::Query(Query::Create(create))) } -fn random_read(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { - Interactions::new( - conn_index, - InteractionsType::Query(Query::Select(Select::arbitrary( - rng, - &env.connection_context(conn_index), - ))), - ) +fn random_select(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { + if rng.random_bool(0.7) { + Interactions::new( + conn_index, + InteractionsType::Query(Query::Select(Select::arbitrary( + rng, + &env.connection_context(conn_index), + ))), + ) + } else { + // Random expression + Interactions::new( + conn_index, + InteractionsType::Query(Query::Select( + SelectFree::arbitrary(rng, &env.connection_context(conn_index)).0, + )), + ) + } } -fn random_expr(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { - Interactions::new( - conn_index, - InteractionsType::Query(Query::Select( - SelectFree::arbitrary(rng, &env.connection_context(conn_index)).0, - )), - ) -} - -fn random_write(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { +fn random_insert(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { Interactions::new( conn_index, InteractionsType::Query(Query::Insert(Insert::arbitrary( @@ -1164,14 +1166,14 @@ fn random_create_index( )) } -fn random_fault(rng: &mut R, env: &SimulatorEnv) -> Interactions { +fn random_fault(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { let faults = if env.opts.disable_reopen_database { vec![Fault::Disconnect] } else { vec![Fault::Disconnect, Fault::ReopenDatabase] }; let fault = faults[rng.random_range(0..faults.len())]; - Interactions::new(env.choose_conn(rng), InteractionsType::Fault(fault)) + Interactions::new(conn_index, InteractionsType::Fault(fault)) } impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { @@ -1186,10 +1188,24 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { &stats, env.profile.experimental_mvcc, ); + + // TODO: find a way to be more efficient and pass the weights and properties down to the ArbitraryFrom functions + let queries = possible_queries(conn_ctx.tables()); + let query_weights = + WeightedIndex::new(queries.iter().map(|query| query.weight(&remaining_))).unwrap(); + + let properties = possiple_properties(conn_ctx.tables()); + let property_weights = WeightedIndex::new( + properties + .iter() + .map(|property| property.weight(env, &remaining_, conn_ctx.opts())), + ) + .unwrap(); + frequency( vec![ ( - u32::min(remaining_.select, remaining_.insert) + remaining_.create, + property_weights.total_weight(), Box::new(|rng: &mut R| { Interactions::new( conn_index, @@ -1202,52 +1218,25 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { }), ), ( - remaining_.select, - Box::new(|rng: &mut R| random_read(rng, env, conn_index)), - ), - ( - remaining_.select / 3, - Box::new(|rng: &mut R| random_expr(rng, env, conn_index)), - ), - ( - remaining_.insert, - Box::new(|rng: &mut R| random_write(rng, env, conn_index)), - ), - ( - remaining_.create, - Box::new(|rng: &mut R| random_create(rng, env, conn_index)), - ), - ( - remaining_.create_index, + query_weights.total_weight(), Box::new(|rng: &mut R| { - if let Some(interaction) = random_create_index(rng, env, conn_index) { - interaction - } else { - // if no tables exist, we can't create an index, so fallback to creating a table - random_create(rng, env, conn_index) - } + Interactions::new( + conn_index, + InteractionsType::Query(Query::arbitrary_from( + rng, + conn_ctx, + &remaining_, + )), + ) }), ), - ( - remaining_.delete, - Box::new(|rng: &mut R| random_delete(rng, env, conn_index)), - ), - ( - remaining_.update, - Box::new(|rng: &mut R| random_update(rng, env, conn_index)), - ), - ( - // remaining_.drop, - 0, - Box::new(|rng: &mut R| random_drop(rng, env, conn_index)), - ), ( remaining_ .select .min(remaining_.insert) .min(remaining_.create) .max(1), - Box::new(|rng: &mut R| random_fault(rng, env)), + Box::new(|rng: &mut R| random_fault(rng, env, conn_index)), ), ], rng, diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index a113dc939..8fe3f486f 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1,6 +1,7 @@ +use rand::distr::{Distribution, weighted::WeightedIndex}; use serde::{Deserialize, Serialize}; use sql_generation::{ - generation::{Arbitrary, ArbitraryFrom, GenerationContext, frequency, pick, pick_index}, + generation::{Arbitrary, ArbitraryFrom, GenerationContext, Opts, pick, pick_index}, model::{ query::{ Create, Delete, Drop, Insert, Select, @@ -9,16 +10,17 @@ use sql_generation::{ transaction::{Begin, Commit, Rollback}, update::Update, }, - table::SimValue, + table::{SimValue, Table}, }, }; +use strum::IntoEnumIterator; use turso_core::{LimboError, types}; use turso_parser::ast::{self, Distinctness}; use crate::{ common::print_diff, - generation::{Shadow as _, plan::InteractionType}, - model::Query, + generation::{Shadow as _, plan::InteractionType, query::possible_queries}, + model::{Query, QueryCapabilities, QueryDiscriminants}, profiles::query::QueryProfile, runner::env::SimulatorEnv, }; @@ -27,7 +29,8 @@ use super::plan::{Assertion, Interaction, InteractionStats, ResultSet}; /// Properties are representations of executable specifications /// about the database behavior. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, strum::EnumDiscriminants)] +#[strum_discriminants(derive(strum::EnumIter))] pub enum Property { /// Insert-Select is a property in which the inserted row /// must be in the resulting rows of a select query that has a @@ -1308,7 +1311,9 @@ fn property_insert_values_select( fn property_read_your_updates_back( rng: &mut R, + _remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { // e.g. UPDATE t SET a=1, b=2 WHERE c=1; let update = Update::arbitrary(rng, ctx); @@ -1330,7 +1335,9 @@ fn property_read_your_updates_back( fn property_table_has_expected_content( rng: &mut R, + _remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { // Get a random table let table = pick(ctx.tables(), rng); @@ -1339,7 +1346,12 @@ fn property_table_has_expected_content( } } -fn property_select_limit(rng: &mut R, ctx: &impl GenerationContext) -> Property { +fn property_select_limit( + rng: &mut R, + _remaining: &Remaining, + ctx: &impl GenerationContext, + _mvcc: bool, +) -> Property { // Get a random table let table = pick(ctx.tables(), rng); // Select the table @@ -1357,6 +1369,7 @@ fn property_double_create_failure( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { // Create the table let create_query = Create::arbitrary(rng, ctx); @@ -1389,6 +1402,7 @@ fn property_delete_select( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { // Get a random table let table = pick(ctx.tables(), rng); @@ -1447,6 +1461,7 @@ fn property_drop_select( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { // Get a random table let table = pick(ctx.tables(), rng); @@ -1480,7 +1495,9 @@ fn property_drop_select( fn property_select_select_optimizer( rng: &mut R, + _remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { // Get a random table let table = pick(ctx.tables(), rng); @@ -1501,7 +1518,9 @@ fn property_select_select_optimizer( fn property_where_true_false_null( rng: &mut R, + _remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { // Get a random table let table = pick(ctx.tables(), rng); @@ -1520,7 +1539,9 @@ fn property_where_true_false_null( fn property_union_all_preserves_cardinality( rng: &mut R, + _remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { // Get a random table let table = pick(ctx.tables(), rng); @@ -1547,6 +1568,7 @@ fn property_fsync_no_wait( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { Property::FsyncNoWait { query: Query::arbitrary_from(rng, ctx, remaining), @@ -1558,6 +1580,7 @@ fn property_faulty_query( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, + _mvcc: bool, ) -> Property { Property::FaultyQuery { query: Query::arbitrary_from(rng, ctx, remaining), @@ -1565,6 +1588,161 @@ fn property_faulty_query( } } +type PropertyGenFunc = fn(&mut R, &Remaining, &G, bool) -> Property; + +impl PropertyDiscriminants { + pub fn gen_function(&self) -> PropertyGenFunc + where + R: rand::Rng, + G: GenerationContext, + { + match self { + PropertyDiscriminants::InsertValuesSelect => property_insert_values_select, + PropertyDiscriminants::ReadYourUpdatesBack => property_read_your_updates_back, + PropertyDiscriminants::TableHasExpectedContent => property_table_has_expected_content, + PropertyDiscriminants::DoubleCreateFailure => property_double_create_failure, + PropertyDiscriminants::SelectLimit => property_select_limit, + PropertyDiscriminants::DeleteSelect => property_delete_select, + PropertyDiscriminants::DropSelect => property_drop_select, + PropertyDiscriminants::SelectSelectOptimizer => property_select_select_optimizer, + PropertyDiscriminants::WhereTrueFalseNull => property_where_true_false_null, + PropertyDiscriminants::UNIONAllPreservesCardinality => { + property_union_all_preserves_cardinality + } + PropertyDiscriminants::FsyncNoWait => property_fsync_no_wait, + PropertyDiscriminants::FaultyQuery => property_faulty_query, + PropertyDiscriminants::Queries => { + unreachable!("should not try to generate queries property") + } + } + } + + pub fn weight(&self, env: &SimulatorEnv, remaining: &Remaining, opts: &Opts) -> u32 { + match self { + PropertyDiscriminants::InsertValuesSelect => { + if !env.opts.disable_insert_values_select { + u32::min(remaining.select, remaining.insert).max(1) + } else { + 0 + } + } + PropertyDiscriminants::ReadYourUpdatesBack => { + u32::min(remaining.select, remaining.insert).max(1) + } + PropertyDiscriminants::TableHasExpectedContent => remaining.select.max(1), + PropertyDiscriminants::DoubleCreateFailure => { + if !env.opts.disable_double_create_failure { + remaining.create / 2 + } else { + 0 + } + } + PropertyDiscriminants::SelectLimit => { + if !env.opts.disable_select_limit { + remaining.select + } else { + 0 + } + } + PropertyDiscriminants::DeleteSelect => { + if !env.opts.disable_delete_select { + u32::min(remaining.select, remaining.insert).min(remaining.delete) + } else { + 0 + } + } + PropertyDiscriminants::DropSelect => { + if !env.opts.disable_drop_select { + // remaining.drop + 0 + } else { + 0 + } + } + PropertyDiscriminants::SelectSelectOptimizer => { + if !env.opts.disable_select_optimizer { + remaining.select / 2 + } else { + 0 + } + } + PropertyDiscriminants::WhereTrueFalseNull => { + if opts.indexes && !env.opts.disable_where_true_false_null { + remaining.select / 2 + } else { + 0 + } + } + PropertyDiscriminants::UNIONAllPreservesCardinality => { + if opts.indexes && !env.opts.disable_union_all_preserves_cardinality { + remaining.select / 3 + } else { + 0 + } + } + PropertyDiscriminants::FsyncNoWait => { + if env.profile.io.enable && !env.opts.disable_fsync_no_wait { + 50 // Freestyle number + } else { + 0 + } + } + PropertyDiscriminants::FaultyQuery => { + if env.profile.io.enable + && env.profile.io.fault.enable + && !env.opts.disable_faulty_query + { + 20 + } else { + 0 + } + } + PropertyDiscriminants::Queries => { + unreachable!("queries property should not be generated") + } + } + } + + fn can_generate(queries: &[QueryDiscriminants]) -> Vec { + let queries_capabilities = QueryCapabilities::from_list_queries(queries); + + PropertyDiscriminants::iter() + .filter(|property| queries_capabilities.contains(property.requirements())) + .collect() + } + + pub const fn requirements(&self) -> QueryCapabilities { + match self { + PropertyDiscriminants::InsertValuesSelect => { + QueryCapabilities::SELECT.union(QueryCapabilities::INSERT) + } + PropertyDiscriminants::ReadYourUpdatesBack => { + QueryCapabilities::SELECT.union(QueryCapabilities::UPDATE) + } + PropertyDiscriminants::TableHasExpectedContent => QueryCapabilities::SELECT, + PropertyDiscriminants::DoubleCreateFailure => QueryCapabilities::CREATE, + PropertyDiscriminants::SelectLimit => QueryCapabilities::SELECT, + PropertyDiscriminants::DeleteSelect => { + QueryCapabilities::SELECT.union(QueryCapabilities::DELETE) + } + PropertyDiscriminants::DropSelect => { + QueryCapabilities::SELECT.union(QueryCapabilities::DROP) + } + PropertyDiscriminants::SelectSelectOptimizer => QueryCapabilities::SELECT, + PropertyDiscriminants::WhereTrueFalseNull => QueryCapabilities::SELECT, + PropertyDiscriminants::UNIONAllPreservesCardinality => QueryCapabilities::SELECT, + PropertyDiscriminants::FsyncNoWait => QueryCapabilities::all(), + PropertyDiscriminants::FaultyQuery => QueryCapabilities::all(), + PropertyDiscriminants::Queries => panic!("queries property should not be generated"), + } + } +} + +pub fn possiple_properties(tables: &[Table]) -> Vec { + let queries = possible_queries(tables); + PropertyDiscriminants::can_generate(queries) +} + impl ArbitraryFrom<(&SimulatorEnv, &InteractionStats)> for Property { fn arbitrary_from( rng: &mut R, @@ -1579,110 +1757,19 @@ impl ArbitraryFrom<(&SimulatorEnv, &InteractionStats)> for Property { env.profile.experimental_mvcc, ); - #[allow(clippy::type_complexity)] - let choices: Vec<(_, Box Property>)> = vec![ - ( - if !env.opts.disable_insert_values_select { - u32::min(remaining_.select, remaining_.insert).max(1) - } else { - 0 - }, - Box::new(|rng: &mut R| { - property_insert_values_select( - rng, - &remaining_, - conn_ctx, - env.profile.experimental_mvcc, - ) - }), - ), - ( - remaining_.select.max(1), - Box::new(|rng: &mut R| property_table_has_expected_content(rng, conn_ctx)), - ), - ( - u32::min(remaining_.select, remaining_.insert).max(1), - Box::new(|rng: &mut R| property_read_your_updates_back(rng, conn_ctx)), - ), - ( - if !env.opts.disable_double_create_failure { - remaining_.create / 2 - } else { - 0 - }, - Box::new(|rng: &mut R| property_double_create_failure(rng, &remaining_, conn_ctx)), - ), - ( - if !env.opts.disable_select_limit { - remaining_.select - } else { - 0 - }, - Box::new(|rng: &mut R| property_select_limit(rng, conn_ctx)), - ), - ( - if !env.opts.disable_delete_select { - u32::min(remaining_.select, remaining_.insert).min(remaining_.delete) - } else { - 0 - }, - Box::new(|rng: &mut R| property_delete_select(rng, &remaining_, conn_ctx)), - ), - ( - if !env.opts.disable_drop_select { - // remaining_.drop - 0 - } else { - 0 - }, - Box::new(|rng: &mut R| property_drop_select(rng, &remaining_, conn_ctx)), - ), - ( - if !env.opts.disable_select_optimizer { - remaining_.select / 2 - } else { - 0 - }, - Box::new(|rng: &mut R| property_select_select_optimizer(rng, conn_ctx)), - ), - ( - if opts.indexes && !env.opts.disable_where_true_false_null { - remaining_.select / 2 - } else { - 0 - }, - Box::new(|rng: &mut R| property_where_true_false_null(rng, conn_ctx)), - ), - ( - if opts.indexes && !env.opts.disable_union_all_preserves_cardinality { - remaining_.select / 3 - } else { - 0 - }, - Box::new(|rng: &mut R| property_union_all_preserves_cardinality(rng, conn_ctx)), - ), - ( - if env.profile.io.enable && !env.opts.disable_fsync_no_wait { - 50 // Freestyle number - } else { - 0 - }, - Box::new(|rng: &mut R| property_fsync_no_wait(rng, &remaining_, conn_ctx)), - ), - ( - if env.profile.io.enable - && env.profile.io.fault.enable - && !env.opts.disable_faulty_query - { - 20 - } else { - 0 - }, - Box::new(|rng: &mut R| property_faulty_query(rng, &remaining_, conn_ctx)), - ), - ]; + let properties = possiple_properties(conn_ctx.tables()); + let weights = WeightedIndex::new( + properties + .iter() + .map(|property| property.weight(env, &remaining_, opts)), + ) + .unwrap(); - frequency(choices, rng) + let idx = weights.sample(rng); + let property_fn = properties[idx].gen_function(); + let property = (property_fn)(rng, &remaining_, conn_ctx, env.profile.experimental_mvcc); + + property } } diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index f7d5ac478..5bb3a62ef 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -76,18 +76,12 @@ fn random_create_index(rng: &mut R, conn_ctx: &impl GenerationCont /// Possible queries that can be generated given the table state /// /// Does not take into account transactional statements -pub fn possible_queries(tables: &[Table]) -> Vec { - let mut queries = vec![QueryDiscriminants::Select, QueryDiscriminants::Create]; - if !tables.is_empty() { - queries.extend([ - QueryDiscriminants::Insert, - QueryDiscriminants::Update, - QueryDiscriminants::Delete, - QueryDiscriminants::Drop, - QueryDiscriminants::CreateIndex, - ]); +pub const fn possible_queries(tables: &[Table]) -> &'static [QueryDiscriminants] { + if tables.is_empty() { + &[QueryDiscriminants::Select, QueryDiscriminants::Create] + } else { + QueryDiscriminants::ALL_NO_TRANSACTION } - queries } type QueryGenFunc = fn(&mut R, &G) -> Query; diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 807e0f2a1..510922f6b 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -1,6 +1,7 @@ use std::fmt::Display; use anyhow::Context; +use bitflags::bitflags; use indexmap::IndexSet; use itertools::Itertools; use serde::{Deserialize, Serialize}; @@ -19,6 +20,7 @@ use crate::{generation::Shadow, runner::env::ShadowTablesMut}; // This type represents the potential queries on the database. #[derive(Debug, Clone, Serialize, Deserialize, strum::EnumDiscriminants)] +#[strum_discriminants(derive(strum::VariantArray, strum::EnumIter))] pub enum Query { Create(Create), Select(Select), @@ -115,6 +117,74 @@ impl Shadow for Query { } } +bitflags! { + pub struct QueryCapabilities: u32 { + const CREATE = 1 << 0; + const SELECT = 1 << 1; + const INSERT = 1 << 2; + const DELETE = 1 << 3; + const UPDATE = 1 << 4; + const DROP = 1 << 5; + const CREATE_INDEX = 1 << 6; + } +} + +impl QueryCapabilities { + // TODO: can be const fn in the future + pub fn from_list_queries(queries: &[QueryDiscriminants]) -> Self { + queries + .iter() + .fold(Self::empty(), |accum, q| accum.union(q.into())) + } +} + +impl From<&QueryDiscriminants> for QueryCapabilities { + fn from(value: &QueryDiscriminants) -> Self { + (*value).into() + } +} + +impl From for QueryCapabilities { + fn from(value: QueryDiscriminants) -> Self { + match value { + QueryDiscriminants::Create => Self::CREATE, + QueryDiscriminants::Select => Self::SELECT, + QueryDiscriminants::Insert => Self::INSERT, + QueryDiscriminants::Delete => Self::DELETE, + QueryDiscriminants::Update => Self::UPDATE, + QueryDiscriminants::Drop => Self::DROP, + QueryDiscriminants::CreateIndex => Self::CREATE_INDEX, + QueryDiscriminants::Begin + | QueryDiscriminants::Commit + | QueryDiscriminants::Rollback => { + unreachable!("QueryCapabilities do not apply to transaction queries") + } + } + } +} + +impl QueryDiscriminants { + pub const ALL_NO_TRANSACTION: &[QueryDiscriminants] = &[ + QueryDiscriminants::Select, + QueryDiscriminants::Create, + QueryDiscriminants::Insert, + QueryDiscriminants::Update, + QueryDiscriminants::Delete, + QueryDiscriminants::Drop, + QueryDiscriminants::CreateIndex, + ]; + + #[inline] + pub fn is_transaction(&self) -> bool { + matches!(self, Self::Begin | Self::Commit | Self::Rollback) + } + + #[inline] + pub fn is_ddl(&self) -> bool { + matches!(self, Self::Create | Self::CreateIndex | Self::Drop) + } +} + impl Shadow for Create { type Result = anyhow::Result>>; From a5845285be5374d709dc689158ea7191b1ffe676 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 5 Oct 2025 16:55:48 -0300 Subject: [PATCH 026/428] remove unnecessary functions --- simulator/generation/plan.rs | 106 +---------------------------------- 1 file changed, 2 insertions(+), 104 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index fb03ee42a..a4bf7777e 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -12,12 +12,11 @@ use rand::distr::weighted::WeightedIndex; use serde::{Deserialize, Serialize}; use sql_generation::{ - generation::{Arbitrary, ArbitraryFrom, GenerationContext, frequency, query::SelectFree}, + generation::{Arbitrary, ArbitraryFrom, GenerationContext, frequency}, model::{ query::{ - Create, CreateIndex, Delete, Drop, Insert, Select, + Create, transaction::{Begin, Commit}, - update::Update, }, table::SimValue, }, @@ -1065,107 +1064,6 @@ fn reopen_database(env: &mut SimulatorEnv) { }; } -fn random_create(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { - let conn_ctx = env.connection_context(conn_index); - let mut create = Create::arbitrary(rng, &conn_ctx); - while conn_ctx - .tables() - .iter() - .any(|t| t.name == create.table.name) - { - create = Create::arbitrary(rng, &conn_ctx); - } - Interactions::new(conn_index, InteractionsType::Query(Query::Create(create))) -} - -fn random_select(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { - if rng.random_bool(0.7) { - Interactions::new( - conn_index, - InteractionsType::Query(Query::Select(Select::arbitrary( - rng, - &env.connection_context(conn_index), - ))), - ) - } else { - // Random expression - Interactions::new( - conn_index, - InteractionsType::Query(Query::Select( - SelectFree::arbitrary(rng, &env.connection_context(conn_index)).0, - )), - ) - } -} - -fn random_insert(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { - Interactions::new( - conn_index, - InteractionsType::Query(Query::Insert(Insert::arbitrary( - rng, - &env.connection_context(conn_index), - ))), - ) -} - -fn random_delete(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { - Interactions::new( - conn_index, - InteractionsType::Query(Query::Delete(Delete::arbitrary( - rng, - &env.connection_context(conn_index), - ))), - ) -} - -fn random_update(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { - Interactions::new( - conn_index, - InteractionsType::Query(Query::Update(Update::arbitrary( - rng, - &env.connection_context(conn_index), - ))), - ) -} - -fn random_drop(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { - Interactions::new( - conn_index, - InteractionsType::Query(Query::Drop(Drop::arbitrary( - rng, - &env.connection_context(conn_index), - ))), - ) -} - -fn random_create_index( - rng: &mut R, - env: &SimulatorEnv, - conn_index: usize, -) -> Option { - let conn_ctx = env.connection_context(conn_index); - if conn_ctx.tables().is_empty() { - return None; - } - let mut create_index = CreateIndex::arbitrary(rng, &conn_ctx); - while conn_ctx - .tables() - .iter() - .find(|t| t.name == create_index.table_name) - .expect("table should exist") - .indexes - .iter() - .any(|i| i == &create_index.index_name) - { - create_index = CreateIndex::arbitrary(rng, &conn_ctx); - } - - Some(Interactions::new( - conn_index, - InteractionsType::Query(Query::CreateIndex(create_index)), - )) -} - fn random_fault(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { let faults = if env.opts.disable_reopen_database { vec![Fault::Disconnect] From aec666872558580be382bcd598214cb9a3327a9f Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 5 Oct 2025 17:04:20 -0300 Subject: [PATCH 027/428] add `?Sized` to Arbitrary traits --- sql_generation/generation/expr.rs | 32 ++++++++------- sql_generation/generation/mod.rs | 41 ++++++++++++------- sql_generation/generation/predicate/binary.rs | 12 +++--- sql_generation/generation/predicate/mod.rs | 16 +++++--- sql_generation/generation/predicate/unary.rs | 16 ++++---- sql_generation/generation/query.rs | 26 ++++++------ sql_generation/generation/table.rs | 24 +++++------ 7 files changed, 93 insertions(+), 74 deletions(-) diff --git a/sql_generation/generation/expr.rs b/sql_generation/generation/expr.rs index 25bdc0d97..e60baf78f 100644 --- a/sql_generation/generation/expr.rs +++ b/sql_generation/generation/expr.rs @@ -14,7 +14,7 @@ impl Arbitrary for Box where T: Arbitrary, { - fn arbitrary(rng: &mut R, context: &C) -> Self { + fn arbitrary(rng: &mut R, context: &C) -> Self { Box::from(T::arbitrary(rng, context)) } } @@ -23,7 +23,7 @@ impl ArbitrarySized for Box where T: ArbitrarySized, { - fn arbitrary_sized( + fn arbitrary_sized( rng: &mut R, context: &C, size: usize, @@ -36,7 +36,7 @@ impl ArbitrarySizedFrom for Box where T: ArbitrarySizedFrom, { - fn arbitrary_sized_from( + fn arbitrary_sized_from( rng: &mut R, context: &C, t: A, @@ -50,7 +50,7 @@ impl Arbitrary for Option where T: Arbitrary, { - fn arbitrary(rng: &mut R, context: &C) -> Self { + fn arbitrary(rng: &mut R, context: &C) -> Self { rng.random_bool(0.5).then_some(T::arbitrary(rng, context)) } } @@ -59,7 +59,7 @@ impl ArbitrarySizedFrom for Option where T: ArbitrarySizedFrom, { - fn arbitrary_sized_from( + fn arbitrary_sized_from( rng: &mut R, context: &C, t: A, @@ -74,7 +74,11 @@ impl ArbitraryFrom for Vec where T: ArbitraryFrom, { - fn arbitrary_from(rng: &mut R, context: &C, t: A) -> Self { + fn arbitrary_from( + rng: &mut R, + context: &C, + t: A, + ) -> Self { let size = rng.random_range(0..5); (0..size) .map(|_| T::arbitrary_from(rng, context, t)) @@ -84,7 +88,7 @@ where // Freestyling generation impl ArbitrarySized for Expr { - fn arbitrary_sized( + fn arbitrary_sized( rng: &mut R, context: &C, size: usize, @@ -188,7 +192,7 @@ impl ArbitrarySized for Expr { } impl Arbitrary for Operator { - fn arbitrary(rng: &mut R, _context: &C) -> Self { + fn arbitrary(rng: &mut R, _context: &C) -> Self { let choices = [ Operator::Add, Operator::And, @@ -219,7 +223,7 @@ impl Arbitrary for Operator { } impl Arbitrary for Type { - fn arbitrary(rng: &mut R, _context: &C) -> Self { + fn arbitrary(rng: &mut R, _context: &C) -> Self { let name = pick(&["INT", "INTEGER", "REAL", "TEXT", "BLOB", "ANY"], rng).to_string(); Self { name, @@ -229,7 +233,7 @@ impl Arbitrary for Type { } impl Arbitrary for QualifiedName { - fn arbitrary(rng: &mut R, context: &C) -> Self { + fn arbitrary(rng: &mut R, context: &C) -> Self { // TODO: for now just generate table name let table_idx = pick_index(context.tables().len(), rng); let table = &context.tables()[table_idx]; @@ -243,7 +247,7 @@ impl Arbitrary for QualifiedName { } impl Arbitrary for LikeOperator { - fn arbitrary(rng: &mut R, _t: &C) -> Self { + fn arbitrary(rng: &mut R, _t: &C) -> Self { let choice = rng.random_range(0..4); match choice { 0 => LikeOperator::Glob, @@ -257,7 +261,7 @@ impl Arbitrary for LikeOperator { // Current implementation does not take into account the columns affinity nor if table is Strict impl Arbitrary for ast::Literal { - fn arbitrary(rng: &mut R, _t: &C) -> Self { + fn arbitrary(rng: &mut R, _t: &C) -> Self { loop { let choice = rng.random_range(0..5); let lit = match choice { @@ -284,7 +288,7 @@ impl Arbitrary for ast::Literal { // Creates a litreal value impl ArbitraryFrom<&Vec<&SimValue>> for ast::Expr { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, _context: &C, values: &Vec<&SimValue>, @@ -299,7 +303,7 @@ impl ArbitraryFrom<&Vec<&SimValue>> for ast::Expr { } impl Arbitrary for UnaryOperator { - fn arbitrary(rng: &mut R, _t: &C) -> Self { + fn arbitrary(rng: &mut R, _t: &C) -> Self { let choice = rng.random_range(0..4); match choice { 0 => Self::BitwiseNot, diff --git a/sql_generation/generation/mod.rs b/sql_generation/generation/mod.rs index 25f353673..1292b3448 100644 --- a/sql_generation/generation/mod.rs +++ b/sql_generation/generation/mod.rs @@ -19,7 +19,7 @@ type Choice<'a, R, T> = (usize, Box Option + 'a>); /// the possible values of the type, with a bias towards smaller values for /// practicality. pub trait Arbitrary { - fn arbitrary(rng: &mut R, context: &C) -> Self; + fn arbitrary(rng: &mut R, context: &C) -> Self; } /// ArbitrarySized trait for generating random values of a specific size @@ -29,8 +29,11 @@ pub trait Arbitrary { /// must fit in the given size. This is useful for generating values that are /// constrained by a specific size, such as integers or strings. pub trait ArbitrarySized { - fn arbitrary_sized(rng: &mut R, context: &C, size: usize) - -> Self; + fn arbitrary_sized( + rng: &mut R, + context: &C, + size: usize, + ) -> Self; } /// ArbitraryFrom trait for generating random values from a given value @@ -39,7 +42,11 @@ pub trait ArbitrarySized { /// such as generating an integer within an interval, or a value that fits in a table, /// or a predicate satisfying a given table row. pub trait ArbitraryFrom { - fn arbitrary_from(rng: &mut R, context: &C, t: T) -> Self; + fn arbitrary_from( + rng: &mut R, + context: &C, + t: T, + ) -> Self; } /// ArbitrarySizedFrom trait for generating random values from a given value @@ -51,7 +58,7 @@ pub trait ArbitraryFrom { /// This is useful for generating values that are constrained by a specific size, /// such as integers or strings, while still being dependent on the given value. pub trait ArbitrarySizedFrom { - fn arbitrary_sized_from( + fn arbitrary_sized_from( rng: &mut R, context: &C, t: T, @@ -61,7 +68,7 @@ pub trait ArbitrarySizedFrom { /// ArbitraryFromMaybe trait for fallibally generating random values from a given value pub trait ArbitraryFromMaybe { - fn arbitrary_from_maybe( + fn arbitrary_from_maybe( rng: &mut R, context: &C, t: T, @@ -77,7 +84,11 @@ pub trait ArbitraryFromMaybe { /// the operations we require for the implementation. // todo: switch to a simpler type signature that can accommodate all integer and float types, which // should be enough for our purposes. -pub fn frequency( +pub fn frequency< + T, + R: Rng + ?Sized, + N: Sum + PartialOrd + Copy + Default + SampleUniform + SubAssign, +>( choices: Vec<(N, ArbitraryFromFunc)>, rng: &mut R, ) -> T { @@ -95,7 +106,7 @@ pub fn frequency(choices: Vec>, rng: &mut R) -> T { +pub fn one_of(choices: Vec>, rng: &mut R) -> T { let index = rng.random_range(0..choices.len()); choices[index](rng) } @@ -103,7 +114,7 @@ pub fn one_of(choices: Vec>, rng: &mut R) -> /// backtrack is a helper function for composing different "failable" generators. /// The function takes a list of functions that return an Option, along with number of retries /// to make before giving up. -pub fn backtrack(mut choices: Vec>, rng: &mut R) -> Option { +pub fn backtrack(mut choices: Vec>, rng: &mut R) -> Option { loop { // If there are no more choices left, we give up let choices_ = choices @@ -129,20 +140,20 @@ pub fn backtrack(mut choices: Vec>, rng: &mut R) -> Opti } /// pick is a helper function for uniformly picking a random element from a slice -pub fn pick<'a, T, R: Rng>(choices: &'a [T], rng: &mut R) -> &'a T { +pub fn pick<'a, T, R: Rng + ?Sized>(choices: &'a [T], rng: &mut R) -> &'a T { let index = rng.random_range(0..choices.len()); &choices[index] } /// pick_index is typically used for picking an index from a slice to later refer to the element /// at that index. -pub fn pick_index(choices: usize, rng: &mut R) -> usize { +pub fn pick_index(choices: usize, rng: &mut R) -> usize { rng.random_range(0..choices) } /// pick_n_unique is a helper function for uniformly picking N unique elements from a range. /// The elements themselves are usize, typically representing indices. -pub fn pick_n_unique( +pub fn pick_n_unique( range: std::ops::Range, n: usize, rng: &mut R, @@ -155,7 +166,7 @@ pub fn pick_n_unique( /// gen_random_text uses `anarchist_readable_name_generator_lib` to generate random /// readable names for tables, columns, text values etc. -pub fn gen_random_text(rng: &mut T) -> String { +pub fn gen_random_text(rng: &mut R) -> String { let big_text = rng.random_ratio(1, 1000); if big_text { // let max_size: u64 = 2 * 1024 * 1024 * 1024; @@ -172,10 +183,10 @@ pub fn gen_random_text(rng: &mut T) -> String { } } -pub fn pick_unique<'a, T: PartialEq>( +pub fn pick_unique<'a, T: PartialEq, R: Rng + ?Sized>( items: &'a [T], count: usize, - rng: &mut impl rand::Rng, + rng: &mut R, ) -> impl Iterator { let mut picked: Vec<&T> = Vec::new(); while picked.len() < count { diff --git a/sql_generation/generation/predicate/binary.rs b/sql_generation/generation/predicate/binary.rs index 9867a561a..37b2e4e93 100644 --- a/sql_generation/generation/predicate/binary.rs +++ b/sql_generation/generation/predicate/binary.rs @@ -17,7 +17,7 @@ use crate::{ impl Predicate { /// Generate an [ast::Expr::Binary] [Predicate] from a column and [SimValue] - pub fn from_column_binary( + pub fn from_column_binary( rng: &mut R, context: &C, column_name: &str, @@ -55,7 +55,7 @@ impl Predicate { } /// Produces a true [ast::Expr::Binary] [Predicate] that is true for the provided row in the given table - pub fn true_binary( + pub fn true_binary( rng: &mut R, context: &C, t: &Table, @@ -168,7 +168,7 @@ impl Predicate { } /// Produces an [ast::Expr::Binary] [Predicate] that is false for the provided row in the given table - pub fn false_binary( + pub fn false_binary( rng: &mut R, context: &C, t: &Table, @@ -253,7 +253,7 @@ impl Predicate { impl SimplePredicate { /// Generates a true [ast::Expr::Binary] [SimplePredicate] from a [TableContext] for a row in the table - pub fn true_binary( + pub fn true_binary( rng: &mut R, context: &C, table: &T, @@ -311,7 +311,7 @@ impl SimplePredicate { } /// Generates a false [ast::Expr::Binary] [SimplePredicate] from a [TableContext] for a row in the table - pub fn false_binary( + pub fn false_binary( rng: &mut R, context: &C, table: &T, @@ -373,7 +373,7 @@ impl CompoundPredicate { /// Decide if you want to create an AND or an OR /// /// Creates a Compound Predicate that is TRUE or FALSE for at least a single row - pub fn from_table_binary( + pub fn from_table_binary( rng: &mut R, context: &C, table: &T, diff --git a/sql_generation/generation/predicate/mod.rs b/sql_generation/generation/predicate/mod.rs index 78fa30ae4..d0dd375bb 100644 --- a/sql_generation/generation/predicate/mod.rs +++ b/sql_generation/generation/predicate/mod.rs @@ -21,7 +21,7 @@ struct CompoundPredicate(Predicate); struct SimplePredicate(Predicate); impl, T: TableContext> ArbitraryFrom<(&T, A, bool)> for SimplePredicate { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, context: &C, (table, row, predicate_value): (&T, A, bool), @@ -46,7 +46,7 @@ impl, T: TableContext> ArbitraryFrom<(&T, A, bool)> for Sim } impl ArbitraryFrom<(&T, bool)> for CompoundPredicate { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, context: &C, (table, predicate_value): (&T, bool), @@ -56,14 +56,18 @@ impl ArbitraryFrom<(&T, bool)> for CompoundPredicate { } impl ArbitraryFrom<&T> for Predicate { - fn arbitrary_from(rng: &mut R, context: &C, table: &T) -> Self { + fn arbitrary_from( + rng: &mut R, + context: &C, + table: &T, + ) -> Self { let predicate_value = rng.random_bool(0.5); Predicate::arbitrary_from(rng, context, (table, predicate_value)).parens() } } impl ArbitraryFrom<(&T, bool)> for Predicate { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, context: &C, (table, predicate_value): (&T, bool), @@ -73,7 +77,7 @@ impl ArbitraryFrom<(&T, bool)> for Predicate { } impl ArbitraryFrom<(&str, &SimValue)> for Predicate { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, context: &C, (column_name, value): (&str, &SimValue), @@ -83,7 +87,7 @@ impl ArbitraryFrom<(&str, &SimValue)> for Predicate { } impl ArbitraryFrom<(&Table, &Vec)> for Predicate { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, context: &C, (t, row): (&Table, &Vec), diff --git a/sql_generation/generation/predicate/unary.rs b/sql_generation/generation/predicate/unary.rs index 1cc0e0d24..31dfc2a7a 100644 --- a/sql_generation/generation/predicate/unary.rs +++ b/sql_generation/generation/predicate/unary.rs @@ -17,7 +17,7 @@ use crate::{ pub struct TrueValue(pub SimValue); impl ArbitraryFromMaybe<&SimValue> for TrueValue { - fn arbitrary_from_maybe( + fn arbitrary_from_maybe( _rng: &mut R, _context: &C, value: &SimValue, @@ -31,7 +31,7 @@ impl ArbitraryFromMaybe<&SimValue> for TrueValue { } impl ArbitraryFromMaybe<&Vec<&SimValue>> for TrueValue { - fn arbitrary_from_maybe( + fn arbitrary_from_maybe( rng: &mut R, context: &C, values: &Vec<&SimValue>, @@ -51,7 +51,7 @@ impl ArbitraryFromMaybe<&Vec<&SimValue>> for TrueValue { pub struct FalseValue(pub SimValue); impl ArbitraryFromMaybe<&SimValue> for FalseValue { - fn arbitrary_from_maybe( + fn arbitrary_from_maybe( _rng: &mut R, _context: &C, value: &SimValue, @@ -65,7 +65,7 @@ impl ArbitraryFromMaybe<&SimValue> for FalseValue { } impl ArbitraryFromMaybe<&Vec<&SimValue>> for FalseValue { - fn arbitrary_from_maybe( + fn arbitrary_from_maybe( rng: &mut R, context: &C, values: &Vec<&SimValue>, @@ -86,7 +86,7 @@ impl ArbitraryFromMaybe<&Vec<&SimValue>> for FalseValue { pub struct BitNotValue(pub SimValue); impl ArbitraryFromMaybe<(&SimValue, bool)> for BitNotValue { - fn arbitrary_from_maybe( + fn arbitrary_from_maybe( _rng: &mut R, _context: &C, (value, predicate): (&SimValue, bool), @@ -101,7 +101,7 @@ impl ArbitraryFromMaybe<(&SimValue, bool)> for BitNotValue { } impl ArbitraryFromMaybe<(&Vec<&SimValue>, bool)> for BitNotValue { - fn arbitrary_from_maybe( + fn arbitrary_from_maybe( rng: &mut R, context: &C, (values, predicate): (&Vec<&SimValue>, bool), @@ -121,7 +121,7 @@ impl ArbitraryFromMaybe<(&Vec<&SimValue>, bool)> for BitNotValue { // TODO: have some more complex generation with columns names here as well impl SimplePredicate { /// Generates a true [ast::Expr::Unary] [SimplePredicate] from a [TableContext] for some values in the table - pub fn true_unary( + pub fn true_unary( rng: &mut R, context: &C, _table: &T, @@ -187,7 +187,7 @@ impl SimplePredicate { } /// Generates a false [ast::Expr::Unary] [SimplePredicate] from a [TableContext] for a row in the table - pub fn false_unary( + pub fn false_unary( rng: &mut R, context: &C, _table: &T, diff --git a/sql_generation/generation/query.rs b/sql_generation/generation/query.rs index a0e0e47b0..f2264720e 100644 --- a/sql_generation/generation/query.rs +++ b/sql_generation/generation/query.rs @@ -18,7 +18,7 @@ use turso_parser::ast::{Expr, SortOrder}; use super::{backtrack, pick}; impl Arbitrary for Create { - fn arbitrary(rng: &mut R, context: &C) -> Self { + fn arbitrary(rng: &mut R, context: &C) -> Self { Create { table: Table::arbitrary(rng, context), } @@ -26,7 +26,7 @@ impl Arbitrary for Create { } impl Arbitrary for FromClause { - fn arbitrary(rng: &mut R, context: &C) -> Self { + fn arbitrary(rng: &mut R, context: &C) -> Self { let opts = &context.opts().query.from_clause; let weights = opts.as_weighted_index(); let num_joins = opts.joins[rng.sample(weights)].num_joins; @@ -85,7 +85,7 @@ impl Arbitrary for FromClause { } impl Arbitrary for SelectInner { - fn arbitrary(rng: &mut R, env: &C) -> Self { + fn arbitrary(rng: &mut R, env: &C) -> Self { let from = FromClause::arbitrary(rng, env); let tables = env.tables().clone(); let join_table = from.into_join_table(&tables); @@ -144,7 +144,7 @@ impl Arbitrary for SelectInner { } impl ArbitrarySized for SelectInner { - fn arbitrary_sized( + fn arbitrary_sized( rng: &mut R, env: &C, num_result_columns: usize, @@ -179,7 +179,7 @@ impl ArbitrarySized for SelectInner { } impl Arbitrary for Distinctness { - fn arbitrary(rng: &mut R, _context: &C) -> Self { + fn arbitrary(rng: &mut R, _context: &C) -> Self { match rng.random_range(0..=5) { 0..4 => Distinctness::All, _ => Distinctness::Distinct, @@ -188,7 +188,7 @@ impl Arbitrary for Distinctness { } impl Arbitrary for CompoundOperator { - fn arbitrary(rng: &mut R, _context: &C) -> Self { + fn arbitrary(rng: &mut R, _context: &C) -> Self { match rng.random_range(0..=1) { 0 => CompoundOperator::Union, 1 => CompoundOperator::UnionAll, @@ -203,7 +203,7 @@ impl Arbitrary for CompoundOperator { pub struct SelectFree(pub Select); impl Arbitrary for SelectFree { - fn arbitrary(rng: &mut R, env: &C) -> Self { + fn arbitrary(rng: &mut R, env: &C) -> Self { let expr = Predicate(Expr::arbitrary_sized(rng, env, 8)); let select = Select::expr(expr); Self(select) @@ -211,7 +211,7 @@ impl Arbitrary for SelectFree { } impl Arbitrary for Select { - fn arbitrary(rng: &mut R, env: &C) -> Self { + fn arbitrary(rng: &mut R, env: &C) -> Self { // Generate a number of selects based on the query size // If experimental indexes are enabled, we can have selects with compounds // Otherwise, we just have a single select with no compounds @@ -259,7 +259,7 @@ impl Arbitrary for Select { } impl Arbitrary for Insert { - fn arbitrary(rng: &mut R, env: &C) -> Self { + fn arbitrary(rng: &mut R, env: &C) -> Self { let opts = &env.opts().query.insert; let gen_values = |rng: &mut R| { let table = pick(env.tables(), rng); @@ -300,7 +300,7 @@ impl Arbitrary for Insert { } impl Arbitrary for Delete { - fn arbitrary(rng: &mut R, env: &C) -> Self { + fn arbitrary(rng: &mut R, env: &C) -> Self { let table = pick(env.tables(), rng); Self { table: table.name.clone(), @@ -310,7 +310,7 @@ impl Arbitrary for Delete { } impl Arbitrary for Drop { - fn arbitrary(rng: &mut R, env: &C) -> Self { + fn arbitrary(rng: &mut R, env: &C) -> Self { let table = pick(env.tables(), rng); Self { table: table.name.clone(), @@ -319,7 +319,7 @@ impl Arbitrary for Drop { } impl Arbitrary for CreateIndex { - fn arbitrary(rng: &mut R, env: &C) -> Self { + fn arbitrary(rng: &mut R, env: &C) -> Self { assert!( !env.tables().is_empty(), "Cannot create an index when no tables exist in the environment." @@ -366,7 +366,7 @@ impl Arbitrary for CreateIndex { } impl Arbitrary for Update { - fn arbitrary(rng: &mut R, env: &C) -> Self { + fn arbitrary(rng: &mut R, env: &C) -> Self { let table = pick(env.tables(), rng); let num_cols = rng.random_range(1..=table.columns.len()); let columns = pick_unique(&table.columns, num_cols, rng); diff --git a/sql_generation/generation/table.rs b/sql_generation/generation/table.rs index ce0ff97f4..9f038a379 100644 --- a/sql_generation/generation/table.rs +++ b/sql_generation/generation/table.rs @@ -10,14 +10,14 @@ use crate::model::table::{Column, ColumnType, Name, SimValue, Table}; use super::ArbitraryFromMaybe; impl Arbitrary for Name { - fn arbitrary(rng: &mut R, _c: &C) -> Self { + fn arbitrary(rng: &mut R, _c: &C) -> Self { let name = readable_name_custom("_", rng); Name(name.replace("-", "_")) } } impl Arbitrary for Table { - fn arbitrary(rng: &mut R, context: &C) -> Self { + fn arbitrary(rng: &mut R, context: &C) -> Self { let opts = context.opts().table.clone(); let name = Name::arbitrary(rng, context).0; let large_table = @@ -45,7 +45,7 @@ impl Arbitrary for Table { } impl Arbitrary for Column { - fn arbitrary(rng: &mut R, context: &C) -> Self { + fn arbitrary(rng: &mut R, context: &C) -> Self { let name = Name::arbitrary(rng, context).0; let column_type = ColumnType::arbitrary(rng, context); Self { @@ -58,13 +58,13 @@ impl Arbitrary for Column { } impl Arbitrary for ColumnType { - fn arbitrary(rng: &mut R, _context: &C) -> Self { + fn arbitrary(rng: &mut R, _context: &C) -> Self { pick(&[Self::Integer, Self::Float, Self::Text, Self::Blob], rng).to_owned() } } impl ArbitraryFrom<&Table> for Vec { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, context: &C, table: &Table, @@ -79,7 +79,7 @@ impl ArbitraryFrom<&Table> for Vec { } impl ArbitraryFrom<&Vec<&SimValue>> for SimValue { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, _context: &C, values: &Vec<&Self>, @@ -93,7 +93,7 @@ impl ArbitraryFrom<&Vec<&SimValue>> for SimValue { } impl ArbitraryFrom<&ColumnType> for SimValue { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, _context: &C, column_type: &ColumnType, @@ -111,7 +111,7 @@ impl ArbitraryFrom<&ColumnType> for SimValue { pub struct LTValue(pub SimValue); impl ArbitraryFrom<&Vec<&SimValue>> for LTValue { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, context: &C, values: &Vec<&SimValue>, @@ -127,7 +127,7 @@ impl ArbitraryFrom<&Vec<&SimValue>> for LTValue { } impl ArbitraryFrom<&SimValue> for LTValue { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, _context: &C, value: &SimValue, @@ -181,7 +181,7 @@ impl ArbitraryFrom<&SimValue> for LTValue { pub struct GTValue(pub SimValue); impl ArbitraryFrom<&Vec<&SimValue>> for GTValue { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, context: &C, values: &Vec<&SimValue>, @@ -197,7 +197,7 @@ impl ArbitraryFrom<&Vec<&SimValue>> for GTValue { } impl ArbitraryFrom<&SimValue> for GTValue { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, _context: &C, value: &SimValue, @@ -251,7 +251,7 @@ impl ArbitraryFrom<&SimValue> for GTValue { pub struct LikeValue(pub SimValue); impl ArbitraryFromMaybe<&SimValue> for LikeValue { - fn arbitrary_from_maybe( + fn arbitrary_from_maybe( rng: &mut R, _context: &C, value: &SimValue, From b1c26505b8c5f058bc055212db4b822d57b37fec Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 5 Oct 2025 17:04:20 -0300 Subject: [PATCH 028/428] adjust Rng generic to include ?Sized + introduce WeightedDistribution trait --- simulator/generation/mod.rs | 15 ++++++ simulator/generation/plan.rs | 10 ++-- simulator/generation/property.rs | 38 ++++++-------- simulator/generation/query.rs | 85 +++++++++++++++++++++++--------- 4 files changed, 100 insertions(+), 48 deletions(-) diff --git a/simulator/generation/mod.rs b/simulator/generation/mod.rs index 80a2d0cff..6206c9a62 100644 --- a/simulator/generation/mod.rs +++ b/simulator/generation/mod.rs @@ -1,3 +1,6 @@ +use rand::distr::weighted::WeightedIndex; +use sql_generation::generation::GenerationContext; + use crate::runner::env::ShadowTablesMut; pub mod plan; @@ -17,3 +20,15 @@ pub(crate) trait Shadow { type Result; fn shadow(&self, tables: &mut ShadowTablesMut<'_>) -> Self::Result; } + +pub(super) trait WeightedDistribution { + type Item; + type GenItem; + fn items(&self) -> &[Self::Item]; + fn weights(&self) -> &WeightedIndex; + fn sample( + &self, + rng: &mut R, + context: &C, + ) -> Self::GenItem; +} diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index a4bf7777e..27ba49c66 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -1064,7 +1064,11 @@ fn reopen_database(env: &mut SimulatorEnv) { }; } -fn random_fault(rng: &mut R, env: &SimulatorEnv, conn_index: usize) -> Interactions { +fn random_fault( + rng: &mut R, + env: &SimulatorEnv, + conn_index: usize, +) -> Interactions { let faults = if env.opts.disable_reopen_database { vec![Fault::Disconnect] } else { @@ -1075,7 +1079,7 @@ fn random_fault(rng: &mut R, env: &SimulatorEnv, conn_index: usize } impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { - fn arbitrary_from( + fn arbitrary_from( rng: &mut R, conn_ctx: &C, (env, stats, conn_index): (&SimulatorEnv, InteractionStats, usize), @@ -1110,7 +1114,7 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { InteractionsType::Property(Property::arbitrary_from( rng, conn_ctx, - (env, &stats), + (env, &remaining_), )), ) }), diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 8fe3f486f..aeb6e5ea1 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1208,7 +1208,7 @@ pub(crate) fn remaining( } } -fn property_insert_values_select( +fn property_insert_values_select( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, @@ -1309,7 +1309,7 @@ fn property_insert_values_select( } } -fn property_read_your_updates_back( +fn property_read_your_updates_back( rng: &mut R, _remaining: &Remaining, ctx: &impl GenerationContext, @@ -1333,7 +1333,7 @@ fn property_read_your_updates_back( Property::ReadYourUpdatesBack { update, select } } -fn property_table_has_expected_content( +fn property_table_has_expected_content( rng: &mut R, _remaining: &Remaining, ctx: &impl GenerationContext, @@ -1346,7 +1346,7 @@ fn property_table_has_expected_content( } } -fn property_select_limit( +fn property_select_limit( rng: &mut R, _remaining: &Remaining, ctx: &impl GenerationContext, @@ -1365,7 +1365,7 @@ fn property_select_limit( Property::SelectLimit { select } } -fn property_double_create_failure( +fn property_double_create_failure( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, @@ -1398,7 +1398,7 @@ fn property_double_create_failure( } } -fn property_delete_select( +fn property_delete_select( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, @@ -1457,7 +1457,7 @@ fn property_delete_select( } } -fn property_drop_select( +fn property_drop_select( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, @@ -1493,7 +1493,7 @@ fn property_drop_select( } } -fn property_select_select_optimizer( +fn property_select_select_optimizer( rng: &mut R, _remaining: &Remaining, ctx: &impl GenerationContext, @@ -1516,7 +1516,7 @@ fn property_select_select_optimizer( } } -fn property_where_true_false_null( +fn property_where_true_false_null( rng: &mut R, _remaining: &Remaining, ctx: &impl GenerationContext, @@ -1537,7 +1537,7 @@ fn property_where_true_false_null( } } -fn property_union_all_preserves_cardinality( +fn property_union_all_preserves_cardinality( rng: &mut R, _remaining: &Remaining, ctx: &impl GenerationContext, @@ -1564,7 +1564,7 @@ fn property_union_all_preserves_cardinality( } } -fn property_fsync_no_wait( +fn property_fsync_no_wait( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, @@ -1576,7 +1576,7 @@ fn property_fsync_no_wait( } } -fn property_faulty_query( +fn property_faulty_query( rng: &mut R, remaining: &Remaining, ctx: &impl GenerationContext, @@ -1593,7 +1593,7 @@ type PropertyGenFunc = fn(&mut R, &Remaining, &G, bool) -> Property; impl PropertyDiscriminants { pub fn gen_function(&self) -> PropertyGenFunc where - R: rand::Rng, + R: rand::Rng + ?Sized, G: GenerationContext, { match self { @@ -1743,19 +1743,13 @@ pub fn possiple_properties(tables: &[Table]) -> Vec { PropertyDiscriminants::can_generate(queries) } -impl ArbitraryFrom<(&SimulatorEnv, &InteractionStats)> for Property { - fn arbitrary_from( +impl ArbitraryFrom<(&SimulatorEnv, &Remaining)> for Property { + fn arbitrary_from( rng: &mut R, conn_ctx: &C, - (env, stats): (&SimulatorEnv, &InteractionStats), + (env, remaining_): (&SimulatorEnv, &Remaining), ) -> Self { let opts = conn_ctx.opts(); - let remaining_ = remaining( - env.opts.max_interactions, - &env.profile.query, - stats, - env.profile.experimental_mvcc, - ); let properties = possiple_properties(conn_ctx.tables()); let weights = WeightedIndex::new( diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index 5bb3a62ef..88cd95126 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -1,4 +1,7 @@ -use crate::model::{Query, QueryDiscriminants}; +use crate::{ + generation::WeightedDistribution, + model::{Query, QueryDiscriminants}, +}; use rand::{ Rng, distr::{Distribution, weighted::WeightedIndex}, @@ -13,7 +16,7 @@ use sql_generation::{ use super::property::Remaining; -fn random_create(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { +fn random_create(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { let mut create = Create::arbitrary(rng, conn_ctx); while conn_ctx .tables() @@ -25,7 +28,7 @@ fn random_create(rng: &mut R, conn_ctx: &impl GenerationContext) - Query::Create(create) } -fn random_select(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { +fn random_select(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { if rng.random_bool(0.7) { Query::Select(Select::arbitrary(rng, conn_ctx)) } else { @@ -34,27 +37,30 @@ fn random_select(rng: &mut R, conn_ctx: &impl GenerationContext) - } } -fn random_insert(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { +fn random_insert(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { assert!(!conn_ctx.tables().is_empty()); Query::Insert(Insert::arbitrary(rng, conn_ctx)) } -fn random_delete(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { +fn random_delete(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { assert!(!conn_ctx.tables().is_empty()); Query::Delete(Delete::arbitrary(rng, conn_ctx)) } -fn random_update(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { +fn random_update(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { assert!(!conn_ctx.tables().is_empty()); Query::Update(Update::arbitrary(rng, conn_ctx)) } -fn random_drop(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { +fn random_drop(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { assert!(!conn_ctx.tables().is_empty()); Query::Drop(sql_generation::model::query::Drop::arbitrary(rng, conn_ctx)) } -fn random_create_index(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { +fn random_create_index( + rng: &mut R, + conn_ctx: &impl GenerationContext, +) -> Query { assert!(!conn_ctx.tables().is_empty()); let mut create_index = CreateIndex::arbitrary(rng, conn_ctx); @@ -89,7 +95,7 @@ type QueryGenFunc = fn(&mut R, &G) -> Query; impl QueryDiscriminants { pub fn gen_function(&self) -> QueryGenFunc where - R: rand::Rng, + R: rand::Rng + ?Sized, G: GenerationContext, { match self { @@ -126,20 +132,53 @@ impl QueryDiscriminants { } } -impl ArbitraryFrom<&Remaining> for Query { - fn arbitrary_from( - rng: &mut R, - context: &C, - remaining: &Remaining, - ) -> Self { - let queries = possible_queries(context.tables()); - let weights = +pub(super) struct QueryDistribution { + queries: &'static [QueryDiscriminants], + weights: WeightedIndex, +} + +impl QueryDistribution { + pub fn new(queries: &'static [QueryDiscriminants], remaining: &Remaining) -> Self { + let query_weights = WeightedIndex::new(queries.iter().map(|query| query.weight(remaining))).unwrap(); - - let idx = weights.sample(rng); - let query_fn = queries[idx].gen_function(); - let query = (query_fn)(rng, context); - - query + Self { + queries, + weights: query_weights, + } + } +} + +impl WeightedDistribution for QueryDistribution { + type Item = QueryDiscriminants; + type GenItem = Query; + + fn items(&self) -> &[Self::Item] { + self.queries + } + + fn weights(&self) -> &WeightedIndex { + &self.weights + } + + fn sample( + &self, + rng: &mut R, + ctx: &C, + ) -> Self::GenItem { + let weights = &self.weights; + + let idx = weights.sample(rng); + let query_fn = self.queries[idx].gen_function(); + (query_fn)(rng, ctx) + } +} + +impl ArbitraryFrom<&QueryDistribution> for Query { + fn arbitrary_from( + rng: &mut R, + context: &C, + query_distr: &QueryDistribution, + ) -> Self { + query_distr.sample(rng, context) } } From 91da12390d4c4d09c7cd5f2d66cb27058948b8ab Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 5 Oct 2025 19:04:34 -0300 Subject: [PATCH 029/428] refactor property generation to use query distribution and avoid more memory allocations --- simulator/generation/plan.rs | 28 ++++---- simulator/generation/property.rs | 114 ++++++++++++++++++++++--------- 2 files changed, 92 insertions(+), 50 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 27ba49c66..05373312f 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -8,7 +8,6 @@ use std::{ }; use indexmap::IndexSet; -use rand::distr::weighted::WeightedIndex; use serde::{Deserialize, Serialize}; use sql_generation::{ @@ -26,7 +25,11 @@ use turso_core::{Connection, Result, StepResult}; use crate::{ SimulatorEnv, - generation::{Shadow, property::possiple_properties, query::possible_queries}, + generation::{ + Shadow, WeightedDistribution, + property::PropertyDistribution, + query::{QueryDistribution, possible_queries}, + }, model::Query, runner::env::{ShadowTablesMut, SimConnection, SimulationType}, }; @@ -1091,43 +1094,36 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { env.profile.experimental_mvcc, ); - // TODO: find a way to be more efficient and pass the weights and properties down to the ArbitraryFrom functions let queries = possible_queries(conn_ctx.tables()); - let query_weights = - WeightedIndex::new(queries.iter().map(|query| query.weight(&remaining_))).unwrap(); + let query_distr = QueryDistribution::new(queries, &remaining_); - let properties = possiple_properties(conn_ctx.tables()); - let property_weights = WeightedIndex::new( - properties - .iter() - .map(|property| property.weight(env, &remaining_, conn_ctx.opts())), - ) - .unwrap(); + let property_distr = + PropertyDistribution::new(env, &remaining_, &query_distr, conn_ctx.opts()); frequency( vec![ ( - property_weights.total_weight(), + property_distr.weights().total_weight(), Box::new(|rng: &mut R| { Interactions::new( conn_index, InteractionsType::Property(Property::arbitrary_from( rng, conn_ctx, - (env, &remaining_), + &property_distr, )), ) }), ), ( - query_weights.total_weight(), + query_distr.weights().total_weight(), Box::new(|rng: &mut R| { Interactions::new( conn_index, InteractionsType::Query(Query::arbitrary_from( rng, conn_ctx, - &remaining_, + &query_distr, )), ) }), diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index aeb6e5ea1..5e84e23cc 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -19,7 +19,11 @@ use turso_parser::ast::{self, Distinctness}; use crate::{ common::print_diff, - generation::{Shadow as _, plan::InteractionType, query::possible_queries}, + generation::{ + Shadow as _, WeightedDistribution, + plan::InteractionType, + query::{QueryDistribution, possible_queries}, + }, model::{Query, QueryCapabilities, QueryDiscriminants}, profiles::query::QueryProfile, runner::env::SimulatorEnv, @@ -1210,7 +1214,7 @@ pub(crate) fn remaining( fn property_insert_values_select( rng: &mut R, - remaining: &Remaining, + query_distr: &QueryDistribution, ctx: &impl GenerationContext, mvcc: bool, ) -> Property { @@ -1254,7 +1258,7 @@ fn property_insert_values_select( })); } for _ in 0..rng.random_range(0..3) { - let query = Query::arbitrary_from(rng, ctx, remaining); + let query = Query::arbitrary_from(rng, ctx, query_distr); match &query { Query::Delete(Delete { table: t, @@ -1311,7 +1315,7 @@ fn property_insert_values_select( fn property_read_your_updates_back( rng: &mut R, - _remaining: &Remaining, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1335,7 +1339,7 @@ fn property_read_your_updates_back( fn property_table_has_expected_content( rng: &mut R, - _remaining: &Remaining, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1348,7 +1352,7 @@ fn property_table_has_expected_content( fn property_select_limit( rng: &mut R, - _remaining: &Remaining, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1367,7 +1371,7 @@ fn property_select_limit( fn property_double_create_failure( rng: &mut R, - remaining: &Remaining, + query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1381,7 +1385,7 @@ fn property_double_create_failure( // - [x] There will be no errors in the middle interactions.(best effort) // - [ ] Table `t` will not be renamed or dropped.(todo: add this constraint once ALTER or DROP is implemented) for _ in 0..rng.random_range(0..3) { - let query = Query::arbitrary_from(rng, ctx, remaining); + let query = Query::arbitrary_from(rng, ctx, query_distr); if let Query::Create(Create { table: t }) = &query { // There will be no errors in the middle interactions. // - Creating the same table is an error @@ -1400,7 +1404,7 @@ fn property_double_create_failure( fn property_delete_select( rng: &mut R, - remaining: &Remaining, + query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1415,7 +1419,7 @@ fn property_delete_select( // - [x] A row that holds for the predicate will not be inserted. // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented) for _ in 0..rng.random_range(0..3) { - let query = Query::arbitrary_from(rng, ctx, remaining); + let query = Query::arbitrary_from(rng, ctx, query_distr); match &query { Query::Insert(Insert::Values { table: t, values }) => { // A row that holds for the predicate will not be inserted. @@ -1459,7 +1463,7 @@ fn property_delete_select( fn property_drop_select( rng: &mut R, - remaining: &Remaining, + query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1471,7 +1475,7 @@ fn property_drop_select( // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) // - [-] The table `t` will not be created, no table will be renamed to `t`. (todo: update this constraint once ALTER is implemented) for _ in 0..rng.random_range(0..3) { - let query = Query::arbitrary_from(rng, ctx, remaining); + let query = Query::arbitrary_from(rng, ctx, query_distr); if let Query::Create(Create { table: t }) = &query { // - The table `t` will not be created if t.name == table.name { @@ -1495,7 +1499,7 @@ fn property_drop_select( fn property_select_select_optimizer( rng: &mut R, - _remaining: &Remaining, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1518,7 +1522,7 @@ fn property_select_select_optimizer( fn property_where_true_false_null( rng: &mut R, - _remaining: &Remaining, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1539,7 +1543,7 @@ fn property_where_true_false_null( fn property_union_all_preserves_cardinality( rng: &mut R, - _remaining: &Remaining, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1566,32 +1570,32 @@ fn property_union_all_preserves_cardinality( fn property_fsync_no_wait( rng: &mut R, - remaining: &Remaining, + query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { Property::FsyncNoWait { - query: Query::arbitrary_from(rng, ctx, remaining), + query: Query::arbitrary_from(rng, ctx, query_distr), tables: ctx.tables().iter().map(|t| t.name.clone()).collect(), } } fn property_faulty_query( rng: &mut R, - remaining: &Remaining, + query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { Property::FaultyQuery { - query: Query::arbitrary_from(rng, ctx, remaining), + query: Query::arbitrary_from(rng, ctx, query_distr), tables: ctx.tables().iter().map(|t| t.name.clone()).collect(), } } -type PropertyGenFunc = fn(&mut R, &Remaining, &G, bool) -> Property; +type PropertyGenFunc = fn(&mut R, &QueryDistribution, &G, bool) -> Property; impl PropertyDiscriminants { - pub fn gen_function(&self) -> PropertyGenFunc + pub(super) fn gen_function(&self) -> PropertyGenFunc where R: rand::Rng + ?Sized, G: GenerationContext, @@ -1743,27 +1747,69 @@ pub fn possiple_properties(tables: &[Table]) -> Vec { PropertyDiscriminants::can_generate(queries) } -impl ArbitraryFrom<(&SimulatorEnv, &Remaining)> for Property { - fn arbitrary_from( - rng: &mut R, - conn_ctx: &C, - (env, remaining_): (&SimulatorEnv, &Remaining), - ) -> Self { - let opts = conn_ctx.opts(); +pub(super) struct PropertyDistribution<'a> { + properties: Vec, + weights: WeightedIndex, + query_distr: &'a QueryDistribution, + mvcc: bool, +} - let properties = possiple_properties(conn_ctx.tables()); +impl<'a> PropertyDistribution<'a> { + pub fn new( + env: &SimulatorEnv, + remaining: &Remaining, + query_distr: &'a QueryDistribution, + opts: &Opts, + ) -> Self { + let properties = PropertyDiscriminants::can_generate(query_distr.items()); let weights = WeightedIndex::new( properties .iter() - .map(|property| property.weight(env, &remaining_, opts)), + .map(|property| property.weight(env, remaining, opts)), ) .unwrap(); - let idx = weights.sample(rng); - let property_fn = properties[idx].gen_function(); - let property = (property_fn)(rng, &remaining_, conn_ctx, env.profile.experimental_mvcc); + Self { + properties, + weights, + query_distr, + mvcc: env.profile.experimental_mvcc, + } + } +} - property +impl<'a> WeightedDistribution for PropertyDistribution<'a> { + type Item = PropertyDiscriminants; + + type GenItem = Property; + + fn items(&self) -> &[Self::Item] { + &self.properties + } + + fn weights(&self) -> &WeightedIndex { + &self.weights + } + + fn sample( + &self, + rng: &mut R, + conn_ctx: &C, + ) -> Self::GenItem { + let properties = &self.properties; + let idx = self.weights.sample(rng); + let property_fn = properties[idx].gen_function(); + (property_fn)(rng, self.query_distr, conn_ctx, self.mvcc) + } +} + +impl<'a> ArbitraryFrom<&PropertyDistribution<'a>> for Property { + fn arbitrary_from( + rng: &mut R, + conn_ctx: &C, + property_distr: &PropertyDistribution<'a>, + ) -> Self { + property_distr.sample(rng, conn_ctx) } } From 8b6456f8430e11c5f147283dfe0ea13658dadb7b Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 5 Oct 2025 19:29:39 -0300 Subject: [PATCH 030/428] do not allow `Property::Queries` to attempt to be generated --- simulator/generation/property.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 5e84e23cc..b00a1114d 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1711,7 +1711,10 @@ impl PropertyDiscriminants { let queries_capabilities = QueryCapabilities::from_list_queries(queries); PropertyDiscriminants::iter() - .filter(|property| queries_capabilities.contains(property.requirements())) + .filter(|property| { + !matches!(property, PropertyDiscriminants::Queries) + && queries_capabilities.contains(property.requirements()) + }) .collect() } From d5c49c17c7afe0c8dac5983fca4c0be0ae150fe3 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 7 Oct 2025 09:56:32 +0300 Subject: [PATCH 031/428] perf/throughput: Delete database before benchmark run Let's reduce variance between benchmark runs by starting with an empty database. --- perf/throughput/rusqlite/scripts/bench.sh | 1 + perf/throughput/turso/scripts/bench.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/perf/throughput/rusqlite/scripts/bench.sh b/perf/throughput/rusqlite/scripts/bench.sh index a4efcc6e5..7d9f13207 100755 --- a/perf/throughput/rusqlite/scripts/bench.sh +++ b/perf/throughput/rusqlite/scripts/bench.sh @@ -6,6 +6,7 @@ echo "system,threads,batch_size,compute,throughput" for threads in 1 2 4 8; do for compute in 0 100 500 1000; do + rm -f write_throughput_test.db* ../../../target/release/write-throughput-sqlite --threads ${threads} --batch-size 100 --compute ${compute} -i 1000 done done diff --git a/perf/throughput/turso/scripts/bench.sh b/perf/throughput/turso/scripts/bench.sh index 6d6f75d4b..27ad51e0b 100755 --- a/perf/throughput/turso/scripts/bench.sh +++ b/perf/throughput/turso/scripts/bench.sh @@ -6,6 +6,7 @@ echo "system,threads,batch_size,compute,throughput" for threads in 1 2 4 8; do for compute in 0 100 500 1000; do + rm -f write_throughput_test.db* ../../../target/release/write-throughput --threads ${threads} --batch-size 100 --compute ${compute} -i 1000 --mode concurrent done done From bd1013d62f72bc93170a50619e8288ed7f6ac32e Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 7 Oct 2025 12:28:55 +0400 Subject: [PATCH 032/428] emit proper column information for explain prepared statements --- .../javascript/packages/wasm/promise.test.ts | 92 +++++++++++++++++++ core/lib.rs | 31 +++++++ core/vdbe/explain.rs | 4 + 3 files changed, 127 insertions(+) diff --git a/bindings/javascript/packages/wasm/promise.test.ts b/bindings/javascript/packages/wasm/promise.test.ts index 77176d9f9..d80dee8b7 100644 --- a/bindings/javascript/packages/wasm/promise.test.ts +++ b/bindings/javascript/packages/wasm/promise.test.ts @@ -1,6 +1,97 @@ import { expect, test } from 'vitest' import { connect, Database } from './promise-default.js' +test('explain', async () => { + const db = await connect(":memory:"); + const stmt = db.prepare("EXPLAIN SELECT 1"); + expect(stmt.columns()).toEqual([ + { + "name": "addr", + "type": "INTEGER", + }, + { + "name": "opcode", + "type": "TEXT", + }, + { + "name": "p1", + "type": "INTEGER", + }, + { + "name": "p2", + "type": "INTEGER", + }, + { + "name": "p3", + "type": "INTEGER", + }, + { + "name": "p4", + "type": "INTEGER", + }, + { + "name": "p5", + "type": "INTEGER", + }, + { + "name": "comment", + "type": "TEXT", + }, + ].map(x => ({ ...x, column: null, database: null, table: null }))); + expect(await stmt.all()).toEqual([ + { + "addr": 0, + "comment": "Start at 3", + "opcode": "Init", + "p1": 0, + "p2": 3, + "p3": 0, + "p4": "", + "p5": 0, + }, + { + "addr": 1, + "comment": "output=r[1]", + "opcode": "ResultRow", + "p1": 1, + "p2": 1, + "p3": 0, + "p4": "", + "p5": 0, + }, + { + "addr": 2, + "comment": "", + "opcode": "Halt", + "p1": 0, + "p2": 0, + "p3": 0, + "p4": "", + "p5": 0, + }, + { + "addr": 3, + "comment": "r[1]=1", + "opcode": "Integer", + "p1": 1, + "p2": 1, + "p3": 0, + "p4": "", + "p5": 0, + }, + { + "addr": 4, + "comment": "", + "opcode": "Goto", + "p1": 0, + "p2": 1, + "p3": 0, + "p4": "", + "p5": 0, + }, + ]); +}) + test('in-memory db', async () => { const db = await connect(":memory:"); await db.exec("CREATE TABLE t(x)"); @@ -10,6 +101,7 @@ test('in-memory db', async () => { expect(rows).toEqual([{ x: 1 }, { x: 3 }]); }) + test('implicit connect', async () => { const db = new Database(':memory:'); const defer = db.prepare("SELECT * FROM t"); diff --git a/core/lib.rs b/core/lib.rs index 211876d89..cf51ae283 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -46,6 +46,7 @@ use crate::translate::pragma::TURSO_CDC_DEFAULT_TABLE_NAME; use crate::types::{WalFrameInfo, WalState}; #[cfg(feature = "fs")] use crate::util::{OpenMode, OpenOptions}; +use crate::vdbe::explain::{EXPLAIN_COLUMNS_TYPE, EXPLAIN_QUERY_PLAN_COLUMNS_TYPE}; use crate::vdbe::metrics::ConnectionMetrics; use crate::vtab::VirtualTable; use crate::{incremental::view::AllViewsTxState, translate::emitter::TransactionMode}; @@ -2667,6 +2668,17 @@ impl Statement { } pub fn get_column_name(&self, idx: usize) -> Cow<'_, str> { + if self.query_mode == QueryMode::Explain { + return Cow::Owned(EXPLAIN_COLUMNS.get(idx).expect("No column").to_string()); + } + if self.query_mode == QueryMode::ExplainQueryPlan { + return Cow::Owned( + EXPLAIN_QUERY_PLAN_COLUMNS + .get(idx) + .expect("No column") + .to_string(), + ); + } match self.query_mode { QueryMode::Normal => { let column = &self.program.result_columns.get(idx).expect("No column"); @@ -2685,6 +2697,9 @@ impl Statement { } pub fn get_column_table_name(&self, idx: usize) -> Option> { + if self.query_mode == QueryMode::Explain || self.query_mode == QueryMode::ExplainQueryPlan { + return None; + } let column = &self.program.result_columns.get(idx).expect("No column"); match &column.expr { turso_parser::ast::Expr::Column { table, .. } => self @@ -2697,6 +2712,22 @@ impl Statement { } pub fn get_column_type(&self, idx: usize) -> Option { + if self.query_mode == QueryMode::Explain { + return Some( + EXPLAIN_COLUMNS_TYPE + .get(idx) + .expect("No column") + .to_string(), + ); + } + if self.query_mode == QueryMode::ExplainQueryPlan { + return Some( + EXPLAIN_QUERY_PLAN_COLUMNS_TYPE + .get(idx) + .expect("No column") + .to_string(), + ); + } let column = &self.program.result_columns.get(idx).expect("No column"); match &column.expr { turso_parser::ast::Expr::Column { diff --git a/core/vdbe/explain.rs b/core/vdbe/explain.rs index f480a8a4c..5e8dde2fe 100644 --- a/core/vdbe/explain.rs +++ b/core/vdbe/explain.rs @@ -6,7 +6,11 @@ use super::{Insn, InsnReference, Program, Value}; use crate::function::{Func, ScalarFunc}; pub const EXPLAIN_COLUMNS: [&str; 8] = ["addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment"]; +pub const EXPLAIN_COLUMNS_TYPE: [&str; 8] = [ + "INTEGER", "TEXT", "INTEGER", "INTEGER", "INTEGER", "INTEGER", "INTEGER", "TEXT", +]; pub const EXPLAIN_QUERY_PLAN_COLUMNS: [&str; 4] = ["id", "parent", "notused", "detail"]; +pub const EXPLAIN_QUERY_PLAN_COLUMNS_TYPE: [&str; 4] = ["INTEGER", "INTEGER", "INTEGER", "TEXT"]; pub fn insn_to_row( program: &Program, From 603b7121de55e4f828a1fa905afa8863b77dacc2 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 7 Oct 2025 11:34:25 +0300 Subject: [PATCH 033/428] integrity check: check index root pages too we had code in the integrity check state machine for indexes, but index root pages were never added to the list to check. --- core/translate/integrity_check.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/translate/integrity_check.rs b/core/translate/integrity_check.rs index fc750fef8..a2bafe3f0 100644 --- a/core/translate/integrity_check.rs +++ b/core/translate/integrity_check.rs @@ -16,6 +16,11 @@ pub fn translate_integrity_check( for table in schema.tables.values() { if let crate::schema::Table::BTree(table) = table.as_ref() { root_pages.push(table.root_page); + if let Some(indexes) = schema.indexes.get(table.name.as_str()) { + for index in indexes.iter() { + root_pages.push(index.root_page); + } + } }; } let message_register = program.alloc_register(); From 5941c03a4ffa7443c33333481866a63540638d99 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 7 Oct 2025 11:35:38 +0300 Subject: [PATCH 034/428] integrity check: check for dangling (unused) pages --- core/storage/btree.rs | 8 ++++++-- core/vdbe/execute.rs | 21 ++++++++++++++++----- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 9ab4c689e..ffb08aacc 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -5811,6 +5811,8 @@ pub enum IntegrityCheckError { actual_count: usize, expected_count: usize, }, + #[error("Page {page_id}: never used")] + PageNeverUsed { page_id: i64 }, } #[derive(Debug, Clone, Copy, PartialEq)] @@ -5836,16 +5838,18 @@ struct IntegrityCheckPageEntry { } pub struct IntegrityCheckState { page_stack: Vec, + pub db_size: usize, first_leaf_level: Option, - page_reference: HashMap, + pub page_reference: HashMap, page: Option, pub freelist_count: CheckFreelist, } impl IntegrityCheckState { - pub fn new() -> Self { + pub fn new(db_size: usize) -> Self { Self { page_stack: Vec::new(), + db_size, page_reference: HashMap::new(), first_leaf_level: None, page: None, diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index fca7a2ca9..73ea1d223 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -7817,12 +7817,13 @@ pub fn op_integrity_check( ); match &mut state.op_integrity_check_state { OpIntegrityCheckState::Start => { - let freelist_trunk_page = - return_if_io!(with_header(pager, mv_store, program, |header| header - .freelist_trunk_page - .get())); + let (freelist_trunk_page, db_size) = + return_if_io!(with_header(pager, mv_store, program, |header| ( + header.freelist_trunk_page.get(), + header.database_size.get() + ))); let mut errors = Vec::new(); - let mut integrity_check_state = IntegrityCheckState::new(); + let mut integrity_check_state = IntegrityCheckState::new(db_size as usize); let mut current_root_idx = 0; // check freelist pages first, if there are any for database if freelist_trunk_page > 0 { @@ -7865,6 +7866,16 @@ pub fn op_integrity_check( expected_count: integrity_check_state.freelist_count.expected_count, }); } + for page_number in 2..=integrity_check_state.db_size { + if !integrity_check_state + .page_reference + .contains_key(&(page_number as i64)) + { + errors.push(IntegrityCheckError::PageNeverUsed { + page_id: page_number as i64, + }); + } + } let message = if errors.is_empty() { "ok".to_string() } else { From 44152f11d030de35e83167fd13715256a04d7a1b Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 7 Oct 2025 11:15:48 +0200 Subject: [PATCH 035/428] core/mvcc/logical-log: switch RwLock to parking_lot --- core/mvcc/database/mod.rs | 2 +- core/mvcc/persistent_storage/mod.rs | 20 +++++++++----------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 675695b78..f88d21251 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -2016,7 +2016,7 @@ impl MvStore { } StreamingResult::Eof => { // Set offset to the end so that next writes go to the end of the file - self.storage.logical_log.write().unwrap().offset = reader.offset as u64; + self.storage.logical_log.write().offset = reader.offset as u64; break; } } diff --git a/core/mvcc/persistent_storage/mod.rs b/core/mvcc/persistent_storage/mod.rs index ede456bc3..0c5514f6c 100644 --- a/core/mvcc/persistent_storage/mod.rs +++ b/core/mvcc/persistent_storage/mod.rs @@ -1,5 +1,6 @@ +use parking_lot::RwLock; use std::fmt::Debug; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; pub mod logical_log; use crate::mvcc::database::LogRecord; @@ -20,7 +21,7 @@ impl Storage { impl Storage { pub fn log_tx(&self, m: &LogRecord) -> Result { - self.logical_log.write().unwrap().log_tx(m) + self.logical_log.write().log_tx(m) } pub fn read_tx_log(&self) -> Result> { @@ -28,30 +29,27 @@ impl Storage { } pub fn sync(&self) -> Result { - self.logical_log.write().unwrap().sync() + self.logical_log.write().sync() } pub fn truncate(&self) -> Result { - self.logical_log.write().unwrap().truncate() + self.logical_log.write().truncate() } pub fn get_logical_log_file(&self) -> Arc { - self.logical_log.write().unwrap().file.clone() + self.logical_log.write().file.clone() } pub fn should_checkpoint(&self) -> bool { - self.logical_log.read().unwrap().should_checkpoint() + self.logical_log.read().should_checkpoint() } pub fn set_checkpoint_threshold(&self, threshold: u64) { - self.logical_log - .write() - .unwrap() - .set_checkpoint_threshold(threshold) + self.logical_log.write().set_checkpoint_threshold(threshold) } pub fn checkpoint_threshold(&self) -> u64 { - self.logical_log.read().unwrap().checkpoint_threshold() + self.logical_log.read().checkpoint_threshold() } } From 3e508a4b42b8901d00674c9459e0028f86e555c7 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 7 Oct 2025 12:00:21 +0200 Subject: [PATCH 036/428] core/io: remove new_dummy in place of new_yield Yield is a completion that does not allocate any inner state. By design it is completed from the start and has no errors. This allows lightly yield without allocating any locks nor heap allocate inner state. --- core/incremental/view.rs | 2 +- core/io/mod.rs | 99 +++++++++++++++++++++------------- core/io/vfs.rs | 4 +- core/mvcc/database/mod.rs | 2 +- core/storage/btree.rs | 2 +- core/storage/pager.rs | 2 +- core/storage/sqlite3_ondisk.rs | 2 +- 7 files changed, 69 insertions(+), 44 deletions(-) diff --git a/core/incremental/view.rs b/core/incremental/view.rs index cb90b57b9..f82aeadcf 100644 --- a/core/incremental/view.rs +++ b/core/incremental/view.rs @@ -1282,7 +1282,7 @@ impl IncrementalView { pending_row: None, // No pending row when interrupted between rows }; // TODO: Get the actual I/O completion from the statement - let completion = crate::io::Completion::new_dummy(); + let completion = crate::io::Completion::new_yield(); return Ok(IOResult::IO(crate::types::IOCompletions::Single( completion, ))); diff --git a/core/io/mod.rs b/core/io/mod.rs index 0698df9c6..c1940e191 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -102,8 +102,10 @@ pub trait IO: Clock + Send + Sync { while !c.finished() { self.step()? } - if let Some(Some(err)) = c.inner.result.get().copied() { - return Err(err.into()); + if let Some(inner) = &c.inner { + if let Some(Some(err)) = inner.result.get().copied() { + return Err(err.into()); + } } Ok(()) } @@ -133,7 +135,8 @@ pub type TruncateComplete = dyn Fn(Result); #[must_use] #[derive(Debug, Clone)] pub struct Completion { - inner: Arc, + /// Optional completion state. If None, it means we are Yield in order to not allocate anything + inner: Option>, } struct CompletionInner { @@ -194,7 +197,7 @@ impl CompletionGroup { c.link_internal(&group); continue; } - let group_inner = match &group.inner.completion_type { + let group_inner = match &group.get_inner().completion_type { CompletionType::Group(g) => &g.inner, _ => unreachable!(), }; @@ -209,7 +212,7 @@ impl CompletionGroup { group_inner.outstanding.fetch_sub(1, Ordering::SeqCst); } - let group_inner = match &group.inner.completion_type { + let group_inner = match &group.get_inner().completion_type { CompletionType::Group(g) => &g.inner, _ => unreachable!(), }; @@ -276,6 +279,7 @@ impl Debug for CompletionType { Self::Sync(..) => f.debug_tuple("Sync").finish(), Self::Truncate(..) => f.debug_tuple("Truncate").finish(), Self::Group(..) => f.debug_tuple("Group").finish(), + Self::Yield => f.debug_tuple("Yield").finish(), } } } @@ -286,33 +290,38 @@ pub enum CompletionType { Sync(SyncCompletion), Truncate(TruncateCompletion), Group(GroupCompletion), + Yield, } impl Completion { pub fn new(completion_type: CompletionType) -> Self { Self { - inner: Arc::new(CompletionInner { + inner: Some(Arc::new(CompletionInner { completion_type, result: OnceLock::new(), needs_link: false, parent: OnceLock::new(), - }), + })), } } pub fn new_linked(completion_type: CompletionType) -> Self { Self { - inner: Arc::new(CompletionInner { + inner: Some(Arc::new(CompletionInner { completion_type, result: OnceLock::new(), needs_link: true, parent: OnceLock::new(), - }), + })), } } + pub(self) fn get_inner(&self) -> &Arc { + self.inner.as_ref().unwrap() + } + pub fn needs_link(&self) -> bool { - self.inner.needs_link + self.get_inner().needs_link } pub fn new_write_linked(complete: F) -> Self @@ -360,43 +369,56 @@ impl Completion { )))) } - /// Create a dummy completed completion - pub fn new_dummy() -> Self { - let c = Self::new_write(|_| {}); - c.complete(0); - c + /// Create a yield completion. These are completed by default allowing to yield control without + /// allocating memory. + pub fn new_yield() -> Self { + Self { inner: None } } pub fn succeeded(&self) -> bool { - match &self.inner.completion_type { - CompletionType::Group(g) => { - g.inner.outstanding.load(Ordering::SeqCst) == 0 - && g.inner.result.get().is_none_or(|e| e.is_none()) - } - _ => self.inner.result.get().is_some(), + match &self.inner { + Some(inner) => match &inner.completion_type { + CompletionType::Group(g) => { + g.inner.outstanding.load(Ordering::SeqCst) == 0 + && g.inner.result.get().is_none_or(|e| e.is_none()) + } + _ => inner.result.get().is_some(), + }, + None => true, } } pub fn failed(&self) -> bool { - self.inner.result.get().is_some_and(|val| val.is_some()) + match &self.inner { + Some(inner) => inner.result.get().is_some_and(|val| val.is_some()), + None => false, + } } pub fn get_error(&self) -> Option { - match &self.inner.completion_type { - CompletionType::Group(g) => { - // For groups, check the group's cached result field - // (set when the last completion finishes) - g.inner.result.get().and_then(|res| *res) + match &self.inner { + Some(inner) => { + match &inner.completion_type { + CompletionType::Group(g) => { + // For groups, check the group's cached result field + // (set when the last completion finishes) + g.inner.result.get().and_then(|res| *res) + } + _ => inner.result.get().and_then(|res| *res), + } } - _ => self.inner.result.get().and_then(|res| *res), + None => None, } } /// Checks if the Completion completed or errored pub fn finished(&self) -> bool { - match &self.inner.completion_type { - CompletionType::Group(g) => g.inner.outstanding.load(Ordering::SeqCst) == 0, - _ => self.inner.result.get().is_some(), + match &self.inner { + Some(inner) => match &inner.completion_type { + CompletionType::Group(g) => g.inner.outstanding.load(Ordering::SeqCst) == 0, + _ => inner.result.get().is_some(), + }, + None => true, } } @@ -415,16 +437,18 @@ impl Completion { } fn callback(&self, result: Result) { - self.inner.result.get_or_init(|| { - match &self.inner.completion_type { + let inner = self.get_inner(); + inner.result.get_or_init(|| { + match &inner.completion_type { CompletionType::Read(r) => r.callback(result), CompletionType::Write(w) => w.callback(result), CompletionType::Sync(s) => s.callback(result), // fix CompletionType::Truncate(t) => t.callback(result), CompletionType::Group(g) => g.callback(result), + CompletionType::Yield => {} }; - if let Some(group) = self.inner.parent.get() { + if let Some(group) = inner.parent.get() { // Capture first error in group if let Err(err) = result { let _ = group.result.set(Some(err)); @@ -446,7 +470,8 @@ impl Completion { /// only call this method if you are sure that the completion is /// a ReadCompletion, panics otherwise pub fn as_read(&self) -> &ReadCompletion { - match self.inner.completion_type { + let inner = self.get_inner(); + match inner.completion_type { CompletionType::Read(ref r) => r, _ => unreachable!(), } @@ -454,13 +479,13 @@ impl Completion { /// Link this completion to a group completion (internal use only) fn link_internal(&mut self, group: &Completion) { - let group_inner = match &group.inner.completion_type { + let group_inner = match &group.get_inner().completion_type { CompletionType::Group(g) => &g.inner, _ => panic!("link_internal() requires a group completion"), }; // Set the parent (can only be set once) - if self.inner.parent.set(group_inner.clone()).is_err() { + if self.get_inner().parent.set(group_inner.clone()).is_err() { panic!("completion can only be linked once"); } } diff --git a/core/io/vfs.rs b/core/io/vfs.rs index 9c7b116c0..52722a82e 100644 --- a/core/io/vfs.rs +++ b/core/io/vfs.rs @@ -86,14 +86,14 @@ impl VfsMod { /// that the into_raw/from_raw contract will hold unsafe extern "C" fn callback_fn(result: i32, ctx: SendPtr) { let completion = Completion { - inner: (Arc::from_raw(ctx.inner().as_ptr() as *mut CompletionInner)), + inner: (Some(Arc::from_raw(ctx.inner().as_ptr() as *mut CompletionInner))), }; completion.complete(result); } fn to_callback(c: Completion) -> IOCallback { IOCallback::new(callback_fn, unsafe { - NonNull::new_unchecked(Arc::into_raw(c.inner) as *mut c_void) + NonNull::new_unchecked(Arc::into_raw(c.get_inner().clone()) as *mut c_void) }) } diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 675695b78..0726504bb 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -625,7 +625,7 @@ impl StateTransition for CommitStateMachine { let locked = self.commit_coordinator.pager_commit_lock.write(); if !locked { return Ok(TransitionResult::Io(IOCompletions::Single( - Completion::new_dummy(), + Completion::new_yield(), ))); } } diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 9ab4c689e..71a63aba6 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -5717,7 +5717,7 @@ impl BTreeCursor { self.valid_state = CursorValidState::RequireAdvance(ctx.seek_op.iteration_direction()); self.context = Some(ctx); - io_yield_one!(Completion::new_dummy()); + io_yield_one!(Completion::new_yield()); } self.valid_state = CursorValidState::Valid; Ok(IOResult::Done(())) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index e3281fc7c..0d748467e 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1142,7 +1142,7 @@ impl Pager { } } else { // Give a chance for the allocation to happen elsewhere - io_yield_one!(Completion::new_dummy()); + io_yield_one!(Completion::new_yield()); } } Ok(IOResult::Done(())) diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs index 5fb353804..6dfe12bd1 100644 --- a/core/storage/sqlite3_ondisk.rs +++ b/core/storage/sqlite3_ondisk.rs @@ -1746,7 +1746,7 @@ impl StreamingWalReader { .min((self.file_size - offset) as usize); if read_size == 0 { // end-of-file; let caller finalize - return Ok((0, Completion::new_dummy())); + return Ok((0, Completion::new_yield())); } let buf = Arc::new(Buffer::new_temporary(read_size)); From a7d2462c0519c300255e9f8729f0e6159027de45 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 7 Oct 2025 12:05:54 +0200 Subject: [PATCH 037/428] core/io/uring: fix inner usages Yield is a completion that does not allocate any inner state. By design it is completed from the start and has no errors. This allows lightly yield without allocating any locks nor heap allocate inner state. --- core/io/io_uring.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/io/io_uring.rs b/core/io/io_uring.rs index 03f8dc3a0..d9f79b874 100644 --- a/core/io/io_uring.rs +++ b/core/io/io_uring.rs @@ -709,14 +709,16 @@ impl Clock for UringIO { /// use the callback pointer as the user_data for the operation as is /// common practice for io_uring to prevent more indirection fn get_key(c: Completion) -> u64 { - Arc::into_raw(c.inner.clone()) as u64 + Arc::into_raw(c.get_inner().clone()) as u64 } #[inline(always)] /// convert the user_data back to an Completion pointer fn completion_from_key(key: u64) -> Completion { let c_inner = unsafe { Arc::from_raw(key as *const CompletionInner) }; - Completion { inner: c_inner } + Completion { + inner: Some(c_inner), + } } pub struct UringFile { From 73efe5d853c5c75859e5c621728631cbeccc7abd Mon Sep 17 00:00:00 2001 From: bit-aloo Date: Tue, 7 Oct 2025 17:49:15 +0530 Subject: [PATCH 038/428] make table name not repeat in simulator --- sql_generation/generation/table.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/sql_generation/generation/table.rs b/sql_generation/generation/table.rs index 9f038a379..5e3c371ac 100644 --- a/sql_generation/generation/table.rs +++ b/sql_generation/generation/table.rs @@ -1,3 +1,5 @@ +use std::sync::atomic::{AtomicU64, Ordering}; + use indexmap::IndexSet; use rand::Rng; use turso_core::Value; @@ -9,10 +11,13 @@ use crate::model::table::{Column, ColumnType, Name, SimValue, Table}; use super::ArbitraryFromMaybe; +static COUNTER: AtomicU64 = AtomicU64::new(0); + impl Arbitrary for Name { fn arbitrary(rng: &mut R, _c: &C) -> Self { - let name = readable_name_custom("_", rng); - Name(name.replace("-", "_")) + let base = readable_name_custom("_", rng).replace("-", "_"); + let id = COUNTER.fetch_add(1, Ordering::Relaxed); + Name(format!("{}_{}", base, id)) } } From f977c5f0a4c2b41b869fdc476ff6ac2dcdb68a46 Mon Sep 17 00:00:00 2001 From: Henrik Ingo Date: Tue, 7 Oct 2025 15:38:04 +0300 Subject: [PATCH 039/428] =?UTF-8?q?Add=20Nightly=20versions=20of=20benchma?= =?UTF-8?q?rks=20that=20run=20on=20Nyrki=C3=B6=20runners?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/perf_nightly.yml | 165 +++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 .github/workflows/perf_nightly.yml diff --git a/.github/workflows/perf_nightly.yml b/.github/workflows/perf_nightly.yml new file mode 100644 index 000000000..ec2ba7ee0 --- /dev/null +++ b/.github/workflows/perf_nightly.yml @@ -0,0 +1,165 @@ +name: Nightly Benchmarks on Nyrkiö Runners (stability) + +on: + workflow_dispatch: ["main", "notmain", "master"] + schedule: + - branches: ["main"] + - cron: '24 6,11,14,20 * * *' + push: + # branches: ["main", "notmain", "master"] + branches: ["notmain"] + pull_request: + # branches: ["main", "notmain", "master"] + branches: ["notmain"] + +env: + CARGO_TERM_COLOR: never + +jobs: + bench: + runs-on: nyrkio_perf_server_2cpu_ubuntu2404 + timeout-minutes: 30 + steps: + - uses: actions/checkout@v3 + - uses: useblacksmith/setup-node@v5 + with: + node-version: 20 + # cache: 'npm' + # - name: Install dependencies + # run: npm install && npm run build + + - name: Bench + run: make bench-exclude-tpc-h 2>&1 | tee output.txt + - name: Analyze benchmark result with Nyrkiö + uses: nyrkio/change-detection@HEAD + with: + name: nightly/turso + tool: criterion + output-file-path: output.txt + + # What to do if a change is immediately detected by Nyrkiö. + # Note that smaller changes are only detected with delay, usually after a change + # persisted over 2-7 commits. Go to nyrkiö.com to view those or configure alerts. + # Note that Nyrkiö will find all changes, also improvements. This means fail-on-alert + # on pull events isn't compatible with this workflow being required to pass branch protection. + fail-on-alert: false + comment-on-alert: true + comment-always: true + # Nyrkiö configuration + # Get yours from https://nyrkio.com/docs/getting-started + nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }} + # HTTP requests will fail for all non-core contributors that don't have their own token. + # Don't want that to spoil the build, so: + never-fail: true + # Make results and change points public, so that any oss contributor can see them + nyrkio-public: true + + # parameters of the algorithm. Note: These are global, so we only set them once and for all. + # Smaller p-value = less change points found. Larger p-value = more, but also more false positives. + nyrkio-settings-pvalue: 0.0001 + # Ignore changes smaller than this. + nyrkio-settings-threshold: 0% + + clickbench: + runs-on: nyrkio_perf_server_2cpu_ubuntu2404 + timeout-minutes: 30 + steps: + - uses: actions/checkout@v3 + - uses: useblacksmith/setup-node@v5 + with: + node-version: 20 + + - name: Clickbench + run: make clickbench + + - name: Analyze TURSO result with Nyrkiö + uses: nyrkio/change-detection@HEAD + with: + name: nightly/clickbench/turso + tool: time + output-file-path: clickbench-tursodb.txt + # What to do if a change is immediately detected by Nyrkiö. + # Note that smaller changes are only detected with delay, usually after a change + # persisted over 2-7 commits. Go to nyrkiö.com to view those or configure alerts. + # Note that Nyrkiö will find all changes, also improvements. This means fail-on-alert + # on pull events isn't compatible with this workflow being required to pass branch protection. + fail-on-alert: false + comment-on-alert: true + comment-always: true + # Nyrkiö configuration + # Get yours from https://nyrkio.com/docs/getting-started + nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }} + # HTTP requests will fail for all non-core contributors that don't have their own token. + # Don't want that to spoil the build, so: + never-fail: true + # Make results and change points public, so that any oss contributor can see them + nyrkio-public: true + + - name: Analyze SQLITE3 result with Nyrkiö + uses: nyrkio/change-detection@HEAD + with: + name: clickbench/sqlite3 + tool: time + output-file-path: clickbench-sqlite3.txt + fail-on-alert: false + comment-on-alert: true + comment-always: false + nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }} + never-fail: true + nyrkio-public: true + + tpc-h-criterion: + runs-on: nyrkio_perf_server_2cpu_ubuntu2404 + timeout-minutes: 60 + env: + DB_FILE: "perf/tpc-h/TPC-H.db" + steps: + - uses: actions/checkout@v3 + - uses: useblacksmith/rust-cache@v3 + with: + prefix-key: "v1-rust" # can be updated if we need to reset caches due to non-trivial change in the dependencies (for example, custom env var were set for single workspace project) + + - name: Cache TPC-H + id: cache-primes + uses: useblacksmith/cache@v5 + with: + path: ${{ env.DB_FILE }} + key: tpc-h + - name: Download TPC-H + if: steps.cache-primes.outputs.cache-hit != 'true' + env: + DB_URL: "https://github.com/lovasoa/TPCH-sqlite/releases/download/v1.0/TPC-H.db" + run: wget -O $DB_FILE --no-verbose $DB_URL + + - name: Bench + run: cargo bench --bench tpc_h_benchmark 2>&1 | tee output.txt + - name: Analyze benchmark result with Nyrkiö + uses: nyrkio/change-detection@HEAD + with: + name: nightly/tpc-h + tool: criterion + output-file-path: output.txt + + # What to do if a change is immediately detected by Nyrkiö. + # Note that smaller changes are only detected with delay, usually after a change + # persisted over 2-7 commits. Go to nyrkiö.com to view those or configure alerts. + # Note that Nyrkiö will find all changes, also improvements. This means fail-on-alert + # on pull events isn't compatible with this workflow being required to pass branch protection. + fail-on-alert: false + comment-on-alert: true + comment-always: true + # Nyrkiö configuration + # Get yours from https://nyrkio.com/docs/getting-started + nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }} + # HTTP requests will fail for all non-core contributors that don't have their own token. + # Don't want that to spoil the build, so: + never-fail: true + # Make results and change points public, so that any oss contributor can see them + nyrkio-public: true + + # parameters of the algorithm. Note: These are global, so we only set them once and for all. + # Smaller p-value = less change points found. Larger p-value = more, but also more false positives. + nyrkio-settings-pvalue: 0.0001 + # Ignore changes smaller than this. + nyrkio-settings-threshold: 0% + From 5583e769817fe18e291421aa1c9a1137f58d7943 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 7 Oct 2025 15:29:10 +0300 Subject: [PATCH 040/428] Fix re-entrancy of op_destroy (used by DROP TABLE) op_destroy was assuming we never yield IO from BTreeCursor::btree_destroy(), so every so often it would just not complete the procedure and leave dangling pages in the database --- core/vdbe/execute.rs | 32 ++++++++++++++++++++++++-------- core/vdbe/mod.rs | 8 +++++--- tests/integration/fuzz/mod.rs | 22 ++++++++++++++++++---- 3 files changed, 47 insertions(+), 15 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index fca7a2ca9..aa9fe59b4 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -6832,6 +6832,11 @@ pub fn op_create_btree( Ok(InsnFunctionStepResult::Step) } +pub enum OpDestroyState { + CreateCursor, + DestroyBtree(Arc>), +} + pub fn op_destroy( program: &Program, state: &mut ProgramState, @@ -6855,15 +6860,26 @@ pub fn op_destroy( state.pc += 1; return Ok(InsnFunctionStepResult::Step); } - // TODO not sure if should be BTreeCursor::new_table or BTreeCursor::new_index here or neither and just pass an emtpy vec - let mut cursor = BTreeCursor::new(None, pager.clone(), *root, 0); - let former_root_page_result = cursor.btree_destroy()?; - if let IOResult::Done(former_root_page) = former_root_page_result { - state.registers[*former_root_reg] = - Register::Value(Value::Integer(former_root_page.unwrap_or(0) as i64)); + + loop { + match state.op_destroy_state { + OpDestroyState::CreateCursor => { + // Destroy doesn't do anything meaningful with the table/index distinction so we can just use a + // table btree cursor for both. + let cursor = BTreeCursor::new(None, pager.clone(), *root, 0); + state.op_destroy_state = + OpDestroyState::DestroyBtree(Arc::new(RwLock::new(cursor))); + } + OpDestroyState::DestroyBtree(ref mut cursor) => { + let maybe_former_root_page = return_if_io!(cursor.write().btree_destroy()); + state.registers[*former_root_reg] = + Register::Value(Value::Integer(maybe_former_root_page.unwrap_or(0) as i64)); + state.op_destroy_state = OpDestroyState::CreateCursor; + state.pc += 1; + return Ok(InsnFunctionStepResult::Step); + } + } } - state.pc += 1; - Ok(InsnFunctionStepResult::Step) } pub fn op_reset_sorter( diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 4b11c8c1d..90b14225f 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -35,9 +35,9 @@ use crate::{ types::{IOCompletions, IOResult, RawSlice, TextRef}, vdbe::{ execute::{ - OpCheckpointState, OpColumnState, OpDeleteState, OpDeleteSubState, OpIdxInsertState, - OpInsertState, OpInsertSubState, OpNewRowidState, OpNoConflictState, OpRowIdState, - OpSeekState, OpTransactionState, + OpCheckpointState, OpColumnState, OpDeleteState, OpDeleteSubState, OpDestroyState, + OpIdxInsertState, OpInsertState, OpInsertSubState, OpNewRowidState, OpNoConflictState, + OpRowIdState, OpSeekState, OpTransactionState, }, metrics::StatementMetrics, }, @@ -290,6 +290,7 @@ pub struct ProgramState { #[cfg(feature = "json")] json_cache: JsonCacheCell, op_delete_state: OpDeleteState, + op_destroy_state: OpDestroyState, op_idx_delete_state: Option, op_integrity_check_state: OpIntegrityCheckState, /// Metrics collected during statement execution @@ -338,6 +339,7 @@ impl ProgramState { sub_state: OpDeleteSubState::MaybeCaptureRecord, deleted_record: None, }, + op_destroy_state: OpDestroyState::CreateCursor, op_idx_delete_state: None, op_integrity_check_state: OpIntegrityCheckState::Start, metrics: StatementMetrics::new(), diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 1f40c8ae0..eb3f8a1af 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -13,8 +13,8 @@ mod tests { use crate::{ common::{ do_flush, limbo_exec_rows, limbo_exec_rows_fallible, limbo_stmt_get_column_names, - maybe_setup_tracing, rng_from_time, rng_from_time_or_env, sqlite_exec_rows, - TempDatabase, + maybe_setup_tracing, rng_from_time, rng_from_time_or_env, rusqlite_integrity_check, + sqlite_exec_rows, TempDatabase, }, fuzz::grammar_generator::{const_str, rand_int, rand_str, GrammarGenerator}, }; @@ -2663,7 +2663,7 @@ mod tests { } #[test] - pub fn fuzz_long_create_table_drop_table_alter_table() { + pub fn fuzz_long_create_table_drop_table_alter_table_normal() { _fuzz_long_create_table_drop_table_alter_table(false); } @@ -2695,6 +2695,8 @@ mod tests { let mut undroppable_cols = HashSet::new(); + let mut stmts = vec![]; + for iteration in 0..2000 { println!("iteration: {iteration} (seed: {seed})"); let operation = rng.random_range(0..100); // 0: create, 1: drop, 2: alter, 3: alter rename @@ -2749,8 +2751,8 @@ mod tests { format!("CREATE TABLE {table_name} ({})", columns.join(", ")); // Execute the create table statement + stmts.push(create_sql.clone()); limbo_exec_rows(&db, &limbo_conn, &create_sql); - let column_names = columns .iter() .map(|c| c.split_whitespace().next().unwrap().to_string()) @@ -2765,6 +2767,7 @@ mod tests { .collect::>() .join(", ") ); + stmts.push(insert_sql.clone()); limbo_exec_rows(&db, &limbo_conn, &insert_sql); // Successfully created table, update our tracking @@ -2779,6 +2782,7 @@ mod tests { let table_to_drop = &table_names[rng.random_range(0..table_names.len())]; let drop_sql = format!("DROP TABLE {table_to_drop}"); + stmts.push(drop_sql.clone()); limbo_exec_rows(&db, &limbo_conn, &drop_sql); // Successfully dropped table, update our tracking @@ -2799,6 +2803,7 @@ mod tests { table_to_alter, &new_col_name, col_type ); + stmts.push(alter_sql.clone()); limbo_exec_rows(&db, &limbo_conn, &alter_sql); // Successfully added column, update our tracking @@ -2830,6 +2835,7 @@ mod tests { let alter_sql = format!( "ALTER TABLE {table_to_alter} DROP COLUMN {col_to_drop}" ); + stmts.push(alter_sql.clone()); limbo_exec_rows(&db, &limbo_conn, &alter_sql); // Successfully dropped column, update our tracking @@ -2866,6 +2872,14 @@ mod tests { "seed: {seed}, mvcc: {mvcc}, table: {table_name}" ); } + if !mvcc { + if let Err(e) = rusqlite_integrity_check(&db.path) { + for stmt in stmts.iter() { + println!("{stmt};"); + } + panic!("seed: {seed}, mvcc: {mvcc}, error: {e}"); + } + } } // Final verification - the test passes if we didn't crash From 1c4a54a73c4b466db8bb0121db082d7ab3c618b2 Mon Sep 17 00:00:00 2001 From: Duy Dang <55247256+ddwalias@users.noreply.github.com> Date: Tue, 7 Oct 2025 19:46:26 +0700 Subject: [PATCH 041/428] Add comment explaining the 0 for infinity timestamp --- core/mvcc/database/mod.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 5fa28c19c..9adeb1e75 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -1755,6 +1755,15 @@ impl MvStore { match ts_or_id { Some(TxTimestampOrID::Timestamp(ts)) => *ts, Some(TxTimestampOrID::TxID(tx_id)) => self.txs.get(tx_id).unwrap().value().begin_ts, + // This function is intended to be used in the ordering of row versions within the row version chain in `insert_version_raw`. + // + // The row version chain should be append-only (aside from garbage collection), + // so the specific ordering handled by this function may not be critical. We might + // be able to append directly to the row version chain in the future. + // + // The value 0 is used here to represent an infinite timestamp value. This is a deliberate + // choice for a planned future bitpacking optimization, reserving 0 for this purpose, + // while actual timestamps will start from 1. None => 0, } } From 24438f7e4ed8d0679df0ddefd6cd88f40cffb62c Mon Sep 17 00:00:00 2001 From: Henrik Ingo Date: Tue, 7 Oct 2025 15:51:34 +0300 Subject: [PATCH 042/428] Fix: perf_nightly.yml YAML syntax --- .github/workflows/perf_nightly.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/perf_nightly.yml b/.github/workflows/perf_nightly.yml index ec2ba7ee0..d2f3557a5 100644 --- a/.github/workflows/perf_nightly.yml +++ b/.github/workflows/perf_nightly.yml @@ -3,7 +3,6 @@ name: Nightly Benchmarks on Nyrkiö Runners (stability) on: workflow_dispatch: ["main", "notmain", "master"] schedule: - - branches: ["main"] - cron: '24 6,11,14,20 * * *' push: # branches: ["main", "notmain", "master"] From e5c44cced0e8624a1e2eb720165d0776d5ded509 Mon Sep 17 00:00:00 2001 From: Henrik Ingo Date: Tue, 7 Oct 2025 15:53:17 +0300 Subject: [PATCH 043/428] fix: perf_nightly YAML syntax --- .github/workflows/perf_nightly.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/perf_nightly.yml b/.github/workflows/perf_nightly.yml index d2f3557a5..2579d4521 100644 --- a/.github/workflows/perf_nightly.yml +++ b/.github/workflows/perf_nightly.yml @@ -1,7 +1,8 @@ name: Nightly Benchmarks on Nyrkiö Runners (stability) on: - workflow_dispatch: ["main", "notmain", "master"] + workflow_dispatch: + branches: ["main", "notmain", "master"] schedule: - cron: '24 6,11,14,20 * * *' push: From 7f8c139638827951a0fac9434b7cbafc289273fc Mon Sep 17 00:00:00 2001 From: bit-aloo Date: Tue, 7 Oct 2025 18:27:02 +0530 Subject: [PATCH 044/428] make clippy happy --- sql_generation/generation/table.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql_generation/generation/table.rs b/sql_generation/generation/table.rs index 5e3c371ac..6e55942c3 100644 --- a/sql_generation/generation/table.rs +++ b/sql_generation/generation/table.rs @@ -17,7 +17,7 @@ impl Arbitrary for Name { fn arbitrary(rng: &mut R, _c: &C) -> Self { let base = readable_name_custom("_", rng).replace("-", "_"); let id = COUNTER.fetch_add(1, Ordering::Relaxed); - Name(format!("{}_{}", base, id)) + Name(format!("{base}_{id}")) } } From 77a412f6afb85da2389510cbd9a39306fcd55b5f Mon Sep 17 00:00:00 2001 From: "Levy A." Date: Sun, 5 Oct 2025 21:31:29 -0300 Subject: [PATCH 045/428] refactor: remove unsafe reference semantics from `RefValue` also renames `RefValue` to `ValueRef`, to align with rusqlite and other crates --- core/incremental/aggregate_operator.rs | 8 +- core/incremental/cursor.rs | 15 +- core/json/mod.rs | 40 +- core/lib.rs | 2 +- .../mvcc/database/checkpoint_state_machine.rs | 6 +- core/mvcc/database/mod.rs | 4 +- core/mvcc/database/tests.rs | 6 +- core/mvcc/persistent_storage/logical_log.rs | 8 +- core/schema.rs | 28 +- core/storage/btree.rs | 10 +- core/storage/sqlite3_ondisk.rs | 37 +- core/types.rs | 378 +++++++++--------- core/vdbe/execute.rs | 34 +- core/vdbe/mod.rs | 20 +- core/vdbe/sorter.rs | 81 ++-- 15 files changed, 329 insertions(+), 348 deletions(-) diff --git a/core/incremental/aggregate_operator.rs b/core/incremental/aggregate_operator.rs index e24b930a9..e577f05e2 100644 --- a/core/incremental/aggregate_operator.rs +++ b/core/incremental/aggregate_operator.rs @@ -7,7 +7,7 @@ use crate::incremental::operator::{ generate_storage_id, ComputationTracker, DbspStateCursors, EvalState, IncrementalOperator, }; use crate::incremental::persistence::{ReadRecord, WriteRow}; -use crate::types::{IOResult, ImmutableRecord, RefValue, SeekKey, SeekOp, SeekResult}; +use crate::types::{IOResult, ImmutableRecord, SeekKey, SeekOp, SeekResult, ValueRef}; use crate::{return_and_restore_if_io, return_if_io, LimboError, Result, Value}; use std::collections::{BTreeMap, HashMap, HashSet}; use std::fmt::{self, Display}; @@ -1546,7 +1546,7 @@ impl ScanState { }; // Check if we're still in the same group - if let RefValue::Integer(rec_sid) = rec_storage_id { + if let ValueRef::Integer(rec_sid) = rec_storage_id { if *rec_sid != storage_id { return Ok(IOResult::Done(None)); } @@ -1555,8 +1555,8 @@ impl ScanState { } // Compare zset_hash as blob - if let RefValue::Blob(rec_zset_blob) = rec_zset_hash { - if let Some(rec_hash) = Hash128::from_blob(rec_zset_blob.to_slice()) { + if let ValueRef::Blob(rec_zset_blob) = rec_zset_hash { + if let Some(rec_hash) = Hash128::from_blob(rec_zset_blob) { if rec_hash != zset_hash { return Ok(IOResult::Done(None)); } diff --git a/core/incremental/cursor.rs b/core/incremental/cursor.rs index 67814bd0d..22070f29d 100644 --- a/core/incremental/cursor.rs +++ b/core/incremental/cursor.rs @@ -117,15 +117,12 @@ impl MaterializedViewCursor { Some(rowid) => rowid, }; - let btree_record = return_if_io!(self.btree_cursor.record()); - let btree_ref_values = btree_record - .ok_or_else(|| { - crate::LimboError::InternalError( - "Invalid data in materialized view: found a rowid, but not the row!" - .to_string(), - ) - })? - .get_values(); + let btree_record = return_if_io!(self.btree_cursor.record()).ok_or_else(|| { + crate::LimboError::InternalError( + "Invalid data in materialized view: found a rowid, but not the row!".to_string(), + ) + })?; + let btree_ref_values = btree_record.get_values(); // Convert RefValues to Values (copying for now - can optimize later) let mut btree_values: Vec = diff --git a/core/json/mod.rs b/core/json/mod.rs index 6209b6774..d3b1f4e19 100644 --- a/core/json/mod.rs +++ b/core/json/mod.rs @@ -11,9 +11,9 @@ pub use crate::json::ops::{ jsonb_replace, }; use crate::json::path::{json_path, JsonPath, PathElement}; -use crate::types::{RawSlice, Text, TextRef, TextSubtype, Value, ValueType}; +use crate::types::{Text, TextSubtype, Value, ValueType}; use crate::vdbe::Register; -use crate::{bail_constraint_error, bail_parse_error, LimboError, RefValue}; +use crate::{bail_constraint_error, bail_parse_error, LimboError, ValueRef}; pub use cache::JsonCacheCell; use jsonb::{ElementType, Jsonb, JsonbHeader, PathOperationMode, SearchOperation, SetOperation}; use std::borrow::Cow; @@ -105,14 +105,12 @@ pub fn json_from_raw_bytes_agg(data: &[u8], raw: bool) -> crate::Result { pub fn convert_dbtype_to_jsonb(val: &Value, strict: Conv) -> crate::Result { convert_ref_dbtype_to_jsonb( - &match val { - Value::Null => RefValue::Null, - Value::Integer(x) => RefValue::Integer(*x), - Value::Float(x) => RefValue::Float(*x), - Value::Text(text) => { - RefValue::Text(TextRef::create_from(text.as_str().as_bytes(), text.subtype)) - } - Value::Blob(items) => RefValue::Blob(RawSlice::create_from(items)), + match val { + Value::Null => ValueRef::Null, + Value::Integer(x) => ValueRef::Integer(*x), + Value::Float(x) => ValueRef::Float(*x), + Value::Text(text) => ValueRef::Text(text.as_str().as_bytes(), text.subtype), + Value::Blob(items) => ValueRef::Blob(items.as_slice()), }, strict, ) @@ -124,14 +122,14 @@ fn parse_as_json_text(slice: &[u8]) -> crate::Result { Jsonb::from_str_with_mode(str, Conv::Strict).map_err(Into::into) } -pub fn convert_ref_dbtype_to_jsonb(val: &RefValue, strict: Conv) -> crate::Result { +pub fn convert_ref_dbtype_to_jsonb(val: ValueRef<'_>, strict: Conv) -> crate::Result { match val { - RefValue::Text(text) => { - let res = if text.subtype == TextSubtype::Json || matches!(strict, Conv::Strict) { - Jsonb::from_str_with_mode(text.as_str(), strict) + ValueRef::Text(text, subtype) => { + let res = if subtype == TextSubtype::Json || matches!(strict, Conv::Strict) { + Jsonb::from_str_with_mode(&String::from_utf8_lossy(text), strict) } else { // Handle as a string literal otherwise - let mut str = text.as_str().replace('"', "\\\""); + let mut str = String::from_utf8_lossy(text).replace('"', "\\\""); // Quote the string to make it a JSON string str.insert(0, '"'); str.push('"'); @@ -139,8 +137,8 @@ pub fn convert_ref_dbtype_to_jsonb(val: &RefValue, strict: Conv) -> crate::Resul }; res.map_err(|_| LimboError::ParseError("malformed JSON".to_string())) } - RefValue::Blob(blob) => { - let bytes = blob.to_slice(); + ValueRef::Blob(blob) => { + let bytes = blob; // Valid JSON can start with these whitespace characters let index = bytes .iter() @@ -177,15 +175,15 @@ pub fn convert_ref_dbtype_to_jsonb(val: &RefValue, strict: Conv) -> crate::Resul json.element_type()?; Ok(json) } - RefValue::Null => Ok(Jsonb::from_raw_data( + ValueRef::Null => Ok(Jsonb::from_raw_data( JsonbHeader::make_null().into_bytes().as_bytes(), )), - RefValue::Float(float) => { + ValueRef::Float(float) => { let mut buff = ryu::Buffer::new(); - Jsonb::from_str(buff.format(*float)) + Jsonb::from_str(buff.format(float)) .map_err(|_| LimboError::ParseError("malformed JSON".to_string())) } - RefValue::Integer(int) => Jsonb::from_str(&int.to_string()) + ValueRef::Integer(int) => Jsonb::from_str(&int.to_string()) .map_err(|_| LimboError::ParseError("malformed JSON".to_string())), } } diff --git a/core/lib.rs b/core/lib.rs index cf51ae283..8145af6e7 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -97,8 +97,8 @@ use turso_macros::match_ignore_ascii_case; use turso_parser::ast::fmt::ToTokens; use turso_parser::{ast, ast::Cmd, parser::Parser}; use types::IOResult; -pub use types::RefValue; pub use types::Value; +pub use types::ValueRef; use util::parse_schema_rows; pub use util::IOExt; pub use vdbe::{builder::QueryMode, explain::EXPLAIN_COLUMNS, explain::EXPLAIN_QUERY_PLAN_COLUMNS}; diff --git a/core/mvcc/database/checkpoint_state_machine.rs b/core/mvcc/database/checkpoint_state_machine.rs index 5a7e6a38f..fbe8b2b84 100644 --- a/core/mvcc/database/checkpoint_state_machine.rs +++ b/core/mvcc/database/checkpoint_state_machine.rs @@ -9,8 +9,8 @@ use crate::storage::pager::CreateBTreeFlags; use crate::storage::wal::{CheckpointMode, TursoRwLock}; use crate::types::{IOCompletions, IOResult, ImmutableRecord, RecordCursor}; use crate::{ - CheckpointResult, Completion, Connection, IOExt, Pager, RefValue, Result, TransactionState, - Value, + CheckpointResult, Completion, Connection, IOExt, Pager, Result, TransactionState, Value, + ValueRef, }; use parking_lot::RwLock; use std::collections::{HashMap, HashSet}; @@ -191,7 +191,7 @@ impl CheckpointStateMachine { let row_data = ImmutableRecord::from_bin_record(row_data.clone()); let mut record_cursor = RecordCursor::new(); record_cursor.parse_full_header(&row_data).unwrap(); - let RefValue::Integer(root_page) = + let ValueRef::Integer(root_page) = record_cursor.get_value(&row_data, 3).unwrap() else { panic!( diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 7f1dd573a..95014cbc2 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -18,11 +18,11 @@ use crate::Completion; use crate::File; use crate::IOExt; use crate::LimboError; -use crate::RefValue; use crate::Result; use crate::Statement; use crate::StepResult; use crate::Value; +use crate::ValueRef; use crate::{Connection, Pager}; use crossbeam_skiplist::{SkipMap, SkipSet}; use parking_lot::RwLock; @@ -1978,7 +1978,7 @@ impl MvStore { let record = ImmutableRecord::from_bin_record(row_data); let mut record_cursor = RecordCursor::new(); let record_values = record_cursor.get_values(&record).unwrap(); - let RefValue::Integer(root_page) = record_values[3] else { + let ValueRef::Integer(root_page) = record_values[3] else { panic!( "Expected integer value for root page, got {:?}", record_values[3] diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs index a3aa8db5d..2c8d23106 100644 --- a/core/mvcc/database/tests.rs +++ b/core/mvcc/database/tests.rs @@ -714,7 +714,7 @@ use crate::types::Text; use crate::Value; use crate::{Database, StepResult}; use crate::{MemoryIO, Statement}; -use crate::{RefValue, DATABASE_MANAGER}; +use crate::{ValueRef, DATABASE_MANAGER}; // Simple atomic clock implementation for testing @@ -978,7 +978,7 @@ fn test_cursor_modification_during_scan() { record.start_serialization(&row.data); let value = record.get_value(0).unwrap(); match value { - RefValue::Text(text) => { + ValueRef::Text(text) => { assert_eq!(text.as_str(), "new_row"); } _ => panic!("Expected Text value"), @@ -1210,7 +1210,7 @@ fn test_restart() { .unwrap(); let record = get_record_value(&row); match record.get_value(0).unwrap() { - RefValue::Text(text) => { + ValueRef::Text(text) => { assert_eq!(text.as_str(), "bar"); } _ => panic!("Expected Text value"), diff --git a/core/mvcc/persistent_storage/logical_log.rs b/core/mvcc/persistent_storage/logical_log.rs index a902bac98..52a01bb27 100644 --- a/core/mvcc/persistent_storage/logical_log.rs +++ b/core/mvcc/persistent_storage/logical_log.rs @@ -501,7 +501,7 @@ mod tests { LocalClock, MvStore, }, types::{ImmutableRecord, Text}, - OpenFlags, RefValue, Value, + OpenFlags, Value, ValueRef, }; use super::LogRecordType; @@ -565,7 +565,7 @@ mod tests { let record = ImmutableRecord::from_bin_record(row.data.clone()); let values = record.get_values(); let foo = values.first().unwrap(); - let RefValue::Text(foo) = foo else { + let ValueRef::Text(foo) = foo else { unreachable!() }; assert_eq!(foo.as_str(), "foo"); @@ -637,7 +637,7 @@ mod tests { let record = ImmutableRecord::from_bin_record(row.data.clone()); let values = record.get_values(); let foo = values.first().unwrap(); - let RefValue::Text(foo) = foo else { + let ValueRef::Text(foo) = foo else { unreachable!() }; assert_eq!(foo.as_str(), value.as_str()); @@ -758,7 +758,7 @@ mod tests { let record = ImmutableRecord::from_bin_record(row.data.clone()); let values = record.get_values(); let foo = values.first().unwrap(); - let RefValue::Text(foo) = foo else { + let ValueRef::Text(foo) = foo else { unreachable!() }; diff --git a/core/schema.rs b/core/schema.rs index 9208c5d1d..106dc30c5 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -80,7 +80,7 @@ use crate::util::{ }; use crate::{ bail_parse_error, contains_ignore_ascii_case, eq_ignore_ascii_case, match_ignore_ascii_case, - Connection, LimboError, MvCursor, MvStore, Pager, RefValue, SymbolTable, VirtualTable, + Connection, LimboError, MvCursor, MvStore, Pager, SymbolTable, ValueRef, VirtualTable, }; use crate::{util::normalize_ident, Result}; use core::fmt; @@ -428,36 +428,36 @@ impl Schema { let mut record_cursor = cursor.record_cursor.borrow_mut(); // sqlite schema table has 5 columns: type, name, tbl_name, rootpage, sql let ty_value = record_cursor.get_value(&row, 0)?; - let RefValue::Text(ty) = ty_value else { + let ValueRef::Text(ty, _) = ty_value else { return Err(LimboError::ConversionError("Expected text value".into())); }; - let ty = ty.as_str(); - let RefValue::Text(name) = record_cursor.get_value(&row, 1)? else { + let ty = String::from_utf8_lossy(ty); + let ValueRef::Text(name, _) = record_cursor.get_value(&row, 1)? else { return Err(LimboError::ConversionError("Expected text value".into())); }; - let name = name.as_str(); + let name = String::from_utf8_lossy(name); let table_name_value = record_cursor.get_value(&row, 2)?; - let RefValue::Text(table_name) = table_name_value else { + let ValueRef::Text(table_name, _) = table_name_value else { return Err(LimboError::ConversionError("Expected text value".into())); }; - let table_name = table_name.as_str(); + let table_name = String::from_utf8_lossy(table_name); let root_page_value = record_cursor.get_value(&row, 3)?; - let RefValue::Integer(root_page) = root_page_value else { + let ValueRef::Integer(root_page) = root_page_value else { return Err(LimboError::ConversionError("Expected integer value".into())); }; let sql_value = record_cursor.get_value(&row, 4)?; let sql_textref = match sql_value { - RefValue::Text(sql) => Some(sql), + ValueRef::Text(sql, _) => Some(sql), _ => None, }; - let sql = sql_textref.as_ref().map(|s| s.as_str()); + let sql = sql_textref.map(|s| String::from_utf8_lossy(s)); self.handle_schema_row( - ty, - name, - table_name, + &ty, + &name, + &table_name, root_page, - sql, + sql.as_deref(), syms, &mut from_sql_indexes, &mut automatic_indices, diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 71a63aba6..7935d3595 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -28,7 +28,7 @@ use crate::{ use crate::{ return_corrupt, return_if_io, - types::{compare_immutable, IOResult, ImmutableRecord, RefValue, SeekKey, SeekOp, Value}, + types::{compare_immutable, IOResult, ImmutableRecord, SeekKey, SeekOp, Value, ValueRef}, LimboError, Result, }; @@ -705,7 +705,7 @@ impl BTreeCursor { .unwrap() .last_value(record_cursor) { - Some(Ok(RefValue::Integer(rowid))) => rowid, + Some(Ok(ValueRef::Integer(rowid))) => rowid, _ => unreachable!( "index where has_rowid() is true should have an integer rowid as the last value" ), @@ -2164,7 +2164,7 @@ impl BTreeCursor { fn compare_with_current_record( &self, - key_values: &[RefValue], + key_values: &[ValueRef], seek_op: SeekOp, record_comparer: &RecordCompare, index_info: &IndexInfo, @@ -8930,7 +8930,7 @@ mod tests { let record = record.as_ref().unwrap(); let cur = record.get_values().clone(); let cur = cur.first().unwrap(); - let RefValue::Blob(ref cur) = cur else { + let ValueRef::Blob(ref cur) = cur else { panic!("expected blob, got {cur:?}"); }; assert_eq!( @@ -9473,7 +9473,7 @@ mod tests { let value = record.unwrap().get_value(0)?; assert_eq!( value, - RefValue::Integer(i), + ValueRef::Integer(i), "Unexpected value for record {i}", ); } diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs index 6dfe12bd1..08f7addc3 100644 --- a/core/storage/sqlite3_ondisk.rs +++ b/core/storage/sqlite3_ondisk.rs @@ -61,7 +61,7 @@ use crate::storage::buffer_pool::BufferPool; use crate::storage::database::{DatabaseFile, DatabaseStorage, EncryptionOrChecksum}; use crate::storage::pager::Pager; use crate::storage::wal::READMARK_NOT_USED; -use crate::types::{RawSlice, RefValue, SerialType, SerialTypeKind, TextRef, TextSubtype}; +use crate::types::{SerialType, SerialTypeKind, TextSubtype, ValueRef}; use crate::{ bail_corrupt_error, turso_assert, CompletionError, File, IOContext, Result, WalFileShared, }; @@ -1320,22 +1320,22 @@ impl Iterator for SmallVecIter<'_, T, N> { /// Reads a value that might reference the buffer it is reading from. Be sure to store RefValue with the buffer /// always. #[inline(always)] -pub fn read_value(buf: &[u8], serial_type: SerialType) -> Result<(RefValue, usize)> { +pub fn read_value<'a>(buf: &'a [u8], serial_type: SerialType) -> Result<(ValueRef<'a>, usize)> { match serial_type.kind() { - SerialTypeKind::Null => Ok((RefValue::Null, 0)), + SerialTypeKind::Null => Ok((ValueRef::Null, 0)), SerialTypeKind::I8 => { if buf.is_empty() { crate::bail_corrupt_error!("Invalid UInt8 value"); } let val = buf[0] as i8; - Ok((RefValue::Integer(val as i64), 1)) + Ok((ValueRef::Integer(val as i64), 1)) } SerialTypeKind::I16 => { if buf.len() < 2 { crate::bail_corrupt_error!("Invalid BEInt16 value"); } Ok(( - RefValue::Integer(i16::from_be_bytes([buf[0], buf[1]]) as i64), + ValueRef::Integer(i16::from_be_bytes([buf[0], buf[1]]) as i64), 2, )) } @@ -1345,7 +1345,7 @@ pub fn read_value(buf: &[u8], serial_type: SerialType) -> Result<(RefValue, usiz } let sign_extension = if buf[0] <= 127 { 0 } else { 255 }; Ok(( - RefValue::Integer( + ValueRef::Integer( i32::from_be_bytes([sign_extension, buf[0], buf[1], buf[2]]) as i64 ), 3, @@ -1356,7 +1356,7 @@ pub fn read_value(buf: &[u8], serial_type: SerialType) -> Result<(RefValue, usiz crate::bail_corrupt_error!("Invalid BEInt32 value"); } Ok(( - RefValue::Integer(i32::from_be_bytes([buf[0], buf[1], buf[2], buf[3]]) as i64), + ValueRef::Integer(i32::from_be_bytes([buf[0], buf[1], buf[2], buf[3]]) as i64), 4, )) } @@ -1366,7 +1366,7 @@ pub fn read_value(buf: &[u8], serial_type: SerialType) -> Result<(RefValue, usiz } let sign_extension = if buf[0] <= 127 { 0 } else { 255 }; Ok(( - RefValue::Integer(i64::from_be_bytes([ + ValueRef::Integer(i64::from_be_bytes([ sign_extension, sign_extension, buf[0], @@ -1384,7 +1384,7 @@ pub fn read_value(buf: &[u8], serial_type: SerialType) -> Result<(RefValue, usiz crate::bail_corrupt_error!("Invalid BEInt64 value"); } Ok(( - RefValue::Integer(i64::from_be_bytes([ + ValueRef::Integer(i64::from_be_bytes([ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], ])), 8, @@ -1395,26 +1395,20 @@ pub fn read_value(buf: &[u8], serial_type: SerialType) -> Result<(RefValue, usiz crate::bail_corrupt_error!("Invalid BEFloat64 value"); } Ok(( - RefValue::Float(f64::from_be_bytes([ + ValueRef::Float(f64::from_be_bytes([ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], ])), 8, )) } - SerialTypeKind::ConstInt0 => Ok((RefValue::Integer(0), 0)), - SerialTypeKind::ConstInt1 => Ok((RefValue::Integer(1), 0)), + SerialTypeKind::ConstInt0 => Ok((ValueRef::Integer(0), 0)), + SerialTypeKind::ConstInt1 => Ok((ValueRef::Integer(1), 0)), SerialTypeKind::Blob => { let content_size = serial_type.size(); if buf.len() < content_size { crate::bail_corrupt_error!("Invalid Blob value"); } - if content_size == 0 { - Ok((RefValue::Blob(RawSlice::new(std::ptr::null(), 0)), 0)) - } else { - let ptr = &buf[0] as *const u8; - let slice = RawSlice::new(ptr, content_size); - Ok((RefValue::Blob(slice), content_size)) - } + Ok((ValueRef::Blob(&buf[..content_size]), content_size)) } SerialTypeKind::Text => { let content_size = serial_type.size(); @@ -1427,10 +1421,7 @@ pub fn read_value(buf: &[u8], serial_type: SerialType) -> Result<(RefValue, usiz } Ok(( - RefValue::Text(TextRef::create_from( - &buf[..content_size], - TextSubtype::Text, - )), + ValueRef::Text(&buf[..content_size], TextSubtype::Text), content_size, )) } diff --git a/core/types.rs b/core/types.rs index dc4b2162e..b86c72726 100644 --- a/core/types.rs +++ b/core/types.rs @@ -255,24 +255,24 @@ pub struct RawSlice { len: usize, } -#[derive(PartialEq, Clone)] -pub enum RefValue { +#[derive(PartialEq, Clone, Copy)] +pub enum ValueRef<'a> { Null, Integer(i64), Float(f64), - Text(TextRef), - Blob(RawSlice), + Text(&'a [u8], TextSubtype), + Blob(&'a [u8]), } -impl Debug for RefValue { +impl Debug for ValueRef<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - RefValue::Null => write!(f, "Null"), - RefValue::Integer(i) => f.debug_tuple("Integer").field(i).finish(), - RefValue::Float(float) => f.debug_tuple("Float").field(float).finish(), - RefValue::Text(text_ref) => { + ValueRef::Null => write!(f, "Null"), + ValueRef::Integer(i) => f.debug_tuple("Integer").field(i).finish(), + ValueRef::Float(float) => f.debug_tuple("Float").field(float).finish(), + ValueRef::Text(text_ref, _) => { // truncate string to at most 256 chars - let text = text_ref.as_str(); + let text = String::from_utf8_lossy(text_ref); let max_len = text.len().min(256); f.debug_struct("Text") .field("data", &&text[0..max_len]) @@ -280,9 +280,8 @@ impl Debug for RefValue { .field("truncated", &(text.len() > max_len)) .finish() } - RefValue::Blob(raw_slice) => { + ValueRef::Blob(blob) => { // truncate blob_slice to at most 32 bytes - let blob = raw_slice.to_slice(); let max_len = blob.len().min(32); f.debug_struct("Blob") .field("data", &&blob[0..max_len]) @@ -295,6 +294,16 @@ impl Debug for RefValue { } impl Value { + pub fn as_ref<'a>(&'a self) -> ValueRef<'a> { + match self { + Value::Null => ValueRef::Null, + Value::Integer(v) => ValueRef::Integer(*v), + Value::Float(v) => ValueRef::Float(*v), + Value::Text(v) => ValueRef::Text(v.value.as_slice(), v.subtype), + Value::Blob(v) => ValueRef::Blob(v.as_slice()), + } + } + // A helper function that makes building a text Value easier. pub fn build_text(text: impl AsRef) -> Self { Self::Text(Text::new(text.as_ref())) @@ -833,34 +842,36 @@ impl std::ops::DivAssign for Value { } } -impl<'a> TryFrom<&'a RefValue> for i64 { +impl TryFrom> for i64 { type Error = LimboError; - fn try_from(value: &'a RefValue) -> Result { + fn try_from(value: ValueRef<'_>) -> Result { match value { - RefValue::Integer(i) => Ok(*i), + ValueRef::Integer(i) => Ok(i), _ => Err(LimboError::ConversionError("Expected integer value".into())), } } } -impl<'a> TryFrom<&'a RefValue> for String { +impl TryFrom> for String { type Error = LimboError; - fn try_from(value: &'a RefValue) -> Result { + fn try_from(value: ValueRef<'_>) -> Result { match value { - RefValue::Text(s) => Ok(s.as_str().to_string()), + ValueRef::Text(s, _) => Ok(String::from_utf8_lossy(s).to_string()), _ => Err(LimboError::ConversionError("Expected text value".into())), } } } -impl<'a> TryFrom<&'a RefValue> for &'a str { +impl<'a> TryFrom> for &'a str { type Error = LimboError; - fn try_from(value: &'a RefValue) -> Result { + fn try_from(value: ValueRef<'a>) -> Result { match value { - RefValue::Text(s) => Ok(s.as_str()), + ValueRef::Text(s, _) => Ok(str::from_utf8(s).map_err(|_| { + LimboError::ConversionError("Expected a valid UTF8 string".to_string()) + })?), _ => Err(LimboError::ConversionError("Expected text value".into())), } } @@ -987,7 +998,7 @@ impl ImmutableRecord { // TODO: inline the complete record parsing code here. // Its probably more efficient. - pub fn get_values(&self) -> Vec { + pub fn get_values<'a>(&'a self) -> Vec> { let mut cursor = RecordCursor::new(); cursor.get_values(self).unwrap_or_default() } @@ -1007,7 +1018,6 @@ impl ImmutableRecord { values: impl IntoIterator + Clone, len: usize, ) -> Self { - let mut ref_values = Vec::with_capacity(len); let mut serials = Vec::with_capacity(len); let mut size_header = 0; let mut size_values = 0; @@ -1044,13 +1054,9 @@ impl ImmutableRecord { // write content for value in values { - let start_offset = writer.pos; match value { - Value::Null => { - ref_values.push(RefValue::Null); - } + Value::Null => {} Value::Integer(i) => { - ref_values.push(RefValue::Integer(*i)); let serial_type = SerialType::from(value); match serial_type.kind() { SerialTypeKind::ConstInt0 | SerialTypeKind::ConstInt1 => {} @@ -1065,27 +1071,12 @@ impl ImmutableRecord { other => panic!("Serial type is not an integer: {other:?}"), } } - Value::Float(f) => { - ref_values.push(RefValue::Float(*f)); - writer.extend_from_slice(&f.to_be_bytes()) - } + Value::Float(f) => writer.extend_from_slice(&f.to_be_bytes()), Value::Text(t) => { writer.extend_from_slice(&t.value); - let end_offset = writer.pos; - let len = end_offset - start_offset; - let ptr = unsafe { writer.buf.as_ptr().add(start_offset) }; - let value = RefValue::Text(TextRef { - value: RawSlice::new(ptr, len), - subtype: t.subtype, - }); - ref_values.push(value); } Value::Blob(b) => { writer.extend_from_slice(b); - let end_offset = writer.pos; - let len = end_offset - start_offset; - let ptr = unsafe { writer.buf.as_ptr().add(start_offset) }; - ref_values.push(RefValue::Blob(RawSlice::new(ptr, len))); } }; } @@ -1132,7 +1123,10 @@ impl ImmutableRecord { // TODO: its probably better to not instantiate the RecordCurosr. Instead do the deserialization // inside the function. - pub fn last_value(&self, record_cursor: &mut RecordCursor) -> Option> { + pub fn last_value<'a>( + &'a self, + record_cursor: &mut RecordCursor, + ) -> Option>> { if self.is_invalidated() { return Some(Err(LimboError::InternalError( "Record is invalidated".into(), @@ -1143,12 +1137,12 @@ impl ImmutableRecord { Some(record_cursor.get_value(self, last_idx)) } - pub fn get_value(&self, idx: usize) -> Result { + pub fn get_value<'a>(&'a self, idx: usize) -> Result> { let mut cursor = RecordCursor::new(); cursor.get_value(self, idx) } - pub fn get_value_opt(&self, idx: usize) -> Option { + pub fn get_value_opt<'a>(&'a self, idx: usize) -> Option> { if self.is_invalidated() { return None; } @@ -1314,23 +1308,27 @@ impl RecordCursor { /// # Special Cases /// /// - Returns `RefValue::Null` for out-of-bounds indices - pub fn deserialize_column(&self, record: &ImmutableRecord, idx: usize) -> Result { + pub fn deserialize_column<'a>( + &self, + record: &'a ImmutableRecord, + idx: usize, + ) -> Result> { if idx >= self.serial_types.len() { - return Ok(RefValue::Null); + return Ok(ValueRef::Null); } let serial_type = self.serial_types[idx]; let serial_type_obj = SerialType::try_from(serial_type)?; match serial_type_obj.kind() { - SerialTypeKind::Null => return Ok(RefValue::Null), - SerialTypeKind::ConstInt0 => return Ok(RefValue::Integer(0)), - SerialTypeKind::ConstInt1 => return Ok(RefValue::Integer(1)), + SerialTypeKind::Null => return Ok(ValueRef::Null), + SerialTypeKind::ConstInt0 => return Ok(ValueRef::Integer(0)), + SerialTypeKind::ConstInt1 => return Ok(ValueRef::Integer(1)), _ => {} // continue } if idx + 1 >= self.offsets.len() { - return Ok(RefValue::Null); + return Ok(ValueRef::Null); } let start = self.offsets[idx]; @@ -1358,7 +1356,11 @@ impl RecordCursor { /// * `Err(LimboError)` - Access failed due to invalid record or parsing error /// #[inline(always)] - pub fn get_value(&mut self, record: &ImmutableRecord, idx: usize) -> Result { + pub fn get_value<'a>( + &mut self, + record: &'a ImmutableRecord, + idx: usize, + ) -> Result> { if record.is_invalidated() { return Err(LimboError::InternalError("Record not initialized".into())); } @@ -1380,11 +1382,11 @@ impl RecordCursor { /// * `Some(Err(LimboError))` - Parsing succeeded but deserialization failed /// * `None` - Record is invalid or index is out of bounds /// - pub fn get_value_opt( + pub fn get_value_opt<'a>( &mut self, - record: &ImmutableRecord, + record: &'a ImmutableRecord, idx: usize, - ) -> Option> { + ) -> Option>> { if record.is_invalidated() { return None; } @@ -1443,7 +1445,7 @@ impl RecordCursor { /// * `Ok(Vec)` - All values in column order /// * `Err(LimboError)` - Parsing or deserialization failed /// - pub fn get_values(&mut self, record: &ImmutableRecord) -> Result> { + pub fn get_values<'a>(&mut self, record: &'a ImmutableRecord) -> Result>> { if record.is_invalidated() { return Ok(Vec::new()); } @@ -1459,62 +1461,62 @@ impl RecordCursor { } } -impl RefValue { +impl<'a> ValueRef<'a> { pub fn to_ffi(&self) -> ExtValue { match self { Self::Null => ExtValue::null(), Self::Integer(i) => ExtValue::from_integer(*i), Self::Float(fl) => ExtValue::from_float(*fl), - Self::Text(text) => ExtValue::from_text( - std::str::from_utf8(text.value.to_slice()) - .unwrap() - .to_string(), - ), - Self::Blob(blob) => ExtValue::from_blob(blob.to_slice().to_vec()), + Self::Text(text, _) => { + ExtValue::from_text(std::str::from_utf8(text).unwrap().to_string()) + } + Self::Blob(blob) => ExtValue::from_blob(blob.to_vec()), + } + } + + pub fn to_blob(&self) -> Option<&'a [u8]> { + match self { + Self::Blob(blob) => Some(*blob), + _ => None, } } pub fn to_owned(&self) -> Value { match self { - RefValue::Null => Value::Null, - RefValue::Integer(i) => Value::Integer(*i), - RefValue::Float(f) => Value::Float(*f), - RefValue::Text(text_ref) => Value::Text(Text { - value: text_ref.value.to_slice().to_vec(), - subtype: text_ref.subtype, + ValueRef::Null => Value::Null, + ValueRef::Integer(i) => Value::Integer(*i), + ValueRef::Float(f) => Value::Float(*f), + ValueRef::Text(text, subtype) => Value::Text(Text { + value: text.to_vec(), + subtype: *subtype, }), - RefValue::Blob(b) => Value::Blob(b.to_slice().to_vec()), - } - } - pub fn to_blob(&self) -> Option<&[u8]> { - match self { - Self::Blob(blob) => Some(blob.to_slice()), - _ => None, + ValueRef::Blob(b) => Value::Blob(b.to_vec()), } } } -impl Display for RefValue { +impl Display for ValueRef<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Null => write!(f, "NULL"), Self::Integer(i) => write!(f, "{i}"), Self::Float(fl) => write!(f, "{fl:?}"), - Self::Text(s) => write!(f, "{}", s.as_str()), - Self::Blob(b) => write!(f, "{}", String::from_utf8_lossy(b.to_slice())), + Self::Text(s, _) => write!(f, "{}", String::from_utf8_lossy(s)), + Self::Blob(b) => write!(f, "{}", String::from_utf8_lossy(b)), } } } -impl Eq for RefValue {} -impl Ord for RefValue { +impl Eq for ValueRef<'_> {} + +impl Ord for ValueRef<'_> { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.partial_cmp(other).unwrap() } } #[allow(clippy::non_canonical_partial_ord_impl)] -impl PartialOrd for RefValue { +impl<'a> PartialOrd> for ValueRef<'a> { fn partial_cmp(&self, other: &Self) -> Option { match (self, other) { (Self::Integer(int_left), Self::Integer(int_right)) => int_left.partial_cmp(int_right), @@ -1528,24 +1530,21 @@ impl PartialOrd for RefValue { float_left.partial_cmp(float_right) } // Numeric vs Text/Blob - (Self::Integer(_) | Self::Float(_), Self::Text(_) | Self::Blob(_)) => { + (Self::Integer(_) | Self::Float(_), Self::Text(_, _) | Self::Blob(_)) => { Some(std::cmp::Ordering::Less) } - (Self::Text(_) | Self::Blob(_), Self::Integer(_) | Self::Float(_)) => { + (Self::Text(_, _) | Self::Blob(_), Self::Integer(_) | Self::Float(_)) => { Some(std::cmp::Ordering::Greater) } - (Self::Text(text_left), Self::Text(text_right)) => text_left - .value - .to_slice() - .partial_cmp(text_right.value.to_slice()), - // Text vs Blob - (Self::Text(_), Self::Blob(_)) => Some(std::cmp::Ordering::Less), - (Self::Blob(_), Self::Text(_)) => Some(std::cmp::Ordering::Greater), - - (Self::Blob(blob_left), Self::Blob(blob_right)) => { - blob_left.to_slice().partial_cmp(blob_right.to_slice()) + (Self::Text(text_left, _), Self::Text(text_right, _)) => { + text_left.partial_cmp(text_right) } + // Text vs Blob + (Self::Text(_, _), Self::Blob(_)) => Some(std::cmp::Ordering::Less), + (Self::Blob(_), Self::Text(_, _)) => Some(std::cmp::Ordering::Greater), + + (Self::Blob(blob_left), Self::Blob(blob_right)) => blob_left.partial_cmp(blob_right), (Self::Null, Self::Null) => Some(std::cmp::Ordering::Equal), (Self::Null, _) => Some(std::cmp::Ordering::Less), (_, Self::Null) => Some(std::cmp::Ordering::Greater), @@ -1635,8 +1634,8 @@ impl IndexInfo { } pub fn compare_immutable( - l: &[RefValue], - r: &[RefValue], + l: &[ValueRef], + r: &[ValueRef], column_info: &[KeyInfo], ) -> std::cmp::Ordering { assert_eq!(l.len(), r.len()); @@ -1645,9 +1644,10 @@ pub fn compare_immutable( let column_order = column_info[i].sort_order; let collation = column_info[i].collation; let cmp = match (l, r) { - (RefValue::Text(left), RefValue::Text(right)) => { - collation.compare_strings(left.as_str(), right.as_str()) - } + (ValueRef::Text(left, _), ValueRef::Text(right, _)) => collation.compare_strings( + &String::from_utf8_lossy(left), + &String::from_utf8_lossy(right), + ), _ => l.partial_cmp(r).unwrap(), }; if !cmp.is_eq() { @@ -1671,7 +1671,7 @@ impl RecordCompare { pub fn compare( &self, serialized: &ImmutableRecord, - unpacked: &[RefValue], + unpacked: &[ValueRef], index_info: &IndexInfo, skip: usize, tie_breaker: std::cmp::Ordering, @@ -1690,11 +1690,11 @@ impl RecordCompare { } } -pub fn find_compare(unpacked: &[RefValue], index_info: &IndexInfo) -> RecordCompare { +pub fn find_compare(unpacked: &[ValueRef], index_info: &IndexInfo) -> RecordCompare { if !unpacked.is_empty() && index_info.num_cols <= 13 { match &unpacked[0] { - RefValue::Integer(_) => RecordCompare::Int, - RefValue::Text(_) if index_info.key_info[0].collation == CollationSeq::Binary => { + ValueRef::Integer(_) => RecordCompare::Int, + ValueRef::Text(_, _) if index_info.key_info[0].collation == CollationSeq::Binary => { RecordCompare::String } _ => RecordCompare::Generic, @@ -1760,7 +1760,7 @@ pub fn get_tie_breaker_from_seek_op(seek_op: SeekOp) -> std::cmp::Ordering { /// delegates to `compare_records_generic()` with `skip=1` fn compare_records_int( serialized: &ImmutableRecord, - unpacked: &[RefValue], + unpacked: &[ValueRef], index_info: &IndexInfo, tie_breaker: std::cmp::Ordering, ) -> Result { @@ -1794,7 +1794,7 @@ fn compare_records_int( let data_start = header_size; let lhs_int = read_integer(&payload[data_start..], first_serial_type as u8)?; - let RefValue::Integer(rhs_int) = unpacked[0] else { + let ValueRef::Integer(rhs_int) = unpacked[0] else { return compare_records_generic(serialized, unpacked, index_info, 0, tie_breaker); }; let comparison = match index_info.key_info[0].sort_order { @@ -1853,7 +1853,7 @@ fn compare_records_int( /// delegates to `compare_records_generic()` with `skip=1` fn compare_records_string( serialized: &ImmutableRecord, - unpacked: &[RefValue], + unpacked: &[ValueRef], index_info: &IndexInfo, tie_breaker: std::cmp::Ordering, ) -> Result { @@ -1884,7 +1884,7 @@ fn compare_records_string( return compare_records_generic(serialized, unpacked, index_info, 0, tie_breaker); } - let RefValue::Text(rhs_text) = &unpacked[0] else { + let ValueRef::Text(rhs_text, _) = &unpacked[0] else { return compare_records_generic(serialized, unpacked, index_info, 0, tie_breaker); }; @@ -1896,12 +1896,15 @@ fn compare_records_string( let serial_type = SerialType::try_from(first_serial_type)?; let (lhs_value, _) = read_value(&payload[data_start..], serial_type)?; - let RefValue::Text(lhs_text) = lhs_value else { + let ValueRef::Text(lhs_text, _) = lhs_value else { return compare_records_generic(serialized, unpacked, index_info, 0, tie_breaker); }; let collation = index_info.key_info[0].collation; - let comparison = collation.compare_strings(lhs_text.as_str(), rhs_text.as_str()); + let comparison = collation.compare_strings( + &String::from_utf8_lossy(lhs_text), + &String::from_utf8_lossy(rhs_text), + ); let final_comparison = match index_info.key_info[0].sort_order { SortOrder::Asc => comparison, @@ -1910,7 +1913,7 @@ fn compare_records_string( match final_comparison { std::cmp::Ordering::Equal => { - let len_cmp = lhs_text.value.len.cmp(&rhs_text.value.len); + let len_cmp = lhs_text.len().cmp(&rhs_text.len()); if len_cmp != std::cmp::Ordering::Equal { let adjusted = match index_info.key_info[0].sort_order { SortOrder::Asc => len_cmp, @@ -1962,7 +1965,7 @@ fn compare_records_string( /// `tie_breaker` is returned. pub fn compare_records_generic( serialized: &ImmutableRecord, - unpacked: &[RefValue], + unpacked: &[ValueRef], index_info: &IndexInfo, skip: usize, tie_breaker: std::cmp::Ordering, @@ -2009,9 +2012,9 @@ pub fn compare_records_generic( let rhs_value = &unpacked[field_idx]; let lhs_value = match serial_type.kind() { - SerialTypeKind::ConstInt0 => RefValue::Integer(0), - SerialTypeKind::ConstInt1 => RefValue::Integer(1), - SerialTypeKind::Null => RefValue::Null, + SerialTypeKind::ConstInt0 => ValueRef::Integer(0), + SerialTypeKind::ConstInt1 => ValueRef::Integer(1), + SerialTypeKind::Null => ValueRef::Null, _ => { let (value, field_size) = read_value(&payload[data_pos..], serial_type)?; data_pos += field_size; @@ -2020,15 +2023,18 @@ pub fn compare_records_generic( }; let comparison = match (&lhs_value, rhs_value) { - (RefValue::Text(lhs_text), RefValue::Text(rhs_text)) => index_info.key_info[field_idx] - .collation - .compare_strings(lhs_text.as_str(), rhs_text.as_str()), + (ValueRef::Text(lhs_text, _), ValueRef::Text(rhs_text, _)) => { + index_info.key_info[field_idx].collation.compare_strings( + &String::from_utf8_lossy(lhs_text), + &String::from_utf8_lossy(rhs_text), + ) + } - (RefValue::Integer(lhs_int), RefValue::Float(rhs_float)) => { + (ValueRef::Integer(lhs_int), ValueRef::Float(rhs_float)) => { sqlite_int_float_compare(*lhs_int, *rhs_float) } - (RefValue::Float(lhs_float), RefValue::Integer(rhs_int)) => { + (ValueRef::Float(lhs_float), ValueRef::Integer(rhs_int)) => { sqlite_int_float_compare(*rhs_int, *lhs_float).reverse() } @@ -2623,8 +2629,8 @@ mod tests { use crate::translate::collate::CollationSeq; pub fn compare_immutable_for_testing( - l: &[RefValue], - r: &[RefValue], + l: &[ValueRef], + r: &[ValueRef], index_key_info: &[KeyInfo], tie_breaker: std::cmp::Ordering, ) -> std::cmp::Ordering { @@ -2635,7 +2641,7 @@ mod tests { let collation = index_key_info[i].collation; let cmp = match (&l[i], &r[i]) { - (RefValue::Text(left), RefValue::Text(right)) => { + (ValueRef::Text(left), ValueRef::Text(right)) => { collation.compare_strings(left.as_str(), right.as_str()) } _ => l[i].partial_cmp(&r[i]).unwrap_or(std::cmp::Ordering::Equal), @@ -2676,16 +2682,16 @@ mod tests { } } - fn value_to_ref_value(value: &Value) -> RefValue { + fn value_to_ref_value(value: &Value) -> ValueRef { match value { - Value::Null => RefValue::Null, - Value::Integer(i) => RefValue::Integer(*i), - Value::Float(f) => RefValue::Float(*f), - Value::Text(text) => RefValue::Text(TextRef { + Value::Null => ValueRef::Null, + Value::Integer(i) => ValueRef::Integer(*i), + Value::Float(f) => ValueRef::Float(*f), + Value::Text(text) => ValueRef::Text(TextRef { value: RawSlice::from_slice(&text.value), subtype: text.subtype, }), - Value::Blob(blob) => RefValue::Blob(RawSlice::from_slice(blob)), + Value::Blob(blob) => ValueRef::Blob(RawSlice::from_slice(blob)), } } @@ -2709,13 +2715,13 @@ mod tests { fn assert_compare_matches_full_comparison( serialized_values: Vec, - unpacked_values: Vec, + unpacked_values: Vec, index_info: &IndexInfo, test_name: &str, ) { let serialized = create_record(serialized_values.clone()); - let serialized_ref_values: Vec = + let serialized_ref_values: Vec = serialized_values.iter().map(value_to_ref_value).collect(); let tie_breaker = std::cmp::Ordering::Equal; @@ -2815,52 +2821,52 @@ mod tests { let test_cases = vec![ ( vec![Value::Integer(42)], - vec![RefValue::Integer(42)], + vec![ValueRef::Integer(42)], "equal_integers", ), ( vec![Value::Integer(10)], - vec![RefValue::Integer(20)], + vec![ValueRef::Integer(20)], "less_than_integers", ), ( vec![Value::Integer(30)], - vec![RefValue::Integer(20)], + vec![ValueRef::Integer(20)], "greater_than_integers", ), ( vec![Value::Integer(0)], - vec![RefValue::Integer(0)], + vec![ValueRef::Integer(0)], "zero_integers", ), ( vec![Value::Integer(-5)], - vec![RefValue::Integer(-5)], + vec![ValueRef::Integer(-5)], "negative_integers", ), ( vec![Value::Integer(i64::MAX)], - vec![RefValue::Integer(i64::MAX)], + vec![ValueRef::Integer(i64::MAX)], "max_integers", ), ( vec![Value::Integer(i64::MIN)], - vec![RefValue::Integer(i64::MIN)], + vec![ValueRef::Integer(i64::MIN)], "min_integers", ), ( vec![Value::Integer(42), Value::Text(Text::new("hello"))], vec![ - RefValue::Integer(42), - RefValue::Text(TextRef::from_str("hello")), + ValueRef::Integer(42), + ValueRef::Text(TextRef::from_str("hello")), ], "integer_text_equal", ), ( vec![Value::Integer(42), Value::Text(Text::new("hello"))], vec![ - RefValue::Integer(42), - RefValue::Text(TextRef::from_str("world")), + ValueRef::Integer(42), + ValueRef::Text(TextRef::from_str("world")), ], "integer_equal_text_different", ), @@ -2887,43 +2893,43 @@ mod tests { let test_cases = vec![ ( vec![Value::Text(Text::new("hello"))], - vec![RefValue::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(TextRef::from_str("hello"))], "equal_strings", ), ( vec![Value::Text(Text::new("abc"))], - vec![RefValue::Text(TextRef::from_str("def"))], + vec![ValueRef::Text(TextRef::from_str("def"))], "less_than_strings", ), ( vec![Value::Text(Text::new("xyz"))], - vec![RefValue::Text(TextRef::from_str("abc"))], + vec![ValueRef::Text(TextRef::from_str("abc"))], "greater_than_strings", ), ( vec![Value::Text(Text::new(""))], - vec![RefValue::Text(TextRef::from_str(""))], + vec![ValueRef::Text(TextRef::from_str(""))], "empty_strings", ), ( vec![Value::Text(Text::new("a"))], - vec![RefValue::Text(TextRef::from_str("aa"))], + vec![ValueRef::Text(TextRef::from_str("aa"))], "prefix_strings", ), // Multi-field with string first ( vec![Value::Text(Text::new("hello")), Value::Integer(42)], vec![ - RefValue::Text(TextRef::from_str("hello")), - RefValue::Integer(42), + ValueRef::Text(TextRef::from_str("hello")), + ValueRef::Integer(42), ], "string_integer_equal", ), ( vec![Value::Text(Text::new("hello")), Value::Integer(42)], vec![ - RefValue::Text(TextRef::from_str("hello")), - RefValue::Integer(99), + ValueRef::Text(TextRef::from_str("hello")), + ValueRef::Integer(99), ], "string_equal_integer_different", ), @@ -2948,65 +2954,65 @@ mod tests { // NULL vs others ( vec![Value::Null], - vec![RefValue::Integer(42)], + vec![ValueRef::Integer(42)], "null_vs_integer", ), ( vec![Value::Null], - vec![RefValue::Float(64.4)], + vec![ValueRef::Float(64.4)], "null_vs_float", ), ( vec![Value::Null], - vec![RefValue::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(TextRef::from_str("hello"))], "null_vs_text", ), ( vec![Value::Null], - vec![RefValue::Blob(RawSlice::from_slice(b"blob"))], + vec![ValueRef::Blob(RawSlice::from_slice(b"blob"))], "null_vs_blob", ), // Numbers vs Text/Blob ( vec![Value::Integer(42)], - vec![RefValue::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(TextRef::from_str("hello"))], "integer_vs_text", ), ( vec![Value::Float(64.4)], - vec![RefValue::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(TextRef::from_str("hello"))], "float_vs_text", ), ( vec![Value::Integer(42)], - vec![RefValue::Blob(RawSlice::from_slice(b"blob"))], + vec![ValueRef::Blob(RawSlice::from_slice(b"blob"))], "integer_vs_blob", ), ( vec![Value::Float(64.4)], - vec![RefValue::Blob(RawSlice::from_slice(b"blob"))], + vec![ValueRef::Blob(RawSlice::from_slice(b"blob"))], "float_vs_blob", ), // Text vs Blob ( vec![Value::Text(Text::new("hello"))], - vec![RefValue::Blob(RawSlice::from_slice(b"blob"))], + vec![ValueRef::Blob(RawSlice::from_slice(b"blob"))], "text_vs_blob", ), // Integer vs Float (affinity conversion) ( vec![Value::Integer(42)], - vec![RefValue::Float(42.0)], + vec![ValueRef::Float(42.0)], "integer_vs_equal_float", ), ( vec![Value::Integer(42)], - vec![RefValue::Float(42.5)], + vec![ValueRef::Float(42.5)], "integer_vs_different_float", ), ( vec![Value::Float(42.5)], - vec![RefValue::Integer(42)], + vec![ValueRef::Integer(42)], "float_vs_integer", ), ]; @@ -3033,20 +3039,20 @@ mod tests { // DESC order should reverse first field comparison ( vec![Value::Integer(10)], - vec![RefValue::Integer(20)], + vec![ValueRef::Integer(20)], "desc_integer_reversed", ), ( vec![Value::Text(Text::new("abc"))], - vec![RefValue::Text(TextRef::from_str("def"))], + vec![ValueRef::Text(TextRef::from_str("def"))], "desc_string_reversed", ), // Mixed sort orders ( vec![Value::Integer(10), Value::Text(Text::new("hello"))], vec![ - RefValue::Integer(20), - RefValue::Text(TextRef::from_str("hello")), + ValueRef::Integer(20), + ValueRef::Text(TextRef::from_str("hello")), ], "desc_first_asc_second", ), @@ -3071,38 +3077,38 @@ mod tests { ( vec![Value::Integer(42)], vec![ - RefValue::Integer(42), - RefValue::Text(TextRef::from_str("extra")), + ValueRef::Integer(42), + ValueRef::Text(TextRef::from_str("extra")), ], "fewer_serialized_fields", ), ( vec![Value::Integer(42), Value::Text(Text::new("extra"))], - vec![RefValue::Integer(42)], + vec![ValueRef::Integer(42)], "fewer_unpacked_fields", ), (vec![], vec![], "both_empty"), - (vec![], vec![RefValue::Integer(42)], "empty_serialized"), + (vec![], vec![ValueRef::Integer(42)], "empty_serialized"), ( (0..15).map(Value::Integer).collect(), - (0..15).map(RefValue::Integer).collect(), + (0..15).map(ValueRef::Integer).collect(), "large_field_count", ), ( vec![Value::Blob(vec![1, 2, 3])], - vec![RefValue::Blob(RawSlice::from_slice(&[1, 2, 3]))], + vec![ValueRef::Blob(RawSlice::from_slice(&[1, 2, 3]))], "blob_first_field", ), ( vec![Value::Text(Text::new("hello")), Value::Integer(5)], - vec![RefValue::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(TextRef::from_str("hello"))], "equal_text_prefix_but_more_serialized_fields", ), ( vec![Value::Text(Text::new("same")), Value::Integer(5)], vec![ - RefValue::Text(TextRef::from_str("same")), - RefValue::Integer(5), + ValueRef::Text(TextRef::from_str("same")), + ValueRef::Integer(5), ], "equal_text_then_equal_int", ), @@ -3132,9 +3138,9 @@ mod tests { Value::Integer(3), ]); let unpacked = vec![ - RefValue::Integer(1), - RefValue::Integer(99), - RefValue::Integer(3), + ValueRef::Integer(1), + ValueRef::Integer(99), + ValueRef::Integer(3), ]; let tie_breaker = std::cmp::Ordering::Equal; @@ -3160,8 +3166,8 @@ mod tests { let index_info_large = create_index_info(15, vec![SortOrder::Asc; 15], collations_large); let int_values = vec![ - RefValue::Integer(42), - RefValue::Text(TextRef::from_str("hello")), + ValueRef::Integer(42), + ValueRef::Text(TextRef::from_str("hello")), ]; assert!(matches!( find_compare(&int_values, &index_info_small), @@ -3169,21 +3175,21 @@ mod tests { )); let string_values = vec![ - RefValue::Text(TextRef::from_str("hello")), - RefValue::Integer(42), + ValueRef::Text(TextRef::from_str("hello")), + ValueRef::Integer(42), ]; assert!(matches!( find_compare(&string_values, &index_info_small), RecordCompare::String )); - let large_values: Vec = (0..15).map(RefValue::Integer).collect(); + let large_values: Vec = (0..15).map(ValueRef::Integer).collect(); assert!(matches!( find_compare(&large_values, &index_info_large), RecordCompare::Generic )); - let blob_values = vec![RefValue::Blob(RawSlice::from_slice(&[1, 2, 3]))]; + let blob_values = vec![ValueRef::Blob(RawSlice::from_slice(&[1, 2, 3]))]; assert!(matches!( find_compare(&blob_values, &index_info_small), RecordCompare::Generic diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index aa9fe59b4..9a03ca9bc 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -65,7 +65,7 @@ use crate::{ vector::{vector32, vector64, vector_distance_cos, vector_distance_l2, vector_extract}, }; -use crate::{info, turso_assert, OpenFlags, RefValue, Row, TransactionState}; +use crate::{info, turso_assert, OpenFlags, Row, TransactionState, ValueRef}; use super::{ insn::{Cookie, RegisterOrLiteral}, @@ -2705,7 +2705,7 @@ pub fn op_row_id( let record_cursor = record_cursor_ref.deref_mut(); let rowid = record.last_value(record_cursor).unwrap(); match rowid { - Ok(RefValue::Integer(rowid)) => rowid, + Ok(ValueRef::Integer(rowid)) => rowid, _ => unreachable!(), } }; @@ -4275,7 +4275,14 @@ pub fn op_sorter_compare( &record.get_values()[..*num_regs] }; - let cursor = state.get_cursor(*cursor_id); + // Inlined `state.get_cursor` to prevent borrowing conflit with `state.registers` + let cursor = state + .cursors + .get_mut(*cursor_id) + .unwrap_or_else(|| panic!("cursor id {cursor_id} out of bounds")) + .as_mut() + .unwrap_or_else(|| panic!("cursor id {cursor_id} is None")); + let cursor = cursor.as_sorter_mut(); let Some(current_sorter_record) = cursor.record() else { return Err(LimboError::InternalError( @@ -4287,7 +4294,7 @@ pub fn op_sorter_compare( // If the current sorter record has a NULL in any of the significant fields, the comparison is not equal. let is_equal = current_sorter_values .iter() - .all(|v| !matches!(v, RefValue::Null)) + .all(|v| !matches!(v, ValueRef::Null)) && compare_immutable( previous_sorter_values, current_sorter_values, @@ -4953,7 +4960,7 @@ pub fn op_function( } #[cfg(feature = "json")] { - use crate::types::{TextRef, TextSubtype}; + use crate::types::TextSubtype; let table = state.registers[*start_reg].get_value(); let Value::Text(table) = table else { @@ -4978,10 +4985,7 @@ pub fn op_function( for column in table.columns() { let name = column.name.as_ref().unwrap(); let name_json = json::convert_ref_dbtype_to_jsonb( - &RefValue::Text(TextRef::create_from( - name.as_str().as_bytes(), - TextSubtype::Text, - )), + ValueRef::Text(name.as_bytes(), TextSubtype::Text), json::Conv::ToString, )?; json.append_jsonb_to_end(name_json.data()); @@ -5049,13 +5053,13 @@ pub fn op_function( json.append_jsonb_to_end(column_name.data()); let val = record_cursor.get_value(&record, i)?; - if let RefValue::Blob(..) = val { + if let ValueRef::Blob(..) = val { return Err(LimboError::InvalidArgument( "bin_record_json_object: formatting of BLOB values stored in binary record is not supported".to_string() )); } let val_json = - json::convert_ref_dbtype_to_jsonb(&val, json::Conv::NotStrict)?; + json::convert_ref_dbtype_to_jsonb(val, json::Conv::NotStrict)?; json.append_jsonb_to_end(val_json.data()); } json.finalize_unsafe(json::jsonb::ElementType::OBJECT)?; @@ -6249,9 +6253,9 @@ pub fn op_idx_insert( // UNIQUE indexes disallow duplicates like (a=1,b=2,rowid=1) and (a=1,b=2,rowid=2). let existing_key = if cursor.has_rowid() { let count = cursor.record_cursor.borrow_mut().count(record); - record.get_values()[..count.saturating_sub(1)].to_vec() + &record.get_values()[..count.saturating_sub(1)] } else { - record.get_values().to_vec() + &record.get_values()[..] }; let inserted_key_vals = &record_to_insert.get_values(); if existing_key.len() != inserted_key_vals.len() { @@ -6259,7 +6263,7 @@ pub fn op_idx_insert( } let conflict = compare_immutable( - existing_key.as_slice(), + existing_key, inserted_key_vals, &cursor.index_info.as_ref().unwrap().key_info, ) == std::cmp::Ordering::Equal; @@ -6550,7 +6554,7 @@ pub fn op_no_conflict( record .get_values() .iter() - .any(|val| matches!(val, RefValue::Null)) + .any(|val| matches!(val, ValueRef::Null)) } RecordSource::Unpacked { start_reg, diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 90b14225f..16695bd0f 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -32,7 +32,7 @@ use crate::{ state_machine::StateMachine, storage::{pager::PagerCommitResult, sqlite3_ondisk::SmallVec}, translate::{collate::CollationSeq, plan::TableReferences}, - types::{IOCompletions, IOResult, RawSlice, TextRef}, + types::{IOCompletions, IOResult}, vdbe::{ execute::{ OpCheckpointState, OpColumnState, OpDeleteState, OpDeleteSubState, OpDestroyState, @@ -41,7 +41,7 @@ use crate::{ }, metrics::StatementMetrics, }, - RefValue, + ValueRef, }; use crate::{ @@ -1001,22 +1001,10 @@ fn make_record(registers: &[Register], start_reg: &usize, count: &usize) -> Immu ImmutableRecord::from_registers(regs, regs.len()) } -pub fn registers_to_ref_values(registers: &[Register]) -> Vec { +pub fn registers_to_ref_values<'a>(registers: &'a [Register]) -> Vec> { registers .iter() - .map(|reg| { - let value = reg.get_value(); - match value { - Value::Null => RefValue::Null, - Value::Integer(i) => RefValue::Integer(*i), - Value::Float(f) => RefValue::Float(*f), - Value::Text(t) => RefValue::Text(TextRef { - value: RawSlice::new(t.value.as_ptr(), t.value.len()), - subtype: t.subtype, - }), - Value::Blob(b) => RefValue::Blob(RawSlice::new(b.as_ptr(), b.len())), - } - }) + .map(|reg| reg.get_value().as_ref()) .collect() } diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index f2ef80c69..bedf6de18 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -1,6 +1,6 @@ use turso_parser::ast::SortOrder; -use std::cell::RefCell; +use std::cell::{RefCell, UnsafeCell}; use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd, Reverse}; use std::collections::BinaryHeap; use std::rc::Rc; @@ -15,7 +15,7 @@ use crate::{ storage::sqlite3_ondisk::{read_varint, varint_len, write_varint}, translate::collate::CollationSeq, turso_assert, - types::{IOResult, ImmutableRecord, KeyInfo, RecordCursor, RefValue}, + types::{IOResult, ImmutableRecord, KeyInfo, RecordCursor, ValueRef}, Result, }; use crate::{io_yield_many, io_yield_one, return_if_io, CompletionError}; @@ -614,7 +614,8 @@ impl SortedChunk { struct SortableImmutableRecord { record: ImmutableRecord, cursor: RecordCursor, - key_values: RefCell>, + // SAFETY: borrows from 'self + key_values: UnsafeCell>>, key_len: usize, index_key_info: Rc>, /// The key deserialization error, if any. @@ -636,29 +637,34 @@ impl SortableImmutableRecord { Ok(Self { record, cursor, - key_values: RefCell::new(Vec::with_capacity(key_len)), + key_values: UnsafeCell::new(Vec::with_capacity(key_len)), index_key_info, deserialization_error: RefCell::new(None), key_len, }) } - /// Attempts to deserialize the key value at the given index. - /// If the key value has already been deserialized, this does nothing. - /// The deserialized key value is stored in the `key_values` field. - /// In case of an error, the error is stored in the `deserialization_error` field. - fn try_deserialize_key(&self, idx: usize) { - let mut key_values = self.key_values.borrow_mut(); - if idx < key_values.len() { - // The key value with this index has already been deserialized. - return; - } - match self.cursor.deserialize_column(&self.record, idx) { - Ok(value) => key_values.push(value), - Err(error) => { - self.deserialization_error.replace(Some(error)); - } + fn key_value<'a>(&'a self, i: usize) -> Option> { + // SAFETY: there are no other active references + let key_values = unsafe { &mut *self.key_values.get() }; + + if i >= key_values.len() { + assert_eq!(key_values.len(), i, "access must be sequential"); + + let value = match self.cursor.deserialize_column(&self.record, i) { + Ok(value) => value, + Err(err) => { + self.deserialization_error.replace(Some(err)); + return None; + } + }; + + // SAFETY: no 'static lifetime is exposed, all references are bound to 'self + let value: ValueRef<'static> = unsafe { std::mem::transmute(value) }; + key_values.push(value); } + + Some(key_values[i]) } } @@ -674,34 +680,25 @@ impl Ord for SortableImmutableRecord { self.cursor.serial_types.len(), other.cursor.serial_types.len() ); - let this_key_values_len = self.key_values.borrow().len(); - let other_key_values_len = other.key_values.borrow().len(); for i in 0..self.key_len { - // Lazily deserialize the key values if they haven't been deserialized already. - if i >= this_key_values_len { - self.try_deserialize_key(i); - if self.deserialization_error.borrow().is_some() { - return Ordering::Equal; - } - } - if i >= other_key_values_len { - other.try_deserialize_key(i); - if other.deserialization_error.borrow().is_some() { - return Ordering::Equal; - } - } + let Some(this_key_value) = self.key_value(i) else { + return Ordering::Equal; + }; + + let Some(other_key_value) = other.key_value(i) else { + return Ordering::Equal; + }; - let this_key_value = &self.key_values.borrow()[i]; - let other_key_value = &other.key_values.borrow()[i]; let column_order = self.index_key_info[i].sort_order; let collation = self.index_key_info[i].collation; let cmp = match (this_key_value, other_key_value) { - (RefValue::Text(left), RefValue::Text(right)) => { - collation.compare_strings(left.as_str(), right.as_str()) - } - _ => this_key_value.partial_cmp(other_key_value).unwrap(), + (ValueRef::Text(left, _), ValueRef::Text(right, _)) => collation.compare_strings( + &String::from_utf8_lossy(left), + &String::from_utf8_lossy(right), + ), + _ => this_key_value.partial_cmp(&other_key_value).unwrap(), }; if !cmp.is_eq() { return match column_order { @@ -742,7 +739,7 @@ enum SortedChunkIOState { mod tests { use super::*; use crate::translate::collate::CollationSeq; - use crate::types::{ImmutableRecord, RefValue, Value, ValueType}; + use crate::types::{ImmutableRecord, Value, ValueRef, ValueType}; use crate::util::IOExt; use crate::PlatformIO; use rand_chacha::{ @@ -806,7 +803,7 @@ mod tests { for i in 0..num_records { assert!(sorter.has_more()); let record = sorter.record().unwrap(); - assert_eq!(record.get_values()[0], RefValue::Integer(i)); + assert_eq!(record.get_values()[0], ValueRef::Integer(i)); // Check that the record remained unchanged after sorting. assert_eq!(record, &initial_records[(num_records - i - 1) as usize]); From cf53ecb7e345a996bbc174d6fb34b9c606daff98 Mon Sep 17 00:00:00 2001 From: "Levy A." Date: Sun, 5 Oct 2025 23:43:12 -0300 Subject: [PATCH 046/428] refactor: remove `TextRef` and `RawSlice` and fix tests --- core/mvcc/database/tests.rs | 8 +- core/mvcc/persistent_storage/logical_log.rs | 15 +- core/storage/btree.rs | 16 +- core/types.rs | 157 ++++---------------- 4 files changed, 51 insertions(+), 145 deletions(-) diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs index 2c8d23106..3cc76db23 100644 --- a/core/mvcc/database/tests.rs +++ b/core/mvcc/database/tests.rs @@ -978,8 +978,8 @@ fn test_cursor_modification_during_scan() { record.start_serialization(&row.data); let value = record.get_value(0).unwrap(); match value { - ValueRef::Text(text) => { - assert_eq!(text.as_str(), "new_row"); + ValueRef::Text(text, _) => { + assert_eq!(text, b"new_row"); } _ => panic!("Expected Text value"), } @@ -1210,8 +1210,8 @@ fn test_restart() { .unwrap(); let record = get_record_value(&row); match record.get_value(0).unwrap() { - ValueRef::Text(text) => { - assert_eq!(text.as_str(), "bar"); + ValueRef::Text(text, _) => { + assert_eq!(text, b"bar"); } _ => panic!("Expected Text value"), } diff --git a/core/mvcc/persistent_storage/logical_log.rs b/core/mvcc/persistent_storage/logical_log.rs index 52a01bb27..a340d6c43 100644 --- a/core/mvcc/persistent_storage/logical_log.rs +++ b/core/mvcc/persistent_storage/logical_log.rs @@ -565,10 +565,10 @@ mod tests { let record = ImmutableRecord::from_bin_record(row.data.clone()); let values = record.get_values(); let foo = values.first().unwrap(); - let ValueRef::Text(foo) = foo else { + let ValueRef::Text(foo, _) = foo else { unreachable!() }; - assert_eq!(foo.as_str(), "foo"); + assert_eq!(foo, b"foo"); } #[test] @@ -637,10 +637,10 @@ mod tests { let record = ImmutableRecord::from_bin_record(row.data.clone()); let values = record.get_values(); let foo = values.first().unwrap(); - let ValueRef::Text(foo) = foo else { + let ValueRef::Text(foo, _) = foo else { unreachable!() }; - assert_eq!(foo.as_str(), value.as_str()); + assert_eq!(*foo, value.as_bytes()); } } @@ -758,11 +758,14 @@ mod tests { let record = ImmutableRecord::from_bin_record(row.data.clone()); let values = record.get_values(); let foo = values.first().unwrap(); - let ValueRef::Text(foo) = foo else { + let ValueRef::Text(foo, _) = foo else { unreachable!() }; - assert_eq!(foo.as_str(), format!("row_{}", present_rowid.row_id as u64)); + assert_eq!( + String::from_utf8_lossy(foo), + format!("row_{}", present_rowid.row_id as u64) + ); } // Check rowids that were deleted diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 7935d3595..80b831231 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -8690,7 +8690,11 @@ mod tests { run_until_done(|| cursor.next(), pager.deref()).unwrap(); let record = run_until_done(|| cursor.record(), &pager).unwrap(); let record = record.as_ref().unwrap(); - let cur = record.get_values().clone(); + let cur = record + .get_values() + .iter() + .map(ValueRef::to_owned) + .collect::>(); if let Some(prev) = prev { if prev >= cur { println!("Seed: {seed}"); @@ -8933,11 +8937,7 @@ mod tests { let ValueRef::Blob(ref cur) = cur else { panic!("expected blob, got {cur:?}"); }; - assert_eq!( - cur.to_slice(), - key, - "key {key:?} is not found, seed: {seed}" - ); + assert_eq!(cur, key, "key {key:?} is not found, seed: {seed}"); } pager.end_read_tx(); } @@ -9469,8 +9469,8 @@ mod tests { let exists = run_until_done(|| cursor.next(), &pager)?; assert!(exists, "Record {i} not found"); - let record = run_until_done(|| cursor.record(), &pager)?; - let value = record.unwrap().get_value(0)?; + let record = run_until_done(|| cursor.record(), &pager)?.unwrap(); + let value = record.get_value(0)?; assert_eq!( value, ValueRef::Integer(i), diff --git a/core/types.rs b/core/types.rs index b86c72726..6de047f9b 100644 --- a/core/types.rs +++ b/core/types.rs @@ -68,12 +68,6 @@ impl Display for Text { } } -#[derive(Debug, Clone, PartialEq)] -pub struct TextRef { - pub value: RawSlice, - pub subtype: TextSubtype, -} - impl Text { pub fn new(value: &str) -> Self { Self { @@ -119,24 +113,12 @@ pub trait AnyText: AsRef { fn subtype(&self) -> TextSubtype; } -impl AsRef for TextRef { - fn as_ref(&self) -> &str { - self.as_str() - } -} - impl AnyText for Text { fn subtype(&self) -> TextSubtype { self.subtype } } -impl AnyText for TextRef { - fn subtype(&self) -> TextSubtype { - self.subtype - } -} - impl AnyText for &str { fn subtype(&self) -> TextSubtype { TextSubtype::Text @@ -147,12 +129,6 @@ pub trait AnyBlob { fn as_slice(&self) -> &[u8]; } -impl AnyBlob for RawSlice { - fn as_slice(&self) -> &[u8] { - self.to_slice() - } -} - impl AnyBlob for Vec { fn as_slice(&self) -> &[u8] { self.as_slice() @@ -195,22 +171,6 @@ impl From for String { } } -impl Display for TextRef { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.as_str()) - } -} - -impl TextRef { - pub fn create_from(value: &[u8], subtype: TextSubtype) -> Self { - let value = RawSlice::create_from(value); - Self { value, subtype } - } - pub fn as_str(&self) -> &str { - unsafe { std::str::from_utf8_unchecked(self.value.to_slice()) } - } -} - #[cfg(feature = "serde")] fn float_to_string(float: &f64, serializer: S) -> Result where @@ -249,12 +209,6 @@ pub enum Value { Blob(Vec), } -#[derive(Debug, Clone, PartialEq)] -pub struct RawSlice { - data: *const u8, - len: usize, -} - #[derive(PartialEq, Clone, Copy)] pub enum ValueRef<'a> { Null, @@ -2559,27 +2513,6 @@ pub enum SeekKey<'a> { IndexKey(&'a ImmutableRecord), } -impl RawSlice { - pub fn create_from(value: &[u8]) -> Self { - if value.is_empty() { - RawSlice::new(std::ptr::null(), 0) - } else { - let ptr = &value[0] as *const u8; - RawSlice::new(ptr, value.len()) - } - } - pub fn new(data: *const u8, len: usize) -> Self { - Self { data, len } - } - pub fn to_slice(&self) -> &[u8] { - if self.data.is_null() { - &[] - } else { - unsafe { std::slice::from_raw_parts(self.data, self.len) } - } - } -} - #[derive(Debug)] pub enum DatabaseChangeType { Delete, @@ -2641,9 +2574,10 @@ mod tests { let collation = index_key_info[i].collation; let cmp = match (&l[i], &r[i]) { - (ValueRef::Text(left), ValueRef::Text(right)) => { - collation.compare_strings(left.as_str(), right.as_str()) - } + (ValueRef::Text(left, _), ValueRef::Text(right, _)) => collation.compare_strings( + &String::from_utf8_lossy(left), + &String::from_utf8_lossy(right), + ), _ => l[i].partial_cmp(&r[i]).unwrap_or(std::cmp::Ordering::Equal), }; @@ -2682,37 +2616,6 @@ mod tests { } } - fn value_to_ref_value(value: &Value) -> ValueRef { - match value { - Value::Null => ValueRef::Null, - Value::Integer(i) => ValueRef::Integer(*i), - Value::Float(f) => ValueRef::Float(*f), - Value::Text(text) => ValueRef::Text(TextRef { - value: RawSlice::from_slice(&text.value), - subtype: text.subtype, - }), - Value::Blob(blob) => ValueRef::Blob(RawSlice::from_slice(blob)), - } - } - - impl TextRef { - fn from_str(s: &str) -> Self { - TextRef { - value: RawSlice::from_slice(s.as_bytes()), - subtype: crate::types::TextSubtype::Text, - } - } - } - - impl RawSlice { - fn from_slice(data: &[u8]) -> Self { - Self { - data: data.as_ptr(), - len: data.len(), - } - } - } - fn assert_compare_matches_full_comparison( serialized_values: Vec, unpacked_values: Vec, @@ -2722,7 +2625,7 @@ mod tests { let serialized = create_record(serialized_values.clone()); let serialized_ref_values: Vec = - serialized_values.iter().map(value_to_ref_value).collect(); + serialized_values.iter().map(Value::as_ref).collect(); let tie_breaker = std::cmp::Ordering::Equal; @@ -2858,7 +2761,7 @@ mod tests { vec![Value::Integer(42), Value::Text(Text::new("hello"))], vec![ ValueRef::Integer(42), - ValueRef::Text(TextRef::from_str("hello")), + ValueRef::Text(b"hello", TextSubtype::Text), ], "integer_text_equal", ), @@ -2866,7 +2769,7 @@ mod tests { vec![Value::Integer(42), Value::Text(Text::new("hello"))], vec![ ValueRef::Integer(42), - ValueRef::Text(TextRef::from_str("world")), + ValueRef::Text(b"world", TextSubtype::Text), ], "integer_equal_text_different", ), @@ -2893,34 +2796,34 @@ mod tests { let test_cases = vec![ ( vec![Value::Text(Text::new("hello"))], - vec![ValueRef::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(b"hello", TextSubtype::Text)], "equal_strings", ), ( vec![Value::Text(Text::new("abc"))], - vec![ValueRef::Text(TextRef::from_str("def"))], + vec![ValueRef::Text(b"def", TextSubtype::Text)], "less_than_strings", ), ( vec![Value::Text(Text::new("xyz"))], - vec![ValueRef::Text(TextRef::from_str("abc"))], + vec![ValueRef::Text(b"abc", TextSubtype::Text)], "greater_than_strings", ), ( vec![Value::Text(Text::new(""))], - vec![ValueRef::Text(TextRef::from_str(""))], + vec![ValueRef::Text(b"", TextSubtype::Text)], "empty_strings", ), ( vec![Value::Text(Text::new("a"))], - vec![ValueRef::Text(TextRef::from_str("aa"))], + vec![ValueRef::Text(b"aa", TextSubtype::Text)], "prefix_strings", ), // Multi-field with string first ( vec![Value::Text(Text::new("hello")), Value::Integer(42)], vec![ - ValueRef::Text(TextRef::from_str("hello")), + ValueRef::Text(b"hello", TextSubtype::Text), ValueRef::Integer(42), ], "string_integer_equal", @@ -2928,7 +2831,7 @@ mod tests { ( vec![Value::Text(Text::new("hello")), Value::Integer(42)], vec![ - ValueRef::Text(TextRef::from_str("hello")), + ValueRef::Text(b"hello", TextSubtype::Text), ValueRef::Integer(99), ], "string_equal_integer_different", @@ -2964,39 +2867,39 @@ mod tests { ), ( vec![Value::Null], - vec![ValueRef::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(b"hello", TextSubtype::Text)], "null_vs_text", ), ( vec![Value::Null], - vec![ValueRef::Blob(RawSlice::from_slice(b"blob"))], + vec![ValueRef::Blob(b"blob")], "null_vs_blob", ), // Numbers vs Text/Blob ( vec![Value::Integer(42)], - vec![ValueRef::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(b"hello", TextSubtype::Text)], "integer_vs_text", ), ( vec![Value::Float(64.4)], - vec![ValueRef::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(b"hello", TextSubtype::Text)], "float_vs_text", ), ( vec![Value::Integer(42)], - vec![ValueRef::Blob(RawSlice::from_slice(b"blob"))], + vec![ValueRef::Blob(b"blob")], "integer_vs_blob", ), ( vec![Value::Float(64.4)], - vec![ValueRef::Blob(RawSlice::from_slice(b"blob"))], + vec![ValueRef::Blob(b"blob")], "float_vs_blob", ), // Text vs Blob ( vec![Value::Text(Text::new("hello"))], - vec![ValueRef::Blob(RawSlice::from_slice(b"blob"))], + vec![ValueRef::Blob(b"blob")], "text_vs_blob", ), // Integer vs Float (affinity conversion) @@ -3044,7 +2947,7 @@ mod tests { ), ( vec![Value::Text(Text::new("abc"))], - vec![ValueRef::Text(TextRef::from_str("def"))], + vec![ValueRef::Text(b"def", TextSubtype::Text)], "desc_string_reversed", ), // Mixed sort orders @@ -3052,7 +2955,7 @@ mod tests { vec![Value::Integer(10), Value::Text(Text::new("hello"))], vec![ ValueRef::Integer(20), - ValueRef::Text(TextRef::from_str("hello")), + ValueRef::Text(b"hello", TextSubtype::Text), ], "desc_first_asc_second", ), @@ -3078,7 +2981,7 @@ mod tests { vec![Value::Integer(42)], vec![ ValueRef::Integer(42), - ValueRef::Text(TextRef::from_str("extra")), + ValueRef::Text(b"extra", TextSubtype::Text), ], "fewer_serialized_fields", ), @@ -3096,18 +2999,18 @@ mod tests { ), ( vec![Value::Blob(vec![1, 2, 3])], - vec![ValueRef::Blob(RawSlice::from_slice(&[1, 2, 3]))], + vec![ValueRef::Blob(&[1, 2, 3])], "blob_first_field", ), ( vec![Value::Text(Text::new("hello")), Value::Integer(5)], - vec![ValueRef::Text(TextRef::from_str("hello"))], + vec![ValueRef::Text(b"hello", TextSubtype::Text)], "equal_text_prefix_but_more_serialized_fields", ), ( vec![Value::Text(Text::new("same")), Value::Integer(5)], vec![ - ValueRef::Text(TextRef::from_str("same")), + ValueRef::Text(b"same", TextSubtype::Text), ValueRef::Integer(5), ], "equal_text_then_equal_int", @@ -3167,7 +3070,7 @@ mod tests { let int_values = vec![ ValueRef::Integer(42), - ValueRef::Text(TextRef::from_str("hello")), + ValueRef::Text(b"hello", TextSubtype::Text), ]; assert!(matches!( find_compare(&int_values, &index_info_small), @@ -3175,7 +3078,7 @@ mod tests { )); let string_values = vec![ - ValueRef::Text(TextRef::from_str("hello")), + ValueRef::Text(b"hello", TextSubtype::Text), ValueRef::Integer(42), ]; assert!(matches!( @@ -3189,7 +3092,7 @@ mod tests { RecordCompare::Generic )); - let blob_values = vec![ValueRef::Blob(RawSlice::from_slice(&[1, 2, 3]))]; + let blob_values = vec![ValueRef::Blob(&[1, 2, 3])]; assert!(matches!( find_compare(&blob_values, &index_info_small), RecordCompare::Generic From 76af79c4faf9eb97aa61e5cfb22173ecbe7216c9 Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Tue, 7 Oct 2025 23:33:02 +0900 Subject: [PATCH 047/428] Add javadoc on classes and public methods for publishing to maven central --- .github/data/limbo-cargo-output.txt | 222 ------------------ bindings/java/.gitignore | 1 + .../java/src/main/java/tech/turso/JDBC.java | 11 + .../java/tech/turso/core/TursoDBFactory.java | 2 +- .../java/tech/turso/core/TursoResultSet.java | 5 +- .../java/tech/turso/core/TursoStatement.java | 4 +- .../tech/turso/jdbc4/JDBC4Connection.java | 40 ++++ .../turso/jdbc4/JDBC4DatabaseMetaData.java | 8 + .../turso/jdbc4/JDBC4PreparedStatement.java | 10 + .../java/tech/turso/jdbc4/JDBC4ResultSet.java | 19 ++ .../java/tech/turso/jdbc4/JDBC4Statement.java | 17 ++ .../java/tech/turso/utils/ByteArrayUtils.java | 13 + 12 files changed, 126 insertions(+), 226 deletions(-) delete mode 100644 .github/data/limbo-cargo-output.txt diff --git a/.github/data/limbo-cargo-output.txt b/.github/data/limbo-cargo-output.txt deleted file mode 100644 index 5a33c5fd0..000000000 --- a/.github/data/limbo-cargo-output.txt +++ /dev/null @@ -1,222 +0,0 @@ -Benchmarking limbo/Prepare statement: 'SELECT 1' -Benchmarking limbo/Prepare statement: 'SELECT 1': Warming up for 3.0000 s -Benchmarking limbo/Prepare statement: 'SELECT 1': Collecting 100 samples in estimated 5.0062 s (2.4M iterations) -Benchmarking limbo/Prepare statement: 'SELECT 1': Analyzing -limbo/Prepare statement: 'SELECT 1' - time: [2.0631 µs 2.0688 µs 2.0763 µs] - thrpt: [481.63 Kelem/s 483.37 Kelem/s 484.70 Kelem/s] -Found 13 outliers among 100 measurements (13.00%) - 1 (1.00%) low severe - 5 (5.00%) high mild - 7 (7.00%) high severe -Benchmarking limbo/Prepare statement: 'SELECT * FROM users LIMIT 1' -Benchmarking limbo/Prepare statement: 'SELECT * FROM users LIMIT 1': Warming up for 3.0000 s -Benchmarking limbo/Prepare statement: 'SELECT * FROM users LIMIT 1': Collecting 100 samples in estimated 5.0159 s (1.3M iterations) -Benchmarking limbo/Prepare statement: 'SELECT * FROM users LIMIT 1': Analyzing -limbo/Prepare statement: 'SELECT * FROM users LIMIT 1' - time: [3.9747 µs 3.9842 µs 3.9937 µs] - thrpt: [250.39 Kelem/s 250.99 Kelem/s 251.59 Kelem/s] -Found 17 outliers among 100 measurements (17.00%) - 5 (5.00%) low severe - 3 (3.00%) low mild - 3 (3.00%) high mild - 6 (6.00%) high severe -Benchmarking limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)... -Benchmarking limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)...: Warming up for 3.0000 s -Benchmarking limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)...: Collecting 100 samples in estimated 5.0099 s (500k iterations) -Benchmarking limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)...: Analyzing -limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)... - time: [10.220 µs 10.280 µs 10.358 µs] - thrpt: [96.544 Kelem/s 97.281 Kelem/s 97.846 Kelem/s] -Found 13 outliers among 100 measurements (13.00%) - 1 (1.00%) low severe - 2 (2.00%) high mild - 10 (10.00%) high severe -Benchmarking limbo/Execute prepared statement: 'SELECT 1' -Benchmarking limbo/Execute prepared statement: 'SELECT 1': Warming up for 3.0000 s -Benchmarking limbo/Execute prepared statement: 'SELECT 1': Collecting 100 samples in estimated 5.0006 s (27M iterations) -Benchmarking limbo/Execute prepared statement: 'SELECT 1': Analyzing -limbo/Execute prepared statement: 'SELECT 1' - time: [181.95 ns 182.33 ns 182.70 ns] - thrpt: [5.4736 Melem/s 5.4844 Melem/s 5.4960 Melem/s] -Found 18 outliers among 100 measurements (18.00%) - 6 (6.00%) low severe - 3 (3.00%) low mild - 6 (6.00%) high mild - 3 (3.00%) high severe -Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1' -Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Warming up for 3.0000 s -Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Collecting 100 samples in estimated 5.0008 s (4.0M iterations) -Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Analyzing -limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1' - time: [1.2549 µs 1.2572 µs 1.2594 µs] - thrpt: [794.03 Kelem/s 795.44 Kelem/s 796.89 Kelem/s] -Found 15 outliers among 100 measurements (15.00%) - 6 (6.00%) low severe - 3 (3.00%) high mild - 6 (6.00%) high severe -Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100' -Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Warming up for 3.0000 s -Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Collecting 100 samples in estimated 5.0047 s (4.0M iterations) -Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Analyzing -limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100' - time: [1.2503 µs 1.2528 µs 1.2560 µs] - thrpt: [796.20 Kelem/s 798.23 Kelem/s 799.84 Kelem/s] -Found 14 outliers among 100 measurements (14.00%) - 2 (2.00%) low severe - 1 (1.00%) low mild - 5 (5.00%) high mild - 6 (6.00%) high severe - -Benchmarking rusqlite/Prepare statement: 'SELECT 1' -Benchmarking rusqlite/Prepare statement: 'SELECT 1': Warming up for 3.0000 s -Benchmarking rusqlite/Prepare statement: 'SELECT 1': Collecting 100 samples in estimated 5.0010 s (6.5M iterations) -Benchmarking rusqlite/Prepare statement: 'SELECT 1': Analyzing -rusqlite/Prepare statement: 'SELECT 1' - time: [768.58 ns 770.50 ns 772.43 ns] - thrpt: [1.2946 Melem/s 1.2979 Melem/s 1.3011 Melem/s] -Found 16 outliers among 100 measurements (16.00%) - 5 (5.00%) low severe - 2 (2.00%) low mild - 1 (1.00%) high mild - 8 (8.00%) high severe -Benchmarking rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1' -Benchmarking rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1': Warming up for 3.0000 s -Benchmarking rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1': Collecting 100 samples in estimated 5.0083 s (1.6M iterations) -Benchmarking rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1': Analyzing -rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1' - time: [3.2006 µs 3.2038 µs 3.2084 µs] - thrpt: [311.68 Kelem/s 312.13 Kelem/s 312.45 Kelem/s] -Found 11 outliers among 100 measurements (11.00%) - 5 (5.00%) low severe - 1 (1.00%) low mild - 2 (2.00%) high mild - 3 (3.00%) high severe -Benchmarking rusqlite/Execute prepared statement: 'SELECT 1' -Benchmarking rusqlite/Execute prepared statement: 'SELECT 1': Warming up for 3.0000 s -Benchmarking rusqlite/Execute prepared statement: 'SELECT 1': Collecting 100 samples in estimated 5.0002 s (82M iterations) -Benchmarking rusqlite/Execute prepared statement: 'SELECT 1': Analyzing -rusqlite/Execute prepared statement: 'SELECT 1' - time: [60.613 ns 60.788 ns 61.098 ns] - thrpt: [16.367 Melem/s 16.451 Melem/s 16.498 Melem/s] -Found 8 outliers among 100 measurements (8.00%) - 1 (1.00%) low mild - 1 (1.00%) high mild - 6 (6.00%) high severe -Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1' -Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Warming up for 3.0000 s -Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Collecting 100 samples in estimated 5.0014 s (4.3M iterations) -Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Analyzing -rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1' - time: [1.1686 µs 1.1702 µs 1.1716 µs] - thrpt: [853.52 Kelem/s 854.55 Kelem/s 855.74 Kelem/s] -Found 13 outliers among 100 measurements (13.00%) - 2 (2.00%) low severe - 1 (1.00%) low mild - 6 (6.00%) high mild - 4 (4.00%) high severe -Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100' -Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Warming up for 3.0000 s -Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Collecting 100 samples in estimated 5.0016 s (4.3M iterations) -Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Analyzing -rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100' - time: [1.1643 µs 1.1710 µs 1.1789 µs] - thrpt: [848.26 Kelem/s 853.98 Kelem/s 858.85 Kelem/s] -Found 17 outliers among 100 measurements (17.00%) - 5 (5.00%) low severe - 3 (3.00%) low mild - 4 (4.00%) high mild - 5 (5.00%) high severe - - Running unittests src/lib.rs (target/release/deps/limbo_ext-8b70654a7fccf221) - -running 0 tests - -test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s - - Running unittests src/lib.rs (target/release/deps/limbo_libsql-66ab9a0ee1a27f4c) - -running 1 test -test params::tests::test_serialize_array ... ignored - -test result: ok. 0 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in 0.00s - - Running unittests src/lib.rs (target/release/deps/limbo_macros-669ce5abb31e687e) - Running unittests main.rs (target/release/deps/limbo_sim-d28081335520ff9c) - -running 0 tests - Running unittests src/lib.rs (target/release/deps/limbo_sqlite3-1d7f53c447b1fc8a) - Running unittests src/lib.rs (target/release/deps/limbo_uuid-c0d9c3b2e9e30eee) - -test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s - - -running 0 tests - -test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s - - -running 0 tests - -test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s - - -running 0 tests - -test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s - - Running unittests src/lib.rs (target/release/deps/_limbo-9562557e55b9bbed) - -running 0 tests - -test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s - - Running unittests src/lib.rs (target/release/deps/sqlite3_parser-475bbf4de217f28a) - -running 38 tests -test lexer::sql::test::alter_add_column_primary_key ... ignored -test lexer::sql::test::alter_add_column_unique ... ignored -test lexer::sql::test::alter_rename_same ... ignored -test lexer::sql::test::cast_without_typename ... ignored -test lexer::sql::test::column_specified_more_than_once ... ignored -test lexer::sql::test::count_named_placeholders ... ignored -test lexer::sql::test::count_numbered_placeholders ... ignored -test lexer::sql::test::count_placeholders ... ignored -test lexer::sql::test::count_unused_placeholders ... ignored -test lexer::sql::test::create_strict_table_generated_column ... ignored -test lexer::sql::test::create_strict_table_missing_datatype ... ignored -test lexer::sql::test::create_strict_table_unknown_datatype ... ignored -test lexer::sql::test::create_table_with_only_generated_column ... ignored -test lexer::sql::test::create_table_without_column ... ignored -test lexer::sql::test::create_table_without_rowid_missing_pk ... ignored -test lexer::sql::test::create_temporary_table_with_qualified_name ... ignored -test lexer::sql::test::create_view_duplicate_column_name ... ignored - Running benches/keyword.rs (target/release/deps/keyword-255a8492c4f11233) -test lexer::sql::test::create_view_mismatch_count ... ignored -test lexer::sql::test::delete_order_by_without_limit ... ignored -test lexer::sql::test::duplicate_column ... ignored -test lexer::sql::test::extra_comments_between_statements ... ignored -test lexer::sql::test::extra_semicolons_between_statements ... ignored -test lexer::sql::test::foreign_key_on_column ... ignored -test lexer::sql::test::indexed_by_clause_within_triggers ... ignored -test lexer::sql::test::insert_default_values ... ignored -test lexer::sql::test::insert_mismatch_count ... ignored -test lexer::sql::test::missing_join_clause ... ignored -test lexer::sql::test::natural_join_on ... ignored -test lexer::sql::test::only_semicolons_no_statements ... ignored -test lexer::sql::test::qualified_table_name_within_triggers ... ignored -test lexer::sql::test::selects_compound_mismatch_columns_count ... ignored -test lexer::sql::test::unknown_table_option ... ignored -test lexer::sql::test::update_order_by_without_limit ... ignored -test lexer::sql::test::values_mismatch_columns_count ... ignored -test lexer::sql::test::vtab_args ... ignored -test lexer::sql::tests::fallible_iterator ... ignored -test lexer::sql::tests::invalid_number_literal ... ignored -test parser::ast::test::test_dequote ... ignored - -test result: ok. 0 passed; 0 failed; 38 ignored; 0 measured; 0 filtered out; finished in 0.00s - - -running 0 tests - -test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s diff --git a/bindings/java/.gitignore b/bindings/java/.gitignore index f4f8fc542..ea2eb6d0a 100644 --- a/bindings/java/.gitignore +++ b/bindings/java/.gitignore @@ -41,3 +41,4 @@ bin/ ### turso builds ### libs +temp \ No newline at end of file diff --git a/bindings/java/src/main/java/tech/turso/JDBC.java b/bindings/java/src/main/java/tech/turso/JDBC.java index 9611398d9..360098d91 100644 --- a/bindings/java/src/main/java/tech/turso/JDBC.java +++ b/bindings/java/src/main/java/tech/turso/JDBC.java @@ -10,6 +10,9 @@ import tech.turso.jdbc4.JDBC4Connection; import tech.turso.utils.Logger; import tech.turso.utils.LoggerFactory; +/** + * Turso JDBC driver implementation. + */ public final class JDBC implements Driver { private static final Logger logger = LoggerFactory.getLogger(JDBC.class); @@ -24,6 +27,14 @@ public final class JDBC implements Driver { } } + /** + * Creates a new Turso JDBC connection. + * + * @param url the database URL + * @param properties connection properties + * @return a new connection instance, or null if the URL is not valid + * @throws SQLException if a database access error occurs + */ @Nullable public static JDBC4Connection createConnection(String url, Properties properties) throws SQLException { diff --git a/bindings/java/src/main/java/tech/turso/core/TursoDBFactory.java b/bindings/java/src/main/java/tech/turso/core/TursoDBFactory.java index 0076011e4..900c0f681 100644 --- a/bindings/java/src/main/java/tech/turso/core/TursoDBFactory.java +++ b/bindings/java/src/main/java/tech/turso/core/TursoDBFactory.java @@ -23,7 +23,7 @@ public final class TursoDBFactory { * @param url the URL of the database * @param filePath the path to the database file * @param properties additional properties for the database connection - * @return an instance of {@link tursoDB} + * @return an instance of {@link TursoDB} * @throws SQLException if there is an error opening the connection * @throws IllegalArgumentException if the fileName is empty */ diff --git a/bindings/java/src/main/java/tech/turso/core/TursoResultSet.java b/bindings/java/src/main/java/tech/turso/core/TursoResultSet.java index fe9d72067..168639674 100644 --- a/bindings/java/src/main/java/tech/turso/core/TursoResultSet.java +++ b/bindings/java/src/main/java/tech/turso/core/TursoResultSet.java @@ -57,7 +57,7 @@ public final class TursoResultSet { } /** - * Moves the cursor forward one row from its current position. A {@link tursoResultSet} cursor is + * Moves the cursor forward one row from its current position. A {@link TursoResultSet} cursor is * initially positioned before the first fow; the first call to the method next makes * the first row the current row; the second call makes the second row the current row, and so on. * When a call to the next method returns false, the cursor is @@ -65,6 +65,9 @@ public final class TursoResultSet { * *

Note that turso only supports ResultSet.TYPE_FORWARD_ONLY, which means that the * cursor can only move forward. + * + * @return true if the new current row is valid; false if there are no more rows + * @throws SQLException if a database access error occurs */ public boolean next() throws SQLException { if (!open) { diff --git a/bindings/java/src/main/java/tech/turso/core/TursoStatement.java b/bindings/java/src/main/java/tech/turso/core/TursoStatement.java index de4d86e7a..0c15f6586 100644 --- a/bindings/java/src/main/java/tech/turso/core/TursoStatement.java +++ b/bindings/java/src/main/java/tech/turso/core/TursoStatement.java @@ -91,8 +91,8 @@ public final class TursoStatement { private native void _close(long statementPointer); /** - * Initializes the column metadata, such as the names of the columns. Since {@link tursoStatement} - * can only have a single {@link tursoResultSet}, it is appropriate to place the initialization of + * Initializes the column metadata, such as the names of the columns. Since {@link TursoStatement} + * can only have a single {@link TursoResultSet}, it is appropriate to place the initialization of * column metadata here. * * @throws SQLException if a database access error occurs while retrieving column names diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Connection.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Connection.java index 6841a5cbc..172dd3940 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Connection.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Connection.java @@ -9,20 +9,45 @@ import tech.turso.annotations.SkipNullableCheck; import tech.turso.core.TursoConnection; import tech.turso.core.TursoStatement; +/** + * JDBC 4 Connection implementation for Turso databases. + */ public final class JDBC4Connection implements Connection { private final TursoConnection connection; private Map> typeMap = new HashMap<>(); + /** + * Creates a new JDBC4 connection. + * + * @param url the database URL + * @param filePath the database file path + * @throws SQLException if a database access error occurs + */ public JDBC4Connection(String url, String filePath) throws SQLException { this.connection = new TursoConnection(url, filePath); } + /** + * Creates a new JDBC4 connection with properties. + * + * @param url the database URL + * @param filePath the database file path + * @param properties connection properties + * @throws SQLException if a database access error occurs + */ public JDBC4Connection(String url, String filePath, Properties properties) throws SQLException { this.connection = new TursoConnection(url, filePath, properties); } + /** + * Prepares a SQL statement for execution. + * + * @param sql the SQL statement to prepare + * @return the prepared statement + * @throws SQLException if a database access error occurs + */ public TursoStatement prepare(String sql) throws SQLException { final TursoStatement statement = connection.prepare(sql); statement.initializeColumnMetadata(); @@ -357,6 +382,11 @@ public final class JDBC4Connection implements Connection { return false; } + /** + * Sets the busy timeout for the connection. + * + * @param busyTimeout the timeout in milliseconds + */ public void setBusyTimeout(int busyTimeout) { // TODO: add support for busy timeout } @@ -367,10 +397,20 @@ public final class JDBC4Connection implements Connection { return 0; } + /** + * Gets the database URL. + * + * @return the database URL + */ public String getUrl() { return this.connection.getUrl(); } + /** + * Checks if the connection is open. + * + * @throws SQLException if the connection is closed + */ public void checkOpen() throws SQLException { connection.checkOpen(); } diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4DatabaseMetaData.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4DatabaseMetaData.java index c0137c96d..3dcc49851 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4DatabaseMetaData.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4DatabaseMetaData.java @@ -13,6 +13,9 @@ import tech.turso.core.TursoPropertiesHolder; import tech.turso.utils.Logger; import tech.turso.utils.LoggerFactory; +/** + * JDBC 4 DatabaseMetaData implementation for Turso databases. + */ public final class JDBC4DatabaseMetaData implements DatabaseMetaData { private static final Logger logger = LoggerFactory.getLogger(JDBC4DatabaseMetaData.class); @@ -51,6 +54,11 @@ public final class JDBC4DatabaseMetaData implements DatabaseMetaData { @Nullable private PreparedStatement getColumnPrivileges = null; + /** + * Creates a new JDBC4DatabaseMetaData instance. + * + * @param connection the database connection + */ public JDBC4DatabaseMetaData(JDBC4Connection connection) { this.connection = connection; } diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4PreparedStatement.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4PreparedStatement.java index d9508e0dd..4b777e710 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4PreparedStatement.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4PreparedStatement.java @@ -26,11 +26,21 @@ import java.util.Calendar; import tech.turso.annotations.SkipNullableCheck; import tech.turso.core.TursoResultSet; +/** + * JDBC 4 PreparedStatement implementation for Turso databases. + */ public final class JDBC4PreparedStatement extends JDBC4Statement implements PreparedStatement { private final String sql; private final JDBC4ResultSet resultSet; + /** + * Creates a new JDBC4PreparedStatement. + * + * @param connection the database connection + * @param sql the SQL statement to prepare + * @throws SQLException if a database access error occurs + */ public JDBC4PreparedStatement(JDBC4Connection connection, String sql) throws SQLException { super(connection); this.sql = sql; diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4ResultSet.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4ResultSet.java index 5d9156b88..448015c52 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4ResultSet.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4ResultSet.java @@ -27,10 +27,18 @@ import tech.turso.annotations.Nullable; import tech.turso.annotations.SkipNullableCheck; import tech.turso.core.TursoResultSet; +/** + * JDBC 4 ResultSet implementation for Turso databases. + */ public final class JDBC4ResultSet implements ResultSet, ResultSetMetaData { private final TursoResultSet resultSet; + /** + * Creates a new JDBC4ResultSet. + * + * @param resultSet the underlying Turso result set + */ public JDBC4ResultSet(TursoResultSet resultSet) { this.resultSet = resultSet; } @@ -1279,8 +1287,19 @@ public final class JDBC4ResultSet implements ResultSet, ResultSetMetaData { throw new UnsupportedOperationException("not implemented"); } + /** + * Functional interface for result set value suppliers. + * + * @param the type of value to supply + */ @FunctionalInterface public interface ResultSetSupplier { + /** + * Gets a result from the result set. + * + * @return the result value + * @throws Exception if an error occurs + */ T get() throws Exception; } diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Statement.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Statement.java index eb31c8d0b..b96eb1cda 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Statement.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Statement.java @@ -17,6 +17,9 @@ import tech.turso.annotations.SkipNullableCheck; import tech.turso.core.TursoResultSet; import tech.turso.core.TursoStatement; +/** + * JDBC 4 Statement implementation for Turso databases. + */ public class JDBC4Statement implements Statement { private static final Pattern BATCH_COMPATIBLE_PATTERN = @@ -35,7 +38,10 @@ public class JDBC4Statement implements Statement { private final JDBC4Connection connection; + /** The underlying Turso statement. */ @Nullable protected TursoStatement statement = null; + + /** The number of rows affected by the last update operation. */ protected long updateCount; // Because JDBC4Statement has different life cycle in compared to tursoStatement, let's use this @@ -475,8 +481,19 @@ public class JDBC4Statement implements Statement { } } + /** + * Functional interface for SQL callable operations. + * + * @param the return type + */ @FunctionalInterface protected interface SQLCallable { + /** + * Executes the SQL operation. + * + * @return the result of the operation + * @throws SQLException if a database access error occurs + */ T call() throws SQLException; } diff --git a/bindings/java/src/main/java/tech/turso/utils/ByteArrayUtils.java b/bindings/java/src/main/java/tech/turso/utils/ByteArrayUtils.java index 4922984f0..bd366b5ee 100644 --- a/bindings/java/src/main/java/tech/turso/utils/ByteArrayUtils.java +++ b/bindings/java/src/main/java/tech/turso/utils/ByteArrayUtils.java @@ -3,7 +3,14 @@ package tech.turso.utils; import java.nio.charset.StandardCharsets; import tech.turso.annotations.Nullable; +/** Utility class for converting between byte arrays and strings using UTF-8 encoding. */ public final class ByteArrayUtils { + /** + * Converts a UTF-8 encoded byte array to a string. + * + * @param buffer the byte array to convert, may be null + * @return the string representation, or null if the input is null + */ @Nullable public static String utf8ByteBufferToString(@Nullable byte[] buffer) { if (buffer == null) { @@ -13,6 +20,12 @@ public final class ByteArrayUtils { return new String(buffer, StandardCharsets.UTF_8); } + /** + * Converts a string to a UTF-8 encoded byte array. + * + * @param str the string to convert, may be null + * @return the byte array representation, or null if the input is null + */ @Nullable public static byte[] stringToUtf8ByteArray(@Nullable String str) { if (str == null) { From fe7027e8eeec1ada817ac5bbae8011bcc9edfc4a Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Tue, 7 Oct 2025 23:33:13 +0900 Subject: [PATCH 048/428] Fix Makefile libs command --- bindings/java/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/java/Makefile b/bindings/java/Makefile index 572521784..9251afe83 100644 --- a/bindings/java/Makefile +++ b/bindings/java/Makefile @@ -10,7 +10,7 @@ LINUX_X86_DIR := $(RELEASE_DIR)/linux_x86 .PHONY: libs macos_x86 macos_arm64 windows lint lint_apply test build_test -libs: macos_x86 macos_arm64 windows +libs: macos_x86 macos_arm64 windows linux_x86 macos_x86: @echo "Building release version for macOS x86_64..." From 8af9a5381842a37f319a11d2d8cd5459390b4c02 Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Tue, 7 Oct 2025 23:37:53 +0900 Subject: [PATCH 049/428] Update build.gradle.kts for publishing --- bindings/java/build.gradle.kts | 256 +++++++++++++++++++++++++++++++- bindings/java/gradle.properties | 18 ++- 2 files changed, 267 insertions(+), 7 deletions(-) diff --git a/bindings/java/build.gradle.kts b/bindings/java/build.gradle.kts index 1df88faf9..0def7e18d 100644 --- a/bindings/java/build.gradle.kts +++ b/bindings/java/build.gradle.kts @@ -2,37 +2,283 @@ import net.ltgt.gradle.errorprone.CheckSeverity import net.ltgt.gradle.errorprone.errorprone import org.gradle.api.tasks.testing.logging.TestExceptionFormat import org.gradle.api.tasks.testing.logging.TestLogEvent +import java.security.MessageDigest plugins { java application `java-library` `maven-publish` + signing id("net.ltgt.errorprone") version "3.1.0" // If you're stuck on JRE 8, use id 'com.diffplug.spotless' version '6.13.0' or older. id("com.diffplug.spotless") version "6.13.0" } -group = properties["projectGroup"]!! -version = properties["projectVersion"]!! +// Helper function to read properties with defaults +fun prop(key: String, default: String? = null): String? = + findProperty(key)?.toString() ?: default + +group = prop("projectGroup") ?: error("projectGroup must be set in gradle.properties") +version = prop("projectVersion") ?: error("projectVersion must be set in gradle.properties") java { sourceCompatibility = JavaVersion.VERSION_1_8 targetCompatibility = JavaVersion.VERSION_1_8 + withJavadocJar() + withSourcesJar() +} + +// TODO: Add javadoc to required class and methods. After that, let's remove this settings +tasks.withType { + options { + (this as StandardJavadocDocletOptions).apply { + addStringOption("Xdoclint:none", "-quiet") + } + } } publishing { publications { create("mavenJava") { from(components["java"]) - groupId = "tech.turso" - artifactId = "turso" - version = "0.0.1-SNAPSHOT" + groupId = prop("projectGroup")!! + artifactId = prop("projectArtifactId")!! + version = prop("projectVersion")!! + + pom { + name.set(prop("pomName")) + description.set(prop("pomDescription")) + url.set(prop("pomUrl")) + + licenses { + license { + name.set(prop("pomLicenseName")) + url.set(prop("pomLicenseUrl")) + } + } + + developers { + developer { + id.set(prop("pomDeveloperId")) + name.set(prop("pomDeveloperName")) + email.set(prop("pomDeveloperEmail")) + } + } + + scm { + connection.set(prop("pomScmConnection")) + developerConnection.set(prop("pomScmDeveloperConnection")) + url.set(prop("pomScmUrl")) + } + } } } } +signing { + // Make signing required for publishing + setRequired(true) + + // For CI/GitHub Actions: use in-memory keys + val signingKey = providers.environmentVariable("GPG_PRIVATE_KEY").orNull + val signingPassword = providers.environmentVariable("GPG_PASSPHRASE").orNull + + if (signingKey != null && signingPassword != null) { + // CI mode: use in-memory keys + useInMemoryPgpKeys(signingKey, signingPassword) + } else { + // Local mode: use GPG command from system + useGpgCmd() + } + + sign(publishing.publications["mavenJava"]) +} + +// Helper task to generate checksums +val generateChecksums by tasks.registering { + dependsOn("jar", "sourcesJar", "javadocJar", "generatePomFileForMavenJavaPublication") + + val checksumDir = layout.buildDirectory.dir("checksums") + + doLast { + val files = listOf( + tasks.jar.get().archiveFile.get().asFile, + tasks.named("sourcesJar").get().outputs.files.singleFile, + tasks.named("javadocJar").get().outputs.files.singleFile, + layout.buildDirectory.file("publications/mavenJava/pom-default.xml").get().asFile + ) + + checksumDir.get().asFile.mkdirs() + + files.forEach { file -> + if (file.exists()) { + // MD5 + val md5 = MessageDigest.getInstance("MD5") + .digest(file.readBytes()) + .joinToString("") { "%02x".format(it) } + file("${file.absolutePath}.md5").writeText(md5) + + // SHA1 + val sha1 = MessageDigest.getInstance("SHA-1") + .digest(file.readBytes()) + .joinToString("") { "%02x".format(it) } + file("${file.absolutePath}.sha1").writeText(sha1) + } + } + } +} + +// Task to create a bundle zip for Maven Central Portal +val createMavenCentralBundle by tasks.registering(Zip::class) { + group = "publishing" + description = "Creates a bundle zip for Maven Central Portal upload" + + dependsOn("generatePomFileForMavenJavaPublication", "jar", "sourcesJar", "javadocJar", "signMavenJavaPublication", generateChecksums) + + // Ensure signing happens before bundle creation + mustRunAfter("signMavenJavaPublication") + + val groupId = prop("projectGroup")!!.replace(".", "/") + val artifactId = prop("projectArtifactId")!! + val projectVer = version.toString() + + // Validate version is not SNAPSHOT for Maven Central + doFirst { + if (projectVer.contains("SNAPSHOT")) { + throw GradleException( + "Cannot publish SNAPSHOT version to Maven Central. " + + "Please change projectVersion in gradle.properties to a release version (e.g., 0.0.1)" + ) + } + } + + archiveFileName.set("$artifactId-$projectVer-bundle.zip") + destinationDirectory.set(layout.buildDirectory.dir("maven-central")) + + // Maven Central expects files in groupId/artifactId/version/ structure + val basePath = "$groupId/$artifactId/$projectVer" + + // Main JAR + checksums + signature + from(tasks.jar.get().archiveFile) { + into(basePath) + rename { "$artifactId-$projectVer.jar" } + } + from(tasks.jar.get().archiveFile.get().asFile.absolutePath + ".md5") { + into(basePath) + rename { "$artifactId-$projectVer.jar.md5" } + } + from(tasks.jar.get().archiveFile.get().asFile.absolutePath + ".sha1") { + into(basePath) + rename { "$artifactId-$projectVer.jar.sha1" } + } + + // Sources JAR + checksums + signature + from(tasks.named("sourcesJar").get().outputs.files) { + into(basePath) + rename { "$artifactId-$projectVer-sources.jar" } + } + from(tasks.named("sourcesJar").get().outputs.files.singleFile.absolutePath + ".md5") { + into(basePath) + rename { "$artifactId-$projectVer-sources.jar.md5" } + } + from(tasks.named("sourcesJar").get().outputs.files.singleFile.absolutePath + ".sha1") { + into(basePath) + rename { "$artifactId-$projectVer-sources.jar.sha1" } + } + + // Javadoc JAR + checksums + signature + from(tasks.named("javadocJar").get().outputs.files) { + into(basePath) + rename { "$artifactId-$projectVer-javadoc.jar" } + } + from(tasks.named("javadocJar").get().outputs.files.singleFile.absolutePath + ".md5") { + into(basePath) + rename { "$artifactId-$projectVer-javadoc.jar.md5" } + } + from(tasks.named("javadocJar").get().outputs.files.singleFile.absolutePath + ".sha1") { + into(basePath) + rename { "$artifactId-$projectVer-javadoc.jar.sha1" } + } + + // POM + checksums + signature + from(layout.buildDirectory.file("publications/mavenJava/pom-default.xml")) { + into(basePath) + rename { "$artifactId-$projectVer.pom" } + } + from(layout.buildDirectory.file("publications/mavenJava/pom-default.xml").get().asFile.absolutePath + ".md5") { + into(basePath) + rename { "$artifactId-$projectVer.pom.md5" } + } + from(layout.buildDirectory.file("publications/mavenJava/pom-default.xml").get().asFile.absolutePath + ".sha1") { + into(basePath) + rename { "$artifactId-$projectVer.pom.sha1" } + } + + // Signature files - get them from the signing task outputs + doFirst { + val signingTask = tasks.named("signMavenJavaPublication").get() + logger.lifecycle("Signing task outputs: ${signingTask.outputs.files.files}") + } + + // Include signature files generated by the signing plugin + from(tasks.named("signMavenJavaPublication").get().outputs.files) { + into(basePath) + include("*.jar.asc", "pom-default.xml.asc") + exclude("module.json.asc") // Exclude gradle module metadata signature + rename { name -> + // Only rename the POM signature file + // JAR signatures are already correctly named by the signing plugin + if (name == "pom-default.xml.asc") { + "$artifactId-$projectVer.pom.asc" + } else { + name // Keep original name (already correct) + } + } + } +} + +// Task to upload bundle to Maven Central Portal +tasks.register("publishToMavenCentral") { + group = "publishing" + description = "Publishes artifacts to Maven Central Portal" + + // Run publish first to generate signatures, then create bundle + dependsOn("publish") + dependsOn(createMavenCentralBundle) + + // Make sure bundle creation happens after publish + createMavenCentralBundle.get().mustRunAfter("publish") + + doLast { + val username = providers.environmentVariable("MAVEN_CENTRAL_USERNAME").orNull + val password = providers.environmentVariable("MAVEN_CENTRAL_PASSWORD").orNull + val bundleFile = createMavenCentralBundle.get().archiveFile.get().asFile + + require(username != null) { "MAVEN_CENTRAL_USERNAME environment variable must be set" } + require(password != null) { "MAVEN_CENTRAL_PASSWORD environment variable must be set" } + require(bundleFile.exists()) { "Bundle file does not exist: ${bundleFile.absolutePath}" } + + logger.lifecycle("Uploading bundle to Maven Central Portal...") + logger.lifecycle("Bundle: ${bundleFile.absolutePath}") + logger.lifecycle("Size: ${bundleFile.length() / 1024} KB") + + // Use curl for uploading (simple and available on most systems) + exec { + commandLine( + "curl", + "-X", "POST", + "-u", "$username:$password", + "-F", "bundle=@${bundleFile.absolutePath}", + "https://central.sonatype.com/api/v1/publisher/upload?name=${bundleFile.name}&publishingType=AUTOMATIC" + ) + } + + logger.lifecycle("Upload completed. Check https://central.sonatype.com/publishing for status.") + } +} + repositories { mavenCentral() } diff --git a/bindings/java/gradle.properties b/bindings/java/gradle.properties index 4b6e7d55e..c2f38979b 100644 --- a/bindings/java/gradle.properties +++ b/bindings/java/gradle.properties @@ -1,2 +1,16 @@ -projectGroup="tech.turso" -projectVersion=0.0.1-SNAPSHOT +projectGroup=tech.turso +projectVersion=0.0.1 +projectArtifactId=turso + +# POM metadata +pomName=Turso JDBC Driver +pomDescription=Turso JDBC driver for Java applications +pomUrl=https://github.com/tursodatabase/turso +pomLicenseName=MIT License +pomLicenseUrl=https://opensource.org/licenses/MIT +pomDeveloperId=turso +pomDeveloperName=Turso +pomDeveloperEmail=penberg@iki.fi +pomScmConnection=scm:git:git://github.com/tursodatabase/turso.git +pomScmDeveloperConnection=scm:git:ssh://github.com:tursodatabase/turso.git +pomScmUrl=https://github.com/tursodatabase/turso \ No newline at end of file From c149f65b914032eec0c5da3aa5a9d1aee95ad08a Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Tue, 7 Oct 2025 23:38:07 +0900 Subject: [PATCH 050/428] Add java-publish.yml workflow --- .github/workflows/java-publish.yml | 143 +++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 .github/workflows/java-publish.yml diff --git a/.github/workflows/java-publish.yml b/.github/workflows/java-publish.yml new file mode 100644 index 000000000..1d94a3ed1 --- /dev/null +++ b/.github/workflows/java-publish.yml @@ -0,0 +1,143 @@ +name: Publish Java Bindings to Maven Central + +on: + # Manually trigger the workflow + workflow_dispatch: + +env: + working-directory: bindings/java + +jobs: + # Build native libraries for each platform + build-natives: + strategy: + matrix: + include: + - os: ubuntu-latest + target: x86_64-unknown-linux-gnu + make-target: linux_x86 + artifact-name: linux-x86_64 + - os: macos-latest + target: x86_64-apple-darwin + make-target: macos_x86 + artifact-name: macos-x86_64 + - os: macos-latest + target: aarch64-apple-darwin + make-target: macos_arm64 + artifact-name: macos-arm64 + - os: ubuntu-latest + target: x86_64-pc-windows-gnu + make-target: windows + artifact-name: windows-x86_64 + + runs-on: ${{ matrix.os }} + timeout-minutes: 30 + + defaults: + run: + working-directory: ${{ env.working-directory }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.target }} + + - name: Verify and install Rust target + run: | + echo "Installing target: ${{ matrix.target }}" + rustup target add ${{ matrix.target }} + echo "Installed targets:" + rustup target list --installed + echo "Rust version:" + rustc --version + + - name: Install cross-compilation tools (Windows on Linux) + if: matrix.target == 'x86_64-pc-windows-gnu' + run: | + sudo apt-get update + sudo apt-get install -y mingw-w64 + + - name: Build native library + run: make ${{ matrix.make-target }} + + - name: Verify build output + run: | + echo "Build completed for ${{ matrix.target }}" + ls -lah libs/ + find libs/ -type f + + - name: Upload native library + uses: actions/upload-artifact@v4 + with: + name: native-${{ matrix.artifact-name }} + path: ${{ env.working-directory }}/libs/ + retention-days: 1 + + # Publish to Maven Central with all native libraries + publish: + needs: build-natives + runs-on: ubuntu-latest + timeout-minutes: 30 + + defaults: + run: + working-directory: ${{ env.working-directory }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up JDK + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '8' + + - name: Setup Gradle + uses: gradle/actions/setup-gradle@v3 + + - name: Install Rust (for test builds) + uses: dtolnay/rust-toolchain@stable + + - name: Download all native libraries + uses: actions/download-artifact@v4 + with: + pattern: native-* + path: ${{ env.working-directory }}/libs-temp + merge-multiple: true + + - name: Organize native libraries + run: | + # Move downloaded artifacts to libs directory + rm -rf libs + mv libs-temp libs + echo "Native libraries collected:" + ls -R libs/ + + - name: Build test natives + run: make build_test + + - name: Run tests + run: ./gradlew test + + - name: Publish to Maven Central + env: + MAVEN_CENTRAL_USERNAME: ${{ secrets.MAVEN_CENTRAL_USERNAME }} + MAVEN_CENTRAL_PASSWORD: ${{ secrets.MAVEN_CENTRAL_PASSWORD }} + GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} + GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + run: | + echo "Building, signing, and publishing to Maven Central..." + ./gradlew clean publishToMavenCentral --no-daemon --stacktrace + + - name: Upload bundle artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: maven-central-bundle + path: ${{ env.working-directory }}/build/maven-central/*.zip + retention-days: 7 \ No newline at end of file From 7f93f64fc51d807f5f7566c48216121fb07330a4 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 5 Oct 2025 19:32:48 -0300 Subject: [PATCH 051/428] enable Drop statements --- simulator/generation/property.rs | 3 +-- simulator/generation/query.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index b00a1114d..5ad72c5c6 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1657,8 +1657,7 @@ impl PropertyDiscriminants { } PropertyDiscriminants::DropSelect => { if !env.opts.disable_drop_select { - // remaining.drop - 0 + remaining.drop } else { 0 } diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index 88cd95126..a78df68c6 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -121,7 +121,7 @@ impl QueryDiscriminants { QueryDiscriminants::Insert => remaining.insert, QueryDiscriminants::Delete => remaining.delete, QueryDiscriminants::Update => remaining.update, - QueryDiscriminants::Drop => 0, + QueryDiscriminants::Drop => remaining.drop, QueryDiscriminants::CreateIndex => remaining.create_index, QueryDiscriminants::Begin | QueryDiscriminants::Commit From 3e8867c8f5e46a47d7cc7821471e1c60a2f37c00 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 6 Oct 2025 00:10:55 -0300 Subject: [PATCH 052/428] `DropSelect` property should only fail when error is not a parse error on the table name --- simulator/generation/property.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 5ad72c5c6..ff7856474 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -691,21 +691,21 @@ impl Property { format!("select query should result in an error for table '{table}'"), move |stack: &Vec, _| { let last = stack.last().unwrap(); + dbg!(last); match last { Ok(success) => Ok(Err(format!( "expected table creation to fail but it succeeded: {success:?}" ))), - Err(e) => { - if e.to_string() - .contains(&format!("Table {table_name} does not exist")) + Err(e) => match e { + LimboError::ParseError(e) + if e.contains(&format!("no such table: {table_name}")) => { Ok(Ok(())) - } else { - Ok(Err(format!( - "expected table does not exist error, got: {e}" - ))) } - } + _ => Ok(Err(format!( + "expected table does not exist error, got: {e}" + ))), + }, } }, )); @@ -726,7 +726,7 @@ impl Property { .into_iter() .map(|q| Interaction::new(connection_index, InteractionType::Query(q))), ); - interactions.push(Interaction::new(connection_index, select)); + interactions.push(Interaction::new_ignore_error(connection_index, select)); interactions.push(Interaction::new(connection_index, assertion)); interactions From 4fc7be5042b7df705b6480646ee58dcfb4fafb8b Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 6 Oct 2025 00:19:24 -0300 Subject: [PATCH 053/428] as we have DROP table now, if we want to generate extensional queries eagerly, without affecting how we document interactions with MVCC, we need to travel `forward` in time and shadow queries eagerly so we can generate queries correctly. This involves cloning the tables unfortunately which is inneficient but correct --- simulator/generation/property.rs | 189 ++++++++++++++++++++++--------- simulator/model/mod.rs | 24 ++++ simulator/runner/env.rs | 12 ++ 3 files changed, 174 insertions(+), 51 deletions(-) diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index ff7856474..2f8dcc43c 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1,3 +1,10 @@ +//! FIXME: With the current API and generation logic in plan.rs, +//! for Properties that have intermediary queries we need to CLONE the current Context tables +//! to properly generate queries, as we need to shadow after each query generated to make sure we are generating +//! queries that are valid. This is specially valid with DROP and ALTER TABLE in the mix, because with outdated context +//! we can generate queries that reference tables that do not exist. This is not a correctness issue, but more of +//! an optimization issue that is good to point out for the future + use rand::distr::{Distribution, weighted::WeightedIndex}; use serde::{Deserialize, Serialize}; use sql_generation::{ @@ -26,11 +33,34 @@ use crate::{ }, model::{Query, QueryCapabilities, QueryDiscriminants}, profiles::query::QueryProfile, - runner::env::SimulatorEnv, + runner::env::{ShadowTablesMut, SimulatorEnv}, }; use super::plan::{Assertion, Interaction, InteractionStats, ResultSet}; +#[derive(Debug, Clone, Copy)] +struct PropertyGenContext<'a> { + tables: &'a Vec, + opts: &'a sql_generation::generation::Opts, +} + +impl<'a> PropertyGenContext<'a> { + #[inline] + fn new(tables: &'a Vec, opts: &'a Opts) -> Self { + Self { tables, opts } + } +} + +impl<'a> GenerationContext for PropertyGenContext<'a> { + fn tables(&self) -> &Vec { + self.tables + } + + fn opts(&self) -> &sql_generation::generation::Opts { + self.opts + } +} + /// Properties are representations of executable specifications /// about the database behavior. #[derive(Debug, Clone, Serialize, Deserialize, strum::EnumDiscriminants)] @@ -1230,10 +1260,10 @@ fn property_insert_values_select( let row = rows[row_index].clone(); // Insert the rows - let insert_query = Insert::Values { + let insert_query = Query::Insert(Insert::Values { table: table.name.clone(), values: rows, - }; + }); // Choose if we want queries to be executed in an interactive transaction let interactive = if !mvcc && rng.random_bool(0.5) { @@ -1244,21 +1274,15 @@ fn property_insert_values_select( } else { None }; - // Create random queries respecting the constraints - let mut queries = Vec::new(); + + let amount = rng.random_range(0..3); + // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) // - [x] The inserted row will not be deleted. // - [x] The inserted row will not be updated. // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented) - if let Some(ref interactive) = interactive { - queries.push(Query::Begin(if interactive.start_with_immediate { - Begin::Immediate - } else { - Begin::Deferred - })); - } - for _ in 0..rng.random_range(0..3) { - let query = Query::arbitrary_from(rng, ctx, query_distr); + let mut queries = generate_queries(rng, ctx, amount, &[&insert_query], |rng, ctx| { + let query = Query::arbitrary_from(rng, &ctx, query_distr); match &query { Query::Delete(Delete { table: t, @@ -1266,14 +1290,14 @@ fn property_insert_values_select( }) => { // The inserted row will not be deleted. if t == &table.name && predicate.test(&row, table) { - continue; + return None; } } Query::Create(Create { table: t }) => { // There will be no errors in the middle interactions. // - Creating the same table is an error if t.name == table.name { - continue; + return None; } } Query::Update(Update { @@ -1283,14 +1307,23 @@ fn property_insert_values_select( }) => { // The inserted row will not be updated. if t == &table.name && predicate.test(&row, table) { - continue; + return None; } } _ => (), } - queries.push(query); - } + Some(query) + }); + if let Some(ref interactive) = interactive { + queries.insert( + 0, + Query::Begin(if interactive.start_with_immediate { + Begin::Immediate + } else { + Begin::Deferred + }), + ); queries.push(if interactive.end_with_commit { Query::Commit(Commit) } else { @@ -1305,7 +1338,7 @@ fn property_insert_values_select( ); Property::InsertValuesSelect { - insert: insert_query, + insert: insert_query.unwrap_insert(), row_index, queries, select: select_query, @@ -1376,28 +1409,28 @@ fn property_double_create_failure( _mvcc: bool, ) -> Property { // Create the table - let create_query = Create::arbitrary(rng, ctx); - let table = &create_query.table; + let create_query = Query::Create(Create::arbitrary(rng, ctx)); + let table = &create_query.as_create().table; + + let amount = rng.random_range(0..3); - // Create random queries respecting the constraints - let mut queries = Vec::new(); // The interactions in the middle has the following constraints; // - [x] There will be no errors in the middle interactions.(best effort) // - [ ] Table `t` will not be renamed or dropped.(todo: add this constraint once ALTER or DROP is implemented) - for _ in 0..rng.random_range(0..3) { - let query = Query::arbitrary_from(rng, ctx, query_distr); + let queries = generate_queries(rng, ctx, amount, &[&create_query], |rng, ctx| { + let query = Query::arbitrary_from(rng, &ctx, query_distr); if let Query::Create(Create { table: t }) = &query { // There will be no errors in the middle interactions. // - Creating the same table is an error if t.name == table.name { - continue; + return None; } } - queries.push(query); - } + Some(query) + }); Property::DoubleCreateFailure { - create: create_query, + create: create_query.unwrap_create(), queries, } } @@ -1413,18 +1446,23 @@ fn property_delete_select( // Generate a random predicate let predicate = Predicate::arbitrary_from(rng, ctx, table); - // Create random queries respecting the constraints - let mut queries = Vec::new(); + let amount = rng.random_range(0..3); + + let delete = Query::Delete(Delete { + predicate: predicate.clone(), + table: table.name.clone(), + }); + // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) // - [x] A row that holds for the predicate will not be inserted. // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented) - for _ in 0..rng.random_range(0..3) { - let query = Query::arbitrary_from(rng, ctx, query_distr); + let queries = generate_queries(rng, ctx, amount, &[&delete], |rng, tmp_ctx| { + let query = Query::arbitrary_from(rng, &tmp_ctx, query_distr); match &query { Query::Insert(Insert::Values { table: t, values }) => { // A row that holds for the predicate will not be inserted. if t == &table.name && values.iter().any(|v| predicate.test(v, table)) { - continue; + return None; } } Query::Insert(Insert::Select { @@ -1433,26 +1471,26 @@ fn property_delete_select( }) => { // A row that holds for the predicate will not be inserted. if t == &table.name { - continue; + return None; } } Query::Update(Update { table: t, .. }) => { // A row that holds for the predicate will not be updated. if t == &table.name { - continue; + return None; } } Query::Create(Create { table: t }) => { // There will be no errors in the middle interactions. // - Creating the same table is an error if t.name == table.name { - continue; + return None; } } _ => (), } - queries.push(query); - } + Some(query) + }); Property::DeleteSelect { table: table.name.clone(), @@ -1470,20 +1508,26 @@ fn property_drop_select( // Get a random table let table = pick(ctx.tables(), rng); + let drop = Query::Drop(Drop { + table: table.name.clone(), + }); + + let amount = rng.random_range(0..3); // Create random queries respecting the constraints - let mut queries = Vec::new(); - // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) - // - [-] The table `t` will not be created, no table will be renamed to `t`. (todo: update this constraint once ALTER is implemented) - for _ in 0..rng.random_range(0..3) { - let query = Query::arbitrary_from(rng, ctx, query_distr); - if let Query::Create(Create { table: t }) = &query { + let queries = generate_queries(rng, ctx, amount, &[&drop], |rng, tmp_ctx| { + // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) + // - [-] The table `t` will not be created, no table will be renamed to `t`. (todo: update this constraint once ALTER is implemented) + + let query = Query::arbitrary_from(rng, &tmp_ctx, query_distr); + if let Query::Create(Create { table: t }) = &query + && t.name == table.name + { // - The table `t` will not be created - if t.name == table.name { - continue; - } + None + } else { + Some(query) } - queries.push(query); - } + }); let select = Select::simple( table.name.clone(), @@ -1815,6 +1859,49 @@ impl<'a> ArbitraryFrom<&PropertyDistribution<'a>> for Property { } } +fn generate_queries( + rng: &mut R, + ctx: &impl GenerationContext, + amount: usize, + init_queries: &[&Query], + func: F, +) -> Vec +where + F: Fn(&mut R, PropertyGenContext) -> Option, +{ + // Create random queries respecting the constraints + let mut queries = Vec::new(); + + let range = 0..amount; + if !range.is_empty() { + let mut tmp_tables = ctx.tables().clone(); + + for query in init_queries { + tmp_shadow(&mut tmp_tables, query); + } + + for _ in range { + let tmp_ctx = PropertyGenContext::new(&tmp_tables, ctx.opts()); + + let Some(query) = func(rng, tmp_ctx) else { + continue; + }; + + tmp_shadow(&mut tmp_tables, &query); + + queries.push(query); + } + } + queries +} + +fn tmp_shadow(tmp_tables: &mut Vec
, query: &Query) { + let mut tx_tables = None; + let mut tmp_shadow_tables = ShadowTablesMut::new(tmp_tables, &mut tx_tables); + + let _ = query.shadow(&mut tmp_shadow_tables); +} + fn print_row(row: &[SimValue]) -> String { row.iter() .map(|v| match &v.0 { diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 510922f6b..4af61b353 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -35,6 +35,28 @@ pub enum Query { } impl Query { + pub fn as_create(&self) -> &Create { + match self { + Self::Create(create) => create, + _ => unreachable!(), + } + } + + pub fn unwrap_create(self) -> Create { + match self { + Self::Create(create) => create, + _ => unreachable!(), + } + } + + #[inline] + pub fn unwrap_insert(self) -> Insert { + match self { + Self::Insert(insert) => insert, + _ => unreachable!(), + } + } + pub fn dependencies(&self) -> IndexSet { match self { Query::Select(select) => select.dependencies(), @@ -102,6 +124,7 @@ impl Shadow for Query { type Result = anyhow::Result>>; fn shadow(&self, env: &mut ShadowTablesMut) -> Self::Result { + tracing::info!("SHADOW {:?}", self); match self { Query::Create(create) => create.shadow(env), Query::Insert(insert) => insert.shadow(env), @@ -239,6 +262,7 @@ impl Shadow for Drop { type Result = anyhow::Result>>; fn shadow(&self, tables: &mut ShadowTablesMut) -> Self::Result { + tracing::info!("dropping {:?}", self); if !tables.iter().any(|t| t.name == self.table) { // If the table does not exist, we return an error return Err(anyhow::anyhow!( diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index 52b57052a..300b08c84 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -83,6 +83,18 @@ impl<'a, 'b> ShadowTablesMut<'a> where 'a: 'b, { + /// Creation of [ShadowTablesMut] outside of [SimulatorEnv] should be done sparingly and carefully. + /// Should only need to call this function if we need to do shadowing in a temporary model table + pub fn new( + commited_tables: &'a mut Vec
, + transaction_tables: &'a mut Option, + ) -> Self { + ShadowTablesMut { + commited_tables, + transaction_tables, + } + } + fn tables(&'a self) -> &'a Vec
{ self.transaction_tables .as_ref() From 07cc1c548be6dd3c2095a7fcc9c3efe136668c7b Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 6 Oct 2025 13:10:02 -0300 Subject: [PATCH 054/428] adjust query generation to avoid DROP for certain extensional queries --- simulator/generation/property.rs | 74 ++++++++++++++++---------------- 1 file changed, 36 insertions(+), 38 deletions(-) diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 2f8dcc43c..3fef90b51 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1287,32 +1287,29 @@ fn property_insert_values_select( Query::Delete(Delete { table: t, predicate, - }) => { + }) if t == &table.name && predicate.test(&row, table) => { // The inserted row will not be deleted. - if t == &table.name && predicate.test(&row, table) { - return None; - } + None } - Query::Create(Create { table: t }) => { + Query::Create(Create { table: t }) if t.name == table.name => { // There will be no errors in the middle interactions. // - Creating the same table is an error - if t.name == table.name { - return None; - } + None } Query::Update(Update { table: t, set_values: _, predicate, - }) => { + }) if t == &table.name && predicate.test(&row, table) => { // The inserted row will not be updated. - if t == &table.name && predicate.test(&row, table) { - return None; - } + None } - _ => (), + Query::Drop(Drop { table: t }) if *t == table.name => { + // Cannot drop the table we are inserting + None + } + _ => Some(query), } - Some(query) }); if let Some(ref interactive) = interactive { @@ -1419,14 +1416,18 @@ fn property_double_create_failure( // - [ ] Table `t` will not be renamed or dropped.(todo: add this constraint once ALTER or DROP is implemented) let queries = generate_queries(rng, ctx, amount, &[&create_query], |rng, ctx| { let query = Query::arbitrary_from(rng, &ctx, query_distr); - if let Query::Create(Create { table: t }) = &query { - // There will be no errors in the middle interactions. - // - Creating the same table is an error - if t.name == table.name { - return None; + match &query { + Query::Create(Create { table: t }) if t.name == table.name => { + // There will be no errors in the middle interactions. + // - Creating the same table is an error + None } + Query::Drop(Drop { table: t }) if *t == table.name => { + // Cannot Drop the created table + None + } + _ => Some(query), } - Some(query) }); Property::DoubleCreateFailure { @@ -1459,37 +1460,34 @@ fn property_delete_select( let queries = generate_queries(rng, ctx, amount, &[&delete], |rng, tmp_ctx| { let query = Query::arbitrary_from(rng, &tmp_ctx, query_distr); match &query { - Query::Insert(Insert::Values { table: t, values }) => { + Query::Insert(Insert::Values { table: t, values }) + if t == &table.name && values.iter().any(|v| predicate.test(v, table)) => + { // A row that holds for the predicate will not be inserted. - if t == &table.name && values.iter().any(|v| predicate.test(v, table)) { - return None; - } + None } Query::Insert(Insert::Select { table: t, select: _, - }) => { + }) if t == &table.name => { // A row that holds for the predicate will not be inserted. - if t == &table.name { - return None; - } + None } - Query::Update(Update { table: t, .. }) => { + Query::Update(Update { table: t, .. }) if t == &table.name => { // A row that holds for the predicate will not be updated. - if t == &table.name { - return None; - } + None } - Query::Create(Create { table: t }) => { + Query::Create(Create { table: t }) if t.name == table.name => { // There will be no errors in the middle interactions. // - Creating the same table is an error - if t.name == table.name { - return None; - } + None } - _ => (), + Query::Drop(Drop { table: t }) if *t == table.name => { + // Cannot Drop the same table + None + } + _ => Some(query), } - Some(query) }); Property::DeleteSelect { From 7eb504baefcfc71b6e00c8445773571151a5a394 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 6 Oct 2025 14:00:38 -0300 Subject: [PATCH 055/428] certain properties cannot be generated if there are no tables in the current context --- simulator/generation/plan.rs | 3 +- simulator/generation/property.rs | 48 ++++++++++++++++++++++++-------- simulator/generation/query.rs | 2 +- 3 files changed, 39 insertions(+), 14 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 05373312f..3e6e1ab9e 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -1097,8 +1097,7 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { let queries = possible_queries(conn_ctx.tables()); let query_distr = QueryDistribution::new(queries, &remaining_); - let property_distr = - PropertyDistribution::new(env, &remaining_, &query_distr, conn_ctx.opts()); + let property_distr = PropertyDistribution::new(env, &remaining_, &query_distr, conn_ctx); frequency( vec![ diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 3fef90b51..b82eab8bf 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1248,6 +1248,7 @@ fn property_insert_values_select( ctx: &impl GenerationContext, mvcc: bool, ) -> Property { + assert!(!ctx.tables().is_empty()); // Get a random table let table = pick(ctx.tables(), rng); // Generate rows to insert @@ -1373,6 +1374,7 @@ fn property_table_has_expected_content( ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { + assert!(!ctx.tables().is_empty()); // Get a random table let table = pick(ctx.tables(), rng); Property::TableHasExpectedContent { @@ -1386,6 +1388,7 @@ fn property_select_limit( ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { + assert!(!ctx.tables().is_empty()); // Get a random table let table = pick(ctx.tables(), rng); // Select the table @@ -1442,6 +1445,7 @@ fn property_delete_select( ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { + assert!(!ctx.tables().is_empty()); // Get a random table let table = pick(ctx.tables(), rng); // Generate a random predicate @@ -1503,6 +1507,7 @@ fn property_drop_select( ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { + assert!(!ctx.tables().is_empty()); // Get a random table let table = pick(ctx.tables(), rng); @@ -1545,6 +1550,7 @@ fn property_select_select_optimizer( ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { + assert!(!ctx.tables().is_empty()); // Get a random table let table = pick(ctx.tables(), rng); // Generate a random predicate @@ -1568,6 +1574,7 @@ fn property_where_true_false_null( ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { + assert!(!ctx.tables().is_empty()); // Get a random table let table = pick(ctx.tables(), rng); // Generate a random predicate @@ -1589,6 +1596,7 @@ fn property_union_all_preserves_cardinality( ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { + assert!(!ctx.tables().is_empty()); // Get a random table let table = pick(ctx.tables(), rng); // Generate a random predicate @@ -1663,10 +1671,16 @@ impl PropertyDiscriminants { } } - pub fn weight(&self, env: &SimulatorEnv, remaining: &Remaining, opts: &Opts) -> u32 { + pub fn weight( + &self, + env: &SimulatorEnv, + remaining: &Remaining, + ctx: &impl GenerationContext, + ) -> u32 { + let opts = ctx.opts(); match self { PropertyDiscriminants::InsertValuesSelect => { - if !env.opts.disable_insert_values_select { + if !env.opts.disable_insert_values_select && !ctx.tables().is_empty() { u32::min(remaining.select, remaining.insert).max(1) } else { 0 @@ -1675,7 +1689,13 @@ impl PropertyDiscriminants { PropertyDiscriminants::ReadYourUpdatesBack => { u32::min(remaining.select, remaining.insert).max(1) } - PropertyDiscriminants::TableHasExpectedContent => remaining.select.max(1), + PropertyDiscriminants::TableHasExpectedContent => { + if !ctx.tables().is_empty() { + remaining.select.max(1) + } else { + 0 + } + } PropertyDiscriminants::DoubleCreateFailure => { if !env.opts.disable_double_create_failure { remaining.create / 2 @@ -1684,42 +1704,48 @@ impl PropertyDiscriminants { } } PropertyDiscriminants::SelectLimit => { - if !env.opts.disable_select_limit { + if !env.opts.disable_select_limit && !ctx.tables().is_empty() { remaining.select } else { 0 } } PropertyDiscriminants::DeleteSelect => { - if !env.opts.disable_delete_select { + if !env.opts.disable_delete_select && !ctx.tables().is_empty() { u32::min(remaining.select, remaining.insert).min(remaining.delete) } else { 0 } } PropertyDiscriminants::DropSelect => { - if !env.opts.disable_drop_select { + if !env.opts.disable_drop_select && !ctx.tables().is_empty() { remaining.drop } else { 0 } } PropertyDiscriminants::SelectSelectOptimizer => { - if !env.opts.disable_select_optimizer { + if !env.opts.disable_select_optimizer && !ctx.tables().is_empty() { remaining.select / 2 } else { 0 } } PropertyDiscriminants::WhereTrueFalseNull => { - if opts.indexes && !env.opts.disable_where_true_false_null { + if opts.indexes + && !env.opts.disable_where_true_false_null + && !ctx.tables().is_empty() + { remaining.select / 2 } else { 0 } } PropertyDiscriminants::UNIONAllPreservesCardinality => { - if opts.indexes && !env.opts.disable_union_all_preserves_cardinality { + if opts.indexes + && !env.opts.disable_union_all_preserves_cardinality + && !ctx.tables().is_empty() + { remaining.select / 3 } else { 0 @@ -1803,13 +1829,13 @@ impl<'a> PropertyDistribution<'a> { env: &SimulatorEnv, remaining: &Remaining, query_distr: &'a QueryDistribution, - opts: &Opts, + ctx: &impl GenerationContext, ) -> Self { let properties = PropertyDiscriminants::can_generate(query_distr.items()); let weights = WeightedIndex::new( properties .iter() - .map(|property| property.weight(env, remaining, opts)), + .map(|property| property.weight(env, remaining, ctx)), ) .unwrap(); diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index a78df68c6..068e4f5e7 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -29,7 +29,7 @@ fn random_create(rng: &mut R, conn_ctx: &impl GenerationC } fn random_select(rng: &mut R, conn_ctx: &impl GenerationContext) -> Query { - if rng.random_bool(0.7) { + if !conn_ctx.tables().is_empty() && rng.random_bool(0.7) { Query::Select(Select::arbitrary(rng, conn_ctx)) } else { // Random expression From 6d5443d4f02b46bb7764b1c43060b347d41ee899 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 6 Oct 2025 14:20:56 -0300 Subject: [PATCH 056/428] add Query::Placeholder --- simulator/generation/plan.rs | 6 ++++++ simulator/generation/query.rs | 6 ++++++ simulator/model/mod.rs | 10 +++++++++- simulator/runner/execution.rs | 3 +++ 4 files changed, 24 insertions(+), 1 deletion(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 3e6e1ab9e..12935d3d3 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -195,6 +195,7 @@ impl InteractionPlan { Query::Begin(_) => stats.begin_count += 1, Query::Commit(_) => stats.commit_count += 1, Query::Rollback(_) => stats.rollback_count += 1, + Query::Placeholder => {} } } for interactions in &self.plan { @@ -766,6 +767,11 @@ impl InteractionType { pub(crate) fn execute_query(&self, conn: &mut Arc) -> ResultSet { if let Self::Query(query) = self { + assert!( + !matches!(query, Query::Placeholder), + "simulation cannot have a placeholder Query for execution" + ); + let query_str = query.to_string(); let rows = conn.query(&query_str); if rows.is_err() { diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index 068e4f5e7..3408dca3b 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -111,6 +111,9 @@ impl QueryDiscriminants { | QueryDiscriminants::Rollback => { unreachable!("transactional queries should not be generated") } + QueryDiscriminants::Placeholder => { + unreachable!("Query Placeholders should not be generated") + } } } @@ -128,6 +131,9 @@ impl QueryDiscriminants { | QueryDiscriminants::Rollback => { unreachable!("transactional queries should not be generated") } + QueryDiscriminants::Placeholder => { + unreachable!("Query Placeholders should not be generated") + } } } } diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 4af61b353..9e3d29db2 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -32,6 +32,8 @@ pub enum Query { Begin(Begin), Commit(Commit), Rollback(Rollback), + /// Placeholder query that still needs to be generated + Placeholder, } impl Query { @@ -70,6 +72,7 @@ impl Query { IndexSet::from_iter([table_name.clone()]) } Query::Begin(_) | Query::Commit(_) | Query::Rollback(_) => IndexSet::new(), + Query::Placeholder => IndexSet::new(), } } pub fn uses(&self) -> Vec { @@ -83,6 +86,7 @@ impl Query { | Query::Drop(Drop { table, .. }) => vec![table.clone()], Query::CreateIndex(CreateIndex { table_name, .. }) => vec![table_name.clone()], Query::Begin(..) | Query::Commit(..) | Query::Rollback(..) => vec![], + Query::Placeholder => vec![], } } @@ -116,6 +120,7 @@ impl Display for Query { Self::Begin(begin) => write!(f, "{begin}"), Self::Commit(commit) => write!(f, "{commit}"), Self::Rollback(rollback) => write!(f, "{rollback}"), + Self::Placeholder => Ok(()), } } } @@ -124,7 +129,6 @@ impl Shadow for Query { type Result = anyhow::Result>>; fn shadow(&self, env: &mut ShadowTablesMut) -> Self::Result { - tracing::info!("SHADOW {:?}", self); match self { Query::Create(create) => create.shadow(env), Query::Insert(insert) => insert.shadow(env), @@ -136,6 +140,7 @@ impl Shadow for Query { Query::Begin(begin) => Ok(begin.shadow(env)), Query::Commit(commit) => Ok(commit.shadow(env)), Query::Rollback(rollback) => Ok(rollback.shadow(env)), + Query::Placeholder => Ok(vec![]), } } } @@ -182,6 +187,9 @@ impl From for QueryCapabilities { | QueryDiscriminants::Rollback => { unreachable!("QueryCapabilities do not apply to transaction queries") } + QueryDiscriminants::Placeholder => { + unreachable!("QueryCapabilities do not apply to query Placeholder") + } } } } diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs index e877a972f..e3cfef375 100644 --- a/simulator/runner/execution.rs +++ b/simulator/runner/execution.rs @@ -368,6 +368,9 @@ fn execute_query_rusqlite( } Ok(result) } + Query::Placeholder => { + unreachable!("simulation cannot have a placeholder Query for execution") + } _ => { connection.execute(query.to_string().as_str(), ())?; Ok(vec![]) From e9ccdf15d8dcb96019b479a2056d1d2d6b6a7bd8 Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Tue, 7 Oct 2025 23:52:03 +0900 Subject: [PATCH 057/428] Apply lint --- bindings/java/src/main/java/tech/turso/JDBC.java | 4 +--- .../java/src/main/java/tech/turso/jdbc4/JDBC4Connection.java | 4 +--- .../src/main/java/tech/turso/jdbc4/JDBC4DatabaseMetaData.java | 4 +--- .../main/java/tech/turso/jdbc4/JDBC4PreparedStatement.java | 4 +--- .../java/src/main/java/tech/turso/jdbc4/JDBC4ResultSet.java | 4 +--- .../java/src/main/java/tech/turso/jdbc4/JDBC4Statement.java | 4 +--- 6 files changed, 6 insertions(+), 18 deletions(-) diff --git a/bindings/java/src/main/java/tech/turso/JDBC.java b/bindings/java/src/main/java/tech/turso/JDBC.java index 360098d91..904ece6c3 100644 --- a/bindings/java/src/main/java/tech/turso/JDBC.java +++ b/bindings/java/src/main/java/tech/turso/JDBC.java @@ -10,9 +10,7 @@ import tech.turso.jdbc4.JDBC4Connection; import tech.turso.utils.Logger; import tech.turso.utils.LoggerFactory; -/** - * Turso JDBC driver implementation. - */ +/** Turso JDBC driver implementation. */ public final class JDBC implements Driver { private static final Logger logger = LoggerFactory.getLogger(JDBC.class); diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Connection.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Connection.java index 172dd3940..1629a52ce 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Connection.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Connection.java @@ -9,9 +9,7 @@ import tech.turso.annotations.SkipNullableCheck; import tech.turso.core.TursoConnection; import tech.turso.core.TursoStatement; -/** - * JDBC 4 Connection implementation for Turso databases. - */ +/** JDBC 4 Connection implementation for Turso databases. */ public final class JDBC4Connection implements Connection { private final TursoConnection connection; diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4DatabaseMetaData.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4DatabaseMetaData.java index 3dcc49851..97f455b0a 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4DatabaseMetaData.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4DatabaseMetaData.java @@ -13,9 +13,7 @@ import tech.turso.core.TursoPropertiesHolder; import tech.turso.utils.Logger; import tech.turso.utils.LoggerFactory; -/** - * JDBC 4 DatabaseMetaData implementation for Turso databases. - */ +/** JDBC 4 DatabaseMetaData implementation for Turso databases. */ public final class JDBC4DatabaseMetaData implements DatabaseMetaData { private static final Logger logger = LoggerFactory.getLogger(JDBC4DatabaseMetaData.class); diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4PreparedStatement.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4PreparedStatement.java index 4b777e710..6eba787b5 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4PreparedStatement.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4PreparedStatement.java @@ -26,9 +26,7 @@ import java.util.Calendar; import tech.turso.annotations.SkipNullableCheck; import tech.turso.core.TursoResultSet; -/** - * JDBC 4 PreparedStatement implementation for Turso databases. - */ +/** JDBC 4 PreparedStatement implementation for Turso databases. */ public final class JDBC4PreparedStatement extends JDBC4Statement implements PreparedStatement { private final String sql; diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4ResultSet.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4ResultSet.java index 448015c52..8ad970496 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4ResultSet.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4ResultSet.java @@ -27,9 +27,7 @@ import tech.turso.annotations.Nullable; import tech.turso.annotations.SkipNullableCheck; import tech.turso.core.TursoResultSet; -/** - * JDBC 4 ResultSet implementation for Turso databases. - */ +/** JDBC 4 ResultSet implementation for Turso databases. */ public final class JDBC4ResultSet implements ResultSet, ResultSetMetaData { private final TursoResultSet resultSet; diff --git a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Statement.java b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Statement.java index b96eb1cda..8f1f50da3 100644 --- a/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Statement.java +++ b/bindings/java/src/main/java/tech/turso/jdbc4/JDBC4Statement.java @@ -17,9 +17,7 @@ import tech.turso.annotations.SkipNullableCheck; import tech.turso.core.TursoResultSet; import tech.turso.core.TursoStatement; -/** - * JDBC 4 Statement implementation for Turso databases. - */ +/** JDBC 4 Statement implementation for Turso databases. */ public class JDBC4Statement implements Statement { private static final Pattern BATCH_COMPATIBLE_PATTERN = From d90d0f3f9f697563d6833c68cbeda8ab181d081d Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Tue, 7 Oct 2025 23:52:12 +0900 Subject: [PATCH 058/428] Separate publish.gradle.kts from build.gradle.kts --- bindings/java/build.gradle.kts | 244 +---------------------- bindings/java/gradle/publish.gradle.kts | 250 ++++++++++++++++++++++++ 2 files changed, 253 insertions(+), 241 deletions(-) create mode 100644 bindings/java/gradle/publish.gradle.kts diff --git a/bindings/java/build.gradle.kts b/bindings/java/build.gradle.kts index 0def7e18d..4fb7a704e 100644 --- a/bindings/java/build.gradle.kts +++ b/bindings/java/build.gradle.kts @@ -2,7 +2,6 @@ import net.ltgt.gradle.errorprone.CheckSeverity import net.ltgt.gradle.errorprone.errorprone import org.gradle.api.tasks.testing.logging.TestExceptionFormat import org.gradle.api.tasks.testing.logging.TestLogEvent -import java.security.MessageDigest plugins { java @@ -16,6 +15,9 @@ plugins { id("com.diffplug.spotless") version "6.13.0" } +// Apply publishing configuration +apply(from = "gradle/publish.gradle.kts") + // Helper function to read properties with defaults fun prop(key: String, default: String? = null): String? = findProperty(key)?.toString() ?: default @@ -39,246 +41,6 @@ tasks.withType { } } -publishing { - publications { - create("mavenJava") { - from(components["java"]) - groupId = prop("projectGroup")!! - artifactId = prop("projectArtifactId")!! - version = prop("projectVersion")!! - - pom { - name.set(prop("pomName")) - description.set(prop("pomDescription")) - url.set(prop("pomUrl")) - - licenses { - license { - name.set(prop("pomLicenseName")) - url.set(prop("pomLicenseUrl")) - } - } - - developers { - developer { - id.set(prop("pomDeveloperId")) - name.set(prop("pomDeveloperName")) - email.set(prop("pomDeveloperEmail")) - } - } - - scm { - connection.set(prop("pomScmConnection")) - developerConnection.set(prop("pomScmDeveloperConnection")) - url.set(prop("pomScmUrl")) - } - } - } - } -} - -signing { - // Make signing required for publishing - setRequired(true) - - // For CI/GitHub Actions: use in-memory keys - val signingKey = providers.environmentVariable("GPG_PRIVATE_KEY").orNull - val signingPassword = providers.environmentVariable("GPG_PASSPHRASE").orNull - - if (signingKey != null && signingPassword != null) { - // CI mode: use in-memory keys - useInMemoryPgpKeys(signingKey, signingPassword) - } else { - // Local mode: use GPG command from system - useGpgCmd() - } - - sign(publishing.publications["mavenJava"]) -} - -// Helper task to generate checksums -val generateChecksums by tasks.registering { - dependsOn("jar", "sourcesJar", "javadocJar", "generatePomFileForMavenJavaPublication") - - val checksumDir = layout.buildDirectory.dir("checksums") - - doLast { - val files = listOf( - tasks.jar.get().archiveFile.get().asFile, - tasks.named("sourcesJar").get().outputs.files.singleFile, - tasks.named("javadocJar").get().outputs.files.singleFile, - layout.buildDirectory.file("publications/mavenJava/pom-default.xml").get().asFile - ) - - checksumDir.get().asFile.mkdirs() - - files.forEach { file -> - if (file.exists()) { - // MD5 - val md5 = MessageDigest.getInstance("MD5") - .digest(file.readBytes()) - .joinToString("") { "%02x".format(it) } - file("${file.absolutePath}.md5").writeText(md5) - - // SHA1 - val sha1 = MessageDigest.getInstance("SHA-1") - .digest(file.readBytes()) - .joinToString("") { "%02x".format(it) } - file("${file.absolutePath}.sha1").writeText(sha1) - } - } - } -} - -// Task to create a bundle zip for Maven Central Portal -val createMavenCentralBundle by tasks.registering(Zip::class) { - group = "publishing" - description = "Creates a bundle zip for Maven Central Portal upload" - - dependsOn("generatePomFileForMavenJavaPublication", "jar", "sourcesJar", "javadocJar", "signMavenJavaPublication", generateChecksums) - - // Ensure signing happens before bundle creation - mustRunAfter("signMavenJavaPublication") - - val groupId = prop("projectGroup")!!.replace(".", "/") - val artifactId = prop("projectArtifactId")!! - val projectVer = version.toString() - - // Validate version is not SNAPSHOT for Maven Central - doFirst { - if (projectVer.contains("SNAPSHOT")) { - throw GradleException( - "Cannot publish SNAPSHOT version to Maven Central. " + - "Please change projectVersion in gradle.properties to a release version (e.g., 0.0.1)" - ) - } - } - - archiveFileName.set("$artifactId-$projectVer-bundle.zip") - destinationDirectory.set(layout.buildDirectory.dir("maven-central")) - - // Maven Central expects files in groupId/artifactId/version/ structure - val basePath = "$groupId/$artifactId/$projectVer" - - // Main JAR + checksums + signature - from(tasks.jar.get().archiveFile) { - into(basePath) - rename { "$artifactId-$projectVer.jar" } - } - from(tasks.jar.get().archiveFile.get().asFile.absolutePath + ".md5") { - into(basePath) - rename { "$artifactId-$projectVer.jar.md5" } - } - from(tasks.jar.get().archiveFile.get().asFile.absolutePath + ".sha1") { - into(basePath) - rename { "$artifactId-$projectVer.jar.sha1" } - } - - // Sources JAR + checksums + signature - from(tasks.named("sourcesJar").get().outputs.files) { - into(basePath) - rename { "$artifactId-$projectVer-sources.jar" } - } - from(tasks.named("sourcesJar").get().outputs.files.singleFile.absolutePath + ".md5") { - into(basePath) - rename { "$artifactId-$projectVer-sources.jar.md5" } - } - from(tasks.named("sourcesJar").get().outputs.files.singleFile.absolutePath + ".sha1") { - into(basePath) - rename { "$artifactId-$projectVer-sources.jar.sha1" } - } - - // Javadoc JAR + checksums + signature - from(tasks.named("javadocJar").get().outputs.files) { - into(basePath) - rename { "$artifactId-$projectVer-javadoc.jar" } - } - from(tasks.named("javadocJar").get().outputs.files.singleFile.absolutePath + ".md5") { - into(basePath) - rename { "$artifactId-$projectVer-javadoc.jar.md5" } - } - from(tasks.named("javadocJar").get().outputs.files.singleFile.absolutePath + ".sha1") { - into(basePath) - rename { "$artifactId-$projectVer-javadoc.jar.sha1" } - } - - // POM + checksums + signature - from(layout.buildDirectory.file("publications/mavenJava/pom-default.xml")) { - into(basePath) - rename { "$artifactId-$projectVer.pom" } - } - from(layout.buildDirectory.file("publications/mavenJava/pom-default.xml").get().asFile.absolutePath + ".md5") { - into(basePath) - rename { "$artifactId-$projectVer.pom.md5" } - } - from(layout.buildDirectory.file("publications/mavenJava/pom-default.xml").get().asFile.absolutePath + ".sha1") { - into(basePath) - rename { "$artifactId-$projectVer.pom.sha1" } - } - - // Signature files - get them from the signing task outputs - doFirst { - val signingTask = tasks.named("signMavenJavaPublication").get() - logger.lifecycle("Signing task outputs: ${signingTask.outputs.files.files}") - } - - // Include signature files generated by the signing plugin - from(tasks.named("signMavenJavaPublication").get().outputs.files) { - into(basePath) - include("*.jar.asc", "pom-default.xml.asc") - exclude("module.json.asc") // Exclude gradle module metadata signature - rename { name -> - // Only rename the POM signature file - // JAR signatures are already correctly named by the signing plugin - if (name == "pom-default.xml.asc") { - "$artifactId-$projectVer.pom.asc" - } else { - name // Keep original name (already correct) - } - } - } -} - -// Task to upload bundle to Maven Central Portal -tasks.register("publishToMavenCentral") { - group = "publishing" - description = "Publishes artifacts to Maven Central Portal" - - // Run publish first to generate signatures, then create bundle - dependsOn("publish") - dependsOn(createMavenCentralBundle) - - // Make sure bundle creation happens after publish - createMavenCentralBundle.get().mustRunAfter("publish") - - doLast { - val username = providers.environmentVariable("MAVEN_CENTRAL_USERNAME").orNull - val password = providers.environmentVariable("MAVEN_CENTRAL_PASSWORD").orNull - val bundleFile = createMavenCentralBundle.get().archiveFile.get().asFile - - require(username != null) { "MAVEN_CENTRAL_USERNAME environment variable must be set" } - require(password != null) { "MAVEN_CENTRAL_PASSWORD environment variable must be set" } - require(bundleFile.exists()) { "Bundle file does not exist: ${bundleFile.absolutePath}" } - - logger.lifecycle("Uploading bundle to Maven Central Portal...") - logger.lifecycle("Bundle: ${bundleFile.absolutePath}") - logger.lifecycle("Size: ${bundleFile.length() / 1024} KB") - - // Use curl for uploading (simple and available on most systems) - exec { - commandLine( - "curl", - "-X", "POST", - "-u", "$username:$password", - "-F", "bundle=@${bundleFile.absolutePath}", - "https://central.sonatype.com/api/v1/publisher/upload?name=${bundleFile.name}&publishingType=AUTOMATIC" - ) - } - - logger.lifecycle("Upload completed. Check https://central.sonatype.com/publishing for status.") - } -} - repositories { mavenCentral() } diff --git a/bindings/java/gradle/publish.gradle.kts b/bindings/java/gradle/publish.gradle.kts new file mode 100644 index 000000000..38f797305 --- /dev/null +++ b/bindings/java/gradle/publish.gradle.kts @@ -0,0 +1,250 @@ +import java.security.MessageDigest +import org.gradle.api.publish.PublishingExtension +import org.gradle.api.publish.maven.MavenPublication +import org.gradle.plugins.signing.SigningExtension + +// Helper function to read properties with defaults +fun prop(key: String, default: String? = null): String? = + project.findProperty(key)?.toString() ?: default + +// Maven Publishing Configuration +configure { + publications { + create("mavenJava") { + from(components["java"]) + groupId = prop("projectGroup")!! + artifactId = prop("projectArtifactId")!! + version = prop("projectVersion")!! + + pom { + name.set(prop("pomName")) + description.set(prop("pomDescription")) + url.set(prop("pomUrl")) + + licenses { + license { + name.set(prop("pomLicenseName")) + url.set(prop("pomLicenseUrl")) + } + } + + developers { + developer { + id.set(prop("pomDeveloperId")) + name.set(prop("pomDeveloperName")) + email.set(prop("pomDeveloperEmail")) + } + } + + scm { + connection.set(prop("pomScmConnection")) + developerConnection.set(prop("pomScmDeveloperConnection")) + url.set(prop("pomScmUrl")) + } + } + } + } +} + +// GPG Signing Configuration +configure { + // Make signing required for publishing + setRequired(true) + + // For CI/GitHub Actions: use in-memory keys + val signingKey = providers.environmentVariable("GPG_PRIVATE_KEY").orNull + val signingPassword = providers.environmentVariable("GPG_PASSPHRASE").orNull + + if (signingKey != null && signingPassword != null) { + // CI mode: use in-memory keys + useInMemoryPgpKeys(signingKey, signingPassword) + } else { + // Local mode: use GPG command from system + useGpgCmd() + } + + sign(the().publications["mavenJava"]) +} + +// Helper task to generate checksums +val generateChecksums by tasks.registering { + dependsOn("jar", "sourcesJar", "javadocJar", "generatePomFileForMavenJavaPublication") + + val checksumDir = layout.buildDirectory.dir("checksums") + + doLast { + val files = listOf( + tasks.named("jar").get().outputs.files.singleFile, + tasks.named("sourcesJar").get().outputs.files.singleFile, + tasks.named("javadocJar").get().outputs.files.singleFile, + layout.buildDirectory.file("publications/mavenJava/pom-default.xml").get().asFile + ) + + checksumDir.get().asFile.mkdirs() + + files.forEach { file -> + if (file.exists()) { + // MD5 + val md5 = MessageDigest.getInstance("MD5") + .digest(file.readBytes()) + .joinToString("") { "%02x".format(it) } + file("${file.absolutePath}.md5").writeText(md5) + + // SHA1 + val sha1 = MessageDigest.getInstance("SHA-1") + .digest(file.readBytes()) + .joinToString("") { "%02x".format(it) } + file("${file.absolutePath}.sha1").writeText(sha1) + } + } + } +} + +// Task to create a bundle zip for Maven Central Portal +val createMavenCentralBundle by tasks.registering(Zip::class) { + group = "publishing" + description = "Creates a bundle zip for Maven Central Portal upload" + + dependsOn("generatePomFileForMavenJavaPublication", "jar", "sourcesJar", "javadocJar", "signMavenJavaPublication", generateChecksums) + + // Ensure signing happens before bundle creation + mustRunAfter("signMavenJavaPublication") + + val groupId = prop("projectGroup")!!.replace(".", "/") + val artifactId = prop("projectArtifactId")!! + val projectVer = project.version.toString() + + // Validate version is not SNAPSHOT for Maven Central + doFirst { + if (projectVer.contains("SNAPSHOT")) { + throw GradleException( + "Cannot publish SNAPSHOT version to Maven Central. " + + "Please change projectVersion in gradle.properties to a release version (e.g., 0.0.1)" + ) + } + } + + archiveFileName.set("$artifactId-$projectVer-bundle.zip") + destinationDirectory.set(layout.buildDirectory.dir("maven-central")) + + // Maven Central expects files in groupId/artifactId/version/ structure + val basePath = "$groupId/$artifactId/$projectVer" + + // Main JAR + checksums + signature + from(tasks.named("jar").get().outputs.files) { + into(basePath) + rename { "$artifactId-$projectVer.jar" } + } + from(tasks.named("jar").get().outputs.files.singleFile.absolutePath + ".md5") { + into(basePath) + rename { "$artifactId-$projectVer.jar.md5" } + } + from(tasks.named("jar").get().outputs.files.singleFile.absolutePath + ".sha1") { + into(basePath) + rename { "$artifactId-$projectVer.jar.sha1" } + } + + // Sources JAR + checksums + signature + from(tasks.named("sourcesJar").get().outputs.files) { + into(basePath) + rename { "$artifactId-$projectVer-sources.jar" } + } + from(tasks.named("sourcesJar").get().outputs.files.singleFile.absolutePath + ".md5") { + into(basePath) + rename { "$artifactId-$projectVer-sources.jar.md5" } + } + from(tasks.named("sourcesJar").get().outputs.files.singleFile.absolutePath + ".sha1") { + into(basePath) + rename { "$artifactId-$projectVer-sources.jar.sha1" } + } + + // Javadoc JAR + checksums + signature + from(tasks.named("javadocJar").get().outputs.files) { + into(basePath) + rename { "$artifactId-$projectVer-javadoc.jar" } + } + from(tasks.named("javadocJar").get().outputs.files.singleFile.absolutePath + ".md5") { + into(basePath) + rename { "$artifactId-$projectVer-javadoc.jar.md5" } + } + from(tasks.named("javadocJar").get().outputs.files.singleFile.absolutePath + ".sha1") { + into(basePath) + rename { "$artifactId-$projectVer-javadoc.jar.sha1" } + } + + // POM + checksums + signature + from(layout.buildDirectory.file("publications/mavenJava/pom-default.xml")) { + into(basePath) + rename { "$artifactId-$projectVer.pom" } + } + from(layout.buildDirectory.file("publications/mavenJava/pom-default.xml").get().asFile.absolutePath + ".md5") { + into(basePath) + rename { "$artifactId-$projectVer.pom.md5" } + } + from(layout.buildDirectory.file("publications/mavenJava/pom-default.xml").get().asFile.absolutePath + ".sha1") { + into(basePath) + rename { "$artifactId-$projectVer.pom.sha1" } + } + + // Signature files - get them from the signing task outputs + doFirst { + val signingTask = tasks.named("signMavenJavaPublication").get() + logger.lifecycle("Signing task outputs: ${signingTask.outputs.files.files}") + } + + // Include signature files generated by the signing plugin + from(tasks.named("signMavenJavaPublication").get().outputs.files) { + into(basePath) + include("*.jar.asc", "pom-default.xml.asc") + exclude("module.json.asc") // Exclude gradle module metadata signature + rename { name -> + // Only rename the POM signature file + // JAR signatures are already correctly named by the signing plugin + if (name == "pom-default.xml.asc") { + "$artifactId-$projectVer.pom.asc" + } else { + name // Keep original name (already correct) + } + } + } +} + +// Task to upload bundle to Maven Central Portal +tasks.register("publishToMavenCentral") { + group = "publishing" + description = "Publishes artifacts to Maven Central Portal" + + // Run publish first to generate signatures, then create bundle + dependsOn("publish") + dependsOn(createMavenCentralBundle) + + // Make sure bundle creation happens after publish + createMavenCentralBundle.get().mustRunAfter("publish") + + doLast { + val username = providers.environmentVariable("MAVEN_CENTRAL_USERNAME").orNull + val password = providers.environmentVariable("MAVEN_CENTRAL_PASSWORD").orNull + val bundleFile = createMavenCentralBundle.get().archiveFile.get().asFile + + require(username != null) { "MAVEN_CENTRAL_USERNAME environment variable must be set" } + require(password != null) { "MAVEN_CENTRAL_PASSWORD environment variable must be set" } + require(bundleFile.exists()) { "Bundle file does not exist: ${bundleFile.absolutePath}" } + + logger.lifecycle("Uploading bundle to Maven Central Portal...") + logger.lifecycle("Bundle: ${bundleFile.absolutePath}") + logger.lifecycle("Size: ${bundleFile.length() / 1024} KB") + + // Use curl for uploading (simple and available on most systems) + exec { + commandLine( + "curl", + "-X", "POST", + "-u", "$username:$password", + "-F", "bundle=@${bundleFile.absolutePath}", + "https://central.sonatype.com/api/v1/publisher/upload?name=${bundleFile.name}&publishingType=AUTOMATIC" + ) + } + + logger.lifecycle("Upload completed. Check https://central.sonatype.com/publishing for status.") + } +} From 6bad5d04ce323b79290529b45c77ba99347288aa Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 6 Oct 2025 14:28:03 -0300 Subject: [PATCH 059/428] generate extensional queries when iterating over the next interaction, not when generating the property. This is necessary as the extensional queries can modify schema and thus could cause the next queries to fail because the DB enviroment context was not updated on generation time. Rule of thumb: queries should never be generated in bulk, always one a a time so the enviroment can be shadowed accordingly --- simulator/generation/plan.rs | 157 +++++++++++-- simulator/generation/property.rs | 328 +++++++++++++++++---------- sql_generation/model/query/insert.rs | 7 + 3 files changed, 343 insertions(+), 149 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 12935d3d3..df68a17b8 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -293,16 +293,7 @@ impl InteractionPlan { self.push(interactions); Some(out_interactions) } else { - // after we generated all interactions if some connection is still in a transaction, commit - (0..env.connections.len()) - .find(|idx| env.conn_in_transaction(*idx)) - .map(|conn_index| { - let query = Query::Commit(Commit); - let interaction = Interactions::new(conn_index, InteractionsType::Query(query)); - let out_interactions = interaction.interactions(); - self.push(interaction); - out_interactions - }) + None } } @@ -314,6 +305,7 @@ impl InteractionPlan { let iter = interactions.into_iter(); PlanGenerator { plan: self, + peek: None, iter, rng, } @@ -383,28 +375,145 @@ impl InteractionPlanIterator for &mut T { pub struct PlanGenerator<'a, R: rand::Rng> { plan: &'a mut InteractionPlan, + peek: Option, iter: as IntoIterator>::IntoIter, rng: &'a mut R, } +impl<'a, R: rand::Rng> PlanGenerator<'a, R> { + fn next_interaction(&mut self, env: &mut SimulatorEnv) -> Option { + self.iter + .next() + .or_else(|| { + // Iterator ended, try to create a new iterator + // This will not be an infinte sequence because generate_next_interaction will eventually + // stop generating + let mut iter = self + .plan + .generate_next_interaction(self.rng, env) + .map_or(Vec::new().into_iter(), |interactions| { + interactions.into_iter() + }); + let next = iter.next(); + self.iter = iter; + + next + }) + .map(|interaction| { + // Certain properties can generate intermediate queries + // we need to generate them here and substitute + if let InteractionType::Query(Query::Placeholder) = &interaction.interaction { + let stats = self.plan.stats(); + + let remaining_ = remaining( + env.opts.max_interactions, + &env.profile.query, + &stats, + env.profile.experimental_mvcc, + ); + + let InteractionsType::Property(property) = + &mut self.plan.last_mut().unwrap().interactions + else { + unreachable!("only properties have extensional queries"); + }; + + let conn_ctx = env.connection_context(interaction.connection_index); + + let queries = possible_queries(conn_ctx.tables()); + let query_distr = QueryDistribution::new(queries, &remaining_); + + let query_gen = property.get_extensional_query_gen_function(); + + let mut count = 0; + let new_query = loop { + if count > 1_000_000 { + panic!("possible infinite loop in query generation"); + } + if let Some(new_query) = + (query_gen)(self.rng, &conn_ctx, &query_distr, property) + { + let queries = property.get_extensional_queries().unwrap(); + let query = queries + .iter_mut() + .find(|query| matches!(query, Query::Placeholder)) + .expect("Placeholder should be present in extensional queries"); + *query = new_query.clone(); + break new_query; + } + count += 1; + }; + Interaction::new( + interaction.connection_index, + InteractionType::Query(new_query), + ) + } else { + interaction + } + }) + } + + fn peek(&mut self, env: &mut SimulatorEnv) -> Option<&Interaction> { + if self.peek.is_none() { + self.peek = self.next_interaction(env); + } + self.peek.as_ref() + } +} + impl<'a, R: rand::Rng> InteractionPlanIterator for PlanGenerator<'a, R> { /// try to generate the next [Interactions] and store it fn next(&mut self, env: &mut SimulatorEnv) -> Option { - self.iter.next().or_else(|| { - // Iterator ended, try to create a new iterator - // This will not be an infinte sequence because generate_next_interaction will eventually - // stop generating - let mut iter = self - .plan - .generate_next_interaction(self.rng, env) - .map_or(Vec::new().into_iter(), |interactions| { - interactions.into_iter() - }); - let next = iter.next(); - self.iter = iter; + let mvcc = self.plan.mvcc; + match self.peek(env) { + Some(peek_interaction) => { + if mvcc && peek_interaction.is_ddl() { + // try to commit a transaction as we cannot execute DDL statements in concurrent mode - next - }) + let commit_connection = (0..env.connections.len()) + .find(|idx| env.conn_in_transaction(*idx)) + .map(|conn_index| { + let query = Query::Commit(Commit); + let interaction = Interactions::new( + conn_index, + InteractionsType::Query(query.clone()), + ); + + // Connections are queued for commit on `generate_next_interaction` if Interactions::Query or Interactions::Property produce a DDL statement. + // This means that the only way we will reach here, is if the DDL statement was created later in the extensional query of a Property + let queries = self + .plan + .last_mut() + .unwrap() + .get_extensional_queries() + .unwrap(); + queries.insert(0, query.clone()); + + self.plan.push(interaction); + + Interaction::new(conn_index, InteractionType::Query(query)) + }); + if commit_connection.is_some() { + return commit_connection; + } + } + + self.peek.take() + } + None => { + // after we generated all interactions if some connection is still in a transaction, commit + (0..env.connections.len()) + .find(|idx| env.conn_in_transaction(*idx)) + .map(|conn_index| { + let query = Query::Commit(Commit); + let interaction = + Interactions::new(conn_index, InteractionsType::Query(query)); + self.plan.push(interaction); + + Interaction::new(conn_index, InteractionType::Query(Query::Commit(Commit))) + }) + } + } } } diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index b82eab8bf..e3a2f0f8a 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -240,6 +240,9 @@ pub struct InteractiveQueryInfo { end_with_commit: bool, } +type PropertyQueryGenFunc<'a, R, G> = + fn(&mut R, &G, &QueryDistribution, &Property) -> Option; + impl Property { pub(crate) fn name(&self) -> &str { match self { @@ -276,6 +279,186 @@ impl Property { } } + pub(super) fn get_extensional_query_gen_function(&self) -> PropertyQueryGenFunc + where + R: rand::Rng + ?Sized, + G: GenerationContext, + { + match self { + Property::InsertValuesSelect { .. } => { + // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) + // - [x] The inserted row will not be deleted. + // - [x] The inserted row will not be updated. + // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented) + |rng: &mut R, ctx: &G, query_distr: &QueryDistribution, property: &Property| { + let Property::InsertValuesSelect { + insert, row_index, .. + } = property + else { + unreachable!(); + }; + let query = Query::arbitrary_from(rng, ctx, query_distr); + let table_name = insert.table(); + let table = ctx + .tables() + .iter() + .find(|table| table.name == table_name) + .unwrap(); + + let rows = insert.rows(); + let row = &rows[*row_index]; + + match &query { + Query::Delete(Delete { + table: t, + predicate, + }) if t == &table.name && predicate.test(row, table) => { + // The inserted row will not be deleted. + None + } + Query::Create(Create { table: t }) if t.name == table.name => { + // There will be no errors in the middle interactions. + // - Creating the same table is an error + None + } + Query::Update(Update { + table: t, + set_values: _, + predicate, + }) if t == &table.name && predicate.test(row, table) => { + // The inserted row will not be updated. + None + } + Query::Drop(Drop { table: t }) if *t == table.name => { + // Cannot drop the table we are inserting + None + } + _ => Some(query), + } + } + } + Property::DoubleCreateFailure { .. } => { + // The interactions in the middle has the following constraints; + // - [x] There will be no errors in the middle interactions.(best effort) + // - [ ] Table `t` will not be renamed or dropped.(todo: add this constraint once ALTER or DROP is implemented) + |rng: &mut R, ctx: &G, query_distr: &QueryDistribution, property: &Property| { + let Property::DoubleCreateFailure { create, .. } = property else { + unreachable!() + }; + + let table_name = create.table.name.clone(); + let table = ctx + .tables() + .iter() + .find(|table| table.name == table_name) + .unwrap(); + + let query = Query::arbitrary_from(rng, ctx, query_distr); + match &query { + Query::Create(Create { table: t }) if t.name == table.name => { + // There will be no errors in the middle interactions. + // - Creating the same table is an error + None + } + Query::Drop(Drop { table: t }) if *t == table.name => { + // Cannot Drop the created table + None + } + _ => Some(query), + } + } + } + Property::DeleteSelect { .. } => { + // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) + // - [x] A row that holds for the predicate will not be inserted. + // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented) + + |rng, ctx, query_distr, property| { + let Property::DeleteSelect { + table: table_name, + predicate, + .. + } = property + else { + unreachable!() + }; + + let table_name = table_name.clone(); + let table = ctx + .tables() + .iter() + .find(|table| table.name == table_name) + .unwrap(); + let query = Query::arbitrary_from(rng, ctx, query_distr); + match &query { + Query::Insert(Insert::Values { table: t, values }) + if *t == table_name + && values.iter().any(|v| predicate.test(v, table)) => + { + // A row that holds for the predicate will not be inserted. + None + } + Query::Insert(Insert::Select { + table: t, + select: _, + }) if t == &table.name => { + // A row that holds for the predicate will not be inserted. + None + } + Query::Update(Update { table: t, .. }) if t == &table.name => { + // A row that holds for the predicate will not be updated. + None + } + Query::Create(Create { table: t }) if t.name == table.name => { + // There will be no errors in the middle interactions. + // - Creating the same table is an error + None + } + Query::Drop(Drop { table: t }) if *t == table.name => { + // Cannot Drop the same table + None + } + _ => Some(query), + } + } + } + Property::DropSelect { .. } => { + // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) + // - [-] The table `t` will not be created, no table will be renamed to `t`. (todo: update this constraint once ALTER is implemented) + |rng, ctx, query_distr, property: &Property| { + let Property::DropSelect { + table: table_name, .. + } = property + else { + unreachable!() + }; + + let query = Query::arbitrary_from(rng, ctx, query_distr); + if let Query::Create(Create { table: t }) = &query + && t.name == *table_name + { + // - The table `t` will not be created + None + } else { + Some(query) + } + } + } + Property::Queries { .. } => { + unreachable!("No extensional querie generation for `Property::Queries`") + } + Property::FsyncNoWait { .. } | Property::FaultyQuery { .. } => { + unreachable!("No extensional queries") + } + Property::SelectLimit { .. } + | Property::SelectSelectOptimizer { .. } + | Property::WhereTrueFalseNull { .. } + | Property::UNIONAllPreservesCardinality { .. } + | Property::ReadYourUpdatesBack { .. } + | Property::TableHasExpectedContent { .. } => unreachable!("No extensional queries"), + } + } + /// interactions construct a list of interactions, which is an executable representation of the property. /// the requirement of property -> vec conversion emerges from the need to serialize the property, /// and `interaction` cannot be serialized directly. @@ -1244,7 +1427,7 @@ pub(crate) fn remaining( fn property_insert_values_select( rng: &mut R, - query_distr: &QueryDistribution, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, mvcc: bool, ) -> Property { @@ -1278,50 +1461,19 @@ fn property_insert_values_select( let amount = rng.random_range(0..3); - // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) - // - [x] The inserted row will not be deleted. - // - [x] The inserted row will not be updated. - // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented) - let mut queries = generate_queries(rng, ctx, amount, &[&insert_query], |rng, ctx| { - let query = Query::arbitrary_from(rng, &ctx, query_distr); - match &query { - Query::Delete(Delete { - table: t, - predicate, - }) if t == &table.name && predicate.test(&row, table) => { - // The inserted row will not be deleted. - None - } - Query::Create(Create { table: t }) if t.name == table.name => { - // There will be no errors in the middle interactions. - // - Creating the same table is an error - None - } - Query::Update(Update { - table: t, - set_values: _, - predicate, - }) if t == &table.name && predicate.test(&row, table) => { - // The inserted row will not be updated. - None - } - Query::Drop(Drop { table: t }) if *t == table.name => { - // Cannot drop the table we are inserting - None - } - _ => Some(query), - } - }); + let mut queries = Vec::with_capacity(amount + 2); + + if let Some(ref interactive) = interactive { + queries.push(Query::Begin(if interactive.start_with_immediate { + Begin::Immediate + } else { + Begin::Deferred + })); + } + + queries.extend(std::iter::repeat_n(Query::Placeholder, amount)); if let Some(ref interactive) = interactive { - queries.insert( - 0, - Query::Begin(if interactive.start_with_immediate { - Begin::Immediate - } else { - Begin::Deferred - }), - ); queries.push(if interactive.end_with_commit { Query::Commit(Commit) } else { @@ -1404,44 +1556,26 @@ fn property_select_limit( fn property_double_create_failure( rng: &mut R, - query_distr: &QueryDistribution, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { // Create the table - let create_query = Query::Create(Create::arbitrary(rng, ctx)); - let table = &create_query.as_create().table; + let create_query = Create::arbitrary(rng, ctx); let amount = rng.random_range(0..3); - // The interactions in the middle has the following constraints; - // - [x] There will be no errors in the middle interactions.(best effort) - // - [ ] Table `t` will not be renamed or dropped.(todo: add this constraint once ALTER or DROP is implemented) - let queries = generate_queries(rng, ctx, amount, &[&create_query], |rng, ctx| { - let query = Query::arbitrary_from(rng, &ctx, query_distr); - match &query { - Query::Create(Create { table: t }) if t.name == table.name => { - // There will be no errors in the middle interactions. - // - Creating the same table is an error - None - } - Query::Drop(Drop { table: t }) if *t == table.name => { - // Cannot Drop the created table - None - } - _ => Some(query), - } - }); + let queries = vec![Query::Placeholder; amount]; Property::DoubleCreateFailure { - create: create_query.unwrap_create(), + create: create_query, queries, } } fn property_delete_select( rng: &mut R, - query_distr: &QueryDistribution, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1453,46 +1587,7 @@ fn property_delete_select( let amount = rng.random_range(0..3); - let delete = Query::Delete(Delete { - predicate: predicate.clone(), - table: table.name.clone(), - }); - - // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) - // - [x] A row that holds for the predicate will not be inserted. - // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented) - let queries = generate_queries(rng, ctx, amount, &[&delete], |rng, tmp_ctx| { - let query = Query::arbitrary_from(rng, &tmp_ctx, query_distr); - match &query { - Query::Insert(Insert::Values { table: t, values }) - if t == &table.name && values.iter().any(|v| predicate.test(v, table)) => - { - // A row that holds for the predicate will not be inserted. - None - } - Query::Insert(Insert::Select { - table: t, - select: _, - }) if t == &table.name => { - // A row that holds for the predicate will not be inserted. - None - } - Query::Update(Update { table: t, .. }) if t == &table.name => { - // A row that holds for the predicate will not be updated. - None - } - Query::Create(Create { table: t }) if t.name == table.name => { - // There will be no errors in the middle interactions. - // - Creating the same table is an error - None - } - Query::Drop(Drop { table: t }) if *t == table.name => { - // Cannot Drop the same table - None - } - _ => Some(query), - } - }); + let queries = vec![Query::Placeholder; amount]; Property::DeleteSelect { table: table.name.clone(), @@ -1503,7 +1598,7 @@ fn property_delete_select( fn property_drop_select( rng: &mut R, - query_distr: &QueryDistribution, + _query_distr: &QueryDistribution, ctx: &impl GenerationContext, _mvcc: bool, ) -> Property { @@ -1511,26 +1606,9 @@ fn property_drop_select( // Get a random table let table = pick(ctx.tables(), rng); - let drop = Query::Drop(Drop { - table: table.name.clone(), - }); - let amount = rng.random_range(0..3); - // Create random queries respecting the constraints - let queries = generate_queries(rng, ctx, amount, &[&drop], |rng, tmp_ctx| { - // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) - // - [-] The table `t` will not be created, no table will be renamed to `t`. (todo: update this constraint once ALTER is implemented) - let query = Query::arbitrary_from(rng, &tmp_ctx, query_distr); - if let Query::Create(Create { table: t }) = &query - && t.name == table.name - { - // - The table `t` will not be created - None - } else { - Some(query) - } - }); + let queries = vec![Query::Placeholder; amount]; let select = Select::simple( table.name.clone(), diff --git a/sql_generation/model/query/insert.rs b/sql_generation/model/query/insert.rs index d69921388..4e5994f14 100644 --- a/sql_generation/model/query/insert.rs +++ b/sql_generation/model/query/insert.rs @@ -24,6 +24,13 @@ impl Insert { Insert::Values { table, .. } | Insert::Select { table, .. } => table, } } + + pub fn rows(&self) -> &[Vec] { + match self { + Insert::Values { values, .. } => values, + Insert::Select { .. } => unreachable!(), + } + } } impl Display for Insert { From 21fc8bae2a691141a595baa380cd9edae8a6de78 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 7 Oct 2025 12:15:57 -0300 Subject: [PATCH 060/428] `Property::FaultyQuery` and `FsyncNoWait` stored a list of tables to check the on the database. Again, the FaultyQuery could be a Drop Table which meant that we could be running a SELECT on an inexistent table. To solve this, just insert a Property that check all the tables in the db after a Faulty Property --- simulator/generation/plan.rs | 30 +++++++++++++ simulator/generation/property.rs | 72 +++++++++++++++++++++++--------- simulator/shrink/plan.rs | 19 +++------ 3 files changed, 87 insertions(+), 34 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index df68a17b8..836c24780 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -239,6 +239,28 @@ impl InteractionPlan { env: &mut SimulatorEnv, ) -> Option> { let num_interactions = env.opts.max_interactions as usize; + // If last interaction needs to check all db tables, generate the Property to do so + if let Some(i) = self.plan.last() + && i.check_tables() + { + let check_all_tables = Interactions::new( + i.connection_index, + InteractionsType::Property(Property::AllTableHaveExpectedContent { + tables: env + .connection_context(i.connection_index) + .tables() + .iter() + .map(|t| t.name.clone()) + .collect(), + }), + ); + + let out_interactions = check_all_tables.interactions(); + + self.push(check_all_tables); + return Some(out_interactions); + } + if self.len() < num_interactions { let conn_index = env.choose_conn(rng); let interactions = if self.mvcc && !env.conn_in_transaction(conn_index) { @@ -561,6 +583,14 @@ impl Interactions { InteractionsType::Query(..) | InteractionsType::Fault(..) => None, } } + + /// Whether the interaction needs to check the database tables + pub fn check_tables(&self) -> bool { + match &self.interactions { + InteractionsType::Property(property) => property.check_tables(), + InteractionsType::Query(..) | InteractionsType::Fault(..) => false, + } + } } impl Deref for Interactions { diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index e3a2f0f8a..8d6b4cb79 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -116,6 +116,17 @@ pub enum Property { TableHasExpectedContent { table: String, }, + /// AllTablesHaveExpectedContent is a property in which the table + /// must have the expected content, i.e. all the insertions and + /// updates and deletions should have been persisted in the way + /// we think they were. + /// The execution of the property is as follows + /// SELECT * FROM + /// ASSERT + /// for each table in the simulator model + AllTableHaveExpectedContent { + tables: Vec, + }, /// Double Create Failure is a property in which creating /// the same table twice leads to an error. /// The execution of the property is as follows @@ -222,11 +233,9 @@ pub enum Property { /// FsyncNoWait { query: Query, - tables: Vec, }, FaultyQuery { query: Query, - tables: Vec, }, /// Property used to subsititute a property with its queries only Queries { @@ -249,6 +258,7 @@ impl Property { Property::InsertValuesSelect { .. } => "Insert-Values-Select", Property::ReadYourUpdatesBack { .. } => "Read-Your-Updates-Back", Property::TableHasExpectedContent { .. } => "Table-Has-Expected-Content", + Property::AllTableHaveExpectedContent { .. } => "All-Tables-Have-Expected-Content", Property::DoubleCreateFailure { .. } => "Double-Create-Failure", Property::SelectLimit { .. } => "Select-Limit", Property::DeleteSelect { .. } => "Delete-Select", @@ -262,6 +272,11 @@ impl Property { } } + /// Property Does some sort of fault injection + pub fn check_tables(&self) -> bool { + matches!(self, Property::FsyncNoWait { .. } | Property::FaultyQuery { .. }) + } + pub fn get_extensional_queries(&mut self) -> Option<&mut Vec> { match self { Property::InsertValuesSelect { queries, .. } @@ -275,7 +290,8 @@ impl Property { | Property::WhereTrueFalseNull { .. } | Property::UNIONAllPreservesCardinality { .. } | Property::ReadYourUpdatesBack { .. } - | Property::TableHasExpectedContent { .. } => None, + | Property::TableHasExpectedContent { .. } + | Property::AllTableHaveExpectedContent { .. } => None, } } @@ -455,7 +471,10 @@ impl Property { | Property::WhereTrueFalseNull { .. } | Property::UNIONAllPreservesCardinality { .. } | Property::ReadYourUpdatesBack { .. } - | Property::TableHasExpectedContent { .. } => unreachable!("No extensional queries"), + | Property::TableHasExpectedContent { .. } + | Property::AllTableHaveExpectedContent { .. } => { + unreachable!("No extensional queries") + } } } @@ -464,6 +483,9 @@ impl Property { /// and `interaction` cannot be serialized directly. pub(crate) fn interactions(&self, connection_index: usize) -> Vec { match self { + Property::AllTableHaveExpectedContent { tables } => { + assert_all_table_values(tables, connection_index).collect() + } Property::TableHasExpectedContent { table } => { let table = table.to_string(); let table_name = table.clone(); @@ -1033,18 +1055,13 @@ impl Property { Interaction::new(connection_index, assertion), ] } - Property::FsyncNoWait { query, tables } => { - let checks = assert_all_table_values(tables, connection_index); - Vec::from_iter( - std::iter::once(Interaction::new( - connection_index, - InteractionType::FsyncQuery(query.clone()), - )) - .chain(checks), - ) + Property::FsyncNoWait { query } => { + vec![Interaction::new( + connection_index, + InteractionType::FsyncQuery(query.clone()), + )] } - Property::FaultyQuery { query, tables } => { - let checks = assert_all_table_values(tables, connection_index); + Property::FaultyQuery { query } => { let query_clone = query.clone(); // A fault may not occur as we first signal we want a fault injected, // then when IO is called the fault triggers. It may happen that a fault is injected @@ -1071,13 +1088,13 @@ impl Property { } }, ); - let first = [ + [ InteractionType::FaultyQuery(query.clone()), InteractionType::Assertion(assert), ] .into_iter() - .map(|i| Interaction::new(connection_index, i)); - Vec::from_iter(first.chain(checks)) + .map(|i| Interaction::new(connection_index, i)) + .collect() } Property::WhereTrueFalseNull { select, predicate } => { let assumption = InteractionType::Assumption(Assertion::new( @@ -1534,6 +1551,17 @@ fn property_table_has_expected_content( } } +fn property_all_tables_have_expected_content( + _rng: &mut R, + _query_distr: &QueryDistribution, + ctx: &impl GenerationContext, + _mvcc: bool, +) -> Property { + Property::AllTableHaveExpectedContent { + tables: ctx.tables().iter().map(|t| t.name.clone()).collect(), + } +} + fn property_select_limit( rng: &mut R, _query_distr: &QueryDistribution, @@ -1704,7 +1732,6 @@ fn property_fsync_no_wait( ) -> Property { Property::FsyncNoWait { query: Query::arbitrary_from(rng, ctx, query_distr), - tables: ctx.tables().iter().map(|t| t.name.clone()).collect(), } } @@ -1716,7 +1743,6 @@ fn property_faulty_query( ) -> Property { Property::FaultyQuery { query: Query::arbitrary_from(rng, ctx, query_distr), - tables: ctx.tables().iter().map(|t| t.name.clone()).collect(), } } @@ -1732,6 +1758,9 @@ impl PropertyDiscriminants { PropertyDiscriminants::InsertValuesSelect => property_insert_values_select, PropertyDiscriminants::ReadYourUpdatesBack => property_read_your_updates_back, PropertyDiscriminants::TableHasExpectedContent => property_table_has_expected_content, + PropertyDiscriminants::AllTableHaveExpectedContent => { + property_all_tables_have_expected_content + } PropertyDiscriminants::DoubleCreateFailure => property_double_create_failure, PropertyDiscriminants::SelectLimit => property_select_limit, PropertyDiscriminants::DeleteSelect => property_delete_select, @@ -1774,6 +1803,8 @@ impl PropertyDiscriminants { 0 } } + // AllTableHaveExpectedContent should only be generated by Properties that inject faults + PropertyDiscriminants::AllTableHaveExpectedContent => 0, PropertyDiscriminants::DoubleCreateFailure => { if !env.opts.disable_double_create_failure { remaining.create / 2 @@ -1872,6 +1903,7 @@ impl PropertyDiscriminants { QueryCapabilities::SELECT.union(QueryCapabilities::UPDATE) } PropertyDiscriminants::TableHasExpectedContent => QueryCapabilities::SELECT, + PropertyDiscriminants::AllTableHaveExpectedContent => QueryCapabilities::SELECT, PropertyDiscriminants::DoubleCreateFailure => QueryCapabilities::CREATE, PropertyDiscriminants::SelectLimit => QueryCapabilities::SELECT, PropertyDiscriminants::DeleteSelect => { diff --git a/simulator/shrink/plan.rs b/simulator/shrink/plan.rs index 93f2f1702..6da5d93e8 100644 --- a/simulator/shrink/plan.rs +++ b/simulator/shrink/plan.rs @@ -101,12 +101,6 @@ impl InteractionPlan { // Remove all properties that do not use the failing tables self.retain_mut(|interactions| { let retain = if idx == failing_interaction_index { - if let InteractionsType::Property( - Property::FsyncNoWait { tables, .. } | Property::FaultyQuery { tables, .. }, - ) = &mut interactions.interactions - { - tables.retain(|table| depending_tables.contains(table)); - } true } else { let mut has_table = interactions @@ -128,14 +122,10 @@ impl InteractionPlan { | Property::Queries { queries } => { extensional_queries.append(queries); } - Property::FsyncNoWait { tables, query } - | Property::FaultyQuery { tables, query } => { - if !query.uses().iter().any(|t| depending_tables.contains(t)) { - tables.clear(); - } else { - tables.retain(|table| depending_tables.contains(table)); - } + Property::AllTableHaveExpectedContent { tables } => { + tables.retain(|table| depending_tables.contains(table)); } + Property::FsyncNoWait { .. } | Property::FaultyQuery { .. } => {} Property::SelectLimit { .. } | Property::SelectSelectOptimizer { .. } | Property::WhereTrueFalseNull { .. } @@ -350,7 +340,8 @@ impl InteractionPlan { | Property::FaultyQuery { .. } | Property::FsyncNoWait { .. } | Property::ReadYourUpdatesBack { .. } - | Property::TableHasExpectedContent { .. } => {} + | Property::TableHasExpectedContent { .. } + | Property::AllTableHaveExpectedContent { .. } => {} } } } From c578f7ba96249e233753a4547a36624591587b29 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 7 Oct 2025 13:31:27 -0300 Subject: [PATCH 061/428] Faultless should produce any type of query, just not faulty --- simulator/profiles/mod.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/simulator/profiles/mod.rs b/simulator/profiles/mod.rs index 8c8d1f670..e4ea1dc06 100644 --- a/simulator/profiles/mod.rs +++ b/simulator/profiles/mod.rs @@ -93,11 +93,6 @@ impl Profile { }, ..Default::default() }, - query: QueryProfile { - create_table_weight: 0, - create_index_weight: 4, - ..Default::default() - }, ..Default::default() }; From 3b2583c540c67ebf8276a550df8dd17ff574ba57 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 7 Oct 2025 13:31:27 -0300 Subject: [PATCH 062/428] adjust Interaction generation to take into account possibilty of `PropertyDistribution` to have 0 Weights --- simulator/generation/plan.rs | 79 ++++++++++++++++---------------- simulator/generation/property.rs | 14 +++--- simulator/generation/query.rs | 5 +- 3 files changed, 51 insertions(+), 47 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 836c24780..a468e87ec 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -1242,46 +1242,45 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { let queries = possible_queries(conn_ctx.tables()); let query_distr = QueryDistribution::new(queries, &remaining_); - let property_distr = PropertyDistribution::new(env, &remaining_, &query_distr, conn_ctx); + #[allow(clippy::type_complexity)] + let mut choices: Vec<(u32, Box Interactions>)> = vec![ + ( + query_distr.weights().total_weight(), + Box::new(|rng: &mut R| { + Interactions::new( + conn_index, + InteractionsType::Query(Query::arbitrary_from(rng, conn_ctx, &query_distr)), + ) + }), + ), + ( + remaining_ + .select + .min(remaining_.insert) + .min(remaining_.create) + .max(1), + Box::new(|rng: &mut R| random_fault(rng, env, conn_index)), + ), + ]; - frequency( - vec![ - ( - property_distr.weights().total_weight(), - Box::new(|rng: &mut R| { - Interactions::new( - conn_index, - InteractionsType::Property(Property::arbitrary_from( - rng, - conn_ctx, - &property_distr, - )), - ) - }), - ), - ( - query_distr.weights().total_weight(), - Box::new(|rng: &mut R| { - Interactions::new( - conn_index, - InteractionsType::Query(Query::arbitrary_from( - rng, - conn_ctx, - &query_distr, - )), - ) - }), - ), - ( - remaining_ - .select - .min(remaining_.insert) - .min(remaining_.create) - .max(1), - Box::new(|rng: &mut R| random_fault(rng, env, conn_index)), - ), - ], - rng, - ) + if let Ok(property_distr) = + PropertyDistribution::new(env, &remaining_, &query_distr, conn_ctx) + { + choices.push(( + property_distr.weights().total_weight(), + Box::new(move |rng: &mut R| { + Interactions::new( + conn_index, + InteractionsType::Property(Property::arbitrary_from( + rng, + conn_ctx, + &property_distr, + )), + ) + }), + )); + }; + + frequency(choices, rng) } } diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 8d6b4cb79..9df45db2e 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -274,7 +274,10 @@ impl Property { /// Property Does some sort of fault injection pub fn check_tables(&self) -> bool { - matches!(self, Property::FsyncNoWait { .. } | Property::FaultyQuery { .. }) + matches!( + self, + Property::FsyncNoWait { .. } | Property::FaultyQuery { .. } + ) } pub fn get_extensional_queries(&mut self) -> Option<&mut Vec> { @@ -1940,21 +1943,20 @@ impl<'a> PropertyDistribution<'a> { remaining: &Remaining, query_distr: &'a QueryDistribution, ctx: &impl GenerationContext, - ) -> Self { + ) -> Result { let properties = PropertyDiscriminants::can_generate(query_distr.items()); let weights = WeightedIndex::new( properties .iter() .map(|property| property.weight(env, remaining, ctx)), - ) - .unwrap(); + )?; - Self { + Ok(Self { properties, weights, query_distr, mvcc: env.profile.experimental_mvcc, - } + }) } } diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index 3408dca3b..914b44b35 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -120,7 +120,9 @@ impl QueryDiscriminants { pub fn weight(&self, remaining: &Remaining) -> u32 { match self { QueryDiscriminants::Create => remaining.create, - QueryDiscriminants::Select => remaining.select + remaining.select / 3, // remaining.select / 3 is for the random_expr generation + // remaining.select / 3 is for the random_expr generation + // have a max of 1 so that we always generate at least a non zero weight for `QueryDistribution` + QueryDiscriminants::Select => (remaining.select + remaining.select / 3).max(1), QueryDiscriminants::Insert => remaining.insert, QueryDiscriminants::Delete => remaining.delete, QueryDiscriminants::Update => remaining.update, @@ -138,6 +140,7 @@ impl QueryDiscriminants { } } +#[derive(Debug)] pub(super) struct QueryDistribution { queries: &'static [QueryDiscriminants], weights: WeightedIndex, From 300d918040344bcfe23bba204031cd3f585542ed Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 7 Oct 2025 14:58:56 -0300 Subject: [PATCH 063/428] fix differential check for parse error --- simulator/generation/property.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 9df45db2e..47b352406 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -929,14 +929,14 @@ impl Property { format!("select query should result in an error for table '{table}'"), move |stack: &Vec, _| { let last = stack.last().unwrap(); - dbg!(last); match last { Ok(success) => Ok(Err(format!( "expected table creation to fail but it succeeded: {success:?}" ))), Err(e) => match e { - LimboError::ParseError(e) - if e.contains(&format!("no such table: {table_name}")) => + e if e + .to_string() + .contains(&format!("no such table: {table_name}")) => { Ok(Ok(())) } From b40e784903bfb0d78778a58a9daa70d7fa810db6 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 27 Sep 2025 20:45:17 -0400 Subject: [PATCH 064/428] Update COMPAT.md, add fk related opcodes --- COMPAT.md | 4 ++-- parser/src/ast.rs | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/COMPAT.md b/COMPAT.md index d3f651453..d668c6f20 100644 --- a/COMPAT.md +++ b/COMPAT.md @@ -448,8 +448,8 @@ Modifiers: | Eq | Yes | | | Expire | No | | | Explain | No | | -| FkCounter | No | | -| FkIfZero | No | | +| FkCounter | Yes | | +| FkIfZero | Yes | | | Found | Yes | | | Function | Yes | | | Ge | Yes | | diff --git a/parser/src/ast.rs b/parser/src/ast.rs index 6b69682f0..dae656cc4 100644 --- a/parser/src/ast.rs +++ b/parser/src/ast.rs @@ -1416,6 +1416,8 @@ pub enum PragmaName { Encoding, /// Current free page count. FreelistCount, + /// Enable or disable foreign key constraint enforcement + ForeignKeys, /// Run integrity check on the database file IntegrityCheck, /// `journal_mode` pragma From d04b07b8b74a28b72d55689c458616e353dbd8b8 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 27 Sep 2025 20:48:42 -0400 Subject: [PATCH 065/428] Add pragma foreign_keys and fk_if_zero and fk_counter opcodes --- core/pragma.rs | 4 +++ core/translate/pragma.rs | 25 ++++++++++++++++- core/vdbe/execute.rs | 59 ++++++++++++++++++++++++++++++++++++++++ core/vdbe/explain.rs | 20 +++++++++++++- core/vdbe/insn.rs | 16 +++++++++++ core/vdbe/mod.rs | 2 ++ 6 files changed, 124 insertions(+), 2 deletions(-) diff --git a/core/pragma.rs b/core/pragma.rs index c238134e4..8cf9a99c5 100644 --- a/core/pragma.rs +++ b/core/pragma.rs @@ -131,6 +131,10 @@ pub fn pragma_for(pragma: &PragmaName) -> Pragma { PragmaFlags::NoColumns1 | PragmaFlags::Result0, &["mvcc_checkpoint_threshold"], ), + ForeignKeys => Pragma::new( + PragmaFlags::NoColumns1 | PragmaFlags::Result0, + &["foreign_keys"], + ), } } diff --git a/core/translate/pragma.rs b/core/translate/pragma.rs index 19542adad..f08bd5a15 100644 --- a/core/translate/pragma.rs +++ b/core/translate/pragma.rs @@ -4,7 +4,7 @@ use chrono::Datelike; use std::sync::Arc; use turso_macros::match_ignore_ascii_case; -use turso_parser::ast::{self, ColumnDefinition, Expr, Literal}; +use turso_parser::ast::{self, ColumnDefinition, Expr, Literal, Name}; use turso_parser::ast::{PragmaName, QualifiedName}; use super::integrity_check::translate_integrity_check; @@ -387,6 +387,21 @@ fn update_pragma( connection.set_mvcc_checkpoint_threshold(threshold)?; Ok((program, TransactionMode::None)) } + PragmaName::ForeignKeys => { + let enabled = match &value { + Expr::Literal(Literal::Keyword(name)) | Expr::Id(name) => { + let name_bytes = name.as_bytes(); + match_ignore_ascii_case!(match name_bytes { + b"ON" | b"TRUE" | b"YES" | b"1" => true, + _ => false, + }) + } + Expr::Literal(Literal::Numeric(n)) => !matches!(n.as_str(), "0"), + _ => false, + }; + connection.set_foreign_keys(enabled); + Ok((program, TransactionMode::None)) + } } } @@ -704,6 +719,14 @@ fn query_pragma( program.add_pragma_result_column(pragma.to_string()); Ok((program, TransactionMode::None)) } + PragmaName::ForeignKeys => { + let enabled = connection.foreign_keys_enabled(); + let register = program.alloc_register(); + program.emit_int(enabled as i64, register); + program.emit_result_row(register, 1); + program.add_pragma_result_column(pragma.to_string()); + Ok((program, TransactionMode::None)) + } } } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 9a03ca9bc..4c93ded2b 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -8276,6 +8276,65 @@ fn handle_text_sum(acc: &mut Value, sum_state: &mut SumAggState, parsed_number: } } +pub fn op_fk_counter( + program: &Program, + state: &mut ProgramState, + insn: &Insn, + pager: &Arc, + mv_store: Option<&Arc>, +) -> Result { + load_insn!( + FkCounter { + increment_value, + check_abort, + }, + insn + ); + state.fk_constraint_counter = state.fk_constraint_counter.saturating_add(*increment_value); + + // If check_abort is true and counter is negative, abort with constraint error + // This shouldn't happen in well-formed bytecode but acts as a safety check + if *check_abort && state.fk_constraint_counter < 0 { + return Err(LimboError::Constraint( + "FOREIGN KEY constraint failed".into(), + )); + } + + state.pc += 1; + Ok(InsnFunctionStepResult::Step) +} + +pub fn op_fk_if_zero( + program: &Program, + state: &mut ProgramState, + insn: &Insn, + _pager: &Arc, + _mv_store: Option<&Arc>, +) -> Result { + load_insn!(FkIfZero { target_pc, if_zero }, insn); + let fk_enabled = program.connection.foreign_keys_enabled(); + + // Jump if any: + // Foreign keys are disabled globally + // p1 is true AND deferred constraint counter is zero + // p1 is false AND deferred constraint counter is non-zero + let should_jump = if !fk_enabled { + true + } else if *if_zero { + state.fk_constraint_counter == 0 + } else { + state.fk_constraint_counter != 0 + }; + + if should_jump { + state.pc = target_pc.as_offset_int(); + } else { + state.pc += 1; + } + + Ok(InsnFunctionStepResult::Step) +} + mod cmath { extern "C" { pub fn exp(x: f64) -> f64; diff --git a/core/vdbe/explain.rs b/core/vdbe/explain.rs index 5e8dde2fe..15485bab7 100644 --- a/core/vdbe/explain.rs +++ b/core/vdbe/explain.rs @@ -1804,7 +1804,25 @@ pub fn insn_to_row( 0, String::new(), ), - } + Insn::FkCounter{check_abort, increment_value} => ( + "FkCounter", + *check_abort as i32, + *increment_value as i32, + 0, + Value::build_text(""), + 0, + String::new(), + ), + Insn::FkIfZero{target_pc, if_zero } => ( + "FkIfZero", + target_pc.as_debug_int(), + *if_zero as i32, + 0, + Value::build_text(""), + 0, + String::new(), + ), + } } pub fn insn_to_row_with_comment( diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index 67e1b784d..06e392902 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -1169,6 +1169,20 @@ pub enum Insn { p2: Option, // P2: address of parent explain instruction detail: String, // P4: detail text }, + // Increment a "constraint counter" by P2 (P2 may be negative or positive). + // If P1 is non-zero, the database constraint counter is incremented (deferred foreign key constraints). + // Otherwise, if P1 is zero, the statement counter is incremented (immediate foreign key constraints). + FkCounter { + check_abort: bool, + increment_value: isize, + }, + // This opcode tests if a foreign key constraint-counter is currently zero. If so, jump to instruction P2. Otherwise, fall through to the next instruction. + // If P1 is non-zero, then the jump is taken if the database constraint-counter is zero (the one that counts deferred constraint violations). + // If P1 is zero, the jump is taken if the statement constraint-counter is zero (immediate foreign key constraint violations). + FkIfZero { + if_zero: bool, + target_pc: BranchOffset, + }, } const fn get_insn_virtual_table() -> [InsnFunction; InsnVariants::COUNT] { @@ -1335,6 +1349,8 @@ impl InsnVariants { InsnVariants::MemMax => execute::op_mem_max, InsnVariants::Sequence => execute::op_sequence, InsnVariants::SequenceTest => execute::op_sequence_test, + InsnVariants::FkCounter => execute::op_fk_counter, + InsnVariants::FkIfZero => execute::op_fk_if_zero, } } } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 16695bd0f..4c558a2cc 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -313,6 +313,7 @@ pub struct ProgramState { /// This is used when statement in auto-commit mode reseted after previous uncomplete execution - in which case we may need to rollback transaction started on previous attempt /// Note, that MVCC transactions are always explicit - so they do not update auto_txn_cleanup marker pub(crate) auto_txn_cleanup: TxnCleanup, + fk_constraint_counter: isize, } impl ProgramState { @@ -359,6 +360,7 @@ impl ProgramState { op_checkpoint_state: OpCheckpointState::StartCheckpoint, view_delta_state: ViewDeltaCommitState::NotStarted, auto_txn_cleanup: TxnCleanup::None, + fk_constraint_counter: 0, } } From c2b70261311e11f8fa5a61ec8948e501af53ef85 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 27 Sep 2025 20:49:02 -0400 Subject: [PATCH 066/428] Add FOREIGN_KEY constraint error --- core/error.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/core/error.rs b/core/error.rs index 76bac45f0..dd76fddfc 100644 --- a/core/error.rs +++ b/core/error.rs @@ -163,6 +163,7 @@ impl From for LimboError { pub const SQLITE_CONSTRAINT: usize = 19; pub const SQLITE_CONSTRAINT_PRIMARYKEY: usize = SQLITE_CONSTRAINT | (6 << 8); +pub const SQLITE_CONSTRAINT_FOREIGNKEY: usize = SQLITE_CONSTRAINT | (7 << 8); pub const SQLITE_CONSTRAINT_NOTNULL: usize = SQLITE_CONSTRAINT | (5 << 8); pub const SQLITE_FULL: usize = 13; // we want this in autoincrement - incase if user inserts max allowed int pub const SQLITE_CONSTRAINT_UNIQUE: usize = 2067; From 223b060a6ad6a5873355c4562067dff162641569 Mon Sep 17 00:00:00 2001 From: Henrik Ingo Date: Tue, 7 Oct 2025 23:26:19 +0300 Subject: [PATCH 067/428] Increase instance size to avoid OOM --- .github/workflows/perf_nightly.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/perf_nightly.yml b/.github/workflows/perf_nightly.yml index 2579d4521..2f6d41cbb 100644 --- a/.github/workflows/perf_nightly.yml +++ b/.github/workflows/perf_nightly.yml @@ -17,7 +17,7 @@ env: jobs: bench: - runs-on: nyrkio_perf_server_2cpu_ubuntu2404 + runs-on: nyrkio_perf_server_4cpu_ubuntu2404 timeout-minutes: 30 steps: - uses: actions/checkout@v3 @@ -61,7 +61,7 @@ jobs: nyrkio-settings-threshold: 0% clickbench: - runs-on: nyrkio_perf_server_2cpu_ubuntu2404 + runs-on: nyrkio_perf_server_4cpu_ubuntu2404 timeout-minutes: 30 steps: - uses: actions/checkout@v3 @@ -109,7 +109,7 @@ jobs: nyrkio-public: true tpc-h-criterion: - runs-on: nyrkio_perf_server_2cpu_ubuntu2404 + runs-on: nyrkio_perf_server_4cpu_ubuntu2404 timeout-minutes: 60 env: DB_FILE: "perf/tpc-h/TPC-H.db" From 346e6fedfa69c7c1d8b4fdbcd566d7f49bf6b162 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 27 Sep 2025 20:49:26 -0400 Subject: [PATCH 068/428] Create ForeignKey, ResolvedFkRef types and FK resolution --- core/lib.rs | 22 +- core/schema.rs | 529 +++++++++++++++++++++++++++++++++++++- core/translate/insert.rs | 256 +++++++++++++++++- core/translate/planner.rs | 1 + core/translate/pragma.rs | 11 +- core/translate/schema.rs | 1 + core/translate/update.rs | 1 + core/translate/view.rs | 1 + core/translate/window.rs | 1 + core/vdbe/execute.rs | 44 +++- core/vdbe/explain.rs | 4 +- core/vdbe/insn.rs | 1 + core/vdbe/mod.rs | 4 +- 13 files changed, 836 insertions(+), 40 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 8145af6e7..ee55c34ca 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -63,17 +63,16 @@ pub use io::{ }; use parking_lot::RwLock; use schema::Schema; -use std::cell::Cell; use std::{ borrow::Cow, - cell::RefCell, + cell::{Cell, RefCell}, collections::HashMap, fmt::{self, Display}, num::NonZero, ops::Deref, rc::Rc, sync::{ - atomic::{AtomicBool, AtomicI32, AtomicI64, AtomicU16, AtomicUsize, Ordering}, + atomic::{AtomicBool, AtomicI32, AtomicI64, AtomicIsize, AtomicU16, AtomicUsize, Ordering}, Arc, LazyLock, Mutex, Weak, }, time::Duration, @@ -583,6 +582,7 @@ impl Database { data_sync_retry: AtomicBool::new(false), busy_timeout: RwLock::new(Duration::new(0, 0)), is_mvcc_bootstrap_connection: AtomicBool::new(is_mvcc_bootstrap_connection), + fk_pragma: AtomicBool::new(false), }); self.n_connections .fetch_add(1, std::sync::atomic::Ordering::SeqCst); @@ -1100,6 +1100,7 @@ pub struct Connection { busy_timeout: RwLock, /// Whether this is an internal connection used for MVCC bootstrap is_mvcc_bootstrap_connection: AtomicBool, + fk_pragma: AtomicBool, } impl Drop for Connection { @@ -1532,6 +1533,21 @@ impl Connection { Ok(db) } + pub fn set_foreign_keys_enabled(&self, enable: bool) { + self.fk_pragma.store(enable, Ordering::Release); + } + pub fn foreign_keys_enabled(&self) -> bool { + self.fk_pragma.load(Ordering::Acquire) + } + + pub(crate) fn clear_deferred_foreign_key_violations(&self) -> isize { + self.fk_deferred_violations.swap(0, Ordering::Release) + } + + pub(crate) fn get_deferred_foreign_key_violations(&self) -> isize { + self.fk_deferred_violations.load(Ordering::Acquire) + } + pub fn maybe_update_schema(&self) { let current_schema_version = self.schema.read().schema_version; let schema = self.db.schema.lock().unwrap(); diff --git a/core/schema.rs b/core/schema.rs index 106dc30c5..4ef57684b 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -89,7 +89,9 @@ use std::ops::Deref; use std::sync::Arc; use std::sync::Mutex; use tracing::trace; -use turso_parser::ast::{self, ColumnDefinition, Expr, Literal, SortOrder, TableOptions}; +use turso_parser::ast::{ + self, ColumnDefinition, Expr, InitDeferredPred, Literal, RefAct, SortOrder, TableOptions, +}; use turso_parser::{ ast::{Cmd, CreateTableBody, ResultColumn, Stmt}, parser::Parser, @@ -298,9 +300,18 @@ impl Schema { self.views.get(&name).cloned() } - pub fn add_btree_table(&mut self, table: Arc) { + pub fn add_btree_table(&mut self, mut table: Arc) -> Result<()> { let name = normalize_ident(&table.name); + let mut resolved_fks: Vec> = Vec::with_capacity(table.foreign_keys.len()); + // when we built the BTreeTable from SQL, we didn't have access to the Schema to validate + // any FK relationships, so we do that now + self.validate_and_normalize_btree_foreign_keys(&table, &mut resolved_fks)?; + + // there should only be 1 reference to the table so Arc::make_mut shouldnt copy + let t = Arc::make_mut(&mut table); + t.foreign_keys = resolved_fks; self.tables.insert(name, Table::BTree(table).into()); + Ok(()) } pub fn add_virtual_table(&mut self, table: Arc) { @@ -393,6 +404,31 @@ impl Schema { self.indexes_enabled } + pub fn get_foreign_keys_for_table(&self, table_name: &str) -> Vec> { + self.get_table(table_name) + .and_then(|t| t.btree()) + .map(|t| t.foreign_keys.clone()) + .unwrap_or_default() + } + + /// Get foreign keys where this table is the parent (referenced by other tables) + pub fn get_referencing_foreign_keys( + &self, + parent_table: &str, + ) -> Vec<(String, Arc)> { + let mut refs = Vec::new(); + for table in self.tables.values() { + if let Table::BTree(btree) = table.deref() { + for fk in &btree.foreign_keys { + if fk.parent_table == parent_table { + refs.push((btree.name.as_str().to_string(), fk.clone())); + } + } + } + } + refs + } + /// Update [Schema] by scanning the first root page (sqlite_schema) pub fn make_from_btree( &mut self, @@ -646,6 +682,7 @@ impl Schema { has_rowid: true, is_strict: false, has_autoincrement: false, + foreign_keys: vec![], unique_sets: vec![], }))); @@ -732,7 +769,10 @@ impl Schema { } } - self.add_btree_table(Arc::new(table)); + if let Some(mv_store) = mv_store { + mv_store.mark_table_as_loaded(root_page); + } + self.add_btree_table(Arc::new(table))?; } } "index" => { @@ -842,6 +882,264 @@ impl Schema { Ok(()) } + + fn validate_and_normalize_btree_foreign_keys( + &self, + table: &Arc, + resolved_fks: &mut Vec>, + ) -> Result<()> { + for key in &table.foreign_keys { + let Some(parent) = self.get_btree_table(&key.parent_table) else { + return Err(LimboError::ParseError(format!( + "Foreign key references missing table {}", + key.parent_table + ))); + }; + + let child_cols: Vec = key + .child_columns + .iter() + .map(|c| normalize_ident(c)) + .collect(); + for c in &child_cols { + if table.get_column(c).is_none() && !c.eq_ignore_ascii_case("rowid") { + return Err(LimboError::ParseError(format!( + "Foreign key child column not found: {}.{}", + table.name, c + ))); + } + } + + // Resolve parent cols: + // if explicitly listed, we normalize them + // else, we default to parent's PRIMARY KEY columns. + // if parent has no declared PK, SQLite defaults to single "rowid" + let parent_cols: Vec = if key.parent_columns.is_empty() { + if !parent.primary_key_columns.is_empty() { + parent + .primary_key_columns + .iter() + .map(|(n, _)| normalize_ident(n)) + .collect() + } else { + vec!["rowid".to_string()] + } + } else { + key.parent_columns + .iter() + .map(|c| normalize_ident(c)) + .collect() + }; + + if parent_cols.len() != child_cols.len() { + return Err(LimboError::ParseError(format!( + "Foreign key column count mismatch: child {child_cols:?} vs parent {parent_cols:?}", + ))); + } + + // Ensure each parent col exists + for col in &parent_cols { + if !col.eq_ignore_ascii_case("rowid") && parent.get_column(col).is_none() { + return Err(LimboError::ParseError(format!( + "Foreign key references missing column {}.{col}", + key.parent_table + ))); + } + } + + // Parent side must be UNIQUE/PK, rowid counts as unique + let parent_is_pk = !parent.primary_key_columns.is_empty() + && parent_cols.len() == parent.primary_key_columns.len() + && parent_cols + .iter() + .zip(&parent.primary_key_columns) + .all(|(a, (b, _))| a.eq_ignore_ascii_case(b)); + + let parent_is_rowid = + parent_cols.len() == 1 && parent_cols[0].eq_ignore_ascii_case("rowid"); + + let parent_is_unique = parent_is_pk + || parent_is_rowid + || self.get_indices(&parent.name).any(|idx| { + idx.unique + && idx.columns.len() == parent_cols.len() + && idx + .columns + .iter() + .zip(&parent_cols) + .all(|(ic, pc)| ic.name.eq_ignore_ascii_case(pc)) + }); + + if !parent_is_unique { + return Err(LimboError::ParseError(format!( + "Foreign key references {}({:?}) which is not UNIQUE or PRIMARY KEY", + key.parent_table, parent_cols + ))); + } + + let resolved = ForeignKey { + parent_table: normalize_ident(&key.parent_table), + parent_columns: parent_cols, + child_columns: child_cols, + on_delete: key.on_delete, + on_update: key.on_update, + on_insert: key.on_insert, + deferred: key.deferred, + }; + resolved_fks.push(Arc::new(resolved)); + } + Ok(()) + } + + pub fn incoming_fks_to(&self, table_name: &str) -> Vec { + let target = normalize_ident(table_name); + let mut out = vec![]; + + // Resolve the parent table once + let parent_tbl = self + .get_btree_table(&target) + .expect("incoming_fks_to: parent table must exist"); + + // Precompute helper to find parent unique index, if it's not the rowid + let find_parent_unique = |cols: &Vec| -> Option> { + // If matches PK exactly, we don't need a secondary index probe + let matches_pk = !parent_tbl.primary_key_columns.is_empty() + && parent_tbl.primary_key_columns.len() == cols.len() + && parent_tbl + .primary_key_columns + .iter() + .zip(cols.iter()) + .all(|((n, _ord), c)| n.eq_ignore_ascii_case(c)); + + if matches_pk { + return None; + } + + self.get_indices(&parent_tbl.name) + .find(|idx| { + idx.unique + && idx.columns.len() == cols.len() + && idx + .columns + .iter() + .zip(cols.iter()) + .all(|(ic, pc)| ic.name.eq_ignore_ascii_case(pc)) + }) + .cloned() + }; + + for t in self.tables.values() { + let Some(child) = t.btree() else { + continue; + }; + + for fk in &child.foreign_keys { + if normalize_ident(&fk.parent_table) != target { + continue; + } + + // Resolve + normalize columns + let child_cols: Vec = fk + .child_columns + .iter() + .map(|c| normalize_ident(c)) + .collect(); + + // If no explicit parent columns were given, they were validated in add_btree_table() + // to match the parent's PK. We resolve them the same way here. + let parent_cols: Vec = if fk.parent_columns.is_empty() { + parent_tbl + .primary_key_columns + .iter() + .map(|(n, _)| normalize_ident(n)) + .collect() + } else { + fk.parent_columns + .iter() + .map(|c| normalize_ident(c)) + .collect() + }; + + // Child positions + let child_pos: Vec = child_cols + .iter() + .map(|cname| { + child.get_column(cname).map(|(i, _)| i).unwrap_or_else(|| { + panic!( + "incoming_fks_to: child col {}.{} missing", + child.name, cname + ) + }) + }) + .collect(); + + let parent_pos: Vec = parent_cols + .iter() + .map(|cname| { + // Allow "rowid" sentinel; return 0 but it won't be used when parent_uses_rowid == true + parent_tbl + .get_column(cname) + .map(|(i, _)| i) + .or_else(|| { + if cname.eq_ignore_ascii_case("rowid") { + Some(0) + } else { + None + } + }) + .unwrap_or_else(|| { + panic!( + "incoming_fks_to: parent col {}.{cname} missing", + parent_tbl.name + ) + }) + }) + .collect(); + + // Detect parent rowid usage (single-column and rowid/alias) + let parent_uses_rowid = parent_cols.len() == 1 && { + let c = parent_cols[0].as_str(); + c.eq_ignore_ascii_case("rowid") + || parent_tbl.columns.iter().any(|col| { + col.is_rowid_alias + && col + .name + .as_deref() + .is_some_and(|n| n.eq_ignore_ascii_case(c)) + }) + }; + + let parent_unique_index = if parent_uses_rowid { + None + } else { + find_parent_unique(&parent_cols) + }; + + out.push(IncomingFkRef { + child_table: Arc::clone(&child), + fk: Arc::clone(fk), + parent_cols, + child_cols, + child_pos, + parent_pos, + parent_uses_rowid, + parent_unique_index, + }); + } + } + out + } + + pub fn any_incoming_fk_to(&self, table_name: &str) -> bool { + self.tables.values().any(|t| { + let Some(bt) = t.btree() else { + return false; + }; + bt.foreign_keys + .iter() + .any(|fk| fk.parent_table == table_name) + }) + } } impl Clone for Schema { @@ -1016,6 +1314,7 @@ pub struct BTreeTable { pub is_strict: bool, pub has_autoincrement: bool, pub unique_sets: Vec, + pub foreign_keys: Vec>, } impl BTreeTable { @@ -1146,6 +1445,7 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R let mut has_rowid = true; let mut has_autoincrement = false; let mut primary_key_columns = vec![]; + let mut foreign_keys = vec![]; let mut cols = vec![]; let is_strict: bool; let mut unique_sets: Vec = vec![]; @@ -1219,6 +1519,85 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R is_primary_key: false, }; unique_sets.push(unique_set); + } else if let ast::TableConstraint::ForeignKey { + columns, + clause, + defer_clause, + } = &c.constraint + { + let child_columns: Vec = columns + .iter() + .map(|ic| normalize_ident(ic.col_name.as_str())) + .collect(); + + // derive parent columns: explicit or default to parent PK + let parent_table = normalize_ident(clause.tbl_name.as_str()); + let parent_columns: Vec = clause + .columns + .iter() + .map(|ic| normalize_ident(ic.col_name.as_str())) + .collect(); + + // arity check + if child_columns.len() != parent_columns.len() { + crate::bail_parse_error!( + "foreign key on \"{}\" has {} child column(s) but {} parent column(s)", + tbl_name, + child_columns.len(), + parent_columns.len() + ); + } + // deferrable semantics + let deferred = match defer_clause { + Some(d) => { + d.deferrable + && matches!( + d.init_deferred, + Some(InitDeferredPred::InitiallyDeferred) + ) + } + None => false, // NOT DEFERRABLE INITIALLY IMMEDIATE by default + }; + let fk = ForeignKey { + parent_table, + parent_columns, + child_columns, + on_delete: clause + .args + .iter() + .find_map(|a| { + if let ast::RefArg::OnDelete(x) = a { + Some(*x) + } else { + None + } + }) + .unwrap_or(RefAct::NoAction), + on_insert: clause + .args + .iter() + .find_map(|a| { + if let ast::RefArg::OnInsert(x) = a { + Some(*x) + } else { + None + } + }) + .unwrap_or(RefAct::NoAction), + on_update: clause + .args + .iter() + .find_map(|a| { + if let ast::RefArg::OnUpdate(x) = a { + Some(*x) + } else { + None + } + }) + .unwrap_or(RefAct::NoAction), + deferred, + }; + foreign_keys.push(Arc::new(fk)); } } for ast::ColumnDefinition { @@ -1259,7 +1638,7 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R let mut unique = false; let mut collation = None; for c_def in constraints { - match c_def.constraint { + match &c_def.constraint { ast::ColumnConstraint::PrimaryKey { order: o, auto_increment, @@ -1272,11 +1651,11 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R ); } primary_key = true; - if auto_increment { + if *auto_increment { has_autoincrement = true; } if let Some(o) = o { - order = o; + order = *o; } unique_sets.push(UniqueSet { columns: vec![(name.clone(), order)], @@ -1305,6 +1684,55 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R ast::ColumnConstraint::Collate { ref collation_name } => { collation = Some(CollationSeq::new(collation_name.as_str())?); } + ast::ColumnConstraint::ForeignKey { + clause, + defer_clause, + } => { + let fk = ForeignKey { + parent_table: clause.tbl_name.to_string(), + parent_columns: clause + .columns + .iter() + .map(|c| c.col_name.as_str().to_string()) + .collect(), + on_delete: clause + .args + .iter() + .find_map(|arg| { + if let ast::RefArg::OnDelete(act) = arg { + Some(*act) + } else { + None + } + }) + .unwrap_or(RefAct::NoAction), + on_insert: clause + .args + .iter() + .find_map(|arg| { + if let ast::RefArg::OnInsert(act) = arg { + Some(*act) + } else { + None + } + }) + .unwrap_or(RefAct::NoAction), + on_update: clause + .args + .iter() + .find_map(|arg| { + if let ast::RefArg::OnUpdate(act) = arg { + Some(*act) + } else { + None + } + }) + .unwrap_or(RefAct::NoAction), + child_columns: vec![name.clone()], + deferred: defer_clause.is_some(), + }; + foreign_keys.push(Arc::new(fk)); + } _ => {} } } @@ -1384,6 +1812,7 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R has_autoincrement, columns: cols, is_strict, + foreign_keys, unique_sets: { // If there are any unique sets that have identical column names in the same order (even if they are PRIMARY KEY and UNIQUE and have different sort orders), remove the duplicates. // Examples: @@ -1441,6 +1870,93 @@ pub fn _build_pseudo_table(columns: &[ResultColumn]) -> PseudoCursorType { table } +#[derive(Debug, Clone)] +pub struct ForeignKey { + /// Columns in this table + pub child_columns: Vec, + /// Referenced table + pub parent_table: String, + /// Referenced columns + pub parent_columns: Vec, + pub on_delete: RefAct, + pub on_update: RefAct, + pub on_insert: RefAct, + /// DEFERRABLE INITIALLY DEFERRED + pub deferred: bool, +} + +/// A single foreign key where `parent_table == target`. +#[derive(Clone, Debug)] +pub struct IncomingFkRef { + /// Child table that owns the FK. + pub child_table: Arc, + /// The FK as declared on the child table. + pub fk: Arc, + + /// Resolved, normalized column names. + pub parent_cols: Vec, + pub child_cols: Vec, + + /// Column positions in the child/parent tables (pos_in_table) + pub child_pos: Vec, + pub parent_pos: Vec, + + /// If the parent key is rowid or a rowid-alias (single-column only) + pub parent_uses_rowid: bool, + /// For non-rowid parents: the UNIQUE index that enforces the parent key. + /// (None when `parent_uses_rowid == true`.) + pub parent_unique_index: Option>, +} + +impl IncomingFkRef { + /// Returns if any referenced parent column can change when these column positions are updated. + pub fn parent_key_may_change( + &self, + updated_parent_positions: &HashSet, + parent_tbl: &BTreeTable, + ) -> bool { + if self.parent_uses_rowid { + // parent rowid changes if the parent's rowid or alias is updated + if let Some((idx, _)) = parent_tbl + .columns + .iter() + .enumerate() + .find(|(_, c)| c.is_rowid_alias) + { + return updated_parent_positions.contains(&idx); + } + // Without a rowid alias, a direct rowid update is represented separately with ROWID_SENTINEL + return true; + } + self.parent_pos + .iter() + .any(|p| updated_parent_positions.contains(p)) + } + + /// Returns if any child column of this FK is in `updated_child_positions` + pub fn child_key_changed( + &self, + updated_child_positions: &HashSet, + child_tbl: &BTreeTable, + ) -> bool { + if self + .child_pos + .iter() + .any(|p| updated_child_positions.contains(p)) + { + return true; + } + // special case: if FK uses a rowid alias on child, and rowid changed + if self.child_cols.len() == 1 { + let (i, col) = child_tbl.get_column(&self.child_cols[0]).unwrap(); + if col.is_rowid_alias && updated_child_positions.contains(&i) { + return true; + } + } + false + } +} + #[derive(Debug, Clone)] pub struct Column { pub name: Option, @@ -1782,6 +2298,7 @@ pub fn sqlite_schema_table() -> BTreeTable { hidden: false, }, ], + foreign_keys: vec![], unique_sets: vec![], } } diff --git a/core/translate/insert.rs b/core/translate/insert.rs index e46a5607d..14511bb0f 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -83,6 +83,11 @@ pub fn translate_insert( ); } let table_name = &tbl_name.name; + let has_child_fks = connection.foreign_keys_enabled() + && !resolver + .schema + .get_foreign_keys_for_table(table_name.as_str()) + .is_empty(); // Check if this is a system table that should be protected from direct writes if crate::schema::is_system_table(table_name.as_str()) { @@ -222,6 +227,8 @@ pub fn translate_insert( let halt_label = program.allocate_label(); let loop_start_label = program.allocate_label(); let row_done_label = program.allocate_label(); + let stmt_epilogue = program.allocate_label(); + let mut select_exhausted_label: Option = None; let cdc_table = prepare_cdc_if_necessary(&mut program, resolver.schema, table.get_name())?; @@ -234,6 +241,14 @@ pub fn translate_insert( connection, )?; + if has_child_fks { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: true, + }); + } + let mut yield_reg_opt = None; let mut temp_table_ctx = None; let (num_values, cursor_id) = match body { @@ -254,11 +269,11 @@ pub fn translate_insert( jump_on_definition: jump_on_definition_label, start_offset: start_offset_label, }); - program.preassign_label_to_next_insn(start_offset_label); let query_destination = QueryDestination::CoroutineYield { yield_reg, + // keep implementation_start as halt_label (producer internals) coroutine_implementation_start: halt_label, }; program.incr_nesting(); @@ -298,18 +313,14 @@ pub fn translate_insert( }); // Main loop - // FIXME: rollback is not implemented. E.g. if you insert 2 rows and one fails to unique constraint violation, - // the other row will still be inserted. program.preassign_label_to_next_insn(loop_start_label); - let yield_label = program.allocate_label(); - program.emit_insn(Insn::Yield { yield_reg, - end_offset: yield_label, + end_offset: yield_label, // stays local, we’ll route at loop end }); - let record_reg = program.alloc_register(); + let record_reg = program.alloc_register(); let affinity_str = if columns.is_empty() { btree_table .columns @@ -352,7 +363,6 @@ pub fn translate_insert( rowid_reg, prev_largest_reg: 0, }); - program.emit_insn(Insn::Insert { cursor: temp_cursor_id, key_reg: rowid_reg, @@ -361,12 +371,10 @@ pub fn translate_insert( flag: InsertFlags::new().require_seek(), table_name: "".to_string(), }); - // loop back program.emit_insn(Insn::Goto { target_pc: loop_start_label, }); - program.preassign_label_to_next_insn(yield_label); program.emit_insn(Insn::OpenWrite { @@ -381,13 +389,14 @@ pub fn translate_insert( db: 0, }); - // Main loop - // FIXME: rollback is not implemented. E.g. if you insert 2 rows and one fails to unique constraint violation, - // the other row will still be inserted. program.preassign_label_to_next_insn(loop_start_label); + + // on EOF, jump to select_exhausted to check FK constraints + let select_exhausted = program.allocate_label(); + select_exhausted_label = Some(select_exhausted); program.emit_insn(Insn::Yield { yield_reg, - end_offset: halt_label, + end_offset: select_exhausted, }); } @@ -1033,6 +1042,9 @@ pub fn translate_insert( } } } + if has_child_fks { + emit_fk_checks_for_insert(&mut program, resolver, &insertion, table_name.as_str())?; + } program.emit_insn(Insn::Insert { cursor: cursor_id, @@ -1154,15 +1166,38 @@ pub fn translate_insert( program.emit_insn(Insn::Close { cursor_id: temp_table_ctx.cursor_id, }); + program.emit_insn(Insn::Goto { + target_pc: stmt_epilogue, + }); } else { // For multiple rows which not require a temp table, loop back program.resolve_label(row_done_label, program.offset()); program.emit_insn(Insn::Goto { target_pc: loop_start_label, }); + if let Some(sel_eof) = select_exhausted_label { + program.preassign_label_to_next_insn(sel_eof); + program.emit_insn(Insn::Goto { + target_pc: stmt_epilogue, + }); + } } } else { program.resolve_label(row_done_label, program.offset()); + // single-row falls through to epilogue + program.emit_insn(Insn::Goto { + target_pc: stmt_epilogue, + }); + } + + program.preassign_label_to_next_insn(stmt_epilogue); + if has_child_fks { + // close FK scope and surface deferred violations + program.emit_insn(Insn::FkCounter { + increment_value: -1, + check_abort: true, + is_scope: true, + }); } program.resolve_label(halt_label, program.offset()); @@ -1857,3 +1892,196 @@ fn emit_update_sqlite_sequence( Ok(()) } + +/// Emit child->parent foreign key checks for an INSERT, for the current row +fn emit_fk_checks_for_insert( + program: &mut ProgramBuilder, + resolver: &Resolver, + insertion: &Insertion, + table_name: &str, +) -> Result<()> { + let after_all = program.allocate_label(); + program.emit_insn(Insn::FkIfZero { + target_pc: after_all, + if_zero: true, + }); + + // Iterate child FKs declared on this table + for fk in resolver.schema.get_foreign_keys_for_table(table_name) { + let fk_ok = program.allocate_label(); + + // If any child column is NULL, skip this FK + for child_col in &fk.child_columns { + let mapping = insertion + .get_col_mapping_by_name(child_col) + .ok_or_else(|| { + crate::LimboError::InternalError(format!("FK column {child_col} not found")) + })?; + let src = if mapping.column.is_rowid_alias { + insertion.key_register() + } else { + mapping.register + }; + program.emit_insn(Insn::IsNull { + reg: src, + target_pc: fk_ok, + }); + } + + // Parent lookup: rowid path or unique-index path + let parent_tbl = resolver.schema.get_table(&fk.parent_table).ok_or_else(|| { + crate::LimboError::InternalError(format!("Parent table {} not found", fk.parent_table)) + })?; + + let uses_rowid = { + // If single parent column equals rowid or aliases rowid + fk.parent_columns.len() == 1 && { + let parent_col = fk.parent_columns[0].as_str(); + parent_col.eq_ignore_ascii_case("rowid") + || parent_tbl.columns().iter().any(|c| { + c.is_rowid_alias + && c.name + .as_ref() + .is_some_and(|n| n.eq_ignore_ascii_case(parent_col)) + }) + } + }; + + if uses_rowid { + // Simple rowid probe on parent table + let parent_bt = parent_tbl.btree().ok_or_else(|| { + crate::LimboError::InternalError("Parent table is not a BTree".into()) + })?; + let pcur = program.alloc_cursor_id(CursorType::BTreeTable(parent_bt.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: pcur, + root_page: parent_bt.root_page, + db: 0, + }); + + // Child value register + let cm = insertion + .get_col_mapping_by_name(&fk.child_columns[0]) + .ok_or_else(|| { + crate::LimboError::InternalError("FK child column not found".into()) + })?; + let val_reg = if cm.column.is_rowid_alias { + insertion.key_register() + } else { + cm.register + }; + + let violation = program.allocate_label(); + // NotExists: jump to violation if missing in parent + program.emit_insn(Insn::NotExists { + cursor: pcur, + rowid_reg: val_reg, + target_pc: violation, + }); + // OK + program.emit_insn(Insn::Close { cursor_id: pcur }); + program.emit_insn(Insn::Goto { target_pc: fk_ok }); + + // Violation + program.preassign_label_to_next_insn(violation); + program.emit_insn(Insn::Close { cursor_id: pcur }); + + // Deferred vs immediate + if fk.deferred { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: false, + }); + } else { + program.emit_insn(Insn::Halt { + err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, + description: "FOREIGN KEY constraint failed".to_string(), + }); + } + } else { + // Multi-column (or non-rowid) parent, we have to match a UNIQUE index with + // the exact column set and order + let parent_idx = resolver + .schema + .get_indices(&fk.parent_table) + .find(|idx| { + idx.unique + && idx.columns.len() == fk.parent_columns.len() + && idx + .columns + .iter() + .zip(fk.parent_columns.iter()) + .all(|(ic, pc)| ic.name.eq_ignore_ascii_case(pc)) + }) + .ok_or_else(|| { + crate::LimboError::InternalError(format!( + "No UNIQUE index on parent {}({:?}) for FK", + fk.parent_table, fk.parent_columns + )) + })?; + + let icur = program.alloc_cursor_id(CursorType::BTreeIndex(parent_idx.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: icur, + root_page: parent_idx.root_page, + db: 0, + }); + + // Build packed search key registers from the *child* values + let n = fk.child_columns.len(); + let start = program.alloc_registers(n); + for (i, child_col) in fk.child_columns.iter().enumerate() { + let cm = insertion + .get_col_mapping_by_name(child_col) + .ok_or_else(|| { + crate::LimboError::InternalError(format!("Column {child_col} not found")) + })?; + let src = if cm.column.is_rowid_alias { + insertion.key_register() + } else { + cm.register + }; + program.emit_insn(Insn::Copy { + src_reg: src, + dst_reg: start + i, + extra_amount: 0, + }); + } + + let found = program.allocate_label(); + program.emit_insn(Insn::Found { + cursor_id: icur, + target_pc: found, + record_reg: start, + num_regs: n, + }); + + // Violation path + program.emit_insn(Insn::Close { cursor_id: icur }); + if fk.deferred { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: false, + }); + } else { + program.emit_insn(Insn::Halt { + err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, + description: "FOREIGN KEY constraint failed".to_string(), + }); + } + program.emit_insn(Insn::Goto { target_pc: fk_ok }); + + // Found OK + program.preassign_label_to_next_insn(found); + program.emit_insn(Insn::Close { cursor_id: icur }); + } + + // Done with this FK + program.preassign_label_to_next_insn(fk_ok); + } + + program.resolve_label(after_all, program.offset()); + Ok(()) +} diff --git a/core/translate/planner.rs b/core/translate/planner.rs index 589b45f3f..ba74b47c0 100644 --- a/core/translate/planner.rs +++ b/core/translate/planner.rs @@ -478,6 +478,7 @@ fn parse_table( has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }); drop(view_guard); diff --git a/core/translate/pragma.rs b/core/translate/pragma.rs index f08bd5a15..57a658212 100644 --- a/core/translate/pragma.rs +++ b/core/translate/pragma.rs @@ -389,7 +389,14 @@ fn update_pragma( } PragmaName::ForeignKeys => { let enabled = match &value { - Expr::Literal(Literal::Keyword(name)) | Expr::Id(name) => { + Expr::Id(name) | Expr::Name(name) => { + let name_str = name.as_str().as_bytes(); + match_ignore_ascii_case!(match name_str { + b"ON" | b"TRUE" | b"YES" | b"1" => true, + _ => false, + }) + } + Expr::Literal(Literal::Keyword(name) | Literal::String(name)) => { let name_bytes = name.as_bytes(); match_ignore_ascii_case!(match name_bytes { b"ON" | b"TRUE" | b"YES" | b"1" => true, @@ -399,7 +406,7 @@ fn update_pragma( Expr::Literal(Literal::Numeric(n)) => !matches!(n.as_str(), "0"), _ => false, }; - connection.set_foreign_keys(enabled); + connection.set_foreign_keys_enabled(enabled); Ok((program, TransactionMode::None)) } } diff --git a/core/translate/schema.rs b/core/translate/schema.rs index ce85756ff..a78e1b630 100644 --- a/core/translate/schema.rs +++ b/core/translate/schema.rs @@ -812,6 +812,7 @@ pub fn translate_drop_table( }], is_strict: false, unique_sets: vec![], + foreign_keys: vec![], }); // cursor id 2 let ephemeral_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(simple_table_rc)); diff --git a/core/translate/update.rs b/core/translate/update.rs index f89ddedff..867b919ea 100644 --- a/core/translate/update.rs +++ b/core/translate/update.rs @@ -353,6 +353,7 @@ pub fn prepare_update_plan( }], is_strict: false, unique_sets: vec![], + foreign_keys: vec![], }); let temp_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(table.clone())); diff --git a/core/translate/view.rs b/core/translate/view.rs index 399664ab1..b9b5ddcc0 100644 --- a/core/translate/view.rs +++ b/core/translate/view.rs @@ -80,6 +80,7 @@ pub fn translate_create_materialized_view( has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }); // Allocate a cursor for writing to the view's btree during population diff --git a/core/translate/window.rs b/core/translate/window.rs index 91d783ff0..7ab80207d 100644 --- a/core/translate/window.rs +++ b/core/translate/window.rs @@ -505,6 +505,7 @@ pub fn init_window<'a>( is_strict: false, unique_sets: vec![], has_autoincrement: false, + foreign_keys: vec![], }); let cursor_buffer_read = program.alloc_cursor_id(CursorType::BTreeTable(buffer_table.clone())); let cursor_buffer_write = program.alloc_cursor_id(CursorType::BTreeTable(buffer_table.clone())); diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 4c93ded2b..40642b87d 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -1,5 +1,5 @@ #![allow(unused_variables)] -use crate::error::SQLITE_CONSTRAINT_UNIQUE; +use crate::error::{SQLITE_CONSTRAINT_FOREIGNKEY, SQLITE_CONSTRAINT_UNIQUE}; use crate::function::AlterTableFunc; use crate::mvcc::database::CheckpointStateMachine; use crate::numeric::{NullableInteger, Numeric}; @@ -2156,6 +2156,9 @@ pub fn halt( "UNIQUE constraint failed: {description} (19)" ))); } + SQLITE_CONSTRAINT_FOREIGNKEY => { + return Err(LimboError::Constraint(format!("{description} (19)"))); + } _ => { return Err(LimboError::Constraint(format!( "undocumented halt error code {description}" @@ -8287,17 +8290,34 @@ pub fn op_fk_counter( FkCounter { increment_value, check_abort, + is_scope, }, insn ); - state.fk_constraint_counter = state.fk_constraint_counter.saturating_add(*increment_value); + if *is_scope { + // Adjust FK scope depth + state.fk_scope_counter = state.fk_scope_counter.saturating_add(*increment_value); - // If check_abort is true and counter is negative, abort with constraint error - // This shouldn't happen in well-formed bytecode but acts as a safety check - if *check_abort && state.fk_constraint_counter < 0 { - return Err(LimboError::Constraint( - "FOREIGN KEY constraint failed".into(), - )); + // raise if there were deferred violations in this statement. + if *check_abort { + if state.fk_scope_counter < 0 { + return Err(LimboError::Constraint( + "FOREIGN KEY constraint failed".into(), + )); + } + if state.fk_scope_counter == 0 && state.fk_deferred_violations > 0 { + // Clear violations for safety, a new statement will re-open scope. + state.fk_deferred_violations = 0; + return Err(LimboError::Constraint( + "FOREIGN KEY constraint failed".into(), + )); + } + } + } else { + // Adjust deferred violations counter + state.fk_deferred_violations = state + .fk_deferred_violations + .saturating_add(*increment_value); } state.pc += 1; @@ -8317,13 +8337,15 @@ pub fn op_fk_if_zero( // Jump if any: // Foreign keys are disabled globally // p1 is true AND deferred constraint counter is zero - // p1 is false AND deferred constraint counter is non-zero + // p1 is false AND deferred constraint counter is non-zero + let scope_zero = state.fk_scope_counter == 0; + let should_jump = if !fk_enabled { true } else if *if_zero { - state.fk_constraint_counter == 0 + scope_zero } else { - state.fk_constraint_counter != 0 + !scope_zero }; if should_jump { diff --git a/core/vdbe/explain.rs b/core/vdbe/explain.rs index 15485bab7..3f99fe809 100644 --- a/core/vdbe/explain.rs +++ b/core/vdbe/explain.rs @@ -1804,11 +1804,11 @@ pub fn insn_to_row( 0, String::new(), ), - Insn::FkCounter{check_abort, increment_value} => ( + Insn::FkCounter{check_abort, increment_value, is_scope } => ( "FkCounter", *check_abort as i32, *increment_value as i32, - 0, + *is_scope as i32, Value::build_text(""), 0, String::new(), diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index 06e392902..917038b80 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -1175,6 +1175,7 @@ pub enum Insn { FkCounter { check_abort: bool, increment_value: isize, + is_scope: bool, }, // This opcode tests if a foreign key constraint-counter is currently zero. If so, jump to instruction P2. Otherwise, fall through to the next instruction. // If P1 is non-zero, then the jump is taken if the database constraint-counter is zero (the one that counts deferred constraint violations). diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 4c558a2cc..d192be864 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -313,7 +313,7 @@ pub struct ProgramState { /// This is used when statement in auto-commit mode reseted after previous uncomplete execution - in which case we may need to rollback transaction started on previous attempt /// Note, that MVCC transactions are always explicit - so they do not update auto_txn_cleanup marker pub(crate) auto_txn_cleanup: TxnCleanup, - fk_constraint_counter: isize, + fk_scope_counter: isize, } impl ProgramState { @@ -360,7 +360,7 @@ impl ProgramState { op_checkpoint_state: OpCheckpointState::StartCheckpoint, view_delta_state: ViewDeltaCommitState::NotStarted, auto_txn_cleanup: TxnCleanup::None, - fk_constraint_counter: 0, + fk_scope_counter: 0, } } From 2db18f82301c854fb8c555d52eefa904b0695201 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Mon, 29 Sep 2025 19:15:14 -0400 Subject: [PATCH 069/428] Add fk_fuzzing sql file to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 2e6cf78f8..666b560b0 100644 --- a/.gitignore +++ b/.gitignore @@ -43,6 +43,7 @@ simulator.log **/*.txt profile.json.gz simulator-output/ +tests/*.sql &1 bisected.sql From 16d19fd39e789c34ffad00e5dc7de4d179f16d3d Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Mon, 29 Sep 2025 19:15:50 -0400 Subject: [PATCH 070/428] Add tcl tests for foreign keys --- testing/foreign_keys.test | 194 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 testing/foreign_keys.test diff --git a/testing/foreign_keys.test b/testing/foreign_keys.test new file mode 100644 index 000000000..7db9b876c --- /dev/null +++ b/testing/foreign_keys.test @@ -0,0 +1,194 @@ +#!/usr/bin/env tclsh + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/sqlite3/tester.tcl + +do_execsql_test_on_specific_db {:memory:} fk-basic-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t (id INTEGER PRIMARY KEY, a TEXT); + CREATE TABLE t2 (id INTEGER PRIMARY KEY, tid REFERENCES t(id)); + INSERT INTO t VALUES (1,'x'),(2,'y'); + INSERT INTO t2 VALUES (10,1),(11,NULL); -- NULL child ok + SELECT id,tid FROM t2 ORDER BY id; +} {10|1 +11|} + +do_execsql_test_in_memory_any_error fk-insert-child-missing-parent { + PRAGMA foreign_keys=ON; + CREATE TABLE t (id INTEGER PRIMARY KEY, a TEXT); + CREATE TABLE t2 (id INTEGER PRIMARY KEY, tid REFERENCES t(id)); + INSERT INTO t2 VALUES (20,99); +} + +do_execsql_test_in_memory_any_error fk-update-child-to-missing-parent { + PRAGMA foreign_keys=ON; + CREATE TABLE t (id INTEGER PRIMARY KEY, a TEXT); + CREATE TABLE t2 (id INTEGER PRIMARY KEY, tid REFERENCES t(id)); + INSERT INTO t VALUES (1,'x'); + INSERT INTO t2 VALUES (10,1); + UPDATE t2 SET tid = 42 WHERE id = 10; -- now missing +} + +do_execsql_test_on_specific_db {:memory:} fk-update-child-to-null-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t (id INTEGER PRIMARY KEY); + CREATE TABLE t2 (id INTEGER PRIMARY KEY, tid REFERENCES t(id)); + INSERT INTO t VALUES (1); + INSERT INTO t2 VALUES (7,1); + UPDATE t2 SET tid = NULL WHERE id = 7; + SELECT id, tid FROM t2; +} {7|} + +do_execsql_test_in_memory_any_error fk-delete-parent-blocked { + PRAGMA foreign_keys=ON; + CREATE TABLE t (id INTEGER PRIMARY KEY, a TEXT); + CREATE TABLE t2 (id INTEGER PRIMARY KEY, tid REFERENCES t(id)); + INSERT INTO t VALUES (1,'x'),(2,'y'); + INSERT INTO t2 VALUES (10,2); + DELETE FROM t WHERE id=2; +} + +do_execsql_test_on_specific_db {:memory:} fk-delete-parent-ok-when-no-child { + PRAGMA foreign_keys=ON; + CREATE TABLE t (id INTEGER PRIMARY KEY, a TEXT); + CREATE TABLE t2 (id INTEGER PRIMARY KEY, tid REFERENCES t(id)); + INSERT INTO t VALUES (1,'x'),(2,'y'); + INSERT INTO t2 VALUES (10,1); + DELETE FROM t WHERE id=2; + SELECT id FROM t ORDER BY id; +} {1} + + +do_execsql_test_on_specific_db {:memory:} fk-composite-pk-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE p( + a INT NOT NULL, + b INT NOT NULL, + PRIMARY KEY(a,b) + ); + CREATE TABLE c( + id INT PRIMARY KEY, + x INT, y INT, + FOREIGN KEY(x,y) REFERENCES p(a,b) + ); + INSERT INTO p VALUES (1,1),(1,2); + INSERT INTO c VALUES (10,1,1),(11,1,2),(12,NULL,2); -- NULL in child allowed + SELECT id,x,y FROM c ORDER BY id; +} {10|1|1 +11|1|2 +12||2} + +do_execsql_test_in_memory_any_error fk-composite-pk-missing { + PRAGMA foreign_keys=ON; + CREATE TABLE p( + a INT NOT NULL, + b INT NOT NULL, + PRIMARY KEY(a,b) + ); + CREATE TABLE c( + id INT PRIMARY KEY, + x INT, y INT, + FOREIGN KEY(x,y) REFERENCES p(a,b) + ); + INSERT INTO p VALUES (1,1); + INSERT INTO c VALUES (20,1,2); -- (1,2) missing +} + +do_execsql_test_in_memory_any_error fk-composite-update-child-missing { + PRAGMA foreign_keys=ON; + CREATE TABLE p(a INT NOT NULL, b INT NOT NULL, PRIMARY KEY(a,b)); + CREATE TABLE c(id INT PRIMARY KEY, x INT, y INT, + FOREIGN KEY(x,y) REFERENCES p(a,b)); + INSERT INTO p VALUES (1,1),(2,2); + INSERT INTO c VALUES (5,1,1); + UPDATE c SET x=2,y=3 WHERE id=5; +} + +do_execsql_test_on_specific_db {:memory:} fk-composite-unique-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(u TEXT, v TEXT, pad INT, UNIQUE(u,v)); + CREATE TABLE child(id INT PRIMARY KEY, cu TEXT, cv TEXT, + FOREIGN KEY(cu,cv) REFERENCES parent(u,v)); + INSERT INTO parent VALUES ('A','B',0),('A','C',0); + INSERT INTO child VALUES (1,'A','B'); + SELECT id, cu, cv FROM child ORDER BY id; +} {1|A|B} + +do_execsql_test_in_memory_any_error fk-composite-unique-missing { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(u TEXT, v TEXT, pad INT, UNIQUE(u,v)); + CREATE TABLE child(id INT PRIMARY KEY, cu TEXT, cv TEXT, + FOREIGN KEY(cu,cv) REFERENCES parent(u,v)); + INSERT INTO parent VALUES ('A','B',0); + INSERT INTO child VALUES (2,'A','X'); -- no ('A','X') in parent +} + +do_execsql_test_on_specific_db {:memory:} fk-rowid-alias-parent-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t(id INTEGER PRIMARY KEY, a TEXT); + CREATE TABLE c(cid INTEGER PRIMARY KEY, rid REFERENCES t(rowid)); + INSERT INTO t VALUES (100,'x'); + INSERT INTO c VALUES (1, 100); + SELECT cid, rid FROM c; +} {1|100} + +do_execsql_test_in_memory_any_error fk-rowid-alias-parent-missing { + PRAGMA foreign_keys=ON; + CREATE TABLE t(id INTEGER PRIMARY KEY, a TEXT); + CREATE TABLE c(cid INTEGER PRIMARY KEY, rid REFERENCES t(rowid)); + INSERT INTO c VALUES (1, 9999); +} + +do_execsql_test_on_specific_db {:memory:} fk-update-child-noop-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid REFERENCES p(id)); + INSERT INTO p VALUES (1); + INSERT INTO c VALUES (10,1); + UPDATE c SET id = id WHERE id = 10; -- no FK column touched + SELECT id, pid FROM c; +} {10|1} + +do_execsql_test_in_memory_any_error fk-delete-parent-composite-scan { + PRAGMA foreign_keys=ON; + CREATE TABLE p(a INT NOT NULL, b INT NOT NULL, PRIMARY KEY(a,b)); + CREATE TABLE c(id INT PRIMARY KEY, x INT, y INT, + FOREIGN KEY(x,y) REFERENCES p(a,b)); + INSERT INTO p VALUES (1,2),(2,3); + INSERT INTO c VALUES (7,2,3); + DELETE FROM p WHERE a=2 AND b=3; +} + +do_execsql_test_on_specific_db {:memory:} fk-update-child-to-existing-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t(id INTEGER PRIMARY KEY); + CREATE TABLE t2(id INTEGER PRIMARY KEY, tid REFERENCES t(id)); + INSERT INTO t VALUES (1),(2); + INSERT INTO t2 VALUES (9,1); + UPDATE t2 SET tid = 2 WHERE id = 9; + SELECT id, tid FROM t2; +} {9|2} + +do_execsql_test_on_specific_db {:memory:} fk-composite-pk-delete-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE p(a INT NOT NULL, b INT NOT NULL, PRIMARY KEY(a,b)); + CREATE TABLE c(id INT PRIMARY KEY, x INT, y INT, + FOREIGN KEY(x,y) REFERENCES p(a,b)); + INSERT INTO p VALUES (1,2),(2,3); + INSERT INTO c VALUES (7,2,3); + -- Deleting a non-referenced parent tuple is OK + DELETE FROM p WHERE a=1 AND b=2; + SELECT a,b FROM p ORDER BY a,b; +} {2|3} + +do_execsql_test_in_memory_any_error fk-composite-pk-delete-violate { + PRAGMA foreign_keys=ON; + CREATE TABLE p(a INT NOT NULL, b INT NOT NULL, PRIMARY KEY(a,b)); + CREATE TABLE c(id INT PRIMARY KEY, x INT, y INT, + FOREIGN KEY(x,y) REFERENCES p(a,b)); + INSERT INTO p VALUES (2,3); + INSERT INTO c VALUES (7,2,3); + -- Deleting the referenced tuple should fail + DELETE FROM p WHERE a=2 AND b=3; +} From a343dacaafb66184b0a7335b8ae5c640d82c5852 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 7 Oct 2025 23:34:26 +0300 Subject: [PATCH 071/428] translate: make bind_and_rewrite_expr() reject identifiers if no referenced tables exist --- core/translate/expr.rs | 431 +++++++++++++++++++++-------------------- testing/select.test | 4 + 2 files changed, 225 insertions(+), 210 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index e110523f3..feb2960f3 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -3326,242 +3326,253 @@ pub fn bind_and_rewrite_expr<'a>( } _ => {} } - if let Some(referenced_tables) = &mut referenced_tables { - match expr { - Expr::Id(id) => { - let normalized_id = normalize_ident(id.as_str()); + match expr { + Expr::Id(id) => { + let Some(referenced_tables) = &mut referenced_tables else { + crate::bail_parse_error!("no such column: {}", id.as_str()); + }; + let normalized_id = normalize_ident(id.as_str()); - if binding_behavior == BindingBehavior::TryResultColumnsFirst { - if let Some(result_columns) = result_columns { - for result_column in result_columns.iter() { - if result_column.name(referenced_tables).is_some_and(|name| { - name.eq_ignore_ascii_case(&normalized_id) - }) { - *expr = result_column.expr.clone(); - return Ok(WalkControl::Continue); - } + if binding_behavior == BindingBehavior::TryResultColumnsFirst { + if let Some(result_columns) = result_columns { + for result_column in result_columns.iter() { + if result_column + .name(referenced_tables) + .is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id)) + { + *expr = result_column.expr.clone(); + return Ok(WalkControl::Continue); } } } - let mut match_result = None; + } + let mut match_result = None; - // First check joined tables - for joined_table in referenced_tables.joined_tables().iter() { - let col_idx = joined_table.table.columns().iter().position(|c| { + // First check joined tables + for joined_table in referenced_tables.joined_tables().iter() { + let col_idx = joined_table.table.columns().iter().position(|c| { + c.name + .as_ref() + .is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id)) + }); + if col_idx.is_some() { + if match_result.is_some() { + let mut ok = false; + // Column name ambiguity is ok if it is in the USING clause because then it is deduplicated + // and the left table is used. + if let Some(join_info) = &joined_table.join_info { + if join_info.using.iter().any(|using_col| { + using_col.as_str().eq_ignore_ascii_case(&normalized_id) + }) { + ok = true; + } + } + if !ok { + crate::bail_parse_error!("Column {} is ambiguous", id.as_str()); + } + } else { + let col = + joined_table.table.columns().get(col_idx.unwrap()).unwrap(); + match_result = Some(( + joined_table.internal_id, + col_idx.unwrap(), + col.is_rowid_alias, + )); + } + // only if we haven't found a match, check for explicit rowid reference + } else if let Some(row_id_expr) = parse_row_id( + &normalized_id, + referenced_tables.joined_tables()[0].internal_id, + || referenced_tables.joined_tables().len() != 1, + )? { + *expr = row_id_expr; + + return Ok(WalkControl::Continue); + } + } + + // Then check outer query references, if we still didn't find something. + // Normally finding multiple matches for a non-qualified column is an error (column x is ambiguous) + // but in the case of subqueries, the inner query takes precedence. + // For example: + // SELECT * FROM t WHERE x = (SELECT x FROM t2) + // In this case, there is no ambiguity: + // - x in the outer query refers to t.x, + // - x in the inner query refers to t2.x. + if match_result.is_none() { + for outer_ref in referenced_tables.outer_query_refs().iter() { + let col_idx = outer_ref.table.columns().iter().position(|c| { c.name .as_ref() .is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id)) }); if col_idx.is_some() { if match_result.is_some() { - let mut ok = false; - // Column name ambiguity is ok if it is in the USING clause because then it is deduplicated - // and the left table is used. - if let Some(join_info) = &joined_table.join_info { - if join_info.using.iter().any(|using_col| { - using_col.as_str().eq_ignore_ascii_case(&normalized_id) - }) { - ok = true; - } - } - if !ok { - crate::bail_parse_error!( - "Column {} is ambiguous", - id.as_str() - ); - } - } else { - let col = - joined_table.table.columns().get(col_idx.unwrap()).unwrap(); - match_result = Some(( - joined_table.internal_id, - col_idx.unwrap(), - col.is_rowid_alias, - )); + crate::bail_parse_error!("Column {} is ambiguous", id.as_str()); } - // only if we haven't found a match, check for explicit rowid reference - } else if let Some(row_id_expr) = parse_row_id( - &normalized_id, - referenced_tables.joined_tables()[0].internal_id, - || referenced_tables.joined_tables().len() != 1, - )? { - *expr = row_id_expr; - - return Ok(WalkControl::Continue); + let col = outer_ref.table.columns().get(col_idx.unwrap()).unwrap(); + match_result = Some(( + outer_ref.internal_id, + col_idx.unwrap(), + col.is_rowid_alias, + )); } } - - // Then check outer query references, if we still didn't find something. - // Normally finding multiple matches for a non-qualified column is an error (column x is ambiguous) - // but in the case of subqueries, the inner query takes precedence. - // For example: - // SELECT * FROM t WHERE x = (SELECT x FROM t2) - // In this case, there is no ambiguity: - // - x in the outer query refers to t.x, - // - x in the inner query refers to t2.x. - if match_result.is_none() { - for outer_ref in referenced_tables.outer_query_refs().iter() { - let col_idx = outer_ref.table.columns().iter().position(|c| { - c.name.as_ref().is_some_and(|name| { - name.eq_ignore_ascii_case(&normalized_id) - }) - }); - if col_idx.is_some() { - if match_result.is_some() { - crate::bail_parse_error!( - "Column {} is ambiguous", - id.as_str() - ); - } - let col = - outer_ref.table.columns().get(col_idx.unwrap()).unwrap(); - match_result = Some(( - outer_ref.internal_id, - col_idx.unwrap(), - col.is_rowid_alias, - )); - } - } - } - - if let Some((table_id, col_idx, is_rowid_alias)) = match_result { - *expr = Expr::Column { - database: None, // TODO: support different databases - table: table_id, - column: col_idx, - is_rowid_alias, - }; - referenced_tables.mark_column_used(table_id, col_idx); - return Ok(WalkControl::Continue); - } - - if binding_behavior == BindingBehavior::TryCanonicalColumnsFirst { - if let Some(result_columns) = result_columns { - for result_column in result_columns.iter() { - if result_column.name(referenced_tables).is_some_and(|name| { - name.eq_ignore_ascii_case(&normalized_id) - }) { - *expr = result_column.expr.clone(); - return Ok(WalkControl::Continue); - } - } - } - } - - // SQLite behavior: Only double-quoted identifiers get fallback to string literals - // Single quotes are handled as literals earlier, unquoted identifiers must resolve to columns - if id.quoted_with('"') { - // Convert failed double-quoted identifier to string literal - *expr = Expr::Literal(ast::Literal::String(id.as_literal())); - return Ok(WalkControl::Continue); - } else { - // Unquoted identifiers must resolve to columns - no fallback - crate::bail_parse_error!("no such column: {}", id.as_str()) - } } - Expr::Qualified(tbl, id) => { - tracing::debug!("bind_and_rewrite_expr({:?}, {:?})", tbl, id); - let normalized_table_name = normalize_ident(tbl.as_str()); - let matching_tbl = referenced_tables - .find_table_and_internal_id_by_identifier(&normalized_table_name); - if matching_tbl.is_none() { - crate::bail_parse_error!("no such table: {}", normalized_table_name); - } - let (tbl_id, tbl) = matching_tbl.unwrap(); - let normalized_id = normalize_ident(id.as_str()); - let col_idx = tbl.columns().iter().position(|c| { - c.name - .as_ref() - .is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id)) - }); - if let Some(row_id_expr) = parse_row_id(&normalized_id, tbl_id, || false)? { - *expr = row_id_expr; - return Ok(WalkControl::Continue); - } - let Some(col_idx) = col_idx else { - crate::bail_parse_error!("no such column: {}", normalized_id); - }; - let col = tbl.columns().get(col_idx).unwrap(); + if let Some((table_id, col_idx, is_rowid_alias)) = match_result { *expr = Expr::Column { database: None, // TODO: support different databases - table: tbl_id, + table: table_id, column: col_idx, - is_rowid_alias: col.is_rowid_alias, + is_rowid_alias, }; - tracing::debug!("rewritten to column"); - referenced_tables.mark_column_used(tbl_id, col_idx); + referenced_tables.mark_column_used(table_id, col_idx); return Ok(WalkControl::Continue); } - Expr::DoublyQualified(db_name, tbl_name, col_name) => { - let normalized_col_name = normalize_ident(col_name.as_str()); - // Create a QualifiedName and use existing resolve_database_id method - let qualified_name = ast::QualifiedName { - db_name: Some(db_name.clone()), - name: tbl_name.clone(), - alias: None, - }; - let database_id = connection.resolve_database_id(&qualified_name)?; - - // Get the table from the specified database - let table = connection - .with_schema(database_id, |schema| schema.get_table(tbl_name.as_str())) - .ok_or_else(|| { - crate::LimboError::ParseError(format!( - "no such table: {}.{}", - db_name.as_str(), - tbl_name.as_str() - )) - })?; - - // Find the column in the table - let col_idx = table - .columns() - .iter() - .position(|c| { - c.name.as_ref().is_some_and(|name| { - name.eq_ignore_ascii_case(&normalized_col_name) - }) - }) - .ok_or_else(|| { - crate::LimboError::ParseError(format!( - "Column: {}.{}.{} not found", - db_name.as_str(), - tbl_name.as_str(), - col_name.as_str() - )) - })?; - - let col = table.columns().get(col_idx).unwrap(); - - // Check if this is a rowid alias - let is_rowid_alias = col.is_rowid_alias; - - // Convert to Column expression - since this is a cross-database reference, - // we need to create a synthetic table reference for it - // For now, we'll error if the table isn't already in the referenced tables - let normalized_tbl_name = normalize_ident(tbl_name.as_str()); - let matching_tbl = referenced_tables - .find_table_and_internal_id_by_identifier(&normalized_tbl_name); - - if let Some((tbl_id, _)) = matching_tbl { - // Table is already in referenced tables, use existing internal ID - *expr = Expr::Column { - database: Some(database_id), - table: tbl_id, - column: col_idx, - is_rowid_alias, - }; - referenced_tables.mark_column_used(tbl_id, col_idx); - } else { - return Err(crate::LimboError::ParseError(format!( - "table {normalized_tbl_name} is not in FROM clause - cross-database column references require the table to be explicitly joined" - ))); + if binding_behavior == BindingBehavior::TryCanonicalColumnsFirst { + if let Some(result_columns) = result_columns { + for result_column in result_columns.iter() { + if result_column + .name(referenced_tables) + .is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id)) + { + *expr = result_column.expr.clone(); + return Ok(WalkControl::Continue); + } + } } } - _ => {} + + // SQLite behavior: Only double-quoted identifiers get fallback to string literals + // Single quotes are handled as literals earlier, unquoted identifiers must resolve to columns + if id.quoted_with('"') { + // Convert failed double-quoted identifier to string literal + *expr = Expr::Literal(ast::Literal::String(id.as_literal())); + return Ok(WalkControl::Continue); + } else { + // Unquoted identifiers must resolve to columns - no fallback + crate::bail_parse_error!("no such column: {}", id.as_str()) + } } + Expr::Qualified(tbl, id) => { + tracing::debug!("bind_and_rewrite_expr({:?}, {:?})", tbl, id); + let Some(referenced_tables) = &mut referenced_tables else { + crate::bail_parse_error!( + "no such column: {}.{}", + tbl.as_str(), + id.as_str() + ); + }; + let normalized_table_name = normalize_ident(tbl.as_str()); + let matching_tbl = referenced_tables + .find_table_and_internal_id_by_identifier(&normalized_table_name); + if matching_tbl.is_none() { + crate::bail_parse_error!("no such table: {}", normalized_table_name); + } + let (tbl_id, tbl) = matching_tbl.unwrap(); + let normalized_id = normalize_ident(id.as_str()); + let col_idx = tbl.columns().iter().position(|c| { + c.name + .as_ref() + .is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id)) + }); + if let Some(row_id_expr) = parse_row_id(&normalized_id, tbl_id, || false)? { + *expr = row_id_expr; + + return Ok(WalkControl::Continue); + } + let Some(col_idx) = col_idx else { + crate::bail_parse_error!("no such column: {}", normalized_id); + }; + let col = tbl.columns().get(col_idx).unwrap(); + *expr = Expr::Column { + database: None, // TODO: support different databases + table: tbl_id, + column: col_idx, + is_rowid_alias: col.is_rowid_alias, + }; + tracing::debug!("rewritten to column"); + referenced_tables.mark_column_used(tbl_id, col_idx); + return Ok(WalkControl::Continue); + } + Expr::DoublyQualified(db_name, tbl_name, col_name) => { + let Some(referenced_tables) = &mut referenced_tables else { + crate::bail_parse_error!( + "no such column: {}.{}.{}", + db_name.as_str(), + tbl_name.as_str(), + col_name.as_str() + ); + }; + let normalized_col_name = normalize_ident(col_name.as_str()); + + // Create a QualifiedName and use existing resolve_database_id method + let qualified_name = ast::QualifiedName { + db_name: Some(db_name.clone()), + name: tbl_name.clone(), + alias: None, + }; + let database_id = connection.resolve_database_id(&qualified_name)?; + + // Get the table from the specified database + let table = connection + .with_schema(database_id, |schema| schema.get_table(tbl_name.as_str())) + .ok_or_else(|| { + crate::LimboError::ParseError(format!( + "no such table: {}.{}", + db_name.as_str(), + tbl_name.as_str() + )) + })?; + + // Find the column in the table + let col_idx = table + .columns() + .iter() + .position(|c| { + c.name + .as_ref() + .is_some_and(|name| name.eq_ignore_ascii_case(&normalized_col_name)) + }) + .ok_or_else(|| { + crate::LimboError::ParseError(format!( + "Column: {}.{}.{} not found", + db_name.as_str(), + tbl_name.as_str(), + col_name.as_str() + )) + })?; + + let col = table.columns().get(col_idx).unwrap(); + + // Check if this is a rowid alias + let is_rowid_alias = col.is_rowid_alias; + + // Convert to Column expression - since this is a cross-database reference, + // we need to create a synthetic table reference for it + // For now, we'll error if the table isn't already in the referenced tables + let normalized_tbl_name = normalize_ident(tbl_name.as_str()); + let matching_tbl = referenced_tables + .find_table_and_internal_id_by_identifier(&normalized_tbl_name); + + if let Some((tbl_id, _)) = matching_tbl { + // Table is already in referenced tables, use existing internal ID + *expr = Expr::Column { + database: Some(database_id), + table: tbl_id, + column: col_idx, + is_rowid_alias, + }; + referenced_tables.mark_column_used(tbl_id, col_idx); + } else { + return Err(crate::LimboError::ParseError(format!( + "table {normalized_tbl_name} is not in FROM clause - cross-database column references require the table to be explicitly joined" + ))); + } + } + _ => {} } Ok(WalkControl::Continue) }, diff --git a/testing/select.test b/testing/select.test index 5b35d3eda..c434e7bb7 100755 --- a/testing/select.test +++ b/testing/select.test @@ -812,3 +812,7 @@ do_execsql_test_on_specific_db {:memory:} null-in-search { 2|2 2|2} +do_execsql_test_in_memory_any_error limit-column-reference-error { + CREATE TABLE t(a); + SELECT * FROM t LIMIT (t.a); +} From ae975afe49766980a0c5ab4ebabb97b6e943c089 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Mon, 29 Sep 2025 19:16:03 -0400 Subject: [PATCH 072/428] Remove unnecessary FK resolution on schema parsing --- core/incremental/compiler.rs | 7 ++ core/incremental/view.rs | 4 + core/schema.rs | 125 +------------------------------ core/translate/insert.rs | 9 ++- core/translate/logical.rs | 3 + core/translate/optimizer/join.rs | 1 + testing/all.test | 1 + 7 files changed, 25 insertions(+), 125 deletions(-) diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index f87792e1a..84f50bfc6 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -2245,6 +2245,7 @@ mod tests { is_strict: false, has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }; schema.add_btree_table(Arc::new(users_table)); @@ -2298,6 +2299,7 @@ mod tests { is_strict: false, has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }; schema.add_btree_table(Arc::new(products_table)); @@ -2363,6 +2365,7 @@ mod tests { has_autoincrement: false, is_strict: false, unique_sets: vec![], + foreign_keys: vec![], }; schema.add_btree_table(Arc::new(orders_table)); @@ -2401,6 +2404,7 @@ mod tests { is_strict: false, has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }; schema.add_btree_table(Arc::new(customers_table)); @@ -2463,6 +2467,7 @@ mod tests { is_strict: false, has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }; schema.add_btree_table(Arc::new(purchases_table)); @@ -2513,6 +2518,7 @@ mod tests { is_strict: false, has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }; schema.add_btree_table(Arc::new(vendors_table)); @@ -2550,6 +2556,7 @@ mod tests { is_strict: false, has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }; schema.add_btree_table(Arc::new(sales_table)); diff --git a/core/incremental/view.rs b/core/incremental/view.rs index f82aeadcf..fc4a8bba6 100644 --- a/core/incremental/view.rs +++ b/core/incremental/view.rs @@ -1411,6 +1411,7 @@ mod tests { has_rowid: true, is_strict: false, unique_sets: vec![], + foreign_keys: vec![], has_autoincrement: false, }; @@ -1460,6 +1461,7 @@ mod tests { has_rowid: true, is_strict: false, has_autoincrement: false, + foreign_keys: vec![], unique_sets: vec![], }; @@ -1509,6 +1511,7 @@ mod tests { has_rowid: true, is_strict: false, has_autoincrement: false, + foreign_keys: vec![], unique_sets: vec![], }; @@ -1558,6 +1561,7 @@ mod tests { has_rowid: true, // Has implicit rowid but no alias is_strict: false, has_autoincrement: false, + foreign_keys: vec![], unique_sets: vec![], }; diff --git a/core/schema.rs b/core/schema.rs index 4ef57684b..b79b37017 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -300,18 +300,9 @@ impl Schema { self.views.get(&name).cloned() } - pub fn add_btree_table(&mut self, mut table: Arc) -> Result<()> { + pub fn add_btree_table(&mut self, table: Arc) { let name = normalize_ident(&table.name); - let mut resolved_fks: Vec> = Vec::with_capacity(table.foreign_keys.len()); - // when we built the BTreeTable from SQL, we didn't have access to the Schema to validate - // any FK relationships, so we do that now - self.validate_and_normalize_btree_foreign_keys(&table, &mut resolved_fks)?; - - // there should only be 1 reference to the table so Arc::make_mut shouldnt copy - let t = Arc::make_mut(&mut table); - t.foreign_keys = resolved_fks; self.tables.insert(name, Table::BTree(table).into()); - Ok(()) } pub fn add_virtual_table(&mut self, table: Arc) { @@ -769,10 +760,7 @@ impl Schema { } } - if let Some(mv_store) = mv_store { - mv_store.mark_table_as_loaded(root_page); - } - self.add_btree_table(Arc::new(table))?; + self.add_btree_table(Arc::new(table)); } } "index" => { @@ -883,114 +871,6 @@ impl Schema { Ok(()) } - fn validate_and_normalize_btree_foreign_keys( - &self, - table: &Arc, - resolved_fks: &mut Vec>, - ) -> Result<()> { - for key in &table.foreign_keys { - let Some(parent) = self.get_btree_table(&key.parent_table) else { - return Err(LimboError::ParseError(format!( - "Foreign key references missing table {}", - key.parent_table - ))); - }; - - let child_cols: Vec = key - .child_columns - .iter() - .map(|c| normalize_ident(c)) - .collect(); - for c in &child_cols { - if table.get_column(c).is_none() && !c.eq_ignore_ascii_case("rowid") { - return Err(LimboError::ParseError(format!( - "Foreign key child column not found: {}.{}", - table.name, c - ))); - } - } - - // Resolve parent cols: - // if explicitly listed, we normalize them - // else, we default to parent's PRIMARY KEY columns. - // if parent has no declared PK, SQLite defaults to single "rowid" - let parent_cols: Vec = if key.parent_columns.is_empty() { - if !parent.primary_key_columns.is_empty() { - parent - .primary_key_columns - .iter() - .map(|(n, _)| normalize_ident(n)) - .collect() - } else { - vec!["rowid".to_string()] - } - } else { - key.parent_columns - .iter() - .map(|c| normalize_ident(c)) - .collect() - }; - - if parent_cols.len() != child_cols.len() { - return Err(LimboError::ParseError(format!( - "Foreign key column count mismatch: child {child_cols:?} vs parent {parent_cols:?}", - ))); - } - - // Ensure each parent col exists - for col in &parent_cols { - if !col.eq_ignore_ascii_case("rowid") && parent.get_column(col).is_none() { - return Err(LimboError::ParseError(format!( - "Foreign key references missing column {}.{col}", - key.parent_table - ))); - } - } - - // Parent side must be UNIQUE/PK, rowid counts as unique - let parent_is_pk = !parent.primary_key_columns.is_empty() - && parent_cols.len() == parent.primary_key_columns.len() - && parent_cols - .iter() - .zip(&parent.primary_key_columns) - .all(|(a, (b, _))| a.eq_ignore_ascii_case(b)); - - let parent_is_rowid = - parent_cols.len() == 1 && parent_cols[0].eq_ignore_ascii_case("rowid"); - - let parent_is_unique = parent_is_pk - || parent_is_rowid - || self.get_indices(&parent.name).any(|idx| { - idx.unique - && idx.columns.len() == parent_cols.len() - && idx - .columns - .iter() - .zip(&parent_cols) - .all(|(ic, pc)| ic.name.eq_ignore_ascii_case(pc)) - }); - - if !parent_is_unique { - return Err(LimboError::ParseError(format!( - "Foreign key references {}({:?}) which is not UNIQUE or PRIMARY KEY", - key.parent_table, parent_cols - ))); - } - - let resolved = ForeignKey { - parent_table: normalize_ident(&key.parent_table), - parent_columns: parent_cols, - child_columns: child_cols, - on_delete: key.on_delete, - on_update: key.on_update, - on_insert: key.on_insert, - deferred: key.deferred, - }; - resolved_fks.push(Arc::new(resolved)); - } - Ok(()) - } - pub fn incoming_fks_to(&self, table_name: &str) -> Vec { let target = normalize_ident(table_name); let mut out = vec![]; @@ -2909,6 +2789,7 @@ mod tests { hidden: false, }], unique_sets: vec![], + foreign_keys: vec![], }; let result = diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 14511bb0f..8f4b8158f 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -83,11 +83,13 @@ pub fn translate_insert( ); } let table_name = &tbl_name.name; - let has_child_fks = connection.foreign_keys_enabled() + let fk_enabled = connection.foreign_keys_enabled(); + let has_child_fks = fk_enabled && !resolver .schema .get_foreign_keys_for_table(table_name.as_str()) .is_empty(); + let has_parent_fks = fk_enabled && resolver.schema.any_incoming_fk_to(table_name.as_str()); // Check if this is a system table that should be protected from direct writes if crate::schema::is_system_table(table_name.as_str()) { @@ -241,7 +243,7 @@ pub fn translate_insert( connection, )?; - if has_child_fks { + if has_child_fks || has_parent_fks { program.emit_insn(Insn::FkCounter { increment_value: 1, check_abort: false, @@ -1042,7 +1044,7 @@ pub fn translate_insert( } } } - if has_child_fks { + if has_child_fks || has_parent_fks { emit_fk_checks_for_insert(&mut program, resolver, &insertion, table_name.as_str())?; } @@ -1144,6 +1146,7 @@ pub fn translate_insert( &mut result_columns, cdc_table.as_ref().map(|c| c.0), row_done_label, + connection, )?; } else { // UpsertDo::Nothing case diff --git a/core/translate/logical.rs b/core/translate/logical.rs index 349b5f64b..6564e2ba3 100644 --- a/core/translate/logical.rs +++ b/core/translate/logical.rs @@ -2389,6 +2389,7 @@ mod tests { name: "users".to_string(), root_page: 2, primary_key_columns: vec![("id".to_string(), turso_parser::ast::SortOrder::Asc)], + foreign_keys: vec![], columns: vec![ SchemaColumn { name: Some("id".to_string()), @@ -2505,6 +2506,7 @@ mod tests { is_strict: false, has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }; schema.add_btree_table(Arc::new(orders_table)); @@ -2567,6 +2569,7 @@ mod tests { is_strict: false, has_autoincrement: false, unique_sets: vec![], + foreign_keys: vec![], }; schema.add_btree_table(Arc::new(products_table)); diff --git a/core/translate/optimizer/join.rs b/core/translate/optimizer/join.rs index db5e71000..fe1a41bbb 100644 --- a/core/translate/optimizer/join.rs +++ b/core/translate/optimizer/join.rs @@ -1664,6 +1664,7 @@ mod tests { has_rowid: true, is_strict: false, unique_sets: vec![], + foreign_keys: vec![], }) } diff --git a/testing/all.test b/testing/all.test index 4d578e31d..602174abf 100755 --- a/testing/all.test +++ b/testing/all.test @@ -47,3 +47,4 @@ source $testdir/vtab.test source $testdir/upsert.test source $testdir/window.test source $testdir/partial_idx.test +source $testdir/foreign_keys.test From 37c8abf247acff95ca1eed173d28faf83b8f6580 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Mon, 29 Sep 2025 19:44:39 -0400 Subject: [PATCH 073/428] Fix schema representation and methods for ForeignKey resolution --- core/schema.rs | 185 +++++-- core/translate/emitter.rs | 955 +++++++++++++++++++++++++++++++++- core/translate/insert.rs | 5 +- core/translate/pragma.rs | 10 +- core/translate/upsert.rs | 179 ++++++- tests/integration/fuzz/mod.rs | 421 ++++++++++++++- 6 files changed, 1705 insertions(+), 50 deletions(-) diff --git a/core/schema.rs b/core/schema.rs index b79b37017..6619aaaa2 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -395,31 +395,6 @@ impl Schema { self.indexes_enabled } - pub fn get_foreign_keys_for_table(&self, table_name: &str) -> Vec> { - self.get_table(table_name) - .and_then(|t| t.btree()) - .map(|t| t.foreign_keys.clone()) - .unwrap_or_default() - } - - /// Get foreign keys where this table is the parent (referenced by other tables) - pub fn get_referencing_foreign_keys( - &self, - parent_table: &str, - ) -> Vec<(String, Arc)> { - let mut refs = Vec::new(); - for table in self.tables.values() { - if let Table::BTree(btree) = table.deref() { - for fk in &btree.foreign_keys { - if fk.parent_table == parent_table { - refs.push((btree.name.as_str().to_string(), fk.clone())); - } - } - } - } - refs - } - /// Update [Schema] by scanning the first root page (sqlite_schema) pub fn make_from_btree( &mut self, @@ -871,11 +846,9 @@ impl Schema { Ok(()) } - pub fn incoming_fks_to(&self, table_name: &str) -> Vec { + pub fn incoming_fks_to(&self, table_name: &str) -> Vec { let target = normalize_ident(table_name); let mut out = vec![]; - - // Resolve the parent table once let parent_tbl = self .get_btree_table(&target) .expect("incoming_fks_to: parent table must exist"); @@ -995,7 +968,7 @@ impl Schema { find_parent_unique(&parent_cols) }; - out.push(IncomingFkRef { + out.push(ResolvedFkRef { child_table: Arc::clone(&child), fk: Arc::clone(fk), parent_cols, @@ -1010,6 +983,117 @@ impl Schema { out } + pub fn outgoing_fks_of(&self, child_table: &str) -> Vec { + let child_name = normalize_ident(child_table); + let Some(child) = self.get_btree_table(&child_name) else { + return vec![]; + }; + + // Helper to find the UNIQUE/index on the parent that matches the resolved parent cols + let find_parent_unique = + |parent_tbl: &BTreeTable, cols: &Vec| -> Option> { + let matches_pk = !parent_tbl.primary_key_columns.is_empty() + && parent_tbl.primary_key_columns.len() == cols.len() + && parent_tbl + .primary_key_columns + .iter() + .zip(cols.iter()) + .all(|((n, _), c)| n.eq_ignore_ascii_case(c)); + if matches_pk { + return None; + } + self.get_indices(&parent_tbl.name) + .find(|idx| { + idx.unique + && idx.columns.len() == cols.len() + && idx + .columns + .iter() + .zip(cols.iter()) + .all(|(ic, pc)| ic.name.eq_ignore_ascii_case(pc)) + }) + .cloned() + }; + + let mut out = Vec::new(); + for fk in &child.foreign_keys { + let parent_name = normalize_ident(&fk.parent_table); + let Some(parent_tbl) = self.get_btree_table(&parent_name) else { + continue; + }; + + // Normalize columns (same rules you used in validation) + let child_cols: Vec = fk + .child_columns + .iter() + .map(|s| normalize_ident(s)) + .collect(); + let parent_cols: Vec = if fk.parent_columns.is_empty() { + if !parent_tbl.primary_key_columns.is_empty() { + parent_tbl + .primary_key_columns + .iter() + .map(|(n, _)| normalize_ident(n)) + .collect() + } else { + vec!["rowid".to_string()] + } + } else { + fk.parent_columns + .iter() + .map(|s| normalize_ident(s)) + .collect() + }; + + // Positions + let child_pos: Vec = child_cols + .iter() + .map(|c| child.get_column(c).expect("child col missing").0) + .collect(); + let parent_pos: Vec = parent_cols + .iter() + .map(|c| { + parent_tbl + .get_column(c) + .map(|(i, _)| i) + .or_else(|| c.eq_ignore_ascii_case("rowid").then_some(0)) + .expect("parent col missing") + }) + .collect(); + + // Parent uses rowid? + let parent_uses_rowid = parent_cols.len() == 1 && { + let c = parent_cols[0].as_str(); + c.eq_ignore_ascii_case("rowid") + || parent_tbl.columns.iter().any(|col| { + col.is_rowid_alias + && col + .name + .as_deref() + .is_some_and(|n| n.eq_ignore_ascii_case(c)) + }) + }; + + let parent_unique_index = if parent_uses_rowid { + None + } else { + find_parent_unique(&parent_tbl, &parent_cols) + }; + + out.push(ResolvedFkRef { + child_table: Arc::clone(&child), + fk: Arc::clone(fk), + parent_cols, + child_cols, + child_pos, + parent_pos, + parent_uses_rowid, + parent_unique_index, + }); + } + out + } + pub fn any_incoming_fk_to(&self, table_name: &str) -> bool { self.tables.values().any(|t| { let Some(bt) = t.btree() else { @@ -1020,6 +1104,37 @@ impl Schema { .any(|fk| fk.parent_table == table_name) }) } + + /// Returns if this table declares any outgoing FKs (is a child of some parent) + pub fn has_child_fks(&self, table_name: &str) -> bool { + self.get_table(table_name) + .and_then(|t| t.btree()) + .is_some_and(|t| !t.foreign_keys.is_empty()) + } + + /// Return the *declared* (unresolved) FKs for a table. Callers that need + /// positions/rowid/unique info should use `incoming_fks_to` instead. + pub fn get_fks_for_table(&self, table_name: &str) -> Vec> { + self.get_table(table_name) + .and_then(|t| t.btree()) + .map(|t| t.foreign_keys.clone()) + .unwrap_or_default() + } + + /// Return pairs of (child_table_name, FK) for FKs that reference `parent_table` + pub fn get_referencing_fks(&self, parent_table: &str) -> Vec<(String, Arc)> { + let mut refs = Vec::new(); + for table in self.tables.values() { + if let Table::BTree(btree) = table.deref() { + for fk in &btree.foreign_keys { + if fk.parent_table == parent_table { + refs.push((btree.name.as_str().to_string(), fk.clone())); + } + } + } + } + refs + } } impl Clone for Schema { @@ -1752,11 +1867,11 @@ pub fn _build_pseudo_table(columns: &[ResultColumn]) -> PseudoCursorType { #[derive(Debug, Clone)] pub struct ForeignKey { - /// Columns in this table + /// Columns in this table (child side) pub child_columns: Vec, - /// Referenced table + /// Referenced (parent) table pub parent_table: String, - /// Referenced columns + /// Parent-side referenced columns pub parent_columns: Vec, pub on_delete: RefAct, pub on_update: RefAct, @@ -1765,9 +1880,9 @@ pub struct ForeignKey { pub deferred: bool, } -/// A single foreign key where `parent_table == target`. +/// A single resolved foreign key where `parent_table == target`. #[derive(Clone, Debug)] -pub struct IncomingFkRef { +pub struct ResolvedFkRef { /// Child table that owns the FK. pub child_table: Arc, /// The FK as declared on the child table. @@ -1788,7 +1903,7 @@ pub struct IncomingFkRef { pub parent_unique_index: Option>, } -impl IncomingFkRef { +impl ResolvedFkRef { /// Returns if any referenced parent column can change when these column positions are updated. pub fn parent_key_may_change( &self, diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 5e60617e6..f569743be 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -1,6 +1,7 @@ // This module contains code for emitting bytecode instructions for SQL query execution. // It handles translating high-level SQL operations into low-level bytecode that can be executed by the virtual machine. +use std::collections::HashSet; use std::num::NonZeroUsize; use std::sync::Arc; @@ -23,7 +24,7 @@ use super::select::emit_simple_count; use super::subquery::emit_subqueries; use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY; use crate::function::Func; -use crate::schema::{BTreeTable, Column, Schema, Table, ROWID_SENTINEL}; +use crate::schema::{BTreeTable, Column, ResolvedFkRef, Schema, Table, ROWID_SENTINEL}; use crate::translate::compound_select::emit_program_for_compound_select; use crate::translate::expr::{ emit_returning_results, translate_expr_no_constant_opt, walk_expr_mut, NoConstantOptReason, @@ -431,6 +432,25 @@ fn emit_program_for_delete( }); } + let has_parent_fks = connection.foreign_keys_enabled() && { + let table_name = plan + .table_references + .joined_tables() + .first() + .unwrap() + .table + .get_name(); + resolver.schema.any_incoming_fk_to(table_name) + }; + // Open FK scope for the whole statement + if has_parent_fks { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: true, + }); + } + // Initialize cursors and other resources needed for query execution init_loop( program, @@ -469,7 +489,13 @@ fn emit_program_for_delete( None, )?; program.preassign_label_to_next_insn(after_main_loop_label); - + if has_parent_fks { + program.emit_insn(Insn::FkCounter { + increment_value: -1, + check_abort: true, + is_scope: true, + }); + } // Finalize program program.result_columns = plan.result_columns; program.table_references.extend(plan.table_references); @@ -514,6 +540,19 @@ fn emit_delete_insns( dest: key_reg, }); + if connection.foreign_keys_enabled() + && unsafe { &*table_reference }.btree().is_some() + && t_ctx.resolver.schema.any_incoming_fk_to(table_name) + { + emit_fk_parent_existence_checks( + program, + &t_ctx.resolver, + table_name, + main_table_cursor_id, + key_reg, + )?; + } + if unsafe { &*table_reference }.virtual_table().is_some() { let conflict_action = 0u16; let start_reg = key_reg; @@ -692,6 +731,518 @@ fn emit_delete_insns( Ok(()) } +/// Emit parent-side FK counter maintenance for UPDATE on a table with a composite PK. +/// +/// For every child FK that targets `parent_table_name`: +/// 1. Pass 1: If any child row currently references the OLD parent key, +/// increment the global FK counter (deferred violation potential). +/// We try an index probe on child(child_cols...) if available, else do a table scan. +/// 2. Pass 2: If any child row references the NEW parent key, decrement the counter +/// (because the reference would be “retargeted” by the update). +pub fn emit_fk_parent_pk_change_counters( + program: &mut ProgramBuilder, + incoming: &[ResolvedFkRef], + resolver: &Resolver, + old_pk_start: usize, + new_pk_start: usize, + n_cols: usize, +) -> crate::Result<()> { + if incoming.is_empty() { + return Ok(()); + } + for fk_ref in incoming.iter() { + let child_tbl = &fk_ref.child_table; + let child_cols = &fk_ref.fk.child_columns; + // Prefer exact-prefix index on child + let idx = resolver.schema.get_indices(&child_tbl.name).find(|ix| { + ix.columns.len() == child_cols.len() + && ix + .columns + .iter() + .zip(child_cols.iter()) + .all(|(ic, cc)| ic.name.eq_ignore_ascii_case(cc)) + }); + + if let Some(ix) = idx { + let icur = program.alloc_cursor_id(CursorType::BTreeIndex(ix.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: icur, + root_page: ix.root_page, + db: 0, + }); + + // Build child-probe key from OLD parent PK (1:1 map ensured by the column-name equality above) + // We just copy the OLD PK registers, apply index affinities before the probe. + let probe_start = old_pk_start; + + // Apply affinities for composite comparison + let aff: String = ix + .columns + .iter() + .map(|ic| { + let (_, col) = child_tbl + .get_column(&ic.name) + .expect("indexed child column not found"); + col.affinity().aff_mask() + }) + .collect(); + if let Some(count) = NonZeroUsize::new(n_cols) { + program.emit_insn(Insn::Affinity { + start_reg: probe_start, + count, + affinities: aff, + }); + } + + let found = program.allocate_label(); + program.emit_insn(Insn::Found { + cursor_id: icur, + target_pc: found, + record_reg: probe_start, + num_regs: n_cols, + }); + + // Not found => no increment + program.emit_insn(Insn::Close { cursor_id: icur }); + let skip = program.allocate_label(); + program.emit_insn(Insn::Goto { target_pc: skip }); + + // Found => increment + program.preassign_label_to_next_insn(found); + program.emit_insn(Insn::Close { cursor_id: icur }); + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: false, + }); + program.preassign_label_to_next_insn(skip); + } else { + // Table-scan fallback with per-column checks (jump-if-NULL semantics) + let ccur = program.alloc_cursor_id(CursorType::BTreeTable(child_tbl.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: ccur, + root_page: child_tbl.root_page, + db: 0, + }); + + let done = program.allocate_label(); + program.emit_insn(Insn::Rewind { + cursor_id: ccur, + pc_if_empty: done, + }); + + let loop_top = program.allocate_label(); + let next_row = program.allocate_label(); + program.preassign_label_to_next_insn(loop_top); + + for (i, child_name) in child_cols.iter().enumerate() { + let (pos, _) = child_tbl.get_column(child_name).ok_or_else(|| { + crate::LimboError::InternalError(format!("child col {child_name} missing")) + })?; + let tmp = program.alloc_register(); + program.emit_insn(Insn::Column { + cursor_id: ccur, + column: pos, + dest: tmp, + default: None, + }); + + // Treat NULL as non-match: jump away immediately + program.emit_insn(Insn::IsNull { + reg: tmp, + target_pc: next_row, + }); + + // Eq(tmp, old_pk[i]) with Binary collation, jump-if-NULL enabled + let cont = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: tmp, + rhs: old_pk_start + i, + target_pc: cont, + flags: CmpInsFlags::default().jump_if_null(), + collation: Some(super::collate::CollationSeq::Binary), + }); + program.emit_insn(Insn::Goto { + target_pc: next_row, + }); + program.preassign_label_to_next_insn(cont); + } + + // All columns matched OLD -> increment + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: false, + }); + + program.preassign_label_to_next_insn(next_row); + program.emit_insn(Insn::Next { + cursor_id: ccur, + pc_if_next: loop_top, + }); + program.preassign_label_to_next_insn(done); + program.emit_insn(Insn::Close { cursor_id: ccur }); + } + } + + // PASS 2: count children of NEW key + for fk_ref in incoming.iter() { + let child_tbl = &fk_ref.child_table; + let child_cols = &fk_ref.fk.child_columns; + + let idx = resolver.schema.get_indices(&child_tbl.name).find(|ix| { + ix.columns.len() == child_cols.len() + && ix + .columns + .iter() + .zip(child_cols.iter()) + .all(|(ic, cc)| ic.name.eq_ignore_ascii_case(cc)) + }); + + if let Some(ix) = idx { + let icur = program.alloc_cursor_id(CursorType::BTreeIndex(ix.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: icur, + root_page: ix.root_page, + db: 0, + }); + + // Build probe from NEW PK registers; apply affinities + let probe_start = new_pk_start; + let aff: String = ix + .columns + .iter() + .map(|ic| { + let (_, col) = child_tbl + .get_column(&ic.name) + .expect("indexed child column not found"); + col.affinity().aff_mask() + }) + .collect(); + if let Some(count) = NonZeroUsize::new(n_cols) { + program.emit_insn(Insn::Affinity { + start_reg: probe_start, + count, + affinities: aff, + }); + } + + let found = program.allocate_label(); + program.emit_insn(Insn::Found { + cursor_id: icur, + target_pc: found, + record_reg: probe_start, + num_regs: n_cols, + }); + + // Not found => no decrement + program.emit_insn(Insn::Close { cursor_id: icur }); + let skip = program.allocate_label(); + program.emit_insn(Insn::Goto { target_pc: skip }); + + // Found => decrement + program.preassign_label_to_next_insn(found); + program.emit_insn(Insn::Close { cursor_id: icur }); + program.emit_insn(Insn::FkCounter { + increment_value: -1, + check_abort: false, + is_scope: false, + }); + program.preassign_label_to_next_insn(skip); + } else { + // Table-scan fallback on NEW key + let ccur = program.alloc_cursor_id(CursorType::BTreeTable(child_tbl.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: ccur, + root_page: child_tbl.root_page, + db: 0, + }); + + let done = program.allocate_label(); + program.emit_insn(Insn::Rewind { + cursor_id: ccur, + pc_if_empty: done, + }); + + let loop_top = program.allocate_label(); + let next_row = program.allocate_label(); + program.preassign_label_to_next_insn(loop_top); + + for (i, child_name) in child_cols.iter().enumerate() { + let (pos, _) = child_tbl.get_column(child_name).ok_or_else(|| { + crate::LimboError::InternalError(format!("child col {child_name} missing")) + })?; + let tmp = program.alloc_register(); + program.emit_insn(Insn::Column { + cursor_id: ccur, + column: pos, + dest: tmp, + default: None, + }); + + program.emit_insn(Insn::IsNull { + reg: tmp, + target_pc: next_row, + }); + + let cont = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: tmp, + rhs: new_pk_start + i, + target_pc: cont, + flags: CmpInsFlags::default().jump_if_null(), + collation: Some(super::collate::CollationSeq::Binary), + }); + program.emit_insn(Insn::Goto { + target_pc: next_row, + }); + program.preassign_label_to_next_insn(cont); + } + + // All columns matched NEW: decrement + program.emit_insn(Insn::FkCounter { + increment_value: -1, + check_abort: false, + is_scope: false, + }); + + program.preassign_label_to_next_insn(next_row); + program.emit_insn(Insn::Next { + cursor_id: ccur, + pc_if_next: loop_top, + }); + program.preassign_label_to_next_insn(done); + program.emit_insn(Insn::Close { cursor_id: ccur }); + } + } + Ok(()) +} + +/// Emit checks that prevent updating/deleting a parent row that is still referenced by a child. +/// +/// If the global deferred-FK counter is zero, we skip all checks (fast path for no outstanding refs). +/// For each incoming FK: +/// Build the parent key (in FK parent-column order) from the current row. +/// Probe the child table for any row whose FK columns equal that key. +/// - If an exact child index exists on the FK columns, use `NotFound` against that index. +/// - Otherwise, scan the child table and compare each FK column (NULL short-circuits to “no match”). +/// If a referencing child is found: +/// - Deferred FK: increment counter (violation will be raised at COMMIT). +/// - Immediate FK: raise `SQLITE_CONSTRAINT_FOREIGNKEY` now. +pub fn emit_fk_parent_existence_checks( + program: &mut ProgramBuilder, + resolver: &Resolver, + parent_table_name: &str, + parent_cursor_id: usize, + parent_rowid_reg: usize, +) -> Result<()> { + let after_all = program.allocate_label(); + program.emit_insn(Insn::FkIfZero { + target_pc: after_all, + if_zero: true, + }); + + let parent_bt = resolver + .schema + .get_btree_table(parent_table_name) + .ok_or_else(|| crate::LimboError::InternalError("parent not btree".into()))?; + + for fk_ref in resolver.schema.incoming_fks_to(parent_table_name) { + // Resolve parent key columns + let parent_cols: Vec = if fk_ref.fk.parent_columns.is_empty() { + parent_bt + .primary_key_columns + .iter() + .map(|(n, _)| n.clone()) + .collect() + } else { + fk_ref.fk.parent_columns.clone() + }; + + // Load parent key values for THIS row into regs, in parent_cols order + let parent_cols_len = parent_cols.len(); + let parent_key_start = program.alloc_registers(parent_cols_len); + for (i, pcol) in parent_cols.iter().enumerate() { + let src = if pcol.eq_ignore_ascii_case("rowid") { + parent_rowid_reg + } else { + let (pos, col) = parent_bt + .get_column(&normalize_ident(pcol)) + .ok_or_else(|| { + crate::LimboError::InternalError(format!("col {pcol} missing")) + })?; + if col.is_rowid_alias { + parent_rowid_reg + } else { + // read current cell's column value + program.emit_insn(Insn::Column { + cursor_id: parent_cursor_id, + column: pos, + dest: parent_key_start + i, + default: None, + }); + continue; + } + }; + program.emit_insn(Insn::Copy { + src_reg: src, + dst_reg: parent_key_start + i, + extra_amount: 0, + }); + } + + // Build child-side probe key in child_columns order, from parent_key_start + // + // Map parent_col to child_col position 1:1 + let child_cols = &fk_ref.fk.child_columns; + // Try to find an index on child(child_cols...) to do an existance check + let child_idx = resolver + .schema + .get_indices(&fk_ref.child_table.name) + .find(|idx| { + idx.columns.len() == child_cols.len() + && idx + .columns + .iter() + .zip(child_cols.iter()) + .all(|(ic, cc)| ic.name.eq_ignore_ascii_case(cc)) + }); + + if let Some(idx) = child_idx { + // Index existence probe: Found -> violation + let icur = program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: icur, + root_page: idx.root_page, + db: 0, + }); + + // Pack the child key regs from the parent key regs in fk order. + // Same order because we matched columns 1:1 above + let probe_start = program.alloc_registers(parent_cols_len); + for i in 0..parent_cols_len { + program.emit_insn(Insn::Copy { + src_reg: parent_key_start + i, + dst_reg: probe_start + i, + extra_amount: 0, + }); + } + + let ok = program.allocate_label(); + program.emit_insn(Insn::NotFound { + cursor_id: icur, + target_pc: ok, + record_reg: probe_start, + num_regs: parent_cols_len, + }); + + // found referencing child row = violation path + program.emit_insn(Insn::Close { cursor_id: icur }); + if fk_ref.fk.deferred { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: false, + }); + } else { + program.emit_insn(Insn::Halt { + err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, + description: "FOREIGN KEY constraint failed".to_string(), + }); + } + program.preassign_label_to_next_insn(ok); + program.emit_insn(Insn::Close { cursor_id: icur }); + } else { + // Fallback: table-scan the child table + let ccur = program.alloc_cursor_id(CursorType::BTreeTable(fk_ref.child_table.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: ccur, + root_page: fk_ref.child_table.root_page, + db: 0, + }); + + let done = program.allocate_label(); + program.emit_insn(Insn::Rewind { + cursor_id: ccur, + pc_if_empty: done, + }); + + // Loop labels local to this scan + let loop_top = program.allocate_label(); + let next_row = program.allocate_label(); + + program.preassign_label_to_next_insn(loop_top); + + // For each FK column: require a match, if NULL or mismatch -> next_row + for (i, child_col) in child_cols.iter().enumerate() { + let (pos, _) = fk_ref + .child_table + .get_column(&normalize_ident(child_col)) + .ok_or_else(|| { + crate::LimboError::InternalError(format!("child col {child_col} missing")) + })?; + + let tmp = program.alloc_register(); + program.emit_insn(Insn::Column { + cursor_id: ccur, + column: pos, + dest: tmp, + default: None, + }); + + // NULL FK value => this child row cannot reference the parent, skip row + program.emit_insn(Insn::IsNull { + reg: tmp, + target_pc: next_row, + }); + + // Equal? continue to check next column; else jump to next_row + let cont_i = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: tmp, + rhs: parent_key_start + i, + target_pc: cont_i, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + // Not equal -> skip this child row + program.emit_insn(Insn::Goto { + target_pc: next_row, + }); + + // Equal path resumes here, then we check the next column + program.preassign_label_to_next_insn(cont_i); + } + + // If we reached here, all FK columns matched, violation + if fk_ref.fk.deferred { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: false, + }); + } else { + program.emit_insn(Insn::Halt { + err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, + description: "FOREIGN KEY constraint failed".to_string(), + }); + } + + // Advance to next child row and loop + program.preassign_label_to_next_insn(next_row); + program.emit_insn(Insn::Next { + cursor_id: ccur, + pc_if_next: loop_top, + }); + + program.preassign_label_to_next_insn(done); + program.emit_insn(Insn::Close { cursor_id: ccur }); + } + } + program.resolve_label(after_all, program.offset()); + Ok(()) +} + #[instrument(skip_all, level = Level::DEBUG)] fn emit_program_for_update( connection: &Arc, @@ -736,6 +1287,25 @@ fn emit_program_for_update( program.decr_nesting(); } + let fk_enabled = connection.foreign_keys_enabled(); + let table_name = plan + .table_references + .joined_tables() + .first() + .unwrap() + .table + .get_name(); + let has_child_fks = fk_enabled && !resolver.schema.get_fks_for_table(table_name).is_empty(); + let has_parent_fks = fk_enabled && resolver.schema.any_incoming_fk_to(table_name); + // statement-level FK scope open + if has_child_fks || has_parent_fks { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: true, + }); + } + // Initialize the main loop init_loop( program, @@ -803,6 +1373,13 @@ fn emit_program_for_update( program.preassign_label_to_next_insn(after_main_loop_label); + if has_child_fks || has_parent_fks { + program.emit_insn(Insn::FkCounter { + increment_value: -1, + check_abort: true, + is_scope: true, + }); + } after(program); program.result_columns = plan.returning.unwrap_or_default(); @@ -1067,6 +1644,234 @@ fn emit_update_insns( } } + if connection.foreign_keys_enabled() { + let rowid_new_reg = rowid_set_clause_reg.unwrap_or(beg); + if let Some(table_btree) = unsafe { &*table_ref }.btree() { + //first, stablize the image of the NEW row in the registers + if !table_btree.primary_key_columns.is_empty() { + let set_cols: std::collections::HashSet = plan + .set_clauses + .iter() + .filter_map(|(i, _)| if *i == ROWID_SENTINEL { None } else { Some(*i) }) + .collect(); + for (pk_name, _) in &table_btree.primary_key_columns { + let (pos, col) = table_btree.get_column(pk_name).unwrap(); + if !set_cols.contains(&pos) { + if col.is_rowid_alias { + program.emit_insn(Insn::Copy { + src_reg: rowid_new_reg, + dst_reg: start + pos, + extra_amount: 0, + }); + } else { + program.emit_insn(Insn::Column { + cursor_id, + column: pos, + dest: start + pos, + default: None, + }); + } + } + } + } + if t_ctx.resolver.schema.has_child_fks(table_name) { + // Child-side checks: + // this ensures updated row still satisfies child FKs that point OUT from this table + emit_fk_child_existence_checks( + program, + &t_ctx.resolver, + &table_btree, + table_name, + start, + rowid_new_reg, + &plan + .set_clauses + .iter() + .map(|(i, _)| *i) + .collect::>(), + )?; + } + // Parent-side checks: + // We only need to do work if the referenced key (the parent key) might change. + // we detect that by comparing OLD vs NEW primary key representation + // then run parent FK checks only when it actually changes. + if t_ctx.resolver.schema.any_incoming_fk_to(table_name) { + let updated_parent_positions: HashSet = + plan.set_clauses.iter().map(|(i, _)| *i).collect(); + + // If no incoming FK’s parent key can be affected by these updates, skip the whole parent-FK block. + let incoming = t_ctx.resolver.schema.incoming_fks_to(table_name); + let parent_tbl = &table_btree; + let maybe_affects_parent_key = incoming + .iter() + .any(|r| r.parent_key_may_change(&updated_parent_positions, parent_tbl)); + if maybe_affects_parent_key { + let pk_len = table_btree.primary_key_columns.len(); + match pk_len { + 0 => { + // Rowid table: the implicit PK is rowid. + // If rowid is unchanged then we skip, else check that no child row still references the OLD key. + let skip_parent_fk = program.allocate_label(); + let old_rowid_reg = beg; + let new_rowid_reg = rowid_set_clause_reg.unwrap_or(beg); + + program.emit_insn(Insn::Eq { + lhs: new_rowid_reg, + rhs: old_rowid_reg, + target_pc: skip_parent_fk, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + // Rowid changed: check incoming FKs (children) that reference this parent row + emit_fk_parent_existence_checks( + program, + &t_ctx.resolver, + table_name, + cursor_id, + old_rowid_reg, + )?; + program.preassign_label_to_next_insn(skip_parent_fk); + } + 1 => { + // Single-column declared PK, may be a rowid alias or a real column. + // If PK value unchanged then skip, else verify no child still references OLD key. + let (pk_name, _) = &table_btree.primary_key_columns[0]; + let (pos, col) = table_btree.get_column(pk_name).unwrap(); + + let old_reg = program.alloc_register(); + if col.is_rowid_alias { + program.emit_insn(Insn::RowId { + cursor_id, + dest: old_reg, + }); + } else { + program.emit_insn(Insn::Column { + cursor_id, + column: pos, + dest: old_reg, + default: None, + }); + } + let new_reg = if col.is_rowid_alias { + rowid_new_reg + } else { + start + pos + }; + + let skip_parent_fk = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: old_reg, + rhs: new_reg, + target_pc: skip_parent_fk, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + emit_fk_parent_existence_checks( + program, + &t_ctx.resolver, + table_name, + cursor_id, + beg, + )?; + program.preassign_label_to_next_insn(skip_parent_fk); + } + _ => { + // Composite PK: + // 1. Materialize OLD PK vector from current row. + // 2. Materialize NEW PK vector from updated registers. + // 3. If any component differs, the PK changes -> run composite parent-FK update flow. + let old_pk_start = program.alloc_registers(pk_len); + for (i, (pk_name, _)) in + table_btree.primary_key_columns.iter().enumerate() + { + let (pos, col) = table_btree.get_column(pk_name).unwrap(); + if col.is_rowid_alias { + program.emit_insn(Insn::Copy { + src_reg: beg, + dst_reg: old_pk_start + i, + extra_amount: 0, + }); + } else { + program.emit_insn(Insn::Column { + cursor_id, + column: pos, + dest: old_pk_start + i, + default: None, + }); + } + } + + // Build NEW PK values from the updated registers + let new_pk_start = program.alloc_registers(pk_len); + for (i, (pk_name, _)) in + table_btree.primary_key_columns.iter().enumerate() + { + let (pos, col) = table_btree.get_column(pk_name).unwrap(); + let src = if col.is_rowid_alias { + rowid_new_reg + } else { + start + pos // Updated value from SET clause + }; + program.emit_insn(Insn::Copy { + src_reg: src, + dst_reg: new_pk_start + i, + extra_amount: 0, + }); + } + + // Compare OLD vs NEW to see if PK is changing + let skip_parent_fk = program.allocate_label(); + let pk_changed = program.allocate_label(); + + for i in 0..pk_len { + if i == pk_len - 1 { + // Last comparison, if equal, all are equal + program.emit_insn(Insn::Eq { + lhs: old_pk_start + i, + rhs: new_pk_start + i, + target_pc: skip_parent_fk, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + // Not equal - PK is changing + program.emit_insn(Insn::Goto { + target_pc: pk_changed, + }); + } else { + // Not last comparison + let next_check = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: old_pk_start + i, + rhs: new_pk_start + i, + target_pc: next_check, // Equal, check next component + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + // Not equal - PK is changing + program.emit_insn(Insn::Goto { + target_pc: pk_changed, + }); + program.preassign_label_to_next_insn(next_check); + } + } + program.preassign_label_to_next_insn(pk_changed); + // PK changed: maintain the deferred FK counter in two passes + emit_fk_parent_pk_change_counters( + program, + &incoming, + &t_ctx.resolver, + old_pk_start, + new_pk_start, + pk_len, + )?; + program.preassign_label_to_next_insn(skip_parent_fk); + } + } + } + } + } + } + for (index, (idx_cursor_id, record_reg)) in plan.indexes_to_update.iter().zip(&index_cursors) { // We need to know whether or not the OLD values satisfied the predicate on the // partial index, so we can know whether or not to delete the old index entry, @@ -1518,6 +2323,152 @@ fn emit_update_insns( Ok(()) } +pub fn emit_fk_child_existence_checks( + program: &mut ProgramBuilder, + resolver: &Resolver, + table: &BTreeTable, + table_name: &str, + start_reg: usize, + rowid_reg: usize, + updated_cols: &HashSet, +) -> Result<()> { + let after_all = program.allocate_label(); + program.emit_insn(Insn::FkIfZero { + target_pc: after_all, + if_zero: true, + }); + + for fk_ref in resolver.schema.outgoing_fks_of(table_name) { + // Skip when the child key is untouched (including rowid-alias special case) + if !fk_ref.child_key_changed(updated_cols, table) { + continue; + } + + let fk_ok = program.allocate_label(); + + // look for NULLs in any child FK column + for child_name in &fk_ref.child_cols { + let (i, col) = table.get_column(child_name).unwrap(); + let src = if col.is_rowid_alias { + rowid_reg + } else { + start_reg + i + }; + program.emit_insn(Insn::IsNull { + reg: src, + target_pc: fk_ok, + }); + } + + if fk_ref.parent_uses_rowid { + // Fast rowid probe on the parent table + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("Parent must be btree"); + + let pcur = program.alloc_cursor_id(CursorType::BTreeTable(parent_tbl.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: pcur, + root_page: parent_tbl.root_page, + db: 0, + }); + + let (i_child, col_child) = table.get_column(&fk_ref.child_cols[0]).unwrap(); + let val_reg = if col_child.is_rowid_alias { + rowid_reg + } else { + start_reg + i_child + }; + + let violation = program.allocate_label(); + program.emit_insn(Insn::NotExists { + cursor: pcur, + rowid_reg: val_reg, + target_pc: violation, + }); + program.emit_insn(Insn::Close { cursor_id: pcur }); + program.emit_insn(Insn::Goto { target_pc: fk_ok }); + + program.preassign_label_to_next_insn(violation); + program.emit_insn(Insn::Close { cursor_id: pcur }); + if fk_ref.fk.deferred { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: false, + }); + } else { + program.emit_insn(Insn::Halt { + err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, + description: "FOREIGN KEY constraint failed".to_string(), + }); + } + } else { + // Unique-index probe on the parent (already resolved) + let parent_idx = fk_ref + .parent_unique_index + .as_ref() + .expect("parent unique index required"); + let icur = program.alloc_cursor_id(CursorType::BTreeIndex(parent_idx.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: icur, + root_page: parent_idx.root_page, + db: 0, + }); + + // Build probe key from NEW child values in fk order + let n = fk_ref.child_cols.len(); + let probe_start = program.alloc_registers(n); + for (k, child_name) in fk_ref.child_cols.iter().enumerate() { + let (i, col) = table.get_column(child_name).unwrap(); + program.emit_insn(Insn::Copy { + src_reg: if col.is_rowid_alias { + rowid_reg + } else { + start_reg + i + }, + dst_reg: probe_start + k, + extra_amount: 0, + }); + } + + let found = program.allocate_label(); + program.emit_insn(Insn::Found { + cursor_id: icur, + target_pc: found, + record_reg: probe_start, + num_regs: n, + }); + + // Not found => violation + program.emit_insn(Insn::Close { cursor_id: icur }); + if fk_ref.fk.deferred { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + check_abort: false, + is_scope: false, + }); + } else { + program.emit_insn(Insn::Halt { + err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, + description: "FOREIGN KEY constraint failed".to_string(), + }); + } + program.emit_insn(Insn::Goto { target_pc: fk_ok }); + + // Found => OK + program.preassign_label_to_next_insn(found); + program.emit_insn(Insn::Close { cursor_id: icur }); + } + + program.preassign_label_to_next_insn(fk_ok); + } + + program.resolve_label(after_all, program.offset()); + Ok(()) +} + pub fn prepare_cdc_if_necessary( program: &mut ProgramBuilder, schema: &Schema, diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 8f4b8158f..01d1355a1 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -87,7 +87,7 @@ pub fn translate_insert( let has_child_fks = fk_enabled && !resolver .schema - .get_foreign_keys_for_table(table_name.as_str()) + .get_fks_for_table(table_name.as_str()) .is_empty(); let has_parent_fks = fk_enabled && resolver.schema.any_incoming_fk_to(table_name.as_str()); @@ -1146,7 +1146,6 @@ pub fn translate_insert( &mut result_columns, cdc_table.as_ref().map(|c| c.0), row_done_label, - connection, )?; } else { // UpsertDo::Nothing case @@ -1910,7 +1909,7 @@ fn emit_fk_checks_for_insert( }); // Iterate child FKs declared on this table - for fk in resolver.schema.get_foreign_keys_for_table(table_name) { + for fk in resolver.schema.get_fks_for_table(table_name) { let fk_ok = program.allocate_label(); // If any child column is NULL, skip this FK diff --git a/core/translate/pragma.rs b/core/translate/pragma.rs index 57a658212..0a527a68c 100644 --- a/core/translate/pragma.rs +++ b/core/translate/pragma.rs @@ -4,7 +4,7 @@ use chrono::Datelike; use std::sync::Arc; use turso_macros::match_ignore_ascii_case; -use turso_parser::ast::{self, ColumnDefinition, Expr, Literal, Name}; +use turso_parser::ast::{self, ColumnDefinition, Expr, Literal}; use turso_parser::ast::{PragmaName, QualifiedName}; use super::integrity_check::translate_integrity_check; @@ -388,10 +388,10 @@ fn update_pragma( Ok((program, TransactionMode::None)) } PragmaName::ForeignKeys => { - let enabled = match &value { - Expr::Id(name) | Expr::Name(name) => { - let name_str = name.as_str().as_bytes(); - match_ignore_ascii_case!(match name_str { + let enabled = match value { + Expr::Name(name) | Expr::Id(name) => { + let name_bytes = name.as_str().as_bytes(); + match_ignore_ascii_case!(match name_bytes { b"ON" | b"TRUE" | b"YES" | b"1" => true, _ => false, }) diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs index ffcff23e5..f9bfd5af9 100644 --- a/core/translate/upsert.rs +++ b/core/translate/upsert.rs @@ -5,10 +5,15 @@ use std::{collections::HashMap, sync::Arc}; use turso_parser::ast::{self, Upsert}; use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY; +use crate::translate::emitter::{ + emit_fk_child_existence_checks, emit_fk_parent_existence_checks, + emit_fk_parent_pk_change_counters, +}; use crate::translate::expr::{walk_expr, WalkControl}; use crate::translate::insert::format_unique_violation_desc; use crate::translate::planner::ROWID_STRS; use crate::vdbe::insn::CmpInsFlags; +use crate::Connection; use crate::{ bail_parse_error, error::SQLITE_CONSTRAINT_NOTNULL, @@ -346,6 +351,7 @@ pub fn emit_upsert( returning: &mut [ResultSetColumn], cdc_cursor_id: Option, row_done_label: BranchOffset, + connection: &Arc, ) -> crate::Result<()> { // Seek & snapshot CURRENT program.emit_insn(Insn::SeekRowid { @@ -464,10 +470,179 @@ pub fn emit_upsert( } } + let (changed_cols, rowid_changed) = collect_changed_cols(table, set_pairs); + + if let Some(bt) = table.btree() { + if connection.foreign_keys_enabled() { + let rowid_new_reg = new_rowid_reg.unwrap_or(conflict_rowid_reg); + + // Child-side checks + if resolver.schema.has_child_fks(bt.name.as_str()) { + emit_fk_child_existence_checks( + program, + resolver, + &bt, + table.get_name(), + new_start, + rowid_new_reg, + &changed_cols, + )?; + } + + // Parent-side checks only if any incoming FK could care + if resolver.schema.any_incoming_fk_to(table.get_name()) { + // if parent key can't change, skip + let updated_parent_positions: HashSet = + set_pairs.iter().map(|(i, _)| *i).collect(); + let incoming = resolver.schema.incoming_fks_to(table.get_name()); + let parent_key_may_change = incoming + .iter() + .any(|r| r.parent_key_may_change(&updated_parent_positions, &bt)); + + if parent_key_may_change { + let skip_parent_fk = program.allocate_label(); + let pk_len = bt.primary_key_columns.len(); + + match pk_len { + 0 => { + // implicit rowid + program.emit_insn(Insn::Eq { + lhs: rowid_new_reg, + rhs: conflict_rowid_reg, + target_pc: skip_parent_fk, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + emit_fk_parent_existence_checks( + program, + resolver, + table.get_name(), + tbl_cursor_id, + conflict_rowid_reg, + )?; + program.preassign_label_to_next_insn(skip_parent_fk); + } + 1 => { + // single-col declared PK + let (pk_name, _) = &bt.primary_key_columns[0]; + let (pos, col) = bt.get_column(pk_name).unwrap(); + + let old_reg = program.alloc_register(); + if col.is_rowid_alias { + program.emit_insn(Insn::RowId { + cursor_id: tbl_cursor_id, + dest: old_reg, + }); + } else { + program.emit_insn(Insn::Column { + cursor_id: tbl_cursor_id, + column: pos, + dest: old_reg, + default: None, + }); + } + let new_reg = new_start + pos; + + let skip = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: old_reg, + rhs: new_reg, + target_pc: skip, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + emit_fk_parent_existence_checks( + program, + resolver, + table.get_name(), + tbl_cursor_id, + conflict_rowid_reg, + )?; + program.preassign_label_to_next_insn(skip); + } + _ => { + // composite PK: build OLD/NEW vectors and do the 2-pass counter logic + let old_pk_start = program.alloc_registers(pk_len); + for (i, (pk_name, _)) in bt.primary_key_columns.iter().enumerate() { + let (pos, col) = bt.get_column(pk_name).unwrap(); + if col.is_rowid_alias { + // old rowid (UPSERT target) == conflict_rowid_reg + program.emit_insn(Insn::Copy { + src_reg: conflict_rowid_reg, + dst_reg: old_pk_start + i, + extra_amount: 0, + }); + } else { + program.emit_insn(Insn::Column { + cursor_id: tbl_cursor_id, + column: pos, + dest: old_pk_start + i, + default: None, + }); + } + } + + let new_pk_start = program.alloc_registers(pk_len); + for (i, (pk_name, _)) in bt.primary_key_columns.iter().enumerate() { + let (pos, col) = bt.get_column(pk_name).unwrap(); + let src = if col.is_rowid_alias { + rowid_new_reg + } else { + new_start + pos + }; + program.emit_insn(Insn::Copy { + src_reg: src, + dst_reg: new_pk_start + i, + extra_amount: 0, + }); + } + + // Compare OLD vs NEW, if all equal then skip + let skip = program.allocate_label(); + let changed = program.allocate_label(); + for i in 0..pk_len { + if i == pk_len - 1 { + program.emit_insn(Insn::Eq { + lhs: old_pk_start + i, + rhs: new_pk_start + i, + target_pc: skip, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + program.emit_insn(Insn::Goto { target_pc: changed }); + } else { + let next = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: old_pk_start + i, + rhs: new_pk_start + i, + target_pc: next, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); + program.emit_insn(Insn::Goto { target_pc: changed }); + program.preassign_label_to_next_insn(next); + } + } + + program.preassign_label_to_next_insn(changed); + emit_fk_parent_pk_change_counters( + program, + &incoming, + resolver, + old_pk_start, + new_pk_start, + pk_len, + )?; + program.preassign_label_to_next_insn(skip); + } + } + } + } + } + } + // Index rebuild (DELETE old, INSERT new), honoring partial-index WHEREs if let Some(before) = before_start { - let (changed_cols, rowid_changed) = collect_changed_cols(table, set_pairs); - for (idx_name, _root, idx_cid) in idx_cursors { let idx_meta = resolver .schema diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index eb3f8a1af..42b7660f4 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -3,12 +3,11 @@ pub mod grammar_generator; #[cfg(test)] mod tests { use rand::seq::{IndexedRandom, SliceRandom}; - use std::collections::HashSet; - use turso_core::DatabaseOpts; - use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; use rusqlite::{params, types::Value}; + use std::{collections::HashSet, io::Write}; + use turso_core::DatabaseOpts; use crate::{ common::{ @@ -646,6 +645,422 @@ mod tests { "Different results! limbo: {:?}, sqlite: {:?}, seed: {}, query: {}, table def: {}", limbo_rows, sqlite_rows, seed, query, table_defs[i] ); + + } + } + } + } + + pub fn fk_single_pk_mutation_fuzz() { + let _ = env_logger::try_init(); + let (mut rng, seed) = rng_from_time(); + println!("fk_single_pk_mutation_fuzz seed: {seed}"); + + const OUTER_ITERS: usize = 50; + const INNER_ITERS: usize = 200; + + for outer in 0..OUTER_ITERS { + println!("fk_single_pk_mutation_fuzz {}/{}", outer + 1, OUTER_ITERS); + + let limbo_db = TempDatabase::new_empty(true); + let sqlite_db = TempDatabase::new_empty(true); + let limbo = limbo_db.connect_limbo(); + let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap(); + + // Statement log for this iteration + let mut stmts: Vec = Vec::new(); + let mut log_and_exec = |sql: &str| { + stmts.push(sql.to_string()); + sql.to_string() + }; + + // Enable FKs in both engines + let s = log_and_exec("PRAGMA foreign_keys=ON"); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // DDL + let s = log_and_exec("CREATE TABLE p(id INTEGER PRIMARY KEY, a INT, b INT)"); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + let s = log_and_exec( + "CREATE TABLE c(id INTEGER PRIMARY KEY, x INT, y INT, FOREIGN KEY(x) REFERENCES p(id))", + ); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Seed parent + let n_par = rng.random_range(5..=40); + let mut used_ids = std::collections::HashSet::new(); + for _ in 0..n_par { + let mut id; + loop { + id = rng.random_range(1..=200) as i64; + if used_ids.insert(id) { + break; + } + } + let a = rng.random_range(-5..=25); + let b = rng.random_range(-5..=25); + let stmt = log_and_exec(&format!("INSERT INTO p VALUES ({id}, {a}, {b})")); + limbo_exec_rows(&limbo_db, &limbo, &stmt); + sqlite.execute(&stmt, params![]).unwrap(); + } + + // Seed child + let n_child = rng.random_range(5..=80); + for i in 0..n_child { + let id = 1000 + i as i64; + let x = if rng.random_bool(0.8) { + *used_ids.iter().choose(&mut rng).unwrap() + } else { + rng.random_range(1..=220) as i64 + }; + let y = rng.random_range(-10..=10); + let stmt = log_and_exec(&format!("INSERT INTO c VALUES ({id}, {x}, {y})")); + match ( + sqlite.execute(&stmt, params![]), + limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt), + ) { + (Ok(_), Ok(_)) => {} + (Err(_), Err(_)) => {} + (x, y) => { + eprintln!("\n=== FK fuzz failure (seeding mismatch) ==="); + eprintln!("seed: {seed}, outer: {}", outer + 1); + eprintln!("sqlite: {x:?}, limbo: {y:?}"); + eprintln!("last stmt: {stmt}"); + eprintln!("--- replay statements ({}) ---", stmts.len()); + for (i, s) in stmts.iter().enumerate() { + eprintln!("{:04}: {};", i + 1, s); + } + panic!("Seeding child insert mismatch"); + } + } + } + + // Mutations + for _ in 0..INNER_ITERS { + let action = rng.random_range(0..6); + let stmt = match action { + // Parent INSERT + 0 => { + let mut id; + let mut tries = 0; + loop { + id = rng.random_range(1..=250) as i64; + if !used_ids.contains(&id) || tries > 10 { + break; + } + tries += 1; + } + let a = rng.random_range(-5..=25); + let b = rng.random_range(-5..=25); + format!("INSERT INTO p VALUES({id}, {a}, {b})") + } + // Parent UPDATE + 1 => { + if rng.random_bool(0.5) { + let old = rng.random_range(1..=250); + let new_id = rng.random_range(1..=260); + format!("UPDATE p SET id={new_id} WHERE id={old}") + } else { + let a = rng.random_range(-5..=25); + let b = rng.random_range(-5..=25); + let tgt = rng.random_range(1..=260); + format!("UPDATE p SET a={a}, b={b} WHERE id={tgt}") + } + } + // Parent DELETE + 2 => { + let del_id = rng.random_range(1..=260); + format!("DELETE FROM p WHERE id={del_id}") + } + // Child INSERT + 3 => { + let id = rng.random_range(1000..=2000); + let x = if rng.random_bool(0.7) { + if let Some(p) = used_ids.iter().choose(&mut rng) { + *p + } else { + rng.random_range(1..=260) as i64 + } + } else { + rng.random_range(1..=260) as i64 + }; + let y = rng.random_range(-10..=10); + format!("INSERT INTO c VALUES({id}, {x}, {y})") + } + // Child UPDATE + 4 => { + let pick = rng.random_range(1000..=2000); + if rng.random_bool(0.6) { + let new_x = if rng.random_bool(0.7) { + if let Some(p) = used_ids.iter().choose(&mut rng) { + *p + } else { + rng.random_range(1..=260) as i64 + } + } else { + rng.random_range(1..=260) as i64 + }; + format!("UPDATE c SET x={new_x} WHERE id={pick}") + } else { + let new_y = rng.random_range(-10..=10); + format!("UPDATE c SET y={new_y} WHERE id={pick}") + } + } + // Child DELETE + _ => { + let pick = rng.random_range(1000..=2000); + format!("DELETE FROM c WHERE id={pick}") + } + }; + + let stmt = log_and_exec(&stmt); + + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + + match (sres, lres) { + (Ok(_), Ok(_)) => { + if stmt.starts_with("INSERT INTO p VALUES(") { + if let Some(tok) = stmt.split_whitespace().nth(4) { + if let Some(idtok) = tok.split(['(', ',']).nth(1) { + if let Ok(idnum) = idtok.parse::() { + used_ids.insert(idnum); + } + } + } + } + let sp = sqlite_exec_rows(&sqlite, "SELECT id,a,b FROM p ORDER BY id"); + let sc = sqlite_exec_rows(&sqlite, "SELECT id,x,y FROM c ORDER BY id"); + let lp = + limbo_exec_rows(&limbo_db, &limbo, "SELECT id,a,b FROM p ORDER BY id"); + let lc = + limbo_exec_rows(&limbo_db, &limbo, "SELECT id,x,y FROM c ORDER BY id"); + + if sp != lp || sc != lc { + eprintln!("\n=== FK fuzz failure (state mismatch) ==="); + eprintln!("seed: {seed}, outer: {}", outer + 1); + eprintln!("last stmt: {stmt}"); + eprintln!("sqlite p: {sp:?}\nsqlite c: {sc:?}"); + eprintln!("limbo p: {lp:?}\nlimbo c: {lc:?}"); + eprintln!("--- replay statements ({}) ---", stmts.len()); + for (i, s) in stmts.iter().enumerate() { + eprintln!("{:04}: {};", i + 1, s); + } + panic!("State mismatch"); + } + } + (Err(_), Err(_)) => { /* parity OK */ } + (ok_sqlite, ok_limbo) => { + eprintln!("\n=== FK fuzz failure (outcome mismatch) ==="); + eprintln!("seed: {seed}, outer: {}", outer + 1); + eprintln!("sqlite: {ok_sqlite:?}, limbo: {ok_limbo:?}"); + eprintln!("last stmt: {stmt}"); + // dump final states to help decide who is right + let sp = sqlite_exec_rows(&sqlite, "SELECT id,a,b FROM p ORDER BY id"); + let sc = sqlite_exec_rows(&sqlite, "SELECT id,x,y FROM c ORDER BY id"); + let lp = + limbo_exec_rows(&limbo_db, &limbo, "SELECT id,a,b FROM p ORDER BY id"); + let lc = + limbo_exec_rows(&limbo_db, &limbo, "SELECT id,x,y FROM c ORDER BY id"); + eprintln!("sqlite p: {sp:?}\nsqlite c: {sc:?}"); + eprintln!("turso p: {lp:?}\nturso c: {lc:?}"); + eprintln!( + "--- writing ({}) statements to fk_fuzz_statements.sql ---", + stmts.len() + ); + let mut file = std::fs::File::create("fk_fuzz_statements.sql").unwrap(); + for s in stmts.iter() { + let _ = file.write_fmt(format_args!("{s};\n")); + } + file.flush().unwrap(); + panic!("DML outcome mismatch, statements written to tests/fk_fuzz_statements.sql"); + } + } + } + } + } + + #[test] + #[ignore] // TODO: un-ignore when UNIQUE constraints are fixed + pub fn fk_composite_pk_mutation_fuzz() { + let _ = env_logger::try_init(); + let (mut rng, seed) = rng_from_time(); + println!("fk_composite_pk_mutation_fuzz seed: {seed}"); + + const OUTER_ITERS: usize = 30; + const INNER_ITERS: usize = 200; + + for outer in 0..OUTER_ITERS { + println!( + "fk_composite_pk_mutation_fuzz {}/{}", + outer + 1, + OUTER_ITERS + ); + + let limbo_db = TempDatabase::new_empty(true); + let sqlite_db = TempDatabase::new_empty(true); + let limbo = limbo_db.connect_limbo(); + let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap(); + + let mut stmts: Vec = Vec::new(); + let mut log_and_exec = |sql: &str| { + stmts.push(sql.to_string()); + sql.to_string() + }; + + // Enable FKs in both engines + let _ = log_and_exec("PRAGMA foreign_keys=ON"); + limbo_exec_rows(&limbo_db, &limbo, "PRAGMA foreign_keys=ON"); + sqlite.execute("PRAGMA foreign_keys=ON", params![]).unwrap(); + + // Parent PK is composite (a,b). Child references (x,y) -> (a,b). + let s = log_and_exec( + "CREATE TABLE p(a INT NOT NULL, b INT NOT NULL, v INT, PRIMARY KEY(a,b))", + ); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + let s = log_and_exec( + "CREATE TABLE c(id INTEGER PRIMARY KEY, x INT, y INT, w INT, \ + FOREIGN KEY(x,y) REFERENCES p(a,b))", + ); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Seed parent: small grid of (a,b) + let mut pairs: Vec<(i64, i64)> = Vec::new(); + for _ in 0..rng.random_range(5..=25) { + let a = rng.random_range(-3..=6); + let b = rng.random_range(-3..=6); + if !pairs.contains(&(a, b)) { + pairs.push((a, b)); + let v = rng.random_range(0..=20); + let stmt = log_and_exec(&format!("INSERT INTO p VALUES({a},{b},{v})")); + limbo_exec_rows(&limbo_db, &limbo, &stmt); + sqlite.execute(&stmt, params![]).unwrap(); + } + } + + // Seed child rows, 70% chance to reference existing (a,b) + for i in 0..rng.random_range(5..=60) { + let id = 5000 + i as i64; + let (x, y) = if rng.random_bool(0.7) { + *pairs.choose(&mut rng).unwrap_or(&(0, 0)) + } else { + (rng.random_range(-4..=7), rng.random_range(-4..=7)) + }; + let w = rng.random_range(-10..=10); + let stmt = log_and_exec(&format!("INSERT INTO c VALUES({id}, {x}, {y}, {w})")); + let _ = sqlite.execute(&stmt, params![]); + let _ = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + } + + for _ in 0..INNER_ITERS { + let op = rng.random_range(0..6); + let stmt = log_and_exec(&match op { + // INSERT parent + 0 => { + let a = rng.random_range(-4..=8); + let b = rng.random_range(-4..=8); + let v = rng.random_range(0..=20); + format!("INSERT INTO p VALUES({a},{b},{v})") + } + // UPDATE parent composite key (a,b) + 1 => { + let a_old = rng.random_range(-4..=8); + let b_old = rng.random_range(-4..=8); + let a_new = rng.random_range(-4..=8); + let b_new = rng.random_range(-4..=8); + format!("UPDATE p SET a={a_new}, b={b_new} WHERE a={a_old} AND b={b_old}") + } + // DELETE parent + 2 => { + let a = rng.random_range(-4..=8); + let b = rng.random_range(-4..=8); + format!("DELETE FROM p WHERE a={a} AND b={b}") + } + // INSERT child + 3 => { + let id = rng.random_range(5000..=7000); + let (x, y) = if rng.random_bool(0.7) { + *pairs.choose(&mut rng).unwrap_or(&(0, 0)) + } else { + (rng.random_range(-4..=8), rng.random_range(-4..=8)) + }; + let w = rng.random_range(-10..=10); + format!("INSERT INTO c VALUES({id},{x},{y},{w})") + } + // UPDATE child FK columns (x,y) + 4 => { + let id = rng.random_range(5000..=7000); + let (x, y) = if rng.random_bool(0.7) { + *pairs.choose(&mut rng).unwrap_or(&(0, 0)) + } else { + (rng.random_range(-4..=8), rng.random_range(-4..=8)) + }; + format!("UPDATE c SET x={x}, y={y} WHERE id={id}") + } + // DELETE child + _ => { + let id = rng.random_range(5000..=7000); + format!("DELETE FROM c WHERE id={id}") + } + }); + + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + + match (sres, lres) { + (Ok(_), Ok(_)) => { + // Compare canonical states + let sp = sqlite_exec_rows(&sqlite, "SELECT a,b,v FROM p ORDER BY a,b,v"); + let sc = sqlite_exec_rows(&sqlite, "SELECT id,x,y,w FROM c ORDER BY id"); + let lp = limbo_exec_rows( + &limbo_db, + &limbo, + "SELECT a,b,v FROM p ORDER BY a,b,v", + ); + let lc = limbo_exec_rows( + &limbo_db, + &limbo, + "SELECT id,x,y,w FROM c ORDER BY id", + ); + assert_eq!(sp, lp, "seed {seed}, stmt {stmt}"); + assert_eq!(sc, lc, "seed {seed}, stmt {stmt}"); + } + (Err(_), Err(_)) => { /* both errored -> parity OK */ } + (ok_s, ok_l) => { + eprintln!( + "Mismatch sqlite={ok_s:?}, limbo={ok_l:?}, stmt={stmt}, seed={seed}" + ); + let sp = sqlite_exec_rows(&sqlite, "SELECT a,b,v FROM p ORDER BY a,b,v"); + let sc = sqlite_exec_rows(&sqlite, "SELECT id,x,y,w FROM c ORDER BY id"); + let lp = limbo_exec_rows( + &limbo_db, + &limbo, + "SELECT a,b,v FROM p ORDER BY a,b,v", + ); + let lc = limbo_exec_rows( + &limbo_db, + &limbo, + "SELECT id,x,y,w FROM c ORDER BY id", + ); + eprintln!( + "sqlite p={sp:?}\nsqlite c={sc:?}\nlimbo p={lp:?}\nlimbo c={lc:?}" + ); + let mut file = + std::fs::File::create("fk_composite_fuzz_statements.sql").unwrap(); + for s in stmts.iter() { + let _ = writeln!(&file, "{s};"); + } + file.flush().unwrap(); + panic!("DML outcome mismatch, sql file written to tests/fk_composite_fuzz_statements.sql"); + } + } } } } From 23248d900165f03f5ecac42d56246f3299596e4a Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 30 Sep 2025 19:42:03 -0400 Subject: [PATCH 074/428] Add UPSERT to fuzzing for FK constraints --- tests/integration/fuzz/mod.rs | 85 ++++++++++++++++++++++++++++++++++- 1 file changed, 83 insertions(+), 2 deletions(-) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 42b7660f4..a60d1fcc3 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -741,7 +741,7 @@ mod tests { // Mutations for _ in 0..INNER_ITERS { - let action = rng.random_range(0..6); + let action = rng.random_range(0..8); let stmt = match action { // Parent INSERT 0 => { @@ -810,6 +810,56 @@ mod tests { format!("UPDATE c SET y={new_y} WHERE id={pick}") } } + 5 => { + // UPSERT parent + let pick = rng.random_range(1..=250); + if rng.random_bool(0.5) { + let a = rng.random_range(-5..=25); + let b = rng.random_range(-5..=25); + format!( + "INSERT INTO p VALUES({pick}, {a}, {b}) \ + ON CONFLICT(id) DO UPDATE SET a=excluded.a, b=excluded.b" + ) + } else { + let a = rng.random_range(-5..=25); + let b = rng.random_range(-5..=25); + format!( + "INSERT INTO p VALUES({pick}, {a}, {b}) \ + ON CONFLICT(id) DO NOTHING" + ) + } + } + 6 => { + // UPSERT child + let pick = rng.random_range(1000..=2000); + if rng.random_bool(0.5) { + let x = if rng.random_bool(0.7) { + if let Some(p) = used_ids.iter().choose(&mut rng) { + *p + } else { + rng.random_range(1..=260) as i64 + } + } else { + rng.random_range(1..=260) as i64 + }; + format!( + "INSERT INTO c VALUES({pick}, {x}, 0) ON CONFLICT(id) DO UPDATE SET x=excluded.x" + ) + } else { + let x = if rng.random_bool(0.7) { + if let Some(p) = used_ids.iter().choose(&mut rng) { + *p + } else { + rng.random_range(1..=260) as i64 + } + } else { + rng.random_range(1..=260) as i64 + }; + format!( + "INSERT INTO c VALUES({pick}, {x}, 0) ON CONFLICT(id) DO NOTHING" + ) + } + } // Child DELETE _ => { let pick = rng.random_range(1000..=2000); @@ -960,7 +1010,7 @@ mod tests { } for _ in 0..INNER_ITERS { - let op = rng.random_range(0..6); + let op = rng.random_range(0..7); let stmt = log_and_exec(&match op { // INSERT parent 0 => { @@ -1004,6 +1054,37 @@ mod tests { }; format!("UPDATE c SET x={x}, y={y} WHERE id={id}") } + 5 => { + // UPSERT parent + if rng.random_bool(0.5) { + let a = rng.random_range(-4..=8); + let b = rng.random_range(-4..=8); + let v = rng.random_range(0..=20); + format!( + "INSERT INTO p VALUES({a},{b},{v}) ON CONFLICT(a,b) DO UPDATE SET v=excluded.v" + ) + } else { + let a = rng.random_range(-4..=8); + let b = rng.random_range(-4..=8); + format!( + "INSERT INTO p VALUES({a},{b},{}) ON CONFLICT(a,b) DO NOTHING", + rng.random_range(0..=20) + ) + } + } + 6 => { + // UPSERT child + let id = rng.random_range(5000..=7000); + let (x, y) = if rng.random_bool(0.7) { + *pairs.choose(&mut rng).unwrap_or(&(0, 0)) + } else { + (rng.random_range(-4..=8), rng.random_range(-4..=8)) + }; + format!( + "INSERT INTO c VALUES({id},{x},{y},{}) ON CONFLICT(id) DO UPDATE SET x=excluded.x, y=excluded.y", + rng.random_range(-10..=10) + ) + } // DELETE child _ => { let id = rng.random_range(5000..=7000); From fa23cedbbedf893d3c1ce20cb1d08fdbe72c5d29 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 30 Sep 2025 20:12:39 -0400 Subject: [PATCH 075/428] Add helper to pragma to parse enabled opts and fix schema parsing for foreign key constraints --- core/lib.rs | 1 + core/schema.rs | 152 +++++++++------------------------- core/translate/emitter.rs | 23 ++++-- core/translate/insert.rs | 169 +++++++++++++------------------------- core/translate/pragma.rs | 64 +++++---------- core/translate/upsert.rs | 7 +- 6 files changed, 137 insertions(+), 279 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index ee55c34ca..43e07b609 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -1100,6 +1100,7 @@ pub struct Connection { busy_timeout: RwLock, /// Whether this is an internal connection used for MVCC bootstrap is_mvcc_bootstrap_connection: AtomicBool, + /// Whether pragma foreign_keys=ON for this connection fk_pragma: AtomicBool, } diff --git a/core/schema.rs b/core/schema.rs index 6619aaaa2..29a805294 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -846,28 +846,18 @@ impl Schema { Ok(()) } - pub fn incoming_fks_to(&self, table_name: &str) -> Vec { + /// Compute all resolved FKs *referencing* `table_name` (arg: `table_name` is the parent). + /// Each item contains the child table, normalized columns/positions, and the parent lookup + /// strategy (rowid vs. UNIQUE index or PK). + pub fn resolved_fks_referencing(&self, table_name: &str) -> Vec { let target = normalize_ident(table_name); - let mut out = vec![]; + let mut out = Vec::with_capacity(4); // arbitrary estimate let parent_tbl = self .get_btree_table(&target) - .expect("incoming_fks_to: parent table must exist"); + .expect("parent table must exist"); // Precompute helper to find parent unique index, if it's not the rowid let find_parent_unique = |cols: &Vec| -> Option> { - // If matches PK exactly, we don't need a secondary index probe - let matches_pk = !parent_tbl.primary_key_columns.is_empty() - && parent_tbl.primary_key_columns.len() == cols.len() - && parent_tbl - .primary_key_columns - .iter() - .zip(cols.iter()) - .all(|((n, _ord), c)| n.eq_ignore_ascii_case(c)); - - if matches_pk { - return None; - } - self.get_indices(&parent_tbl.name) .find(|idx| { idx.unique @@ -887,16 +877,12 @@ impl Schema { }; for fk in &child.foreign_keys { - if normalize_ident(&fk.parent_table) != target { + if fk.parent_table != target { continue; } // Resolve + normalize columns - let child_cols: Vec = fk - .child_columns - .iter() - .map(|c| normalize_ident(c)) - .collect(); + let child_cols: Vec = fk.child_columns.clone(); // If no explicit parent columns were given, they were validated in add_btree_table() // to match the parent's PK. We resolve them the same way here. @@ -904,25 +890,21 @@ impl Schema { parent_tbl .primary_key_columns .iter() - .map(|(n, _)| normalize_ident(n)) + .map(|(col, _)| col) + .cloned() .collect() } else { - fk.parent_columns - .iter() - .map(|c| normalize_ident(c)) - .collect() + fk.parent_columns.clone() }; // Child positions let child_pos: Vec = child_cols .iter() .map(|cname| { - child.get_column(cname).map(|(i, _)| i).unwrap_or_else(|| { - panic!( - "incoming_fks_to: child col {}.{} missing", - child.name, cname - ) - }) + child + .get_column(cname) + .map(|(i, _)| i) + .unwrap_or_else(|| panic!("child col {}.{} missing", child.name, cname)) }) .collect(); @@ -941,10 +923,7 @@ impl Schema { } }) .unwrap_or_else(|| { - panic!( - "incoming_fks_to: parent col {}.{cname} missing", - parent_tbl.name - ) + panic!("parent col {}.{cname} missing", parent_tbl.name) }) }) .collect(); @@ -983,7 +962,8 @@ impl Schema { out } - pub fn outgoing_fks_of(&self, child_table: &str) -> Vec { + /// Compute all resolved FKs *declared by* `child_table` + pub fn resolved_fks_for_child(&self, child_table: &str) -> Vec { let child_name = normalize_ident(child_table); let Some(child) = self.get_btree_table(&child_name) else { return vec![]; @@ -992,16 +972,6 @@ impl Schema { // Helper to find the UNIQUE/index on the parent that matches the resolved parent cols let find_parent_unique = |parent_tbl: &BTreeTable, cols: &Vec| -> Option> { - let matches_pk = !parent_tbl.primary_key_columns.is_empty() - && parent_tbl.primary_key_columns.len() == cols.len() - && parent_tbl - .primary_key_columns - .iter() - .zip(cols.iter()) - .all(|((n, _), c)| n.eq_ignore_ascii_case(c)); - if matches_pk { - return None; - } self.get_indices(&parent_tbl.name) .find(|idx| { idx.unique @@ -1015,14 +985,14 @@ impl Schema { .cloned() }; - let mut out = Vec::new(); + let mut out = Vec::with_capacity(child.foreign_keys.len()); for fk in &child.foreign_keys { - let parent_name = normalize_ident(&fk.parent_table); - let Some(parent_tbl) = self.get_btree_table(&parent_name) else { + let parent_name = &fk.parent_table; + let Some(parent_tbl) = self.get_btree_table(parent_name) else { continue; }; - // Normalize columns (same rules you used in validation) + // Normalize columns let child_cols: Vec = fk .child_columns .iter() @@ -1045,7 +1015,6 @@ impl Schema { .collect() }; - // Positions let child_pos: Vec = child_cols .iter() .map(|c| child.get_column(c).expect("child col missing").0) @@ -1061,7 +1030,6 @@ impl Schema { }) .collect(); - // Parent uses rowid? let parent_uses_rowid = parent_cols.len() == 1 && { let c = parent_cols[0].as_str(); c.eq_ignore_ascii_case("rowid") @@ -1094,7 +1062,8 @@ impl Schema { out } - pub fn any_incoming_fk_to(&self, table_name: &str) -> bool { + /// Returns if any table declares a FOREIGN KEY whose parent is `table_name`. + pub fn any_resolved_fks_referencing(&self, table_name: &str) -> bool { self.tables.values().any(|t| { let Some(bt) = t.btree() else { return false; @@ -1105,36 +1074,12 @@ impl Schema { }) } - /// Returns if this table declares any outgoing FKs (is a child of some parent) + /// Returns true if `table_name` declares any FOREIGN KEYs pub fn has_child_fks(&self, table_name: &str) -> bool { self.get_table(table_name) .and_then(|t| t.btree()) .is_some_and(|t| !t.foreign_keys.is_empty()) } - - /// Return the *declared* (unresolved) FKs for a table. Callers that need - /// positions/rowid/unique info should use `incoming_fks_to` instead. - pub fn get_fks_for_table(&self, table_name: &str) -> Vec> { - self.get_table(table_name) - .and_then(|t| t.btree()) - .map(|t| t.foreign_keys.clone()) - .unwrap_or_default() - } - - /// Return pairs of (child_table_name, FK) for FKs that reference `parent_table` - pub fn get_referencing_fks(&self, parent_table: &str) -> Vec<(String, Arc)> { - let mut refs = Vec::new(); - for table in self.tables.values() { - if let Table::BTree(btree) = table.deref() { - for fk in &btree.foreign_keys { - if fk.parent_table == parent_table { - refs.push((btree.name.as_str().to_string(), fk.clone())); - } - } - } - } - refs - } } impl Clone for Schema { @@ -1524,7 +1469,6 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R .iter() .map(|ic| normalize_ident(ic.col_name.as_str())) .collect(); - // derive parent columns: explicit or default to parent PK let parent_table = normalize_ident(clause.tbl_name.as_str()); let parent_columns: Vec = clause @@ -1533,8 +1477,8 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R .map(|ic| normalize_ident(ic.col_name.as_str())) .collect(); - // arity check - if child_columns.len() != parent_columns.len() { + // Only check arity if parent columns were explicitly listed + if !parent_columns.is_empty() && child_columns.len() != parent_columns.len() { crate::bail_parse_error!( "foreign key on \"{}\" has {} child column(s) but {} parent column(s)", tbl_name, @@ -1568,17 +1512,6 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R } }) .unwrap_or(RefAct::NoAction), - on_insert: clause - .args - .iter() - .find_map(|a| { - if let ast::RefArg::OnInsert(x) = a { - Some(*x) - } else { - None - } - }) - .unwrap_or(RefAct::NoAction), on_update: clause .args .iter() @@ -1601,7 +1534,7 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R constraints, } in columns { - let name = col_name.as_str().to_string(); + let name = normalize_ident(col_name.as_str()); // Regular sqlite tables have an integer rowid that uniquely identifies a row. // Even if you create a table with a column e.g. 'id INT PRIMARY KEY', there will still // be a separate hidden rowid, and the 'id' column will have a separate index built for it. @@ -1684,11 +1617,11 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R defer_clause, } => { let fk = ForeignKey { - parent_table: clause.tbl_name.to_string(), + parent_table: normalize_ident(clause.tbl_name.as_str()), parent_columns: clause .columns .iter() - .map(|c| c.col_name.as_str().to_string()) + .map(|c| normalize_ident(c.col_name.as_str())) .collect(), on_delete: clause .args @@ -1701,17 +1634,6 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R } }) .unwrap_or(RefAct::NoAction), - on_insert: clause - .args - .iter() - .find_map(|arg| { - if let ast::RefArg::OnInsert(act) = arg { - Some(*act) - } else { - None - } - }) - .unwrap_or(RefAct::NoAction), on_update: clause .args .iter() @@ -1724,7 +1646,16 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R }) .unwrap_or(RefAct::NoAction), child_columns: vec![name.clone()], - deferred: defer_clause.is_some(), + deferred: match defer_clause { + Some(d) => { + d.deferrable + && matches!( + d.init_deferred, + Some(InitDeferredPred::InitiallyDeferred) + ) + } + None => false, + }, }; foreign_keys.push(Arc::new(fk)); } @@ -1742,7 +1673,7 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R } cols.push(Column { - name: Some(normalize_ident(&name)), + name: Some(name), ty, ty_str, primary_key, @@ -1875,7 +1806,6 @@ pub struct ForeignKey { pub parent_columns: Vec, pub on_delete: RefAct, pub on_update: RefAct, - pub on_insert: RefAct, /// DEFERRABLE INITIALLY DEFERRED pub deferred: bool, } diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index f569743be..3a1d1d017 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -440,7 +440,7 @@ fn emit_program_for_delete( .unwrap() .table .get_name(); - resolver.schema.any_incoming_fk_to(table_name) + resolver.schema.any_resolved_fks_referencing(table_name) }; // Open FK scope for the whole statement if has_parent_fks { @@ -542,7 +542,10 @@ fn emit_delete_insns( if connection.foreign_keys_enabled() && unsafe { &*table_reference }.btree().is_some() - && t_ctx.resolver.schema.any_incoming_fk_to(table_name) + && t_ctx + .resolver + .schema + .any_resolved_fks_referencing(table_name) { emit_fk_parent_existence_checks( program, @@ -1047,7 +1050,7 @@ pub fn emit_fk_parent_existence_checks( .get_btree_table(parent_table_name) .ok_or_else(|| crate::LimboError::InternalError("parent not btree".into()))?; - for fk_ref in resolver.schema.incoming_fks_to(parent_table_name) { + for fk_ref in resolver.schema.resolved_fks_referencing(parent_table_name) { // Resolve parent key columns let parent_cols: Vec = if fk_ref.fk.parent_columns.is_empty() { parent_bt @@ -1295,8 +1298,8 @@ fn emit_program_for_update( .unwrap() .table .get_name(); - let has_child_fks = fk_enabled && !resolver.schema.get_fks_for_table(table_name).is_empty(); - let has_parent_fks = fk_enabled && resolver.schema.any_incoming_fk_to(table_name); + let has_child_fks = fk_enabled && resolver.schema.has_child_fks(table_name); + let has_parent_fks = fk_enabled && resolver.schema.any_resolved_fks_referencing(table_name); // statement-level FK scope open if has_child_fks || has_parent_fks { program.emit_insn(Insn::FkCounter { @@ -1695,12 +1698,16 @@ fn emit_update_insns( // We only need to do work if the referenced key (the parent key) might change. // we detect that by comparing OLD vs NEW primary key representation // then run parent FK checks only when it actually changes. - if t_ctx.resolver.schema.any_incoming_fk_to(table_name) { + if t_ctx + .resolver + .schema + .any_resolved_fks_referencing(table_name) + { let updated_parent_positions: HashSet = plan.set_clauses.iter().map(|(i, _)| *i).collect(); // If no incoming FK’s parent key can be affected by these updates, skip the whole parent-FK block. - let incoming = t_ctx.resolver.schema.incoming_fks_to(table_name); + let incoming = t_ctx.resolver.schema.resolved_fks_referencing(table_name); let parent_tbl = &table_btree; let maybe_affects_parent_key = incoming .iter() @@ -2338,7 +2345,7 @@ pub fn emit_fk_child_existence_checks( if_zero: true, }); - for fk_ref in resolver.schema.outgoing_fks_of(table_name) { + for fk_ref in resolver.schema.resolved_fks_for_child(table_name) { // Skip when the child key is untouched (including rowid-alias special case) if !fk_ref.child_key_changed(updated_cols, table) { continue; diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 01d1355a1..78c5dee5c 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -83,13 +83,6 @@ pub fn translate_insert( ); } let table_name = &tbl_name.name; - let fk_enabled = connection.foreign_keys_enabled(); - let has_child_fks = fk_enabled - && !resolver - .schema - .get_fks_for_table(table_name.as_str()) - .is_empty(); - let has_parent_fks = fk_enabled && resolver.schema.any_incoming_fk_to(table_name.as_str()); // Check if this is a system table that should be protected from direct writes if crate::schema::is_system_table(table_name.as_str()) { @@ -100,6 +93,7 @@ pub fn translate_insert( Some(table) => table, None => crate::bail_parse_error!("no such table: {}", table_name), }; + let fk_enabled = connection.foreign_keys_enabled(); // Check if this is a materialized view if resolver.schema.is_materialized_view(table_name.as_str()) { @@ -140,6 +134,7 @@ pub fn translate_insert( if !btree_table.has_rowid { crate::bail_parse_error!("INSERT into WITHOUT ROWID table is not supported"); } + let has_child_fks = fk_enabled && !btree_table.foreign_keys.is_empty(); let root_page = btree_table.root_page; @@ -243,7 +238,7 @@ pub fn translate_insert( connection, )?; - if has_child_fks || has_parent_fks { + if has_child_fks { program.emit_insn(Insn::FkCounter { increment_value: 1, check_abort: false, @@ -1044,7 +1039,7 @@ pub fn translate_insert( } } } - if has_child_fks || has_parent_fks { + if has_child_fks { emit_fk_checks_for_insert(&mut program, resolver, &insertion, table_name.as_str())?; } @@ -1909,87 +1904,56 @@ fn emit_fk_checks_for_insert( }); // Iterate child FKs declared on this table - for fk in resolver.schema.get_fks_for_table(table_name) { - let fk_ok = program.allocate_label(); + for fk_ref in resolver.schema.resolved_fks_for_child(table_name) { + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent table"); + let num_child_cols = fk_ref.child_cols.len(); - // If any child column is NULL, skip this FK - for child_col in &fk.child_columns { - let mapping = insertion - .get_col_mapping_by_name(child_col) - .ok_or_else(|| { - crate::LimboError::InternalError(format!("FK column {child_col} not found")) - })?; - let src = if mapping.column.is_rowid_alias { - insertion.key_register() - } else { - mapping.register - }; + // if any child FK value is NULL, this row doesn't reference the parent. + let fk_ok = program.allocate_label(); + for &pos_in_child in fk_ref.child_pos.iter() { + // Map INSERT image register for that column + let src = insertion + .col_mappings + .get(pos_in_child) + .expect("col must be present") + .register; program.emit_insn(Insn::IsNull { reg: src, target_pc: fk_ok, }); } - // Parent lookup: rowid path or unique-index path - let parent_tbl = resolver.schema.get_table(&fk.parent_table).ok_or_else(|| { - crate::LimboError::InternalError(format!("Parent table {} not found", fk.parent_table)) - })?; - - let uses_rowid = { - // If single parent column equals rowid or aliases rowid - fk.parent_columns.len() == 1 && { - let parent_col = fk.parent_columns[0].as_str(); - parent_col.eq_ignore_ascii_case("rowid") - || parent_tbl.columns().iter().any(|c| { - c.is_rowid_alias - && c.name - .as_ref() - .is_some_and(|n| n.eq_ignore_ascii_case(parent_col)) - }) - } - }; - - if uses_rowid { - // Simple rowid probe on parent table - let parent_bt = parent_tbl.btree().ok_or_else(|| { - crate::LimboError::InternalError("Parent table is not a BTree".into()) - })?; - let pcur = program.alloc_cursor_id(CursorType::BTreeTable(parent_bt.clone())); + if fk_ref.parent_uses_rowid { + // Parent is rowid/alias: single-reg probe + let pcur = program.alloc_cursor_id(CursorType::BTreeTable(parent_tbl.clone())); program.emit_insn(Insn::OpenRead { cursor_id: pcur, - root_page: parent_bt.root_page, + root_page: parent_tbl.root_page, db: 0, }); - - // Child value register - let cm = insertion - .get_col_mapping_by_name(&fk.child_columns[0]) - .ok_or_else(|| { - crate::LimboError::InternalError("FK child column not found".into()) - })?; - let val_reg = if cm.column.is_rowid_alias { - insertion.key_register() - } else { - cm.register - }; - + let only = 0; // n == 1 guaranteed if parent_uses_rowid + let src = insertion + .col_mappings + .get(fk_ref.child_pos[only]) + .unwrap() + .register; let violation = program.allocate_label(); - // NotExists: jump to violation if missing in parent program.emit_insn(Insn::NotExists { cursor: pcur, - rowid_reg: val_reg, + rowid_reg: src, target_pc: violation, }); - // OK program.emit_insn(Insn::Close { cursor_id: pcur }); program.emit_insn(Insn::Goto { target_pc: fk_ok }); - // Violation program.preassign_label_to_next_insn(violation); program.emit_insn(Insn::Close { cursor_id: pcur }); // Deferred vs immediate - if fk.deferred { + if fk_ref.fk.deferred { program.emit_insn(Insn::FkCounter { increment_value: 1, check_abort: false, @@ -2001,67 +1965,48 @@ fn emit_fk_checks_for_insert( description: "FOREIGN KEY constraint failed".to_string(), }); } - } else { - // Multi-column (or non-rowid) parent, we have to match a UNIQUE index with - // the exact column set and order - let parent_idx = resolver - .schema - .get_indices(&fk.parent_table) - .find(|idx| { - idx.unique - && idx.columns.len() == fk.parent_columns.len() - && idx - .columns - .iter() - .zip(fk.parent_columns.iter()) - .all(|(ic, pc)| ic.name.eq_ignore_ascii_case(pc)) - }) - .ok_or_else(|| { - crate::LimboError::InternalError(format!( - "No UNIQUE index on parent {}({:?}) for FK", - fk.parent_table, fk.parent_columns - )) - })?; - - let icur = program.alloc_cursor_id(CursorType::BTreeIndex(parent_idx.clone())); + } else if let Some(ix) = &fk_ref.parent_unique_index { + // Parent has a UNIQUE index exactly on parent_cols: use Found against that index + let icur = program.alloc_cursor_id(CursorType::BTreeIndex(ix.clone())); program.emit_insn(Insn::OpenRead { cursor_id: icur, - root_page: parent_idx.root_page, + root_page: ix.root_page, db: 0, }); - // Build packed search key registers from the *child* values - let n = fk.child_columns.len(); - let start = program.alloc_registers(n); - for (i, child_col) in fk.child_columns.iter().enumerate() { - let cm = insertion - .get_col_mapping_by_name(child_col) - .ok_or_else(|| { - crate::LimboError::InternalError(format!("Column {child_col} not found")) - })?; - let src = if cm.column.is_rowid_alias { - insertion.key_register() - } else { - cm.register - }; + // Build probe (child values order == parent index order by construction) + let probe_start = program.alloc_registers(num_child_cols); + for (i, &pos_in_child) in fk_ref.child_pos.iter().enumerate() { + let src = insertion.col_mappings.get(pos_in_child).unwrap().register; program.emit_insn(Insn::Copy { src_reg: src, - dst_reg: start + i, + dst_reg: probe_start + i, extra_amount: 0, }); } + let aff: String = ix + .columns + .iter() + .map(|c| parent_tbl.columns[c.pos_in_table].affinity().aff_mask()) + .collect(); + program.emit_insn(Insn::Affinity { + start_reg: probe_start, + count: std::num::NonZeroUsize::new(num_child_cols).unwrap(), + affinities: aff, + }); + let found = program.allocate_label(); program.emit_insn(Insn::Found { cursor_id: icur, target_pc: found, - record_reg: start, - num_regs: n, + record_reg: probe_start, + num_regs: num_child_cols, }); - // Violation path + // Not found: violation program.emit_insn(Insn::Close { cursor_id: icur }); - if fk.deferred { + if fk_ref.fk.deferred { program.emit_insn(Insn::FkCounter { increment_value: 1, check_abort: false, @@ -2074,16 +2019,14 @@ fn emit_fk_checks_for_insert( }); } program.emit_insn(Insn::Goto { target_pc: fk_ok }); - // Found OK program.preassign_label_to_next_insn(found); program.emit_insn(Insn::Close { cursor_id: icur }); } - // Done with this FK program.preassign_label_to_next_insn(fk_ok); } - program.resolve_label(after_all, program.offset()); + program.preassign_label_to_next_insn(after_all); Ok(()) } diff --git a/core/translate/pragma.rs b/core/translate/pragma.rs index 0a527a68c..601032943 100644 --- a/core/translate/pragma.rs +++ b/core/translate/pragma.rs @@ -95,6 +95,20 @@ fn update_pragma( connection: Arc, mut program: ProgramBuilder, ) -> crate::Result<(ProgramBuilder, TransactionMode)> { + let parse_pragma_enabled = |expr: &ast::Expr| -> bool { + if let Expr::Literal(Literal::Numeric(n)) = expr { + return !matches!(n.as_str(), "0"); + }; + let name_bytes = match expr { + Expr::Literal(Literal::Keyword(name)) => name.as_bytes(), + Expr::Name(name) | Expr::Id(name) => name.as_str().as_bytes(), + _ => "".as_bytes(), + }; + match_ignore_ascii_case!(match name_bytes { + b"ON" | b"TRUE" | b"YES" | b"1" => true, + _ => false, + }) + }; match pragma { PragmaName::ApplicationId => { let data = parse_signed_number(&value)?; @@ -343,38 +357,15 @@ fn update_pragma( } PragmaName::Synchronous => { use crate::SyncMode; - - let mode = match value { - Expr::Name(name) => { - let name_bytes = name.as_str().as_bytes(); - match_ignore_ascii_case!(match name_bytes { - b"OFF" | b"FALSE" | b"NO" | b"0" => SyncMode::Off, - _ => SyncMode::Full, - }) - } - Expr::Literal(Literal::Numeric(n)) => match n.as_str() { - "0" => SyncMode::Off, - _ => SyncMode::Full, - }, - _ => SyncMode::Full, + let mode = match parse_pragma_enabled(&value) { + true => SyncMode::Full, + false => SyncMode::Off, }; - connection.set_sync_mode(mode); Ok((program, TransactionMode::None)) } PragmaName::DataSyncRetry => { - let retry_enabled = match value { - Expr::Name(name) => { - let name_bytes = name.as_str().as_bytes(); - match_ignore_ascii_case!(match name_bytes { - b"ON" | b"TRUE" | b"YES" | b"1" => true, - _ => false, - }) - } - Expr::Literal(Literal::Numeric(n)) => !matches!(n.as_str(), "0"), - _ => false, - }; - + let retry_enabled = parse_pragma_enabled(&value); connection.set_data_sync_retry(retry_enabled); Ok((program, TransactionMode::None)) } @@ -388,24 +379,7 @@ fn update_pragma( Ok((program, TransactionMode::None)) } PragmaName::ForeignKeys => { - let enabled = match value { - Expr::Name(name) | Expr::Id(name) => { - let name_bytes = name.as_str().as_bytes(); - match_ignore_ascii_case!(match name_bytes { - b"ON" | b"TRUE" | b"YES" | b"1" => true, - _ => false, - }) - } - Expr::Literal(Literal::Keyword(name) | Literal::String(name)) => { - let name_bytes = name.as_bytes(); - match_ignore_ascii_case!(match name_bytes { - b"ON" | b"TRUE" | b"YES" | b"1" => true, - _ => false, - }) - } - Expr::Literal(Literal::Numeric(n)) => !matches!(n.as_str(), "0"), - _ => false, - }; + let enabled = parse_pragma_enabled(&value); connection.set_foreign_keys_enabled(enabled); Ok((program, TransactionMode::None)) } diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs index f9bfd5af9..2ae07f961 100644 --- a/core/translate/upsert.rs +++ b/core/translate/upsert.rs @@ -490,11 +490,14 @@ pub fn emit_upsert( } // Parent-side checks only if any incoming FK could care - if resolver.schema.any_incoming_fk_to(table.get_name()) { + if resolver + .schema + .any_resolved_fks_referencing(table.get_name()) + { // if parent key can't change, skip let updated_parent_positions: HashSet = set_pairs.iter().map(|(i, _)| *i).collect(); - let incoming = resolver.schema.incoming_fks_to(table.get_name()); + let incoming = resolver.schema.resolved_fks_referencing(table.get_name()); let parent_key_may_change = incoming .iter() .any(|r| r.parent_key_may_change(&updated_parent_positions, &bt)); From 99ae96c5f664b7a7fe7512d38d05da144c48dc3b Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 1 Oct 2025 11:09:18 -0400 Subject: [PATCH 076/428] Fix self-referential FK relationships and validation of FKs --- core/translate/insert.rs | 69 +++++++++++++++--- testing/foreign_keys.test | 149 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 203 insertions(+), 15 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 78c5dee5c..f6cfa4b88 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -23,7 +23,7 @@ use crate::translate::upsert::{ }; use crate::util::normalize_ident; use crate::vdbe::builder::ProgramBuilderOpts; -use crate::vdbe::insn::{IdxInsertFlags, InsertFlags, RegisterOrLiteral}; +use crate::vdbe::insn::{CmpInsFlags, IdxInsertFlags, InsertFlags, RegisterOrLiteral}; use crate::vdbe::BranchOffset; use crate::{ schema::{Column, Schema}, @@ -135,6 +135,10 @@ pub fn translate_insert( crate::bail_parse_error!("INSERT into WITHOUT ROWID table is not supported"); } let has_child_fks = fk_enabled && !btree_table.foreign_keys.is_empty(); + let has_parent_fks = fk_enabled + && resolver + .schema + .any_resolved_fks_referencing(table_name.as_str()); let root_page = btree_table.root_page; @@ -238,7 +242,7 @@ pub fn translate_insert( connection, )?; - if has_child_fks { + if has_child_fks || has_parent_fks { program.emit_insn(Insn::FkCounter { increment_value: 1, check_abort: false, @@ -1039,8 +1043,14 @@ pub fn translate_insert( } } } - if has_child_fks { - emit_fk_checks_for_insert(&mut program, resolver, &insertion, table_name.as_str())?; + if has_child_fks || has_parent_fks { + emit_fk_checks_for_insert( + &mut program, + resolver, + &insertion, + table_name.as_str(), + !inserting_multiple_rows, + )?; } program.emit_insn(Insn::Insert { @@ -1188,7 +1198,7 @@ pub fn translate_insert( } program.preassign_label_to_next_insn(stmt_epilogue); - if has_child_fks { + if has_child_fks || has_parent_fks { // close FK scope and surface deferred violations program.emit_insn(Insn::FkCounter { increment_value: -1, @@ -1896,6 +1906,7 @@ fn emit_fk_checks_for_insert( resolver: &Resolver, insertion: &Insertion, table_name: &str, + single_row_insert: bool, ) -> Result<()> { let after_all = program.allocate_label(); program.emit_insn(Insn::FkIfZero { @@ -1910,7 +1921,8 @@ fn emit_fk_checks_for_insert( .get_btree_table(&fk_ref.fk.parent_table) .expect("parent table"); let num_child_cols = fk_ref.child_cols.len(); - + let is_self_single = + table_name.eq_ignore_ascii_case(&fk_ref.fk.parent_table) && single_row_insert; // if any child FK value is NULL, this row doesn't reference the parent. let fk_ok = program.allocate_label(); for &pos_in_child in fk_ref.child_pos.iter() { @@ -1934,16 +1946,32 @@ fn emit_fk_checks_for_insert( root_page: parent_tbl.root_page, db: 0, }); - let only = 0; // n == 1 guaranteed if parent_uses_rowid + let rowid_pos = 0; // guaranteed if parent_uses_rowid let src = insertion - .col_mappings - .get(fk_ref.child_pos[only]) + .get_col_mapping_by_name(fk_ref.child_cols[rowid_pos].as_str()) .unwrap() .register; let violation = program.allocate_label(); + let tmp = program.alloc_register(); + program.emit_insn(Insn::Copy { + src_reg: src, + dst_reg: tmp, + extra_amount: 0, + }); + // coerce to INT (parent rowid affinity) + program.emit_insn(Insn::MustBeInt { reg: tmp }); + if is_self_single { + program.emit_insn(Insn::Eq { + lhs: tmp, + rhs: insertion.key_register(), + target_pc: fk_ok, + flags: CmpInsFlags::default(), + collation: None, + }); + } program.emit_insn(Insn::NotExists { cursor: pcur, - rowid_reg: src, + rowid_reg: tmp, target_pc: violation, }); program.emit_insn(Insn::Close { cursor_id: pcur }); @@ -1966,6 +1994,27 @@ fn emit_fk_checks_for_insert( }); } } else if let Some(ix) = &fk_ref.parent_unique_index { + if is_self_single { + let skip_probe = program.allocate_label(); + for (i, &pos_in_child) in fk_ref.child_pos.iter().enumerate() { + let child_reg = insertion.col_mappings.get(pos_in_child).unwrap().register; + let parent_reg = insertion + .get_col_mapping_by_name(fk_ref.parent_cols[i].as_str()) + .unwrap() + .register; + program.emit_insn(Insn::Ne { + lhs: child_reg, + rhs: parent_reg, + target_pc: skip_probe, // any mismatch and we do the normal probe + flags: CmpInsFlags::default().jump_if_null(), + collation: None, + }); + } + // all matched, OK + program.emit_insn(Insn::Goto { target_pc: fk_ok }); + program.preassign_label_to_next_insn(skip_probe); + } + // Parent has a UNIQUE index exactly on parent_cols: use Found against that index let icur = program.alloc_cursor_id(CursorType::BTreeIndex(ix.clone())); program.emit_insn(Insn::OpenRead { diff --git a/testing/foreign_keys.test b/testing/foreign_keys.test index 7db9b876c..a88ca55fe 100644 --- a/testing/foreign_keys.test +++ b/testing/foreign_keys.test @@ -124,14 +124,17 @@ do_execsql_test_in_memory_any_error fk-composite-unique-missing { INSERT INTO child VALUES (2,'A','X'); -- no ('A','X') in parent } -do_execsql_test_on_specific_db {:memory:} fk-rowid-alias-parent-ok { +# SQLite doesnt let you name a foreign key constraint 'rowid' explicitly... +# well it does.. but it throws a parse error only when you try to insert into the table -_- +# We will throw a parse error when you create the table instead, because that is +# obviously the only sane thing to do +do_execsql_test_in_memory_any_error fk-rowid-alias-parent { PRAGMA foreign_keys=ON; CREATE TABLE t(id INTEGER PRIMARY KEY, a TEXT); - CREATE TABLE c(cid INTEGER PRIMARY KEY, rid REFERENCES t(rowid)); + CREATE TABLE c(cid INTEGER PRIMARY KEY, rid REFERENCES t(rowid)); -- we error here INSERT INTO t VALUES (100,'x'); - INSERT INTO c VALUES (1, 100); - SELECT cid, rid FROM c; -} {1|100} + INSERT INTO c VALUES (1, 100); - sqlite errors here +} do_execsql_test_in_memory_any_error fk-rowid-alias-parent-missing { PRAGMA foreign_keys=ON; @@ -192,3 +195,139 @@ do_execsql_test_in_memory_any_error fk-composite-pk-delete-violate { -- Deleting the referenced tuple should fail DELETE FROM p WHERE a=2 AND b=3; } + +# Parent columns omitted: should default to parent's declared PRIMARY KEY (composite) +do_execsql_test_on_specific_db {:memory:} fk-default-parent-pk-composite-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE p( + a INT NOT NULL, + b INT NOT NULL, + PRIMARY KEY(a,b) + ); + -- Parent columns omitted in REFERENCES p + CREATE TABLE c( + id INT PRIMARY KEY, + x INT, y INT, + FOREIGN KEY(x,y) REFERENCES p + ); + INSERT INTO p VALUES (1,1), (1,2); + INSERT INTO c VALUES (10,1,1), (11,1,2), (12,NULL,2); -- NULL in child allowed + SELECT id,x,y FROM c ORDER BY id; +} {10|1|1 +11|1|2 +12||2} + +do_execsql_test_in_memory_any_error fk-default-parent-pk-composite-missing { + PRAGMA foreign_keys=ON; + CREATE TABLE p(a INT NOT NULL, b INT NOT NULL, PRIMARY KEY(a,b)); + CREATE TABLE c(id INT PRIMARY KEY, x INT, y INT, + FOREIGN KEY(x,y) REFERENCES p); -- omit parent cols + INSERT INTO p VALUES (1,1); + INSERT INTO c VALUES (20,1,2); -- (1,2) missing in parent +} + +# Parent has no explicitly declared PK, so we throw parse error when referencing bare table +do_execsql_test_in_memory_any_error fk-default-parent-rowid-no-parent-pk { + PRAGMA foreign_keys=ON; + CREATE TABLE p_no_pk(v TEXT); + CREATE TABLE c_rowid(id INT PRIMARY KEY, + r REFERENCES p_no_pk); + INSERT INTO p_no_pk(v) VALUES ('a'), ('b'); + INSERT INTO c_rowid VALUES (1, 1); +} + +do_execsql_test_on_specific_db {:memory:} fk-parent-omit-cols-parent-has-pk { + PRAGMA foreign_keys=ON; + CREATE TABLE p_pk(id INTEGER PRIMARY KEY, v TEXT); + CREATE TABLE c_ok(id INT PRIMARY KEY, r REFERENCES p_pk); -- binds to p_pk(id) + INSERT INTO p_pk VALUES (1,'a'),(2,'b'); + INSERT INTO c_ok VALUES (10,1); + INSERT INTO c_ok VALUES (11,2); + SELECT id, r FROM c_ok ORDER BY id; +} {10|1 11|2} + + +# Self-reference (same table) with INTEGER PRIMARY KEY: single-row insert should pass +do_execsql_test_on_specific_db {:memory:} fk-self-ipk-single-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + id INTEGER PRIMARY KEY, + rid REFERENCES t(id) -- child->parent in same table + ); + INSERT INTO t(id,rid) VALUES(5,5); -- self-reference, single-row + SELECT id, rid FROM t; +} {5|5} + +# Self-reference with mismatched value: should fail immediately (no counter semantics used) +do_execsql_test_in_memory_any_error fk-self-ipk-single-mismatch { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + id INTEGER PRIMARY KEY, + rid REFERENCES t(id) + ); + INSERT INTO t(id,rid) VALUES(5,4); -- rid!=id -> FK violation +} + +# Self-reference on composite PRIMARY KEY: single-row insert should pass +do_execsql_test_on_specific_db {:memory:} fk-self-composite-single-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + a INT NOT NULL, + b INT NOT NULL, + x INT, + y INT, + PRIMARY KEY(a,b), + FOREIGN KEY(x,y) REFERENCES t(a,b) + ); + INSERT INTO t(a,b,x,y) VALUES(1,2,1,2); -- self-reference matches PK + SELECT a,b,x,y FROM t; +} {1|2|1|2} + +# Rowid parent path: text '10' must be coerced to integer (MustBeInt) and succeed +do_execsql_test_on_specific_db {:memory:} fk-rowid-mustbeint-coercion-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(cid INTEGER PRIMARY KEY, pid REFERENCES p(id)); + INSERT INTO p(id) VALUES(10); + INSERT INTO c VALUES(1, '10'); -- text -> int via MustBeInt; should match + SELECT pid FROM c; +} {10} + +# Rowid parent path: non-numeric text cannot be coerced -> violation +do_execsql_test_in_memory_any_error fk-rowid-mustbeint-coercion-fail { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(cid INTEGER PRIMARY KEY, pid REFERENCES p(id)); + INSERT INTO p(id) VALUES(10); + INSERT INTO c VALUES(2, 'abc'); -- MustBeInt fails to match any parent row +} + +# Parent match via UNIQUE index (non-rowid), success path +do_execsql_test_on_specific_db {:memory:} fk-parent-unique-index-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(u TEXT, v TEXT, pad INT, UNIQUE(u,v)); + CREATE TABLE child(id INT PRIMARY KEY, cu TEXT, cv TEXT, + FOREIGN KEY(cu,cv) REFERENCES parent(u,v)); + INSERT INTO parent VALUES ('A','B',0),('A','C',0); + INSERT INTO child VALUES (1,'A','B'); + SELECT id, cu, cv FROM child ORDER BY id; +} {1|A|B} + +# Parent UNIQUE index path: missing key -> immediate violation +do_execsql_test_in_memory_any_error fk-parent-unique-index-missing { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(u TEXT, v TEXT, pad INT, UNIQUE(u,v)); + CREATE TABLE child(id INT PRIMARY KEY, cu TEXT, cv TEXT, + FOREIGN KEY(cu,cv) REFERENCES parent(u,v)); + INSERT INTO parent VALUES ('A','B',0); + INSERT INTO child VALUES (2,'A','X'); -- no ('A','X') in parent +} + +# NULL in child short-circuits FK check +do_execsql_test_on_specific_db {:memory:} fk-child-null-shortcircuit { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid REFERENCES p(id)); + INSERT INTO c VALUES (1, NULL); -- NULL child is allowed + SELECT id, pid FROM c; +} {1|} From f56f37fae5cf83f8169fb0e86f57a4345cf27731 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 1 Oct 2025 12:59:08 -0400 Subject: [PATCH 077/428] Add more tests for self-referencing FKs and remove unneeded FkIfZero checks/labels in emitter --- core/translate/emitter.rs | 56 +++++++++++++++++++++---------- testing/foreign_keys.test | 70 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 107 insertions(+), 19 deletions(-) diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 3a1d1d017..21107843d 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -1039,12 +1039,6 @@ pub fn emit_fk_parent_existence_checks( parent_cursor_id: usize, parent_rowid_reg: usize, ) -> Result<()> { - let after_all = program.allocate_label(); - program.emit_insn(Insn::FkIfZero { - target_pc: after_all, - if_zero: true, - }); - let parent_bt = resolver .schema .get_btree_table(parent_table_name) @@ -1130,6 +1124,25 @@ pub fn emit_fk_parent_existence_checks( extra_amount: 0, }); } + if let Some(count) = NonZeroUsize::new(parent_cols_len) { + // Apply index affinities for composite comparison + let aff: String = idx + .columns + .iter() + .map(|ic| { + let (_, col) = fk_ref + .child_table + .get_column(&ic.name) + .expect("indexed child column not found"); + col.affinity().aff_mask() + }) + .collect(); + program.emit_insn(Insn::Affinity { + start_reg: probe_start, + count, + affinities: aff, + }); + } let ok = program.allocate_label(); program.emit_insn(Insn::NotFound { @@ -1205,7 +1218,7 @@ pub fn emit_fk_parent_existence_checks( lhs: tmp, rhs: parent_key_start + i, target_pc: cont_i, - flags: CmpInsFlags::default(), + flags: CmpInsFlags::default().jump_if_null(), collation: program.curr_collation(), }); // Not equal -> skip this child row @@ -1242,7 +1255,6 @@ pub fn emit_fk_parent_existence_checks( program.emit_insn(Insn::Close { cursor_id: ccur }); } } - program.resolve_label(after_all, program.offset()); Ok(()) } @@ -2339,12 +2351,6 @@ pub fn emit_fk_child_existence_checks( rowid_reg: usize, updated_cols: &HashSet, ) -> Result<()> { - let after_all = program.allocate_label(); - program.emit_insn(Insn::FkIfZero { - target_pc: after_all, - if_zero: true, - }); - for fk_ref in resolver.schema.resolved_fks_for_child(table_name) { // Skip when the child key is untouched (including rowid-alias special case) if !fk_ref.child_key_changed(updated_cols, table) { @@ -2387,11 +2393,17 @@ pub fn emit_fk_child_existence_checks( } else { start_reg + i_child }; - + let tmp = program.alloc_register(); + program.emit_insn(Insn::Copy { + src_reg: val_reg, + dst_reg: tmp, + extra_amount: 0, + }); + program.emit_insn(Insn::MustBeInt { reg: tmp }); let violation = program.allocate_label(); program.emit_insn(Insn::NotExists { cursor: pcur, - rowid_reg: val_reg, + rowid_reg: tmp, target_pc: violation, }); program.emit_insn(Insn::Close { cursor_id: pcur }); @@ -2440,6 +2452,16 @@ pub fn emit_fk_child_existence_checks( }); } + let aff: String = parent_idx + .columns + .iter() + .map(|ic| table.columns[ic.pos_in_table].affinity().aff_mask()) + .collect(); + program.emit_insn(Insn::Affinity { + start_reg: probe_start, + count: NonZeroUsize::new(n).unwrap(), + affinities: aff, + }); let found = program.allocate_label(); program.emit_insn(Insn::Found { cursor_id: icur, @@ -2471,8 +2493,6 @@ pub fn emit_fk_child_existence_checks( program.preassign_label_to_next_insn(fk_ok); } - - program.resolve_label(after_all, program.offset()); Ok(()) } diff --git a/testing/foreign_keys.test b/testing/foreign_keys.test index a88ca55fe..78e8498f2 100644 --- a/testing/foreign_keys.test +++ b/testing/foreign_keys.test @@ -299,7 +299,7 @@ do_execsql_test_in_memory_any_error fk-rowid-mustbeint-coercion-fail { CREATE TABLE p(id INTEGER PRIMARY KEY); CREATE TABLE c(cid INTEGER PRIMARY KEY, pid REFERENCES p(id)); INSERT INTO p(id) VALUES(10); - INSERT INTO c VALUES(2, 'abc'); -- MustBeInt fails to match any parent row + INSERT INTO c VALUES(2, 'abc'); -- MustBeInt fails to match any parent row } # Parent match via UNIQUE index (non-rowid), success path @@ -331,3 +331,71 @@ do_execsql_test_on_specific_db {:memory:} fk-child-null-shortcircuit { INSERT INTO c VALUES (1, NULL); -- NULL child is allowed SELECT id, pid FROM c; } {1|} + +do_execsql_test_on_specific_db {:memory:} fk-self-unique-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + u TEXT, + v TEXT, + cu TEXT, + cv TEXT, + UNIQUE(u,v), + FOREIGN KEY(cu,cv) REFERENCES t(u,v) + ); + -- Single row insert where child points to its own (u,v): allowed + INSERT INTO t(u,v,cu,cv) VALUES('A','B','A','B'); + SELECT u, v, cu, cv FROM t; +} {A|B|A|B} + +do_execsql_test_in_memory_any_error fk-self-unique-mismatch { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + u TEXT, + v TEXT, + cu TEXT, + cv TEXT, + UNIQUE(u,v), + FOREIGN KEY(cu,cv) REFERENCES t(u,v) + ); + -- Child points to a different (u,v) that doesn't exist: must fail + INSERT INTO t(u,v,cu,cv) VALUES('A','B','A','X'); +} + +do_execsql_test_on_specific_db {:memory:} fk-self-unique-reference-existing-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + u TEXT, + v TEXT, + cu TEXT, + cv TEXT, + UNIQUE(u,v), + FOREIGN KEY(cu,cv) REFERENCES t(u,v) + ); + -- Insert a parent row first + INSERT INTO t(u,v,cu,cv) VALUES('P','Q',NULL,NULL); + -- Now insert a row whose FK references the existing ('P','Q'): OK + INSERT INTO t(u,v,cu,cv) VALUES('X','Y','P','Q'); + SELECT u, v, cu, cv FROM t ORDER BY u, v, cu, cv; +} {P|Q|| X|Y|P|Q} + +do_execsql_test_on_specific_db {:memory:} fk-self-unique-multirow-no-fastpath { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + u TEXT, + v TEXT, + cu TEXT, + cv TEXT, + UNIQUE(u,v), + FOREIGN KEY(cu,cv) REFERENCES t(u,v) + ); + INSERT INTO t(u,v,cu,cv) VALUES + ('C','D','C','D'), + ('E','F','E','F'); +} {} + +do_execsql_test_in_memory_any_error fk-self-multirow-one-bad { + PRAGMA foreign_keys=ON; + CREATE TABLE t(id INTEGER PRIMARY KEY, rid INTEGER, + FOREIGN KEY(rid) REFERENCES t(id)); + INSERT INTO t(id,rid) VALUES (1,1),(3,99); -- 99 has no parent -> error +} From a232e3cc7aa58f23648c2fb81010ff2253caa85e Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 1 Oct 2025 13:54:13 -0400 Subject: [PATCH 078/428] Implement proper handling of deferred foreign keys --- core/lib.rs | 3 +- core/schema.rs | 272 ++++---- core/translate/collate.rs | 4 + core/translate/emitter.rs | 1171 +++++++-------------------------- core/translate/fkeys.rs | 1025 +++++++++++++++++++++++++++++ core/translate/insert.rs | 469 ++++++++----- core/translate/mod.rs | 1 + core/translate/upsert.rs | 187 +----- core/vdbe/builder.rs | 3 + core/vdbe/execute.rs | 118 ++-- core/vdbe/explain.rs | 8 +- core/vdbe/insn.rs | 3 +- core/vdbe/mod.rs | 4 +- testing/foreign_keys.test | 717 +++++++++++++++++++- tests/integration/fuzz/mod.rs | 695 ++++++++++++++++++- 15 files changed, 3262 insertions(+), 1418 deletions(-) create mode 100644 core/translate/fkeys.rs diff --git a/core/lib.rs b/core/lib.rs index 43e07b609..a2ac4a267 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -583,6 +583,7 @@ impl Database { busy_timeout: RwLock::new(Duration::new(0, 0)), is_mvcc_bootstrap_connection: AtomicBool::new(is_mvcc_bootstrap_connection), fk_pragma: AtomicBool::new(false), + fk_deferred_violations: AtomicIsize::new(0), }); self.n_connections .fetch_add(1, std::sync::atomic::Ordering::SeqCst); @@ -1102,6 +1103,7 @@ pub struct Connection { is_mvcc_bootstrap_connection: AtomicBool, /// Whether pragma foreign_keys=ON for this connection fk_pragma: AtomicBool, + fk_deferred_violations: AtomicIsize, } impl Drop for Connection { @@ -1540,7 +1542,6 @@ impl Connection { pub fn foreign_keys_enabled(&self) -> bool { self.fk_pragma.load(Ordering::Acquire) } - pub(crate) fn clear_deferred_foreign_key_violations(&self) -> isize { self.fk_deferred_violations.swap(0, Ordering::Release) } diff --git a/core/schema.rs b/core/schema.rs index 29a805294..f81c8fe2e 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -849,12 +849,17 @@ impl Schema { /// Compute all resolved FKs *referencing* `table_name` (arg: `table_name` is the parent). /// Each item contains the child table, normalized columns/positions, and the parent lookup /// strategy (rowid vs. UNIQUE index or PK). - pub fn resolved_fks_referencing(&self, table_name: &str) -> Vec { + pub fn resolved_fks_referencing(&self, table_name: &str) -> Result> { + let fk_mismatch_err = |child: &str, parent: &str| -> crate::LimboError { + crate::LimboError::Constraint(format!( + "foreign key mismatch - \"{child}\" referencing \"{parent}\"" + )) + }; let target = normalize_ident(table_name); let mut out = Vec::with_capacity(4); // arbitrary estimate let parent_tbl = self .get_btree_table(&target) - .expect("parent table must exist"); + .ok_or_else(|| fk_mismatch_err("", &target))?; // Precompute helper to find parent unique index, if it's not the rowid let find_parent_unique = |cols: &Vec| -> Option> { @@ -875,78 +880,82 @@ impl Schema { let Some(child) = t.btree() else { continue; }; - for fk in &child.foreign_keys { - if fk.parent_table != target { + if !fk.parent_table.eq_ignore_ascii_case(&target) { continue; } - - // Resolve + normalize columns + if fk.child_columns.is_empty() { + // SQLite requires an explicit child column list unless the table has a single-column PK that + return Err(fk_mismatch_err(&child.name, &parent_tbl.name)); + } let child_cols: Vec = fk.child_columns.clone(); + let mut child_pos = Vec::with_capacity(child_cols.len()); - // If no explicit parent columns were given, they were validated in add_btree_table() - // to match the parent's PK. We resolve them the same way here. + for cname in &child_cols { + let (i, _) = child + .get_column(cname) + .ok_or_else(|| fk_mismatch_err(&child.name, &parent_tbl.name))?; + child_pos.push(i); + } let parent_cols: Vec = if fk.parent_columns.is_empty() { - parent_tbl - .primary_key_columns - .iter() - .map(|(col, _)| col) - .cloned() - .collect() + if !parent_tbl.primary_key_columns.is_empty() { + parent_tbl + .primary_key_columns + .iter() + .map(|(col, _)| col) + .cloned() + .collect() + } else { + return Err(fk_mismatch_err(&child.name, &parent_tbl.name)); + } } else { fk.parent_columns.clone() }; - // Child positions - let child_pos: Vec = child_cols - .iter() - .map(|cname| { - child - .get_column(cname) - .map(|(i, _)| i) - .unwrap_or_else(|| panic!("child col {}.{} missing", child.name, cname)) - }) - .collect(); + // Same length required + if parent_cols.len() != child_cols.len() { + return Err(fk_mismatch_err(&child.name, &parent_tbl.name)); + } - let parent_pos: Vec = parent_cols - .iter() - .map(|cname| { - // Allow "rowid" sentinel; return 0 but it won't be used when parent_uses_rowid == true - parent_tbl - .get_column(cname) - .map(|(i, _)| i) - .or_else(|| { - if cname.eq_ignore_ascii_case("rowid") { - Some(0) - } else { - None - } - }) - .unwrap_or_else(|| { - panic!("parent col {}.{cname} missing", parent_tbl.name) - }) - }) - .collect(); + let mut parent_pos = Vec::with_capacity(parent_cols.len()); + for pc in &parent_cols { + let pos = parent_tbl.get_column(pc).map(|(i, _)| i).or_else(|| { + ROWID_STRS + .iter() + .any(|s| pc.eq_ignore_ascii_case(s)) + .then_some(0) + }); + let Some(p) = pos else { + return Err(fk_mismatch_err(&child.name, &parent_tbl.name)); + }; + parent_pos.push(p); + } - // Detect parent rowid usage (single-column and rowid/alias) - let parent_uses_rowid = parent_cols.len() == 1 && { - let c = parent_cols[0].as_str(); - c.eq_ignore_ascii_case("rowid") - || parent_tbl.columns.iter().any(|col| { - col.is_rowid_alias - && col - .name + // Determine if parent key is ROWID/alias + let parent_uses_rowid = parent_tbl.primary_key_columns.len().eq(&1) && { + if parent_tbl.primary_key_columns.len() == 1 { + let pk_name = &parent_tbl.primary_key_columns[0].0; + // rowid or alias INTEGER PRIMARY KEY; either is ok implicitly + parent_tbl.columns.iter().any(|c| { + c.is_rowid_alias + && c.name .as_deref() - .is_some_and(|n| n.eq_ignore_ascii_case(c)) - }) + .is_some_and(|n| n.eq_ignore_ascii_case(pk_name)) + }) || ROWID_STRS.iter().any(|&r| r.eq_ignore_ascii_case(pk_name)) + } else { + false + } }; + // If not rowid, there must be a non-partial UNIQUE exactly on parent_cols let parent_unique_index = if parent_uses_rowid { None } else { find_parent_unique(&parent_cols) + .ok_or_else(|| fk_mismatch_err(&child.name, &parent_tbl.name))? + .into() }; - + fk.validate()?; out.push(ResolvedFkRef { child_table: Arc::clone(&child), fk: Arc::clone(fk), @@ -959,80 +968,80 @@ impl Schema { }); } } - out + Ok(out) } /// Compute all resolved FKs *declared by* `child_table` - pub fn resolved_fks_for_child(&self, child_table: &str) -> Vec { - let child_name = normalize_ident(child_table); - let Some(child) = self.get_btree_table(&child_name) else { - return vec![]; + pub fn resolved_fks_for_child(&self, child_table: &str) -> crate::Result> { + let fk_mismatch_err = |child: &str, parent: &str| -> crate::LimboError { + crate::LimboError::Constraint(format!( + "foreign key mismatch - \"{child}\" referencing \"{parent}\"" + )) }; - - // Helper to find the UNIQUE/index on the parent that matches the resolved parent cols - let find_parent_unique = - |parent_tbl: &BTreeTable, cols: &Vec| -> Option> { - self.get_indices(&parent_tbl.name) - .find(|idx| { - idx.unique - && idx.columns.len() == cols.len() - && idx - .columns - .iter() - .zip(cols.iter()) - .all(|(ic, pc)| ic.name.eq_ignore_ascii_case(pc)) - }) - .cloned() - }; + let child_name = normalize_ident(child_table); + let child = self + .get_btree_table(&child_name) + .ok_or_else(|| fk_mismatch_err(&child_name, ""))?; let mut out = Vec::with_capacity(child.foreign_keys.len()); - for fk in &child.foreign_keys { - let parent_name = &fk.parent_table; - let Some(parent_tbl) = self.get_btree_table(parent_name) else { - continue; - }; - // Normalize columns - let child_cols: Vec = fk - .child_columns - .iter() - .map(|s| normalize_ident(s)) - .collect(); + for fk in &child.foreign_keys { + let parent_name = normalize_ident(&fk.parent_table); + let parent_tbl = self + .get_btree_table(&parent_name) + .ok_or_else(|| fk_mismatch_err(&child.name, &parent_name))?; + + let child_cols: Vec = fk.child_columns.clone(); + if child_cols.is_empty() { + return Err(fk_mismatch_err(&child.name, &parent_tbl.name)); + } + + // Child positions exist + let mut child_pos = Vec::with_capacity(child_cols.len()); + for cname in &child_cols { + let (i, _) = child + .get_column(cname) + .ok_or_else(|| fk_mismatch_err(&child.name, &parent_tbl.name))?; + child_pos.push(i); + } + let parent_cols: Vec = if fk.parent_columns.is_empty() { if !parent_tbl.primary_key_columns.is_empty() { parent_tbl .primary_key_columns .iter() - .map(|(n, _)| normalize_ident(n)) + .map(|(col, _)| col) + .cloned() .collect() } else { - vec!["rowid".to_string()] + return Err(fk_mismatch_err(&child.name, &parent_tbl.name)); } } else { - fk.parent_columns - .iter() - .map(|s| normalize_ident(s)) - .collect() + fk.parent_columns.clone() }; - let child_pos: Vec = child_cols - .iter() - .map(|c| child.get_column(c).expect("child col missing").0) - .collect(); - let parent_pos: Vec = parent_cols - .iter() - .map(|c| { - parent_tbl - .get_column(c) - .map(|(i, _)| i) - .or_else(|| c.eq_ignore_ascii_case("rowid").then_some(0)) - .expect("parent col missing") - }) - .collect(); + if parent_cols.len() != child_cols.len() { + return Err(fk_mismatch_err(&child.name, &parent_tbl.name)); + } - let parent_uses_rowid = parent_cols.len() == 1 && { + // Parent positions exist, or rowid sentinel + let mut parent_pos = Vec::with_capacity(parent_cols.len()); + for pc in &parent_cols { + let pos = parent_tbl.get_column(pc).map(|(i, _)| i).or_else(|| { + ROWID_STRS + .iter() + .any(|&r| r.eq_ignore_ascii_case(pc)) + .then_some(0) + }); + let Some(p) = pos else { + return Err(fk_mismatch_err(&child.name, &parent_tbl.name)); + }; + parent_pos.push(p); + } + + let parent_uses_rowid = parent_cols.len().eq(&1) && { let c = parent_cols[0].as_str(); - c.eq_ignore_ascii_case("rowid") + ROWID_STRS.iter().any(|&r| r.eq_ignore_ascii_case(c)) || parent_tbl.columns.iter().any(|col| { col.is_rowid_alias && col @@ -1042,12 +1051,27 @@ impl Schema { }) }; + // Must be PK or a non-partial UNIQUE on exactly those columns. let parent_unique_index = if parent_uses_rowid { None } else { - find_parent_unique(&parent_tbl, &parent_cols) + self.get_indices(&parent_tbl.name) + .find(|idx| { + idx.unique + && idx.where_clause.is_none() + && idx.columns.len() == parent_cols.len() + && idx + .columns + .iter() + .zip(parent_cols.iter()) + .all(|(ic, pc)| ic.name.eq_ignore_ascii_case(pc)) + }) + .cloned() + .ok_or_else(|| fk_mismatch_err(&child.name, &parent_tbl.name))? + .into() }; + fk.validate()?; out.push(ResolvedFkRef { child_table: Arc::clone(&child), fk: Arc::clone(fk), @@ -1059,7 +1083,8 @@ impl Schema { parent_unique_index, }); } - out + + Ok(out) } /// Returns if any table declares a FOREIGN KEY whose parent is `table_name`. @@ -1534,7 +1559,7 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R constraints, } in columns { - let name = normalize_ident(col_name.as_str()); + let name = col_name.as_str().to_string(); // Regular sqlite tables have an integer rowid that uniquely identifies a row. // Even if you create a table with a column e.g. 'id INT PRIMARY KEY', there will still // be a separate hidden rowid, and the 'id' column will have a separate index built for it. @@ -1673,7 +1698,7 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R } cols.push(Column { - name: Some(name), + name: Some(normalize_ident(&name)), ty, ty_str, primary_key, @@ -1809,6 +1834,29 @@ pub struct ForeignKey { /// DEFERRABLE INITIALLY DEFERRED pub deferred: bool, } +impl ForeignKey { + fn validate(&self) -> Result<()> { + // TODO: remove this when actions are implemented + if !(matches!(self.on_update, RefAct::NoAction) + && matches!(self.on_delete, RefAct::NoAction)) + { + crate::bail_parse_error!( + "foreign key actions other than NO ACTION are not implemented" + ); + } + if self + .parent_columns + .iter() + .any(|c| ROWID_STRS.iter().any(|&r| r.eq_ignore_ascii_case(c))) + { + return Err(crate::LimboError::Constraint(format!( + "foreign key mismatch referencing \"{}\"", + self.parent_table + ))); + } + Ok(()) + } +} /// A single resolved foreign key where `parent_table == target`. #[derive(Clone, Debug)] diff --git a/core/translate/collate.rs b/core/translate/collate.rs index 04324424c..721d82944 100644 --- a/core/translate/collate.rs +++ b/core/translate/collate.rs @@ -371,6 +371,7 @@ mod tests { hidden: false, }], unique_sets: vec![], + foreign_keys: vec![], })), }); @@ -413,6 +414,7 @@ mod tests { hidden: false, }], unique_sets: vec![], + foreign_keys: vec![], })), }); // Right table t2(id=2) @@ -446,6 +448,7 @@ mod tests { hidden: false, }], unique_sets: vec![], + foreign_keys: vec![], })), }); table_references @@ -486,6 +489,7 @@ mod tests { hidden: false, }], unique_sets: vec![], + foreign_keys: vec![], })), }); table_references diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 21107843d..e281277dc 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -24,12 +24,17 @@ use super::select::emit_simple_count; use super::subquery::emit_subqueries; use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY; use crate::function::Func; -use crate::schema::{BTreeTable, Column, ResolvedFkRef, Schema, Table, ROWID_SENTINEL}; +use crate::schema::{BTreeTable, Column, Schema, Table, ROWID_SENTINEL}; use crate::translate::compound_select::emit_program_for_compound_select; use crate::translate::expr::{ emit_returning_results, translate_expr_no_constant_opt, walk_expr_mut, NoConstantOptReason, ReturningValueRegisters, WalkControl, }; +use crate::translate::fkeys::{ + build_index_affinity_string, emit_fk_child_update_counters, + emit_fk_delete_parent_existence_checks, emit_fk_scope_if_needed, emit_parent_pk_change_checks, + stabilize_new_row_for_fk, +}; use crate::translate::plan::{DeletePlan, JoinedTable, Plan, QueryDestination, Search}; use crate::translate::planner::ROWID_STRS; use crate::translate::result_row::try_fold_expr_to_i64; @@ -432,25 +437,18 @@ fn emit_program_for_delete( }); } - let has_parent_fks = connection.foreign_keys_enabled() && { - let table_name = plan - .table_references - .joined_tables() - .first() - .unwrap() - .table - .get_name(); - resolver.schema.any_resolved_fks_referencing(table_name) - }; - // Open FK scope for the whole statement - if has_parent_fks { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: true, - }); + let fk_enabled = connection.foreign_keys_enabled(); + let table_name = plan + .table_references + .joined_tables() + .first() + .unwrap() + .table + .get_name() + .to_string(); + if fk_enabled { + emit_fk_scope_if_needed(program, resolver, &table_name, true)?; } - // Initialize cursors and other resources needed for query execution init_loop( program, @@ -489,12 +487,8 @@ fn emit_program_for_delete( None, )?; program.preassign_label_to_next_insn(after_main_loop_label); - if has_parent_fks { - program.emit_insn(Insn::FkCounter { - increment_value: -1, - check_abort: true, - is_scope: true, - }); + if fk_enabled { + emit_fk_scope_if_needed(program, resolver, &table_name, false)?; } // Finalize program program.result_columns = plan.result_columns; @@ -502,6 +496,169 @@ fn emit_program_for_delete( Ok(()) } +pub fn emit_fk_child_decrement_on_delete( + program: &mut ProgramBuilder, + resolver: &Resolver, + child_tbl: &BTreeTable, + child_table_name: &str, + child_cursor_id: usize, + child_rowid_reg: usize, +) -> crate::Result<()> { + for fk_ref in resolver.schema.resolved_fks_for_child(child_table_name)? { + if !fk_ref.fk.deferred { + continue; + } + // Fast path: if any FK column is NULL can't be a violation + let null_skip = program.allocate_label(); + for cname in &fk_ref.child_cols { + let (pos, col) = child_tbl.get_column(cname).unwrap(); + let src = if col.is_rowid_alias { + child_rowid_reg + } else { + let tmp = program.alloc_register(); + program.emit_insn(Insn::Column { + cursor_id: child_cursor_id, + column: pos, + dest: tmp, + default: None, + }); + tmp + }; + program.emit_insn(Insn::IsNull { + reg: src, + target_pc: null_skip, + }); + } + + if fk_ref.parent_uses_rowid { + // Probe parent table by rowid + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent btree"); + let pcur = program.alloc_cursor_id(CursorType::BTreeTable(parent_tbl.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: pcur, + root_page: parent_tbl.root_page, + db: 0, + }); + + let (pos, col) = child_tbl.get_column(&fk_ref.child_cols[0]).unwrap(); + let val = if col.is_rowid_alias { + child_rowid_reg + } else { + let tmp = program.alloc_register(); + program.emit_insn(Insn::Column { + cursor_id: child_cursor_id, + column: pos, + dest: tmp, + default: None, + }); + tmp + }; + let tmpi = program.alloc_register(); + program.emit_insn(Insn::Copy { + src_reg: val, + dst_reg: tmpi, + extra_amount: 0, + }); + program.emit_insn(Insn::MustBeInt { reg: tmpi }); + + // NotExists jumps when the parent key is missing, so we decrement there + let missing = program.allocate_label(); + let done = program.allocate_label(); + + program.emit_insn(Insn::NotExists { + cursor: pcur, + rowid_reg: tmpi, + target_pc: missing, + }); + + // Parent FOUND, no decrement + program.emit_insn(Insn::Close { cursor_id: pcur }); + program.emit_insn(Insn::Goto { target_pc: done }); + + // Parent MISSING, decrement is guarded by FkIfZero to avoid underflow + program.preassign_label_to_next_insn(missing); + program.emit_insn(Insn::Close { cursor_id: pcur }); + program.emit_insn(Insn::FkIfZero { + is_scope: false, + target_pc: done, + }); + program.emit_insn(Insn::FkCounter { + is_scope: false, + increment_value: -1, + }); + + program.preassign_label_to_next_insn(done); + } else { + // Probe parent unique index + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent btree"); + let idx = fk_ref.parent_unique_index.as_ref().expect("unique index"); + let icur = program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: icur, + root_page: idx.root_page, + db: 0, + }); + + // Build probe from current child row + let n = fk_ref.child_cols.len(); + let probe = program.alloc_registers(n); + for (i, cname) in fk_ref.child_cols.iter().enumerate() { + let (pos, col) = child_tbl.get_column(cname).unwrap(); + let src = if col.is_rowid_alias { + child_rowid_reg + } else { + let r = program.alloc_register(); + program.emit_insn(Insn::Column { + cursor_id: child_cursor_id, + column: pos, + dest: r, + default: None, + }); + r + }; + program.emit_insn(Insn::Copy { + src_reg: src, + dst_reg: probe + i, + extra_amount: 0, + }); + } + program.emit_insn(Insn::Affinity { + start_reg: probe, + count: std::num::NonZeroUsize::new(n).unwrap(), + affinities: build_index_affinity_string(idx, &parent_tbl), + }); + + let ok = program.allocate_label(); + program.emit_insn(Insn::Found { + cursor_id: icur, + target_pc: ok, + record_reg: probe, + num_regs: n, + }); + program.emit_insn(Insn::Close { cursor_id: icur }); + program.emit_insn(Insn::FkIfZero { + is_scope: false, + target_pc: ok, + }); + program.emit_insn(Insn::FkCounter { + increment_value: -1, + is_scope: false, + }); + program.preassign_label_to_next_insn(ok); + program.emit_insn(Insn::Close { cursor_id: icur }); + } + + program.preassign_label_to_next_insn(null_skip); + } + Ok(()) +} + fn emit_delete_insns( connection: &Arc, program: &mut ProgramBuilder, @@ -540,20 +697,32 @@ fn emit_delete_insns( dest: key_reg, }); - if connection.foreign_keys_enabled() - && unsafe { &*table_reference }.btree().is_some() - && t_ctx - .resolver - .schema - .any_resolved_fks_referencing(table_name) - { - emit_fk_parent_existence_checks( - program, - &t_ctx.resolver, - table_name, - main_table_cursor_id, - key_reg, - )?; + if connection.foreign_keys_enabled() { + if let Some(table) = unsafe { &*table_reference }.btree() { + if t_ctx + .resolver + .schema + .any_resolved_fks_referencing(table_name) + { + emit_fk_delete_parent_existence_checks( + program, + &t_ctx.resolver, + table_name, + main_table_cursor_id, + key_reg, + )?; + } + if t_ctx.resolver.schema.has_child_fks(table_name) { + emit_fk_child_decrement_on_delete( + program, + &t_ctx.resolver, + &table, + table_name, + main_table_cursor_id, + key_reg, + )?; + } + } } if unsafe { &*table_reference }.virtual_table().is_some() { @@ -734,530 +903,6 @@ fn emit_delete_insns( Ok(()) } -/// Emit parent-side FK counter maintenance for UPDATE on a table with a composite PK. -/// -/// For every child FK that targets `parent_table_name`: -/// 1. Pass 1: If any child row currently references the OLD parent key, -/// increment the global FK counter (deferred violation potential). -/// We try an index probe on child(child_cols...) if available, else do a table scan. -/// 2. Pass 2: If any child row references the NEW parent key, decrement the counter -/// (because the reference would be “retargeted” by the update). -pub fn emit_fk_parent_pk_change_counters( - program: &mut ProgramBuilder, - incoming: &[ResolvedFkRef], - resolver: &Resolver, - old_pk_start: usize, - new_pk_start: usize, - n_cols: usize, -) -> crate::Result<()> { - if incoming.is_empty() { - return Ok(()); - } - for fk_ref in incoming.iter() { - let child_tbl = &fk_ref.child_table; - let child_cols = &fk_ref.fk.child_columns; - // Prefer exact-prefix index on child - let idx = resolver.schema.get_indices(&child_tbl.name).find(|ix| { - ix.columns.len() == child_cols.len() - && ix - .columns - .iter() - .zip(child_cols.iter()) - .all(|(ic, cc)| ic.name.eq_ignore_ascii_case(cc)) - }); - - if let Some(ix) = idx { - let icur = program.alloc_cursor_id(CursorType::BTreeIndex(ix.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: icur, - root_page: ix.root_page, - db: 0, - }); - - // Build child-probe key from OLD parent PK (1:1 map ensured by the column-name equality above) - // We just copy the OLD PK registers, apply index affinities before the probe. - let probe_start = old_pk_start; - - // Apply affinities for composite comparison - let aff: String = ix - .columns - .iter() - .map(|ic| { - let (_, col) = child_tbl - .get_column(&ic.name) - .expect("indexed child column not found"); - col.affinity().aff_mask() - }) - .collect(); - if let Some(count) = NonZeroUsize::new(n_cols) { - program.emit_insn(Insn::Affinity { - start_reg: probe_start, - count, - affinities: aff, - }); - } - - let found = program.allocate_label(); - program.emit_insn(Insn::Found { - cursor_id: icur, - target_pc: found, - record_reg: probe_start, - num_regs: n_cols, - }); - - // Not found => no increment - program.emit_insn(Insn::Close { cursor_id: icur }); - let skip = program.allocate_label(); - program.emit_insn(Insn::Goto { target_pc: skip }); - - // Found => increment - program.preassign_label_to_next_insn(found); - program.emit_insn(Insn::Close { cursor_id: icur }); - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: false, - }); - program.preassign_label_to_next_insn(skip); - } else { - // Table-scan fallback with per-column checks (jump-if-NULL semantics) - let ccur = program.alloc_cursor_id(CursorType::BTreeTable(child_tbl.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: ccur, - root_page: child_tbl.root_page, - db: 0, - }); - - let done = program.allocate_label(); - program.emit_insn(Insn::Rewind { - cursor_id: ccur, - pc_if_empty: done, - }); - - let loop_top = program.allocate_label(); - let next_row = program.allocate_label(); - program.preassign_label_to_next_insn(loop_top); - - for (i, child_name) in child_cols.iter().enumerate() { - let (pos, _) = child_tbl.get_column(child_name).ok_or_else(|| { - crate::LimboError::InternalError(format!("child col {child_name} missing")) - })?; - let tmp = program.alloc_register(); - program.emit_insn(Insn::Column { - cursor_id: ccur, - column: pos, - dest: tmp, - default: None, - }); - - // Treat NULL as non-match: jump away immediately - program.emit_insn(Insn::IsNull { - reg: tmp, - target_pc: next_row, - }); - - // Eq(tmp, old_pk[i]) with Binary collation, jump-if-NULL enabled - let cont = program.allocate_label(); - program.emit_insn(Insn::Eq { - lhs: tmp, - rhs: old_pk_start + i, - target_pc: cont, - flags: CmpInsFlags::default().jump_if_null(), - collation: Some(super::collate::CollationSeq::Binary), - }); - program.emit_insn(Insn::Goto { - target_pc: next_row, - }); - program.preassign_label_to_next_insn(cont); - } - - // All columns matched OLD -> increment - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: false, - }); - - program.preassign_label_to_next_insn(next_row); - program.emit_insn(Insn::Next { - cursor_id: ccur, - pc_if_next: loop_top, - }); - program.preassign_label_to_next_insn(done); - program.emit_insn(Insn::Close { cursor_id: ccur }); - } - } - - // PASS 2: count children of NEW key - for fk_ref in incoming.iter() { - let child_tbl = &fk_ref.child_table; - let child_cols = &fk_ref.fk.child_columns; - - let idx = resolver.schema.get_indices(&child_tbl.name).find(|ix| { - ix.columns.len() == child_cols.len() - && ix - .columns - .iter() - .zip(child_cols.iter()) - .all(|(ic, cc)| ic.name.eq_ignore_ascii_case(cc)) - }); - - if let Some(ix) = idx { - let icur = program.alloc_cursor_id(CursorType::BTreeIndex(ix.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: icur, - root_page: ix.root_page, - db: 0, - }); - - // Build probe from NEW PK registers; apply affinities - let probe_start = new_pk_start; - let aff: String = ix - .columns - .iter() - .map(|ic| { - let (_, col) = child_tbl - .get_column(&ic.name) - .expect("indexed child column not found"); - col.affinity().aff_mask() - }) - .collect(); - if let Some(count) = NonZeroUsize::new(n_cols) { - program.emit_insn(Insn::Affinity { - start_reg: probe_start, - count, - affinities: aff, - }); - } - - let found = program.allocate_label(); - program.emit_insn(Insn::Found { - cursor_id: icur, - target_pc: found, - record_reg: probe_start, - num_regs: n_cols, - }); - - // Not found => no decrement - program.emit_insn(Insn::Close { cursor_id: icur }); - let skip = program.allocate_label(); - program.emit_insn(Insn::Goto { target_pc: skip }); - - // Found => decrement - program.preassign_label_to_next_insn(found); - program.emit_insn(Insn::Close { cursor_id: icur }); - program.emit_insn(Insn::FkCounter { - increment_value: -1, - check_abort: false, - is_scope: false, - }); - program.preassign_label_to_next_insn(skip); - } else { - // Table-scan fallback on NEW key - let ccur = program.alloc_cursor_id(CursorType::BTreeTable(child_tbl.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: ccur, - root_page: child_tbl.root_page, - db: 0, - }); - - let done = program.allocate_label(); - program.emit_insn(Insn::Rewind { - cursor_id: ccur, - pc_if_empty: done, - }); - - let loop_top = program.allocate_label(); - let next_row = program.allocate_label(); - program.preassign_label_to_next_insn(loop_top); - - for (i, child_name) in child_cols.iter().enumerate() { - let (pos, _) = child_tbl.get_column(child_name).ok_or_else(|| { - crate::LimboError::InternalError(format!("child col {child_name} missing")) - })?; - let tmp = program.alloc_register(); - program.emit_insn(Insn::Column { - cursor_id: ccur, - column: pos, - dest: tmp, - default: None, - }); - - program.emit_insn(Insn::IsNull { - reg: tmp, - target_pc: next_row, - }); - - let cont = program.allocate_label(); - program.emit_insn(Insn::Eq { - lhs: tmp, - rhs: new_pk_start + i, - target_pc: cont, - flags: CmpInsFlags::default().jump_if_null(), - collation: Some(super::collate::CollationSeq::Binary), - }); - program.emit_insn(Insn::Goto { - target_pc: next_row, - }); - program.preassign_label_to_next_insn(cont); - } - - // All columns matched NEW: decrement - program.emit_insn(Insn::FkCounter { - increment_value: -1, - check_abort: false, - is_scope: false, - }); - - program.preassign_label_to_next_insn(next_row); - program.emit_insn(Insn::Next { - cursor_id: ccur, - pc_if_next: loop_top, - }); - program.preassign_label_to_next_insn(done); - program.emit_insn(Insn::Close { cursor_id: ccur }); - } - } - Ok(()) -} - -/// Emit checks that prevent updating/deleting a parent row that is still referenced by a child. -/// -/// If the global deferred-FK counter is zero, we skip all checks (fast path for no outstanding refs). -/// For each incoming FK: -/// Build the parent key (in FK parent-column order) from the current row. -/// Probe the child table for any row whose FK columns equal that key. -/// - If an exact child index exists on the FK columns, use `NotFound` against that index. -/// - Otherwise, scan the child table and compare each FK column (NULL short-circuits to “no match”). -/// If a referencing child is found: -/// - Deferred FK: increment counter (violation will be raised at COMMIT). -/// - Immediate FK: raise `SQLITE_CONSTRAINT_FOREIGNKEY` now. -pub fn emit_fk_parent_existence_checks( - program: &mut ProgramBuilder, - resolver: &Resolver, - parent_table_name: &str, - parent_cursor_id: usize, - parent_rowid_reg: usize, -) -> Result<()> { - let parent_bt = resolver - .schema - .get_btree_table(parent_table_name) - .ok_or_else(|| crate::LimboError::InternalError("parent not btree".into()))?; - - for fk_ref in resolver.schema.resolved_fks_referencing(parent_table_name) { - // Resolve parent key columns - let parent_cols: Vec = if fk_ref.fk.parent_columns.is_empty() { - parent_bt - .primary_key_columns - .iter() - .map(|(n, _)| n.clone()) - .collect() - } else { - fk_ref.fk.parent_columns.clone() - }; - - // Load parent key values for THIS row into regs, in parent_cols order - let parent_cols_len = parent_cols.len(); - let parent_key_start = program.alloc_registers(parent_cols_len); - for (i, pcol) in parent_cols.iter().enumerate() { - let src = if pcol.eq_ignore_ascii_case("rowid") { - parent_rowid_reg - } else { - let (pos, col) = parent_bt - .get_column(&normalize_ident(pcol)) - .ok_or_else(|| { - crate::LimboError::InternalError(format!("col {pcol} missing")) - })?; - if col.is_rowid_alias { - parent_rowid_reg - } else { - // read current cell's column value - program.emit_insn(Insn::Column { - cursor_id: parent_cursor_id, - column: pos, - dest: parent_key_start + i, - default: None, - }); - continue; - } - }; - program.emit_insn(Insn::Copy { - src_reg: src, - dst_reg: parent_key_start + i, - extra_amount: 0, - }); - } - - // Build child-side probe key in child_columns order, from parent_key_start - // - // Map parent_col to child_col position 1:1 - let child_cols = &fk_ref.fk.child_columns; - // Try to find an index on child(child_cols...) to do an existance check - let child_idx = resolver - .schema - .get_indices(&fk_ref.child_table.name) - .find(|idx| { - idx.columns.len() == child_cols.len() - && idx - .columns - .iter() - .zip(child_cols.iter()) - .all(|(ic, cc)| ic.name.eq_ignore_ascii_case(cc)) - }); - - if let Some(idx) = child_idx { - // Index existence probe: Found -> violation - let icur = program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: icur, - root_page: idx.root_page, - db: 0, - }); - - // Pack the child key regs from the parent key regs in fk order. - // Same order because we matched columns 1:1 above - let probe_start = program.alloc_registers(parent_cols_len); - for i in 0..parent_cols_len { - program.emit_insn(Insn::Copy { - src_reg: parent_key_start + i, - dst_reg: probe_start + i, - extra_amount: 0, - }); - } - if let Some(count) = NonZeroUsize::new(parent_cols_len) { - // Apply index affinities for composite comparison - let aff: String = idx - .columns - .iter() - .map(|ic| { - let (_, col) = fk_ref - .child_table - .get_column(&ic.name) - .expect("indexed child column not found"); - col.affinity().aff_mask() - }) - .collect(); - program.emit_insn(Insn::Affinity { - start_reg: probe_start, - count, - affinities: aff, - }); - } - - let ok = program.allocate_label(); - program.emit_insn(Insn::NotFound { - cursor_id: icur, - target_pc: ok, - record_reg: probe_start, - num_regs: parent_cols_len, - }); - - // found referencing child row = violation path - program.emit_insn(Insn::Close { cursor_id: icur }); - if fk_ref.fk.deferred { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: false, - }); - } else { - program.emit_insn(Insn::Halt { - err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, - description: "FOREIGN KEY constraint failed".to_string(), - }); - } - program.preassign_label_to_next_insn(ok); - program.emit_insn(Insn::Close { cursor_id: icur }); - } else { - // Fallback: table-scan the child table - let ccur = program.alloc_cursor_id(CursorType::BTreeTable(fk_ref.child_table.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: ccur, - root_page: fk_ref.child_table.root_page, - db: 0, - }); - - let done = program.allocate_label(); - program.emit_insn(Insn::Rewind { - cursor_id: ccur, - pc_if_empty: done, - }); - - // Loop labels local to this scan - let loop_top = program.allocate_label(); - let next_row = program.allocate_label(); - - program.preassign_label_to_next_insn(loop_top); - - // For each FK column: require a match, if NULL or mismatch -> next_row - for (i, child_col) in child_cols.iter().enumerate() { - let (pos, _) = fk_ref - .child_table - .get_column(&normalize_ident(child_col)) - .ok_or_else(|| { - crate::LimboError::InternalError(format!("child col {child_col} missing")) - })?; - - let tmp = program.alloc_register(); - program.emit_insn(Insn::Column { - cursor_id: ccur, - column: pos, - dest: tmp, - default: None, - }); - - // NULL FK value => this child row cannot reference the parent, skip row - program.emit_insn(Insn::IsNull { - reg: tmp, - target_pc: next_row, - }); - - // Equal? continue to check next column; else jump to next_row - let cont_i = program.allocate_label(); - program.emit_insn(Insn::Eq { - lhs: tmp, - rhs: parent_key_start + i, - target_pc: cont_i, - flags: CmpInsFlags::default().jump_if_null(), - collation: program.curr_collation(), - }); - // Not equal -> skip this child row - program.emit_insn(Insn::Goto { - target_pc: next_row, - }); - - // Equal path resumes here, then we check the next column - program.preassign_label_to_next_insn(cont_i); - } - - // If we reached here, all FK columns matched, violation - if fk_ref.fk.deferred { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: false, - }); - } else { - program.emit_insn(Insn::Halt { - err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, - description: "FOREIGN KEY constraint failed".to_string(), - }); - } - - // Advance to next child row and loop - program.preassign_label_to_next_insn(next_row); - program.emit_insn(Insn::Next { - cursor_id: ccur, - pc_if_next: loop_top, - }); - - program.preassign_label_to_next_insn(done); - program.emit_insn(Insn::Close { cursor_id: ccur }); - } - } - Ok(()) -} - #[instrument(skip_all, level = Level::DEBUG)] fn emit_program_for_update( connection: &Arc, @@ -1309,16 +954,13 @@ fn emit_program_for_update( .first() .unwrap() .table - .get_name(); - let has_child_fks = fk_enabled && resolver.schema.has_child_fks(table_name); - let has_parent_fks = fk_enabled && resolver.schema.any_resolved_fks_referencing(table_name); + .get_name() + .to_string(); + // statement-level FK scope open - if has_child_fks || has_parent_fks { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: true, - }); + if fk_enabled { + let open = true; + emit_fk_scope_if_needed(program, resolver, &table_name, open)?; } // Initialize the main loop @@ -1387,13 +1029,9 @@ fn emit_program_for_update( )?; program.preassign_label_to_next_insn(after_main_loop_label); - - if has_child_fks || has_parent_fks { - program.emit_insn(Insn::FkCounter { - increment_value: -1, - check_abort: true, - is_scope: true, - }); + if fk_enabled { + let open = false; + emit_fk_scope_if_needed(program, resolver, &table_name, open)?; } after(program); @@ -1662,41 +1300,23 @@ fn emit_update_insns( if connection.foreign_keys_enabled() { let rowid_new_reg = rowid_set_clause_reg.unwrap_or(beg); if let Some(table_btree) = unsafe { &*table_ref }.btree() { - //first, stablize the image of the NEW row in the registers - if !table_btree.primary_key_columns.is_empty() { - let set_cols: std::collections::HashSet = plan - .set_clauses - .iter() - .filter_map(|(i, _)| if *i == ROWID_SENTINEL { None } else { Some(*i) }) - .collect(); - for (pk_name, _) in &table_btree.primary_key_columns { - let (pos, col) = table_btree.get_column(pk_name).unwrap(); - if !set_cols.contains(&pos) { - if col.is_rowid_alias { - program.emit_insn(Insn::Copy { - src_reg: rowid_new_reg, - dst_reg: start + pos, - extra_amount: 0, - }); - } else { - program.emit_insn(Insn::Column { - cursor_id, - column: pos, - dest: start + pos, - default: None, - }); - } - } - } - } + stabilize_new_row_for_fk( + program, + &table_btree, + &plan.set_clauses, + cursor_id, + start, + rowid_new_reg, + )?; if t_ctx.resolver.schema.has_child_fks(table_name) { // Child-side checks: // this ensures updated row still satisfies child FKs that point OUT from this table - emit_fk_child_existence_checks( + emit_fk_child_update_counters( program, &t_ctx.resolver, &table_btree, table_name, + cursor_id, start, rowid_new_reg, &plan @@ -1715,178 +1335,17 @@ fn emit_update_insns( .schema .any_resolved_fks_referencing(table_name) { - let updated_parent_positions: HashSet = - plan.set_clauses.iter().map(|(i, _)| *i).collect(); - - // If no incoming FK’s parent key can be affected by these updates, skip the whole parent-FK block. - let incoming = t_ctx.resolver.schema.resolved_fks_referencing(table_name); - let parent_tbl = &table_btree; - let maybe_affects_parent_key = incoming - .iter() - .any(|r| r.parent_key_may_change(&updated_parent_positions, parent_tbl)); - if maybe_affects_parent_key { - let pk_len = table_btree.primary_key_columns.len(); - match pk_len { - 0 => { - // Rowid table: the implicit PK is rowid. - // If rowid is unchanged then we skip, else check that no child row still references the OLD key. - let skip_parent_fk = program.allocate_label(); - let old_rowid_reg = beg; - let new_rowid_reg = rowid_set_clause_reg.unwrap_or(beg); - - program.emit_insn(Insn::Eq { - lhs: new_rowid_reg, - rhs: old_rowid_reg, - target_pc: skip_parent_fk, - flags: CmpInsFlags::default(), - collation: program.curr_collation(), - }); - // Rowid changed: check incoming FKs (children) that reference this parent row - emit_fk_parent_existence_checks( - program, - &t_ctx.resolver, - table_name, - cursor_id, - old_rowid_reg, - )?; - program.preassign_label_to_next_insn(skip_parent_fk); - } - 1 => { - // Single-column declared PK, may be a rowid alias or a real column. - // If PK value unchanged then skip, else verify no child still references OLD key. - let (pk_name, _) = &table_btree.primary_key_columns[0]; - let (pos, col) = table_btree.get_column(pk_name).unwrap(); - - let old_reg = program.alloc_register(); - if col.is_rowid_alias { - program.emit_insn(Insn::RowId { - cursor_id, - dest: old_reg, - }); - } else { - program.emit_insn(Insn::Column { - cursor_id, - column: pos, - dest: old_reg, - default: None, - }); - } - let new_reg = if col.is_rowid_alias { - rowid_new_reg - } else { - start + pos - }; - - let skip_parent_fk = program.allocate_label(); - program.emit_insn(Insn::Eq { - lhs: old_reg, - rhs: new_reg, - target_pc: skip_parent_fk, - flags: CmpInsFlags::default(), - collation: program.curr_collation(), - }); - emit_fk_parent_existence_checks( - program, - &t_ctx.resolver, - table_name, - cursor_id, - beg, - )?; - program.preassign_label_to_next_insn(skip_parent_fk); - } - _ => { - // Composite PK: - // 1. Materialize OLD PK vector from current row. - // 2. Materialize NEW PK vector from updated registers. - // 3. If any component differs, the PK changes -> run composite parent-FK update flow. - let old_pk_start = program.alloc_registers(pk_len); - for (i, (pk_name, _)) in - table_btree.primary_key_columns.iter().enumerate() - { - let (pos, col) = table_btree.get_column(pk_name).unwrap(); - if col.is_rowid_alias { - program.emit_insn(Insn::Copy { - src_reg: beg, - dst_reg: old_pk_start + i, - extra_amount: 0, - }); - } else { - program.emit_insn(Insn::Column { - cursor_id, - column: pos, - dest: old_pk_start + i, - default: None, - }); - } - } - - // Build NEW PK values from the updated registers - let new_pk_start = program.alloc_registers(pk_len); - for (i, (pk_name, _)) in - table_btree.primary_key_columns.iter().enumerate() - { - let (pos, col) = table_btree.get_column(pk_name).unwrap(); - let src = if col.is_rowid_alias { - rowid_new_reg - } else { - start + pos // Updated value from SET clause - }; - program.emit_insn(Insn::Copy { - src_reg: src, - dst_reg: new_pk_start + i, - extra_amount: 0, - }); - } - - // Compare OLD vs NEW to see if PK is changing - let skip_parent_fk = program.allocate_label(); - let pk_changed = program.allocate_label(); - - for i in 0..pk_len { - if i == pk_len - 1 { - // Last comparison, if equal, all are equal - program.emit_insn(Insn::Eq { - lhs: old_pk_start + i, - rhs: new_pk_start + i, - target_pc: skip_parent_fk, - flags: CmpInsFlags::default(), - collation: program.curr_collation(), - }); - // Not equal - PK is changing - program.emit_insn(Insn::Goto { - target_pc: pk_changed, - }); - } else { - // Not last comparison - let next_check = program.allocate_label(); - program.emit_insn(Insn::Eq { - lhs: old_pk_start + i, - rhs: new_pk_start + i, - target_pc: next_check, // Equal, check next component - flags: CmpInsFlags::default(), - collation: program.curr_collation(), - }); - // Not equal - PK is changing - program.emit_insn(Insn::Goto { - target_pc: pk_changed, - }); - program.preassign_label_to_next_insn(next_check); - } - } - program.preassign_label_to_next_insn(pk_changed); - // PK changed: maintain the deferred FK counter in two passes - emit_fk_parent_pk_change_counters( - program, - &incoming, - &t_ctx.resolver, - old_pk_start, - new_pk_start, - pk_len, - )?; - program.preassign_label_to_next_insn(skip_parent_fk); - } - } - } + emit_parent_pk_change_checks( + program, + &t_ctx.resolver, + &table_btree, + cursor_id, + beg, + start, + rowid_new_reg, + rowid_set_clause_reg, + &plan.set_clauses, + )?; } } } @@ -2342,160 +1801,6 @@ fn emit_update_insns( Ok(()) } -pub fn emit_fk_child_existence_checks( - program: &mut ProgramBuilder, - resolver: &Resolver, - table: &BTreeTable, - table_name: &str, - start_reg: usize, - rowid_reg: usize, - updated_cols: &HashSet, -) -> Result<()> { - for fk_ref in resolver.schema.resolved_fks_for_child(table_name) { - // Skip when the child key is untouched (including rowid-alias special case) - if !fk_ref.child_key_changed(updated_cols, table) { - continue; - } - - let fk_ok = program.allocate_label(); - - // look for NULLs in any child FK column - for child_name in &fk_ref.child_cols { - let (i, col) = table.get_column(child_name).unwrap(); - let src = if col.is_rowid_alias { - rowid_reg - } else { - start_reg + i - }; - program.emit_insn(Insn::IsNull { - reg: src, - target_pc: fk_ok, - }); - } - - if fk_ref.parent_uses_rowid { - // Fast rowid probe on the parent table - let parent_tbl = resolver - .schema - .get_btree_table(&fk_ref.fk.parent_table) - .expect("Parent must be btree"); - - let pcur = program.alloc_cursor_id(CursorType::BTreeTable(parent_tbl.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: pcur, - root_page: parent_tbl.root_page, - db: 0, - }); - - let (i_child, col_child) = table.get_column(&fk_ref.child_cols[0]).unwrap(); - let val_reg = if col_child.is_rowid_alias { - rowid_reg - } else { - start_reg + i_child - }; - let tmp = program.alloc_register(); - program.emit_insn(Insn::Copy { - src_reg: val_reg, - dst_reg: tmp, - extra_amount: 0, - }); - program.emit_insn(Insn::MustBeInt { reg: tmp }); - let violation = program.allocate_label(); - program.emit_insn(Insn::NotExists { - cursor: pcur, - rowid_reg: tmp, - target_pc: violation, - }); - program.emit_insn(Insn::Close { cursor_id: pcur }); - program.emit_insn(Insn::Goto { target_pc: fk_ok }); - - program.preassign_label_to_next_insn(violation); - program.emit_insn(Insn::Close { cursor_id: pcur }); - if fk_ref.fk.deferred { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: false, - }); - } else { - program.emit_insn(Insn::Halt { - err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, - description: "FOREIGN KEY constraint failed".to_string(), - }); - } - } else { - // Unique-index probe on the parent (already resolved) - let parent_idx = fk_ref - .parent_unique_index - .as_ref() - .expect("parent unique index required"); - let icur = program.alloc_cursor_id(CursorType::BTreeIndex(parent_idx.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: icur, - root_page: parent_idx.root_page, - db: 0, - }); - - // Build probe key from NEW child values in fk order - let n = fk_ref.child_cols.len(); - let probe_start = program.alloc_registers(n); - for (k, child_name) in fk_ref.child_cols.iter().enumerate() { - let (i, col) = table.get_column(child_name).unwrap(); - program.emit_insn(Insn::Copy { - src_reg: if col.is_rowid_alias { - rowid_reg - } else { - start_reg + i - }, - dst_reg: probe_start + k, - extra_amount: 0, - }); - } - - let aff: String = parent_idx - .columns - .iter() - .map(|ic| table.columns[ic.pos_in_table].affinity().aff_mask()) - .collect(); - program.emit_insn(Insn::Affinity { - start_reg: probe_start, - count: NonZeroUsize::new(n).unwrap(), - affinities: aff, - }); - let found = program.allocate_label(); - program.emit_insn(Insn::Found { - cursor_id: icur, - target_pc: found, - record_reg: probe_start, - num_regs: n, - }); - - // Not found => violation - program.emit_insn(Insn::Close { cursor_id: icur }); - if fk_ref.fk.deferred { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: false, - }); - } else { - program.emit_insn(Insn::Halt { - err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, - description: "FOREIGN KEY constraint failed".to_string(), - }); - } - program.emit_insn(Insn::Goto { target_pc: fk_ok }); - - // Found => OK - program.preassign_label_to_next_insn(found); - program.emit_insn(Insn::Close { cursor_id: icur }); - } - - program.preassign_label_to_next_insn(fk_ok); - } - Ok(()) -} - pub fn prepare_cdc_if_necessary( program: &mut ProgramBuilder, schema: &Schema, diff --git a/core/translate/fkeys.rs b/core/translate/fkeys.rs new file mode 100644 index 000000000..b2b356b37 --- /dev/null +++ b/core/translate/fkeys.rs @@ -0,0 +1,1025 @@ +use turso_parser::ast::Expr; + +use super::ProgramBuilder; +use crate::{ + schema::{BTreeTable, ForeignKey, Index, ResolvedFkRef, ROWID_SENTINEL}, + translate::{emitter::Resolver, planner::ROWID_STRS}, + vdbe::{ + builder::CursorType, + insn::{CmpInsFlags, Insn}, + }, + Result, +}; +use std::{collections::HashSet, num::NonZeroUsize, sync::Arc}; + +#[inline] +/// Increment/decrement the FK scope counter if `table_name` has either outgoing or incoming FKs. +/// +/// Returns `true` if a scope change was emitted. Scope open (+1) occurs before a statement +/// touching the table; scope close (−1) occurs after. On scope close, remaining deferred +/// violations are raised by the runtime. +pub fn emit_fk_scope_if_needed( + program: &mut ProgramBuilder, + resolver: &Resolver, + table_name: &str, + open: bool, +) -> Result { + let has_fks = resolver.schema.has_child_fks(table_name) + || resolver.schema.any_resolved_fks_referencing(table_name); + if has_fks { + program.emit_insn(Insn::FkCounter { + increment_value: if open { 1 } else { -1 }, + is_scope: true, + }); + } + Ok(has_fks) +} + +/// Open a read cursor on an index and return its cursor id. +#[inline] +pub fn open_read_index(program: &mut ProgramBuilder, idx: &Arc) -> usize { + let icur = program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: icur, + root_page: idx.root_page, + db: 0, + }); + icur +} + +/// Open a read cursor on a table and return its cursor id. +#[inline] +pub fn open_read_table(program: &mut ProgramBuilder, tbl: &Arc) -> usize { + let tcur = program.alloc_cursor_id(CursorType::BTreeTable(tbl.clone())); + program.emit_insn(Insn::OpenRead { + cursor_id: tcur, + root_page: tbl.root_page, + db: 0, + }); + tcur +} + +/// Copy `len` registers starting at `src_start` to a fresh block and apply index affinities. +/// Returns the destination start register. +#[inline] +fn copy_with_affinity( + program: &mut ProgramBuilder, + src_start: usize, + len: usize, + idx: &Index, + aff_from_tbl: &BTreeTable, +) -> usize { + let dst = program.alloc_registers(len); + for i in 0..len { + program.emit_insn(Insn::Copy { + src_reg: src_start + i, + dst_reg: dst + i, + extra_amount: 0, + }); + } + if let Some(count) = NonZeroUsize::new(len) { + program.emit_insn(Insn::Affinity { + start_reg: dst, + count, + affinities: build_index_affinity_string(idx, aff_from_tbl), + }); + } + dst +} + +/// Issue an index probe using `Found`/`NotFound` and route to `on_found`/`on_not_found`. +pub fn index_probe( + program: &mut ProgramBuilder, + icur: usize, + record_reg: usize, + num_regs: usize, + mut on_found: F, + mut on_not_found: G, +) -> Result<()> +where + F: FnMut(&mut ProgramBuilder) -> Result<()>, + G: FnMut(&mut ProgramBuilder) -> Result<()>, +{ + let lbl_found = program.allocate_label(); + let lbl_join = program.allocate_label(); + + program.emit_insn(Insn::Found { + cursor_id: icur, + target_pc: lbl_found, + record_reg, + num_regs, + }); + + // NOT FOUND path + on_not_found(program)?; + program.emit_insn(Insn::Goto { + target_pc: lbl_join, + }); + + // FOUND path + program.preassign_label_to_next_insn(lbl_found); + on_found(program)?; + + // Join & close once + program.preassign_label_to_next_insn(lbl_join); + program.emit_insn(Insn::Close { cursor_id: icur }); + Ok(()) +} + +/// Iterate a table and call `on_match` when all child columns equal the key at `parent_key_start`. +/// Skips rows where any FK column is NULL. If `self_exclude_rowid` is Some, the row with that rowid is skipped. +fn table_scan_match_any( + program: &mut ProgramBuilder, + child_tbl: &Arc, + child_cols: &[String], + parent_key_start: usize, + self_exclude_rowid: Option, + mut on_match: F, +) -> Result<()> +where + F: FnMut(&mut ProgramBuilder) -> Result<()>, +{ + let ccur = open_read_table(program, child_tbl); + let done = program.allocate_label(); + program.emit_insn(Insn::Rewind { + cursor_id: ccur, + pc_if_empty: done, + }); + + let loop_top = program.allocate_label(); + program.preassign_label_to_next_insn(loop_top); + let next_row = program.allocate_label(); + + // Compare each FK column to parent key component. + for (i, cname) in child_cols.iter().enumerate() { + let (pos, _) = child_tbl.get_column(cname).ok_or_else(|| { + crate::LimboError::InternalError(format!("child col {cname} missing")) + })?; + let tmp = program.alloc_register(); + program.emit_insn(Insn::Column { + cursor_id: ccur, + column: pos, + dest: tmp, + default: None, + }); + program.emit_insn(Insn::IsNull { + reg: tmp, + target_pc: next_row, + }); + + let cont = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: tmp, + rhs: parent_key_start + i, + target_pc: cont, + flags: CmpInsFlags::default().jump_if_null(), + collation: Some(super::collate::CollationSeq::Binary), + }); + program.emit_insn(Insn::Goto { + target_pc: next_row, + }); + program.preassign_label_to_next_insn(cont); + } + + //self-reference exclusion on rowid + if let Some(parent_rowid) = self_exclude_rowid { + let child_rowid = program.alloc_register(); + let skip = program.allocate_label(); + program.emit_insn(Insn::RowId { + cursor_id: ccur, + dest: child_rowid, + }); + program.emit_insn(Insn::Eq { + lhs: child_rowid, + rhs: parent_rowid, + target_pc: skip, + flags: CmpInsFlags::default(), + collation: None, + }); + on_match(program)?; + program.preassign_label_to_next_insn(skip); + } else { + on_match(program)?; + } + + program.preassign_label_to_next_insn(next_row); + program.emit_insn(Insn::Next { + cursor_id: ccur, + pc_if_next: loop_top, + }); + + program.preassign_label_to_next_insn(done); + program.emit_insn(Insn::Close { cursor_id: ccur }); + Ok(()) +} + +/// Build the index affinity mask string (one char per indexed column). +#[inline] +pub fn build_index_affinity_string(idx: &Index, table: &BTreeTable) -> String { + idx.columns + .iter() + .map(|ic| table.columns[ic.pos_in_table].affinity().aff_mask()) + .collect() +} + +/// For deferred FKs: increment the global counter; for immediate FKs: halt with FK error. +pub fn emit_fk_violation(program: &mut ProgramBuilder, fk: &ForeignKey) -> Result<()> { + if fk.deferred { + program.emit_insn(Insn::FkCounter { + increment_value: 1, + is_scope: false, + }); + } else { + program.emit_insn(Insn::Halt { + err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, + description: "FOREIGN KEY constraint failed".to_string(), + }); + } + Ok(()) +} + +/// Stabilize the NEW row image for FK checks (UPDATE): +/// fill in unmodified PK columns from the current row so the NEW PK vector is complete. +pub fn stabilize_new_row_for_fk( + program: &mut ProgramBuilder, + table_btree: &BTreeTable, + set_clauses: &[(usize, Box)], + cursor_id: usize, + start: usize, + rowid_new_reg: usize, +) -> Result<()> { + if table_btree.primary_key_columns.is_empty() { + return Ok(()); + } + let set_cols: HashSet = set_clauses + .iter() + .filter_map(|(i, _)| if *i == ROWID_SENTINEL { None } else { Some(*i) }) + .collect(); + + for (pk_name, _) in &table_btree.primary_key_columns { + let (pos, col) = table_btree + .get_column(pk_name) + .ok_or_else(|| crate::LimboError::InternalError(format!("pk col {pk_name} missing")))?; + if !set_cols.contains(&pos) { + if col.is_rowid_alias { + program.emit_insn(Insn::Copy { + src_reg: rowid_new_reg, + dst_reg: start + pos, + extra_amount: 0, + }); + } else { + program.emit_insn(Insn::Column { + cursor_id, + column: pos, + dest: start + pos, + default: None, + }); + } + } + } + Ok(()) +} + +/// Parent-side checks when the parent PK might change (UPDATE on parent): +/// Detect if any child references the OLD key (potential violation), and if any references the NEW key +/// (which cancels one potential violation). For composite PKs this builds OLD/NEW vectors first. +#[allow(clippy::too_many_arguments)] +pub fn emit_parent_pk_change_checks( + program: &mut ProgramBuilder, + resolver: &Resolver, + table_btree: &BTreeTable, + cursor_id: usize, + old_rowid_reg: usize, + start: usize, + rowid_new_reg: usize, + rowid_set_clause_reg: Option, + set_clauses: &[(usize, Box)], +) -> Result<()> { + let updated_positions: HashSet = set_clauses.iter().map(|(i, _)| *i).collect(); + let incoming = resolver + .schema + .resolved_fks_referencing(&table_btree.name)?; + let affects_pk = incoming + .iter() + .any(|r| r.parent_key_may_change(&updated_positions, table_btree)); + if !affects_pk { + return Ok(()); + } + + match table_btree.primary_key_columns.len() { + 0 => emit_rowid_pk_change_check( + program, + &incoming, + resolver, + old_rowid_reg, + rowid_set_clause_reg.unwrap_or(old_rowid_reg), + ), + 1 => emit_single_pk_change_check( + program, + &incoming, + resolver, + table_btree, + cursor_id, + start, + rowid_new_reg, + ), + _ => emit_composite_pk_change_check( + program, + &incoming, + resolver, + table_btree, + cursor_id, + old_rowid_reg, + start, + rowid_new_reg, + ), + } +} + +/// Rowid-table parent PK change: compare rowid OLD vs NEW; if changed, run two-pass counters. +pub fn emit_rowid_pk_change_check( + program: &mut ProgramBuilder, + incoming: &[ResolvedFkRef], + resolver: &Resolver, + old_rowid_reg: usize, + new_rowid_reg: usize, +) -> Result<()> { + let skip = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: new_rowid_reg, + rhs: old_rowid_reg, + target_pc: skip, + flags: CmpInsFlags::default(), + collation: None, + }); + + let old_pk = program.alloc_register(); + let new_pk = program.alloc_register(); + program.emit_insn(Insn::Copy { + src_reg: old_rowid_reg, + dst_reg: old_pk, + extra_amount: 0, + }); + program.emit_insn(Insn::Copy { + src_reg: new_rowid_reg, + dst_reg: new_pk, + extra_amount: 0, + }); + + emit_fk_parent_pk_change_counters(program, incoming, resolver, old_pk, new_pk, 1)?; + program.preassign_label_to_next_insn(skip); + Ok(()) +} + +/// Single-column PK parent change: load OLD and NEW; if changed, run two-pass counters. +pub fn emit_single_pk_change_check( + program: &mut ProgramBuilder, + incoming: &[ResolvedFkRef], + resolver: &Resolver, + table_btree: &BTreeTable, + cursor_id: usize, + start: usize, + rowid_new_reg: usize, +) -> Result<()> { + let (pk_name, _) = &table_btree.primary_key_columns[0]; + let (pos, col) = table_btree.get_column(pk_name).unwrap(); + + let old_reg = program.alloc_register(); + if col.is_rowid_alias { + program.emit_insn(Insn::RowId { + cursor_id, + dest: old_reg, + }); + } else { + program.emit_insn(Insn::Column { + cursor_id, + column: pos, + dest: old_reg, + default: None, + }); + } + let new_reg = if col.is_rowid_alias { + rowid_new_reg + } else { + start + pos + }; + + let skip = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: old_reg, + rhs: new_reg, + target_pc: skip, + flags: CmpInsFlags::default(), + collation: None, + }); + + let old_pk = program.alloc_register(); + let new_pk = program.alloc_register(); + program.emit_insn(Insn::Copy { + src_reg: old_reg, + dst_reg: old_pk, + extra_amount: 0, + }); + program.emit_insn(Insn::Copy { + src_reg: new_reg, + dst_reg: new_pk, + extra_amount: 0, + }); + + emit_fk_parent_pk_change_counters(program, incoming, resolver, old_pk, new_pk, 1)?; + program.preassign_label_to_next_insn(skip); + Ok(()) +} + +/// Composite-PK parent change: build OLD/NEW vectors; if any component differs, run two-pass counters. +#[allow(clippy::too_many_arguments)] +pub fn emit_composite_pk_change_check( + program: &mut ProgramBuilder, + incoming: &[ResolvedFkRef], + resolver: &Resolver, + table_btree: &BTreeTable, + cursor_id: usize, + old_rowid_reg: usize, + start: usize, + rowid_new_reg: usize, +) -> Result<()> { + let pk_len = table_btree.primary_key_columns.len(); + + let old_pk = program.alloc_registers(pk_len); + for (i, (pk_name, _)) in table_btree.primary_key_columns.iter().enumerate() { + let (pos, col) = table_btree.get_column(pk_name).unwrap(); + if col.is_rowid_alias { + program.emit_insn(Insn::Copy { + src_reg: old_rowid_reg, + dst_reg: old_pk + i, + extra_amount: 0, + }); + } else { + program.emit_insn(Insn::Column { + cursor_id, + column: pos, + dest: old_pk + i, + default: None, + }); + } + } + let new_pk = program.alloc_registers(pk_len); + for (i, (pk_name, _)) in table_btree.primary_key_columns.iter().enumerate() { + let (pos, col) = table_btree.get_column(pk_name).unwrap(); + let src = if col.is_rowid_alias { + rowid_new_reg + } else { + start + pos + }; + program.emit_insn(Insn::Copy { + src_reg: src, + dst_reg: new_pk + i, + extra_amount: 0, + }); + } + + let skip = program.allocate_label(); + let changed = program.allocate_label(); + for i in 0..pk_len { + let next = if i + 1 == pk_len { + None + } else { + Some(program.allocate_label()) + }; + program.emit_insn(Insn::Eq { + lhs: old_pk + i, + rhs: new_pk + i, + target_pc: next.unwrap_or(skip), + flags: CmpInsFlags::default(), + collation: None, + }); + program.emit_insn(Insn::Goto { target_pc: changed }); + if let Some(n) = next { + program.preassign_label_to_next_insn(n); + } + } + + program.preassign_label_to_next_insn(changed); + emit_fk_parent_pk_change_counters(program, incoming, resolver, old_pk, new_pk, pk_len)?; + program.preassign_label_to_next_insn(skip); + Ok(()) +} + +/// Two-pass parent-side maintenance for UPDATE of a parent key: +/// 1. Probe child for OLD key, increment deferred counter if any references exist. +/// 2. Probe child for NEW key, guarded decrement cancels exactly one increment if present +pub fn emit_fk_parent_pk_change_counters( + program: &mut ProgramBuilder, + incoming: &[ResolvedFkRef], + resolver: &Resolver, + old_pk_start: usize, + new_pk_start: usize, + n_cols: usize, +) -> Result<()> { + for fk_ref in incoming { + emit_fk_parent_key_probe( + program, + resolver, + fk_ref, + old_pk_start, + n_cols, + ParentProbePass::Old, + )?; + emit_fk_parent_key_probe( + program, + resolver, + fk_ref, + new_pk_start, + n_cols, + ParentProbePass::New, + )?; + } + Ok(()) +} + +#[derive(Clone, Copy)] +enum ParentProbePass { + Old, + New, +} + +/// Probe the child side for a given parent key. If `increment_value` is +1, increment counter on match. +/// If −1, we guard with `FkIfZero` then decrement to avoid counter underflow in edge cases. +fn emit_fk_parent_key_probe( + program: &mut ProgramBuilder, + resolver: &Resolver, + fk_ref: &ResolvedFkRef, + parent_key_start: usize, + n_cols: usize, + pass: ParentProbePass, +) -> Result<()> { + let child_tbl = &fk_ref.child_table; + let child_cols = &fk_ref.fk.child_columns; + let is_deferred = fk_ref.fk.deferred; + + let on_match = |p: &mut ProgramBuilder| -> Result<()> { + match (is_deferred, pass) { + // OLD key referenced by a child + (false, ParentProbePass::Old) => { + // Immediate FK: fail now. + emit_fk_violation(p, &fk_ref.fk)?; // HALT for immediate + } + (true, ParentProbePass::Old) => { + // Deferred FK: increment counter. + p.emit_insn(Insn::FkCounter { + increment_value: 1, + is_scope: false, + }); + } + + // NEW key referenced by a child (cancel one deferred violation) + (true, ParentProbePass::New) => { + // Guard to avoid underflow if OLD pass didn't increment. + let skip = p.allocate_label(); + p.emit_insn(Insn::FkIfZero { + is_scope: false, + target_pc: skip, + }); + p.emit_insn(Insn::FkCounter { + increment_value: -1, + is_scope: false, + }); + p.preassign_label_to_next_insn(skip); + } + // Immediate FK on NEW pass: nothing to cancel; do nothing. + (false, ParentProbePass::New) => {} + } + Ok(()) + }; + + // Prefer exact child index on (child_cols...) + let idx = resolver.schema.get_indices(&child_tbl.name).find(|ix| { + ix.columns.len() == child_cols.len() + && ix + .columns + .iter() + .zip(child_cols.iter()) + .all(|(ic, cc)| ic.name.eq_ignore_ascii_case(cc)) + }); + + if let Some(ix) = idx { + let icur = open_read_index(program, ix); + let probe = copy_with_affinity(program, parent_key_start, n_cols, ix, child_tbl); + + // FOUND => on_match; NOT FOUND => no-op + index_probe(program, icur, probe, n_cols, on_match, |_p| Ok(()))?; + } else { + // Table scan fallback + table_scan_match_any( + program, + child_tbl, + child_cols, + parent_key_start, + None, + on_match, + )?; + } + + Ok(()) +} + +/// Build a parent key vector (in FK parent-column order) into `dest_start`. +/// Handles rowid aliasing and explicit ROWID names; uses current row for non-rowid columns. +fn build_parent_key( + program: &mut ProgramBuilder, + parent_bt: &BTreeTable, + parent_cols: &[String], + parent_cursor_id: usize, + parent_rowid_reg: usize, + dest_start: usize, +) -> Result<()> { + for (i, pcol) in parent_cols.iter().enumerate() { + let src = if ROWID_STRS.iter().any(|s| pcol.eq_ignore_ascii_case(s)) { + parent_rowid_reg + } else { + let (pos, col) = parent_bt + .get_column(pcol) + .ok_or_else(|| crate::LimboError::InternalError(format!("col {pcol} missing")))?; + if col.is_rowid_alias { + parent_rowid_reg + } else { + program.emit_insn(Insn::Column { + cursor_id: parent_cursor_id, + column: pos, + dest: dest_start + i, + default: None, + }); + continue; + } + }; + program.emit_insn(Insn::Copy { + src_reg: src, + dst_reg: dest_start + i, + extra_amount: 0, + }); + } + Ok(()) +} + +/// Child-side FK maintenance for UPDATE/UPSERT: +/// If any FK columns of this child row changed: +/// Pass 1 (OLD tuple): if OLD is non-NULL and parent is missing → decrement deferred counter (guarded). +/// Pass 2 (NEW tuple): if NEW is non-NULL and parent is missing → immediate error or deferred(+1). +#[allow(clippy::too_many_arguments)] +pub fn emit_fk_child_update_counters( + program: &mut ProgramBuilder, + resolver: &Resolver, + child_tbl: &BTreeTable, + child_table_name: &str, + child_cursor_id: usize, + new_start_reg: usize, + new_rowid_reg: usize, + updated_cols: &HashSet, +) -> crate::Result<()> { + // Helper: materialize OLD tuple for this FK; returns (start_reg, ncols) or None if any component is NULL. + let load_old_tuple = + |program: &mut ProgramBuilder, fk_cols: &[String]| -> Option<(usize, usize)> { + let n = fk_cols.len(); + let start = program.alloc_registers(n); + let null_jmp = program.allocate_label(); + + for (k, cname) in fk_cols.iter().enumerate() { + let (pos, _col) = match child_tbl.get_column(cname) { + Some(v) => v, + None => { + // schema inconsistency; treat as no-old tuple + return None; + } + }; + program.emit_column_or_rowid(child_cursor_id, pos, start + k); + program.emit_insn(Insn::IsNull { + reg: start + k, + target_pc: null_jmp, + }); + } + + // No NULLs, proceed + let cont = program.allocate_label(); + program.emit_insn(Insn::Goto { target_pc: cont }); + // NULL encountered -> invalidate tuple by jumping here + program.preassign_label_to_next_insn(null_jmp); + + program.preassign_label_to_next_insn(cont); + Some((start, n)) + }; + + for fk_ref in resolver.schema.resolved_fks_for_child(child_table_name)? { + // If the child-side FK columns did not change, there is nothing to do. + if !fk_ref.child_key_changed(updated_cols, child_tbl) { + continue; + } + + let ncols = fk_ref.child_cols.len(); + + // Pass 1: OLD tuple handling only for deferred FKs + if fk_ref.fk.deferred { + if let Some((old_start, _)) = load_old_tuple(program, &fk_ref.child_cols) { + if fk_ref.parent_uses_rowid { + // Parent key is rowid: probe parent table by rowid + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent btree"); + let pcur = open_read_table(program, &parent_tbl); + + // first FK col is the rowid value + let rid = program.alloc_register(); + program.emit_insn(Insn::Copy { + src_reg: old_start, + dst_reg: rid, + extra_amount: 0, + }); + program.emit_insn(Insn::MustBeInt { reg: rid }); + + // If NOT exists => decrement (guarded) + let miss = program.allocate_label(); + program.emit_insn(Insn::NotExists { + cursor: pcur, + rowid_reg: rid, + target_pc: miss, + }); + // found → close & continue + let join = program.allocate_label(); + program.emit_insn(Insn::Close { cursor_id: pcur }); + program.emit_insn(Insn::Goto { target_pc: join }); + + // missing → guarded decrement + program.preassign_label_to_next_insn(miss); + program.emit_insn(Insn::Close { cursor_id: pcur }); + let skip = program.allocate_label(); + program.emit_insn(Insn::FkIfZero { + is_scope: false, + target_pc: skip, + }); + program.emit_insn(Insn::FkCounter { + is_scope: false, + increment_value: -1, + }); + program.preassign_label_to_next_insn(skip); + + program.preassign_label_to_next_insn(join); + } else { + // Parent key is a unique index: use index probe and guarded decrement on NOT FOUND + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent btree"); + let idx = fk_ref + .parent_unique_index + .as_ref() + .expect("parent unique index required"); + let icur = open_read_index(program, idx); + + // Copy OLD tuple and apply parent index affinities + let probe = copy_with_affinity(program, old_start, ncols, idx, &parent_tbl); + // Found: nothing; Not found: guarded decrement + index_probe( + program, + icur, + probe, + ncols, + |_p| Ok(()), + |p| { + let skip = p.allocate_label(); + p.emit_insn(Insn::FkIfZero { + is_scope: false, + target_pc: skip, + }); + p.emit_insn(Insn::FkCounter { + is_scope: false, + increment_value: -1, + }); + p.preassign_label_to_next_insn(skip); + Ok(()) + }, + )?; + } + } + } + + // Pass 2: NEW tuple handling + // If any NEW component is NULL → FK is satisfied vacuously. + let fk_ok = program.allocate_label(); + for cname in &fk_ref.fk.child_columns { + let (i, col) = child_tbl.get_column(cname).unwrap(); + let src = if col.is_rowid_alias { + new_rowid_reg + } else { + new_start_reg + i + }; + program.emit_insn(Insn::IsNull { + reg: src, + target_pc: fk_ok, + }); + } + + if fk_ref.parent_uses_rowid { + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent btree"); + let pcur = open_read_table(program, &parent_tbl); + + // Take the first child column value (rowid) from NEW image + let (i_child, col_child) = child_tbl.get_column(&fk_ref.child_cols[0]).unwrap(); + let val_reg = if col_child.is_rowid_alias { + new_rowid_reg + } else { + new_start_reg + i_child + }; + + let tmp = program.alloc_register(); + program.emit_insn(Insn::Copy { + src_reg: val_reg, + dst_reg: tmp, + extra_amount: 0, + }); + program.emit_insn(Insn::MustBeInt { reg: tmp }); + + let violation = program.allocate_label(); + program.emit_insn(Insn::NotExists { + cursor: pcur, + rowid_reg: tmp, + target_pc: violation, + }); + // found → close and continue + program.emit_insn(Insn::Close { cursor_id: pcur }); + program.emit_insn(Insn::Goto { target_pc: fk_ok }); + + // missing → violation (immediate HALT or deferred +1) + program.preassign_label_to_next_insn(violation); + program.emit_insn(Insn::Close { cursor_id: pcur }); + emit_fk_violation(program, &fk_ref.fk)?; + } else { + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent btree"); + let idx = fk_ref + .parent_unique_index + .as_ref() + .expect("parent unique index required"); + let icur = open_read_index(program, idx); + + // Build NEW probe (in FK child column order → aligns with parent index columns) + let probe = { + let start = program.alloc_registers(ncols); + for (k, cname) in fk_ref.child_cols.iter().enumerate() { + let (i, col) = child_tbl.get_column(cname).unwrap(); + program.emit_insn(Insn::Copy { + src_reg: if col.is_rowid_alias { + new_rowid_reg + } else { + new_start_reg + i + }, + dst_reg: start + k, + extra_amount: 0, + }); + } + // Apply affinities of the parent index/table + if let Some(cnt) = NonZeroUsize::new(ncols) { + program.emit_insn(Insn::Affinity { + start_reg: start, + count: cnt, + affinities: build_index_affinity_string(idx, &parent_tbl), + }); + } + start + }; + + // FOUND: ok; NOT FOUND: violation path + index_probe( + program, + icur, + probe, + ncols, + |_p| Ok(()), + |p| { + emit_fk_violation(p, &fk_ref.fk)?; + Ok(()) + }, + )?; + program.emit_insn(Insn::Goto { target_pc: fk_ok }); + } + + // Skip label for NEW tuple NULL short-circuit + program.preassign_label_to_next_insn(fk_ok); + } + + Ok(()) +} + +/// Prevent deleting a parent row that is still referenced by any child. +/// For each incoming FK referencing `parent_table_name`: +/// 1. Build the parent key vector from the current parent row (FK parent-column order, +/// or the table's PK columns when the FK omits parent columns). +/// 2. Look for referencing child rows: +/// - Prefer an exact child index on (child_columns...). If found, probe the index. +/// - Otherwise scan the child table. For self-referential FKs, exclude the current rowid. +/// 3. If a referencing child exists: +/// - Immediate FK: HALT with SQLITE_CONSTRAINT_FOREIGNKEY +/// - Deferred FK: FkCounter +1 +pub fn emit_fk_delete_parent_existence_checks( + program: &mut ProgramBuilder, + resolver: &Resolver, + parent_table_name: &str, + parent_cursor_id: usize, + parent_rowid_reg: usize, +) -> Result<()> { + let parent_bt = resolver + .schema + .get_btree_table(parent_table_name) + .ok_or_else(|| crate::LimboError::InternalError("parent not btree".into()))?; + + for fk_ref in resolver + .schema + .resolved_fks_referencing(parent_table_name)? + { + let is_self_ref = fk_ref + .child_table + .name + .eq_ignore_ascii_case(parent_table_name); + + // Build parent key in FK's parent-column order (or table PK columns if unspecified). + let parent_cols: Vec = if fk_ref.fk.parent_columns.is_empty() { + parent_bt + .primary_key_columns + .iter() + .map(|(n, _)| n.clone()) + .collect() + } else { + fk_ref.fk.parent_columns.clone() + }; + let ncols = parent_cols.len(); + + let parent_key_start = program.alloc_registers(ncols); + build_parent_key( + program, + &parent_bt, + &parent_cols, + parent_cursor_id, + parent_rowid_reg, + parent_key_start, + )?; + + // Try an exact child index on (child_columns...) if available and not self-ref + let child_cols = &fk_ref.fk.child_columns; + let child_idx = if !is_self_ref { + resolver + .schema + .get_indices(&fk_ref.child_table.name) + .find(|idx| { + idx.columns.len() == child_cols.len() + && idx + .columns + .iter() + .zip(child_cols.iter()) + .all(|(ic, cc)| ic.name.eq_ignore_ascii_case(cc)) + }) + } else { + None + }; + + if let Some(idx) = child_idx { + // Index probe: FOUND => violation; NOT FOUND => ok. + let icur = open_read_index(program, idx); + let probe = + copy_with_affinity(program, parent_key_start, ncols, idx, &fk_ref.child_table); + + index_probe( + program, + icur, + probe, + ncols, + |p| { + emit_fk_violation(p, &fk_ref.fk)?; + Ok(()) + }, + |_p| Ok(()), + )?; + } else { + // Table scan fallback; for self-ref, exclude the same parent row by rowid. + table_scan_match_any( + program, + &fk_ref.child_table, + child_cols, + parent_key_start, + if is_self_ref { + Some(parent_rowid_reg) + } else { + None + }, + |p| { + emit_fk_violation(p, &fk_ref.fk)?; + Ok(()) + }, + )?; + } + } + Ok(()) +} diff --git a/core/translate/insert.rs b/core/translate/insert.rs index f6cfa4b88..7e4900f2c 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -8,7 +8,7 @@ use turso_parser::ast::{ use crate::error::{ SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PRIMARYKEY, SQLITE_CONSTRAINT_UNIQUE, }; -use crate::schema::{self, Affinity, Index, Table}; +use crate::schema::{self, Affinity, BTreeTable, Index, ResolvedFkRef, Table}; use crate::translate::emitter::{ emit_cdc_insns, emit_cdc_patch_record, prepare_cdc_if_necessary, OperationMode, }; @@ -16,6 +16,10 @@ use crate::translate::expr::{ bind_and_rewrite_expr, emit_returning_results, process_returning_clause, walk_expr_mut, BindingBehavior, ReturningValueRegisters, WalkControl, }; +use crate::translate::fkeys::{ + build_index_affinity_string, emit_fk_scope_if_needed, emit_fk_violation, index_probe, + open_read_index, open_read_table, +}; use crate::translate::plan::TableReferences; use crate::translate::planner::ROWID_STRS; use crate::translate::upsert::{ @@ -134,11 +138,6 @@ pub fn translate_insert( if !btree_table.has_rowid { crate::bail_parse_error!("INSERT into WITHOUT ROWID table is not supported"); } - let has_child_fks = fk_enabled && !btree_table.foreign_keys.is_empty(); - let has_parent_fks = fk_enabled - && resolver - .schema - .any_resolved_fks_referencing(table_name.as_str()); let root_page = btree_table.root_page; @@ -242,14 +241,11 @@ pub fn translate_insert( connection, )?; - if has_child_fks || has_parent_fks { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: true, - }); - } - + let has_fks = if fk_enabled { + emit_fk_scope_if_needed(&mut program, resolver, table_name.as_str(), true)? + } else { + false + }; let mut yield_reg_opt = None; let mut temp_table_ctx = None; let (num_values, cursor_id) = match body { @@ -274,7 +270,6 @@ pub fn translate_insert( let query_destination = QueryDestination::CoroutineYield { yield_reg, - // keep implementation_start as halt_label (producer internals) coroutine_implementation_start: halt_label, }; program.incr_nesting(); @@ -1043,13 +1038,14 @@ pub fn translate_insert( } } } - if has_child_fks || has_parent_fks { - emit_fk_checks_for_insert( + if has_fks { + // Child-side check must run before Insert (may HALT or increment deferred counter) + emit_fk_child_insert_checks( &mut program, resolver, - &insertion, - table_name.as_str(), - !inserting_multiple_rows, + &btree_table, + insertion.first_col_register(), + insertion.key_register(), )?; } @@ -1061,6 +1057,11 @@ pub fn translate_insert( table_name: table_name.to_string(), }); + if has_fks { + // After the row is actually present, repair deferred counters for children referencing this NEW parent key. + emit_parent_side_fk_decrement_on_insert(&mut program, resolver, &btree_table, &insertion)?; + } + if let Some((seq_cursor_id, r_seq, r_seq_rowid, table_name_reg)) = autoincrement_meta { let no_update_needed_label = program.allocate_label(); program.emit_insn(Insn::Le { @@ -1151,6 +1152,7 @@ pub fn translate_insert( &mut result_columns, cdc_table.as_ref().map(|c| c.0), row_done_label, + connection, )?; } else { // UpsertDo::Nothing case @@ -1198,13 +1200,8 @@ pub fn translate_insert( } program.preassign_label_to_next_insn(stmt_epilogue); - if has_child_fks || has_parent_fks { - // close FK scope and surface deferred violations - program.emit_insn(Insn::FkCounter { - increment_value: -1, - check_abort: true, - is_scope: true, - }); + if has_fks { + emit_fk_scope_if_needed(&mut program, resolver, table_name.as_str(), false)?; } program.resolve_label(halt_label, program.offset()); @@ -1900,38 +1897,29 @@ fn emit_update_sqlite_sequence( Ok(()) } -/// Emit child->parent foreign key checks for an INSERT, for the current row -fn emit_fk_checks_for_insert( +/// Child-side FK checks for INSERT of a single row: +/// For each outgoing FK on `child_tbl`, if the NEW tuple's FK columns are all non-NULL, +/// verify that the referenced parent key exists. +pub fn emit_fk_child_insert_checks( program: &mut ProgramBuilder, resolver: &Resolver, - insertion: &Insertion, - table_name: &str, - single_row_insert: bool, -) -> Result<()> { - let after_all = program.allocate_label(); - program.emit_insn(Insn::FkIfZero { - target_pc: after_all, - if_zero: true, - }); + child_tbl: &BTreeTable, + new_start_reg: usize, + new_rowid_reg: usize, +) -> crate::Result<()> { + for fk_ref in resolver.schema.resolved_fks_for_child(&child_tbl.name)? { + let ncols = fk_ref.child_cols.len(); + let is_self_ref = fk_ref.fk.parent_table.eq_ignore_ascii_case(&child_tbl.name); - // Iterate child FKs declared on this table - for fk_ref in resolver.schema.resolved_fks_for_child(table_name) { - let parent_tbl = resolver - .schema - .get_btree_table(&fk_ref.fk.parent_table) - .expect("parent table"); - let num_child_cols = fk_ref.child_cols.len(); - let is_self_single = - table_name.eq_ignore_ascii_case(&fk_ref.fk.parent_table) && single_row_insert; - // if any child FK value is NULL, this row doesn't reference the parent. + // Short-circuit if any NEW component is NULL let fk_ok = program.allocate_label(); - for &pos_in_child in fk_ref.child_pos.iter() { - // Map INSERT image register for that column - let src = insertion - .col_mappings - .get(pos_in_child) - .expect("col must be present") - .register; + for cname in &fk_ref.child_cols { + let (i, col) = child_tbl.get_column(cname).unwrap(); + let src = if col.is_rowid_alias { + new_rowid_reg + } else { + new_start_reg + i + }; program.emit_insn(Insn::IsNull { reg: src, target_pc: fk_ok, @@ -1939,36 +1927,29 @@ fn emit_fk_checks_for_insert( } if fk_ref.parent_uses_rowid { - // Parent is rowid/alias: single-reg probe - let pcur = program.alloc_cursor_id(CursorType::BTreeTable(parent_tbl.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: pcur, - root_page: parent_tbl.root_page, - db: 0, - }); - let rowid_pos = 0; // guaranteed if parent_uses_rowid - let src = insertion - .get_col_mapping_by_name(fk_ref.child_cols[rowid_pos].as_str()) - .unwrap() - .register; - let violation = program.allocate_label(); + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent btree"); + let pcur = open_read_table(program, &parent_tbl); + + // first child col carries rowid + let (i_child, col_child) = child_tbl.get_column(&fk_ref.child_cols[0]).unwrap(); + let val_reg = if col_child.is_rowid_alias { + new_rowid_reg + } else { + new_start_reg + i_child + }; + let tmp = program.alloc_register(); program.emit_insn(Insn::Copy { - src_reg: src, + src_reg: val_reg, dst_reg: tmp, extra_amount: 0, }); - // coerce to INT (parent rowid affinity) program.emit_insn(Insn::MustBeInt { reg: tmp }); - if is_self_single { - program.emit_insn(Insn::Eq { - lhs: tmp, - rhs: insertion.key_register(), - target_pc: fk_ok, - flags: CmpInsFlags::default(), - collation: None, - }); - } + + let violation = program.allocate_label(); program.emit_insn(Insn::NotExists { cursor: pcur, rowid_reg: tmp, @@ -1980,102 +1961,296 @@ fn emit_fk_checks_for_insert( program.preassign_label_to_next_insn(violation); program.emit_insn(Insn::Close { cursor_id: pcur }); - // Deferred vs immediate - if fk_ref.fk.deferred { + // Self-ref: count (don’t halt). Non-self: standard behavior. + if is_self_ref { program.emit_insn(Insn::FkCounter { increment_value: 1, - check_abort: false, is_scope: false, }); } else { - program.emit_insn(Insn::Halt { - err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, - description: "FOREIGN KEY constraint failed".to_string(), - }); + emit_fk_violation(program, &fk_ref.fk)?; } - } else if let Some(ix) = &fk_ref.parent_unique_index { - if is_self_single { - let skip_probe = program.allocate_label(); - for (i, &pos_in_child) in fk_ref.child_pos.iter().enumerate() { - let child_reg = insertion.col_mappings.get(pos_in_child).unwrap().register; - let parent_reg = insertion - .get_col_mapping_by_name(fk_ref.parent_cols[i].as_str()) - .unwrap() - .register; - program.emit_insn(Insn::Ne { - lhs: child_reg, - rhs: parent_reg, - target_pc: skip_probe, // any mismatch and we do the normal probe - flags: CmpInsFlags::default().jump_if_null(), - collation: None, + } else { + // Parent by unique index + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent btree"); + let idx = fk_ref + .parent_unique_index + .as_ref() + .expect("parent unique index required"); + let icur = open_read_index(program, idx); + + // Build NEW probe from child NEW values; apply parent index affinities + let probe = { + let start = program.alloc_registers(ncols); + for (k, cname) in fk_ref.child_cols.iter().enumerate() { + let (i, col) = child_tbl.get_column(cname).unwrap(); + program.emit_insn(Insn::Copy { + src_reg: if col.is_rowid_alias { + new_rowid_reg + } else { + new_start_reg + i + }, + dst_reg: start + k, + extra_amount: 0, }); } - // all matched, OK - program.emit_insn(Insn::Goto { target_pc: fk_ok }); - program.preassign_label_to_next_insn(skip_probe); - } + if let Some(cnt) = NonZeroUsize::new(ncols) { + program.emit_insn(Insn::Affinity { + start_reg: start, + count: cnt, + affinities: build_index_affinity_string(idx, &parent_tbl), + }); + } + start + }; + index_probe( + program, + icur, + probe, + ncols, + |_p| Ok(()), + |p| { + if is_self_ref { + p.emit_insn(Insn::FkCounter { + increment_value: 1, + is_scope: false, + }); + } else { + emit_fk_violation(p, &fk_ref.fk)?; + } + Ok(()) + }, + )?; + program.emit_insn(Insn::Goto { target_pc: fk_ok }); + } - // Parent has a UNIQUE index exactly on parent_cols: use Found against that index - let icur = program.alloc_cursor_id(CursorType::BTreeIndex(ix.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: icur, - root_page: ix.root_page, - db: 0, - }); + program.preassign_label_to_next_insn(fk_ok); + } + Ok(()) +} - // Build probe (child values order == parent index order by construction) - let probe_start = program.alloc_registers(num_child_cols); - for (i, &pos_in_child) in fk_ref.child_pos.iter().enumerate() { - let src = insertion.col_mappings.get(pos_in_child).unwrap().register; +/// Build NEW parent key image in FK parent-column order into a contiguous register block. +/// Handles 3 shapes: +/// - parent_uses_rowid: single "rowid" component +/// - explicit fk.parent_columns +/// - fk.parent_columns empty => use parent's declared PK columns (order-preserving) +fn build_parent_key_image_for_insert( + program: &mut ProgramBuilder, + parent_table: &BTreeTable, + pref: &ResolvedFkRef, + insertion: &Insertion, +) -> crate::Result<(usize, usize)> { + // Decide column list + let parent_cols: Vec = if pref.parent_uses_rowid { + vec!["rowid".to_string()] + } else if !pref.fk.parent_columns.is_empty() { + pref.fk.parent_columns.clone() + } else { + // fall back to the declared PK of the parent table, in schema order + parent_table + .primary_key_columns + .iter() + .map(|(n, _)| n.clone()) + .collect() + }; + + let ncols = parent_cols.len(); + let start = program.alloc_registers(ncols); + // Copy from the would-be parent insertion + for (i, pname) in parent_cols.iter().enumerate() { + let src = if pname.eq_ignore_ascii_case("rowid") { + insertion.key_register() + } else { + // For rowid-alias parents, get_col_mapping_by_name will return the key mapping, + // not the NULL placeholder in col_mappings. + insertion + .get_col_mapping_by_name(pname) + .ok_or_else(|| { + crate::LimboError::PlanningError(format!( + "Column '{}' not present in INSERT image for parent {}", + pname, parent_table.name + )) + })? + .register + }; + program.emit_insn(Insn::Copy { + src_reg: src, + dst_reg: start + i, + extra_amount: 0, + }); + } + + // Apply affinities of the parent columns (or integer for rowid) + let aff: String = if pref.parent_uses_rowid { + "i".to_string() + } else { + parent_cols + .iter() + .map(|name| { + let (_, col) = parent_table.get_column(name).ok_or_else(|| { + crate::LimboError::InternalError(format!("parent col {name} missing")) + })?; + Ok::<_, crate::LimboError>(col.affinity().aff_mask()) + }) + .collect::>()? + }; + if let Some(count) = NonZeroUsize::new(ncols) { + program.emit_insn(Insn::Affinity { + start_reg: start, + count, + affinities: aff, + }); + } + + Ok((start, ncols)) +} + +/// Parent-side: when inserting into the parent, decrement the counter +/// if any child rows reference the NEW parent key. +/// We *always* do this for deferred FKs, and we *also* do it for +/// self-referential FKs (even if immediate) because the insert can +/// “repair” a prior child-insert count recorded earlier in the same statement. +pub fn emit_parent_side_fk_decrement_on_insert( + program: &mut ProgramBuilder, + resolver: &Resolver, + parent_table: &BTreeTable, + insertion: &Insertion, +) -> crate::Result<()> { + for pref in resolver + .schema + .resolved_fks_referencing(&parent_table.name)? + { + let is_self_ref = pref + .child_table + .name + .eq_ignore_ascii_case(&parent_table.name); + // Skip only when it cannot repair anything: non-deferred and not self-ref. + if !pref.fk.deferred && !is_self_ref { + continue; + } + let (new_pk_start, n_cols) = + build_parent_key_image_for_insert(program, parent_table, &pref, insertion)?; + + let child_tbl = &pref.child_table; + let child_cols = &pref.fk.child_columns; + let idx = resolver.schema.get_indices(&child_tbl.name).find(|ix| { + ix.columns.len() == child_cols.len() + && ix + .columns + .iter() + .zip(child_cols.iter()) + .all(|(ic, cc)| ic.name.eq_ignore_ascii_case(cc)) + }); + + if let Some(ix) = idx { + let icur = open_read_index(program, ix); + // Copy key into probe regs and apply child-index affinities + let probe_start = program.alloc_registers(n_cols); + for i in 0..n_cols { program.emit_insn(Insn::Copy { - src_reg: src, + src_reg: new_pk_start + i, dst_reg: probe_start + i, extra_amount: 0, }); } - - let aff: String = ix - .columns - .iter() - .map(|c| parent_tbl.columns[c.pos_in_table].affinity().aff_mask()) - .collect(); - program.emit_insn(Insn::Affinity { - start_reg: probe_start, - count: std::num::NonZeroUsize::new(num_child_cols).unwrap(), - affinities: aff, - }); + if let Some(count) = NonZeroUsize::new(n_cols) { + program.emit_insn(Insn::Affinity { + start_reg: probe_start, + count, + affinities: build_index_affinity_string(ix, child_tbl), + }); + } let found = program.allocate_label(); program.emit_insn(Insn::Found { cursor_id: icur, target_pc: found, record_reg: probe_start, - num_regs: num_child_cols, + num_regs: n_cols, }); - // Not found: violation + // Not found => nothing to decrement program.emit_insn(Insn::Close { cursor_id: icur }); - if fk_ref.fk.deferred { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - check_abort: false, - is_scope: false, + let skip = program.allocate_label(); + program.emit_insn(Insn::Goto { target_pc: skip }); + + // Found => guarded decrement + program.resolve_label(found, program.offset()); + program.emit_insn(Insn::Close { cursor_id: icur }); + program.emit_insn(Insn::FkIfZero { + is_scope: false, + target_pc: skip, + }); + program.emit_insn(Insn::FkCounter { + increment_value: -1, + is_scope: false, + }); + program.resolve_label(skip, program.offset()); + } else { + // fallback scan :( + let ccur = open_read_table(program, child_tbl); + let done = program.allocate_label(); + program.emit_insn(Insn::Rewind { + cursor_id: ccur, + pc_if_empty: done, + }); + let loop_top = program.allocate_label(); + let next_row = program.allocate_label(); + program.resolve_label(loop_top, program.offset()); + + for (i, child_name) in child_cols.iter().enumerate() { + let (pos, _) = child_tbl.get_column(child_name).ok_or_else(|| { + crate::LimboError::InternalError(format!("child col {child_name} missing")) + })?; + let tmp = program.alloc_register(); + program.emit_insn(Insn::Column { + cursor_id: ccur, + column: pos, + dest: tmp, + default: None, }); - } else { - program.emit_insn(Insn::Halt { - err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, - description: "FOREIGN KEY constraint failed".to_string(), + + program.emit_insn(Insn::IsNull { + reg: tmp, + target_pc: next_row, }); + + let cont = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: tmp, + rhs: new_pk_start + i, + target_pc: cont, + flags: CmpInsFlags::default().jump_if_null(), + collation: Some(super::collate::CollationSeq::Binary), + }); + program.emit_insn(Insn::Goto { + target_pc: next_row, + }); + program.resolve_label(cont, program.offset()); } - program.emit_insn(Insn::Goto { target_pc: fk_ok }); - // Found OK - program.preassign_label_to_next_insn(found); - program.emit_insn(Insn::Close { cursor_id: icur }); + + // Matched one child row -> guarded decrement + program.emit_insn(Insn::FkIfZero { + is_scope: false, + target_pc: next_row, + }); + program.emit_insn(Insn::FkCounter { + is_scope: false, + increment_value: -1, + }); + + program.resolve_label(next_row, program.offset()); + program.emit_insn(Insn::Next { + cursor_id: ccur, + pc_if_next: loop_top, + }); + + program.resolve_label(done, program.offset()); + program.emit_insn(Insn::Close { cursor_id: ccur }); } - - program.preassign_label_to_next_insn(fk_ok); } - - program.preassign_label_to_next_insn(after_all); Ok(()) } diff --git a/core/translate/mod.rs b/core/translate/mod.rs index 690ad7c47..d51d89dea 100644 --- a/core/translate/mod.rs +++ b/core/translate/mod.rs @@ -17,6 +17,7 @@ pub(crate) mod delete; pub(crate) mod display; pub(crate) mod emitter; pub(crate) mod expr; +pub(crate) mod fkeys; pub(crate) mod group_by; pub(crate) mod index; pub(crate) mod insert; diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs index 2ae07f961..868f3a933 100644 --- a/core/translate/upsert.rs +++ b/core/translate/upsert.rs @@ -5,11 +5,9 @@ use std::{collections::HashMap, sync::Arc}; use turso_parser::ast::{self, Upsert}; use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY; -use crate::translate::emitter::{ - emit_fk_child_existence_checks, emit_fk_parent_existence_checks, - emit_fk_parent_pk_change_counters, -}; +use crate::schema::ROWID_SENTINEL; use crate::translate::expr::{walk_expr, WalkControl}; +use crate::translate::fkeys::{emit_fk_child_update_counters, emit_parent_pk_change_checks}; use crate::translate::insert::format_unique_violation_desc; use crate::translate::planner::ROWID_STRS; use crate::vdbe::insn::CmpInsFlags; @@ -471,176 +469,49 @@ pub fn emit_upsert( } let (changed_cols, rowid_changed) = collect_changed_cols(table, set_pairs); + let rowid_alias_idx = table.columns().iter().position(|c| c.is_rowid_alias); + let has_direct_rowid_update = set_pairs + .iter() + .any(|(idx, _)| *idx == rowid_alias_idx.unwrap_or(ROWID_SENTINEL)); + let has_user_provided_rowid = if let Some(i) = rowid_alias_idx { + set_pairs.iter().any(|(idx, _)| *idx == i) || has_direct_rowid_update + } else { + has_direct_rowid_update + }; + let rowid_set_clause_reg = if has_user_provided_rowid { + Some(new_rowid_reg.unwrap_or(conflict_rowid_reg)) + } else { + None + }; if let Some(bt) = table.btree() { if connection.foreign_keys_enabled() { let rowid_new_reg = new_rowid_reg.unwrap_or(conflict_rowid_reg); // Child-side checks if resolver.schema.has_child_fks(bt.name.as_str()) { - emit_fk_child_existence_checks( + emit_fk_child_update_counters( program, resolver, &bt, table.get_name(), + tbl_cursor_id, new_start, rowid_new_reg, &changed_cols, )?; } - - // Parent-side checks only if any incoming FK could care - if resolver - .schema - .any_resolved_fks_referencing(table.get_name()) - { - // if parent key can't change, skip - let updated_parent_positions: HashSet = - set_pairs.iter().map(|(i, _)| *i).collect(); - let incoming = resolver.schema.resolved_fks_referencing(table.get_name()); - let parent_key_may_change = incoming - .iter() - .any(|r| r.parent_key_may_change(&updated_parent_positions, &bt)); - - if parent_key_may_change { - let skip_parent_fk = program.allocate_label(); - let pk_len = bt.primary_key_columns.len(); - - match pk_len { - 0 => { - // implicit rowid - program.emit_insn(Insn::Eq { - lhs: rowid_new_reg, - rhs: conflict_rowid_reg, - target_pc: skip_parent_fk, - flags: CmpInsFlags::default(), - collation: program.curr_collation(), - }); - emit_fk_parent_existence_checks( - program, - resolver, - table.get_name(), - tbl_cursor_id, - conflict_rowid_reg, - )?; - program.preassign_label_to_next_insn(skip_parent_fk); - } - 1 => { - // single-col declared PK - let (pk_name, _) = &bt.primary_key_columns[0]; - let (pos, col) = bt.get_column(pk_name).unwrap(); - - let old_reg = program.alloc_register(); - if col.is_rowid_alias { - program.emit_insn(Insn::RowId { - cursor_id: tbl_cursor_id, - dest: old_reg, - }); - } else { - program.emit_insn(Insn::Column { - cursor_id: tbl_cursor_id, - column: pos, - dest: old_reg, - default: None, - }); - } - let new_reg = new_start + pos; - - let skip = program.allocate_label(); - program.emit_insn(Insn::Eq { - lhs: old_reg, - rhs: new_reg, - target_pc: skip, - flags: CmpInsFlags::default(), - collation: program.curr_collation(), - }); - emit_fk_parent_existence_checks( - program, - resolver, - table.get_name(), - tbl_cursor_id, - conflict_rowid_reg, - )?; - program.preassign_label_to_next_insn(skip); - } - _ => { - // composite PK: build OLD/NEW vectors and do the 2-pass counter logic - let old_pk_start = program.alloc_registers(pk_len); - for (i, (pk_name, _)) in bt.primary_key_columns.iter().enumerate() { - let (pos, col) = bt.get_column(pk_name).unwrap(); - if col.is_rowid_alias { - // old rowid (UPSERT target) == conflict_rowid_reg - program.emit_insn(Insn::Copy { - src_reg: conflict_rowid_reg, - dst_reg: old_pk_start + i, - extra_amount: 0, - }); - } else { - program.emit_insn(Insn::Column { - cursor_id: tbl_cursor_id, - column: pos, - dest: old_pk_start + i, - default: None, - }); - } - } - - let new_pk_start = program.alloc_registers(pk_len); - for (i, (pk_name, _)) in bt.primary_key_columns.iter().enumerate() { - let (pos, col) = bt.get_column(pk_name).unwrap(); - let src = if col.is_rowid_alias { - rowid_new_reg - } else { - new_start + pos - }; - program.emit_insn(Insn::Copy { - src_reg: src, - dst_reg: new_pk_start + i, - extra_amount: 0, - }); - } - - // Compare OLD vs NEW, if all equal then skip - let skip = program.allocate_label(); - let changed = program.allocate_label(); - for i in 0..pk_len { - if i == pk_len - 1 { - program.emit_insn(Insn::Eq { - lhs: old_pk_start + i, - rhs: new_pk_start + i, - target_pc: skip, - flags: CmpInsFlags::default(), - collation: program.curr_collation(), - }); - program.emit_insn(Insn::Goto { target_pc: changed }); - } else { - let next = program.allocate_label(); - program.emit_insn(Insn::Eq { - lhs: old_pk_start + i, - rhs: new_pk_start + i, - target_pc: next, - flags: CmpInsFlags::default(), - collation: program.curr_collation(), - }); - program.emit_insn(Insn::Goto { target_pc: changed }); - program.preassign_label_to_next_insn(next); - } - } - - program.preassign_label_to_next_insn(changed); - emit_fk_parent_pk_change_counters( - program, - &incoming, - resolver, - old_pk_start, - new_pk_start, - pk_len, - )?; - program.preassign_label_to_next_insn(skip); - } - } - } - } + emit_parent_pk_change_checks( + program, + resolver, + &bt, + tbl_cursor_id, + conflict_rowid_reg, + new_start, + new_rowid_reg.unwrap_or(conflict_rowid_reg), + rowid_set_clause_reg, + set_pairs, + )?; } } diff --git a/core/vdbe/builder.rs b/core/vdbe/builder.rs index 3d1a333ec..70e98eb00 100644 --- a/core/vdbe/builder.rs +++ b/core/vdbe/builder.rs @@ -792,6 +792,9 @@ impl ProgramBuilder { Insn::NotFound { target_pc, .. } => { resolve(target_pc, "NotFound"); } + Insn::FkIfZero { target_pc, .. } => { + resolve(target_pc, "FkIfZero"); + } _ => {} } } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 40642b87d..09e112e0e 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2169,6 +2169,17 @@ pub fn halt( let auto_commit = program.connection.auto_commit.load(Ordering::SeqCst); tracing::trace!("halt(auto_commit={})", auto_commit); if auto_commit { + if program.connection.foreign_keys_enabled() + && program + .connection + .fk_deferred_violations + .swap(0, Ordering::AcqRel) + > 0 + { + return Err(LimboError::Constraint( + "foreign key constraint failed".to_string(), + )); + } program .commit_txn(pager.clone(), state, mv_store, false) .map(Into::into) @@ -2263,12 +2274,13 @@ pub fn op_transaction_inner( if write && conn.db.open_flags.get().contains(OpenFlags::ReadOnly) { return Err(LimboError::ReadOnly); } - // 1. We try to upgrade current version let current_state = conn.get_tx_state(); - let (new_transaction_state, updated) = if conn.is_nested_stmt.load(Ordering::SeqCst) + let (new_transaction_state, updated, should_clear_deferred_violations) = if conn + .is_nested_stmt + .load(Ordering::SeqCst) { - (current_state, false) + (current_state, false, false) } else { match (current_state, write) { // pending state means that we tried beginning a tx and the method returned IO. @@ -2283,30 +2295,36 @@ pub fn op_transaction_inner( schema_did_change: false, }, true, + true, ) } (TransactionState::Write { schema_did_change }, true) => { - (TransactionState::Write { schema_did_change }, false) + (TransactionState::Write { schema_did_change }, false, false) } (TransactionState::Write { schema_did_change }, false) => { - (TransactionState::Write { schema_did_change }, false) + (TransactionState::Write { schema_did_change }, false, false) } (TransactionState::Read, true) => ( TransactionState::Write { schema_did_change: false, }, true, + true, ), - (TransactionState::Read, false) => (TransactionState::Read, false), + (TransactionState::Read, false) => (TransactionState::Read, false, false), (TransactionState::None, true) => ( TransactionState::Write { schema_did_change: false, }, true, + true, ), - (TransactionState::None, false) => (TransactionState::Read, true), + (TransactionState::None, false) => (TransactionState::Read, true, false), } }; + if should_clear_deferred_violations { + conn.fk_deferred_violations.store(0, Ordering::Release); + } // 2. Start transaction if needed if let Some(mv_store) = &mv_store { @@ -2383,8 +2401,8 @@ pub fn op_transaction_inner( return Err(LimboError::Busy); } if let IOResult::IO(io) = begin_w_tx_res? { - // set the transaction state to pending so we don't have to // end the read transaction. + // set the transaction state to pending so we don't have to program .connection .set_tx_state(TransactionState::PendingUpgrade); @@ -2448,15 +2466,20 @@ pub fn op_auto_commit( }, insn ); - let conn = program.connection.clone(); if matches!(state.commit_state, CommitState::Committing) { return program .commit_txn(pager.clone(), state, mv_store, *rollback) .map(Into::into); } + let conn = program.connection.clone(); if *auto_commit != conn.auto_commit.load(Ordering::SeqCst) { if *rollback { + program // reset deferred fk violations on ROLLBACK + .connection + .fk_deferred_violations + .store(0, Ordering::Release); + // TODO(pere): add rollback I/O logic once we implement rollback journal if let Some(mv_store) = mv_store { if let Some(tx_id) = conn.get_mv_tx_id() { @@ -2468,6 +2491,15 @@ pub fn op_auto_commit( conn.set_tx_state(TransactionState::None); conn.auto_commit.store(true, Ordering::SeqCst); } else { + if conn.foreign_keys_enabled() { + let violations = conn.fk_deferred_violations.swap(0, Ordering::AcqRel); + if violations > 0 { + // Fail the commit + return Err(LimboError::Constraint( + "FOREIGN KEY constraint failed".into(), + )); + } + } conn.auto_commit.store(*auto_commit, Ordering::SeqCst); } } else { @@ -2494,6 +2526,15 @@ pub fn op_auto_commit( )); } } + if conn.foreign_keys_enabled() { + let violations = conn.fk_deferred_violations.swap(0, Ordering::AcqRel); + if violations > 0 { + // Fail the commit + return Err(LimboError::Constraint( + "FOREIGN KEY constraint failed".into(), + )); + } + } } program @@ -8289,35 +8330,18 @@ pub fn op_fk_counter( load_insn!( FkCounter { increment_value, - check_abort, is_scope, }, insn ); if *is_scope { - // Adjust FK scope depth state.fk_scope_counter = state.fk_scope_counter.saturating_add(*increment_value); - - // raise if there were deferred violations in this statement. - if *check_abort { - if state.fk_scope_counter < 0 { - return Err(LimboError::Constraint( - "FOREIGN KEY constraint failed".into(), - )); - } - if state.fk_scope_counter == 0 && state.fk_deferred_violations > 0 { - // Clear violations for safety, a new statement will re-open scope. - state.fk_deferred_violations = 0; - return Err(LimboError::Constraint( - "FOREIGN KEY constraint failed".into(), - )); - } - } } else { - // Adjust deferred violations counter - state.fk_deferred_violations = state + // Transaction-level counter: add/subtract for deferred FKs. + program + .connection .fk_deferred_violations - .saturating_add(*increment_value); + .fetch_add(*increment_value, Ordering::AcqRel); } state.pc += 1; @@ -8331,29 +8355,37 @@ pub fn op_fk_if_zero( _pager: &Arc, _mv_store: Option<&Arc>, ) -> Result { - load_insn!(FkIfZero { target_pc, if_zero }, insn); + load_insn!( + FkIfZero { + is_scope, + target_pc, + }, + insn + ); let fk_enabled = program.connection.foreign_keys_enabled(); // Jump if any: // Foreign keys are disabled globally // p1 is true AND deferred constraint counter is zero // p1 is false AND deferred constraint counter is non-zero - let scope_zero = state.fk_scope_counter == 0; - - let should_jump = if !fk_enabled { - true - } else if *if_zero { - scope_zero + if !fk_enabled { + state.pc = target_pc.as_offset_int(); + return Ok(InsnFunctionStepResult::Step); + } + let v = if !*is_scope { + program + .connection + .fk_deferred_violations + .load(Ordering::Acquire) } else { - !scope_zero + state.fk_scope_counter }; - if should_jump { - state.pc = target_pc.as_offset_int(); + state.pc = if v == 0 { + target_pc.as_offset_int() } else { - state.pc += 1; - } - + state.pc + 1 + }; Ok(InsnFunctionStepResult::Step) } diff --git a/core/vdbe/explain.rs b/core/vdbe/explain.rs index 3f99fe809..ca5b74ef3 100644 --- a/core/vdbe/explain.rs +++ b/core/vdbe/explain.rs @@ -1804,19 +1804,19 @@ pub fn insn_to_row( 0, String::new(), ), - Insn::FkCounter{check_abort, increment_value, is_scope } => ( + Insn::FkCounter{increment_value, is_scope } => ( "FkCounter", - *check_abort as i32, *increment_value as i32, *is_scope as i32, + 0, Value::build_text(""), 0, String::new(), ), - Insn::FkIfZero{target_pc, if_zero } => ( + Insn::FkIfZero{target_pc, is_scope } => ( "FkIfZero", target_pc.as_debug_int(), - *if_zero as i32, + *is_scope as i32, 0, Value::build_text(""), 0, diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index 917038b80..9a5bab21c 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -1173,7 +1173,6 @@ pub enum Insn { // If P1 is non-zero, the database constraint counter is incremented (deferred foreign key constraints). // Otherwise, if P1 is zero, the statement counter is incremented (immediate foreign key constraints). FkCounter { - check_abort: bool, increment_value: isize, is_scope: bool, }, @@ -1181,7 +1180,7 @@ pub enum Insn { // If P1 is non-zero, then the jump is taken if the database constraint-counter is zero (the one that counts deferred constraint violations). // If P1 is zero, the jump is taken if the statement constraint-counter is zero (immediate foreign key constraint violations). FkIfZero { - if_zero: bool, + is_scope: bool, target_pc: BranchOffset, }, } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index d192be864..db633c2f1 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -832,7 +832,6 @@ impl Program { // Reset state for next use program_state.view_delta_state = ViewDeltaCommitState::NotStarted; - if self.connection.get_tx_state() == TransactionState::None { // No need to do any work here if not in tx. Current MVCC logic doesn't work with this assumption, // hence the mv_store.is_none() check. @@ -915,6 +914,9 @@ impl Program { self.connection .set_changes(self.n_change.load(Ordering::SeqCst)); } + if connection.foreign_keys_enabled() { + connection.clear_deferred_foreign_key_violations(); + } Ok(IOResult::Done(())) } } diff --git a/testing/foreign_keys.test b/testing/foreign_keys.test index 78e8498f2..8934292fd 100644 --- a/testing/foreign_keys.test +++ b/testing/foreign_keys.test @@ -124,16 +124,12 @@ do_execsql_test_in_memory_any_error fk-composite-unique-missing { INSERT INTO child VALUES (2,'A','X'); -- no ('A','X') in parent } -# SQLite doesnt let you name a foreign key constraint 'rowid' explicitly... -# well it does.. but it throws a parse error only when you try to insert into the table -_- -# We will throw a parse error when you create the table instead, because that is -# obviously the only sane thing to do do_execsql_test_in_memory_any_error fk-rowid-alias-parent { PRAGMA foreign_keys=ON; CREATE TABLE t(id INTEGER PRIMARY KEY, a TEXT); - CREATE TABLE c(cid INTEGER PRIMARY KEY, rid REFERENCES t(rowid)); -- we error here + CREATE TABLE c(cid INTEGER PRIMARY KEY, rid REFERENCES t(rowid)); INSERT INTO t VALUES (100,'x'); - INSERT INTO c VALUES (1, 100); - sqlite errors here + INSERT INTO c VALUES (1, 100); } do_execsql_test_in_memory_any_error fk-rowid-alias-parent-missing { @@ -399,3 +395,712 @@ do_execsql_test_in_memory_any_error fk-self-multirow-one-bad { FOREIGN KEY(rid) REFERENCES t(id)); INSERT INTO t(id,rid) VALUES (1,1),(3,99); -- 99 has no parent -> error } + +# doesnt fail because tx is un-committed +do_execsql_test_on_specific_db {:memory:} fk-deferred-commit-doesnt-fail-early { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1, 99); -- shouldnt fail because we are mid-tx +} {} + +# it should fail here because we actuall COMMIT +do_execsql_test_in_memory_any_error fk-deferred-commit-fails { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1, 99); + COMMIT; +} + + +# If we fix it before COMMIT, COMMIT succeeds +do_execsql_test_on_specific_db {:memory:} fk-deferred-fix-before-commit-succeeds { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO c VALUES(1, 99); -- temporary violation + INSERT INTO p VALUES(99); -- fix parent + COMMIT; + SELECT * FROM p ORDER BY 1; +} {99} + +# ROLLBACK clears deferred state; a new tx can still fail if violation persists +do_execsql_test_on_specific_db {:memory:} fk-deferred-rollback-clears { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO c VALUES(1, 123); + ROLLBACK; + + -- Now start over and *fix* it, COMMIT should pass. + BEGIN; + INSERT INTO p VALUES(123); + INSERT INTO c VALUES(1, 123); + COMMIT; + SELECT * FROM c ORDER BY 1; +} {1|123} + + +do_execsql_test_on_specific_db {:memory:} fk-deferred-insert-parent-fixes-before-commit { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO c VALUES(1, 50); -- violation + INSERT INTO p VALUES(50); -- resolve + COMMIT; + SELECT * FROM c ORDER BY 1; +} {1|50} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-update-fixes-child-before-commit { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO c VALUES(1, 50); -- violation + INSERT INTO p VALUES(32); + UPDATE c SET pid=32 WHERE id=1; -- resolve child + COMMIT; + SELECT * FROM c ORDER BY 1; +} {1|32} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-delete-fixes-child-before-commit { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO c VALUES(1, 50); -- violation + INSERT INTO p VALUES(32); + DELETE FROM c WHERE id=1; -- resolve by deleting child + COMMIT; + SELECT * FROM c ORDER BY 1; +} {} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-update-fixes-parent-before-commit { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO c VALUES(1, 50); -- violation + INSERT INTO p VALUES(32); + UPDATE p SET id=50 WHERE id=32; -- resolve via parent + COMMIT; + SELECT * FROM c ORDER BY 1; +} {1|50} + +# Self-referential: row referencing itself should succeed +do_execsql_test_on_specific_db {:memory:} fk-deferred-self-ref-succeeds { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + id INTEGER PRIMARY KEY, + pid INT REFERENCES t(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO t VALUES(1, 1); -- self-match + COMMIT; + SELECT * FROM t ORDER BY 1; +} {1|1} + +# Two-step self-ref: insert invalid, then create parent before COMMIT +do_execsql_test_on_specific_db {:memory:} fk-deferred-self-ref-late-parent { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + id INTEGER PRIMARY KEY, + pid INT REFERENCES t(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO t VALUES(2, 3); -- currently invalid + INSERT INTO t VALUES(3, 3); -- now parent exists + COMMIT; + SELECT * FROM t ORDER BY 1; +} {2|3 +3|3} + + +# counter must not be neutralized by later good statements +do_execsql_test_in_memory_any_error fk-deferred-neutralize.1 { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(id INTEGER PRIMARY KEY); + CREATE TABLE parent_comp(a INT NOT NULL, b INT NOT NULL, PRIMARY KEY(a,b)); + CREATE TABLE child_deferred(id INTEGER PRIMARY KEY, pid INT, + FOREIGN KEY(pid) REFERENCES parent(id)); + + CREATE TABLE child_comp_deferred(id INTEGER PRIMARY KEY, ca INT, cb INT, + FOREIGN KEY(ca,cb) REFERENCES parent_comp(a,b)); + INSERT INTO parent_comp VALUES (4,-1); + BEGIN; + INSERT INTO child_deferred VALUES (1, 999); + INSERT INTO child_comp_deferred VALUES (2, 4, -1); + COMMIT; +} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-upsert-late-parent { +PRAGMA foreign_keys=ON; + +CREATE TABLE p(id INTEGER PRIMARY KEY); +CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED +); + +BEGIN; + INSERT INTO c VALUES(1, 50); -- deferred violation + INSERT INTO p VALUES(32); -- parent exists, but pid still 50 + INSERT INTO c(id,pid) VALUES(1,32) + ON CONFLICT(id) DO UPDATE SET pid=excluded.pid; -- resolve child via UPSERT +COMMIT; +-- Expect: row is (1,32) and no violations remain +SELECT * FROM c ORDER BY id; +} {1|32} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-upsert-late-child { +PRAGMA foreign_keys=ON; + +CREATE TABLE p( + id INTEGER PRIMARY KEY, + u INT UNIQUE +); +CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED +); +BEGIN; + INSERT INTO c VALUES(1, 50); -- deferred violation (no parent 50) + INSERT INTO p VALUES(32, 7); -- parent row with u=7 + -- Trigger DO UPDATE via conflict on p.u, then change the PK id to 50, + -- which satisfies the child reference. + INSERT INTO p(id,u) VALUES(999,7) + ON CONFLICT(u) DO UPDATE SET id=50; +COMMIT; +-- Expect: parent is now (50,7), child (1,50), no violations remain +SELECT p.id, c.id FROM p join c on c.pid = p.id; +} {50|1} + +do_execsql_test_in_memory_any_error fk-deferred-insert-commit-fails { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INTEGER REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO c VALUES(1, 99); -- no parent -> deferred violation + COMMIT; -- must fail +} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-insert-parent-fix-before-commit { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INTEGER REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO c VALUES(1, 99); -- violation + INSERT INTO p VALUES(99); -- fix by inserting parent + COMMIT; + SELECT id, pid FROM c ORDER BY id; +} {1|99} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-insert-multi-children-one-parent-fix { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1, 50); + INSERT INTO c VALUES(2, 50); -- two violations pointing to same parent + INSERT INTO p VALUES(50); -- one parent fixes both + COMMIT; + SELECT id, pid FROM c ORDER BY id; +} {1|50 2|50} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-insert-then-delete-child-fix { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1, 77); -- violation + DELETE FROM c WHERE id=1; -- resolve by removing the child + COMMIT; + SELECT count(*) FROM c; +} {0} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-insert-self-ref-succeeds { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + id INTEGER PRIMARY KEY, + pid INT REFERENCES t(id) DEFERRABLE INITIALLY DEFERRED + ); + BEGIN; + INSERT INTO t VALUES(1, 1); -- self-reference, legal at COMMIT + COMMIT; + SELECT id, pid FROM t; +} {1|1} + +do_execsql_test_in_memory_any_error fk-deferred-update-child-breaks-commit-fails { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(10); + INSERT INTO c VALUES(1, 10); -- valid + BEGIN; + UPDATE c SET pid=99 WHERE id=1; -- create violation + COMMIT; -- must fail +} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-update-child-fix-before-commit { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(10); + INSERT INTO c VALUES(1, 10); + BEGIN; + UPDATE c SET pid=99 WHERE id=1; -- violation + UPDATE c SET pid=10 WHERE id=1; -- fix child back + COMMIT; + SELECT id, pid FROM c; +} {1|10} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-update-child-fix-by-inserting-parent { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(10); + INSERT INTO c VALUES(1, 10); + BEGIN; + UPDATE c SET pid=50 WHERE id=1; -- violation + INSERT INTO p VALUES(50); -- fix by adding parent + COMMIT; + SELECT id, pid FROM c; +} {1|50} + +do_execsql_test_in_memory_any_error fk-deferred-update-parent-breaks-commit-fails { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(32); + INSERT INTO c VALUES(1, 32); -- valid + BEGIN; + UPDATE p SET id=50 WHERE id=32; -- break child reference + COMMIT; -- must fail (no fix) +} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-update-parent-fix-by-updating-child { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(32); + INSERT INTO c VALUES(1, 32); + BEGIN; + UPDATE p SET id=50 WHERE id=32; -- break + UPDATE c SET pid=50 WHERE id=1; -- fix child to new parent key + COMMIT; + SELECT id, pid FROM c; +} {1|50} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-update-parent-fix-by-reverting-parent { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(32); + INSERT INTO c VALUES(1, 32); + BEGIN; + UPDATE p SET id=50 WHERE id=32; -- break + UPDATE p SET id=32 WHERE id=50; -- revert (fix) + COMMIT; + SELECT id, pid FROM c; +} {1|32} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-update-self-ref-id-change-and-fix { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + id INTEGER PRIMARY KEY, + pid INT REFERENCES t(id) DEFERRABLE INITIALLY DEFERRED + ); + INSERT INTO t VALUES(1,1); + BEGIN; + UPDATE t SET id=2 WHERE id=1; -- break self-ref + UPDATE t SET pid=2 WHERE id=2; -- fix to new self + COMMIT; + SELECT id, pid FROM t; +} {2|2} + +do_execsql_test_in_memory_any_error fk-deferred-delete-parent-commit-fails { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(10); + INSERT INTO c VALUES(1, 10); -- valid + BEGIN; + DELETE FROM p WHERE id=10; -- break reference + COMMIT; -- must fail +} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-delete-parent-then-delete-child-fix { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(10); + INSERT INTO c VALUES(1, 10); + BEGIN; + DELETE FROM p WHERE id=10; -- break + DELETE FROM c WHERE id=1; -- fix by removing child + COMMIT; + SELECT count(*) FROM p, c; -- both empty +} {0} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-delete-parent-then-reinsert-parent-fix { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(10); + INSERT INTO c VALUES(1, 10); + BEGIN; + DELETE FROM p WHERE id=10; -- break + INSERT INTO p VALUES(10); -- fix by re-creating parent + COMMIT; + SELECT id, pid FROM c; +} {1|10} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-delete-self-ref-row-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t( + id INTEGER PRIMARY KEY, + pid INT REFERENCES t(id) DEFERRABLE INITIALLY DEFERRED + ); + INSERT INTO t VALUES(1,1); -- valid + BEGIN; + DELETE FROM t WHERE id=1; -- removes both child+parent (same row) + COMMIT; -- should succeed + SELECT count(*) FROM t; +} {0} + +do_execsql_test_on_specific_db {:memory:} fk-deferred-delete-parent-then-update-child-to-null-fix { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c( + id INTEGER PRIMARY KEY, + pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED + ); + INSERT INTO p VALUES(5); + INSERT INTO c VALUES(1,5); + BEGIN; + DELETE FROM p WHERE id=5; -- break + UPDATE c SET pid=NULL WHERE id=1; -- fix (NULL never violates) + COMMIT; + SELECT id, pid FROM c; +} {1|} + +# AUTOCOMMIT: deferred FK still fails at end-of-statement +do_execsql_test_in_memory_any_error fk-deferred-autocommit-insert-missing-parent { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(id INTEGER PRIMARY KEY); + CREATE TABLE child(id INTEGER PRIMARY KEY, pid INT REFERENCES parent(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO child VALUES(1, 3); -- no BEGIN; should fail at statement end +} + +# AUTOCOMMIT: self-referential insert is OK (parent is same row) +do_execsql_test_on_specific_db {:memory:} fk-deferred-autocommit-selfref-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t(id INTEGER PRIMARY KEY, pid INT REFERENCES t(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO t VALUES(1,1); + SELECT * FROM t; +} {1|1} + +# AUTOCOMMIT: deleting a parent that has a child → fails at statement end +do_execsql_test_in_memory_any_error fk-deferred-autocommit-delete-parent-fails { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(1); + INSERT INTO c VALUES(10,1); + DELETE FROM p WHERE id=1; -- no BEGIN; should fail at statement end +} + +# TX: delete a referenced parent then reinsert before COMMIT -> OK +do_execsql_test_on_specific_db {:memory:} fk-deferred-tx-delete-parent-then-reinsert-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(5); + INSERT INTO c VALUES(1,5); + BEGIN; + DELETE FROM p WHERE id=5; -- violation (deferred) + INSERT INTO p VALUES(5); -- fix in same tx + COMMIT; + SELECT count(*) FROM p WHERE id=5; +} {1} + +# TX: multiple violating children, later insert parent, COMMIT -> OK +do_execsql_test_on_specific_db {:memory:} fk-deferred-tx-multi-children-fixed-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1,99); + INSERT INTO c VALUES(2,99); + INSERT INTO p VALUES(99); + COMMIT; + SELECT id,pid FROM c ORDER BY id; +} {1|99 2|99} + +# one of several children left unfixed -> COMMIT fails +do_execsql_test_in_memory_any_error fk-deferred-tx-multi-children-one-left-fails { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1,42); + INSERT INTO c VALUES(2,42); + INSERT INTO p VALUES(42); + UPDATE c SET pid=777 WHERE id=2; -- reintroduce a bad reference + COMMIT; -- should fail +} + +# composite PK parent, fix via parent UPDATE before COMMIT -> OK +do_execsql_test_on_specific_db {:memory:} fk-deferred-composite-parent-update-fix { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(a INT NOT NULL, b INT NOT NULL, PRIMARY KEY(a,b)); + CREATE TABLE child(id INT PRIMARY KEY, ca INT, cb INT, + FOREIGN KEY(ca,cb) REFERENCES parent(a,b) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO parent VALUES(1,1); + BEGIN; + INSERT INTO child VALUES(10, 7, 7); -- violation + UPDATE parent SET a=7, b=7 WHERE a=1 AND b=1; -- fix composite PK + COMMIT; + SELECT id, ca, cb FROM child; +} {10|7|7} + +# TX: NULL in child FK -> never a violation +do_execsql_test_on_specific_db {:memory:} fk-deferred-null-fk-never-violates { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1, NULL); -- always OK + COMMIT; + SELECT id, pid FROM c; +} {1|} + +# TX: child UPDATE to NULL resolves before COMMIT +do_execsql_test_on_specific_db {:memory:} fk-deferred-update-child-null-resolves { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1, 500); -- violation + UPDATE c SET pid=NULL WHERE id=1; -- resolves + COMMIT; + SELECT * FROM c; +} {1|} + +# TX: delete violating child resolves before COMMIT +do_execsql_test_on_specific_db {:memory:} fk-deferred-delete-child-resolves { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1, 777); -- violation + DELETE FROM c WHERE id=1; -- resolves + COMMIT; + SELECT count(*) FROM c; +} {0} + +# TX: update parent PK to match child before COMMIT -> OK +do_execsql_test_on_specific_db {:memory:} fk-deferred-update-parent-pk-resolves { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO p VALUES(10); + BEGIN; + INSERT INTO c VALUES(1, 20); -- violation + UPDATE p SET id=20 WHERE id=10; -- resolve via parent + COMMIT; + SELECT * FROM c; +} {1|20} + +# Two-table cycle; both inserted before COMMIT -> OK +do_execsql_test_on_specific_db {:memory:} fk-deferred-cycle-two-tables-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE a(id INT PRIMARY KEY, b_id INT, FOREIGN KEY(b_id) REFERENCES b(id) DEFERRABLE INITIALLY DEFERRED); + CREATE TABLE b(id INT PRIMARY KEY, a_id INT, FOREIGN KEY(a_id) REFERENCES a(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO a VALUES(1, 1); -- refers to b(1) (not yet present) + INSERT INTO b VALUES(1, 1); -- refers to a(1) + COMMIT; + SELECT count(b.id), count(a.id) FROM a, b; +} {1|1} + +# Delete a row that self-references (child==parent) within a tx -> OK +do_execsql_test_on_specific_db {:memory:} fk-deferred-selfref-delete-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE t(id INTEGER PRIMARY KEY, pid INT REFERENCES t(id) DEFERRABLE INITIALLY DEFERRED); + INSERT INTO t VALUES(1,1); + BEGIN; + DELETE FROM t WHERE id=1; + COMMIT; + SELECT count(*) FROM t; +} {0} + + +do_execsql_test_on_specific_db {:memory:} fk-parentcomp-donothing-noconflict-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE parent (id INTEGER PRIMARY KEY, a INT, b INT); + CREATE TABLE child_deferred ( + id INTEGER PRIMARY KEY, pid INT, x INT, + FOREIGN KEY(pid) REFERENCES parent(id) DEFERRABLE INITIALLY DEFERRED + ); + CREATE TABLE parent_comp (a INT NOT NULL, b INT NOT NULL, c INT, PRIMARY KEY(a,b)); + CREATE TABLE child_comp_deferred ( + id INTEGER PRIMARY KEY, ca INT, cb INT, z INT, + FOREIGN KEY (ca,cb) REFERENCES parent_comp(a,b) DEFERRABLE INITIALLY DEFERRED + ); + + -- No conflict on (a,b); should insert 1 row, no FK noise + INSERT INTO parent_comp VALUES (-1,-1,9) ON CONFLICT DO NOTHING; + SELECT a,b,c FROM parent_comp ORDER BY a,b; +} {-1|-1|9} + +do_execsql_test_on_specific_db {:memory:} fk-parentcomp-donothing-conflict-noop { + PRAGMA foreign_keys=ON; + CREATE TABLE parent_comp (a INT NOT NULL, b INT NOT NULL, c INT, PRIMARY KEY(a,b)); + CREATE TABLE child_comp_deferred ( + id INTEGER PRIMARY KEY, ca INT, cb INT, z INT, + FOREIGN KEY (ca,cb) REFERENCES parent_comp(a,b) DEFERRABLE INITIALLY DEFERRED + ); + + INSERT INTO parent_comp VALUES (10,20,1); + -- Conflicts with existing (10,20); must do nothing (no triggers, no FK scans that mutate counters) + INSERT INTO parent_comp VALUES (10,20,999) ON CONFLICT DO NOTHING; + SELECT a,b,c FROM parent_comp; +} {10|20|1} + +do_execsql_test_on_specific_db {:memory:} fk-parentcomp-donothing-unrelated-immediate-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child_immediate ( + id INTEGER PRIMARY KEY, pid INT, + FOREIGN KEY(pid) REFERENCES parent(id) -- IMMEDIATE + ); + CREATE TABLE parent_comp (a INT NOT NULL, b INT NOT NULL, c INT, PRIMARY KEY(a,b)); + CREATE TABLE child_comp_deferred ( + id INTEGER PRIMARY KEY, ca INT, cb INT, z INT, + FOREIGN KEY(ca,cb) REFERENCES parent_comp(a,b) DEFERRABLE INITIALLY DEFERRED + ); + + INSERT INTO parent_comp VALUES (-1,-1,9) ON CONFLICT DO NOTHING; + SELECT a,b,c FROM parent_comp; +} {-1|-1|9} + +do_execsql_test_on_specific_db {:memory:} fk-parentcomp-deferred-fix-inside-tx-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE parent_comp (a INT NOT NULL, b INT NOT NULL, c INT, PRIMARY KEY(a,b)); + CREATE TABLE child_comp_deferred ( + id INTEGER PRIMARY KEY, ca INT, cb INT, + FOREIGN KEY(ca,cb) REFERENCES parent_comp(a,b) DEFERRABLE INITIALLY DEFERRED + ); + + BEGIN; + INSERT INTO child_comp_deferred VALUES (1, -5, -6); -- violation + INSERT INTO parent_comp VALUES (-5, -6, 9); -- fix via parent insert + COMMIT; + SELECT id,ca,cb FROM child_comp_deferred; +} {1|-5|-6} + +do_execsql_test_on_specific_db {:memory:} fk-parentcomp-autocommit-unrelated-children-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE parent_comp (a INT NOT NULL, b INT NOT NULL, c INT, PRIMARY KEY(a,b)); + CREATE TABLE child_comp_deferred ( + id INTEGER PRIMARY KEY, ca INT, cb INT, + FOREIGN KEY(ca,cb) REFERENCES parent_comp(a,b) DEFERRABLE INITIALLY DEFERRED + ); + + INSERT INTO parent_comp VALUES (1,1,0); + INSERT INTO child_comp_deferred VALUES (10,1,1); -- valid + INSERT INTO parent_comp VALUES (2,2,0) ON CONFLICT DO NOTHING; -- unrelated insert; must not raise + SELECT a,b,c FROM parent_comp ORDER BY a,b; +} {1|1|0 +2|2|0} + +# ROLLBACK must clear any deferred state; next statement must not trip. +do_execsql_test_on_specific_db {:memory:} fk-rollback-clears-then-donothing-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + CREATE TABLE parent_comp(a INT NOT NULL, b INT NOT NULL, c INT, PRIMARY KEY(a,b)); + + BEGIN; + INSERT INTO c VALUES(1, 456); -- create deferred violation + ROLLBACK; -- must clear counters + + INSERT INTO parent_comp VALUES(-2,-2,0) ON CONFLICT DO NOTHING; + SELECT a,b,c FROM parent_comp; +} {-2|-2|0} + +# DO NOTHING conflict path must touch no FK maintenance at all. +do_execsql_test_on_specific_db {:memory:} fk-parentcomp-donothing-conflict-stays-quiet { + PRAGMA foreign_keys=ON; + CREATE TABLE parent_comp(a INT NOT NULL, b INT NOT NULL, c INT, PRIMARY KEY(a,b)); + CREATE TABLE child_comp_deferred( + id INTEGER PRIMARY KEY, ca INT, cb INT, + FOREIGN KEY(ca,cb) REFERENCES parent_comp(a,b) DEFERRABLE INITIALLY DEFERRED + ); + + INSERT INTO parent_comp VALUES(10,20,1); + -- This conflicts with (10,20) and must be a no-op; if counters move here, it’s a bug. + INSERT INTO parent_comp VALUES(10,20,999) ON CONFLICT DO NOTHING; + + -- Prove DB is sane afterwards (no stray FK error) + INSERT INTO parent_comp VALUES(11,22,3) ON CONFLICT DO NOTHING; + SELECT a,b FROM parent_comp ORDER BY a,b; +} {10|20 +11|22} + +# Two-statement fix inside an explicit transaction (separate statements). +#Insert child (violation), then insert parent in a new statement; commit must pass. +do_execsql_test_on_specific_db {:memory:} fk-deferred-two-stmt-fix-inside-tx-ok { + PRAGMA foreign_keys=ON; + CREATE TABLE p(id INTEGER PRIMARY KEY); + CREATE TABLE c(id INTEGER PRIMARY KEY, pid INT REFERENCES p(id) DEFERRABLE INITIALLY DEFERRED); + BEGIN; + INSERT INTO c VALUES(1, 777); -- violation recorded in tx + INSERT INTO p VALUES(777); -- next statement fixes it + COMMIT; + SELECT * FROM c; +} {1|777} + +do_execsql_test_on_specific_db {:memory:} fk-delete-composite-bounds { + PRAGMA foreign_keys=ON; + CREATE TABLE p(a INT NOT NULL, b INT NOT NULL, v INT, PRIMARY KEY(a,b)); + CREATE TABLE c(id INTEGER PRIMARY KEY, x INT, y INT, w INT, + FOREIGN KEY(x,y) REFERENCES p(a,b)); + + INSERT INTO p VALUES (5,1,0),(5,2,0),(5,4,0); + INSERT INTO c VALUES (1,5,4,0); -- child references (5,4) + + -- This should be a no-op (no row (5,3)), and MUST NOT error. + DELETE FROM p WHERE a=5 AND b=3; +} {} diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index a60d1fcc3..078df667b 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -2,7 +2,7 @@ pub mod grammar_generator; #[cfg(test)] mod tests { - use rand::seq::{IndexedRandom, SliceRandom}; + use rand::seq::{IndexedRandom, IteratorRandom, SliceRandom}; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; use rusqlite::{params, types::Value}; @@ -645,19 +645,334 @@ mod tests { "Different results! limbo: {:?}, sqlite: {:?}, seed: {}, query: {}, table def: {}", limbo_rows, sqlite_rows, seed, query, table_defs[i] ); + } + } + } + #[test] + pub fn fk_deferred_constraints_fuzz() { + let _ = env_logger::try_init(); + let (mut rng, seed) = rng_from_time(); + println!("fk_deferred_constraints_fuzz seed: {seed}"); + + const OUTER_ITERS: usize = 10; + const INNER_ITERS: usize = 50; + + for outer in 0..OUTER_ITERS { + println!("fk_deferred_constraints_fuzz {}/{}", outer + 1, OUTER_ITERS); + + let limbo_db = TempDatabase::new_empty(true); + let sqlite_db = TempDatabase::new_empty(true); + let limbo = limbo_db.connect_limbo(); + let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap(); + + let mut stmts: Vec = Vec::new(); + let mut log_and_exec = |sql: &str| { + stmts.push(sql.to_string()); + sql.to_string() + }; + // Enable FKs + let s = log_and_exec("PRAGMA foreign_keys=ON"); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Mix of immediate and deferred FK constraints + let s = log_and_exec("CREATE TABLE parent(id INTEGER PRIMARY KEY, a INT, b INT)"); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Child with DEFERRABLE INITIALLY DEFERRED FK + let s = log_and_exec( + "CREATE TABLE child_deferred(id INTEGER PRIMARY KEY, pid INT, x INT, \ + FOREIGN KEY(pid) REFERENCES parent(id) DEFERRABLE INITIALLY DEFERRED)", + ); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Child with immediate FK (default) + let s = log_and_exec( + "CREATE TABLE child_immediate(id INTEGER PRIMARY KEY, pid INT, y INT, \ + FOREIGN KEY(pid) REFERENCES parent(id))", + ); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Composite key parent for deferred testing + let s = log_and_exec( + "CREATE TABLE parent_comp(a INT NOT NULL, b INT NOT NULL, c INT, PRIMARY KEY(a,b))", + ); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Child with composite deferred FK + let s = log_and_exec( + "CREATE TABLE child_comp_deferred(id INTEGER PRIMARY KEY, ca INT, cb INT, z INT, \ + FOREIGN KEY(ca,cb) REFERENCES parent_comp(a,b) DEFERRABLE INITIALLY DEFERRED)", + ); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Seed initial data + let mut parent_ids = std::collections::HashSet::new(); + for _ in 0..rng.random_range(10..=25) { + let id = rng.random_range(1..=50) as i64; + if parent_ids.insert(id) { + let a = rng.random_range(-5..=25); + let b = rng.random_range(-5..=25); + let stmt = log_and_exec(&format!("INSERT INTO parent VALUES ({id}, {a}, {b})")); + limbo_exec_rows(&limbo_db, &limbo, &stmt); + sqlite.execute(&stmt, params![]).unwrap(); + } + } + + // Seed composite parent + let mut comp_pairs = std::collections::HashSet::new(); + for _ in 0..rng.random_range(3..=10) { + let a = rng.random_range(-3..=6) as i64; + let b = rng.random_range(-3..=6) as i64; + if comp_pairs.insert((a, b)) { + let c = rng.random_range(0..=20); + let stmt = + log_and_exec(&format!("INSERT INTO parent_comp VALUES ({a}, {b}, {c})")); + limbo_exec_rows(&limbo_db, &limbo, &stmt); + sqlite.execute(&stmt, params![]).unwrap(); + } + } + + // Transaction-based mutations with mix of deferred and immediate operations + for tx_num in 0..INNER_ITERS { + // Decide if we're in a transaction + let mut in_tx = false; + let use_transaction = rng.random_bool(0.7); + + if use_transaction && !in_tx { + in_tx = true; + let s = log_and_exec("BEGIN"); + let sres = sqlite.execute(&s, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &s); + match (&sres, &lres) { + (Ok(_), Ok(_)) | (Err(_), Err(_)) => {} + _ => { + eprintln!("BEGIN mismatch"); + eprintln!("sqlite result: {sres:?}"); + eprintln!("limbo result: {lres:?}"); + let file = std::fs::File::create("fk_deferred.sql").unwrap(); + for stmt in stmts.iter() { + writeln!(&file, "{stmt};").unwrap(); + } + eprintln!("Wrote `tests/fk_deferred.sql` for debugging"); + eprintln!("turso path: {}", limbo_db.path.display()); + eprintln!("sqlite path: {}", sqlite_db.path.display()); + panic!("BEGIN mismatch"); + } + } + } + + let op = rng.random_range(0..12); + let stmt = match op { + // Insert into child_deferred (can violate temporarily in transaction) + 0 => { + let id = rng.random_range(1000..=2000); + let pid = if rng.random_bool(0.6) { + *parent_ids.iter().choose(&mut rng).unwrap_or(&1) + } else { + // Non-existent parent - OK if deferred and fixed before commit + rng.random_range(200..=300) as i64 + }; + let x = rng.random_range(-10..=10); + format!("INSERT INTO child_deferred VALUES ({id}, {pid}, {x})") + } + // Insert into child_immediate (must satisfy FK immediately) + 1 => { + let id = rng.random_range(3000..=4000); + let pid = if rng.random_bool(0.8) { + *parent_ids.iter().choose(&mut rng).unwrap_or(&1) + } else { + rng.random_range(200..=300) as i64 + }; + let y = rng.random_range(-10..=10); + format!("INSERT INTO child_immediate VALUES ({id}, {pid}, {y})") + } + // Insert parent (may fix deferred violations) + 2 => { + let id = rng.random_range(1..=300); + let a = rng.random_range(-5..=25); + let b = rng.random_range(-5..=25); + parent_ids.insert(id as i64); + format!("INSERT INTO parent VALUES ({id}, {a}, {b})") + } + // Delete parent (may cause violations) + 3 => { + let id = if rng.random_bool(0.5) { + *parent_ids.iter().choose(&mut rng).unwrap_or(&1) + } else { + rng.random_range(1..=300) as i64 + }; + format!("DELETE FROM parent WHERE id={id}") + } + // Update parent PK + 4 => { + let old = rng.random_range(1..=300); + let new = rng.random_range(1..=350); + format!("UPDATE parent SET id={new} WHERE id={old}") + } + // Update child_deferred FK + 5 => { + let id = rng.random_range(1000..=2000); + let pid = if rng.random_bool(0.5) { + *parent_ids.iter().choose(&mut rng).unwrap_or(&1) + } else { + rng.random_range(200..=400) as i64 + }; + format!("UPDATE child_deferred SET pid={pid} WHERE id={id}") + } + // Insert into composite deferred child + 6 => { + let id = rng.random_range(5000..=6000); + let (ca, cb) = if rng.random_bool(0.6) { + *comp_pairs.iter().choose(&mut rng).unwrap_or(&(1, 1)) + } else { + // Non-existent composite parent + ( + rng.random_range(-5..=8) as i64, + rng.random_range(-5..=8) as i64, + ) + }; + let z = rng.random_range(0..=10); + format!( + "INSERT INTO child_comp_deferred VALUES ({id}, {ca}, {cb}, {z}) ON CONFLICT DO NOTHING" + ) + } + // Insert composite parent + 7 => { + let a = rng.random_range(-5..=8) as i64; + let b = rng.random_range(-5..=8) as i64; + let c = rng.random_range(0..=20); + comp_pairs.insert((a, b)); + format!("INSERT INTO parent_comp VALUES ({a}, {b}, {c})") + } + // UPSERT with deferred child + 8 => { + let id = rng.random_range(1000..=2000); + let pid = if rng.random_bool(0.5) { + *parent_ids.iter().choose(&mut rng).unwrap_or(&1) + } else { + rng.random_range(200..=400) as i64 + }; + let x = rng.random_range(-10..=10); + format!( + "INSERT INTO child_deferred VALUES ({id}, {pid}, {x}) + ON CONFLICT(id) DO UPDATE SET pid=excluded.pid, x=excluded.x" + ) + } + // Delete from child_deferred + 9 => { + let id = rng.random_range(1000..=2000); + format!("DELETE FROM child_deferred WHERE id={id}") + } + // Self-referential deferred insert (create temp violation then fix) + 10 if use_transaction => { + let id = rng.random_range(400..=500); + let pid = id + 1; // References non-existent yet + format!("INSERT INTO child_deferred VALUES ({id}, {pid}, 0)") + } + _ => { + // Default: simple parent insert + let id = rng.random_range(1..=300); + format!("INSERT INTO parent VALUES ({id}, 0, 0)") + } + }; + + let stmt = log_and_exec(&stmt); + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + + if !use_transaction && !in_tx { + match (sres, lres) { + (Ok(_), Ok(_)) | (Err(_), Err(_)) => {} + (s, l) => { + eprintln!("Non-tx mismatch: sqlite={s:?}, limbo={l:?}"); + eprintln!("Statement: {stmt}"); + eprintln!("Seed: {seed}, outer: {outer}, tx: {tx_num}"); + let mut file = std::fs::File::create("fk_deferred.sql").unwrap(); + for stmt in stmts.iter() { + writeln!(file, "{stmt};").expect("write to file"); + } + eprintln!("turso path: {}", limbo_db.path.display()); + eprintln!("sqlite path: {}", sqlite_db.path.display()); + panic!("Non-transactional operation mismatch, file written to 'tests/fk_deferred.sql'"); + } + } + } + + if use_transaction && in_tx { + // Randomly COMMIT or ROLLBACK + let commit = rng.random_bool(0.7); + let s = log_and_exec("COMMIT"); + + let sres = sqlite.execute(&s, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &s); + + match (sres, lres) { + (Ok(_), Ok(_)) => {} + (Err(_), Err(_)) => { + // Both failed - OK, deferred constraint violation at commit + if commit && in_tx { + in_tx = false; + let s = if commit { + log_and_exec("ROLLBACK") + } else { + log_and_exec("SELECT 1") // noop if we already rolled back + }; + + let sres = sqlite.execute(&s, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &s); + match (sres, lres) { + (Ok(_), Ok(_)) => {} + (s, l) => { + eprintln!("Post-failed-commit cleanup mismatch: sqlite={s:?}, limbo={l:?}"); + let mut file = + std::fs::File::create("fk_deferred.sql").unwrap(); + for stmt in stmts.iter() { + writeln!(file, "{stmt};").expect("write to file"); + } + eprintln!("turso path: {}", limbo_db.path.display()); + eprintln!("sqlite path: {}", sqlite_db.path.display()); + panic!("Post-failed-commit cleanup mismatch, file written to 'tests/fk_deferred.sql'"); + } + } + } + } + (s, l) => { + eprintln!("\n=== COMMIT/ROLLBACK mismatch ==="); + eprintln!("Operation: {s:?}"); + eprintln!("sqlite={s:?}, limbo={l:?}"); + eprintln!("Seed: {seed}, outer: {outer}, tx: {tx_num}"); + eprintln!("--- Replay statements ({}) ---", stmts.len()); + let mut file = std::fs::File::create("fk_deferred.sql").unwrap(); + for stmt in stmts.iter() { + writeln!(file, "{stmt};").expect("write to file"); + } + eprintln!("Turso path: {}", limbo_db.path.display()); + eprintln!("Sqlite path: {}", sqlite_db.path.display()); + panic!( + "outcome mismatch, .sql file written to `tests/fk_deferred.sql`" + ); + } + } } } } } + #[test] pub fn fk_single_pk_mutation_fuzz() { let _ = env_logger::try_init(); let (mut rng, seed) = rng_from_time(); println!("fk_single_pk_mutation_fuzz seed: {seed}"); - const OUTER_ITERS: usize = 50; - const INNER_ITERS: usize = 200; + const OUTER_ITERS: usize = 20; + const INNER_ITERS: usize = 100; for outer in 0..OUTER_ITERS { println!("fk_single_pk_mutation_fuzz {}/{}", outer + 1, OUTER_ITERS); @@ -679,7 +994,6 @@ mod tests { limbo_exec_rows(&limbo_db, &limbo, &s); sqlite.execute(&s, params![]).unwrap(); - // DDL let s = log_and_exec("CREATE TABLE p(id INTEGER PRIMARY KEY, a INT, b INT)"); limbo_exec_rows(&limbo_db, &limbo, &s); sqlite.execute(&s, params![]).unwrap(); @@ -704,8 +1018,14 @@ mod tests { let a = rng.random_range(-5..=25); let b = rng.random_range(-5..=25); let stmt = log_and_exec(&format!("INSERT INTO p VALUES ({id}, {a}, {b})")); - limbo_exec_rows(&limbo_db, &limbo, &stmt); - sqlite.execute(&stmt, params![]).unwrap(); + let l_res = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + let s_res = sqlite.execute(&stmt, params![]); + match (l_res, s_res) { + (Ok(_), Ok(_)) | (Err(_), Err(_)) => {} + _ => { + panic!("Seeding parent insert mismatch"); + } + } } // Seed child @@ -817,8 +1137,7 @@ mod tests { let a = rng.random_range(-5..=25); let b = rng.random_range(-5..=25); format!( - "INSERT INTO p VALUES({pick}, {a}, {b}) \ - ON CONFLICT(id) DO UPDATE SET a=excluded.a, b=excluded.b" + "INSERT INTO p VALUES({pick}, {a}, {b}) ON CONFLICT(id) DO UPDATE SET a=excluded.a, b=excluded.b" ) } else { let a = rng.random_range(-5..=25); @@ -935,14 +1254,368 @@ mod tests { } #[test] - #[ignore] // TODO: un-ignore when UNIQUE constraints are fixed + pub fn fk_edgecases_fuzzing() { + let _ = env_logger::try_init(); + let (mut rng, seed) = rng_from_time(); + println!("fk_edgecases_minifuzz seed: {seed}"); + + const OUTER_ITERS: usize = 20; + const INNER_ITERS: usize = 100; + + fn assert_parity( + seed: u64, + stmts: &[String], + sqlite_res: rusqlite::Result, + limbo_res: Result>, turso_core::LimboError>, + last_stmt: &str, + tag: &str, + ) { + match (sqlite_res.is_ok(), limbo_res.is_ok()) { + (true, true) | (false, false) => (), + _ => { + eprintln!("\n=== {tag} mismatch ==="); + eprintln!("seed: {seed}"); + eprintln!("sqlite: {sqlite_res:?}, limbo: {limbo_res:?}"); + eprintln!("stmt: {last_stmt}"); + eprintln!("--- replay statements ({}) ---", stmts.len()); + for (i, s) in stmts.iter().enumerate() { + eprintln!("{:04}: {};", i + 1, s); + } + panic!("{tag}: engines disagree"); + } + } + } + + // parent rowid, child textified integers -> MustBeInt coercion path + for outer in 0..OUTER_ITERS { + let limbo_db = TempDatabase::new_empty(true); + let sqlite_db = TempDatabase::new_empty(true); + let limbo = limbo_db.connect_limbo(); + let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap(); + + let mut stmts: Vec = Vec::new(); + let log = |s: &str, stmts: &mut Vec| { + stmts.push(s.to_string()); + s.to_string() + }; + + for s in [ + "PRAGMA foreign_keys=ON", + "CREATE TABLE p(id INTEGER PRIMARY KEY, a INT)", + "CREATE TABLE c(id INTEGER PRIMARY KEY, x INT, FOREIGN KEY(x) REFERENCES p(id))", + ] { + let s = log(s, &mut stmts); + let _ = limbo_exec_rows_fallible(&limbo_db, &limbo, &s); + let _ = sqlite.execute(&s, params![]); + } + + // Seed a few parents + for _ in 0..rng.random_range(2..=5) { + let id = rng.random_range(1..=15); + let a = rng.random_range(-5..=5); + let s = log(&format!("INSERT INTO p VALUES({id},{a})"), &mut stmts); + let _ = limbo_exec_rows_fallible(&limbo_db, &limbo, &s); + let _ = sqlite.execute(&s, params![]); + } + + // try random child inserts with weird text-ints + for i in 0..INNER_ITERS { + let id = 1000 + i as i64; + let raw = if rng.random_bool(0.7) { + 1 + rng.random_range(0..=15) + } else { + rng.random_range(100..=200) as i64 + }; + + // Randomly decorate the integer as text with spacing/zeros/plus + let pad_left_zeros = rng.random_range(0..=2); + let spaces_left = rng.random_range(0..=2); + let spaces_right = rng.random_range(0..=2); + let plus = if rng.random_bool(0.3) { "+" } else { "" }; + let txt_num = format!( + "{plus}{:0width$}", + raw, + width = (1 + pad_left_zeros) as usize + ); + let txt = format!( + "'{}{}{}'", + " ".repeat(spaces_left), + txt_num, + " ".repeat(spaces_right) + ); + + let stmt = log(&format!("INSERT INTO c VALUES({id}, {txt})"), &mut stmts); + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + assert_parity(seed, &stmts, sres, lres, &stmt, "A: rowid-coercion"); + } + println!("A {}/{} ok", outer + 1, OUTER_ITERS); + } + + // slf-referential rowid FK + for outer in 0..OUTER_ITERS { + let limbo_db = TempDatabase::new_empty(true); + let sqlite_db = TempDatabase::new_empty(true); + let limbo = limbo_db.connect_limbo(); + let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap(); + + let mut stmts: Vec = Vec::new(); + let log = |s: &str, stmts: &mut Vec| { + stmts.push(s.to_string()); + s.to_string() + }; + + for s in [ + "PRAGMA foreign_keys=ON", + "CREATE TABLE t(id INTEGER PRIMARY KEY, rid REFERENCES t(id))", + ] { + let s = log(s, &mut stmts); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + } + + // Self-match should succeed for many ids + for _ in 0..INNER_ITERS { + let id = rng.random_range(1..=500); + let stmt = log( + &format!("INSERT INTO t(id,rid) VALUES({id},{id})"), + &mut stmts, + ); + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + assert_parity(seed, &stmts, sres, lres, &stmt, "B1: self-row ok"); + } + + // Mismatch (rid != id) should fail (unless the referenced id already exists). + for _ in 0..rng.random_range(1..=10) { + let id = rng.random_range(1..=20); + let s = log( + &format!("INSERT INTO t(id,rid) VALUES({id},{id})"), + &mut stmts, + ); + let s_res = sqlite.execute(&s, params![]); + let turso_rs = limbo_exec_rows_fallible(&limbo_db, &limbo, &s); + match (s_res.is_ok(), turso_rs.is_ok()) { + (true, true) | (false, false) => {} + _ => panic!("Seeding self-ref failed differently"), + } + } + + for _ in 0..INNER_ITERS { + let id = rng.random_range(600..=900); + let ref_ = rng.random_range(1..=25); + let stmt = log( + &format!("INSERT INTO t(id,rid) VALUES({id},{ref_})"), + &mut stmts, + ); + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + assert_parity(seed, &stmts, sres, lres, &stmt, "B2: self-row mismatch"); + } + println!("B {}/{} ok", outer + 1, OUTER_ITERS); + } + + // self-referential UNIQUE(u,v) parent (fast-path for composite) + for outer in 0..OUTER_ITERS { + let limbo_db = TempDatabase::new_empty(true); + let sqlite_db = TempDatabase::new_empty(true); + let limbo = limbo_db.connect_limbo(); + let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap(); + + let mut stmts: Vec = Vec::new(); + let log = |s: &str, stmts: &mut Vec| { + stmts.push(s.to_string()); + s.to_string() + }; + + let s = log("PRAGMA foreign_keys=ON", &mut stmts); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Variant the schema a bit: TEXT/TEXT, NUMERIC/TEXT, etc. + let decls = [ + ("TEXT", "TEXT"), + ("TEXT", "NUMERIC"), + ("NUMERIC", "TEXT"), + ("TEXT", "BLOB"), + ]; + let (tu, tv) = decls[rng.random_range(0..decls.len())]; + + let s = log( + &format!( + "CREATE TABLE sr(u {tu}, v {tv}, cu {tu}, cv {tv}, UNIQUE(u,v), \ + FOREIGN KEY(cu,cv) REFERENCES sr(u,v))" + ), + &mut stmts, + ); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + + // Self-matching composite rows should succeed + for _ in 0..INNER_ITERS { + // Random small tokens, possibly padded + let u = format!("U{}", rng.random_range(0..50)); + let v = format!("V{}", rng.random_range(0..50)); + let mut cu = u.clone(); + let mut cv = v.clone(); + + // occasionally wrap child refs as blobs/text to stress coercion on parent index + if rng.random_bool(0.2) { + // child cv as hex blob of ascii v + let hex: String = v.bytes().map(|b| format!("{b:02X}")).collect(); + cv = format!("x'{hex}'"); + } else { + cu = format!("'{cu}'"); + cv = format!("'{cv}'"); + } + + let stmt = log( + &format!("INSERT INTO sr(u,v,cu,cv) VALUES('{u}','{v}',{cu},{cv})"), + &mut stmts, + ); + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + assert_parity(seed, &stmts, sres, lres, &stmt, "C1: self-UNIQUE ok"); + } + + // Non-self-match likely fails unless earlier rows happen to satisfy (u,v) + for _ in 0..INNER_ITERS { + let u = format!("U{}", rng.random_range(60..100)); + let v = format!("V{}", rng.random_range(60..100)); + let cu = format!("'U{}'", rng.random_range(0..40)); + let cv = format!("'{}{}'", "V", rng.random_range(0..40)); + let stmt = log( + &format!("INSERT INTO sr(u,v,cu,cv) VALUES('{u}','{v}',{cu},{cv})"), + &mut stmts, + ); + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + assert_parity(seed, &stmts, sres, lres, &stmt, "C2: self-UNIQUE mismatch"); + } + println!("C {}/{} ok", outer + 1, OUTER_ITERS); + } + + // parent TEXT UNIQUE(u,v), child types differ; rely on parent-index affinities + for outer in 0..OUTER_ITERS { + let limbo_db = TempDatabase::new_empty(true); + let sqlite_db = TempDatabase::new_empty(true); + let limbo = limbo_db.connect_limbo(); + let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap(); + + let mut stmts: Vec = Vec::new(); + let log = |s: &str, stmts: &mut Vec| { + stmts.push(s.to_string()); + s.to_string() + }; + + for s in [ + "PRAGMA foreign_keys=ON", + "CREATE TABLE parent(u TEXT, v TEXT, UNIQUE(u,v))", + "CREATE TABLE child(id INTEGER PRIMARY KEY, cu INT, cv BLOB, \ + FOREIGN KEY(cu,cv) REFERENCES parent(u,v))", + ] { + let s = log(s, &mut stmts); + limbo_exec_rows(&limbo_db, &limbo, &s); + sqlite.execute(&s, params![]).unwrap(); + } + + for _ in 0..rng.random_range(3..=8) { + let u_raw = rng.random_range(0..=9); + let v_raw = rng.random_range(0..=9); + let u = if rng.random_bool(0.4) { + format!("+{u_raw}") + } else { + format!("{u_raw}") + }; + let v = if rng.random_bool(0.5) { + format!("{v_raw:02}",) + } else { + format!("{v_raw}") + }; + let s = log( + &format!("INSERT INTO parent VALUES('{u}','{v}')"), + &mut stmts, + ); + let l_res = limbo_exec_rows_fallible(&limbo_db, &limbo, &s); + let s_res = sqlite.execute(&s, params![]); + match (s_res, l_res) { + (Ok(_), Ok(_)) | (Err(_), Err(_)) => {} + (x, y) => { + panic!("Parent seeding mismatch: sqlite {x:?}, limbo {y:?}"); + } + } + } + + for i in 0..INNER_ITERS { + let id = i as i64 + 1; + let u_txt = if rng.random_bool(0.7) { + format!("+{}", rng.random_range(0..=9)) + } else { + format!("{}", rng.random_range(0..=9)) + }; + let v_txt = if rng.random_bool(0.5) { + format!("{:02}", rng.random_range(0..=9)) + } else { + format!("{}", rng.random_range(0..=9)) + }; + + // produce child literals that *look different* but should match under TEXT affinity + // cu uses integer-ish form of u; cv uses blob of ASCII v or quoted v randomly. + let cu = if let Ok(u_int) = u_txt.trim().trim_start_matches('+').parse::() { + if rng.random_bool(0.5) { + format!("{u_int}",) + } else { + format!("'{u_txt}'") + } + } else { + format!("'{u_txt}'") + }; + let cv = if rng.random_bool(0.6) { + let hex: String = v_txt + .as_bytes() + .iter() + .map(|b| format!("{b:02X}")) + .collect(); + format!("x'{hex}'") + } else { + format!("'{v_txt}'") + }; + + let stmt = log( + &format!("INSERT INTO child VALUES({id}, {cu}, {cv})"), + &mut stmts, + ); + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + assert_parity(seed, &stmts, sres, lres, &stmt, "D1: parent-index affinity"); + } + + for i in 0..(INNER_ITERS / 3) { + let id = 10_000 + i as i64; + let cu = rng.random_range(0..=9); + let miss = rng.random_range(10..=19); + let stmt = log( + &format!("INSERT INTO child VALUES({id}, {cu}, x'{miss:02X}')"), + &mut stmts, + ); + let sres = sqlite.execute(&stmt, params![]); + let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); + assert_parity(seed, &stmts, sres, lres, &stmt, "D2: parent-index negative"); + } + println!("D {}/{} ok", outer + 1, OUTER_ITERS); + } + + println!("fk_edgecases_minifuzz complete (seed {seed})"); + } + + #[test] pub fn fk_composite_pk_mutation_fuzz() { let _ = env_logger::try_init(); let (mut rng, seed) = rng_from_time(); println!("fk_composite_pk_mutation_fuzz seed: {seed}"); - const OUTER_ITERS: usize = 30; - const INNER_ITERS: usize = 200; + const OUTER_ITERS: usize = 10; + const INNER_ITERS: usize = 100; for outer in 0..OUTER_ITERS { println!( From 7e9277958ba32022844f999d699938da0d4d77e0 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Mon, 6 Oct 2025 15:07:23 -0400 Subject: [PATCH 079/428] Fix deferred FK in vdbe --- core/lib.rs | 1 + core/translate/emitter.rs | 73 ++-------------- core/translate/fkeys.rs | 82 +++++------------ core/translate/insert.rs | 150 +++++++++++++++++-------------- core/vdbe/execute.rs | 160 +++++++++++++++++++--------------- core/vdbe/mod.rs | 3 - tests/integration/fuzz/mod.rs | 6 +- 7 files changed, 212 insertions(+), 263 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index a2ac4a267..6271d194c 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -1539,6 +1539,7 @@ impl Connection { pub fn set_foreign_keys_enabled(&self, enable: bool) { self.fk_pragma.store(enable, Ordering::Release); } + pub fn foreign_keys_enabled(&self) -> bool { self.fk_pragma.load(Ordering::Acquire) } diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index e281277dc..2dabd4b82 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -32,8 +32,8 @@ use crate::translate::expr::{ }; use crate::translate::fkeys::{ build_index_affinity_string, emit_fk_child_update_counters, - emit_fk_delete_parent_existence_checks, emit_fk_scope_if_needed, emit_parent_pk_change_checks, - stabilize_new_row_for_fk, + emit_fk_delete_parent_existence_checks, emit_guarded_fk_decrement, + emit_parent_pk_change_checks, open_read_index, open_read_table, stabilize_new_row_for_fk, }; use crate::translate::plan::{DeletePlan, JoinedTable, Plan, QueryDestination, Search}; use crate::translate::planner::ROWID_STRS; @@ -437,18 +437,6 @@ fn emit_program_for_delete( }); } - let fk_enabled = connection.foreign_keys_enabled(); - let table_name = plan - .table_references - .joined_tables() - .first() - .unwrap() - .table - .get_name() - .to_string(); - if fk_enabled { - emit_fk_scope_if_needed(program, resolver, &table_name, true)?; - } // Initialize cursors and other resources needed for query execution init_loop( program, @@ -487,9 +475,6 @@ fn emit_program_for_delete( None, )?; program.preassign_label_to_next_insn(after_main_loop_label); - if fk_enabled { - emit_fk_scope_if_needed(program, resolver, &table_name, false)?; - } // Finalize program program.result_columns = plan.result_columns; program.table_references.extend(plan.table_references); @@ -536,12 +521,7 @@ pub fn emit_fk_child_decrement_on_delete( .schema .get_btree_table(&fk_ref.fk.parent_table) .expect("parent btree"); - let pcur = program.alloc_cursor_id(CursorType::BTreeTable(parent_tbl.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: pcur, - root_page: parent_tbl.root_page, - db: 0, - }); + let pcur = open_read_table(program, &parent_tbl); let (pos, col) = child_tbl.get_column(&fk_ref.child_cols[0]).unwrap(); let val = if col.is_rowid_alias { @@ -581,15 +561,7 @@ pub fn emit_fk_child_decrement_on_delete( // Parent MISSING, decrement is guarded by FkIfZero to avoid underflow program.preassign_label_to_next_insn(missing); program.emit_insn(Insn::Close { cursor_id: pcur }); - program.emit_insn(Insn::FkIfZero { - is_scope: false, - target_pc: done, - }); - program.emit_insn(Insn::FkCounter { - is_scope: false, - increment_value: -1, - }); - + emit_guarded_fk_decrement(program, done); program.preassign_label_to_next_insn(done); } else { // Probe parent unique index @@ -598,12 +570,7 @@ pub fn emit_fk_child_decrement_on_delete( .get_btree_table(&fk_ref.fk.parent_table) .expect("parent btree"); let idx = fk_ref.parent_unique_index.as_ref().expect("unique index"); - let icur = program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone())); - program.emit_insn(Insn::OpenRead { - cursor_id: icur, - root_page: idx.root_page, - db: 0, - }); + let icur = open_read_index(program, idx); // Build probe from current child row let n = fk_ref.child_cols.len(); @@ -642,18 +609,10 @@ pub fn emit_fk_child_decrement_on_delete( num_regs: n, }); program.emit_insn(Insn::Close { cursor_id: icur }); - program.emit_insn(Insn::FkIfZero { - is_scope: false, - target_pc: ok, - }); - program.emit_insn(Insn::FkCounter { - increment_value: -1, - is_scope: false, - }); + emit_guarded_fk_decrement(program, ok); program.preassign_label_to_next_insn(ok); program.emit_insn(Insn::Close { cursor_id: icur }); } - program.preassign_label_to_next_insn(null_skip); } Ok(()) @@ -947,22 +906,6 @@ fn emit_program_for_update( program.decr_nesting(); } - let fk_enabled = connection.foreign_keys_enabled(); - let table_name = plan - .table_references - .joined_tables() - .first() - .unwrap() - .table - .get_name() - .to_string(); - - // statement-level FK scope open - if fk_enabled { - let open = true; - emit_fk_scope_if_needed(program, resolver, &table_name, open)?; - } - // Initialize the main loop init_loop( program, @@ -1029,10 +972,6 @@ fn emit_program_for_update( )?; program.preassign_label_to_next_insn(after_main_loop_label); - if fk_enabled { - let open = false; - emit_fk_scope_if_needed(program, resolver, &table_name, open)?; - } after(program); program.result_columns = plan.returning.unwrap_or_default(); diff --git a/core/translate/fkeys.rs b/core/translate/fkeys.rs index b2b356b37..b8544a078 100644 --- a/core/translate/fkeys.rs +++ b/core/translate/fkeys.rs @@ -7,32 +7,22 @@ use crate::{ vdbe::{ builder::CursorType, insn::{CmpInsFlags, Insn}, + BranchOffset, }, Result, }; use std::{collections::HashSet, num::NonZeroUsize, sync::Arc}; #[inline] -/// Increment/decrement the FK scope counter if `table_name` has either outgoing or incoming FKs. -/// -/// Returns `true` if a scope change was emitted. Scope open (+1) occurs before a statement -/// touching the table; scope close (−1) occurs after. On scope close, remaining deferred -/// violations are raised by the runtime. -pub fn emit_fk_scope_if_needed( - program: &mut ProgramBuilder, - resolver: &Resolver, - table_name: &str, - open: bool, -) -> Result { - let has_fks = resolver.schema.has_child_fks(table_name) - || resolver.schema.any_resolved_fks_referencing(table_name); - if has_fks { - program.emit_insn(Insn::FkCounter { - increment_value: if open { 1 } else { -1 }, - is_scope: true, - }); - } - Ok(has_fks) +pub fn emit_guarded_fk_decrement(program: &mut ProgramBuilder, label: BranchOffset) { + program.emit_insn(Insn::FkIfZero { + is_scope: false, + target_pc: label, + }); + program.emit_insn(Insn::FkCounter { + increment_value: -1, + is_scope: false, + }); } /// Open a read cursor on an index and return its cursor id. @@ -543,8 +533,7 @@ enum ParentProbePass { New, } -/// Probe the child side for a given parent key. If `increment_value` is +1, increment counter on match. -/// If −1, we guard with `FkIfZero` then decrement to avoid counter underflow in edge cases. +/// Probe the child side for a given parent key fn emit_fk_parent_key_probe( program: &mut ProgramBuilder, resolver: &Resolver, @@ -576,14 +565,7 @@ fn emit_fk_parent_key_probe( (true, ParentProbePass::New) => { // Guard to avoid underflow if OLD pass didn't increment. let skip = p.allocate_label(); - p.emit_insn(Insn::FkIfZero { - is_scope: false, - target_pc: skip, - }); - p.emit_insn(Insn::FkCounter { - increment_value: -1, - is_scope: false, - }); + emit_guarded_fk_decrement(p, skip); p.preassign_label_to_next_insn(skip); } // Immediate FK on NEW pass: nothing to cancel; do nothing. @@ -663,8 +645,8 @@ fn build_parent_key( /// Child-side FK maintenance for UPDATE/UPSERT: /// If any FK columns of this child row changed: -/// Pass 1 (OLD tuple): if OLD is non-NULL and parent is missing → decrement deferred counter (guarded). -/// Pass 2 (NEW tuple): if NEW is non-NULL and parent is missing → immediate error or deferred(+1). +/// Pass 1 (OLD tuple): if OLD is non-NULL and parent is missing: decrement deferred counter (guarded). +/// Pass 2 (NEW tuple): if NEW is non-NULL and parent is missing: immediate error or deferred(+1). #[allow(clippy::too_many_arguments)] pub fn emit_fk_child_update_counters( program: &mut ProgramBuilder, @@ -687,7 +669,6 @@ pub fn emit_fk_child_update_counters( let (pos, _col) = match child_tbl.get_column(cname) { Some(v) => v, None => { - // schema inconsistency; treat as no-old tuple return None; } }; @@ -701,7 +682,7 @@ pub fn emit_fk_child_update_counters( // No NULLs, proceed let cont = program.allocate_label(); program.emit_insn(Insn::Goto { target_pc: cont }); - // NULL encountered -> invalidate tuple by jumping here + // NULL encountered: invalidate tuple by jumping here program.preassign_label_to_next_insn(null_jmp); program.preassign_label_to_next_insn(cont); @@ -736,30 +717,23 @@ pub fn emit_fk_child_update_counters( }); program.emit_insn(Insn::MustBeInt { reg: rid }); - // If NOT exists => decrement (guarded) + // If NOT exists => decrement let miss = program.allocate_label(); program.emit_insn(Insn::NotExists { cursor: pcur, rowid_reg: rid, target_pc: miss, }); - // found → close & continue + // found: close & continue let join = program.allocate_label(); program.emit_insn(Insn::Close { cursor_id: pcur }); program.emit_insn(Insn::Goto { target_pc: join }); - // missing → guarded decrement + // missing: guarded decrement program.preassign_label_to_next_insn(miss); program.emit_insn(Insn::Close { cursor_id: pcur }); let skip = program.allocate_label(); - program.emit_insn(Insn::FkIfZero { - is_scope: false, - target_pc: skip, - }); - program.emit_insn(Insn::FkCounter { - is_scope: false, - increment_value: -1, - }); + emit_guarded_fk_decrement(program, skip); program.preassign_label_to_next_insn(skip); program.preassign_label_to_next_insn(join); @@ -786,14 +760,7 @@ pub fn emit_fk_child_update_counters( |_p| Ok(()), |p| { let skip = p.allocate_label(); - p.emit_insn(Insn::FkIfZero { - is_scope: false, - target_pc: skip, - }); - p.emit_insn(Insn::FkCounter { - is_scope: false, - increment_value: -1, - }); + emit_guarded_fk_decrement(p, skip); p.preassign_label_to_next_insn(skip); Ok(()) }, @@ -803,7 +770,6 @@ pub fn emit_fk_child_update_counters( } // Pass 2: NEW tuple handling - // If any NEW component is NULL → FK is satisfied vacuously. let fk_ok = program.allocate_label(); for cname in &fk_ref.fk.child_columns { let (i, col) = child_tbl.get_column(cname).unwrap(); @@ -825,7 +791,7 @@ pub fn emit_fk_child_update_counters( .expect("parent btree"); let pcur = open_read_table(program, &parent_tbl); - // Take the first child column value (rowid) from NEW image + // Take the first child column value from NEW image let (i_child, col_child) = child_tbl.get_column(&fk_ref.child_cols[0]).unwrap(); let val_reg = if col_child.is_rowid_alias { new_rowid_reg @@ -847,11 +813,11 @@ pub fn emit_fk_child_update_counters( rowid_reg: tmp, target_pc: violation, }); - // found → close and continue + // found: close and continue program.emit_insn(Insn::Close { cursor_id: pcur }); program.emit_insn(Insn::Goto { target_pc: fk_ok }); - // missing → violation (immediate HALT or deferred +1) + // missing: violation (immediate HALT or deferred +1) program.preassign_label_to_next_insn(violation); program.emit_insn(Insn::Close { cursor_id: pcur }); emit_fk_violation(program, &fk_ref.fk)?; @@ -866,7 +832,7 @@ pub fn emit_fk_child_update_counters( .expect("parent unique index required"); let icur = open_read_index(program, idx); - // Build NEW probe (in FK child column order → aligns with parent index columns) + // Build NEW probe (in FK child column order, aligns with parent index columns) let probe = { let start = program.alloc_registers(ncols); for (k, cname) in fk_ref.child_cols.iter().enumerate() { diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 7e4900f2c..b3e04ec91 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -17,7 +17,7 @@ use crate::translate::expr::{ BindingBehavior, ReturningValueRegisters, WalkControl, }; use crate::translate::fkeys::{ - build_index_affinity_string, emit_fk_scope_if_needed, emit_fk_violation, index_probe, + build_index_affinity_string, emit_fk_violation, emit_guarded_fk_decrement, index_probe, open_read_index, open_read_table, }; use crate::translate::plan::TableReferences; @@ -241,11 +241,11 @@ pub fn translate_insert( connection, )?; - let has_fks = if fk_enabled { - emit_fk_scope_if_needed(&mut program, resolver, table_name.as_str(), true)? - } else { - false - }; + let has_fks = fk_enabled + && (resolver.schema.has_child_fks(table_name.as_str()) + || resolver + .schema + .any_resolved_fks_referencing(table_name.as_str())); let mut yield_reg_opt = None; let mut temp_table_ctx = None; let (num_values, cursor_id) = match body { @@ -1200,10 +1200,6 @@ pub fn translate_insert( } program.preassign_label_to_next_insn(stmt_epilogue); - if has_fks { - emit_fk_scope_if_needed(&mut program, resolver, table_name.as_str(), false)?; - } - program.resolve_label(halt_label, program.offset()); Ok(program) @@ -1908,7 +1904,6 @@ pub fn emit_fk_child_insert_checks( new_rowid_reg: usize, ) -> crate::Result<()> { for fk_ref in resolver.schema.resolved_fks_for_child(&child_tbl.name)? { - let ncols = fk_ref.child_cols.len(); let is_self_ref = fk_ref.fk.parent_table.eq_ignore_ascii_case(&child_tbl.name); // Short-circuit if any NEW component is NULL @@ -1925,12 +1920,11 @@ pub fn emit_fk_child_insert_checks( target_pc: fk_ok, }); } - + let parent_tbl = resolver + .schema + .get_btree_table(&fk_ref.fk.parent_table) + .expect("parent btree"); if fk_ref.parent_uses_rowid { - let parent_tbl = resolver - .schema - .get_btree_table(&fk_ref.fk.parent_table) - .expect("parent btree"); let pcur = open_read_table(program, &parent_tbl); // first child col carries rowid @@ -1941,6 +1935,7 @@ pub fn emit_fk_child_insert_checks( new_start_reg + i_child }; + // Normalize rowid to integer for both the probe and the same-row fast path. let tmp = program.alloc_register(); program.emit_insn(Insn::Copy { src_reg: val_reg, @@ -1949,6 +1944,18 @@ pub fn emit_fk_child_insert_checks( }); program.emit_insn(Insn::MustBeInt { reg: tmp }); + // If this is a self-reference *and* the child FK equals NEW rowid, + // the constraint will be satisfied once this row is inserted + if is_self_ref { + program.emit_insn(Insn::Eq { + lhs: tmp, + rhs: new_rowid_reg, + target_pc: fk_ok, + flags: CmpInsFlags::default(), + collation: None, + }); + } + let violation = program.allocate_label(); program.emit_insn(Insn::NotExists { cursor: pcur, @@ -1958,31 +1965,20 @@ pub fn emit_fk_child_insert_checks( program.emit_insn(Insn::Close { cursor_id: pcur }); program.emit_insn(Insn::Goto { target_pc: fk_ok }); + // Missing parent: immediate vs deferred as usual program.preassign_label_to_next_insn(violation); program.emit_insn(Insn::Close { cursor_id: pcur }); - - // Self-ref: count (don’t halt). Non-self: standard behavior. - if is_self_ref { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - is_scope: false, - }); - } else { - emit_fk_violation(program, &fk_ref.fk)?; - } + emit_fk_violation(program, &fk_ref.fk)?; + program.preassign_label_to_next_insn(fk_ok); } else { - // Parent by unique index - let parent_tbl = resolver - .schema - .get_btree_table(&fk_ref.fk.parent_table) - .expect("parent btree"); let idx = fk_ref .parent_unique_index .as_ref() .expect("parent unique index required"); let icur = open_read_index(program, idx); + let ncols = fk_ref.child_cols.len(); - // Build NEW probe from child NEW values; apply parent index affinities + // Build NEW child probe from child NEW values, apply parent-index affinities. let probe = { let start = program.alloc_registers(ncols); for (k, cname) in fk_ref.child_cols.iter().enumerate() { @@ -2006,28 +2002,69 @@ pub fn emit_fk_child_insert_checks( } start }; + if is_self_ref { + // Determine the parent column order to compare against: + let parent_cols: Vec<&str> = + idx.columns.iter().map(|ic| ic.name.as_str()).collect(); + + // Build new parent-key image from this same row’s new values, in the index order. + let parent_new = program.alloc_registers(ncols); + for (i, pname) in parent_cols.iter().enumerate() { + let (pos, col) = child_tbl.get_column(pname).unwrap(); + program.emit_insn(Insn::Copy { + src_reg: if col.is_rowid_alias { + new_rowid_reg + } else { + new_start_reg + pos + }, + dst_reg: parent_new + i, + extra_amount: 0, + }); + } + if let Some(cnt) = NonZeroUsize::new(ncols) { + program.emit_insn(Insn::Affinity { + start_reg: parent_new, + count: cnt, + affinities: build_index_affinity_string(idx, &parent_tbl), + }); + } + + // Compare child probe to NEW parent image column-by-column. + let mismatch = program.allocate_label(); + for i in 0..ncols { + let cont = program.allocate_label(); + program.emit_insn(Insn::Eq { + lhs: probe + i, + rhs: parent_new + i, + target_pc: cont, + flags: CmpInsFlags::default().jump_if_null(), + collation: Some(super::collate::CollationSeq::Binary), + }); + program.emit_insn(Insn::Goto { + target_pc: mismatch, + }); + program.preassign_label_to_next_insn(cont); + } + // All equal: same-row OK + program.emit_insn(Insn::Goto { target_pc: fk_ok }); + program.preassign_label_to_next_insn(mismatch); + } index_probe( program, icur, probe, ncols, + // on_found: parent exists, FK satisfied |_p| Ok(()), + // on_not_found: behave like a normal FK |p| { - if is_self_ref { - p.emit_insn(Insn::FkCounter { - increment_value: 1, - is_scope: false, - }); - } else { - emit_fk_violation(p, &fk_ref.fk)?; - } + emit_fk_violation(p, &fk_ref.fk)?; Ok(()) }, )?; program.emit_insn(Insn::Goto { target_pc: fk_ok }); + program.preassign_label_to_next_insn(fk_ok); } - - program.preassign_label_to_next_insn(fk_ok); } Ok(()) } @@ -2127,7 +2164,7 @@ pub fn emit_parent_side_fk_decrement_on_insert( .child_table .name .eq_ignore_ascii_case(&parent_table.name); - // Skip only when it cannot repair anything: non-deferred and not self-ref. + // Skip only when it cannot repair anything: non-deferred and not self-referencing if !pref.fk.deferred && !is_self_ref { continue; } @@ -2172,22 +2209,15 @@ pub fn emit_parent_side_fk_decrement_on_insert( num_regs: n_cols, }); - // Not found => nothing to decrement + // Not found, nothing to decrement program.emit_insn(Insn::Close { cursor_id: icur }); let skip = program.allocate_label(); program.emit_insn(Insn::Goto { target_pc: skip }); - // Found => guarded decrement + // Found: guarded counter decrement program.resolve_label(found, program.offset()); program.emit_insn(Insn::Close { cursor_id: icur }); - program.emit_insn(Insn::FkIfZero { - is_scope: false, - target_pc: skip, - }); - program.emit_insn(Insn::FkCounter { - increment_value: -1, - is_scope: false, - }); + emit_guarded_fk_decrement(program, skip); program.resolve_label(skip, program.offset()); } else { // fallback scan :( @@ -2231,23 +2261,13 @@ pub fn emit_parent_side_fk_decrement_on_insert( }); program.resolve_label(cont, program.offset()); } - - // Matched one child row -> guarded decrement - program.emit_insn(Insn::FkIfZero { - is_scope: false, - target_pc: next_row, - }); - program.emit_insn(Insn::FkCounter { - is_scope: false, - increment_value: -1, - }); - + // Matched one child row: guarded decrement of counter + emit_guarded_fk_decrement(program, next_row); program.resolve_label(next_row, program.offset()); program.emit_insn(Insn::Next { cursor_id: ccur, pc_if_next: loop_top, }); - program.resolve_label(done, program.offset()); program.emit_insn(Insn::Close { cursor_id: ccur }); } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 09e112e0e..32cd18e79 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -35,7 +35,7 @@ use crate::{ }, translate::emitter::TransactionMode, }; -use crate::{get_cursor, CheckpointMode, MvCursor}; +use crate::{get_cursor, CheckpointMode, Connection, MvCursor}; use std::env::temp_dir; use std::ops::DerefMut; use std::{ @@ -2169,20 +2169,21 @@ pub fn halt( let auto_commit = program.connection.auto_commit.load(Ordering::SeqCst); tracing::trace!("halt(auto_commit={})", auto_commit); if auto_commit { - if program.connection.foreign_keys_enabled() + let res = program.commit_txn(pager.clone(), state, mv_store, false); + if res.is_ok() + && program.connection.foreign_keys_enabled() && program .connection .fk_deferred_violations .swap(0, Ordering::AcqRel) > 0 { + // In autocommit mode, a statement that leaves deferred violations must fail here. return Err(LimboError::Constraint( "foreign key constraint failed".to_string(), )); } - program - .commit_txn(pager.clone(), state, mv_store, false) - .map(Into::into) + res.map(Into::into) } else { Ok(InsnFunctionStepResult::Done) } @@ -2274,13 +2275,12 @@ pub fn op_transaction_inner( if write && conn.db.open_flags.get().contains(OpenFlags::ReadOnly) { return Err(LimboError::ReadOnly); } + // 1. We try to upgrade current version let current_state = conn.get_tx_state(); - let (new_transaction_state, updated, should_clear_deferred_violations) = if conn - .is_nested_stmt - .load(Ordering::SeqCst) + let (new_transaction_state, updated) = if conn.is_nested_stmt.load(Ordering::SeqCst) { - (current_state, false, false) + (current_state, false) } else { match (current_state, write) { // pending state means that we tried beginning a tx and the method returned IO. @@ -2295,36 +2295,30 @@ pub fn op_transaction_inner( schema_did_change: false, }, true, - true, ) } (TransactionState::Write { schema_did_change }, true) => { - (TransactionState::Write { schema_did_change }, false, false) + (TransactionState::Write { schema_did_change }, false) } (TransactionState::Write { schema_did_change }, false) => { - (TransactionState::Write { schema_did_change }, false, false) + (TransactionState::Write { schema_did_change }, false) } (TransactionState::Read, true) => ( TransactionState::Write { schema_did_change: false, }, true, - true, ), - (TransactionState::Read, false) => (TransactionState::Read, false, false), + (TransactionState::Read, false) => (TransactionState::Read, false), (TransactionState::None, true) => ( TransactionState::Write { schema_did_change: false, }, true, - true, ), - (TransactionState::None, false) => (TransactionState::Read, true, false), + (TransactionState::None, false) => (TransactionState::Read, true), } }; - if should_clear_deferred_violations { - conn.fk_deferred_violations.store(0, Ordering::Release); - } // 2. Start transaction if needed if let Some(mv_store) = &mv_store { @@ -2401,8 +2395,8 @@ pub fn op_transaction_inner( return Err(LimboError::Busy); } if let IOResult::IO(io) = begin_w_tx_res? { - // end the read transaction. // set the transaction state to pending so we don't have to + // end the read transaction. program .connection .set_tx_state(TransactionState::PendingUpgrade); @@ -2462,25 +2456,47 @@ pub fn op_auto_commit( load_insn!( AutoCommit { auto_commit, - rollback, + rollback }, insn ); - if matches!(state.commit_state, CommitState::Committing) { - return program - .commit_txn(pager.clone(), state, mv_store, *rollback) - .map(Into::into); - } let conn = program.connection.clone(); - if *auto_commit != conn.auto_commit.load(Ordering::SeqCst) { - if *rollback { - program // reset deferred fk violations on ROLLBACK - .connection - .fk_deferred_violations - .store(0, Ordering::Release); + let fk_on = conn.foreign_keys_enabled(); + let had_autocommit = conn.auto_commit.load(Ordering::SeqCst); // true, not in tx - // TODO(pere): add rollback I/O logic once we implement rollback journal + // Drive any multi-step commit/rollback that’s already in progress. + if matches!(state.commit_state, CommitState::Committing) { + let res = program + .commit_txn(pager.clone(), state, mv_store, *rollback) + .map(Into::into); + // Only clear after a final, successful non-rollback COMMIT. + if fk_on + && !*rollback + && matches!( + res, + Ok(InsnFunctionStepResult::Step | InsnFunctionStepResult::Done) + ) + { + conn.clear_deferred_foreign_key_violations(); + } + return res; + } + + // The logic in this opcode can be a bit confusing, so to make things a bit clearer lets be + // very explicit about the currently existing and requested state. + let requested_autocommit = *auto_commit; + let requested_rollback = *rollback; + let changed = requested_autocommit != had_autocommit; + + // what the requested operation is + let is_begin_req = had_autocommit && !requested_autocommit && !requested_rollback; + let is_commit_req = !had_autocommit && requested_autocommit && !requested_rollback; + let is_rollback_req = !had_autocommit && requested_autocommit && requested_rollback; + + if changed { + if requested_rollback { + // ROLLBACK transition if let Some(mv_store) = mv_store { if let Some(tx_id) = conn.get_mv_tx_id() { mv_store.rollback_tx(tx_id, pager.clone(), &conn); @@ -2491,25 +2507,23 @@ pub fn op_auto_commit( conn.set_tx_state(TransactionState::None); conn.auto_commit.store(true, Ordering::SeqCst); } else { - if conn.foreign_keys_enabled() { - let violations = conn.fk_deferred_violations.swap(0, Ordering::AcqRel); - if violations > 0 { - // Fail the commit - return Err(LimboError::Constraint( - "FOREIGN KEY constraint failed".into(), - )); - } + // BEGIN (true->false) or COMMIT (false->true) + if is_commit_req { + // Pre-check deferred FKs; leave tx open and do NOT clear violations + check_deferred_fk_on_commit(&conn)?; } - conn.auto_commit.store(*auto_commit, Ordering::SeqCst); + conn.auto_commit + .store(requested_autocommit, Ordering::SeqCst); } } else { - let mvcc_tx_active = program.connection.get_mv_tx().is_some(); + // No autocommit flip + let mvcc_tx_active = conn.get_mv_tx().is_some(); if !mvcc_tx_active { - if !*auto_commit { + if !requested_autocommit { return Err(LimboError::TxError( "cannot start a transaction within a transaction".to_string(), )); - } else if *rollback { + } else if requested_rollback { return Err(LimboError::TxError( "cannot rollback - no transaction is active".to_string(), )); @@ -2518,28 +2532,41 @@ pub fn op_auto_commit( "cannot commit - no transaction is active".to_string(), )); } - } else { - let is_begin = !*auto_commit && !*rollback; - if is_begin { - return Err(LimboError::TxError( - "cannot use BEGIN after BEGIN CONCURRENT".to_string(), - )); - } - } - if conn.foreign_keys_enabled() { - let violations = conn.fk_deferred_violations.swap(0, Ordering::AcqRel); - if violations > 0 { - // Fail the commit - return Err(LimboError::Constraint( - "FOREIGN KEY constraint failed".into(), - )); - } + } else if is_begin_req { + return Err(LimboError::TxError( + "cannot use BEGIN after BEGIN CONCURRENT".to_string(), + )); } } - program - .commit_txn(pager.clone(), state, mv_store, *rollback) - .map(Into::into) + let res = program + .commit_txn(pager.clone(), state, mv_store, requested_rollback) + .map(Into::into); + + // Clear deferred FK counters only after FINAL success of COMMIT/ROLLBACK. + if fk_on + && matches!( + res, + Ok(InsnFunctionStepResult::Step | InsnFunctionStepResult::Done) + ) + && (is_rollback_req || is_commit_req) + { + conn.clear_deferred_foreign_key_violations(); + } + + res +} + +fn check_deferred_fk_on_commit(conn: &Connection) -> Result<()> { + if !conn.foreign_keys_enabled() { + return Ok(()); + } + if conn.get_deferred_foreign_key_violations() > 0 { + return Err(LimboError::Constraint( + "FOREIGN KEY constraint failed".into(), + )); + } + Ok(()) } pub fn op_goto( @@ -8373,10 +8400,7 @@ pub fn op_fk_if_zero( return Ok(InsnFunctionStepResult::Step); } let v = if !*is_scope { - program - .connection - .fk_deferred_violations - .load(Ordering::Acquire) + program.connection.get_deferred_foreign_key_violations() } else { state.fk_scope_counter }; diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index db633c2f1..bcb4372d5 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -914,9 +914,6 @@ impl Program { self.connection .set_changes(self.n_change.load(Ordering::SeqCst)); } - if connection.foreign_keys_enabled() { - connection.clear_deferred_foreign_key_violations(); - } Ok(IOResult::Done(())) } } diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 078df667b..8cbac20c6 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -650,6 +650,8 @@ mod tests { } #[test] + #[allow(unused_assignments)] + #[ignore] // ignoring because every error I can find is due to sqlite sub-transaction behavior pub fn fk_deferred_constraints_fuzz() { let _ = env_logger::try_init(); let (mut rng, seed) = rng_from_time(); @@ -893,7 +895,7 @@ mod tests { (s, l) => { eprintln!("Non-tx mismatch: sqlite={s:?}, limbo={l:?}"); eprintln!("Statement: {stmt}"); - eprintln!("Seed: {seed}, outer: {outer}, tx: {tx_num}"); + eprintln!("Seed: {seed}, outer: {outer}, tx: {tx_num}, in_tx={in_tx}"); let mut file = std::fs::File::create("fk_deferred.sql").unwrap(); for stmt in stmts.iter() { writeln!(file, "{stmt};").expect("write to file"); @@ -947,7 +949,7 @@ mod tests { eprintln!("\n=== COMMIT/ROLLBACK mismatch ==="); eprintln!("Operation: {s:?}"); eprintln!("sqlite={s:?}, limbo={l:?}"); - eprintln!("Seed: {seed}, outer: {outer}, tx: {tx_num}"); + eprintln!("Seed: {seed}, outer: {outer}, tx: {tx_num}, in_tx={in_tx}"); eprintln!("--- Replay statements ({}) ---", stmts.len()); let mut file = std::fs::File::create("fk_deferred.sql").unwrap(); for stmt in stmts.iter() { From 7b17c824fb2957fcaa20bc3c5fafbe8f1865d61e Mon Sep 17 00:00:00 2001 From: Konstantinos Artopoulos Date: Wed, 8 Oct 2025 00:16:33 +0300 Subject: [PATCH 080/428] feat(cli): .tables and .indexes dot commands should show tables and indexes from attached databases --- cli/app.rs | 109 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 69 insertions(+), 40 deletions(-) diff --git a/cli/app.rs b/cli/app.rs index efc2f312f..490818817 100644 --- a/cli/app.rs +++ b/cli/app.rs @@ -1239,26 +1239,33 @@ impl Limbo { } fn display_indexes(&mut self, maybe_table: Option) -> anyhow::Result<()> { - let sql = match maybe_table { - Some(ref tbl_name) => format!( - "SELECT name FROM sqlite_schema WHERE type='index' AND tbl_name = '{tbl_name}' ORDER BY 1" - ), - None => String::from("SELECT name FROM sqlite_schema WHERE type='index' ORDER BY 1"), - }; - let mut indexes = String::new(); - let handler = |row: &turso_core::Row| -> anyhow::Result<()> { - if let Ok(Value::Text(idx)) = row.get::<&Value>(0) { - indexes.push_str(idx.as_str()); - indexes.push(' '); - } - Ok(()) - }; - if let Err(err) = self.handle_row(&sql, handler) { - if err.to_string().contains("no such table: sqlite_schema") { - return Err(anyhow::anyhow!("Unable to access database schema. The database may be using an older SQLite version or may not be properly initialized.")); - } else { - return Err(anyhow::anyhow!("Error querying schema: {}", err)); + + for name in self.database_names()? { + let prefix = (name != "main").then_some(&name); + let sql = match maybe_table { + Some(ref tbl_name) => format!( + "SELECT name FROM {name}.sqlite_schema WHERE type='index' AND tbl_name = '{tbl_name}' ORDER BY 1" + ), + None => format!("SELECT name FROM {name}.sqlite_schema WHERE type='index' ORDER BY 1"), + }; + let handler = |row: &turso_core::Row| -> anyhow::Result<()> { + if let Ok(Value::Text(idx)) = row.get::<&Value>(0) { + if let Some(prefix) = prefix { + indexes.push_str(prefix); + indexes.push('.'); + } + indexes.push_str(idx.as_str()); + indexes.push(' '); + } + Ok(()) + }; + if let Err(err) = self.handle_row(&sql, handler) { + if err.to_string().contains("no such table: sqlite_schema") { + return Err(anyhow::anyhow!("Unable to access database schema. The database may be using an older SQLite version or may not be properly initialized.")); + } else { + return Err(anyhow::anyhow!("Error querying schema: {}", err)); + } } } if !indexes.is_empty() { @@ -1268,28 +1275,35 @@ impl Limbo { } fn display_tables(&mut self, pattern: Option<&str>) -> anyhow::Result<()> { - let sql = match pattern { - Some(pattern) => format!( - "SELECT name FROM sqlite_schema WHERE type='table' AND name NOT LIKE 'sqlite_%' AND name LIKE '{pattern}' ORDER BY 1" - ), - None => String::from( - "SELECT name FROM sqlite_schema WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY 1" - ), - }; - let mut tables = String::new(); - let handler = |row: &turso_core::Row| -> anyhow::Result<()> { - if let Ok(Value::Text(table)) = row.get::<&Value>(0) { - tables.push_str(table.as_str()); - tables.push(' '); - } - Ok(()) - }; - if let Err(e) = self.handle_row(&sql, handler) { - if e.to_string().contains("no such table: sqlite_schema") { - return Err(anyhow::anyhow!("Unable to access database schema. The database may be using an older SQLite version or may not be properly initialized.")); - } else { - return Err(anyhow::anyhow!("Error querying schema: {}", e)); + + for name in self.database_names()? { + let prefix = (name != "main").then_some(&name); + let sql = match pattern { + Some(pattern) => format!( + "SELECT name FROM {name}.sqlite_schema WHERE type='table' AND name NOT LIKE 'sqlite_%' AND name LIKE '{pattern}' ORDER BY 1" + ), + None => format!( + "SELECT name FROM {name}.sqlite_schema WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY 1" + ), + }; + let handler = |row: &turso_core::Row| -> anyhow::Result<()> { + if let Ok(Value::Text(table)) = row.get::<&Value>(0) { + if let Some(prefix) = prefix { + tables.push_str(prefix); + tables.push('.'); + } + tables.push_str(table.as_str()); + tables.push(' '); + } + Ok(()) + }; + if let Err(e) = self.handle_row(&sql, handler) { + if e.to_string().contains("no such table: sqlite_schema") { + return Err(anyhow::anyhow!("Unable to access database schema. The database may be using an older SQLite version or may not be properly initialized.")); + } else { + return Err(anyhow::anyhow!("Error querying schema: {}", e)); + } } } if !tables.is_empty() { @@ -1304,6 +1318,21 @@ impl Limbo { Ok(()) } + fn database_names(&mut self) -> anyhow::Result> { + let sql = "PRAGMA database_list"; + let mut db_names: Vec = Vec::new(); + let handler = |row: &turso_core::Row| -> anyhow::Result<()> { + if let Ok(Value::Text(name)) = row.get::<&Value>(1) { + db_names.push(name.to_string()); + } + Ok(()) + }; + match self.handle_row(&sql, handler) { + Ok(_) => Ok(db_names), + Err(e) => Err(anyhow::anyhow!("Error in database list: {}", e)), + } + } + fn handle_row(&mut self, sql: &str, mut handler: F) -> anyhow::Result<()> where F: FnMut(&turso_core::Row) -> anyhow::Result<()>, From 1f68e80c50b633da59b626cfb061cdd35b3d26b7 Mon Sep 17 00:00:00 2001 From: Konstantinos Artopoulos Date: Wed, 8 Oct 2025 00:17:03 +0300 Subject: [PATCH 081/428] test(cli_tests): .tables with attached db --- testing/cli_tests/cli_test_cases.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/testing/cli_tests/cli_test_cases.py b/testing/cli_tests/cli_test_cases.py index fa07285d4..10ad90765 100755 --- a/testing/cli_tests/cli_test_cases.py +++ b/testing/cli_tests/cli_test_cases.py @@ -157,6 +157,7 @@ def test_output_file(): # Clean up os.remove(output_file) + shell.quit() def test_multi_line_single_line_comments_succession(): @@ -367,6 +368,16 @@ def test_parse_error(): lambda res: "Parse error: " in res, "Try to LIMIT using an identifier should trigger a Parse error", ) + turso.quit() + + +def test_tables_with_attached_db(): + shell = TestTursoShell() + shell.execute_dot(".open :memory:") + shell.execute_dot("CREATE TABLE orders(a);") + shell.execute_dot("ATTACH DATABASE 'testing/testing.db' AS attached;") + shell.run_test("tables-with-attached-database", ".tables", "orders attached.products attached.users") + shell.quit() def main(): @@ -393,6 +404,7 @@ def main(): test_copy_db_file() test_copy_memory_db_to_file() test_parse_error() + test_tables_with_attached_db() console.info("All tests have passed") From f5766379ce554145889c3bfbfc55673e009d16a7 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Wed, 8 Oct 2025 08:36:12 +0300 Subject: [PATCH 082/428] Allow unbound identifiers specifically for INSERT ... ON CONFLICT the binding for the ON CONFLICT clause is done later. --- core/translate/expr.rs | 12 ++++++++++++ core/translate/insert.rs | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index feb2960f3..5a743fa46 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -3272,11 +3272,14 @@ impl ParamState { /// TryCanonicalColumnsFirst means that canonical columns take precedence over result columns. This is used for e.g. WHERE clauses. /// /// ResultColumnsNotAllowed means that referring to result columns is not allowed. This is used e.g. for DML statements. +/// +/// AllowUnboundIdentifiers means that unbound identifiers are allowed. This is used for INSERT ... ON CONFLICT DO UPDATE SET ... where binding is handled later than this phase. #[derive(Debug, Clone, PartialEq, Eq)] pub enum BindingBehavior { TryResultColumnsFirst, TryCanonicalColumnsFirst, ResultColumnsNotAllowed, + AllowUnboundIdentifiers, } /// Rewrite ast::Expr in place, binding Column references/rewriting Expr::Id -> Expr::Column @@ -3329,6 +3332,9 @@ pub fn bind_and_rewrite_expr<'a>( match expr { Expr::Id(id) => { let Some(referenced_tables) = &mut referenced_tables else { + if binding_behavior == BindingBehavior::AllowUnboundIdentifiers { + return Ok(WalkControl::Continue); + } crate::bail_parse_error!("no such column: {}", id.as_str()); }; let normalized_id = normalize_ident(id.as_str()); @@ -3459,6 +3465,9 @@ pub fn bind_and_rewrite_expr<'a>( Expr::Qualified(tbl, id) => { tracing::debug!("bind_and_rewrite_expr({:?}, {:?})", tbl, id); let Some(referenced_tables) = &mut referenced_tables else { + if binding_behavior == BindingBehavior::AllowUnboundIdentifiers { + return Ok(WalkControl::Continue); + } crate::bail_parse_error!( "no such column: {}.{}", tbl.as_str(), @@ -3499,6 +3508,9 @@ pub fn bind_and_rewrite_expr<'a>( } Expr::DoublyQualified(db_name, tbl_name, col_name) => { let Some(referenced_tables) = &mut referenced_tables else { + if binding_behavior == BindingBehavior::AllowUnboundIdentifiers { + return Ok(WalkControl::Continue); + } crate::bail_parse_error!( "no such column: {}.{}.{}", db_name.as_str(), diff --git a/core/translate/insert.rs b/core/translate/insert.rs index e46a5607d..6e8d2fdeb 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -190,7 +190,7 @@ pub fn translate_insert( None, connection, &mut program.param_ctx, - BindingBehavior::ResultColumnsNotAllowed, + BindingBehavior::AllowUnboundIdentifiers, )?; } if let Some(ref mut where_expr) = where_clause { @@ -200,7 +200,7 @@ pub fn translate_insert( None, connection, &mut program.param_ctx, - BindingBehavior::ResultColumnsNotAllowed, + BindingBehavior::AllowUnboundIdentifiers, )?; } } From 111b6fcb814183393a13b5703fb11761a9f54b04 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 7 Oct 2025 22:58:24 -0700 Subject: [PATCH 083/428] support the same syntax as sqlite for version function SQLite surprisingly supports this: select sqlite_version(*); this gets translated at the parser level to sqlite_version(), and it works for all functions that take 0 arguments. Let's be compatible with SQLite and support the same thing. --- core/function.rs | 22 +++++++++++++++++ core/translate/expr.rs | 51 +++++++++++++++++++++++++++++++++++++-- core/translate/planner.rs | 21 +++++++++++++++- 3 files changed, 91 insertions(+), 3 deletions(-) diff --git a/core/function.rs b/core/function.rs index b2858af4a..b5a6503cc 100644 --- a/core/function.rs +++ b/core/function.rs @@ -642,6 +642,28 @@ impl Func { Self::AlterTable(_) => true, } } + + pub fn supports_star_syntax(&self) -> bool { + match self { + Self::Scalar(scalar_func) => { + matches!( + scalar_func, + ScalarFunc::Changes + | ScalarFunc::Random + | ScalarFunc::TotalChanges + | ScalarFunc::SqliteVersion + | ScalarFunc::SqliteSourceId + | ScalarFunc::LastInsertRowid + ) + } + Self::Math(math_func) => { + matches!(math_func.arity(), MathFuncArity::Nullary) + } + // Aggregate functions with (*) syntax are handled separately in the planner + Self::Agg(_) => false, + _ => false, + } + } pub fn resolve_function(name: &str, arg_count: usize) -> Result { let normalized_name = crate::util::normalize_ident(name); match normalized_name.as_str() { diff --git a/core/translate/expr.rs b/core/translate/expr.rs index e110523f3..940c7a01e 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -1819,8 +1819,55 @@ pub fn translate_expr( Func::AlterTable(_) => unreachable!(), } } - ast::Expr::FunctionCallStar { .. } => { - crate::bail_parse_error!("FunctionCallStar in WHERE clause is not supported") + ast::Expr::FunctionCallStar { name, filter_over } => { + // Handle func(*) syntax as a function call with 0 arguments + // This is equivalent to func() for functions that accept 0 arguments + let args_count = 0; + let func_type = resolver.resolve_function(name.as_str(), args_count); + + if func_type.is_none() { + crate::bail_parse_error!("unknown function {}", name.as_str()); + } + + let func_ctx = FuncCtx { + func: func_type.unwrap(), + arg_count: args_count, + }; + + // Check if this function supports the (*) syntax by verifying it can be called with 0 args + match &func_ctx.func { + Func::Agg(_) => { + crate::bail_parse_error!( + "misuse of {} function {}(*)", + if filter_over.over_clause.is_some() { + "window" + } else { + "aggregate" + }, + name.as_str() + ) + } + // For supported functions, delegate to the existing FunctionCall logic + // by creating a synthetic FunctionCall with empty args + _ => { + let synthetic_call = ast::Expr::FunctionCall { + name: name.clone(), + distinctness: None, + args: vec![], // Empty args for func(*) + filter_over: filter_over.clone(), + order_by: vec![], // Empty order_by for func(*) + }; + + // Recursively call translate_expr with the synthetic function call + translate_expr( + program, + referenced_tables, + &synthetic_call, + target_register, + resolver, + ) + } + } } ast::Expr::Id(id) => { // Treat double-quoted identifiers as string literals (SQLite compatibility) diff --git a/core/translate/planner.rs b/core/translate/planner.rs index 589b45f3f..43254474f 100644 --- a/core/translate/planner.rs +++ b/core/translate/planner.rs @@ -180,7 +180,26 @@ pub fn resolve_window_and_aggregate_functions( name.as_str() ); } - crate::bail_parse_error!("Invalid aggregate function: {}", name.as_str()); + + // Check if the function supports (*) syntax using centralized logic + match crate::function::Func::resolve_function(name.as_str(), 0) { + Ok(func) => { + if func.supports_star_syntax() { + return Ok(WalkControl::Continue); + } else { + crate::bail_parse_error!( + "wrong number of arguments to function {}()", + name.as_str() + ); + } + } + Err(_) => { + crate::bail_parse_error!( + "wrong number of arguments to function {}()", + name.as_str() + ); + } + } } Err(e) => match e { crate::LimboError::ParseError(e) => { From 94c343770d67116fe73abbd28754c13b58370be2 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 7 Oct 2025 09:32:48 +0300 Subject: [PATCH 084/428] mvcc: Disable automatic checkpointing by default MVCC checkpointing currently prevents concurrent writes so disable it by default while we work on it. --- core/lib.rs | 4 ++-- core/mvcc/database/mod.rs | 4 ++-- core/mvcc/persistent_storage/logical_log.rs | 18 +++++++++--------- core/mvcc/persistent_storage/mod.rs | 4 ++-- core/translate/pragma.rs | 8 +++++--- 5 files changed, 20 insertions(+), 18 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 8145af6e7..c62c05b80 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -2341,7 +2341,7 @@ impl Connection { *self.mv_tx.read() } - pub(crate) fn set_mvcc_checkpoint_threshold(&self, threshold: u64) -> Result<()> { + pub(crate) fn set_mvcc_checkpoint_threshold(&self, threshold: i64) -> Result<()> { match self.db.mv_store.as_ref() { Some(mv_store) => { mv_store.set_checkpoint_threshold(threshold); @@ -2351,7 +2351,7 @@ impl Connection { } } - pub(crate) fn mvcc_checkpoint_threshold(&self) -> Result { + pub(crate) fn mvcc_checkpoint_threshold(&self) -> Result { match self.db.mv_store.as_ref() { Some(mv_store) => Ok(mv_store.checkpoint_threshold()), None => Err(LimboError::InternalError("MVCC not enabled".into())), diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 95014cbc2..c7352fe7c 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -2025,11 +2025,11 @@ impl MvStore { Ok(true) } - pub fn set_checkpoint_threshold(&self, threshold: u64) { + pub fn set_checkpoint_threshold(&self, threshold: i64) { self.storage.set_checkpoint_threshold(threshold) } - pub fn checkpoint_threshold(&self) -> u64 { + pub fn checkpoint_threshold(&self) -> i64 { self.storage.checkpoint_threshold() } } diff --git a/core/mvcc/persistent_storage/logical_log.rs b/core/mvcc/persistent_storage/logical_log.rs index a340d6c43..f2a09230e 100644 --- a/core/mvcc/persistent_storage/logical_log.rs +++ b/core/mvcc/persistent_storage/logical_log.rs @@ -12,17 +12,14 @@ use std::sync::{Arc, RwLock}; use crate::File; -pub const DEFAULT_LOG_CHECKPOINT_THRESHOLD: u64 = 1024 * 1024 * 8; // 8 MiB as default to mimic - // 2000 pages in sqlite which is - // pretty much equal to - // 8MiB if page_size == - // 4096 bytes +pub const DEFAULT_LOG_CHECKPOINT_THRESHOLD: i64 = -1; // Disabled by default pub struct LogicalLog { pub file: Arc, pub offset: u64, /// Size at which we start performing a checkpoint on the logical log. - checkpoint_threshold: u64, + /// Set to -1 to disable automatic checkpointing. + checkpoint_threshold: i64, } /// Log's Header, this will be the 64 bytes in any logical log file. @@ -229,14 +226,17 @@ impl LogicalLog { } pub fn should_checkpoint(&self) -> bool { - self.offset >= self.checkpoint_threshold + if self.checkpoint_threshold < 0 { + return false; + } + self.offset >= self.checkpoint_threshold as u64 } - pub fn set_checkpoint_threshold(&mut self, threshold: u64) { + pub fn set_checkpoint_threshold(&mut self, threshold: i64) { self.checkpoint_threshold = threshold; } - pub fn checkpoint_threshold(&self) -> u64 { + pub fn checkpoint_threshold(&self) -> i64 { self.checkpoint_threshold } } diff --git a/core/mvcc/persistent_storage/mod.rs b/core/mvcc/persistent_storage/mod.rs index 0c5514f6c..0ddf14223 100644 --- a/core/mvcc/persistent_storage/mod.rs +++ b/core/mvcc/persistent_storage/mod.rs @@ -44,11 +44,11 @@ impl Storage { self.logical_log.read().should_checkpoint() } - pub fn set_checkpoint_threshold(&self, threshold: u64) { + pub fn set_checkpoint_threshold(&self, threshold: i64) { self.logical_log.write().set_checkpoint_threshold(threshold) } - pub fn checkpoint_threshold(&self) -> u64 { + pub fn checkpoint_threshold(&self) -> i64 { self.logical_log.read().checkpoint_threshold() } } diff --git a/core/translate/pragma.rs b/core/translate/pragma.rs index 19542adad..149cfafa1 100644 --- a/core/translate/pragma.rs +++ b/core/translate/pragma.rs @@ -380,8 +380,10 @@ fn update_pragma( } PragmaName::MvccCheckpointThreshold => { let threshold = match parse_signed_number(&value)? { - Value::Integer(size) if size > 0 => size as u64, - _ => bail_parse_error!("mvcc_checkpoint_threshold must be a positive integer"), + Value::Integer(size) if size >= -1 => size, + _ => bail_parse_error!( + "mvcc_checkpoint_threshold must be -1, 0, or a positive integer" + ), }; connection.set_mvcc_checkpoint_threshold(threshold)?; @@ -699,7 +701,7 @@ fn query_pragma( PragmaName::MvccCheckpointThreshold => { let threshold = connection.mvcc_checkpoint_threshold()?; let register = program.alloc_register(); - program.emit_int(threshold as i64, register); + program.emit_int(threshold, register); program.emit_result_row(register, 1); program.add_pragma_result_column(pragma.to_string()); Ok((program, TransactionMode::None)) From f92d19ddeb1dcd0eb4f4f6a651a0ba37bd2c02a4 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 8 Oct 2025 09:20:44 +0300 Subject: [PATCH 085/428] Tweak README a bit --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 00207893f..dffd2421e 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ Turso Database is an in-process SQL database written in Rust, compatible with SQ * **SQLite compatibility** for SQL dialect, file formats, and the C API [see [document](COMPAT.md) for details] * **Change data capture (CDC)** for real-time tracking of database changes. -* **Language support** for +* **Multi-language support** for * [Go](https://github.com/tursodatabase/turso-go) * [JavaScript](bindings/javascript) * [Java](bindings/java) From 722c906ca6bcd69eee9811eb0e028f4bf0066ac2 Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Wed, 8 Oct 2025 15:43:27 +0900 Subject: [PATCH 086/428] Change variable names --- .github/workflows/java-publish.yml | 8 ++++---- bindings/java/gradle/publish.gradle.kts | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/java-publish.yml b/.github/workflows/java-publish.yml index 1d94a3ed1..ec42744da 100644 --- a/.github/workflows/java-publish.yml +++ b/.github/workflows/java-publish.yml @@ -126,10 +126,10 @@ jobs: - name: Publish to Maven Central env: - MAVEN_CENTRAL_USERNAME: ${{ secrets.MAVEN_CENTRAL_USERNAME }} - MAVEN_CENTRAL_PASSWORD: ${{ secrets.MAVEN_CENTRAL_PASSWORD }} - GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} - GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + MAVEN_UPLOAD_USERNAME: ${{ secrets.MAVEN_UPLOAD_USERNAME }} + MAVEN_UPLOAD_PASSWORD: ${{ secrets.MAVEN_UPLOAD_PASSWORD }} + MAVEN_SIGNING_KEY: ${{ secrets.MAVEN_SIGNING_KEY }} + MAVEN_SIGNING_PASSPHRASE: ${{ secrets.MAVEN_SIGNING_PASSPHRASE }} run: | echo "Building, signing, and publishing to Maven Central..." ./gradlew clean publishToMavenCentral --no-daemon --stacktrace diff --git a/bindings/java/gradle/publish.gradle.kts b/bindings/java/gradle/publish.gradle.kts index 38f797305..97fd1dc41 100644 --- a/bindings/java/gradle/publish.gradle.kts +++ b/bindings/java/gradle/publish.gradle.kts @@ -52,8 +52,8 @@ configure { setRequired(true) // For CI/GitHub Actions: use in-memory keys - val signingKey = providers.environmentVariable("GPG_PRIVATE_KEY").orNull - val signingPassword = providers.environmentVariable("GPG_PASSPHRASE").orNull + val signingKey = providers.environmentVariable("MAVEN_SIGNING_KEY").orNull + val signingPassword = providers.environmentVariable("MAVEN_SIGNING_PASSPHRASE").orNull if (signingKey != null && signingPassword != null) { // CI mode: use in-memory keys @@ -222,12 +222,12 @@ tasks.register("publishToMavenCentral") { createMavenCentralBundle.get().mustRunAfter("publish") doLast { - val username = providers.environmentVariable("MAVEN_CENTRAL_USERNAME").orNull - val password = providers.environmentVariable("MAVEN_CENTRAL_PASSWORD").orNull + val username = providers.environmentVariable("MAVEN_UPLOAD_USERNAME").orNull + val password = providers.environmentVariable("MAVEN_UPLOAD_PASSWORD").orNull val bundleFile = createMavenCentralBundle.get().archiveFile.get().asFile - require(username != null) { "MAVEN_CENTRAL_USERNAME environment variable must be set" } - require(password != null) { "MAVEN_CENTRAL_PASSWORD environment variable must be set" } + require(username != null) { "MAVEN_UPLOAD_USERNAME environment variable must be set" } + require(password != null) { "MAVEN_UPLOAD_PASSWORD environment variable must be set" } require(bundleFile.exists()) { "Bundle file does not exist: ${bundleFile.absolutePath}" } logger.lifecycle("Uploading bundle to Maven Central Portal...") From 247fa6ce77f7f3153d6922993330bc7e9f424bde Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Wed, 8 Oct 2025 15:47:21 +0900 Subject: [PATCH 087/428] Nit --- reference/publish-on-central | 1 + 1 file changed, 1 insertion(+) create mode 160000 reference/publish-on-central diff --git a/reference/publish-on-central b/reference/publish-on-central new file mode 160000 index 000000000..47ae6afdc --- /dev/null +++ b/reference/publish-on-central @@ -0,0 +1 @@ +Subproject commit 47ae6afdcd23c514a93db44c2d66322c0d76224a From a911cff0bdb23acc33607441843b4c7ec9ff8088 Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Wed, 8 Oct 2025 15:54:51 +0900 Subject: [PATCH 088/428] Remove unnecessary directory --- reference/publish-on-central | 1 - 1 file changed, 1 deletion(-) delete mode 160000 reference/publish-on-central diff --git a/reference/publish-on-central b/reference/publish-on-central deleted file mode 160000 index 47ae6afdc..000000000 --- a/reference/publish-on-central +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 47ae6afdcd23c514a93db44c2d66322c0d76224a From 88b4b86a48d890093008f2eb0fb65c936e865a24 Mon Sep 17 00:00:00 2001 From: Kim Seon Woo <69591622+seonwoo960000@users.noreply.github.com> Date: Wed, 8 Oct 2025 15:55:36 +0900 Subject: [PATCH 089/428] Revert removed file --- .github/data/limbo-cargo-output.txt | 222 ++++++++++++++++++++++++++++ 1 file changed, 222 insertions(+) create mode 100644 .github/data/limbo-cargo-output.txt diff --git a/.github/data/limbo-cargo-output.txt b/.github/data/limbo-cargo-output.txt new file mode 100644 index 000000000..5a33c5fd0 --- /dev/null +++ b/.github/data/limbo-cargo-output.txt @@ -0,0 +1,222 @@ +Benchmarking limbo/Prepare statement: 'SELECT 1' +Benchmarking limbo/Prepare statement: 'SELECT 1': Warming up for 3.0000 s +Benchmarking limbo/Prepare statement: 'SELECT 1': Collecting 100 samples in estimated 5.0062 s (2.4M iterations) +Benchmarking limbo/Prepare statement: 'SELECT 1': Analyzing +limbo/Prepare statement: 'SELECT 1' + time: [2.0631 µs 2.0688 µs 2.0763 µs] + thrpt: [481.63 Kelem/s 483.37 Kelem/s 484.70 Kelem/s] +Found 13 outliers among 100 measurements (13.00%) + 1 (1.00%) low severe + 5 (5.00%) high mild + 7 (7.00%) high severe +Benchmarking limbo/Prepare statement: 'SELECT * FROM users LIMIT 1' +Benchmarking limbo/Prepare statement: 'SELECT * FROM users LIMIT 1': Warming up for 3.0000 s +Benchmarking limbo/Prepare statement: 'SELECT * FROM users LIMIT 1': Collecting 100 samples in estimated 5.0159 s (1.3M iterations) +Benchmarking limbo/Prepare statement: 'SELECT * FROM users LIMIT 1': Analyzing +limbo/Prepare statement: 'SELECT * FROM users LIMIT 1' + time: [3.9747 µs 3.9842 µs 3.9937 µs] + thrpt: [250.39 Kelem/s 250.99 Kelem/s 251.59 Kelem/s] +Found 17 outliers among 100 measurements (17.00%) + 5 (5.00%) low severe + 3 (3.00%) low mild + 3 (3.00%) high mild + 6 (6.00%) high severe +Benchmarking limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)... +Benchmarking limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)...: Warming up for 3.0000 s +Benchmarking limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)...: Collecting 100 samples in estimated 5.0099 s (500k iterations) +Benchmarking limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)...: Analyzing +limbo/Prepare statement: 'SELECT first_name, count(1) FROM users GROUP BY first_name HAVING count(1)... + time: [10.220 µs 10.280 µs 10.358 µs] + thrpt: [96.544 Kelem/s 97.281 Kelem/s 97.846 Kelem/s] +Found 13 outliers among 100 measurements (13.00%) + 1 (1.00%) low severe + 2 (2.00%) high mild + 10 (10.00%) high severe +Benchmarking limbo/Execute prepared statement: 'SELECT 1' +Benchmarking limbo/Execute prepared statement: 'SELECT 1': Warming up for 3.0000 s +Benchmarking limbo/Execute prepared statement: 'SELECT 1': Collecting 100 samples in estimated 5.0006 s (27M iterations) +Benchmarking limbo/Execute prepared statement: 'SELECT 1': Analyzing +limbo/Execute prepared statement: 'SELECT 1' + time: [181.95 ns 182.33 ns 182.70 ns] + thrpt: [5.4736 Melem/s 5.4844 Melem/s 5.4960 Melem/s] +Found 18 outliers among 100 measurements (18.00%) + 6 (6.00%) low severe + 3 (3.00%) low mild + 6 (6.00%) high mild + 3 (3.00%) high severe +Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1' +Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Warming up for 3.0000 s +Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Collecting 100 samples in estimated 5.0008 s (4.0M iterations) +Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Analyzing +limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 1' + time: [1.2549 µs 1.2572 µs 1.2594 µs] + thrpt: [794.03 Kelem/s 795.44 Kelem/s 796.89 Kelem/s] +Found 15 outliers among 100 measurements (15.00%) + 6 (6.00%) low severe + 3 (3.00%) high mild + 6 (6.00%) high severe +Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100' +Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Warming up for 3.0000 s +Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Collecting 100 samples in estimated 5.0047 s (4.0M iterations) +Benchmarking limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Analyzing +limbo/Execute prepared statement: 'SELECT * FROM users LIMIT 100' + time: [1.2503 µs 1.2528 µs 1.2560 µs] + thrpt: [796.20 Kelem/s 798.23 Kelem/s 799.84 Kelem/s] +Found 14 outliers among 100 measurements (14.00%) + 2 (2.00%) low severe + 1 (1.00%) low mild + 5 (5.00%) high mild + 6 (6.00%) high severe + +Benchmarking rusqlite/Prepare statement: 'SELECT 1' +Benchmarking rusqlite/Prepare statement: 'SELECT 1': Warming up for 3.0000 s +Benchmarking rusqlite/Prepare statement: 'SELECT 1': Collecting 100 samples in estimated 5.0010 s (6.5M iterations) +Benchmarking rusqlite/Prepare statement: 'SELECT 1': Analyzing +rusqlite/Prepare statement: 'SELECT 1' + time: [768.58 ns 770.50 ns 772.43 ns] + thrpt: [1.2946 Melem/s 1.2979 Melem/s 1.3011 Melem/s] +Found 16 outliers among 100 measurements (16.00%) + 5 (5.00%) low severe + 2 (2.00%) low mild + 1 (1.00%) high mild + 8 (8.00%) high severe +Benchmarking rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1' +Benchmarking rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1': Warming up for 3.0000 s +Benchmarking rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1': Collecting 100 samples in estimated 5.0083 s (1.6M iterations) +Benchmarking rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1': Analyzing +rusqlite/Prepare statement: 'SELECT * FROM users LIMIT 1' + time: [3.2006 µs 3.2038 µs 3.2084 µs] + thrpt: [311.68 Kelem/s 312.13 Kelem/s 312.45 Kelem/s] +Found 11 outliers among 100 measurements (11.00%) + 5 (5.00%) low severe + 1 (1.00%) low mild + 2 (2.00%) high mild + 3 (3.00%) high severe +Benchmarking rusqlite/Execute prepared statement: 'SELECT 1' +Benchmarking rusqlite/Execute prepared statement: 'SELECT 1': Warming up for 3.0000 s +Benchmarking rusqlite/Execute prepared statement: 'SELECT 1': Collecting 100 samples in estimated 5.0002 s (82M iterations) +Benchmarking rusqlite/Execute prepared statement: 'SELECT 1': Analyzing +rusqlite/Execute prepared statement: 'SELECT 1' + time: [60.613 ns 60.788 ns 61.098 ns] + thrpt: [16.367 Melem/s 16.451 Melem/s 16.498 Melem/s] +Found 8 outliers among 100 measurements (8.00%) + 1 (1.00%) low mild + 1 (1.00%) high mild + 6 (6.00%) high severe +Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1' +Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Warming up for 3.0000 s +Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Collecting 100 samples in estimated 5.0014 s (4.3M iterations) +Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1': Analyzing +rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 1' + time: [1.1686 µs 1.1702 µs 1.1716 µs] + thrpt: [853.52 Kelem/s 854.55 Kelem/s 855.74 Kelem/s] +Found 13 outliers among 100 measurements (13.00%) + 2 (2.00%) low severe + 1 (1.00%) low mild + 6 (6.00%) high mild + 4 (4.00%) high severe +Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100' +Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Warming up for 3.0000 s +Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Collecting 100 samples in estimated 5.0016 s (4.3M iterations) +Benchmarking rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100': Analyzing +rusqlite/Execute prepared statement: 'SELECT * FROM users LIMIT 100' + time: [1.1643 µs 1.1710 µs 1.1789 µs] + thrpt: [848.26 Kelem/s 853.98 Kelem/s 858.85 Kelem/s] +Found 17 outliers among 100 measurements (17.00%) + 5 (5.00%) low severe + 3 (3.00%) low mild + 4 (4.00%) high mild + 5 (5.00%) high severe + + Running unittests src/lib.rs (target/release/deps/limbo_ext-8b70654a7fccf221) + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + Running unittests src/lib.rs (target/release/deps/limbo_libsql-66ab9a0ee1a27f4c) + +running 1 test +test params::tests::test_serialize_array ... ignored + +test result: ok. 0 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in 0.00s + + Running unittests src/lib.rs (target/release/deps/limbo_macros-669ce5abb31e687e) + Running unittests main.rs (target/release/deps/limbo_sim-d28081335520ff9c) + +running 0 tests + Running unittests src/lib.rs (target/release/deps/limbo_sqlite3-1d7f53c447b1fc8a) + Running unittests src/lib.rs (target/release/deps/limbo_uuid-c0d9c3b2e9e30eee) + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + Running unittests src/lib.rs (target/release/deps/_limbo-9562557e55b9bbed) + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + Running unittests src/lib.rs (target/release/deps/sqlite3_parser-475bbf4de217f28a) + +running 38 tests +test lexer::sql::test::alter_add_column_primary_key ... ignored +test lexer::sql::test::alter_add_column_unique ... ignored +test lexer::sql::test::alter_rename_same ... ignored +test lexer::sql::test::cast_without_typename ... ignored +test lexer::sql::test::column_specified_more_than_once ... ignored +test lexer::sql::test::count_named_placeholders ... ignored +test lexer::sql::test::count_numbered_placeholders ... ignored +test lexer::sql::test::count_placeholders ... ignored +test lexer::sql::test::count_unused_placeholders ... ignored +test lexer::sql::test::create_strict_table_generated_column ... ignored +test lexer::sql::test::create_strict_table_missing_datatype ... ignored +test lexer::sql::test::create_strict_table_unknown_datatype ... ignored +test lexer::sql::test::create_table_with_only_generated_column ... ignored +test lexer::sql::test::create_table_without_column ... ignored +test lexer::sql::test::create_table_without_rowid_missing_pk ... ignored +test lexer::sql::test::create_temporary_table_with_qualified_name ... ignored +test lexer::sql::test::create_view_duplicate_column_name ... ignored + Running benches/keyword.rs (target/release/deps/keyword-255a8492c4f11233) +test lexer::sql::test::create_view_mismatch_count ... ignored +test lexer::sql::test::delete_order_by_without_limit ... ignored +test lexer::sql::test::duplicate_column ... ignored +test lexer::sql::test::extra_comments_between_statements ... ignored +test lexer::sql::test::extra_semicolons_between_statements ... ignored +test lexer::sql::test::foreign_key_on_column ... ignored +test lexer::sql::test::indexed_by_clause_within_triggers ... ignored +test lexer::sql::test::insert_default_values ... ignored +test lexer::sql::test::insert_mismatch_count ... ignored +test lexer::sql::test::missing_join_clause ... ignored +test lexer::sql::test::natural_join_on ... ignored +test lexer::sql::test::only_semicolons_no_statements ... ignored +test lexer::sql::test::qualified_table_name_within_triggers ... ignored +test lexer::sql::test::selects_compound_mismatch_columns_count ... ignored +test lexer::sql::test::unknown_table_option ... ignored +test lexer::sql::test::update_order_by_without_limit ... ignored +test lexer::sql::test::values_mismatch_columns_count ... ignored +test lexer::sql::test::vtab_args ... ignored +test lexer::sql::tests::fallible_iterator ... ignored +test lexer::sql::tests::invalid_number_literal ... ignored +test parser::ast::test::test_dequote ... ignored + +test result: ok. 0 passed; 0 failed; 38 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s From e28ece6950f6a17592508bf6dc9db4825848dff4 Mon Sep 17 00:00:00 2001 From: Dave Warnock Date: Wed, 8 Oct 2025 08:03:43 +0100 Subject: [PATCH 090/428] Update Doc regarding rusqlite compatibility Make it clear that it's not a drop in replacement for rusqlite as that isn't async --- bindings/rust/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/rust/README.md b/bindings/rust/README.md index a8c1ce4c4..bc0701de9 100644 --- a/bindings/rust/README.md +++ b/bindings/rust/README.md @@ -4,7 +4,7 @@ The next evolution of SQLite: A high-performance, SQLite-compatible database lib ## Features -- **SQLite Compatible**: Drop-in replacement for rusqlite with familiar API +- **SQLite Compatible**: Similar interface to rusqlite with familiar API apart from being async (using Tokio) - **High Performance**: Built with Rust for maximum speed and efficiency - **Async/Await Support**: Native async operations with tokio support - **In-Process**: No network overhead, runs directly in your application From 435a472bcad2d551b58f6a912e0827b4495dbf6b Mon Sep 17 00:00:00 2001 From: Konstantinos Artopoulos Date: Wed, 8 Oct 2025 11:09:20 +0300 Subject: [PATCH 091/428] fix(cli): clippy error --- cli/app.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/app.rs b/cli/app.rs index 490818817..e4fccbb34 100644 --- a/cli/app.rs +++ b/cli/app.rs @@ -1327,7 +1327,7 @@ impl Limbo { } Ok(()) }; - match self.handle_row(&sql, handler) { + match self.handle_row(sql, handler) { Ok(_) => Ok(db_names), Err(e) => Err(anyhow::anyhow!("Error in database list: {}", e)), } From 17a578a4967d5e6e37c956c3393b7f2e59f3b996 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 8 Oct 2025 11:47:39 +0300 Subject: [PATCH 092/428] bindings/rust: Tokio is not required The bindings use just async Rust so any async runtime should work. --- bindings/rust/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/rust/README.md b/bindings/rust/README.md index bc0701de9..88d358c20 100644 --- a/bindings/rust/README.md +++ b/bindings/rust/README.md @@ -4,7 +4,7 @@ The next evolution of SQLite: A high-performance, SQLite-compatible database lib ## Features -- **SQLite Compatible**: Similar interface to rusqlite with familiar API apart from being async (using Tokio) +- **SQLite Compatible**: Similar interface to rusqlite with familiar API apart from using async Rust - **High Performance**: Built with Rust for maximum speed and efficiency - **Async/Await Support**: Native async operations with tokio support - **In-Process**: No network overhead, runs directly in your application From 4fe3282d8e34d487deb758610e4401618688c612 Mon Sep 17 00:00:00 2001 From: Duy Dang <55247256+ddwalias@users.noreply.github.com> Date: Wed, 8 Oct 2025 21:06:13 +0700 Subject: [PATCH 093/428] Fix missing let from merge --- core/mvcc/database/checkpoint_state_machine.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/mvcc/database/checkpoint_state_machine.rs b/core/mvcc/database/checkpoint_state_machine.rs index 4ba2903fa..16b5d3a4a 100644 --- a/core/mvcc/database/checkpoint_state_machine.rs +++ b/core/mvcc/database/checkpoint_state_machine.rs @@ -195,7 +195,7 @@ impl CheckpointStateMachine { let row_data = ImmutableRecord::from_bin_record(version.row.data.clone()); let mut record_cursor = RecordCursor::new(); record_cursor.parse_full_header(&row_data).unwrap(); - let ValueRef::Integer(root_page) = + if let ValueRef::Integer(root_page) = record_cursor.get_value(&row_data, 3).unwrap() { if is_delete { From f138448da2ba39876c01b0d5d1a92cedfc054fb2 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Thu, 9 Oct 2025 08:09:31 +0530 Subject: [PATCH 094/428] don't allow duplicate col names in create table --- core/translate/schema.rs | 13 +++++++++++++ testing/create_table.test | 5 +++++ 2 files changed, 18 insertions(+) diff --git a/core/translate/schema.rs b/core/translate/schema.rs index a78e1b630..f7fde9f51 100644 --- a/core/translate/schema.rs +++ b/core/translate/schema.rs @@ -40,6 +40,19 @@ pub fn translate_create_table( bail_parse_error!("TEMPORARY table not supported yet"); } + // maybe we can do better than this. + if let ast::CreateTableBody::ColumnsAndConstraints { columns, .. } = &body { + for i in 0..columns.len() { + let name1 = normalize_ident(columns[i].col_name.as_str()); + for j in (i + 1)..columns.len() { + let name2 = normalize_ident(columns[j].col_name.as_str()); + if name1 == name2 { + bail_parse_error!("duplicate column name: {}", columns[i].col_name.as_str()); + } + } + } + } + // Check for STRICT mode without experimental flag if let ast::CreateTableBody::ColumnsAndConstraints { options, .. } = &body { if options.contains(ast::TableOptions::STRICT) && !connection.experimental_strict_enabled() diff --git a/testing/create_table.test b/testing/create_table.test index 7eb7bea7d..e961c017f 100755 --- a/testing/create_table.test +++ b/testing/create_table.test @@ -53,3 +53,8 @@ do_execsql_test_on_specific_db {:memory:} col-named-rowid { update t set rowid = 1; -- should allow regular update and not throw unique constraint select count(*) from t where rowid = 1; } {3} + +# https://github.com/tursodatabase/turso/issues/3637 +do_execsql_test_in_memory_any_error create_table_duplicate_column_names { + CREATE TABLE t(a, a); +} \ No newline at end of file From f0d9ead19f24d73dc4849b9bc72b348420fec2e8 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Thu, 9 Oct 2025 08:17:46 +0530 Subject: [PATCH 095/428] add more tests refactor and use sort_unstable_by_key --- core/translate/schema.rs | 17 +++++++++-------- testing/create_table.test | 10 +++++++++- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/core/translate/schema.rs b/core/translate/schema.rs index f7fde9f51..ce8f40f7f 100644 --- a/core/translate/schema.rs +++ b/core/translate/schema.rs @@ -40,15 +40,16 @@ pub fn translate_create_table( bail_parse_error!("TEMPORARY table not supported yet"); } - // maybe we can do better than this. if let ast::CreateTableBody::ColumnsAndConstraints { columns, .. } = &body { - for i in 0..columns.len() { - let name1 = normalize_ident(columns[i].col_name.as_str()); - for j in (i + 1)..columns.len() { - let name2 = normalize_ident(columns[j].col_name.as_str()); - if name1 == name2 { - bail_parse_error!("duplicate column name: {}", columns[i].col_name.as_str()); - } + let mut indexes = (0..columns.len()).collect::>(); + + indexes.sort_unstable_by_key(|&i| normalize_ident(columns[i].col_name.as_str())); + + for w in indexes.windows(2) { + let name1 = normalize_ident(columns[w[0]].col_name.as_str()); + let name2 = normalize_ident(columns[w[1]].col_name.as_str()); + if name1 == name2 { + bail_parse_error!("duplicate column name: {}", columns[w[0]].col_name.as_str()); } } } diff --git a/testing/create_table.test b/testing/create_table.test index e961c017f..289e0e98f 100755 --- a/testing/create_table.test +++ b/testing/create_table.test @@ -57,4 +57,12 @@ do_execsql_test_on_specific_db {:memory:} col-named-rowid { # https://github.com/tursodatabase/turso/issues/3637 do_execsql_test_in_memory_any_error create_table_duplicate_column_names { CREATE TABLE t(a, a); -} \ No newline at end of file +} + +do_execsql_test_in_memory_any_error create_table_duplicate_column_names_case_insensitive { + CREATE TABLE t(A, a); +} + +do_execsql_test_in_memory_any_error create_table_duplicate_column_names_quoted { + CREATE TABLE t("a", a); +} From f54b1132ca6535a40e8a8e184608ac1f91026997 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 9 Oct 2025 01:16:10 -0300 Subject: [PATCH 096/428] ignore `Property::AllTableHaveExpectedContent` when counting stats, so we can generate more interesting interactions --- simulator/generation/plan.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index a468e87ec..c8925cd59 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -201,6 +201,11 @@ impl InteractionPlan { for interactions in &self.plan { match &interactions.interactions { InteractionsType::Property(property) => { + if matches!(property, Property::AllTableHaveExpectedContent { .. }) { + // Skip Property::AllTableHaveExpectedContent when counting stats + // this allows us to generate more relevant interactions as we count less Select's to the Stats + continue; + } for interaction in &property.interactions(interactions.connection_index) { if let InteractionType::Query(query) = &interaction.interaction { query_stat(query, &mut stats); From 414f92d0a09e5d901ba8a2823defd68c28c02076 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Thu, 9 Oct 2025 13:31:47 +0530 Subject: [PATCH 097/428] go back to for loop cleanup clippy --- core/translate/schema.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/core/translate/schema.rs b/core/translate/schema.rs index ce8f40f7f..7191de03e 100644 --- a/core/translate/schema.rs +++ b/core/translate/schema.rs @@ -41,15 +41,17 @@ pub fn translate_create_table( } if let ast::CreateTableBody::ColumnsAndConstraints { columns, .. } = &body { - let mut indexes = (0..columns.len()).collect::>(); + for i in 0..columns.len() { + let col_i = &columns[i]; - indexes.sort_unstable_by_key(|&i| normalize_ident(columns[i].col_name.as_str())); - - for w in indexes.windows(2) { - let name1 = normalize_ident(columns[w[0]].col_name.as_str()); - let name2 = normalize_ident(columns[w[1]].col_name.as_str()); - if name1 == name2 { - bail_parse_error!("duplicate column name: {}", columns[w[0]].col_name.as_str()); + for j in &columns[(i + 1)..] { + if col_i + .col_name + .as_str() + .eq_ignore_ascii_case(j.col_name.as_str()) + { + bail_parse_error!("duplicate column name: {}", j.col_name.as_str()); + } } } } From 4313f57ecb38841b7d5dc64e2b0c3646bb466ba2 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Sun, 31 Aug 2025 18:06:08 +0400 Subject: [PATCH 098/428] Optimize range scans --- core/translate/main_loop.rs | 110 ++-- core/translate/optimizer/access_method.rs | 34 +- core/translate/optimizer/constraints.rs | 204 ++++-- core/translate/optimizer/cost.rs | 20 +- core/translate/optimizer/join.rs | 53 +- core/translate/optimizer/mod.rs | 766 +++++++++------------- core/translate/plan.rs | 105 ++- testing/select.test | 170 +++++ 8 files changed, 799 insertions(+), 663 deletions(-) diff --git a/core/translate/main_loop.rs b/core/translate/main_loop.rs index 0b11d0c3e..03e00f01d 100644 --- a/core/translate/main_loop.rs +++ b/core/translate/main_loop.rs @@ -23,7 +23,7 @@ use crate::{ schema::{Affinity, Index, IndexColumn, Table}, translate::{ emitter::prepare_cdc_if_necessary, - plan::{DistinctCtx, Distinctness, Scan}, + plan::{DistinctCtx, Distinctness, Scan, SeekKeyComponent}, result_row::emit_select_result, }, types::SeekOp, @@ -606,7 +606,10 @@ pub fn open_loop( ); }; - let start_reg = program.alloc_registers(seek_def.key.len()); + let max_registers = seek_def + .size(&seek_def.start) + .max(seek_def.size(&seek_def.end)); + let start_reg = program.alloc_registers(max_registers); emit_seek( program, table_references, @@ -1146,7 +1149,8 @@ fn emit_seek( seek_index: Option<&Arc>, ) -> Result<()> { let is_index = seek_index.is_some(); - let Some(seek) = seek_def.seek.as_ref() else { + if seek_def.prefix.is_empty() && matches!(seek_def.start.last_component, SeekKeyComponent::None) + { // If there is no seek key, we start from the first or last row of the index, // depending on the iteration direction. // @@ -1196,43 +1200,34 @@ fn emit_seek( }; // We allocated registers for the full index key, but our seek key might not use the full index key. // See [crate::translate::optimizer::build_seek_def] for more details about in which cases we do and don't use the full index key. - for i in 0..seek_def.key.len() { + for (i, key) in seek_def.iter(&seek_def.start).enumerate() { let reg = start_reg + i; - if i >= seek.len { - if seek.null_pad { - program.emit_insn(Insn::Null { - dest: reg, - dest_end: None, - }); - } - } else { - let expr = &seek_def.key[i].0; - translate_expr_no_constant_opt( - program, - Some(tables), - expr, - reg, - &t_ctx.resolver, - NoConstantOptReason::RegisterReuse, - )?; - // If the seek key column is not verifiably non-NULL, we need check whether it is NULL, - // and if so, jump to the loop end. - // This is to avoid returning rows for e.g. SELECT * FROM t WHERE t.x > NULL, - // which would erroneously return all rows from t, as NULL is lower than any non-NULL value in index key comparisons. - if !expr.is_nonnull(tables) { - program.emit_insn(Insn::IsNull { + match key { + SeekKeyComponent::Expr(expr) => { + translate_expr_no_constant_opt( + program, + Some(tables), + expr, reg, - target_pc: loop_end, - }); + &t_ctx.resolver, + NoConstantOptReason::RegisterReuse, + )?; + // If the seek key column is not verifiably non-NULL, we need check whether it is NULL, + // and if so, jump to the loop end. + // This is to avoid returning rows for e.g. SELECT * FROM t WHERE t.x > NULL, + // which would erroneously return all rows from t, as NULL is lower than any non-NULL value in index key comparisons. + if !expr.is_nonnull(tables) { + program.emit_insn(Insn::IsNull { + reg, + target_pc: loop_end, + }); + } } + SeekKeyComponent::None => unreachable!("None component is not possible in iterator"), } } - let num_regs = if seek.null_pad { - seek_def.key.len() - } else { - seek.len - }; - match seek.op { + let num_regs = seek_def.size(&seek_def.start); + match seek_def.start.op { SeekOp::GE { eq_only } => program.emit_insn(Insn::SeekGE { is_index, cursor_id: seek_cursor_id, @@ -1289,7 +1284,7 @@ fn emit_seek_termination( seek_index: Option<&Arc>, ) -> Result<()> { let is_index = seek_index.is_some(); - let Some(termination) = seek_def.termination.as_ref() else { + if seek_def.prefix.is_empty() && matches!(seek_def.end.last_component, SeekKeyComponent::None) { program.preassign_label_to_next_insn(loop_start); // If we will encounter NULLs in the index at the end of iteration (Forward + Desc OR Backward + Asc) // then, we must explicitly stop before them as seek always has some bound condition over indexed column (e.g. c < ?, c >= ?, ...) @@ -1320,46 +1315,23 @@ fn emit_seek_termination( return Ok(()); }; - // How many non-NULL values were used for seeking. - let seek_len = seek_def.seek.as_ref().map_or(0, |seek| seek.len); + // For all index key values apart from the last one, we are guaranteed to use the same values + // as these values were emited from common prefix, so we don't need to emit them again. - // How many values will be used for the termination condition. - let num_regs = if termination.null_pad { - seek_def.key.len() - } else { - termination.len - }; - for i in 0..seek_def.key.len() { - let reg = start_reg + i; - let is_last = i == seek_def.key.len() - 1; - - // For all index key values apart from the last one, we are guaranteed to use the same values - // as were used for the seek, so we don't need to emit them again. - if i < seek_len && !is_last { - continue; - } - // For the last index key value, we need to emit a NULL if the termination condition is NULL-padded. - // See [SeekKey::null_pad] and [crate::translate::optimizer::build_seek_def] for why this is the case. - if i >= termination.len && !termination.null_pad { - continue; - } - if is_last && termination.null_pad { - program.emit_insn(Insn::Null { - dest: reg, - dest_end: None, - }); - // if the seek key is shorter than the termination key, we need to translate the remaining suffix of the termination key. - // if not, we just reuse what was emitted for the seek. - } else if seek_len < termination.len { + let num_regs = seek_def.size(&seek_def.end); + let last_reg = start_reg + seek_def.prefix.len(); + match &seek_def.end.last_component { + SeekKeyComponent::Expr(expr) => { translate_expr_no_constant_opt( program, Some(tables), - &seek_def.key[i].0, - reg, + expr, + last_reg, &t_ctx.resolver, NoConstantOptReason::RegisterReuse, )?; } + SeekKeyComponent::None => {} } program.preassign_label_to_next_insn(loop_start); let mut rowid_reg = None; @@ -1385,7 +1357,7 @@ fn emit_seek_termination( Some(Affinity::Numeric) }; } - match (is_index, termination.op) { + match (is_index, seek_def.end.op) { (true, SeekOp::GE { .. }) => program.emit_insn(Insn::IdxGE { cursor_id: seek_cursor_id, start_reg, diff --git a/core/translate/optimizer/access_method.rs b/core/translate/optimizer/access_method.rs index 35e5e2718..883ae789c 100644 --- a/core/translate/optimizer/access_method.rs +++ b/core/translate/optimizer/access_method.rs @@ -3,7 +3,9 @@ use std::sync::Arc; use turso_ext::{ConstraintInfo, ConstraintUsage, ResultCode}; use turso_parser::ast::SortOrder; -use crate::translate::optimizer::constraints::{convert_to_vtab_constraint, Constraint}; +use crate::translate::optimizer::constraints::{ + convert_to_vtab_constraint, Constraint, RangeConstraintRef, +}; use crate::{ schema::{Index, Table}, translate::plan::{IterationDirection, JoinOrderMember, JoinedTable}, @@ -12,24 +14,24 @@ use crate::{ }; use super::{ - constraints::{usable_constraints_for_join_order, ConstraintRef, TableConstraints}, + constraints::{usable_constraints_for_join_order, TableConstraints}, cost::{estimate_cost_for_scan_or_seek, Cost, IndexInfo}, order::OrderTarget, }; #[derive(Debug, Clone)] /// Represents a way to access a table. -pub struct AccessMethod<'a> { +pub struct AccessMethod { /// The estimated number of page fetches. /// We are ignoring CPU cost for now. pub cost: Cost, /// Table-type specific access method details. - pub params: AccessMethodParams<'a>, + pub params: AccessMethodParams, } /// Table‑specific details of how an [`AccessMethod`] operates. #[derive(Debug, Clone)] -pub enum AccessMethodParams<'a> { +pub enum AccessMethodParams { BTreeTable { /// The direction of iteration for the access method. /// Typically this is backwards only if it helps satisfy an [OrderTarget]. @@ -39,7 +41,7 @@ pub enum AccessMethodParams<'a> { /// The constraint references that are being used, if any. /// An empty list of constraint refs means a scan (full table or index); /// a non-empty list means a search. - constraint_refs: &'a [ConstraintRef], + constraint_refs: Vec, }, VirtualTable { /// Index identifier returned by the table's `best_index` method. @@ -57,13 +59,13 @@ pub enum AccessMethodParams<'a> { } /// Return the best [AccessMethod] for a given join order. -pub fn find_best_access_method_for_join_order<'a>( +pub fn find_best_access_method_for_join_order( rhs_table: &JoinedTable, - rhs_constraints: &'a TableConstraints, + rhs_constraints: &TableConstraints, join_order: &[JoinOrderMember], maybe_order_target: Option<&OrderTarget>, input_cardinality: f64, -) -> Result>> { +) -> Result> { match &rhs_table.table { Table::BTree(_) => find_best_access_method_for_btree( rhs_table, @@ -85,19 +87,19 @@ pub fn find_best_access_method_for_join_order<'a>( } } -fn find_best_access_method_for_btree<'a>( +fn find_best_access_method_for_btree( rhs_table: &JoinedTable, - rhs_constraints: &'a TableConstraints, + rhs_constraints: &TableConstraints, join_order: &[JoinOrderMember], maybe_order_target: Option<&OrderTarget>, input_cardinality: f64, -) -> Result>> { +) -> Result> { let table_no = join_order.last().unwrap().table_id; let mut best_cost = estimate_cost_for_scan_or_seek(None, &[], &[], input_cardinality); let mut best_params = AccessMethodParams::BTreeTable { iter_dir: IterationDirection::Forwards, index: None, - constraint_refs: &[], + constraint_refs: vec![], }; let rowid_column_idx = rhs_table.columns().iter().position(|c| c.is_rowid_alias); @@ -123,7 +125,7 @@ fn find_best_access_method_for_btree<'a>( let cost = estimate_cost_for_scan_or_seek( Some(index_info), &rhs_constraints.constraints, - usable_constraint_refs, + &usable_constraint_refs, input_cardinality, ); @@ -192,12 +194,12 @@ fn find_best_access_method_for_btree<'a>( })) } -fn find_best_access_method_for_vtab<'a>( +fn find_best_access_method_for_vtab( vtab: &VirtualTable, constraints: &[Constraint], join_order: &[JoinOrderMember], input_cardinality: f64, -) -> Result>> { +) -> Result> { let vtab_constraints = convert_to_vtab_constraint(constraints, join_order); // TODO: get proper order_by information to pass to the vtab. diff --git a/core/translate/optimizer/constraints.rs b/core/translate/optimizer/constraints.rs index 62d77f3c9..7e32f17c5 100644 --- a/core/translate/optimizer/constraints.rs +++ b/core/translate/optimizer/constraints.rs @@ -67,17 +67,17 @@ pub enum BinaryExprSide { } impl Constraint { - /// Get the constraining expression, e.g. '2+3' from 't.x = 2+3' - pub fn get_constraining_expr(&self, where_clause: &[WhereTerm]) -> ast::Expr { + /// Get the constraining expression and operator, e.g. ('>=', '2+3') from 't.x >= 2+3' + pub fn get_constraining_expr(&self, where_clause: &[WhereTerm]) -> (ast::Operator, ast::Expr) { let (idx, side) = self.where_clause_pos; let where_term = &where_clause[idx]; let Ok(Some((lhs, _, rhs))) = as_binary_components(&where_term.expr) else { panic!("Expected a valid binary expression"); }; if side == BinaryExprSide::Lhs { - lhs.clone() + (self.operator, lhs.clone()) } else { - rhs.clone() + (self.operator, rhs.clone()) } } @@ -108,19 +108,6 @@ pub struct ConstraintRef { pub sort_order: SortOrder, } -impl ConstraintRef { - /// Convert the constraint to a column usable in a [crate::translate::plan::SeekDef::key]. - pub fn as_seek_key_column( - &self, - constraints: &[Constraint], - where_clause: &[WhereTerm], - ) -> (ast::Expr, SortOrder) { - let constraint = &constraints[self.constraint_vec_pos]; - let constraining_expr = constraint.get_constraining_expr(where_clause); - (constraining_expr, self.sort_order) - } -} - /// A collection of [ConstraintRef]s for a given index, or if index is None, for the table's rowid index. /// For example, given a table `T (x,y,z)` with an index `T_I (y desc,z)`, take the following query: /// ```sql @@ -150,6 +137,7 @@ pub struct ConstraintUseCandidate { /// The index that may be used to satisfy the constraints. If none, the table's rowid index is used. pub index: Option>, /// References to the constraints that may be used as an access path for the index. + /// Refs are sorted by [ConstraintRef::index_col_pos] pub refs: Vec, } @@ -193,6 +181,9 @@ fn estimate_selectivity(column: &Column, op: ast::Operator) -> f64 { /// Precompute all potentially usable [Constraints] from a WHERE clause. /// The resulting list of [TableConstraints] is then used to evaluate the best access methods for various join orders. +/// +/// This method do not perform much filtering of constraints and delegate this tasks to the consumers of the method +/// Consumers must inspect [TableConstraints] and its candidates and pick best constraints for optimized access pub fn constraints_from_where_clause( where_clause: &[WhereTerm], table_references: &TableReferences, @@ -379,24 +370,6 @@ pub fn constraints_from_where_clause( for candidate in cs.candidates.iter_mut() { // Sort by index_col_pos, ascending -- index columns must be consumed in contiguous order. candidate.refs.sort_by_key(|cref| cref.index_col_pos); - // Deduplicate by position, keeping first occurrence (which will be equality if one exists, since the constraints vec is sorted that way) - candidate.refs.dedup_by_key(|cref| cref.index_col_pos); - // Truncate at first gap in positions -- again, index columns must be consumed in contiguous order. - let contiguous_len = candidate - .refs - .iter() - .enumerate() - .take_while(|(i, cref)| cref.index_col_pos == *i) - .count(); - candidate.refs.truncate(contiguous_len); - - // Truncate after the first inequality, since the left-prefix rule of indexes requires that all constraints but the last one must be equalities; - // again see: https://www.solarwinds.com/blog/the-left-prefix-index-rule - if let Some(first_inequality) = candidate.refs.iter().position(|cref| { - cs.constraints[cref.constraint_vec_pos].operator != ast::Operator::Equals - }) { - candidate.refs.truncate(first_inequality + 1); - } } cs.candidates.retain(|c| { if let Some(idx) = &c.index { @@ -413,6 +386,87 @@ pub fn constraints_from_where_clause( Ok(constraints) } +#[derive(Clone, Debug)] +/// A reference to a [Constraint]s in a [TableConstraints] for single column. +/// +/// This is specialized version of [ConstraintRef] which specifically holds range-like constraints: +/// - x = 10 (eq is set) +/// - x >= 10, x > 10 (lower_bound is set) +/// - x <= 10, x < 10 (upper_bound is set) +/// - x > 10 AND x < 20 (both lower_bound and upper_bound are set) +/// +/// eq, lower_bound and upper_bound holds None or position of the constraint in the [Constraint] array +pub struct RangeConstraintRef { + /// position of the column in the table definition + pub table_col_pos: usize, + /// position of the column in the index definition + pub index_col_pos: usize, + /// sort order for the column in the index definition + pub sort_order: SortOrder, + /// equality constraint + pub eq: Option, + /// lower bound constraint (either > or >=) + pub lower_bound: Option, + /// upper bound constraint (either < or <=) + pub upper_bound: Option, +} + +#[derive(Debug, Clone)] +/// Represent seek range which can be used in query planning to emit range scan over table or index +pub struct SeekRangeConstraint { + pub sort_order: SortOrder, + pub eq: Option<(ast::Operator, ast::Expr)>, + pub lower_bound: Option<(ast::Operator, ast::Expr)>, + pub upper_bound: Option<(ast::Operator, ast::Expr)>, +} + +impl SeekRangeConstraint { + pub fn new_eq(sort_order: SortOrder, eq: (ast::Operator, ast::Expr)) -> Self { + Self { + sort_order, + eq: Some(eq), + lower_bound: None, + upper_bound: None, + } + } + pub fn new_range( + sort_order: SortOrder, + lower_bound: Option<(ast::Operator, ast::Expr)>, + upper_bound: Option<(ast::Operator, ast::Expr)>, + ) -> Self { + assert!(lower_bound.is_some() || upper_bound.is_some()); + Self { + sort_order, + eq: None, + lower_bound, + upper_bound, + } + } +} + +impl RangeConstraintRef { + /// Convert the [RangeConstraintRef] to a [SeekRangeConstraint] usable in a [crate::translate::plan::SeekDef::key]. + pub fn as_seek_range_constraint( + &self, + constraints: &[Constraint], + where_clause: &[WhereTerm], + ) -> SeekRangeConstraint { + if let Some(eq) = self.eq { + return SeekRangeConstraint::new_eq( + self.sort_order, + constraints[eq].get_constraining_expr(where_clause), + ); + } + SeekRangeConstraint::new_range( + self.sort_order, + self.lower_bound + .map(|x| constraints[x].get_constraining_expr(where_clause)), + self.upper_bound + .map(|x| constraints[x].get_constraining_expr(where_clause)), + ) + } +} + /// Find which [Constraint]s are usable for a given join order. /// Returns a slice of the references to the constraints that are usable. /// A constraint is considered usable for a given table if all of the other tables referenced by the constraint @@ -421,28 +475,88 @@ pub fn usable_constraints_for_join_order<'a>( constraints: &'a [Constraint], refs: &'a [ConstraintRef], join_order: &[JoinOrderMember], -) -> &'a [ConstraintRef] { +) -> Vec { + debug_assert!(refs.is_sorted_by_key(|x| x.index_col_pos)); + let table_idx = join_order.last().unwrap().original_idx; - let mut usable_until = 0; + let lhs_mask = TableMask::from_table_number_iter( + join_order + .iter() + .take(join_order.len() - 1) + .map(|j| j.original_idx), + ); + let mut usable: Vec = Vec::new(); + let mut last_column_pos = 0; for cref in refs.iter() { let constraint = &constraints[cref.constraint_vec_pos]; let other_side_refers_to_self = constraint.lhs_mask.contains_table(table_idx); if other_side_refers_to_self { break; } - let lhs_mask = TableMask::from_table_number_iter( - join_order - .iter() - .take(join_order.len() - 1) - .map(|j| j.original_idx), - ); let all_required_tables_are_on_left_side = lhs_mask.contains_all(&constraint.lhs_mask); if !all_required_tables_are_on_left_side { break; } - usable_until += 1; + if Some(cref.index_col_pos) == usable.last().map(|x| x.index_col_pos) { + assert_eq!(cref.sort_order, usable.last().unwrap().sort_order); + assert_eq!(cref.index_col_pos, usable.last().unwrap().index_col_pos); + assert_eq!( + constraints[cref.constraint_vec_pos].table_col_pos, + usable.last().unwrap().table_col_pos + ); + // if we already have eq constraint - we must not add anything to it + // otherwise, we can incorrectly consume filters which will not be used in the access path + if usable.last().unwrap().eq.is_some() { + continue; + } + match constraints[cref.constraint_vec_pos].operator { + ast::Operator::Greater | ast::Operator::GreaterEquals => { + usable.last_mut().unwrap().lower_bound = Some(cref.constraint_vec_pos); + } + ast::Operator::Less | ast::Operator::LessEquals => { + usable.last_mut().unwrap().upper_bound = Some(cref.constraint_vec_pos); + } + _ => {} + } + continue; + } + if cref.index_col_pos != last_column_pos { + break; + } + if usable.last().is_some_and(|x| x.eq.is_none()) { + break; + } + let constraint_group = match constraints[cref.constraint_vec_pos].operator { + ast::Operator::Equals => RangeConstraintRef { + table_col_pos: constraints[cref.constraint_vec_pos].table_col_pos, + index_col_pos: cref.index_col_pos, + sort_order: cref.sort_order, + eq: Some(cref.constraint_vec_pos), + lower_bound: None, + upper_bound: None, + }, + ast::Operator::Greater | ast::Operator::GreaterEquals => RangeConstraintRef { + table_col_pos: constraints[cref.constraint_vec_pos].table_col_pos, + index_col_pos: cref.index_col_pos, + sort_order: cref.sort_order, + eq: None, + lower_bound: Some(cref.constraint_vec_pos), + upper_bound: None, + }, + ast::Operator::Less | ast::Operator::LessEquals => RangeConstraintRef { + table_col_pos: constraints[cref.constraint_vec_pos].table_col_pos, + index_col_pos: cref.index_col_pos, + sort_order: cref.sort_order, + eq: None, + lower_bound: None, + upper_bound: Some(cref.constraint_vec_pos), + }, + _ => continue, + }; + usable.push(constraint_group); + last_column_pos += 1; } - &refs[..usable_until] + usable } fn can_use_partial_index(index: &Index, query_where_clause: &[WhereTerm]) -> bool { diff --git a/core/translate/optimizer/cost.rs b/core/translate/optimizer/cost.rs index 460fa9b0a..c96947e5d 100644 --- a/core/translate/optimizer/cost.rs +++ b/core/translate/optimizer/cost.rs @@ -1,4 +1,6 @@ -use super::constraints::{Constraint, ConstraintRef}; +use crate::translate::optimizer::constraints::RangeConstraintRef; + +use super::constraints::Constraint; /// A simple newtype wrapper over a f64 that represents the cost of an operation. /// @@ -43,7 +45,7 @@ pub fn estimate_page_io_cost(rowcount: f64) -> Cost { pub fn estimate_cost_for_scan_or_seek( index_info: Option, constraints: &[Constraint], - usable_constraint_refs: &[ConstraintRef], + usable_constraint_refs: &[RangeConstraintRef], input_cardinality: f64, ) -> Cost { let Some(index_info) = index_info else { @@ -55,8 +57,18 @@ pub fn estimate_cost_for_scan_or_seek( let selectivity_multiplier: f64 = usable_constraint_refs .iter() .map(|cref| { - let constraint = &constraints[cref.constraint_vec_pos]; - constraint.selectivity + if let Some(eq) = cref.eq { + let constraint = &constraints[eq]; + return constraint.selectivity; + } + let mut selectivity = 1.0; + if let Some(lower_bound) = cref.lower_bound { + selectivity *= constraints[lower_bound].selectivity; + } + if let Some(upper_bound) = cref.upper_bound { + selectivity *= constraints[upper_bound].selectivity; + } + selectivity }) .product(); diff --git a/core/translate/optimizer/join.rs b/core/translate/optimizer/join.rs index fe1a41bbb..79b80174a 100644 --- a/core/translate/optimizer/join.rs +++ b/core/translate/optimizer/join.rs @@ -49,7 +49,7 @@ pub fn join_lhs_and_rhs<'a>( rhs_constraints: &'a TableConstraints, join_order: &[JoinOrderMember], maybe_order_target: Option<&OrderTarget>, - access_methods_arena: &'a RefCell>>, + access_methods_arena: &'a RefCell>, cost_upper_bound: Cost, ) -> Result> { // The input cardinality for this join is the output cardinality of the previous join. @@ -125,7 +125,7 @@ pub fn compute_best_join_order<'a>( joined_tables: &[JoinedTable], maybe_order_target: Option<&OrderTarget>, constraints: &'a [TableConstraints], - access_methods_arena: &'a RefCell>>, + access_methods_arena: &'a RefCell>, ) -> Result> { // Skip work if we have no tables to consider. if joined_tables.is_empty() { @@ -403,7 +403,7 @@ pub fn compute_best_join_order<'a>( pub fn compute_naive_left_deep_plan<'a>( joined_tables: &[JoinedTable], maybe_order_target: Option<&OrderTarget>, - access_methods_arena: &'a RefCell>>, + access_methods_arena: &'a RefCell>, constraints: &'a [TableConstraints], ) -> Result> { let n = joined_tables.len(); @@ -509,9 +509,9 @@ mod tests { use crate::{ schema::{BTreeTable, Column, Index, IndexColumn, Table, Type}, translate::{ - optimizer::access_method::AccessMethodParams, - optimizer::constraints::{ - constraints_from_where_clause, BinaryExprSide, ConstraintRef, + optimizer::{ + access_method::AccessMethodParams, + constraints::{constraints_from_where_clause, BinaryExprSide, RangeConstraintRef}, }, plan::{ ColumnUsedMask, IterationDirection, JoinInfo, Operation, TableReferences, WhereTerm, @@ -632,8 +632,7 @@ mod tests { assert!(iter_dir == IterationDirection::Forwards); assert!(constraint_refs.len() == 1); assert!( - table_constraints[0].constraints[constraint_refs[0].constraint_vec_pos] - .where_clause_pos + table_constraints[0].constraints[constraint_refs[0].eq.unwrap()].where_clause_pos == (0, BinaryExprSide::Rhs) ); } @@ -701,8 +700,7 @@ mod tests { assert!(index.as_ref().unwrap().name == "sqlite_autoindex_test_table_1"); assert!(constraint_refs.len() == 1); assert!( - table_constraints[0].constraints[constraint_refs[0].constraint_vec_pos] - .where_clause_pos + table_constraints[0].constraints[constraint_refs[0].eq.unwrap()].where_clause_pos == (0, BinaryExprSide::Rhs) ); } @@ -784,8 +782,7 @@ mod tests { assert!(index.as_ref().unwrap().name == "index1"); assert!(constraint_refs.len() == 1); assert!( - table_constraints[TABLE1].constraints[constraint_refs[0].constraint_vec_pos] - .where_clause_pos + table_constraints[TABLE1].constraints[constraint_refs[0].eq.unwrap()].where_clause_pos == (0, BinaryExprSide::Rhs) ); } @@ -960,8 +957,8 @@ mod tests { assert!(iter_dir == IterationDirection::Forwards); assert!(index.as_ref().unwrap().name == "sqlite_autoindex_customers_1"); assert!(constraint_refs.len() == 1); - let constraint = &table_constraints[TABLE_NO_CUSTOMERS].constraints - [constraint_refs[0].constraint_vec_pos]; + let constraint = + &table_constraints[TABLE_NO_CUSTOMERS].constraints[constraint_refs[0].eq.unwrap()]; assert!(constraint.lhs_mask.is_empty()); let access_method = &access_methods_arena.borrow()[best_plan.data[1].1]; @@ -970,7 +967,7 @@ mod tests { assert!(index.as_ref().unwrap().name == "orders_customer_id_idx"); assert!(constraint_refs.len() == 1); let constraint = - &table_constraints[TABLE_NO_ORDERS].constraints[constraint_refs[0].constraint_vec_pos]; + &table_constraints[TABLE_NO_ORDERS].constraints[constraint_refs[0].eq.unwrap()]; assert!(constraint.lhs_mask.contains_table(TABLE_NO_CUSTOMERS)); let access_method = &access_methods_arena.borrow()[best_plan.data[2].1]; @@ -978,8 +975,8 @@ mod tests { assert!(iter_dir == IterationDirection::Forwards); assert!(index.as_ref().unwrap().name == "order_items_order_id_idx"); assert!(constraint_refs.len() == 1); - let constraint = &table_constraints[TABLE_NO_ORDER_ITEMS].constraints - [constraint_refs[0].constraint_vec_pos]; + let constraint = + &table_constraints[TABLE_NO_ORDER_ITEMS].constraints[constraint_refs[0].eq.unwrap()]; assert!(constraint.lhs_mask.contains_table(TABLE_NO_ORDERS)); } @@ -1187,8 +1184,8 @@ mod tests { assert!(iter_dir == IterationDirection::Forwards); assert!(index.is_none()); assert!(constraint_refs.len() == 1); - let constraint = &table_constraints[*table_number].constraints - [constraint_refs[0].constraint_vec_pos]; + let constraint = + &table_constraints[*table_number].constraints[constraint_refs[0].eq.unwrap()]; assert!(constraint.lhs_mask.contains_table(FACT_TABLE_IDX)); assert!(constraint.operator == ast::Operator::Equals); } @@ -1280,7 +1277,7 @@ mod tests { assert!(iter_dir == IterationDirection::Forwards); assert!(index.is_none()); assert!(constraint_refs.len() == 1); - let constraint = &table_constraints.constraints[constraint_refs[0].constraint_vec_pos]; + let constraint = &table_constraints.constraints[constraint_refs[0].eq.unwrap()]; assert!(constraint.lhs_mask.contains_table(i - 1)); assert!(constraint.operator == ast::Operator::Equals); } @@ -1481,7 +1478,7 @@ mod tests { let (_, index, constraint_refs) = _as_btree(access_method); assert!(index.as_ref().is_some_and(|i| i.name == "idx1")); assert!(constraint_refs.len() == 1); - let constraint = &table_constraints[0].constraints[constraint_refs[0].constraint_vec_pos]; + let constraint = &table_constraints[0].constraints[constraint_refs[0].eq.unwrap()]; assert!(constraint.operator == ast::Operator::Equals); assert!(constraint.table_col_pos == 0); // c1 } @@ -1608,10 +1605,10 @@ mod tests { let (_, index, constraint_refs) = _as_btree(access_method); assert!(index.as_ref().is_some_and(|i| i.name == "idx1")); assert!(constraint_refs.len() == 2); - let constraint = &table_constraints[0].constraints[constraint_refs[0].constraint_vec_pos]; + let constraint = &table_constraints[0].constraints[constraint_refs[0].eq.unwrap()]; assert!(constraint.operator == ast::Operator::Equals); assert!(constraint.table_col_pos == 0); // c1 - let constraint = &table_constraints[0].constraints[constraint_refs[1].constraint_vec_pos]; + let constraint = &table_constraints[0].constraints[constraint_refs[1].lower_bound.unwrap()]; assert!(constraint.operator == ast::Operator::Greater); assert!(constraint.table_col_pos == 1); // c2 } @@ -1711,9 +1708,13 @@ mod tests { Expr::Literal(ast::Literal::Numeric(value.to_string())) } - fn _as_btree<'a>( - access_method: &AccessMethod<'a>, - ) -> (IterationDirection, Option>, &'a [ConstraintRef]) { + fn _as_btree( + access_method: &AccessMethod, + ) -> ( + IterationDirection, + Option>, + &'_ [RangeConstraintRef], + ) { match &access_method.params { AccessMethodParams::BTreeTable { iter_dir, diff --git a/core/translate/optimizer/mod.rs b/core/translate/optimizer/mod.rs index bd4ecdd2d..f053e7e7e 100644 --- a/core/translate/optimizer/mod.rs +++ b/core/translate/optimizer/mod.rs @@ -18,8 +18,11 @@ use turso_parser::ast::{self, Expr, SortOrder}; use crate::{ schema::{Index, IndexColumn, Schema, Table}, translate::{ - optimizer::access_method::AccessMethodParams, optimizer::constraints::TableConstraints, - plan::Scan, plan::TerminationKey, + optimizer::{ + access_method::AccessMethodParams, + constraints::{RangeConstraintRef, SeekRangeConstraint, TableConstraints}, + }, + plan::{Scan, SeekKeyComponent}, }, types::SeekOp, LimboError, Result, @@ -343,13 +346,15 @@ fn optimize_table_access( .filter(|c| c.usable) .cloned() .collect::>(); - let temp_constraint_refs = (0..usable_constraints.len()) + let mut temp_constraint_refs = (0..usable_constraints.len()) .map(|i| ConstraintRef { constraint_vec_pos: i, - index_col_pos: usable_constraints[i].table_col_pos, + index_col_pos: i, sort_order: SortOrder::Asc, }) .collect::>(); + temp_constraint_refs.sort_by_key(|x| x.index_col_pos); + let usable_constraint_refs = usable_constraints_for_join_order( &usable_constraints, &temp_constraint_refs, @@ -362,17 +367,14 @@ fn optimize_table_access( }); continue; } - let ephemeral_index = ephemeral_index_build( - &joined_tables[table_idx], - &usable_constraints, - usable_constraint_refs, - ); + let ephemeral_index = + ephemeral_index_build(&joined_tables[table_idx], &usable_constraint_refs); let ephemeral_index = Arc::new(ephemeral_index); joined_tables[table_idx].op = Operation::Search(Search::Seek { index: Some(ephemeral_index), seek_def: build_seek_def_from_constraints( - &usable_constraints, - usable_constraint_refs, + &table_constraints.constraints, + &usable_constraint_refs, *iter_dir, where_clause, )?, @@ -383,25 +385,29 @@ fn optimize_table_access( .as_ref() .is_some_and(|join_info| join_info.outer); for cref in constraint_refs.iter() { - let constraint = - &constraints_per_table[table_idx].constraints[cref.constraint_vec_pos]; - let where_term = &mut where_clause[constraint.where_clause_pos.0]; - assert!( - !where_term.consumed, - "trying to consume a where clause term twice: {where_term:?}", - ); - if is_outer_join && where_term.from_outer_join.is_none() { - // Don't consume WHERE terms from outer joins if the where term is not part of the outer join condition. Consider: - // - SELECT * FROM t1 LEFT JOIN t2 ON false WHERE t2.id = 5 - // - there is no row in t2 where t2.id = 5 - // This should never produce any rows with null columns for t2 (because NULL != 5), but if we consume 't2.id = 5' to use it as a seek key, - // this will cause a null row to be emitted for EVERY row of t1. - // Note: in most cases like this, the LEFT JOIN could just be converted into an INNER JOIN (because e.g. t2.id=5 statically excludes any null rows), - // but that optimization should not be done here - it should be done before the join order optimization happens. - continue; + for constraint_vec_pos in &[cref.eq, cref.lower_bound, cref.upper_bound] { + let Some(constraint_vec_pos) = constraint_vec_pos else { + continue; + }; + let constraint = + &constraints_per_table[table_idx].constraints[*constraint_vec_pos]; + let where_term = &mut where_clause[constraint.where_clause_pos.0]; + assert!( + !where_term.consumed, + "trying to consume a where clause term twice: {where_term:?}", + ); + if is_outer_join && where_term.from_outer_join.is_none() { + // Don't consume WHERE terms from outer joins if the where term is not part of the outer join condition. Consider: + // - SELECT * FROM t1 LEFT JOIN t2 ON false WHERE t2.id = 5 + // - there is no row in t2 where t2.id = 5 + // This should never produce any rows with null columns for t2 (because NULL != 5), but if we consume 't2.id = 5' to use it as a seek key, + // this will cause a null row to be emitted for EVERY row of t1. + // Note: in most cases like this, the LEFT JOIN could just be converted into an INNER JOIN (because e.g. t2.id=5 statically excludes any null rows), + // but that optimization should not be done here - it should be done before the join order optimization happens. + continue; + } + where_term.consumed = true; } - - where_clause[constraint.where_clause_pos.0].consumed = true; } if let Some(index) = &index { joined_tables[table_idx].op = Operation::Search(Search::Seek { @@ -419,13 +425,14 @@ fn optimize_table_access( constraint_refs.len() == 1, "expected exactly one constraint for rowid seek, got {constraint_refs:?}" ); - let constraint = &constraints_per_table[table_idx].constraints - [constraint_refs[0].constraint_vec_pos]; - joined_tables[table_idx].op = match constraint.operator { - ast::Operator::Equals => Operation::Search(Search::RowidEq { - cmp_expr: constraint.get_constraining_expr(where_clause), - }), - _ => Operation::Search(Search::Seek { + joined_tables[table_idx].op = if let Some(eq) = constraint_refs[0].eq { + Operation::Search(Search::RowidEq { + cmp_expr: constraints_per_table[table_idx].constraints[eq] + .get_constraining_expr(where_clause) + .1, + }) + } else { + Operation::Search(Search::Seek { index: None, seek_def: build_seek_def_from_constraints( &constraints_per_table[table_idx].constraints, @@ -433,7 +440,7 @@ fn optimize_table_access( *iter_dir, where_clause, )?, - }), + }) }; } } @@ -505,7 +512,7 @@ fn build_vtab_scan_op( if usage.omit { where_clause[constraint.where_clause_pos.0].consumed = true; } - let expr = constraint.get_constraining_expr(where_clause); + let (_, expr) = constraint.get_constraining_expr(where_clause); constraints[zero_based_argv_index] = Some(expr); arg_count += 1; } @@ -864,8 +871,7 @@ impl Optimizable for ast::Expr { fn ephemeral_index_build( table_reference: &JoinedTable, - constraints: &[Constraint], - constraint_refs: &[ConstraintRef], + constraint_refs: &[RangeConstraintRef], ) -> Index { let mut ephemeral_columns: Vec = table_reference .columns() @@ -886,11 +892,11 @@ fn ephemeral_index_build( let a_constraint = constraint_refs .iter() .enumerate() - .find(|(_, c)| constraints[c.constraint_vec_pos].table_col_pos == a.pos_in_table); + .find(|(_, c)| c.table_col_pos == a.pos_in_table); let b_constraint = constraint_refs .iter() .enumerate() - .find(|(_, c)| constraints[c.constraint_vec_pos].table_col_pos == b.pos_in_table); + .find(|(_, c)| c.table_col_pos == b.pos_in_table); match (a_constraint, b_constraint) { (Some(_), None) => Ordering::Less, (None, Some(_)) => Ordering::Greater, @@ -922,7 +928,7 @@ fn ephemeral_index_build( /// Build a [SeekDef] for a given list of [Constraint]s pub fn build_seek_def_from_constraints( constraints: &[Constraint], - constraint_refs: &[ConstraintRef], + constraint_refs: &[RangeConstraintRef], iter_dir: IterationDirection, where_clause: &[WhereTerm], ) -> Result { @@ -933,472 +939,294 @@ pub fn build_seek_def_from_constraints( // Extract the key values and operators let key = constraint_refs .iter() - .map(|cref| cref.as_seek_key_column(constraints, where_clause)) + .map(|cref| cref.as_seek_range_constraint(constraints, where_clause)) .collect(); - // We know all but potentially the last term is an equality, so we can use the operator of the last term - // to form the SeekOp - let op = constraints[constraint_refs.last().unwrap().constraint_vec_pos].operator; - - let seek_def = build_seek_def(op, iter_dir, key)?; + let seek_def = build_seek_def(iter_dir, key)?; Ok(seek_def) } -/// Build a [SeekDef] for a given comparison operator and index key. +/// Build a [SeekDef] for a given [SeekRangeConstraint] and [IterationDirection]. /// To be usable as a seek key, all but potentially the last term must be equalities. -/// The last term can be a nonequality. -/// The comparison operator referred to by `op` is the operator of the last term. +/// The last term can be a nonequality (range with potentially one unbounded range). /// /// There are two parts to the seek definition: -/// 1. The [SeekKey], which specifies the key that we will use to seek to the first row that matches the index key. -/// 2. The [TerminationKey], which specifies the key that we will use to terminate the index scan that follows the seek. +/// 1. start [SeekKey], which specifies the key that we will use to seek to the first row that matches the index key. +/// 2. end [SeekKey], which specifies the key that we will use to terminate the index scan that follows the seek. /// -/// There are some nuances to how, and which parts of, the index key can be used in the [SeekKey] and [TerminationKey], +/// There are some nuances to how, and which parts of, the index key can be used in the start and end [SeekKey]s, /// depending on the operator and iteration order. This function explains those nuances inline when dealing with /// each case. /// /// But to illustrate the general idea, consider the following examples: /// /// 1. For example, having two conditions like (x>10 AND y>20) cannot be used as a valid [SeekKey] GT(x:10, y:20) -/// because the first row greater than (x:10, y:20) might be (x:10, y:21), which does not satisfy the where clause. +/// because the first row greater than (x:10, y:20) might be (x:11, y:19), which does not satisfy the where clause. /// In this case, only GT(x:10) must be used as the [SeekKey], and rows with y <= 20 must be filtered as a regular condition expression for each value of x. /// /// 2. In contrast, having (x=10 AND y>20) forms a valid index key GT(x:10, y:20) because after the seek, we can simply terminate as soon as x > 10, -/// i.e. use GT(x:10, y:20) as the [SeekKey] and GT(x:10) as the [TerminationKey]. +/// i.e. use GT(x:10, y:20) as the start [SeekKey] and GT(x:10) as the end. /// /// The preceding examples are for an ascending index. The logic is similar for descending indexes, but an important distinction is that /// since a descending index is laid out in reverse order, the comparison operators are reversed, e.g. LT becomes GT, LE becomes GE, etc. /// So when you see e.g. a SeekOp::GT below for a descending index, it actually means that we are seeking the first row where the index key is LESS than the seek key. /// fn build_seek_def( - op: ast::Operator, iter_dir: IterationDirection, - key: Vec<(ast::Expr, SortOrder)>, + mut key: Vec, ) -> Result { - let key_len = key.len(); - let sort_order_of_last_key = key.last().unwrap().1; + assert!(!key.is_empty()); + let last = key.last().unwrap(); + + // if we searching for exact key - emit definition immediately with prefix as a full key + if last.eq.is_some() { + let (start_op, end_op) = match iter_dir { + IterationDirection::Forwards => (SeekOp::GE { eq_only: true }, SeekOp::GT), + IterationDirection::Backwards => (SeekOp::LE { eq_only: true }, SeekOp::LT), + }; + return Ok(SeekDef { + prefix: key, + iter_dir, + start: SeekKey { + last_component: SeekKeyComponent::None, + op: start_op, + }, + end: SeekKey { + last_component: SeekKeyComponent::None, + op: end_op, + }, + }); + } + assert!(last.lower_bound.is_some() || last.upper_bound.is_some()); + + // pop last key as we will do some form of range search + let last = key.pop().unwrap(); + + // after that all key components must be equality constraints + debug_assert!(key.iter().all(|k| k.eq.is_some())); // For the commented examples below, keep in mind that since a descending index is laid out in reverse order, the comparison operators are reversed, e.g. LT becomes GT, LE becomes GE, etc. // Also keep in mind that index keys are compared based on the number of columns given, so for example: // - if key is GT(x:10), then (x=10, y=usize::MAX) is not GT because only X is compared. (x=11, y=) is GT. // - if key is GT(x:10, y:20), then (x=10, y=21) is GT because both X and Y are compared. // - if key is GT(x:10, y:NULL), then (x=10, y=0) is GT because NULL is always LT in index key comparisons. - Ok(match (iter_dir, op) { - // Forwards, EQ: - // Example: (x=10 AND y=20) - // Seek key: start from the first GE(x:10, y:20) - // Termination key: end at the first GT(x:10, y:20) - // Ascending vs descending doesn't matter because all the comparisons are equalities. - (IterationDirection::Forwards, ast::Operator::Equals) => SeekDef { - key, - iter_dir, - seek: Some(SeekKey { - len: key_len, - null_pad: false, - op: SeekOp::GE { eq_only: true }, - }), - termination: Some(TerminationKey { - len: key_len, - null_pad: false, - op: SeekOp::GT, - }), - }, - // Forwards, GT: - // Ascending index example: (x=10 AND y>20) - // Seek key: start from the first GT(x:10, y:20), e.g. (x=10, y=21) - // Termination key: end at the first GT(x:10), e.g. (x=11, y=0) - // - // Descending index example: (x=10 AND y>20) - // Seek key: start from the first LE(x:10), e.g. (x=10, y=usize::MAX), so reversed -> GE(x:10) - // Termination key: end at the first LE(x:10, y:20), e.g. (x=10, y=20) so reversed -> GE(x:10, y:20) - (IterationDirection::Forwards, ast::Operator::Greater) => { - let (seek_key_len, termination_key_len, seek_op, termination_op) = - if sort_order_of_last_key == SortOrder::Asc { - (key_len, key_len - 1, SeekOp::GT, SeekOp::GT) - } else { - ( - key_len - 1, - key_len, - SeekOp::LE { eq_only: false }.reverse(), - SeekOp::LE { eq_only: false }.reverse(), - ) - }; + Ok(match iter_dir { + IterationDirection::Forwards => { + let (start, end) = match last.sort_order { + SortOrder::Asc => { + let start = match last.lower_bound { + // Forwards, Asc, GT: (x=10 AND y>20) + // Start key: start from the first GT(x:10, y:20) + Some((ast::Operator::Greater, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::GT, + }, + // Forwards, Asc, GE: (x=10 AND y>=20) + // Start key: start from the first GE(x:10, y:20) + Some((ast::Operator::GreaterEquals, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::GE { eq_only: false }, + }, + // Forwards, Asc, None, (x=10 AND y<30) + // Start key: start from the first GE(x:10) + None => SeekKey { + last_component: SeekKeyComponent::None, + op: SeekOp::GE { eq_only: false }, + }, + Some((op, _)) => { + crate::bail_parse_error!("build_seek_def: invalid operator: {:?}", op,) + } + }; + let end = match last.upper_bound { + // Forwards, Asc, LT, (x=10 AND y<30) + // End key: end at first GE(x:10, y:30) + Some((ast::Operator::Less, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::GE { eq_only: false }, + }, + // Forwards, Asc, LE, (x=10 AND y<=30) + // End key: end at first GT(x:10, y:30) + Some((ast::Operator::LessEquals, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::GT, + }, + // Forwards, Asc, None, (x=10 AND y>20) + // End key: end at first GT(x:10) + None => SeekKey { + last_component: SeekKeyComponent::None, + op: SeekOp::GT, + }, + Some((op, _)) => { + crate::bail_parse_error!("build_seek_def: invalid operator: {:?}", op,) + } + }; + (start, end) + } + SortOrder::Desc => { + let start = match last.upper_bound { + // Forwards, Desc, LT: (x=10 AND y<30) + // Start key: start from the first GT(x:10, y:30) + Some((ast::Operator::Less, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::GT, + }, + // Forwards, Desc, LE: (x=10 AND y<=30) + // Start key: start from the first GE(x:10, y:30) + Some((ast::Operator::LessEquals, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::GE { eq_only: false }, + }, + // Forwards, Desc, None: (x=10 AND y>20) + // Start key: start from the first GE(x:10) + None => SeekKey { + last_component: SeekKeyComponent::None, + op: SeekOp::GE { eq_only: false }, + }, + Some((op, _)) => { + crate::bail_parse_error!("build_seek_def: invalid operator: {:?}", op,) + } + }; + let end = match last.lower_bound { + // Forwards, Asc, GT, (x=10 AND y>20) + // End key: end at first GE(x:10, y:20) + Some((ast::Operator::Greater, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::GE { eq_only: false }, + }, + // Forwards, Asc, GE, (x=10 AND y>=20) + // End key: end at first GT(x:10, y:20) + Some((ast::Operator::GreaterEquals, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::GT, + }, + // Forwards, Asc, None, (x=10 AND y<30) + // End key: end at first GT(x:10) + None => SeekKey { + last_component: SeekKeyComponent::None, + op: SeekOp::GT, + }, + Some((op, _)) => { + crate::bail_parse_error!("build_seek_def: invalid operator: {:?}", op,) + } + }; + (start, end) + } + }; SeekDef { - key, + prefix: key, iter_dir, - seek: if seek_key_len > 0 { - Some(SeekKey { - len: seek_key_len, - op: seek_op, - null_pad: false, - }) - } else { - None - }, - termination: if termination_key_len > 0 { - Some(TerminationKey { - len: termination_key_len, - op: termination_op, - null_pad: false, - }) - } else { - None - }, + start, + end, } } - // Forwards, GE: - // Ascending index example: (x=10 AND y>=20) - // Seek key: start from the first GE(x:10, y:20), e.g. (x=10, y=20) - // Termination key: end at the first GT(x:10), e.g. (x=11, y=0) - // - // Descending index example: (x=10 AND y>=20) - // Seek key: start from the first LE(x:10), e.g. (x=10, y=usize::MAX), so reversed -> GE(x:10) - // Termination key: end at the first LT(x:10, y:20), e.g. (x=10, y=19), so reversed -> GT(x:10, y:20) - (IterationDirection::Forwards, ast::Operator::GreaterEquals) => { - let (seek_key_len, termination_key_len, seek_op, termination_op) = - if sort_order_of_last_key == SortOrder::Asc { - ( - key_len, - key_len - 1, - SeekOp::GE { eq_only: false }, - SeekOp::GT, - ) - } else { - ( - key_len - 1, - key_len, - SeekOp::LE { eq_only: false }.reverse(), - SeekOp::LT.reverse(), - ) - }; + IterationDirection::Backwards => { + let (start, end) = match last.sort_order { + SortOrder::Asc => { + let start = match last.upper_bound { + // Backwards, Asc, LT: (x=10 AND y<30) + // Start key: start from the first LT(x:10, y:30) + Some((ast::Operator::Less, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::LT, + }, + // Backwards, Asc, LT: (x=10 AND y<=30) + // Start key: start from the first LE(x:10, y:30) + Some((ast::Operator::LessEquals, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::LE { eq_only: false }, + }, + // Backwards, Asc, None: (x=10 AND y>20) + // Start key: start from the first LE(x:10) + None => SeekKey { + last_component: SeekKeyComponent::None, + op: SeekOp::LE { eq_only: false }, + }, + Some((op, _)) => { + crate::bail_parse_error!("build_seek_def: invalid operator: {:?}", op) + } + }; + let end = match last.lower_bound { + // Backwards, Asc, GT, (x=10 AND y>20) + // End key: end at first LE(x:10, y:20) + Some((ast::Operator::Greater, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::LE { eq_only: false }, + }, + // Backwards, Asc, GT, (x=10 AND y>=20) + // End key: end at first LT(x:10, y:20) + Some((ast::Operator::GreaterEquals, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::LT, + }, + // Backwards, Asc, None, (x=10 AND y<30) + // End key: end at first LT(x:10) + None => SeekKey { + last_component: SeekKeyComponent::None, + op: SeekOp::LT, + }, + Some((op, _)) => { + crate::bail_parse_error!("build_seek_def: invalid operator: {:?}", op,) + } + }; + (start, end) + } + SortOrder::Desc => { + let start = match last.lower_bound { + // Backwards, Desc, LT: (x=10 AND y>20) + // Start key: start from the first LT(x:10, y:20) + Some((ast::Operator::Greater, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::LT, + }, + // Backwards, Desc, LE: (x=10 AND y>=20) + // Start key: start from the first LE(x:10, y:20) + Some((ast::Operator::GreaterEquals, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::LE { eq_only: false }, + }, + // Backwards, Desc, LE: (x=10 AND y<30) + // Start key: start from the first LE(x:10) + None => SeekKey { + last_component: SeekKeyComponent::None, + op: SeekOp::LE { eq_only: false }, + }, + Some((op, _)) => { + crate::bail_parse_error!("build_seek_def: invalid operator: {:?}", op,) + } + }; + let end = match last.upper_bound { + // Backwards, Desc, LT, (x=10 AND y<30) + // End key: end at first LE(x:10, y:30) + Some((ast::Operator::Less, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::LE { eq_only: false }, + }, + // Backwards, Desc, LT, (x=10 AND y<=30) + // End key: end at first LT(x:10, y:30) + Some((ast::Operator::LessEquals, bound)) => SeekKey { + last_component: SeekKeyComponent::Expr(bound), + op: SeekOp::LT, + }, + // Backwards, Desc, LT, (x=10 AND y>20) + // End key: end at first LT(x:10) + None => SeekKey { + last_component: SeekKeyComponent::None, + op: SeekOp::LT, + }, + Some((op, _)) => { + crate::bail_parse_error!("build_seek_def: invalid operator: {:?}", op,) + } + }; + (start, end) + } + }; SeekDef { - key, + prefix: key, iter_dir, - seek: if seek_key_len > 0 { - Some(SeekKey { - len: seek_key_len, - op: seek_op, - null_pad: false, - }) - } else { - None - }, - termination: if termination_key_len > 0 { - Some(TerminationKey { - len: termination_key_len, - op: termination_op, - null_pad: false, - }) - } else { - None - }, + start, + end, } } - // Forwards, LT: - // Ascending index example: (x=10 AND y<20) - // Seek key: start from the first GT(x:10, y: NULL), e.g. (x=10, y=0) - // Termination key: end at the first GE(x:10, y:20), e.g. (x=10, y=20) - // - // Descending index example: (x=10 AND y<20) - // Seek key: start from the first LT(x:10, y:20), e.g. (x=10, y=19) so reversed -> GT(x:10, y:20) - // Termination key: end at the first LT(x:10), e.g. (x=9, y=usize::MAX), so reversed -> GE(x:10, NULL); i.e. GE the smallest possible (x=10, y) combination (NULL is always LT) - (IterationDirection::Forwards, ast::Operator::Less) => { - let (seek_key_len, termination_key_len, seek_op, termination_op) = - if sort_order_of_last_key == SortOrder::Asc { - ( - key_len - 1, - key_len, - SeekOp::GT, - SeekOp::GE { eq_only: false }, - ) - } else { - ( - key_len, - key_len - 1, - SeekOp::GT, - SeekOp::GE { eq_only: false }, - ) - }; - SeekDef { - key, - iter_dir, - seek: if seek_key_len > 0 { - Some(SeekKey { - len: seek_key_len, - op: seek_op, - null_pad: sort_order_of_last_key == SortOrder::Asc, - }) - } else { - None - }, - termination: if termination_key_len > 0 { - Some(TerminationKey { - len: termination_key_len, - op: termination_op, - null_pad: sort_order_of_last_key == SortOrder::Desc, - }) - } else { - None - }, - } - } - // Forwards, LE: - // Ascending index example: (x=10 AND y<=20) - // Seek key: start from the first GE(x:10, y:NULL), e.g. (x=10, y=0) - // Termination key: end at the first GT(x:10, y:20), e.g. (x=10, y=21) - // - // Descending index example: (x=10 AND y<=20) - // Seek key: start from the first LE(x:10, y:20), e.g. (x=10, y=20) so reversed -> GE(x:10, y:20) - // Termination key: end at the first LT(x:10), e.g. (x=9, y=usize::MAX), so reversed -> GE(x:10, NULL); i.e. GE the smallest possible (x=10, y) combination (NULL is always LT) - (IterationDirection::Forwards, ast::Operator::LessEquals) => { - let (seek_key_len, termination_key_len, seek_op, termination_op) = - if sort_order_of_last_key == SortOrder::Asc { - (key_len - 1, key_len, SeekOp::GT, SeekOp::GT) - } else { - ( - key_len, - key_len - 1, - SeekOp::LE { eq_only: false }.reverse(), - SeekOp::LE { eq_only: false }.reverse(), - ) - }; - SeekDef { - key, - iter_dir, - seek: if seek_key_len > 0 { - Some(SeekKey { - len: seek_key_len, - op: seek_op, - null_pad: sort_order_of_last_key == SortOrder::Asc, - }) - } else { - None - }, - termination: if termination_key_len > 0 { - Some(TerminationKey { - len: termination_key_len, - op: termination_op, - null_pad: sort_order_of_last_key == SortOrder::Desc, - }) - } else { - None - }, - } - } - // Backwards, EQ: - // Example: (x=10 AND y=20) - // Seek key: start from the last LE(x:10, y:20) - // Termination key: end at the first LT(x:10, y:20) - // Ascending vs descending doesn't matter because all the comparisons are equalities. - (IterationDirection::Backwards, ast::Operator::Equals) => SeekDef { - key, - iter_dir, - seek: Some(SeekKey { - len: key_len, - op: SeekOp::LE { eq_only: true }, - null_pad: false, - }), - termination: Some(TerminationKey { - len: key_len, - op: SeekOp::LT, - null_pad: false, - }), - }, - // Backwards, LT: - // Ascending index example: (x=10 AND y<20) - // Seek key: start from the last LT(x:10, y:20), e.g. (x=10, y=19) - // Termination key: end at the first LE(x:10, NULL), e.g. (x=9, y=usize::MAX) - // - // Descending index example: (x=10 AND y<20) - // Seek key: start from the last GT(x:10, y:NULL), e.g. (x=10, y=0) so reversed -> LT(x:10, NULL) - // Termination key: end at the first GE(x:10, y:20), e.g. (x=10, y=20) so reversed -> LE(x:10, y:20) - (IterationDirection::Backwards, ast::Operator::Less) => { - let (seek_key_len, termination_key_len, seek_op, termination_op) = - if sort_order_of_last_key == SortOrder::Asc { - ( - key_len, - key_len - 1, - SeekOp::LT, - SeekOp::LE { eq_only: false }, - ) - } else { - ( - key_len - 1, - key_len, - SeekOp::GT.reverse(), - SeekOp::GE { eq_only: false }.reverse(), - ) - }; - SeekDef { - key, - iter_dir, - seek: if seek_key_len > 0 { - Some(SeekKey { - len: seek_key_len, - op: seek_op, - null_pad: sort_order_of_last_key == SortOrder::Desc, - }) - } else { - None - }, - termination: if termination_key_len > 0 { - Some(TerminationKey { - len: termination_key_len, - op: termination_op, - null_pad: sort_order_of_last_key == SortOrder::Asc, - }) - } else { - None - }, - } - } - // Backwards, LE: - // Ascending index example: (x=10 AND y<=20) - // Seek key: start from the last LE(x:10, y:20), e.g. (x=10, y=20) - // Termination key: end at the first LT(x:10, NULL), e.g. (x=9, y=usize::MAX) - // - // Descending index example: (x=10 AND y<=20) - // Seek key: start from the last GT(x:10, NULL), e.g. (x=10, y=0) so reversed -> LT(x:10, NULL) - // Termination key: end at the first GT(x:10, y:20), e.g. (x=10, y=21) so reversed -> LT(x:10, y:20) - (IterationDirection::Backwards, ast::Operator::LessEquals) => { - let (seek_key_len, termination_key_len, seek_op, termination_op) = - if sort_order_of_last_key == SortOrder::Asc { - ( - key_len, - key_len - 1, - SeekOp::LE { eq_only: false }, - SeekOp::LE { eq_only: false }, - ) - } else { - ( - key_len - 1, - key_len, - SeekOp::GT.reverse(), - SeekOp::GT.reverse(), - ) - }; - SeekDef { - key, - iter_dir, - seek: if seek_key_len > 0 { - Some(SeekKey { - len: seek_key_len, - op: seek_op, - null_pad: sort_order_of_last_key == SortOrder::Desc, - }) - } else { - None - }, - termination: if termination_key_len > 0 { - Some(TerminationKey { - len: termination_key_len, - op: termination_op, - null_pad: sort_order_of_last_key == SortOrder::Asc, - }) - } else { - None - }, - } - } - // Backwards, GT: - // Ascending index example: (x=10 AND y>20) - // Seek key: start from the last LE(x:10), e.g. (x=10, y=usize::MAX) - // Termination key: end at the first LE(x:10, y:20), e.g. (x=10, y=20) - // - // Descending index example: (x=10 AND y>20) - // Seek key: start from the last GT(x:10, y:20), e.g. (x=10, y=21) so reversed -> LT(x:10, y:20) - // Termination key: end at the first GT(x:10), e.g. (x=11, y=0) so reversed -> LT(x:10) - (IterationDirection::Backwards, ast::Operator::Greater) => { - let (seek_key_len, termination_key_len, seek_op, termination_op) = - if sort_order_of_last_key == SortOrder::Asc { - ( - key_len - 1, - key_len, - SeekOp::LE { eq_only: false }, - SeekOp::LE { eq_only: false }, - ) - } else { - ( - key_len, - key_len - 1, - SeekOp::GT.reverse(), - SeekOp::GT.reverse(), - ) - }; - SeekDef { - key, - iter_dir, - seek: if seek_key_len > 0 { - Some(SeekKey { - len: seek_key_len, - op: seek_op, - null_pad: false, - }) - } else { - None - }, - termination: if termination_key_len > 0 { - Some(TerminationKey { - len: termination_key_len, - op: termination_op, - null_pad: false, - }) - } else { - None - }, - } - } - // Backwards, GE: - // Ascending index example: (x=10 AND y>=20) - // Seek key: start from the last LE(x:10), e.g. (x=10, y=usize::MAX) - // Termination key: end at the first LT(x:10, y:20), e.g. (x=10, y=19) - // - // Descending index example: (x=10 AND y>=20) - // Seek key: start from the last GE(x:10, y:20), e.g. (x=10, y=20) so reversed -> LE(x:10, y:20) - // Termination key: end at the first GT(x:10), e.g. (x=11, y=0) so reversed -> LT(x:10) - (IterationDirection::Backwards, ast::Operator::GreaterEquals) => { - let (seek_key_len, termination_key_len, seek_op, termination_op) = - if sort_order_of_last_key == SortOrder::Asc { - ( - key_len - 1, - key_len, - SeekOp::LE { eq_only: false }, - SeekOp::LT, - ) - } else { - ( - key_len, - key_len - 1, - SeekOp::GE { eq_only: false }.reverse(), - SeekOp::GT.reverse(), - ) - }; - SeekDef { - key, - iter_dir, - seek: if seek_key_len > 0 { - Some(SeekKey { - len: seek_key_len, - op: seek_op, - null_pad: false, - }) - } else { - None - }, - termination: if termination_key_len > 0 { - Some(TerminationKey { - len: termination_key_len, - op: termination_op, - null_pad: false, - }) - } else { - None - }, - } - } - (_, op) => { - crate::bail_parse_error!("build_seek_def: invalid operator: {:?}", op,) - } }) } diff --git a/core/translate/plan.rs b/core/translate/plan.rs index ec556f3f9..21fa84b69 100644 --- a/core/translate/plan.rs +++ b/core/translate/plan.rs @@ -4,7 +4,7 @@ use turso_parser::ast::{self, FrameBound, FrameClause, FrameExclude, FrameMode, use crate::{ function::AggFunc, schema::{BTreeTable, Column, FromClauseSubquery, Index, Schema, Table}, - translate::collate::get_collseq_from_expr, + translate::{collate::get_collseq_from_expr, optimizer::constraints::SeekRangeConstraint}, vdbe::{ builder::{CursorKey, CursorType, ProgramBuilder}, insn::{IdxInsertFlags, Insn}, @@ -1004,54 +1004,91 @@ impl JoinedTable { /// A definition of a rowid/index search. /// /// [SeekKey] is the condition that is used to seek to a specific row in a table/index. -/// [TerminationKey] is the condition that is used to terminate the search after a seek. +/// [SeekKey] also used to represent range scan termination condition. #[derive(Debug, Clone)] pub struct SeekDef { - /// The key to use when seeking and when terminating the scan that follows the seek. + /// Common prefix of the key which is shared between start/end fields /// For example, given: /// - CREATE INDEX i ON t (x, y desc) /// - SELECT * FROM t WHERE x = 1 AND y >= 30 /// - /// The key is [(1, ASC), (30, DESC)] - pub key: Vec<(ast::Expr, SortOrder)>, + /// Then, prefix=[(eq=1, ASC)], start=Some((ge, Expr(30))), end=Some((gt, Sentinel)) + pub prefix: Vec, /// The condition to use when seeking. See [SeekKey] for more details. - pub seek: Option, - /// The condition to use when terminating the scan that follows the seek. See [TerminationKey] for more details. - pub termination: Option, + pub start: SeekKey, + /// The condition to use when terminating the scan that follows the seek. See [SeekKey] for more details. + pub end: SeekKey, /// The direction of the scan that follows the seek. pub iter_dir: IterationDirection, } +pub struct SeekDefKeyIterator<'a> { + seek_def: &'a SeekDef, + seek_key: &'a SeekKey, + pos: usize, +} + +impl<'a> Iterator for SeekDefKeyIterator<'a> { + type Item = SeekKeyComponent<&'a ast::Expr>; + + fn next(&mut self) -> Option { + let result = if self.pos < self.seek_def.prefix.len() { + Some(SeekKeyComponent::Expr( + &self.seek_def.prefix[self.pos].eq.as_ref().unwrap().1, + )) + } else if self.pos == self.seek_def.prefix.len() { + match &self.seek_key.last_component { + SeekKeyComponent::Expr(expr) => Some(SeekKeyComponent::Expr(expr)), + SeekKeyComponent::None => None, + } + } else { + None + }; + self.pos += 1; + result + } +} + +impl SeekDef { + /// returns amount of values in the given seek key + /// - so, for SELECT * FROM t WHERE x = 10 AND y = 20 AND y >= 30 there will be 3 values (10, 20, 30) + pub fn size(&self, key: &SeekKey) -> usize { + self.prefix.len() + + match key.last_component { + SeekKeyComponent::Expr(_) => 1, + SeekKeyComponent::None => 0, + } + } + /// iterate over value expressions in the given seek key + pub fn iter<'a>(&'a self, key: &'a SeekKey) -> SeekDefKeyIterator<'a> { + SeekDefKeyIterator { + seek_def: self, + seek_key: key, + pos: 0, + } + } +} + +/// [SeekKeyComponent] enum represents optional last_component of the [SeekKey] +/// +/// This component represented by separate enum instead of Option because before there were third Sentinel value +/// For now - we don't need this and it's enough to just either use some user-provided expression or omit last component of the key completely +/// But as separate enum is almost never a harm - I decided to keep it here. +/// +/// This enum accepts generic argument E in order to use both SeekKeyComponent and SeekKeyComponent<&ast::Expr> +#[derive(Debug, Clone)] +pub enum SeekKeyComponent { + Expr(E), + None, +} + /// A condition to use when seeking. #[derive(Debug, Clone)] pub struct SeekKey { - /// How many columns from [SeekDef::key] are used in seeking. - pub len: usize, - /// Whether to NULL pad the last column of the seek key to match the length of [SeekDef::key]. - /// The reason it is done is that sometimes our full index key is not used in seeking, - /// but we want to find the lowest value that matches the non-null prefix of the key. - /// For example, given: - /// - CREATE INDEX i ON t (x, y) - /// - SELECT * FROM t WHERE x = 1 AND y < 30 - /// - /// We want to seek to the first row where x = 1, and then iterate forwards. - /// In this case, the seek key is GT(1, NULL) since NULL is always LT in index key comparisons. - /// We can't use just GT(1) because in index key comparisons, only the given number of columns are compared, - /// so this means any index keys with (x=1) will compare equal, e.g. (x=1, y=usize::MAX) will compare equal to the seek key (x:1) - pub null_pad: bool, - /// The comparison operator to use when seeking. - pub op: SeekOp, -} + /// Complete key must be constructed from common [SeekDef::prefix] and optional last_component + pub last_component: SeekKeyComponent, -#[derive(Debug, Clone)] -/// A condition to use when terminating the scan that follows a seek. -pub struct TerminationKey { - /// How many columns from [SeekDef::key] are used in terminating the scan that follows the seek. - pub len: usize, - /// Whether to NULL pad the last column of the termination key to match the length of [SeekDef::key]. - /// See [SeekKey::null_pad]. - pub null_pad: bool, - /// The comparison operator to use when terminating the scan that follows the seek. + /// The comparison operator to use when seeking. pub op: SeekOp, } diff --git a/testing/select.test b/testing/select.test index 5b35d3eda..ec13690be 100755 --- a/testing/select.test +++ b/testing/select.test @@ -720,6 +720,176 @@ do_execsql_test_on_specific_db {:memory:} select-no-match-in-leaf-page { 2 2} +do_execsql_test_on_specific_db {:memory:} select-range-search-count-asc-index { + CREATE TABLE t (a, b); + CREATE INDEX t_idx ON t(a, b); + insert into t values (1, 1); + insert into t values (1, 2); + insert into t values (1, 3); + insert into t values (1, 4); + insert into t values (1, 5); + insert into t values (1, 6); + insert into t values (2, 1); + insert into t values (2, 2); + insert into t values (2, 3); + insert into t values (2, 4); + insert into t values (2, 5); + insert into t values (2, 6); + select count(*) from t where a = 1 AND b >= 2 ORDER BY a ASC, b ASC; + select count(*) from t where a = 1 AND b > 2 ORDER BY a ASC, b ASC; + select count(*) from t where a = 1 AND b <= 4 ORDER BY a ASC, b ASC; + select count(*) from t where a = 1 AND b < 4 ORDER BY a ASC, b ASC; + select count(*) from t where a = 1 AND b >= 2 AND b <= 4 ORDER BY a ASC, b ASC; + select count(*) from t where a = 1 AND b > 2 AND b <= 4 ORDER BY a ASC, b ASC; + select count(*) from t where a = 1 AND b >= 2 AND b < 4 ORDER BY a ASC, b ASC; + select count(*) from t where a = 1 AND b > 2 AND b < 4 ORDER BY a ASC, b ASC; + + select count(*) from t where a = 1 AND b >= 2 ORDER BY a DESC, b DESC; + select count(*) from t where a = 1 AND b > 2 ORDER BY a DESC, b DESC; + select count(*) from t where a = 1 AND b <= 4 ORDER BY a DESC, b DESC; + select count(*) from t where a = 1 AND b < 4 ORDER BY a DESC, b DESC; + select count(*) from t where a = 1 AND b >= 2 AND b <= 4 ORDER BY a DESC, b DESC; + select count(*) from t where a = 1 AND b > 2 AND b <= 4 ORDER BY a DESC, b DESC; + select count(*) from t where a = 1 AND b >= 2 AND b < 4 ORDER BY a DESC, b DESC; + select count(*) from t where a = 1 AND b > 2 AND b < 4 ORDER BY a DESC, b DESC; +} {5 +4 +4 +3 +3 +2 +2 +1 +5 +4 +4 +3 +3 +2 +2 +1} + +do_execsql_test_on_specific_db {:memory:} select-range-search-count-desc-index { + CREATE TABLE t (a, b); + CREATE INDEX t_idx ON t(a, b DESC); + insert into t values (1, 1); + insert into t values (1, 2); + insert into t values (1, 3); + insert into t values (1, 4); + insert into t values (1, 5); + insert into t values (1, 6); + insert into t values (2, 1); + insert into t values (2, 2); + insert into t values (2, 3); + insert into t values (2, 4); + insert into t values (2, 5); + insert into t values (2, 6); + select count(*) from t where a = 1 AND b >= 2 ORDER BY a ASC, b DESC; + select count(*) from t where a = 1 AND b > 2 ORDER BY a ASC, b DESC; + select count(*) from t where a = 1 AND b <= 4 ORDER BY a ASC, b DESC; + select count(*) from t where a = 1 AND b < 4 ORDER BY a ASC, b DESC; + select count(*) from t where a = 1 AND b >= 2 AND b <= 4 ORDER BY a ASC, b DESC; + select count(*) from t where a = 1 AND b > 2 AND b <= 4 ORDER BY a ASC, b DESC; + select count(*) from t where a = 1 AND b >= 2 AND b < 4 ORDER BY a ASC, b DESC; + select count(*) from t where a = 1 AND b > 2 AND b < 4 ORDER BY a ASC, b DESC; + + select count(*) from t where a = 1 AND b >= 2 ORDER BY a DESC, b ASC; + select count(*) from t where a = 1 AND b > 2 ORDER BY a DESC, b ASC; + select count(*) from t where a = 1 AND b <= 4 ORDER BY a DESC, b ASC; + select count(*) from t where a = 1 AND b < 4 ORDER BY a DESC, b ASC; + select count(*) from t where a = 1 AND b >= 2 AND b <= 4 ORDER BY a DESC, b ASC; + select count(*) from t where a = 1 AND b > 2 AND b <= 4 ORDER BY a DESC, b ASC; + select count(*) from t where a = 1 AND b >= 2 AND b < 4 ORDER BY a DESC, b ASC; + select count(*) from t where a = 1 AND b > 2 AND b < 4 ORDER BY a DESC, b ASC; +} {5 +4 +4 +3 +3 +2 +2 +1 +5 +4 +4 +3 +3 +2 +2 +1} + +do_execsql_test_on_specific_db {:memory:} select-range-search-scan-asc-index { + CREATE TABLE t (a, b); + CREATE INDEX t_idx ON t(a, b); + insert into t values (1, 1); + insert into t values (1, 2); + insert into t values (1, 3); + insert into t values (1, 4); + insert into t values (1, 5); + insert into t values (1, 6); + insert into t values (2, 1); + insert into t values (2, 2); + insert into t values (2, 3); + insert into t values (2, 4); + insert into t values (2, 5); + insert into t values (2, 6); + select * from t where a = 1 AND b > 1 AND b < 6 ORDER BY a ASC, b ASC; + select * from t where a = 2 AND b > 1 AND b < 6 ORDER BY a DESC, b DESC; + select * from t where a = 1 AND b > 1 AND b < 6 ORDER BY a DESC, b ASC; + select * from t where a = 2 AND b > 1 AND b < 6 ORDER BY a ASC, b DESC; +} {1|2 +1|3 +1|4 +1|5 +2|5 +2|4 +2|3 +2|2 +1|2 +1|3 +1|4 +1|5 +2|5 +2|4 +2|3 +2|2} + +do_execsql_test_on_specific_db {:memory:} select-range-search-scan-desc-index { + CREATE TABLE t (a, b); + CREATE INDEX t_idx ON t(a, b DESC); + insert into t values (1, 1); + insert into t values (1, 2); + insert into t values (1, 3); + insert into t values (1, 4); + insert into t values (1, 5); + insert into t values (1, 6); + insert into t values (2, 1); + insert into t values (2, 2); + insert into t values (2, 3); + insert into t values (2, 4); + insert into t values (2, 5); + insert into t values (2, 6); + select * from t where a = 1 AND b > 1 AND b < 6 ORDER BY a ASC, b ASC; + select * from t where a = 2 AND b > 1 AND b < 6 ORDER BY a DESC, b DESC; + select * from t where a = 1 AND b > 1 AND b < 6 ORDER BY a DESC, b ASC; + select * from t where a = 2 AND b > 1 AND b < 6 ORDER BY a ASC, b DESC; +} {1|2 +1|3 +1|4 +1|5 +2|5 +2|4 +2|3 +2|2 +1|2 +1|3 +1|4 +1|5 +2|5 +2|4 +2|3 +2|2} + # Regression tests for double-quoted strings in SELECT statements do_execsql_test_skip_lines_on_specific_db 1 {:memory:} select-double-quotes-values { .dbconfig dqs_dml on From 5b6e8e4b841bf7d65a8f13fad290abb4663ec780 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 13:28:40 +0400 Subject: [PATCH 099/428] Float32/Float64 -> Float32Dense/Float64Dense --- core/vector/distance/euclidean.rs | 4 +- core/vector/mod.rs | 12 ++--- core/vector/vector_types.rs | 85 ++++++++++++++++--------------- 3 files changed, 51 insertions(+), 50 deletions(-) diff --git a/core/vector/distance/euclidean.rs b/core/vector/distance/euclidean.rs index f8e4f048d..dd46584e7 100644 --- a/core/vector/distance/euclidean.rs +++ b/core/vector/distance/euclidean.rs @@ -12,8 +12,8 @@ impl DistanceCalculator for Euclidean { fn calculate(v1: &Vector, v2: &Vector) -> Result { match v1.vector_type { - VectorType::Float32 => Ok(euclidean_distance_f32(v1.as_f32_slice(), v2.as_f32_slice())), - VectorType::Float64 => Ok(euclidean_distance_f64(v1.as_f64_slice(), v2.as_f64_slice())), + VectorType::Float32Dense => Ok(euclidean_distance_f32(v1.as_f32_slice(), v2.as_f32_slice())), + VectorType::Float64Dense => Ok(euclidean_distance_f64(v1.as_f64_slice(), v2.as_f64_slice())), } } } diff --git a/core/vector/mod.rs b/core/vector/mod.rs index 2fc960849..516e30a4e 100644 --- a/core/vector/mod.rs +++ b/core/vector/mod.rs @@ -14,7 +14,7 @@ pub fn vector32(args: &[Register]) -> Result { "vector32 requires exactly one argument".to_string(), )); } - let x = parse_vector(&args[0], Some(VectorType::Float32))?; + let x = parse_vector(&args[0], Some(VectorType::Float32Dense))?; // Extract the Vec from Value if let Value::Blob(data) = vector_serialize_f32(x) { Ok(Value::Blob(data)) @@ -31,7 +31,7 @@ pub fn vector64(args: &[Register]) -> Result { "vector64 requires exactly one argument".to_string(), )); } - let x = parse_vector(&args[0], Some(VectorType::Float64))?; + let x = parse_vector(&args[0], Some(VectorType::Float64Dense))?; // Extract the Vec from Value if let Value::Blob(data) = vector_serialize_f64(x) { Ok(Value::Blob(data)) @@ -123,8 +123,8 @@ pub fn vector_concat(args: &[Register]) -> Result { let vector = vector_types::vector_concat(&x, &y)?; match vector.vector_type { - VectorType::Float32 => Ok(vector_serialize_f32(vector)), - VectorType::Float64 => Ok(vector_serialize_f64(vector)), + VectorType::Float32Dense => Ok(vector_serialize_f32(vector)), + VectorType::Float64Dense => Ok(vector_serialize_f64(vector)), } } @@ -156,7 +156,7 @@ pub fn vector_slice(args: &[Register]) -> Result { let result = vector_types::vector_slice(&vector, start_index as usize, end_index as usize)?; Ok(match result.vector_type { - VectorType::Float32 => vector_serialize_f32(result), - VectorType::Float64 => vector_serialize_f64(result), + VectorType::Float32Dense => vector_serialize_f32(result), + VectorType::Float64Dense => vector_serialize_f64(result), }) } diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index 20d268dc0..15e18723c 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -4,15 +4,15 @@ use crate::{LimboError, Result}; #[derive(Debug, Clone, PartialEq, Copy)] pub enum VectorType { - Float32, - Float64, + Float32Dense, + Float64Dense, } impl VectorType { pub fn size_to_dims(&self, size: usize) -> usize { match self { - VectorType::Float32 => size / 4, - VectorType::Float64 => size / 8, + VectorType::Float32Dense => size / 4, + VectorType::Float64Dense => size / 8, } } } @@ -120,7 +120,7 @@ pub fn parse_string_vector(vector_type: VectorType, value: &Value) -> Result { + VectorType::Float32Dense => { let x = x .parse::() .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; @@ -131,7 +131,7 @@ pub fn parse_string_vector(vector_type: VectorType, value: &Value) -> Result { + VectorType::Float64Dense => { let x = x .parse::() .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; @@ -154,9 +154,10 @@ pub fn parse_string_vector(vector_type: VectorType, value: &Value) -> Result) -> Result { match value.get_value().value_type() { - ValueType::Text => { - parse_string_vector(vec_ty.unwrap_or(VectorType::Float32), value.get_value()) - } + ValueType::Text => parse_string_vector( + vec_ty.unwrap_or(VectorType::Float32Dense), + value.get_value(), + ), ValueType::Blob => { let Some(blob) = value.get_value().to_blob() else { return Err(LimboError::ConversionError( @@ -183,7 +184,7 @@ pub fn vector_to_text(vector: &Vector) -> String { let mut text = String::new(); text.push('['); match vector.vector_type { - VectorType::Float32 => { + VectorType::Float32Dense => { let data = vector.as_f32_slice(); for (i, value) in data.iter().enumerate().take(vector.dims) { text.push_str(&value.to_string()); @@ -192,7 +193,7 @@ pub fn vector_to_text(vector: &Vector) -> String { } } } - VectorType::Float64 => { + VectorType::Float64Dense => { let data = vector.as_f64_slice(); for (i, value) in data.iter().enumerate().take(vector.dims) { text.push_str(&value.to_string()); @@ -208,8 +209,8 @@ pub fn vector_to_text(vector: &Vector) -> String { pub fn vector_deserialize(vector_type: VectorType, blob: &[u8]) -> Result { match vector_type { - VectorType::Float32 => vector_deserialize_f32(blob), - VectorType::Float64 => vector_deserialize_f64(blob), + VectorType::Float32Dense => vector_deserialize_f32(blob), + VectorType::Float64Dense => vector_deserialize_f64(blob), } } @@ -222,7 +223,7 @@ pub fn vector_serialize_f64(x: Vector) -> Value { pub fn vector_deserialize_f64(blob: &[u8]) -> Result { Ok(Vector { - vector_type: VectorType::Float64, + vector_type: VectorType::Float64Dense, dims: (blob.len() - 1) / 8, data: blob[..blob.len() - 1].to_vec(), }) @@ -234,7 +235,7 @@ pub fn vector_serialize_f32(x: Vector) -> Value { pub fn vector_deserialize_f32(blob: &[u8]) -> Result { Ok(Vector { - vector_type: VectorType::Float32, + vector_type: VectorType::Float32Dense, dims: blob.len() / 4, data: blob.to_vec(), }) @@ -242,8 +243,8 @@ pub fn vector_deserialize_f32(blob: &[u8]) -> Result { pub fn do_vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { match v1.vector_type { - VectorType::Float32 => vector_f32_distance_cos(v1, v2), - VectorType::Float64 => vector_f64_distance_cos(v1, v2), + VectorType::Float32Dense => vector_f32_distance_cos(v1, v2), + VectorType::Float64Dense => vector_f64_distance_cos(v1, v2), } } @@ -330,7 +331,7 @@ pub fn vector_f64_distance_cos(v1: &Vector, v2: &Vector) -> Result { pub fn vector_type(blob: &[u8]) -> Result { // Even-sized blobs are always float32. if blob.len() % 2 == 0 { - return Ok(VectorType::Float32); + return Ok(VectorType::Float32Dense); } // Odd-sized blobs have type byte at the end let (data_blob, type_byte) = blob.split_at(blob.len() - 1); @@ -342,7 +343,7 @@ pub fn vector_type(blob: &[u8]) -> Result { "Invalid vector value".to_string(), )); } - Ok(VectorType::Float32) + Ok(VectorType::Float32Dense) } 2 => { if data_blob.len() % 8 != 0 { @@ -350,7 +351,7 @@ pub fn vector_type(blob: &[u8]) -> Result { "Invalid vector value".to_string(), )); } - Ok(VectorType::Float64) + Ok(VectorType::Float64Dense) } _ => Err(LimboError::ConversionError( "Invalid vector type".to_string(), @@ -402,14 +403,14 @@ pub fn vector_slice(vector: &Vector, start_idx: usize, end_idx: usize) -> Result } let (vector_type, data) = match vector.vector_type { - VectorType::Float32 => ( - VectorType::Float32, + VectorType::Float32Dense => ( + VectorType::Float32Dense, extract_bytes::(vector.as_f32_slice(), start_idx, end_idx, |v| { v.to_le_bytes() })?, ), - VectorType::Float64 => ( - VectorType::Float64, + VectorType::Float64Dense => ( + VectorType::Float64Dense, extract_bytes::(vector.as_f64_slice(), start_idx, end_idx, |v| { v.to_le_bytes() })?, @@ -486,17 +487,17 @@ mod tests { impl Arbitrary for ArbitraryVector { fn arbitrary(g: &mut Gen) -> Self { let vector_type = if bool::arbitrary(g) { - VectorType::Float32 + VectorType::Float32Dense } else { - VectorType::Float64 + VectorType::Float64Dense }; let data = match vector_type { - VectorType::Float32 => { + VectorType::Float32Dense => { let floats = Self::generate_f32_vector(g); floats.iter().flat_map(|f| f.to_le_bytes()).collect() } - VectorType::Float64 => { + VectorType::Float64Dense => { let floats = Self::generate_f64_vector(g); floats.iter().flat_map(|f| f.to_le_bytes()).collect() } @@ -535,8 +536,8 @@ mod tests { fn test_vector_type(v: Vector) -> bool { let vtype = v.vector_type; let value = match &vtype { - VectorType::Float32 => vector_serialize_f32(v), - VectorType::Float64 => vector_serialize_f64(v), + VectorType::Float32Dense => vector_serialize_f32(v), + VectorType::Float64Dense => vector_serialize_f64(v), }; let blob = value.to_blob().unwrap(); @@ -576,12 +577,12 @@ mod tests { /// - The data length is correct (4 bytes per float for f32, 8 bytes per float for f64) fn test_slice_conversion(v: Vector) -> bool { match v.vector_type { - VectorType::Float32 => { + VectorType::Float32Dense => { let slice = v.as_f32_slice(); // Check if the slice length matches the dimensions and the data length is correct (4 bytes per float) slice.len() == DIMS && (slice.len() * 4 == v.data.len()) } - VectorType::Float64 => { + VectorType::Float64Dense => { let slice = v.as_f64_slice(); // Check if the slice length matches the dimensions and the data length is correct (8 bytes per float) slice.len() == DIMS && (slice.len() * 8 == v.data.len()) @@ -667,25 +668,25 @@ mod tests { #[test] fn parse_string_vector_zero_length() { let value = Value::from_text("[]"); - let vector = parse_string_vector(VectorType::Float32, &value).unwrap(); + let vector = parse_string_vector(VectorType::Float32Dense, &value).unwrap(); assert_eq!(vector.dims, 0); - assert_eq!(vector.vector_type, VectorType::Float32); + assert_eq!(vector.vector_type, VectorType::Float32Dense); } #[test] fn test_parse_string_vector_valid_whitespace() { let value = Value::from_text(" [ 1.0 , 2.0 , 3.0 ] "); - let vector = parse_string_vector(VectorType::Float32, &value).unwrap(); + let vector = parse_string_vector(VectorType::Float32Dense, &value).unwrap(); assert_eq!(vector.dims, 3); - assert_eq!(vector.vector_type, VectorType::Float32); + assert_eq!(vector.vector_type, VectorType::Float32Dense); } #[test] fn test_parse_string_vector_valid() { let value = Value::from_text("[1.0, 2.0, 3.0]"); - let vector = parse_string_vector(VectorType::Float32, &value).unwrap(); + let vector = parse_string_vector(VectorType::Float32Dense, &value).unwrap(); assert_eq!(vector.dims, 3); - assert_eq!(vector.vector_type, VectorType::Float32); + assert_eq!(vector.vector_type, VectorType::Float32Dense); } fn float32_vec_from(slice: &[f32]) -> Vector { @@ -695,7 +696,7 @@ mod tests { } Vector { - vector_type: VectorType::Float32, + vector_type: VectorType::Float32Dense, dims: slice.len(), data, } @@ -713,7 +714,7 @@ mod tests { let result = vector_concat(&v1, &v2).unwrap(); assert_eq!(result.dims, 6); - assert_eq!(result.vector_type, VectorType::Float32); + assert_eq!(result.vector_type, VectorType::Float32Dense); assert_eq!( f32_slice_from_vector(&result), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0] @@ -867,12 +868,12 @@ mod tests { } match v.vector_type { - VectorType::Float32 => { + VectorType::Float32Dense => { let original = v.as_f32_slice(); let parsed = parsed_vector.as_f32_slice(); original.iter().zip(parsed.iter()).all(|(a, b)| a == b) } - VectorType::Float64 => { + VectorType::Float64Dense => { let original = v.as_f64_slice(); let parsed = parsed_vector.as_f64_slice(); original.iter().zip(parsed.iter()).all(|(a, b)| a == b) From a76cdb83c5ae43429a0ce7d6cdfec87a8528ca64 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 12:34:12 +0300 Subject: [PATCH 100/428] fuzz: sometimes add another condition on the same column to exercise index range queries --- tests/integration/fuzz/mod.rs | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 8cbac20c6..e9b81cd33 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -381,11 +381,28 @@ mod tests { // Use a small limit to make the test complete faster let limit = 5; - // Generate WHERE clause string + /// Generate a comparison string (e.g. x > 10 AND x < 20) or just x > 10. + fn generate_comparison( + operator: &str, + col_name: &str, + col_val: i32, + rng: &mut ChaCha8Rng, + ) -> String { + if operator != "=" && rng.random_range(0..3) == 1 { + let val2 = rng.random_range(0..=3000); + let op2 = COMPARISONS[rng.random_range(0..COMPARISONS.len())]; + format!("{col_name} {operator} {col_val} AND {col_name} {op2} {val2}") + } else { + format!("{col_name} {operator} {col_val}") + } + } + + // Generate WHERE clause string. + // Sometimes add another inequality to the WHERE clause (e.g. x > 10 AND x < 20) to exercise range queries. let where_clause_components = vec![ - comp1.map(|x| format!("x {} {}", x, col_val_first.unwrap())), - comp2.map(|x| format!("y {} {}", x, col_val_second.unwrap())), - comp3.map(|x| format!("z {} {}", x, col_val_third.unwrap())), + comp1.map(|x| generate_comparison(x, "x", col_val_first.unwrap(), &mut rng)), + comp2.map(|x| generate_comparison(x, "y", col_val_second.unwrap(), &mut rng)), + comp3.map(|x| generate_comparison(x, "z", col_val_third.unwrap(), &mut rng)), ] .into_iter() .flatten() From e0461dd78a11b26c14475e257ca0ba84de4fa07f Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 15:01:47 +0300 Subject: [PATCH 101/428] Sorter: compute values upfront instead of deserializing on every comparison --- core/vdbe/sorter.rs | 71 ++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 40 deletions(-) diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index bedf6de18..0294787f1 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -1,6 +1,5 @@ use turso_parser::ast::SortOrder; -use std::cell::{RefCell, UnsafeCell}; use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd, Reverse}; use std::collections::BinaryHeap; use std::rc::Rc; @@ -204,7 +203,7 @@ impl Sorter { }; match record { Some(record) => { - if let Some(error) = record.deserialization_error.replace(None) { + if let Some(error) = record.deserialization_error { // If there was a key deserialization error during the comparison, return the error. return Err(error); } @@ -614,12 +613,14 @@ impl SortedChunk { struct SortableImmutableRecord { record: ImmutableRecord, cursor: RecordCursor, - // SAFETY: borrows from 'self - key_values: UnsafeCell>>, + /// SAFETY: borrows from self + /// These are precomputed on record construction so that they can be reused during + /// sorting comparisons. + key_values: Vec>, key_len: usize, index_key_info: Rc>, /// The key deserialization error, if any. - deserialization_error: RefCell>, + deserialization_error: Option, } impl SortableImmutableRecord { @@ -634,45 +635,40 @@ impl SortableImmutableRecord { index_key_info.len() >= cursor.serial_types.len(), "index_key_info.len() < cursor.serial_types.len()" ); + + // Pre-compute all key values upfront + let mut key_values = Vec::with_capacity(key_len); + let mut deserialization_error = None; + + for i in 0..key_len { + match cursor.deserialize_column(&record, i) { + Ok(value) => { + // SAFETY: We're storing the value with 'static lifetime but it's actually bound to the record + // This is safe because the record lives as long as this struct + let value: ValueRef<'static> = unsafe { std::mem::transmute(value) }; + key_values.push(value); + } + Err(err) => { + deserialization_error = Some(err); + break; + } + } + } + Ok(Self { record, cursor, - key_values: UnsafeCell::new(Vec::with_capacity(key_len)), + key_values, index_key_info, - deserialization_error: RefCell::new(None), + deserialization_error, key_len, }) } - - fn key_value<'a>(&'a self, i: usize) -> Option> { - // SAFETY: there are no other active references - let key_values = unsafe { &mut *self.key_values.get() }; - - if i >= key_values.len() { - assert_eq!(key_values.len(), i, "access must be sequential"); - - let value = match self.cursor.deserialize_column(&self.record, i) { - Ok(value) => value, - Err(err) => { - self.deserialization_error.replace(Some(err)); - return None; - } - }; - - // SAFETY: no 'static lifetime is exposed, all references are bound to 'self - let value: ValueRef<'static> = unsafe { std::mem::transmute(value) }; - key_values.push(value); - } - - Some(key_values[i]) - } } impl Ord for SortableImmutableRecord { fn cmp(&self, other: &Self) -> Ordering { - if self.deserialization_error.borrow().is_some() - || other.deserialization_error.borrow().is_some() - { + if self.deserialization_error.is_some() || other.deserialization_error.is_some() { // If one of the records has a deserialization error, circumvent the comparison and return early. return Ordering::Equal; } @@ -682,13 +678,8 @@ impl Ord for SortableImmutableRecord { ); for i in 0..self.key_len { - let Some(this_key_value) = self.key_value(i) else { - return Ordering::Equal; - }; - - let Some(other_key_value) = other.key_value(i) else { - return Ordering::Equal; - }; + let this_key_value = self.key_values[i]; + let other_key_value = other.key_values[i]; let column_order = self.index_key_info[i].sort_order; let collation = self.index_key_info[i].collation; From 7e9e102f207e0739a65a5bcb4f5175f541e0c14e Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 16:01:37 +0400 Subject: [PATCH 102/428] move vector operations under operations/ folder --- core/vector/distance.rs | 25 -- core/vector/mod.rs | 31 +- core/vector/operations/concat.rs | 101 ++++++ core/vector/operations/distance_cos.rs | 91 ++++++ .../distance_l2.rs} | 43 +-- core/vector/operations/mod.rs | 4 + core/vector/operations/slice.rs | 139 +++++++++ core/vector/vector_types.rs | 293 +----------------- 8 files changed, 369 insertions(+), 358 deletions(-) delete mode 100644 core/vector/distance.rs create mode 100644 core/vector/operations/concat.rs create mode 100644 core/vector/operations/distance_cos.rs rename core/vector/{distance/euclidean.rs => operations/distance_l2.rs} (53%) create mode 100644 core/vector/operations/mod.rs create mode 100644 core/vector/operations/slice.rs diff --git a/core/vector/distance.rs b/core/vector/distance.rs deleted file mode 100644 index e61c24c70..000000000 --- a/core/vector/distance.rs +++ /dev/null @@ -1,25 +0,0 @@ -use super::vector_types::Vector; -use crate::Result; - -pub(crate) mod euclidean; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[non_exhaustive] -pub enum DistanceType { - /// Euclidean distance. This is a very common distance metric that - /// accounts for both magnitude and direction when determining the distance - /// between vectors. Euclidean distance has a range of [0, ∞). - Euclidean, - - // TODO(asukamilet): Refactor the current `vector_types.rs` to integrate - #[allow(dead_code)] - /// Cosine distance. This is a measure of similarity between two vectors - Cosine, -} - -pub trait DistanceCalculator { - #[allow(unused)] - fn distance_type() -> DistanceType; - - fn calculate(v1: &Vector, v2: &Vector) -> Result; -} diff --git a/core/vector/mod.rs b/core/vector/mod.rs index 516e30a4e..5a722c34c 100644 --- a/core/vector/mod.rs +++ b/core/vector/mod.rs @@ -1,10 +1,9 @@ use crate::types::Value; use crate::vdbe::Register; -use crate::vector::distance::{euclidean::Euclidean, DistanceCalculator}; use crate::LimboError; use crate::Result; -pub mod distance; +pub mod operations; pub mod vector_types; use vector_types::*; @@ -76,7 +75,7 @@ pub fn vector_distance_cos(args: &[Register]) -> Result { let x = parse_vector(&args[0], None)?; let y = parse_vector(&args[1], None)?; - let dist = do_vector_distance_cos(&x, &y)?; + let dist = operations::distance_cos::vector_distance_cos(&x, &y)?; Ok(Value::Float(dist)) } @@ -89,19 +88,7 @@ pub fn vector_distance_l2(args: &[Register]) -> Result { let x = parse_vector(&args[0], None)?; let y = parse_vector(&args[1], None)?; - // Validate that both vectors have the same dimensions and type - if x.dims != y.dims { - return Err(LimboError::ConversionError( - "Vectors must have the same dimensions".to_string(), - )); - } - if x.vector_type != y.vector_type { - return Err(LimboError::ConversionError( - "Vectors must be of the same type".to_string(), - )); - } - - let dist = Euclidean::calculate(&x, &y)?; + let dist = operations::distance_l2::vector_distance_l2(&x, &y)?; Ok(Value::Float(dist)) } @@ -114,14 +101,7 @@ pub fn vector_concat(args: &[Register]) -> Result { let x = parse_vector(&args[0], None)?; let y = parse_vector(&args[1], None)?; - - if x.vector_type != y.vector_type { - return Err(LimboError::InvalidArgument( - "Vectors must be of the same type".into(), - )); - } - - let vector = vector_types::vector_concat(&x, &y)?; + let vector = operations::concat::vector_concat(&x, &y)?; match vector.vector_type { VectorType::Float32Dense => Ok(vector_serialize_f32(vector)), VectorType::Float64Dense => Ok(vector_serialize_f64(vector)), @@ -153,7 +133,8 @@ pub fn vector_slice(args: &[Register]) -> Result { )); } - let result = vector_types::vector_slice(&vector, start_index as usize, end_index as usize)?; + let result = + operations::slice::vector_slice(&vector, start_index as usize, end_index as usize)?; Ok(match result.vector_type { VectorType::Float32Dense => vector_serialize_f32(result), diff --git a/core/vector/operations/concat.rs b/core/vector/operations/concat.rs new file mode 100644 index 000000000..3b4ebf302 --- /dev/null +++ b/core/vector/operations/concat.rs @@ -0,0 +1,101 @@ +use crate::{vector::vector_types::Vector, LimboError, Result}; + +pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { + if v1.vector_type != v2.vector_type { + return Err(LimboError::ConversionError( + "Mismatched vector types".into(), + )); + } + + let mut data = Vec::with_capacity(v1.data.len() + v2.data.len()); + data.extend_from_slice(&v1.data); + data.extend_from_slice(&v2.data); + + Ok(Vector { + vector_type: v1.vector_type, + dims: v1.dims + v2.dims, + data, + }) +} + +#[cfg(test)] +mod tests { + use crate::vector::{ + operations::concat::vector_concat, + vector_types::{Vector, VectorType}, + }; + + fn float32_vec_from(slice: &[f32]) -> Vector { + let mut data = Vec::new(); + for &v in slice { + data.extend_from_slice(&v.to_le_bytes()); + } + + Vector { + vector_type: VectorType::Float32Dense, + dims: slice.len(), + data, + } + } + + fn f32_slice_from_vector(vector: &Vector) -> Vec { + vector.as_f32_slice().to_vec() + } + + #[test] + fn test_vector_concat_normal_case() { + let v1 = float32_vec_from(&[1.0, 2.0, 3.0]); + let v2 = float32_vec_from(&[4.0, 5.0, 6.0]); + + let result = vector_concat(&v1, &v2).unwrap(); + + assert_eq!(result.dims, 6); + assert_eq!(result.vector_type, VectorType::Float32Dense); + assert_eq!( + f32_slice_from_vector(&result), + vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + ); + } + + #[test] + fn test_vector_concat_empty_left() { + let v1 = float32_vec_from(&[]); + let v2 = float32_vec_from(&[4.0, 5.0]); + + let result = vector_concat(&v1, &v2).unwrap(); + + assert_eq!(result.dims, 2); + assert_eq!(f32_slice_from_vector(&result), vec![4.0, 5.0]); + } + + #[test] + fn test_vector_concat_empty_right() { + let v1 = float32_vec_from(&[1.0, 2.0]); + let v2 = float32_vec_from(&[]); + + let result = vector_concat(&v1, &v2).unwrap(); + + assert_eq!(result.dims, 2); + assert_eq!(f32_slice_from_vector(&result), vec![1.0, 2.0]); + } + + #[test] + fn test_vector_concat_both_empty() { + let v1 = float32_vec_from(&[]); + let v2 = float32_vec_from(&[]); + let result = vector_concat(&v1, &v2).unwrap(); + assert_eq!(result.dims, 0); + assert_eq!(f32_slice_from_vector(&result), Vec::::new()); + } + + #[test] + fn test_vector_concat_different_lengths() { + let v1 = float32_vec_from(&[1.0]); + let v2 = float32_vec_from(&[2.0, 3.0, 4.0]); + + let result = vector_concat(&v1, &v2).unwrap(); + + assert_eq!(result.dims, 4); + assert_eq!(f32_slice_from_vector(&result), vec![1.0, 2.0, 3.0, 4.0]); + } +} diff --git a/core/vector/operations/distance_cos.rs b/core/vector/operations/distance_cos.rs new file mode 100644 index 000000000..7f53edfc6 --- /dev/null +++ b/core/vector/operations/distance_cos.rs @@ -0,0 +1,91 @@ +use crate::{ + vector::vector_types::{Vector, VectorType}, + LimboError, Result, +}; + +pub fn vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { + match v1.vector_type { + VectorType::Float32Dense => vector_f32_distance_cos(v1, v2), + VectorType::Float64Dense => vector_f64_distance_cos(v1, v2), + } +} + +fn vector_f32_distance_cos(v1: &Vector, v2: &Vector) -> Result { + if v1.dims != v2.dims { + return Err(LimboError::ConversionError( + "Invalid vector dimensions".to_string(), + )); + } + if v1.vector_type != v2.vector_type { + return Err(LimboError::ConversionError( + "Invalid vector type".to_string(), + )); + } + let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); + let v1_data = v1.as_f32_slice(); + let v2_data = v2.as_f32_slice(); + + // Check for non-finite values + if v1_data.iter().any(|x| !x.is_finite()) || v2_data.iter().any(|x| !x.is_finite()) { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + + for i in 0..v1.dims { + let e1 = v1_data[i]; + let e2 = v2_data[i]; + dot += e1 * e2; + norm1 += e1 * e1; + norm2 += e2 * e2; + } + + // Check for zero norms to avoid division by zero + if norm1 == 0.0 || norm2 == 0.0 { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + + Ok(1.0 - (dot / (norm1 * norm2).sqrt()) as f64) +} + +fn vector_f64_distance_cos(v1: &Vector, v2: &Vector) -> Result { + if v1.dims != v2.dims { + return Err(LimboError::ConversionError( + "Invalid vector dimensions".to_string(), + )); + } + if v1.vector_type != v2.vector_type { + return Err(LimboError::ConversionError( + "Invalid vector type".to_string(), + )); + } + let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); + let v1_data = v1.as_f64_slice(); + let v2_data = v2.as_f64_slice(); + + // Check for non-finite values + if v1_data.iter().any(|x| !x.is_finite()) || v2_data.iter().any(|x| !x.is_finite()) { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + + for i in 0..v1.dims { + let e1 = v1_data[i]; + let e2 = v2_data[i]; + dot += e1 * e2; + norm1 += e1 * e1; + norm2 += e2 * e2; + } + + // Check for zero norms + if norm1 == 0.0 || norm2 == 0.0 { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + + Ok(1.0 - (dot / (norm1 * norm2).sqrt())) +} diff --git a/core/vector/distance/euclidean.rs b/core/vector/operations/distance_l2.rs similarity index 53% rename from core/vector/distance/euclidean.rs rename to core/vector/operations/distance_l2.rs index dd46584e7..2eed64cd3 100644 --- a/core/vector/distance/euclidean.rs +++ b/core/vector/operations/distance_l2.rs @@ -1,24 +1,31 @@ -use super::{DistanceCalculator, DistanceType}; -use crate::vector::vector_types::{Vector, VectorType}; -use crate::Result; +use crate::{ + vector::vector_types::{Vector, VectorType}, + LimboError, Result, +}; -#[derive(Debug, Clone)] -pub struct Euclidean; - -impl DistanceCalculator for Euclidean { - fn distance_type() -> DistanceType { - DistanceType::Euclidean +pub fn vector_distance_l2(v1: &Vector, v2: &Vector) -> Result { + // Validate that both vectors have the same dimensions and type + if v1.dims != v2.dims { + return Err(LimboError::ConversionError( + "Vectors must have the same dimensions".to_string(), + )); } - - fn calculate(v1: &Vector, v2: &Vector) -> Result { - match v1.vector_type { - VectorType::Float32Dense => Ok(euclidean_distance_f32(v1.as_f32_slice(), v2.as_f32_slice())), - VectorType::Float64Dense => Ok(euclidean_distance_f64(v1.as_f64_slice(), v2.as_f64_slice())), + if v1.vector_type != v2.vector_type { + return Err(LimboError::ConversionError( + "Vectors must be of the same type".to_string(), + )); + } + match v1.vector_type { + VectorType::Float32Dense => { + Ok(vector_f32_distance_l2(v1.as_f32_slice(), v2.as_f32_slice())) + } + VectorType::Float64Dense => { + Ok(vector_f64_distance_l2(v1.as_f64_slice(), v2.as_f64_slice())) } } } -fn euclidean_distance_f32(v1: &[f32], v2: &[f32]) -> f64 { +fn vector_f32_distance_l2(v1: &[f32], v2: &[f32]) -> f64 { let sum = v1 .iter() .zip(v2.iter()) @@ -27,7 +34,7 @@ fn euclidean_distance_f32(v1: &[f32], v2: &[f32]) -> f64 { sum.sqrt() } -fn euclidean_distance_f64(v1: &[f64], v2: &[f64]) -> f64 { +fn vector_f64_distance_l2(v1: &[f64], v2: &[f64]) -> f64 { let sum = v1 .iter() .zip(v2.iter()) @@ -58,7 +65,7 @@ mod tests { ]; let results = vectors .iter() - .map(|v| euclidean_distance_f32(&query, v)) + .map(|v| vector_f32_distance_l2(&query, v)) .collect::>(); assert_eq!(results, expected); } @@ -67,6 +74,6 @@ mod tests { fn test_odd_len() { let v = (0..5).map(|x| x as f32).collect::>(); let query = (2..7).map(|x| x as f32).collect::>(); - assert_eq!(euclidean_distance_f32(&v, &query), 20.0_f64.sqrt()); + assert_eq!(vector_f32_distance_l2(&v, &query), 20.0_f64.sqrt()); } } diff --git a/core/vector/operations/mod.rs b/core/vector/operations/mod.rs new file mode 100644 index 000000000..f136eb9bb --- /dev/null +++ b/core/vector/operations/mod.rs @@ -0,0 +1,4 @@ +pub mod concat; +pub mod distance_cos; +pub mod distance_l2; +pub mod slice; diff --git a/core/vector/operations/slice.rs b/core/vector/operations/slice.rs new file mode 100644 index 000000000..054edce4e --- /dev/null +++ b/core/vector/operations/slice.rs @@ -0,0 +1,139 @@ +use crate::{ + vector::vector_types::{Vector, VectorType}, + LimboError, Result, +}; + +pub fn vector_slice(vector: &Vector, start_idx: usize, end_idx: usize) -> Result { + fn extract_bytes( + slice: &[T], + start: usize, + end: usize, + to_bytes: impl Fn(&T) -> [u8; N], + ) -> Result> { + if start > end { + return Err(LimboError::InvalidArgument( + "start index must not be greater than end index".into(), + )); + } + if end > slice.len() || end < start { + return Err(LimboError::ConversionError( + "vector_slice range out of bounds".into(), + )); + } + + let mut buf = Vec::with_capacity((end - start) * N); + for item in &slice[start..end] { + buf.extend_from_slice(&to_bytes(item)); + } + Ok(buf) + } + + let (vector_type, data) = match vector.vector_type { + VectorType::Float32Dense => ( + VectorType::Float32Dense, + extract_bytes::(vector.as_f32_slice(), start_idx, end_idx, |v| { + v.to_le_bytes() + })?, + ), + VectorType::Float64Dense => ( + VectorType::Float64Dense, + extract_bytes::(vector.as_f64_slice(), start_idx, end_idx, |v| { + v.to_le_bytes() + })?, + ), + }; + + Ok(Vector { + vector_type, + dims: end_idx - start_idx, + data, + }) +} + +#[cfg(test)] +mod tests { + use crate::vector::{ + operations::slice::vector_slice, + vector_types::{Vector, VectorType}, + }; + + fn float32_vec_from(slice: &[f32]) -> Vector { + let mut data = Vec::new(); + for &v in slice { + data.extend_from_slice(&v.to_le_bytes()); + } + + Vector { + vector_type: VectorType::Float32Dense, + dims: slice.len(), + data, + } + } + + fn f32_slice_from_vector(vector: &Vector) -> Vec { + vector.as_f32_slice().to_vec() + } + + #[test] + fn test_vector_slice_normal_case() { + let input_vec = float32_vec_from(&[1.0, 2.0, 3.0, 4.0, 5.0]); + let result = vector_slice(&input_vec, 1, 4).unwrap(); + + assert_eq!(result.dims, 3); + assert_eq!(f32_slice_from_vector(&result), vec![2.0, 3.0, 4.0]); + } + + #[test] + fn test_vector_slice_full_range() { + let input_vec = float32_vec_from(&[10.0, 20.0, 30.0]); + let result = vector_slice(&input_vec, 0, 3).unwrap(); + + assert_eq!(result.dims, 3); + assert_eq!(f32_slice_from_vector(&result), vec![10.0, 20.0, 30.0]); + } + + #[test] + fn test_vector_slice_single_element() { + let input_vec = float32_vec_from(&[4.40, 2.71]); + let result = vector_slice(&input_vec, 1, 2).unwrap(); + + assert_eq!(result.dims, 1); + assert_eq!(f32_slice_from_vector(&result), vec![2.71]); + } + + #[test] + fn test_vector_slice_empty_list() { + let input_vec = float32_vec_from(&[1.0, 2.0]); + let result = vector_slice(&input_vec, 2, 2).unwrap(); + + assert_eq!(result.dims, 0); + } + + #[test] + fn test_vector_slice_zero_length() { + let input_vec = float32_vec_from(&[1.0, 2.0, 3.0]); + let err = vector_slice(&input_vec, 2, 1); + assert!(err.is_err(), "Expected error on zero-length range"); + } + + #[test] + fn test_vector_slice_out_of_bounds() { + let input_vec = float32_vec_from(&[1.0, 2.0]); + let err = vector_slice(&input_vec, 0, 5); + assert!(err.is_err()); + } + + #[test] + fn test_vector_slice_start_out_of_bounds() { + let input_vec = float32_vec_from(&[1.0, 2.0]); + let err = vector_slice(&input_vec, 5, 5); + assert!(err.is_err()); + } + + #[test] + fn test_vector_slice_end_out_of_bounds() { + let input_vec = float32_vec_from(&[1.0, 2.0]); + let err = vector_slice(&input_vec, 1, 3); + assert!(err.is_err()); + } +} diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index 15e18723c..d396414e7 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -241,93 +241,6 @@ pub fn vector_deserialize_f32(blob: &[u8]) -> Result { }) } -pub fn do_vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { - match v1.vector_type { - VectorType::Float32Dense => vector_f32_distance_cos(v1, v2), - VectorType::Float64Dense => vector_f64_distance_cos(v1, v2), - } -} - -pub fn vector_f32_distance_cos(v1: &Vector, v2: &Vector) -> Result { - if v1.dims != v2.dims { - return Err(LimboError::ConversionError( - "Invalid vector dimensions".to_string(), - )); - } - if v1.vector_type != v2.vector_type { - return Err(LimboError::ConversionError( - "Invalid vector type".to_string(), - )); - } - let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); - let v1_data = v1.as_f32_slice(); - let v2_data = v2.as_f32_slice(); - - // Check for non-finite values - if v1_data.iter().any(|x| !x.is_finite()) || v2_data.iter().any(|x| !x.is_finite()) { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - - for i in 0..v1.dims { - let e1 = v1_data[i]; - let e2 = v2_data[i]; - dot += e1 * e2; - norm1 += e1 * e1; - norm2 += e2 * e2; - } - - // Check for zero norms to avoid division by zero - if norm1 == 0.0 || norm2 == 0.0 { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - - Ok(1.0 - (dot / (norm1 * norm2).sqrt()) as f64) -} - -pub fn vector_f64_distance_cos(v1: &Vector, v2: &Vector) -> Result { - if v1.dims != v2.dims { - return Err(LimboError::ConversionError( - "Invalid vector dimensions".to_string(), - )); - } - if v1.vector_type != v2.vector_type { - return Err(LimboError::ConversionError( - "Invalid vector type".to_string(), - )); - } - let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); - let v1_data = v1.as_f64_slice(); - let v2_data = v2.as_f64_slice(); - - // Check for non-finite values - if v1_data.iter().any(|x| !x.is_finite()) || v2_data.iter().any(|x| !x.is_finite()) { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - - for i in 0..v1.dims { - let e1 = v1_data[i]; - let e2 = v2_data[i]; - dot += e1 * e2; - norm1 += e1 * e1; - norm2 += e2 * e2; - } - - // Check for zero norms - if norm1 == 0.0 || norm2 == 0.0 { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - - Ok(1.0 - (dot / (norm1 * norm2).sqrt())) -} - pub fn vector_type(blob: &[u8]) -> Result { // Even-sized blobs are always float32. if blob.len() % 2 == 0 { @@ -359,73 +272,10 @@ pub fn vector_type(blob: &[u8]) -> Result { } } -pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { - if v1.vector_type != v2.vector_type { - return Err(LimboError::ConversionError( - "Mismatched vector types".into(), - )); - } - - let mut data = Vec::with_capacity(v1.data.len() + v2.data.len()); - data.extend_from_slice(&v1.data); - data.extend_from_slice(&v2.data); - - Ok(Vector { - vector_type: v1.vector_type, - dims: v1.dims + v2.dims, - data, - }) -} - -pub fn vector_slice(vector: &Vector, start_idx: usize, end_idx: usize) -> Result { - fn extract_bytes( - slice: &[T], - start: usize, - end: usize, - to_bytes: impl Fn(&T) -> [u8; N], - ) -> Result> { - if start > end { - return Err(LimboError::InvalidArgument( - "start index must not be greater than end index".into(), - )); - } - if end > slice.len() || end < start { - return Err(LimboError::ConversionError( - "vector_slice range out of bounds".into(), - )); - } - - let mut buf = Vec::with_capacity((end - start) * N); - for item in &slice[start..end] { - buf.extend_from_slice(&to_bytes(item)); - } - Ok(buf) - } - - let (vector_type, data) = match vector.vector_type { - VectorType::Float32Dense => ( - VectorType::Float32Dense, - extract_bytes::(vector.as_f32_slice(), start_idx, end_idx, |v| { - v.to_le_bytes() - })?, - ), - VectorType::Float64Dense => ( - VectorType::Float64Dense, - extract_bytes::(vector.as_f64_slice(), start_idx, end_idx, |v| { - v.to_le_bytes() - })?, - ), - }; - - Ok(Vector { - vector_type, - dims: end_idx - start_idx, - data, - }) -} - #[cfg(test)] mod tests { + use crate::vector::operations; + use super::*; use quickcheck::{Arbitrary, Gen}; use quickcheck_macros::quickcheck; @@ -659,7 +509,7 @@ mod tests { /// - Assumes vectors are well-formed (same type and dimension) /// - The distance must be between 0 and 2 fn test_vector_distance(v1: &Vector, v2: &Vector) -> bool { - match do_vector_distance_cos(v1, v2) { + match operations::distance_cos::vector_distance_cos(v1, v2) { Ok(distance) => (0.0..=2.0).contains(&distance), Err(_) => true, } @@ -689,143 +539,6 @@ mod tests { assert_eq!(vector.vector_type, VectorType::Float32Dense); } - fn float32_vec_from(slice: &[f32]) -> Vector { - let mut data = Vec::new(); - for &v in slice { - data.extend_from_slice(&v.to_le_bytes()); - } - - Vector { - vector_type: VectorType::Float32Dense, - dims: slice.len(), - data, - } - } - - fn f32_slice_from_vector(vector: &Vector) -> Vec { - vector.as_f32_slice().to_vec() - } - - #[test] - fn test_vector_concat_normal_case() { - let v1 = float32_vec_from(&[1.0, 2.0, 3.0]); - let v2 = float32_vec_from(&[4.0, 5.0, 6.0]); - - let result = vector_concat(&v1, &v2).unwrap(); - - assert_eq!(result.dims, 6); - assert_eq!(result.vector_type, VectorType::Float32Dense); - assert_eq!( - f32_slice_from_vector(&result), - vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0] - ); - } - - #[test] - fn test_vector_concat_empty_left() { - let v1 = float32_vec_from(&[]); - let v2 = float32_vec_from(&[4.0, 5.0]); - - let result = vector_concat(&v1, &v2).unwrap(); - - assert_eq!(result.dims, 2); - assert_eq!(f32_slice_from_vector(&result), vec![4.0, 5.0]); - } - - #[test] - fn test_vector_concat_empty_right() { - let v1 = float32_vec_from(&[1.0, 2.0]); - let v2 = float32_vec_from(&[]); - - let result = vector_concat(&v1, &v2).unwrap(); - - assert_eq!(result.dims, 2); - assert_eq!(f32_slice_from_vector(&result), vec![1.0, 2.0]); - } - - #[test] - fn test_vector_concat_both_empty() { - let v1 = float32_vec_from(&[]); - let v2 = float32_vec_from(&[]); - let result = vector_concat(&v1, &v2).unwrap(); - assert_eq!(result.dims, 0); - assert_eq!(f32_slice_from_vector(&result), Vec::::new()); - } - - #[test] - fn test_vector_concat_different_lengths() { - let v1 = float32_vec_from(&[1.0]); - let v2 = float32_vec_from(&[2.0, 3.0, 4.0]); - - let result = vector_concat(&v1, &v2).unwrap(); - - assert_eq!(result.dims, 4); - assert_eq!(f32_slice_from_vector(&result), vec![1.0, 2.0, 3.0, 4.0]); - } - - #[test] - fn test_vector_slice_normal_case() { - let input_vec = float32_vec_from(&[1.0, 2.0, 3.0, 4.0, 5.0]); - let result = vector_slice(&input_vec, 1, 4).unwrap(); - - assert_eq!(result.dims, 3); - assert_eq!(f32_slice_from_vector(&result), vec![2.0, 3.0, 4.0]); - } - - #[test] - fn test_vector_slice_full_range() { - let input_vec = float32_vec_from(&[10.0, 20.0, 30.0]); - let result = vector_slice(&input_vec, 0, 3).unwrap(); - - assert_eq!(result.dims, 3); - assert_eq!(f32_slice_from_vector(&result), vec![10.0, 20.0, 30.0]); - } - - #[test] - fn test_vector_slice_single_element() { - let input_vec = float32_vec_from(&[4.40, 2.71]); - let result = vector_slice(&input_vec, 1, 2).unwrap(); - - assert_eq!(result.dims, 1); - assert_eq!(f32_slice_from_vector(&result), vec![2.71]); - } - - #[test] - fn test_vector_slice_empty_list() { - let input_vec = float32_vec_from(&[1.0, 2.0]); - let result = vector_slice(&input_vec, 2, 2).unwrap(); - - assert_eq!(result.dims, 0); - } - - #[test] - fn test_vector_slice_zero_length() { - let input_vec = float32_vec_from(&[1.0, 2.0, 3.0]); - let err = vector_slice(&input_vec, 2, 1); - assert!(err.is_err(), "Expected error on zero-length range"); - } - - #[test] - fn test_vector_slice_out_of_bounds() { - let input_vec = float32_vec_from(&[1.0, 2.0]); - let err = vector_slice(&input_vec, 0, 5); - assert!(err.is_err()); - } - - #[test] - fn test_vector_slice_start_out_of_bounds() { - let input_vec = float32_vec_from(&[1.0, 2.0]); - let err = vector_slice(&input_vec, 5, 5); - assert!(err.is_err()); - } - - #[test] - fn test_vector_slice_end_out_of_bounds() { - let input_vec = float32_vec_from(&[1.0, 2.0]); - let err = vector_slice(&input_vec, 1, 3); - assert!(err.is_err()); - } - #[quickcheck] fn prop_vector_text_roundtrip_2d(v: ArbitraryVector<2>) -> bool { test_vector_text_roundtrip(v.into()) From a2f4376bd2af1e5d64fcf77494f7eafda4e90735 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 16:18:53 +0400 Subject: [PATCH 103/428] move more operations to the operations/ folder --- core/vector/mod.rs | 34 ++----- core/vector/operations/deserialize.rs | 0 core/vector/operations/mod.rs | 3 + core/vector/operations/serialize.rs | 22 ++++ core/vector/operations/text.rs | 96 ++++++++++++++++++ core/vector/vector_types.rs | 141 +++----------------------- 6 files changed, 144 insertions(+), 152 deletions(-) create mode 100644 core/vector/operations/deserialize.rs create mode 100644 core/vector/operations/serialize.rs create mode 100644 core/vector/operations/text.rs diff --git a/core/vector/mod.rs b/core/vector/mod.rs index 5a722c34c..dae03a7e1 100644 --- a/core/vector/mod.rs +++ b/core/vector/mod.rs @@ -13,15 +13,8 @@ pub fn vector32(args: &[Register]) -> Result { "vector32 requires exactly one argument".to_string(), )); } - let x = parse_vector(&args[0], Some(VectorType::Float32Dense))?; - // Extract the Vec from Value - if let Value::Blob(data) = vector_serialize_f32(x) { - Ok(Value::Blob(data)) - } else { - Err(LimboError::ConversionError( - "Failed to serialize vector".to_string(), - )) - } + let vector = parse_vector(&args[0], Some(VectorType::Float32Dense))?; + Ok(operations::serialize::vector_serialize(vector)) } pub fn vector64(args: &[Register]) -> Result { @@ -30,15 +23,8 @@ pub fn vector64(args: &[Register]) -> Result { "vector64 requires exactly one argument".to_string(), )); } - let x = parse_vector(&args[0], Some(VectorType::Float64Dense))?; - // Extract the Vec from Value - if let Value::Blob(data) = vector_serialize_f64(x) { - Ok(Value::Blob(data)) - } else { - Err(LimboError::ConversionError( - "Failed to serialize vector".to_string(), - )) - } + let vector = parse_vector(&args[0], Some(VectorType::Float64Dense))?; + Ok(operations::serialize::vector_serialize(vector)) } pub fn vector_extract(args: &[Register]) -> Result { @@ -63,7 +49,7 @@ pub fn vector_extract(args: &[Register]) -> Result { let vector_type = vector_type(blob)?; let vector = vector_deserialize(vector_type, blob)?; - Ok(Value::build_text(vector_to_text(&vector))) + Ok(Value::build_text(operations::text::vector_to_text(&vector))) } pub fn vector_distance_cos(args: &[Register]) -> Result { @@ -102,10 +88,7 @@ pub fn vector_concat(args: &[Register]) -> Result { let x = parse_vector(&args[0], None)?; let y = parse_vector(&args[1], None)?; let vector = operations::concat::vector_concat(&x, &y)?; - match vector.vector_type { - VectorType::Float32Dense => Ok(vector_serialize_f32(vector)), - VectorType::Float64Dense => Ok(vector_serialize_f64(vector)), - } + Ok(operations::serialize::vector_serialize(vector)) } pub fn vector_slice(args: &[Register]) -> Result { @@ -136,8 +119,5 @@ pub fn vector_slice(args: &[Register]) -> Result { let result = operations::slice::vector_slice(&vector, start_index as usize, end_index as usize)?; - Ok(match result.vector_type { - VectorType::Float32Dense => vector_serialize_f32(result), - VectorType::Float64Dense => vector_serialize_f64(result), - }) + Ok(operations::serialize::vector_serialize(result)) } diff --git a/core/vector/operations/deserialize.rs b/core/vector/operations/deserialize.rs new file mode 100644 index 000000000..e69de29bb diff --git a/core/vector/operations/mod.rs b/core/vector/operations/mod.rs index f136eb9bb..55b249aa8 100644 --- a/core/vector/operations/mod.rs +++ b/core/vector/operations/mod.rs @@ -1,4 +1,7 @@ pub mod concat; +pub mod deserialize; pub mod distance_cos; pub mod distance_l2; +pub mod serialize; pub mod slice; +pub mod text; diff --git a/core/vector/operations/serialize.rs b/core/vector/operations/serialize.rs new file mode 100644 index 000000000..fca0bae0b --- /dev/null +++ b/core/vector/operations/serialize.rs @@ -0,0 +1,22 @@ +use crate::{ + vector::vector_types::{Vector, VectorType}, + Value, +}; + +pub fn vector_serialize(x: Vector) -> Value { + match x.vector_type { + VectorType::Float32Dense => vector_f32_serialize(x), + VectorType::Float64Dense => vector_f64_serialize(x), + } +} + +fn vector_f64_serialize(x: Vector) -> Value { + let mut blob = Vec::with_capacity(x.dims * 8 + 1); + blob.extend_from_slice(&x.data); + blob.push(2); + Value::from_blob(blob) +} + +fn vector_f32_serialize(x: Vector) -> Value { + Value::from_blob(x.data) +} diff --git a/core/vector/operations/text.rs b/core/vector/operations/text.rs new file mode 100644 index 000000000..e522e89af --- /dev/null +++ b/core/vector/operations/text.rs @@ -0,0 +1,96 @@ +use crate::{ + vector::vector_types::{Vector, VectorType}, + LimboError, Result, +}; + +pub fn vector_to_text(vector: &Vector) -> String { + let mut text = String::new(); + text.push('['); + match vector.vector_type { + VectorType::Float32Dense => { + let data = vector.as_f32_slice(); + for (i, value) in data.iter().enumerate().take(vector.dims) { + text.push_str(&value.to_string()); + if i < vector.dims - 1 { + text.push(','); + } + } + } + VectorType::Float64Dense => { + let data = vector.as_f64_slice(); + for (i, value) in data.iter().enumerate().take(vector.dims) { + text.push_str(&value.to_string()); + if i < vector.dims - 1 { + text.push(','); + } + } + } + } + text.push(']'); + text +} + +/// Parse a vector in text representation into a Vector. +/// +/// The format of a vector in text representation looks as follows: +/// +/// ```console +/// [1.0, 2.0, 3.0] +/// ``` +pub fn vector_from_text(vector_type: VectorType, text: &str) -> Result { + let text = text.trim(); + let mut chars = text.chars(); + if chars.next() != Some('[') || chars.last() != Some(']') { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + let mut data: Vec = Vec::new(); + let text = &text[1..text.len() - 1]; + if text.trim().is_empty() { + return Ok(Vector { + vector_type, + dims: 0, + data, + }); + } + let xs = text.split(','); + for x in xs { + let x = x.trim(); + if x.is_empty() { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + match vector_type { + VectorType::Float32Dense => { + let x = x + .parse::() + .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; + if !x.is_finite() { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + data.extend_from_slice(&x.to_le_bytes()); + } + VectorType::Float64Dense => { + let x = x + .parse::() + .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; + if !x.is_finite() { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + data.extend_from_slice(&x.to_le_bytes()); + } + }; + } + let dims = vector_type.size_to_dims(data.len()); + Ok(Vector { + vector_type, + dims, + data, + }) +} diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index d396414e7..c13d7e1ba 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -1,5 +1,6 @@ -use crate::types::{Value, ValueType}; +use crate::types::ValueType; use crate::vdbe::Register; +use crate::vector::operations; use crate::{LimboError, Result}; #[derive(Debug, Clone, PartialEq, Copy)] @@ -82,81 +83,11 @@ impl Vector { } } -/// Parse a vector in text representation into a Vector. -/// -/// The format of a vector in text representation looks as follows: -/// -/// ```console -/// [1.0, 2.0, 3.0] -/// ``` -pub fn parse_string_vector(vector_type: VectorType, value: &Value) -> Result { - let Some(text) = value.to_text() else { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - }; - let text = text.trim(); - let mut chars = text.chars(); - if chars.next() != Some('[') || chars.last() != Some(']') { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - let mut data: Vec = Vec::new(); - let text = &text[1..text.len() - 1]; - if text.trim().is_empty() { - return Ok(Vector { - vector_type, - dims: 0, - data, - }); - } - let xs = text.split(','); - for x in xs { - let x = x.trim(); - if x.is_empty() { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - match vector_type { - VectorType::Float32Dense => { - let x = x - .parse::() - .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; - if !x.is_finite() { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - data.extend_from_slice(&x.to_le_bytes()); - } - VectorType::Float64Dense => { - let x = x - .parse::() - .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; - if !x.is_finite() { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - data.extend_from_slice(&x.to_le_bytes()); - } - }; - } - let dims = vector_type.size_to_dims(data.len()); - Ok(Vector { - vector_type, - dims, - data, - }) -} - pub fn parse_vector(value: &Register, vec_ty: Option) -> Result { match value.get_value().value_type() { - ValueType::Text => parse_string_vector( + ValueType::Text => operations::text::vector_from_text( vec_ty.unwrap_or(VectorType::Float32Dense), - value.get_value(), + value.get_value().to_text().expect("value must be text"), ), ValueType::Blob => { let Some(blob) = value.get_value().to_blob() else { @@ -180,33 +111,6 @@ pub fn parse_vector(value: &Register, vec_ty: Option) -> Result String { - let mut text = String::new(); - text.push('['); - match vector.vector_type { - VectorType::Float32Dense => { - let data = vector.as_f32_slice(); - for (i, value) in data.iter().enumerate().take(vector.dims) { - text.push_str(&value.to_string()); - if i < vector.dims - 1 { - text.push(','); - } - } - } - VectorType::Float64Dense => { - let data = vector.as_f64_slice(); - for (i, value) in data.iter().enumerate().take(vector.dims) { - text.push_str(&value.to_string()); - if i < vector.dims - 1 { - text.push(','); - } - } - } - } - text.push(']'); - text -} - pub fn vector_deserialize(vector_type: VectorType, blob: &[u8]) -> Result { match vector_type { VectorType::Float32Dense => vector_deserialize_f32(blob), @@ -214,13 +118,6 @@ pub fn vector_deserialize(vector_type: VectorType, blob: &[u8]) -> Result Value { - let mut blob = Vec::with_capacity(x.dims * 8 + 1); - blob.extend_from_slice(&x.data); - blob.push(2); - Value::from_blob(blob) -} - pub fn vector_deserialize_f64(blob: &[u8]) -> Result { Ok(Vector { vector_type: VectorType::Float64Dense, @@ -229,10 +126,6 @@ pub fn vector_deserialize_f64(blob: &[u8]) -> Result { }) } -pub fn vector_serialize_f32(x: Vector) -> Value { - Value::from_blob(x.data) -} - pub fn vector_deserialize_f32(blob: &[u8]) -> Result { Ok(Vector { vector_type: VectorType::Float32Dense, @@ -385,11 +278,7 @@ mod tests { /// Test if the vector type identification is correct for a given vector. fn test_vector_type(v: Vector) -> bool { let vtype = v.vector_type; - let value = match &vtype { - VectorType::Float32Dense => vector_serialize_f32(v), - VectorType::Float64Dense => vector_serialize_f64(v), - }; - + let value = operations::serialize::vector_serialize(v); let blob = value.to_blob().unwrap(); match vector_type(blob) { Ok(detected_type) => detected_type == vtype, @@ -517,24 +406,27 @@ mod tests { #[test] fn parse_string_vector_zero_length() { - let value = Value::from_text("[]"); - let vector = parse_string_vector(VectorType::Float32Dense, &value).unwrap(); + let vector = operations::text::vector_from_text(VectorType::Float32Dense, "[]").unwrap(); assert_eq!(vector.dims, 0); assert_eq!(vector.vector_type, VectorType::Float32Dense); } #[test] fn test_parse_string_vector_valid_whitespace() { - let value = Value::from_text(" [ 1.0 , 2.0 , 3.0 ] "); - let vector = parse_string_vector(VectorType::Float32Dense, &value).unwrap(); + let vector = operations::text::vector_from_text( + VectorType::Float32Dense, + " [ 1.0 , 2.0 , 3.0 ] ", + ) + .unwrap(); assert_eq!(vector.dims, 3); assert_eq!(vector.vector_type, VectorType::Float32Dense); } #[test] fn test_parse_string_vector_valid() { - let value = Value::from_text("[1.0, 2.0, 3.0]"); - let vector = parse_string_vector(VectorType::Float32Dense, &value).unwrap(); + let vector = + operations::text::vector_from_text(VectorType::Float32Dense, "[1.0, 2.0, 3.0]") + .unwrap(); assert_eq!(vector.dims, 3); assert_eq!(vector.vector_type, VectorType::Float32Dense); } @@ -567,11 +459,10 @@ mod tests { /// Test that a vector can be converted to text and back without loss of precision fn test_vector_text_roundtrip(v: Vector) -> bool { // Convert to text - let text = vector_to_text(&v); + let text = operations::text::vector_to_text(&v); // Parse back from text - let value = Value::from_text(&text); - let parsed = parse_string_vector(v.vector_type, &value); + let parsed = operations::text::vector_from_text(v.vector_type, &text); match parsed { Ok(parsed_vector) => { From bcca4045511dd3d3e1fd3db7c14613300ced4bf9 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 15:34:27 +0300 Subject: [PATCH 104/428] Avoid string allocation in sorter record comparison --- core/vdbe/sorter.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index 0294787f1..a55cb656a 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -686,8 +686,9 @@ impl Ord for SortableImmutableRecord { let cmp = match (this_key_value, other_key_value) { (ValueRef::Text(left, _), ValueRef::Text(right, _)) => collation.compare_strings( - &String::from_utf8_lossy(left), - &String::from_utf8_lossy(right), + // SAFETY: these were checked to be valid UTF-8 on construction. + unsafe { std::str::from_utf8_unchecked(left) }, + unsafe { std::str::from_utf8_unchecked(right) }, ), _ => this_key_value.partial_cmp(&other_key_value).unwrap(), }; From 8584ee18a3216da4bd24b2701a4cadebce7dfb04 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 16:31:49 +0400 Subject: [PATCH 105/428] refactor parsing/deserialization --- core/vector/mod.rs | 24 ++++- core/vector/operations/text.rs | 7 +- core/vector/vector_types.rs | 182 ++++++++++----------------------- 3 files changed, 75 insertions(+), 138 deletions(-) diff --git a/core/vector/mod.rs b/core/vector/mod.rs index dae03a7e1..9fcba41d1 100644 --- a/core/vector/mod.rs +++ b/core/vector/mod.rs @@ -1,4 +1,5 @@ use crate::types::Value; +use crate::types::ValueType; use crate::vdbe::Register; use crate::LimboError; use crate::Result; @@ -7,6 +8,26 @@ pub mod operations; pub mod vector_types; use vector_types::*; +pub fn parse_vector(value: &Register, vec_ty: Option) -> Result { + match value.get_value().value_type() { + ValueType::Text => operations::text::vector_from_text( + vec_ty.unwrap_or(VectorType::Float32Dense), + value.get_value().to_text().expect("value must be text"), + ), + ValueType::Blob => { + let Some(blob) = value.get_value().to_blob() else { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + }; + Vector::from_blob(blob.to_vec()) + } + _ => Err(LimboError::ConversionError( + "Invalid vector type".to_string(), + )), + } +} + pub fn vector32(args: &[Register]) -> Result { if args.len() != 1 { return Err(LimboError::ConversionError( @@ -47,8 +68,7 @@ pub fn vector_extract(args: &[Register]) -> Result { return Ok(Value::build_text("[]")); } - let vector_type = vector_type(blob)?; - let vector = vector_deserialize(vector_type, blob)?; + let vector = Vector::from_blob(blob.to_vec())?; Ok(Value::build_text(operations::text::vector_to_text(&vector))) } diff --git a/core/vector/operations/text.rs b/core/vector/operations/text.rs index e522e89af..32eb0c0e8 100644 --- a/core/vector/operations/text.rs +++ b/core/vector/operations/text.rs @@ -87,10 +87,5 @@ pub fn vector_from_text(vector_type: VectorType, text: &str) -> Result { } }; } - let dims = vector_type.size_to_dims(data.len()); - Ok(Vector { - vector_type, - dims, - data, - }) + Vector::from_data(vector_type, data) } diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index c13d7e1ba..95380e805 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -1,6 +1,3 @@ -use crate::types::ValueType; -use crate::vdbe::Register; -use crate::vector::operations; use crate::{LimboError, Result}; #[derive(Debug, Clone, PartialEq, Copy)] @@ -9,15 +6,6 @@ pub enum VectorType { Float64Dense, } -impl VectorType { - pub fn size_to_dims(&self, size: usize) -> usize { - match self { - VectorType::Float32Dense => size / 4, - VectorType::Float64Dense => size / 8, - } - } -} - #[derive(Debug)] pub struct Vector { pub vector_type: VectorType, @@ -26,6 +14,55 @@ pub struct Vector { } impl Vector { + pub fn vector_type(mut blob: Vec) -> Result<(VectorType, Vec)> { + // Even-sized blobs are always float32. + if blob.len() % 2 == 0 { + return Ok((VectorType::Float32Dense, blob)); + } + // Odd-sized blobs have type byte at the end + let vector_type = blob.pop().unwrap(); + match vector_type { + 1 => Ok((VectorType::Float32Dense, blob)), + 2 => Ok((VectorType::Float64Dense, blob)), + _ => Err(LimboError::ConversionError( + "Invalid vector type".to_string(), + )), + } + } + pub fn from_blob(blob: Vec) -> Result { + let (vector_type, data) = Self::vector_type(blob)?; + Self::from_data(vector_type, data) + } + pub fn from_data(vector_type: VectorType, data: Vec) -> Result { + match vector_type { + VectorType::Float32Dense => { + if data.len() % 4 != 0 { + return Err(LimboError::InvalidArgument(format!( + "f32 dense vector unexpected data length: {}", + data.len(), + ))); + } + Ok(Vector { + vector_type, + dims: data.len() / 4, + data, + }) + } + VectorType::Float64Dense => { + if data.len() % 8 != 0 { + return Err(LimboError::InvalidArgument(format!( + "f64 dense vector unexpected data length: {}", + data.len(), + ))); + } + Ok(Vector { + vector_type, + dims: data.len() / 8, + data, + }) + } + } + } /// # Safety /// /// This method is used to reinterpret the underlying `Vec` data @@ -83,88 +120,6 @@ impl Vector { } } -pub fn parse_vector(value: &Register, vec_ty: Option) -> Result { - match value.get_value().value_type() { - ValueType::Text => operations::text::vector_from_text( - vec_ty.unwrap_or(VectorType::Float32Dense), - value.get_value().to_text().expect("value must be text"), - ), - ValueType::Blob => { - let Some(blob) = value.get_value().to_blob() else { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - }; - let vector_type = vector_type(blob)?; - if let Some(vec_ty) = vec_ty { - if vec_ty != vector_type { - return Err(LimboError::ConversionError( - "Invalid vector type".to_string(), - )); - } - } - vector_deserialize(vector_type, blob) - } - _ => Err(LimboError::ConversionError( - "Invalid vector type".to_string(), - )), - } -} - -pub fn vector_deserialize(vector_type: VectorType, blob: &[u8]) -> Result { - match vector_type { - VectorType::Float32Dense => vector_deserialize_f32(blob), - VectorType::Float64Dense => vector_deserialize_f64(blob), - } -} - -pub fn vector_deserialize_f64(blob: &[u8]) -> Result { - Ok(Vector { - vector_type: VectorType::Float64Dense, - dims: (blob.len() - 1) / 8, - data: blob[..blob.len() - 1].to_vec(), - }) -} - -pub fn vector_deserialize_f32(blob: &[u8]) -> Result { - Ok(Vector { - vector_type: VectorType::Float32Dense, - dims: blob.len() / 4, - data: blob.to_vec(), - }) -} - -pub fn vector_type(blob: &[u8]) -> Result { - // Even-sized blobs are always float32. - if blob.len() % 2 == 0 { - return Ok(VectorType::Float32Dense); - } - // Odd-sized blobs have type byte at the end - let (data_blob, type_byte) = blob.split_at(blob.len() - 1); - let vector_type = type_byte[0]; - match vector_type { - 1 => { - if data_blob.len() % 4 != 0 { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - Ok(VectorType::Float32Dense) - } - 2 => { - if data_blob.len() % 8 != 0 { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - Ok(VectorType::Float64Dense) - } - _ => Err(LimboError::ConversionError( - "Invalid vector type".to_string(), - )), - } -} - #[cfg(test)] mod tests { use crate::vector::operations; @@ -279,9 +234,9 @@ mod tests { fn test_vector_type(v: Vector) -> bool { let vtype = v.vector_type; let value = operations::serialize::vector_serialize(v); - let blob = value.to_blob().unwrap(); - match vector_type(blob) { - Ok(detected_type) => detected_type == vtype, + let blob = value.to_blob().unwrap().to_vec(); + match Vector::vector_type(blob) { + Ok((detected_type, _)) => detected_type == vtype, Err(_) => false, } } @@ -329,39 +284,6 @@ mod tests { } } - // Test size_to_dims calculation with different dimensions - #[quickcheck] - fn prop_size_to_dims_calculation_2d(v: ArbitraryVector<2>) -> bool { - test_size_to_dims::<2>(v.into()) - } - - #[quickcheck] - fn prop_size_to_dims_calculation_3d(v: ArbitraryVector<3>) -> bool { - test_size_to_dims::<3>(v.into()) - } - - #[quickcheck] - fn prop_size_to_dims_calculation_4d(v: ArbitraryVector<4>) -> bool { - test_size_to_dims::<4>(v.into()) - } - - #[quickcheck] - fn prop_size_to_dims_calculation_100d(v: ArbitraryVector<100>) -> bool { - test_size_to_dims::<100>(v.into()) - } - - #[quickcheck] - fn prop_size_to_dims_calculation_1536d(v: ArbitraryVector<1536>) -> bool { - test_size_to_dims::<1536>(v.into()) - } - - /// Test if the size_to_dims calculation is correct for a given vector. - fn test_size_to_dims(v: Vector) -> bool { - let size = v.data.len(); - let calculated_dims = v.vector_type.size_to_dims(size); - calculated_dims == DIMS - } - #[quickcheck] fn prop_vector_distance_safety_2d(v1: ArbitraryVector<2>, v2: ArbitraryVector<2>) -> bool { test_vector_distance::<2>(&v1.into(), &v2.into()) From 14e104f8308ae87f0ba5600709c930e032d01027 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 16:56:36 +0400 Subject: [PATCH 106/428] add convert operation --- core/vector/mod.rs | 2 ++ core/vector/operations/concat.rs | 16 ++++++++++++---- core/vector/operations/convert.rs | 25 +++++++++++++++++++++++++ core/vector/operations/deserialize.rs | 0 core/vector/operations/mod.rs | 2 +- 5 files changed, 40 insertions(+), 5 deletions(-) create mode 100644 core/vector/operations/convert.rs delete mode 100644 core/vector/operations/deserialize.rs diff --git a/core/vector/mod.rs b/core/vector/mod.rs index 9fcba41d1..5087cacfd 100644 --- a/core/vector/mod.rs +++ b/core/vector/mod.rs @@ -35,6 +35,7 @@ pub fn vector32(args: &[Register]) -> Result { )); } let vector = parse_vector(&args[0], Some(VectorType::Float32Dense))?; + let vector = operations::convert::vector_convert(vector, VectorType::Float32Dense)?; Ok(operations::serialize::vector_serialize(vector)) } @@ -45,6 +46,7 @@ pub fn vector64(args: &[Register]) -> Result { )); } let vector = parse_vector(&args[0], Some(VectorType::Float64Dense))?; + let vector = operations::convert::vector_convert(vector, VectorType::Float64Dense)?; Ok(operations::serialize::vector_serialize(vector)) } diff --git a/core/vector/operations/concat.rs b/core/vector/operations/concat.rs index 3b4ebf302..c8cfda2aa 100644 --- a/core/vector/operations/concat.rs +++ b/core/vector/operations/concat.rs @@ -1,4 +1,7 @@ -use crate::{vector::vector_types::Vector, LimboError, Result}; +use crate::{ + vector::vector_types::{Vector, VectorType}, + LimboError, Result, +}; pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { if v1.vector_type != v2.vector_type { @@ -7,9 +10,14 @@ pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { )); } - let mut data = Vec::with_capacity(v1.data.len() + v2.data.len()); - data.extend_from_slice(&v1.data); - data.extend_from_slice(&v2.data); + let data = match v1.vector_type { + VectorType::Float32Dense | VectorType::Float64Dense => { + let mut data = Vec::with_capacity(v1.data.len() + v2.data.len()); + data.extend_from_slice(&v1.data); + data.extend_from_slice(&v2.data); + data + } + }; Ok(Vector { vector_type: v1.vector_type, diff --git a/core/vector/operations/convert.rs b/core/vector/operations/convert.rs new file mode 100644 index 000000000..3703c69a4 --- /dev/null +++ b/core/vector/operations/convert.rs @@ -0,0 +1,25 @@ +use crate::{ + vector::vector_types::{Vector, VectorType}, + Result, +}; + +pub fn vector_convert(v: Vector, target_type: VectorType) -> Result { + match (v.vector_type, target_type) { + (VectorType::Float32Dense, VectorType::Float32Dense) + | (VectorType::Float64Dense, VectorType::Float64Dense) => Ok(v), + (VectorType::Float32Dense, VectorType::Float64Dense) => { + let mut data = Vec::with_capacity(v.dims * 8); + for &x in v.as_f32_slice() { + data.extend_from_slice(&f64::to_le_bytes(x as f64)); + } + Vector::from_data(target_type, data) + } + (VectorType::Float64Dense, VectorType::Float32Dense) => { + let mut data = Vec::with_capacity(v.dims * 4); + for &x in v.as_f32_slice() { + data.extend_from_slice(&f64::to_le_bytes(x as f64)); + } + Vector::from_data(target_type, data) + } + } +} diff --git a/core/vector/operations/deserialize.rs b/core/vector/operations/deserialize.rs deleted file mode 100644 index e69de29bb..000000000 diff --git a/core/vector/operations/mod.rs b/core/vector/operations/mod.rs index 55b249aa8..c0d10a0f0 100644 --- a/core/vector/operations/mod.rs +++ b/core/vector/operations/mod.rs @@ -1,5 +1,5 @@ pub mod concat; -pub mod deserialize; +pub mod convert; pub mod distance_cos; pub mod distance_l2; pub mod serialize; From d7f3a450ad7bca4f05d2274fbf7102d5ae68fcd8 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 17:06:49 +0400 Subject: [PATCH 107/428] return Nan for cosine distance instead of error - errors are hard to handle in case of some scan operations (something went wrong in the middle - whoe query aborted) - it will be more flexibly if we will return NaN and let user handle situation --- core/vector/operations/distance_cos.rs | 94 ++++++++++++-------------- core/vector/operations/distance_l2.rs | 1 - 2 files changed, 42 insertions(+), 53 deletions(-) diff --git a/core/vector/operations/distance_cos.rs b/core/vector/operations/distance_cos.rs index 7f53edfc6..1bee43308 100644 --- a/core/vector/operations/distance_cos.rs +++ b/core/vector/operations/distance_cos.rs @@ -4,37 +4,35 @@ use crate::{ }; pub fn vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { - match v1.vector_type { - VectorType::Float32Dense => vector_f32_distance_cos(v1, v2), - VectorType::Float64Dense => vector_f64_distance_cos(v1, v2), - } -} - -fn vector_f32_distance_cos(v1: &Vector, v2: &Vector) -> Result { if v1.dims != v2.dims { return Err(LimboError::ConversionError( - "Invalid vector dimensions".to_string(), + "Vectors must have the same dimensions".to_string(), )); } if v1.vector_type != v2.vector_type { return Err(LimboError::ConversionError( - "Invalid vector type".to_string(), + "Vectors must be of the same type".to_string(), )); } + match v1.vector_type { + VectorType::Float32Dense => Ok(vector_f32_distance_cos( + v1.as_f32_slice(), + v2.as_f32_slice(), + )), + VectorType::Float64Dense => Ok(vector_f64_distance_cos( + v1.as_f64_slice(), + v2.as_f64_slice(), + )), + } +} + +fn vector_f32_distance_cos(v1: &[f32], v2: &[f32]) -> f64 { let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); - let v1_data = v1.as_f32_slice(); - let v2_data = v2.as_f32_slice(); - // Check for non-finite values - if v1_data.iter().any(|x| !x.is_finite()) || v2_data.iter().any(|x| !x.is_finite()) { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - - for i in 0..v1.dims { - let e1 = v1_data[i]; - let e2 = v2_data[i]; + let dims = v1.len(); + for i in 0..dims { + let e1 = v1[i]; + let e2 = v2[i]; dot += e1 * e2; norm1 += e1 * e1; norm2 += e2 * e2; @@ -42,39 +40,19 @@ fn vector_f32_distance_cos(v1: &Vector, v2: &Vector) -> Result { // Check for zero norms to avoid division by zero if norm1 == 0.0 || norm2 == 0.0 { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); + return f64::NAN; } - Ok(1.0 - (dot / (norm1 * norm2).sqrt()) as f64) + 1.0 - (dot / (norm1 * norm2).sqrt()) as f64 } -fn vector_f64_distance_cos(v1: &Vector, v2: &Vector) -> Result { - if v1.dims != v2.dims { - return Err(LimboError::ConversionError( - "Invalid vector dimensions".to_string(), - )); - } - if v1.vector_type != v2.vector_type { - return Err(LimboError::ConversionError( - "Invalid vector type".to_string(), - )); - } +fn vector_f64_distance_cos(v1: &[f64], v2: &[f64]) -> f64 { let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); - let v1_data = v1.as_f64_slice(); - let v2_data = v2.as_f64_slice(); - // Check for non-finite values - if v1_data.iter().any(|x| !x.is_finite()) || v2_data.iter().any(|x| !x.is_finite()) { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - - for i in 0..v1.dims { - let e1 = v1_data[i]; - let e2 = v2_data[i]; + let dims = v1.len(); + for i in 0..dims { + let e1 = v1[i]; + let e2 = v2[i]; dot += e1 * e2; norm1 += e1 * e1; norm2 += e2 * e2; @@ -82,10 +60,22 @@ fn vector_f64_distance_cos(v1: &Vector, v2: &Vector) -> Result { // Check for zero norms if norm1 == 0.0 || norm2 == 0.0 { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); + return f64::NAN; } - Ok(1.0 - (dot / (norm1 * norm2).sqrt())) + 1.0 - (dot / (norm1 * norm2).sqrt()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_distance_cos_f32() { + assert!(vector_f32_distance_cos(&[], &[]).is_nan()); + assert!(vector_f32_distance_cos(&[1.0, 2.0], &[0.0, 0.0]).is_nan()); + assert_eq!(vector_f32_distance_cos(&[1.0, 2.0], &[1.0, 2.0]), 0.0); + assert_eq!(vector_f32_distance_cos(&[1.0, 2.0], &[-1.0, -2.0]), 2.0); + assert_eq!(vector_f32_distance_cos(&[1.0, 2.0], &[-2.0, 1.0]), 1.0); + } } diff --git a/core/vector/operations/distance_l2.rs b/core/vector/operations/distance_l2.rs index 2eed64cd3..a0e8cde04 100644 --- a/core/vector/operations/distance_l2.rs +++ b/core/vector/operations/distance_l2.rs @@ -4,7 +4,6 @@ use crate::{ }; pub fn vector_distance_l2(v1: &Vector, v2: &Vector) -> Result { - // Validate that both vectors have the same dimensions and type if v1.dims != v2.dims { return Err(LimboError::ConversionError( "Vectors must have the same dimensions".to_string(), From 9e68fa7f4aa5fcdc07e4670e53e3487043f697ad Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 17:11:13 +0400 Subject: [PATCH 108/428] simplify vector_slice operation --- core/vector/operations/slice.rs | 62 ++++++++++----------------------- 1 file changed, 18 insertions(+), 44 deletions(-) diff --git a/core/vector/operations/slice.rs b/core/vector/operations/slice.rs index 054edce4e..698cf07e7 100644 --- a/core/vector/operations/slice.rs +++ b/core/vector/operations/slice.rs @@ -3,51 +3,25 @@ use crate::{ LimboError, Result, }; -pub fn vector_slice(vector: &Vector, start_idx: usize, end_idx: usize) -> Result { - fn extract_bytes( - slice: &[T], - start: usize, - end: usize, - to_bytes: impl Fn(&T) -> [u8; N], - ) -> Result> { - if start > end { - return Err(LimboError::InvalidArgument( - "start index must not be greater than end index".into(), - )); - } - if end > slice.len() || end < start { - return Err(LimboError::ConversionError( - "vector_slice range out of bounds".into(), - )); - } - - let mut buf = Vec::with_capacity((end - start) * N); - for item in &slice[start..end] { - buf.extend_from_slice(&to_bytes(item)); - } - Ok(buf) +pub fn vector_slice(vector: &Vector, start: usize, end: usize) -> Result { + if start > end { + return Err(LimboError::InvalidArgument( + "start index must not be greater than end index".into(), + )); + } + if end > vector.dims || end < start { + return Err(LimboError::ConversionError( + "vector_slice range out of bounds".into(), + )); + } + match vector.vector_type { + VectorType::Float32Dense => { + Vector::from_data(vector.vector_type, vector.data[start * 4..end * 4].to_vec()) + } + VectorType::Float64Dense => { + Vector::from_data(vector.vector_type, vector.data[start * 8..end * 8].to_vec()) + } } - - let (vector_type, data) = match vector.vector_type { - VectorType::Float32Dense => ( - VectorType::Float32Dense, - extract_bytes::(vector.as_f32_slice(), start_idx, end_idx, |v| { - v.to_le_bytes() - })?, - ), - VectorType::Float64Dense => ( - VectorType::Float64Dense, - extract_bytes::(vector.as_f64_slice(), start_idx, end_idx, |v| { - v.to_le_bytes() - })?, - ), - }; - - Ok(Vector { - vector_type, - dims: end_idx - start_idx, - data, - }) } #[cfg(test)] From 1ebf2b7c8d5492d5f2c90251993a05078f8c0a84 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 17:25:40 +0400 Subject: [PATCH 109/428] add f32 sparse vector type --- core/vector/operations/concat.rs | 1 + core/vector/operations/convert.rs | 1 + core/vector/operations/distance_cos.rs | 1 + core/vector/operations/distance_l2.rs | 1 + core/vector/operations/serialize.rs | 1 + core/vector/operations/slice.rs | 1 + core/vector/operations/text.rs | 2 + core/vector/vector_types.rs | 58 ++++++++++++++++++++++++-- 8 files changed, 63 insertions(+), 3 deletions(-) diff --git a/core/vector/operations/concat.rs b/core/vector/operations/concat.rs index c8cfda2aa..57d159b26 100644 --- a/core/vector/operations/concat.rs +++ b/core/vector/operations/concat.rs @@ -17,6 +17,7 @@ pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { data.extend_from_slice(&v2.data); data } + _ => todo!(), }; Ok(Vector { diff --git a/core/vector/operations/convert.rs b/core/vector/operations/convert.rs index 3703c69a4..cc8855831 100644 --- a/core/vector/operations/convert.rs +++ b/core/vector/operations/convert.rs @@ -21,5 +21,6 @@ pub fn vector_convert(v: Vector, target_type: VectorType) -> Result { } Vector::from_data(target_type, data) } + _ => todo!(), } } diff --git a/core/vector/operations/distance_cos.rs b/core/vector/operations/distance_cos.rs index 1bee43308..b8ccff451 100644 --- a/core/vector/operations/distance_cos.rs +++ b/core/vector/operations/distance_cos.rs @@ -23,6 +23,7 @@ pub fn vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { v1.as_f64_slice(), v2.as_f64_slice(), )), + _ => todo!(), } } diff --git a/core/vector/operations/distance_l2.rs b/core/vector/operations/distance_l2.rs index a0e8cde04..9fa79c6fa 100644 --- a/core/vector/operations/distance_l2.rs +++ b/core/vector/operations/distance_l2.rs @@ -21,6 +21,7 @@ pub fn vector_distance_l2(v1: &Vector, v2: &Vector) -> Result { VectorType::Float64Dense => { Ok(vector_f64_distance_l2(v1.as_f64_slice(), v2.as_f64_slice())) } + _ => todo!(), } } diff --git a/core/vector/operations/serialize.rs b/core/vector/operations/serialize.rs index fca0bae0b..8fdd518d4 100644 --- a/core/vector/operations/serialize.rs +++ b/core/vector/operations/serialize.rs @@ -7,6 +7,7 @@ pub fn vector_serialize(x: Vector) -> Value { match x.vector_type { VectorType::Float32Dense => vector_f32_serialize(x), VectorType::Float64Dense => vector_f64_serialize(x), + _ => todo!(), } } diff --git a/core/vector/operations/slice.rs b/core/vector/operations/slice.rs index 698cf07e7..a347e7f1c 100644 --- a/core/vector/operations/slice.rs +++ b/core/vector/operations/slice.rs @@ -21,6 +21,7 @@ pub fn vector_slice(vector: &Vector, start: usize, end: usize) -> Result VectorType::Float64Dense => { Vector::from_data(vector.vector_type, vector.data[start * 8..end * 8].to_vec()) } + _ => todo!(), } } diff --git a/core/vector/operations/text.rs b/core/vector/operations/text.rs index 32eb0c0e8..13f709499 100644 --- a/core/vector/operations/text.rs +++ b/core/vector/operations/text.rs @@ -25,6 +25,7 @@ pub fn vector_to_text(vector: &Vector) -> String { } } } + _ => todo!(), } text.push(']'); text @@ -85,6 +86,7 @@ pub fn vector_from_text(vector_type: VectorType, text: &str) -> Result { } data.extend_from_slice(&x.to_le_bytes()); } + _ => todo!(), }; } Vector::from_data(vector_type, data) diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index 95380e805..52465038f 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -4,6 +4,7 @@ use crate::{LimboError, Result}; pub enum VectorType { Float32Dense, Float64Dense, + Float32Sparse, } #[derive(Debug)] @@ -13,6 +14,11 @@ pub struct Vector { pub data: Vec, } +pub struct VectorSparse<'a, T> { + idx: &'a [u32], + values: &'a [T], +} + impl Vector { pub fn vector_type(mut blob: Vec) -> Result<(VectorType, Vec)> { // Even-sized blobs are always float32. @@ -21,12 +27,26 @@ impl Vector { } // Odd-sized blobs have type byte at the end let vector_type = blob.pop().unwrap(); + /* + vector types used by LibSQL: + (see https://github.com/tursodatabase/libsql/blob/a55bf61192bdb89e97568de593c4af5b70d24bde/libsql-sqlite3/src/vectorInt.h#L52) + #define VECTOR_TYPE_FLOAT32 1 + #define VECTOR_TYPE_FLOAT64 2 + #define VECTOR_TYPE_FLOAT1BIT 3 + #define VECTOR_TYPE_FLOAT8 4 + #define VECTOR_TYPE_FLOAT16 5 + #define VECTOR_TYPE_FLOATB16 6 + */ match vector_type { 1 => Ok((VectorType::Float32Dense, blob)), 2 => Ok((VectorType::Float64Dense, blob)), - _ => Err(LimboError::ConversionError( - "Invalid vector type".to_string(), + 3 | 4 | 5 | 6 => Err(LimboError::ConversionError( + "unsupported vector type from LibSQL".to_string(), )), + 9 => Ok((VectorType::Float32Sparse, blob)), + _ => Err(LimboError::ConversionError(format!( + "unknown vector type: {vector_type}" + ))), } } pub fn from_blob(blob: Vec) -> Result { @@ -61,6 +81,21 @@ impl Vector { data, }) } + VectorType::Float32Sparse => { + if data.len() == 0 || data.len() % 4 != 0 || (data.len() - 4) % 8 != 0 { + return Err(LimboError::InvalidArgument(format!( + "f32 sparse vector unexpected data length: {}", + data.len(), + ))); + } + let dims = u32::from_le_bytes(data[data.len() - 4..].try_into().unwrap()) as usize; + let vector = Vector { + vector_type, + dims, + data, + }; + Ok(vector) + } } } /// # Safety @@ -116,7 +151,21 @@ impl Vector { "data pointer must be aligned to {align} bytes for f64 access" ); - unsafe { std::slice::from_raw_parts(self.data.as_ptr() as *const f64, self.dims) } + unsafe { std::slice::from_raw_parts(ptr as *const f64, self.dims) } + } + + pub fn as_f32_sparse(&self) -> VectorSparse<'_, f32> { + let ptr = self.data.as_ptr(); + let align = std::mem::align_of::(); + assert_eq!( + ptr.align_offset(align), + 0, + "data pointer must be aligned to {align} bytes for f32 access" + ); + let length = (self.data.len() - 4) / 4 / 2; + let values = unsafe { std::slice::from_raw_parts(ptr as *const f32, length) }; + let idx = unsafe { std::slice::from_raw_parts((ptr as *const u32).add(length), length) }; + VectorSparse { idx, values } } } @@ -199,6 +248,7 @@ mod tests { let floats = Self::generate_f64_vector(g); floats.iter().flat_map(|f| f.to_le_bytes()).collect() } + _ => unreachable!(), }; ArbitraryVector { vector_type, data } @@ -281,6 +331,7 @@ mod tests { // Check if the slice length matches the dimensions and the data length is correct (8 bytes per float) slice.len() == DIMS && (slice.len() * 8 == v.data.len()) } + _ => unreachable!(), } } @@ -404,6 +455,7 @@ mod tests { let parsed = parsed_vector.as_f64_slice(); original.iter().zip(parsed.iter()).all(|(a, b)| a == b) } + _ => unreachable!(), } } Err(_) => false, From 68632cc1420774efcf2aed66dc3f12f4dd927c5d Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 17:26:36 +0400 Subject: [PATCH 110/428] rename euclidian to L2 for consistency --- core/function.rs | 7 +++---- core/translate/expr.rs | 2 +- core/vdbe/execute.rs | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/core/function.rs b/core/function.rs index b5a6503cc..e0e7b345c 100644 --- a/core/function.rs +++ b/core/function.rs @@ -156,7 +156,7 @@ pub enum VectorFunc { Vector64, VectorExtract, VectorDistanceCos, - VectorDistanceEuclidean, + VectorDistanceL2, VectorConcat, VectorSlice, } @@ -175,8 +175,7 @@ impl Display for VectorFunc { Self::Vector64 => "vector64".to_string(), Self::VectorExtract => "vector_extract".to_string(), Self::VectorDistanceCos => "vector_distance_cos".to_string(), - // We use `distance_l2` to reduce user input - Self::VectorDistanceEuclidean => "vector_distance_l2".to_string(), + Self::VectorDistanceL2 => "vector_distance_l2".to_string(), Self::VectorConcat => "vector_concat".to_string(), Self::VectorSlice => "vector_slice".to_string(), }; @@ -868,7 +867,7 @@ impl Func { "vector64" => Ok(Self::Vector(VectorFunc::Vector64)), "vector_extract" => Ok(Self::Vector(VectorFunc::VectorExtract)), "vector_distance_cos" => Ok(Self::Vector(VectorFunc::VectorDistanceCos)), - "vector_distance_l2" => Ok(Self::Vector(VectorFunc::VectorDistanceEuclidean)), + "vector_distance_l2" => Ok(Self::Vector(VectorFunc::VectorDistanceL2)), "vector_concat" => Ok(Self::Vector(VectorFunc::VectorConcat)), "vector_slice" => Ok(Self::Vector(VectorFunc::VectorSlice)), _ => crate::bail_parse_error!("no such function: {}", name), diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 940c7a01e..835f1ea08 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -904,7 +904,7 @@ pub fn translate_expr( emit_function_call(program, func_ctx, &[regs, regs + 1], target_register)?; Ok(target_register) } - VectorFunc::VectorDistanceEuclidean => { + VectorFunc::VectorDistanceL2 => { let args = expect_arguments_exact!(args, 2, vector_func); let regs = program.alloc_registers(2); translate_expr(program, referenced_tables, &args[0], regs, resolver)?; diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 1c63f54de..8f74268be 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -5210,7 +5210,7 @@ pub fn op_function( vector_distance_cos(&state.registers[*start_reg..*start_reg + arg_count])?; state.registers[*dest] = Register::Value(result); } - VectorFunc::VectorDistanceEuclidean => { + VectorFunc::VectorDistanceL2 => { let result = vector_distance_l2(&state.registers[*start_reg..*start_reg + arg_count])?; state.registers[*dest] = Register::Value(result); From f4116eb3d4c34c56c7d18cc8c03275a8acce9f82 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Wed, 8 Oct 2025 08:25:01 -0700 Subject: [PATCH 111/428] lie about sqlite version I found an application in the open that expects sqlite_version() to return a specific string (higher than 3.8...). We had tons of those issues at Scylla, and the lesson was that you tell your kids not to lie, but when life hits, well... you lie. We'll add a new function, turso_version, that tells the truth. --- core/function.rs | 5 +++++ core/translate/expr.rs | 26 +++----------------------- core/vdbe/execute.rs | 19 ++++++++++++++----- 3 files changed, 22 insertions(+), 28 deletions(-) diff --git a/core/function.rs b/core/function.rs index b5a6503cc..891e6c4ce 100644 --- a/core/function.rs +++ b/core/function.rs @@ -310,6 +310,7 @@ pub enum ScalarFunc { Unicode, Quote, SqliteVersion, + TursoVersion, SqliteSourceId, UnixEpoch, JulianDay, @@ -373,6 +374,7 @@ impl ScalarFunc { ScalarFunc::Unicode => true, ScalarFunc::Quote => true, ScalarFunc::SqliteVersion => true, + ScalarFunc::TursoVersion => true, ScalarFunc::SqliteSourceId => true, ScalarFunc::UnixEpoch => false, ScalarFunc::JulianDay => false, @@ -437,6 +439,7 @@ impl Display for ScalarFunc { Self::Unicode => "unicode".to_string(), Self::Quote => "quote".to_string(), Self::SqliteVersion => "sqlite_version".to_string(), + Self::TursoVersion => "turso_version".to_string(), Self::SqliteSourceId => "sqlite_source_id".to_string(), Self::JulianDay => "julianday".to_string(), Self::UnixEpoch => "unixepoch".to_string(), @@ -652,6 +655,7 @@ impl Func { | ScalarFunc::Random | ScalarFunc::TotalChanges | ScalarFunc::SqliteVersion + | ScalarFunc::TursoVersion | ScalarFunc::SqliteSourceId | ScalarFunc::LastInsertRowid ) @@ -770,6 +774,7 @@ impl Func { "unicode" => Ok(Self::Scalar(ScalarFunc::Unicode)), "quote" => Ok(Self::Scalar(ScalarFunc::Quote)), "sqlite_version" => Ok(Self::Scalar(ScalarFunc::SqliteVersion)), + "turso_version" => Ok(Self::Scalar(ScalarFunc::TursoVersion)), "sqlite_source_id" => Ok(Self::Scalar(ScalarFunc::SqliteSourceId)), "replace" => Ok(Self::Scalar(ScalarFunc::Replace)), "likely" => Ok(Self::Scalar(ScalarFunc::Likely)), diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 940c7a01e..23ca0c75e 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -1499,7 +1499,9 @@ pub fn translate_expr( Ok(target_register) } - ScalarFunc::SqliteVersion => { + ScalarFunc::SqliteVersion + | ScalarFunc::TursoVersion + | ScalarFunc::SqliteSourceId => { if !args.is_empty() { crate::bail_parse_error!("sqlite_version function with arguments"); } @@ -1519,28 +1521,6 @@ pub fn translate_expr( }); Ok(target_register) } - ScalarFunc::SqliteSourceId => { - if !args.is_empty() { - crate::bail_parse_error!( - "sqlite_source_id function with arguments" - ); - } - - let output_register = program.alloc_register(); - program.emit_insn(Insn::Function { - constant_mask: 0, - start_reg: output_register, - dest: output_register, - func: func_ctx, - }); - - program.emit_insn(Insn::Copy { - src_reg: output_register, - dst_reg: target_register, - extra_amount: 0, - }); - Ok(target_register) - } ScalarFunc::Replace => { if !args.len() == 3 { crate::bail_parse_error!( diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 9cb2e6b7e..7cd64c884 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -4971,7 +4971,7 @@ pub fn op_function( } } } - ScalarFunc::SqliteVersion => { + ScalarFunc::TursoVersion => { if !program.connection.is_db_initialized() { state.registers[*dest] = Register::Value(Value::build_text(info::build::PKG_VERSION)); @@ -4979,10 +4979,14 @@ pub fn op_function( let version_integer = return_if_io!(pager.with_header(|header| header.version_number)).get() as i64; - let version = execute_sqlite_version(version_integer); + let version = execute_turso_version(version_integer); state.registers[*dest] = Register::Value(Value::build_text(version)); } } + ScalarFunc::SqliteVersion => { + let version = execute_sqlite_version(); + state.registers[*dest] = Register::Value(Value::build_text(version)); + } ScalarFunc::SqliteSourceId => { let src_id = format!( "{} {}", @@ -9508,7 +9512,12 @@ fn try_float_to_integer_affinity(value: &mut Value, fl: f64) -> bool { false } -fn execute_sqlite_version(version_integer: i64) -> String { +// Compat for applications that test for SQLite. +fn execute_sqlite_version() -> String { + "3.50.4".to_string() +} + +fn execute_turso_version(version_integer: i64) -> String { let major = version_integer / 1_000_000; let minor = (version_integer % 1_000_000) / 1_000; let release = version_integer % 1_000; @@ -10392,7 +10401,7 @@ mod tests { use crate::vdbe::{Bitfield, Register}; - use super::{exec_char, execute_sqlite_version}; + use super::{exec_char, execute_turso_version}; use std::collections::HashMap; #[test] @@ -11180,7 +11189,7 @@ mod tests { fn test_execute_sqlite_version() { let version_integer = 3046001; let expected = "3.46.1"; - assert_eq!(execute_sqlite_version(version_integer), expected); + assert_eq!(execute_turso_version(version_integer), expected); } #[test] From 1f310a473803031dfdbb29320e06c28efde0d258 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 17:29:18 +0300 Subject: [PATCH 112/428] Remove expensive hot path assert --- core/vdbe/sorter.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index a55cb656a..366d59e1e 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -296,14 +296,6 @@ impl Sorter { fn next_from_chunk_heap(&mut self) -> Result>> { // Make sure all chunks read at least one record into their buffer. - turso_assert!( - !self.chunks.iter().any(|chunk| matches!( - *chunk.io_state.read().unwrap(), - SortedChunkIOState::WaitingForRead - )), - "chunks should have been read" - ); - if let Some((next_record, next_chunk_idx)) = self.chunk_heap.pop() { // TODO: blocking will be unnecessary here with IO completions let io = self.io.clone(); From 1c35d5b34287bf17d6b4d044ceca7a4905bc745b Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 17:43:28 +0300 Subject: [PATCH 113/428] avoid expensive Arc cloning --- core/vdbe/sorter.rs | 65 ++++++++++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 21 deletions(-) diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index 366d59e1e..47f480890 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -7,7 +7,6 @@ use std::sync::{atomic, Arc, RwLock}; use tempfile; use crate::types::IOCompletions; -use crate::util::IOExt; use crate::{ error::LimboError, io::{Buffer, Completion, File, OpenFlags, IO}, @@ -88,6 +87,7 @@ pub struct Sorter { /// State machine for [Sorter::init_chunk_heap] init_chunk_heap_state: InitChunkHeapState, seq_count: i64, + pending_completions: Vec, } impl Sorter { @@ -126,6 +126,7 @@ impl Sorter { insert_state: InsertState::Start, init_chunk_heap_state: InitChunkHeapState::Start, seq_count: 0, + pending_completions: Vec::new(), } } @@ -284,43 +285,58 @@ impl Sorter { ); self.chunk_heap.reserve(self.chunks.len()); // TODO: blocking will be unnecessary here with IO completions - let io = self.io.clone(); + let mut completions = vec![]; for chunk_idx in 0..self.chunks.len() { - io.block(|| self.push_to_chunk_heap(chunk_idx))?; + match self.push_to_chunk_heap(chunk_idx)? { + Some(c) => completions.push(c), + None => (), + }; } self.init_chunk_heap_state = InitChunkHeapState::Start; + if !completions.is_empty() { + io_yield_many!(completions); + } Ok(IOResult::Done(())) } } } fn next_from_chunk_heap(&mut self) -> Result>> { + if !self.pending_completions.is_empty() { + return Ok(IOResult::IO(IOCompletions::Many( + self.pending_completions.drain(..).collect(), + ))); + } // Make sure all chunks read at least one record into their buffer. if let Some((next_record, next_chunk_idx)) = self.chunk_heap.pop() { // TODO: blocking will be unnecessary here with IO completions - let io = self.io.clone(); - io.block(|| self.push_to_chunk_heap(next_chunk_idx))?; + if let Some(c) = self.push_to_chunk_heap(next_chunk_idx)? { + self.pending_completions.push(c); + } Ok(IOResult::Done(Some(next_record.0))) } else { Ok(IOResult::Done(None)) } } - fn push_to_chunk_heap(&mut self, chunk_idx: usize) -> Result> { + fn push_to_chunk_heap(&mut self, chunk_idx: usize) -> Result> { let chunk = &mut self.chunks[chunk_idx]; - if let Some(record) = return_if_io!(chunk.next()) { - self.chunk_heap.push(( - Reverse(SortableImmutableRecord::new( - record, - self.key_len, - self.index_key_info.clone(), - )?), - chunk_idx, - )); + match chunk.next()? { + ChunkNextResult::Done(Some(record)) => { + self.chunk_heap.push(( + Reverse(SortableImmutableRecord::new( + record, + self.key_len, + self.index_key_info.clone(), + )?), + chunk_idx, + )); + Ok(None) + } + ChunkNextResult::Done(None) => Ok(None), + ChunkNextResult::IO(io) => Ok(Some(io)), } - - Ok(IOResult::Done(())) } fn flush(&mut self) -> Result> { @@ -404,6 +420,11 @@ struct SortedChunk { next_state: NextState, } +enum ChunkNextResult { + Done(Option), + IO(Completion), +} + impl SortedChunk { fn new(file: Arc, start_offset: usize, buffer_size: usize) -> Self { Self { @@ -427,13 +448,13 @@ impl SortedChunk { self.buffer_len.store(len, atomic::Ordering::SeqCst); } - fn next(&mut self) -> Result>> { + fn next(&mut self) -> Result { loop { match self.next_state { NextState::Start => { let mut buffer_len = self.buffer_len(); if self.records.is_empty() && buffer_len == 0 { - return Ok(IOResult::Done(None)); + return Ok(ChunkNextResult::Done(None)); } if self.records.is_empty() { @@ -497,13 +518,15 @@ impl SortedChunk { *self.io_state.write().unwrap() = SortedChunkIOState::ReadEOF; } else { let c = self.read()?; - io_yield_one!(c); + if !c.succeeded() { + return Ok(ChunkNextResult::IO(c)); + } } } } NextState::Finish => { self.next_state = NextState::Start; - return Ok(IOResult::Done(self.records.pop())); + return Ok(ChunkNextResult::Done(self.records.pop())); } } } From a1a83c689b75caf0c06840bb218a410c58e83d58 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 17:50:06 +0300 Subject: [PATCH 114/428] Don't yield if completion already succeeded --- core/vdbe/sorter.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index 47f480890..934f56caa 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -227,7 +227,9 @@ impl Sorter { self.insert_state = InsertState::Insert; if self.current_buffer_size + payload_size > self.max_buffer_size { if let Some(c) = self.flush()? { - io_yield_one!(c); + if !c.succeeded() { + io_yield_one!(c); + } } } } From 0356a7102cab22373eeb9edb7ac5ee65066d370a Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 17:50:15 +0300 Subject: [PATCH 115/428] remove another expensive assert --- core/vdbe/sorter.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index 934f56caa..9af3110e0 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -234,15 +234,6 @@ impl Sorter { } } InsertState::Insert => { - turso_assert!( - !self.chunks.iter().any(|chunk| { - matches!( - *chunk.io_state.read().unwrap(), - SortedChunkIOState::WaitingForWrite - ) - }), - "chunks should have written" - ); self.records.push(SortableImmutableRecord::new( record.clone(), self.key_len, From 70fc509046fdd3b19b3e9c3953fe5984f3230f38 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Mon, 6 Oct 2025 02:14:15 -0300 Subject: [PATCH 116/428] First step to fix 3277 This follows almost step by step sqlite's functions, and indeed it's correct. But still have to translate some of this logic to our current semantics --- core/translate/expr.rs | 217 ++++++++++++++++++++--------------------- 1 file changed, 106 insertions(+), 111 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index d9e1c281d..359bb8cf7 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -149,6 +149,33 @@ macro_rules! expect_arguments_even { }}; } +fn expr_vec_size(expr: &ast::Expr) -> usize { + match expr { + Expr::Parenthesized(v) => v.len(), + _ => 1, + } +} + +fn expr_code_vector(program: &mut ProgramBuilder, expr: &ast::Expr) -> usize { + let size = expr_vec_size(expr); + program.alloc_registers(size) +} + +fn expr_can_be_null(expr: &ast::Expr) -> bool { + // todo: better handling columns. Check sqlite3ExprCanBeNull + match expr { + Expr::Literal(literal) => match literal { + ast::Literal::Numeric(_) => false, + ast::Literal::String(_) => false, + ast::Literal::Blob(_) => false, + _ => true, + }, + _ => true, + } +} + +// todo(rn): Check right affinities, fix other call to translate + /// Core implementation of IN expression logic that can be used in both conditional and expression contexts. /// This follows SQLite's approach where a single core function handles all InList cases. /// @@ -160,121 +187,92 @@ fn translate_in_list( referenced_tables: Option<&TableReferences>, lhs: &ast::Expr, rhs: &[Box], - not: bool, - condition_metadata: ConditionMetadata, + dest_if_false: BranchOffset, + dest_if_null: BranchOffset, resolver: &Resolver, ) -> Result<()> { - // lhs is e.g. a column reference - // rhs is an Option> - // If rhs is None, it means the IN expression is always false, i.e. tbl.id IN (). - // If rhs is Some, it means the IN expression has a list of values to compare against, e.g. tbl.id IN (1, 2, 3). - // - // The IN expression is equivalent to a series of OR expressions. - // For example, `a IN (1, 2, 3)` is equivalent to `a = 1 OR a = 2 OR a = 3`. - // The NOT IN expression is equivalent to a series of AND expressions. - // For example, `a NOT IN (1, 2, 3)` is equivalent to `a != 1 AND a != 2 AND a != 3`. - // - // SQLite typically optimizes IN expressions to use a binary search on an ephemeral index if there are many values. - // For now we don't have the plumbing to do that, so we'll just emit a series of comparisons, - // which is what SQLite also does for small lists of values. - // TODO: Let's refactor this later to use a more efficient implementation conditionally based on the number of values. - + // Disclamer: SQLite does this opt during parsing (https://github.com/sqlite/sqlite/blob/833fb1ef59b1c62fb2b00c7a121a5b5171f8a85e/src/parse.y#L1425) + // But we're the cool kids so we gotta do during translation :) if rhs.is_empty() { - // If rhs is None, IN expressions are always false and NOT IN expressions are always true. - if not { - // On a trivially true NOT IN () expression we can only jump to the 'jump_target_when_true' label if 'jump_if_condition_is_true'; otherwise me must fall through. - // This is because in a more complex condition we might need to evaluate the rest of the condition. - // Note that we are already breaking up our WHERE clauses into a series of terms at "AND" boundaries, so right now we won't be running into cases where jumping on true would be incorrect, - // but once we have e.g. parenthesization and more complex conditions, not having this 'if' here would introduce a bug. - if condition_metadata.jump_if_condition_is_true { - program.emit_insn(Insn::Goto { - target_pc: condition_metadata.jump_target_when_true, - }); - } - } else { - program.emit_insn(Insn::Goto { - target_pc: condition_metadata.jump_target_when_false, - }); - } + program.emit_insn(Insn::Goto { + target_pc: dest_if_false, + }); return Ok(()); } - // The left hand side only needs to be evaluated once we have a list of values to compare against. - let lhs_reg = program.alloc_register(); + let lhs_reg = expr_code_vector(program, lhs); let _ = translate_expr(program, referenced_tables, lhs, lhs_reg, resolver)?; + let mut check_null_reg = 0; + let label_ok = program.allocate_label(); - // The difference between a local jump and an "upper level" jump is that for example in this case: - // WHERE foo IN (1,2,3) OR bar = 5, - // we can immediately jump to the 'jump_target_when_true' label of the ENTIRE CONDITION if foo = 1, foo = 2, or foo = 3 without evaluating the bar = 5 condition. - // This is why in Binary-OR expressions we set jump_if_condition_is_true to true for the first condition. - // However, in this example: - // WHERE foo IN (1,2,3) AND bar = 5, - // we can't jump to the 'jump_target_when_true' label of the entire condition foo = 1, foo = 2, or foo = 3, because we still need to evaluate the bar = 5 condition later. - // This is why in that case we just jump over the rest of the IN conditions in this "local" branch which evaluates the IN condition. - let jump_target_when_true = if condition_metadata.jump_if_condition_is_true { - condition_metadata.jump_target_when_true - } else { - program.allocate_label() - }; + if dest_if_false != dest_if_null { + check_null_reg = program.alloc_register(); + program.emit_insn(Insn::BitAnd { + lhs: lhs_reg, + rhs: lhs_reg, + dest: check_null_reg, + }); + } - if !not { - // If it's an IN expression, we need to jump to the 'jump_target_when_true' label if any of the conditions are true. - for (i, expr) in rhs.iter().enumerate() { - let rhs_reg = program.alloc_register(); - let last_condition = i == rhs.len() - 1; - let _ = translate_expr(program, referenced_tables, expr, rhs_reg, resolver)?; - // If this is not the last condition, we need to jump to the 'jump_target_when_true' label if the condition is true. - if !last_condition { + for (i, expr) in rhs.iter().enumerate() { + let last_condition = i == rhs.len() - 1; + let rhs_reg = program.alloc_register(); + let _ = translate_expr(program, referenced_tables, expr, rhs_reg, resolver)?; + + if check_null_reg != 0 && expr_can_be_null(expr) { + program.emit_insn(Insn::BitAnd { + lhs: check_null_reg, + rhs: rhs_reg, + dest: check_null_reg, + }); + } + + if !last_condition || dest_if_false != dest_if_null { + if lhs_reg != rhs_reg { program.emit_insn(Insn::Eq { lhs: lhs_reg, rhs: rhs_reg, - target_pc: jump_target_when_true, + target_pc: label_ok, + // Use affinity instead flags: CmpInsFlags::default(), collation: program.curr_collation(), }); } else { - // If this is the last condition, we need to jump to the 'jump_target_when_false' label if there is no match. + program.emit_insn(Insn::NotNull { + reg: lhs_reg, + target_pc: label_ok, + }); + } + // sqlite3VdbeChangeP5(v, zAff[0]); + } else { + if lhs_reg != rhs_reg { program.emit_insn(Insn::Ne { lhs: lhs_reg, rhs: rhs_reg, - target_pc: condition_metadata.jump_target_when_false, - flags: CmpInsFlags::default().jump_if_null(), + target_pc: dest_if_false, + flags: CmpInsFlags::default(), collation: program.curr_collation(), }); + } else { + program.emit_insn(Insn::IsNull { + reg: lhs_reg, + target_pc: dest_if_false, + }); } } - // If we got here, then the last condition was a match, so we jump to the 'jump_target_when_true' label if 'jump_if_condition_is_true'. - // If not, we can just fall through without emitting an unnecessary instruction. - if condition_metadata.jump_if_condition_is_true { - program.emit_insn(Insn::Goto { - target_pc: condition_metadata.jump_target_when_true, - }); - } - } else { - // If it's a NOT IN expression, we need to jump to the 'jump_target_when_false' label if any of the conditions are true. - for expr in rhs.iter() { - let rhs_reg = program.alloc_register(); - let _ = translate_expr(program, referenced_tables, expr, rhs_reg, resolver)?; - program.emit_insn(Insn::Eq { - lhs: lhs_reg, - rhs: rhs_reg, - target_pc: condition_metadata.jump_target_when_false, - flags: CmpInsFlags::default().jump_if_null(), - collation: program.curr_collation(), - }); - } - // If we got here, then none of the conditions were a match, so we jump to the 'jump_target_when_true' label if 'jump_if_condition_is_true'. - // If not, we can just fall through without emitting an unnecessary instruction. - if condition_metadata.jump_if_condition_is_true { - program.emit_insn(Insn::Goto { - target_pc: condition_metadata.jump_target_when_true, - }); - } } - if !condition_metadata.jump_if_condition_is_true { - program.preassign_label_to_next_insn(jump_target_when_true); + if check_null_reg != 0 { + program.emit_insn(Insn::IsNull { + reg: check_null_reg, + target_pc: dest_if_null, + }); + program.emit_insn(Insn::Goto { + target_pc: dest_if_false, + }); } + program.resolve_label(label_ok, program.offset()); + // todo: deallocate check_null_reg Ok(()) } @@ -403,13 +401,20 @@ pub fn translate_condition_expr( emit_cond_jump(program, condition_metadata, reg); } ast::Expr::InList { lhs, not, rhs } => { + let ConditionMetadata { + jump_target_when_true, + jump_target_when_false, + .. + } = condition_metadata; + // fix me translate_in_list( program, Some(referenced_tables), lhs, rhs, - *not, - condition_metadata, + jump_target_when_false, + // should be null!!! + jump_target_when_true, resolver, )?; } @@ -2029,14 +2034,13 @@ pub fn translate_expr( // but wrap it with appropriate expression context handling let result_reg = target_register; - // Set result to NULL initially (matches SQLite behavior) program.emit_insn(Insn::Null { dest: result_reg, dest_end: None, }); let dest_if_false = program.allocate_label(); - let label_integer_conversion = program.allocate_label(); + let dest_if_null = program.allocate_label(); // Call the core InList logic with expression-appropriate condition metadata translate_in_list( @@ -2044,12 +2048,8 @@ pub fn translate_expr( referenced_tables, lhs, rhs, - *not, - ConditionMetadata { - jump_if_condition_is_true: false, - jump_target_when_true: label_integer_conversion, // will be resolved below - jump_target_when_false: dest_if_false, - }, + dest_if_false, + dest_if_null, resolver, )?; @@ -2058,25 +2058,20 @@ pub fn translate_expr( value: 1, dest: result_reg, }); - program.emit_insn(Insn::Goto { - target_pc: label_integer_conversion, - }); - // False path: set result to 0 program.resolve_label(dest_if_false, program.offset()); - program.emit_insn(Insn::Integer { - value: 0, - dest: result_reg, - }); - - program.resolve_label(label_integer_conversion, program.offset()); - // Force integer conversion with AddImm 0 program.emit_insn(Insn::AddImm { register: result_reg, value: 0, }); - + if *not { + program.emit_insn(Insn::Not { + reg: result_reg, + dest: result_reg, + }); + } + program.resolve_label(dest_if_null, program.offset()); Ok(result_reg) } ast::Expr::InSelect { .. } => { From 52ed0f7997b56c8ae40e5938221ddd9db473faa3 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Mon, 6 Oct 2025 21:34:54 -0300 Subject: [PATCH 117/428] Add in expr optimization at the parser level instead of translation. lhs IN () and lhs NOT IN () can be translated to false and true. --- core/translate/expr.rs | 9 --------- parser/src/parser.rs | 22 +++++++++++++++++----- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 359bb8cf7..ce797c75a 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -191,15 +191,6 @@ fn translate_in_list( dest_if_null: BranchOffset, resolver: &Resolver, ) -> Result<()> { - // Disclamer: SQLite does this opt during parsing (https://github.com/sqlite/sqlite/blob/833fb1ef59b1c62fb2b00c7a121a5b5171f8a85e/src/parse.y#L1425) - // But we're the cool kids so we gotta do during translation :) - if rhs.is_empty() { - program.emit_insn(Insn::Goto { - target_pc: dest_if_false, - }); - return Ok(()); - } - let lhs_reg = expr_code_vector(program, lhs); let _ = translate_expr(program, referenced_tables, lhs, lhs_reg, resolver)?; let mut check_null_reg = 0; diff --git a/parser/src/parser.rs b/parser/src/parser.rs index b4188df44..b134f9f95 100644 --- a/parser/src/parser.rs +++ b/parser/src/parser.rs @@ -1723,11 +1723,23 @@ impl<'a> Parser<'a> { _ => { let exprs = self.parse_expr_list()?; eat_expect!(self, TK_RP); - Box::new(Expr::InList { - lhs: result, - not, - rhs: exprs, - }) + // Expressions in the form: + // lhs IN () + // lhs NOT IN () + // can be simplified to constants 0 (false) and 1 (true), respectively. + // + // todo: should check if lhs has a function. If so, this optimization cannot + // be done. + if exprs.is_empty() { + let name = if not { "1" } else { "0" }; + Box::new(Expr::Literal(Literal::Numeric(name.into()))) + } else { + Box::new(Expr::InList { + lhs: result, + not, + rhs: exprs, + }) + } } } } From 79958f468d4b25a5a773b7afdd918787852bc524 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Mon, 6 Oct 2025 21:49:34 -0300 Subject: [PATCH 118/428] Add jump_target_null to ConditionMetadata It's kinda make sense, conditions can be evaluated into 3 values: false, true and null. Now we handle that. --- core/translate/expr.rs | 49 +++++++++++++++++++++---------------- core/translate/group_by.rs | 2 ++ core/translate/index.rs | 2 ++ core/translate/main_loop.rs | 6 ++++- 4 files changed, 37 insertions(+), 22 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index ce797c75a..57da3c669 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -30,6 +30,7 @@ pub struct ConditionMetadata { pub jump_if_condition_is_true: bool, pub jump_target_when_true: BranchOffset, pub jump_target_when_false: BranchOffset, + pub jump_target_when_null: BranchOffset, } /// Container for register locations of values that can be referenced in RETURNING expressions @@ -174,21 +175,20 @@ fn expr_can_be_null(expr: &ast::Expr) -> bool { } } -// todo(rn): Check right affinities, fix other call to translate - /// Core implementation of IN expression logic that can be used in both conditional and expression contexts. /// This follows SQLite's approach where a single core function handles all InList cases. /// /// This is extracted from the original conditional implementation to be reusable. /// The logic exactly matches the original conditional InList implementation. +// todo: Check right affinities #[instrument(skip(program, referenced_tables, resolver), level = Level::DEBUG)] fn translate_in_list( program: &mut ProgramBuilder, referenced_tables: Option<&TableReferences>, lhs: &ast::Expr, rhs: &[Box], - dest_if_false: BranchOffset, - dest_if_null: BranchOffset, + condition_metadata: ConditionMetadata, + // dest if null should be in ConditionMetadata resolver: &Resolver, ) -> Result<()> { let lhs_reg = expr_code_vector(program, lhs); @@ -196,7 +196,7 @@ fn translate_in_list( let mut check_null_reg = 0; let label_ok = program.allocate_label(); - if dest_if_false != dest_if_null { + if condition_metadata.jump_target_when_false != condition_metadata.jump_target_when_null { check_null_reg = program.alloc_register(); program.emit_insn(Insn::BitAnd { lhs: lhs_reg, @@ -218,7 +218,9 @@ fn translate_in_list( }); } - if !last_condition || dest_if_false != dest_if_null { + if !last_condition + || condition_metadata.jump_target_when_false != condition_metadata.jump_target_when_null + { if lhs_reg != rhs_reg { program.emit_insn(Insn::Eq { lhs: lhs_reg, @@ -240,14 +242,14 @@ fn translate_in_list( program.emit_insn(Insn::Ne { lhs: lhs_reg, rhs: rhs_reg, - target_pc: dest_if_false, + target_pc: condition_metadata.jump_target_when_false, flags: CmpInsFlags::default(), collation: program.curr_collation(), }); } else { program.emit_insn(Insn::IsNull { reg: lhs_reg, - target_pc: dest_if_false, + target_pc: condition_metadata.jump_target_when_false, }); } } @@ -256,10 +258,17 @@ fn translate_in_list( if check_null_reg != 0 { program.emit_insn(Insn::IsNull { reg: check_null_reg, - target_pc: dest_if_null, + target_pc: condition_metadata.jump_target_when_null, }); program.emit_insn(Insn::Goto { - target_pc: dest_if_false, + target_pc: condition_metadata.jump_target_when_false, + }); + } + + // by default if IN expression is true we just continue to the next instruction + if condition_metadata.jump_if_condition_is_true { + program.emit_insn(Insn::Goto { + target_pc: condition_metadata.jump_target_when_true, }); } program.resolve_label(label_ok, program.offset()); @@ -392,20 +401,12 @@ pub fn translate_condition_expr( emit_cond_jump(program, condition_metadata, reg); } ast::Expr::InList { lhs, not, rhs } => { - let ConditionMetadata { - jump_target_when_true, - jump_target_when_false, - .. - } = condition_metadata; - // fix me translate_in_list( program, Some(referenced_tables), lhs, rhs, - jump_target_when_false, - // should be null!!! - jump_target_when_true, + condition_metadata, resolver, )?; } @@ -2032,6 +2033,8 @@ pub fn translate_expr( let dest_if_false = program.allocate_label(); let dest_if_null = program.allocate_label(); + // won't use this label :/ + let dest_if_true = program.allocate_label(); // Call the core InList logic with expression-appropriate condition metadata translate_in_list( @@ -2039,8 +2042,12 @@ pub fn translate_expr( referenced_tables, lhs, rhs, - dest_if_false, - dest_if_null, + ConditionMetadata { + jump_if_condition_is_true: false, + jump_target_when_true: dest_if_true, + jump_target_when_false: dest_if_false, + jump_target_when_null: dest_if_null, + }, resolver, )?; diff --git a/core/translate/group_by.rs b/core/translate/group_by.rs index 94d8dee03..6f891ae2b 100644 --- a/core/translate/group_by.rs +++ b/core/translate/group_by.rs @@ -778,6 +778,7 @@ pub fn group_by_emit_row_phase<'a>( if let Some(having) = &group_by.having { for expr in having.iter() { let if_true_target = program.allocate_label(); + let if_null_target = program.allocate_label(); translate_condition_expr( program, &plan.table_references, @@ -786,6 +787,7 @@ pub fn group_by_emit_row_phase<'a>( jump_if_condition_is_true: false, jump_target_when_false: labels.label_group_by_end_without_emitting_row, jump_target_when_true: if_true_target, + jump_target_when_null: if_null_target, }, &t_ctx.resolver, )?; diff --git a/core/translate/index.rs b/core/translate/index.rs index 8aced50d9..ced75fdf7 100644 --- a/core/translate/index.rs +++ b/core/translate/index.rs @@ -237,6 +237,7 @@ pub fn translate_create_index( let mut skip_row_label = None; if let Some(where_clause) = where_clause { let label = program.allocate_label(); + let null_label = program.allocate_label(); translate_condition_expr( &mut program, &table_references, @@ -245,6 +246,7 @@ pub fn translate_create_index( jump_if_condition_is_true: false, jump_target_when_false: label, jump_target_when_true: BranchOffset::Placeholder, + jump_target_when_null: null_label, }, resolver, )?; diff --git a/core/translate/main_loop.rs b/core/translate/main_loop.rs index 03e00f01d..a93390d11 100644 --- a/core/translate/main_loop.rs +++ b/core/translate/main_loop.rs @@ -384,10 +384,12 @@ pub fn init_loop( .filter(|c| c.should_eval_before_loop(&[JoinOrderMember::default()])) { let jump_target = program.allocate_label(); + let null_jump_target = program.allocate_label(); let meta = ConditionMetadata { jump_if_condition_is_true: false, jump_target_when_true: jump_target, jump_target_when_false: t_ctx.label_main_loop_end.unwrap(), + jump_target_when_null: null_jump_target, }; translate_condition_expr(program, tables, &cond.expr, meta, &t_ctx.resolver)?; program.preassign_label_to_next_insn(jump_target); @@ -709,10 +711,12 @@ fn emit_conditions( .filter(|cond| cond.should_eval_at_loop(join_index, join_order)) { let jump_target_when_true = program.allocate_label(); + let jump_target_when_null = program.allocate_label(); let condition_metadata = ConditionMetadata { jump_if_condition_is_true: false, jump_target_when_true, jump_target_when_false: next, + jump_target_when_null, }; translate_condition_expr( program, @@ -727,7 +731,7 @@ fn emit_conditions( Ok(()) } -/// SQLite (and so Limbo) processes joins as a nested loop. +/// SQLite (and so Turso) processes joins as a nested loop. /// The loop may emit rows to various destinations depending on the query: /// - a GROUP BY sorter (grouping is done by sorting based on the GROUP BY keys and aggregating while the GROUP BY keys match) /// - a GROUP BY phase with no sorting (when the rows are already in the order required by the GROUP BY keys) From da323fa0c4fe8675c6e20fe5092ce84b81d321e7 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Tue, 7 Oct 2025 02:34:33 -0300 Subject: [PATCH 119/428] Some clean ups and correctly working on WHERE clauses --- core/translate/expr.rs | 89 ++++++++++++++++++++++++++++--------- core/translate/group_by.rs | 4 +- core/translate/index.rs | 3 +- core/translate/main_loop.rs | 6 +-- parser/src/parser.rs | 2 +- testing/select.test | 13 ++++++ 6 files changed, 87 insertions(+), 30 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 57da3c669..2ab33a85a 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -165,12 +165,10 @@ fn expr_code_vector(program: &mut ProgramBuilder, expr: &ast::Expr) -> usize { fn expr_can_be_null(expr: &ast::Expr) -> bool { // todo: better handling columns. Check sqlite3ExprCanBeNull match expr { - Expr::Literal(literal) => match literal { - ast::Literal::Numeric(_) => false, - ast::Literal::String(_) => false, - ast::Literal::Blob(_) => false, - _ => true, - }, + Expr::Literal(literal) => !matches!( + literal, + ast::Literal::Numeric(_) | ast::Literal::String(_) | ast::Literal::Blob(_) + ), _ => true, } } @@ -180,6 +178,30 @@ fn expr_can_be_null(expr: &ast::Expr) -> bool { /// /// This is extracted from the original conditional implementation to be reusable. /// The logic exactly matches the original conditional InList implementation. +/// +/// An IN expression has one of the following formats: +/// ```sql +/// x IN (y1, y2,...,yN) +/// x IN (subquery) (Not yet implemented) +/// ``` +/// The result of an IN operator is one of TRUE, FALSE, or NULL. A NULL result +/// means that it cannot be determined if the LHS is contained in the RHS due +/// to the presence of NULL values. +/// +/// Currently, we do a simple full-scan, yet it's not ideal when there are many rows +/// on RHS. (Check sqlite's in-operator.md) +/// +/// Algorithm: +/// 1. Set the null-flag to false +/// 2. For each row in the RHS: +/// - Compare LHS and RHS +/// - If LHS matches RHS, returns TRUE +/// - If the comparison results in NULL, set the null-flag to true +/// 3. If the null-flag is true, return NULL +/// 4. Return FALSE +/// +/// A "NOT IN" operator is computed by first computing the equivalent IN +/// operator, then interchanging the TRUE and FALSE results. // todo: Check right affinities #[instrument(skip(program, referenced_tables, resolver), level = Level::DEBUG)] fn translate_in_list( @@ -237,21 +259,19 @@ fn translate_in_list( }); } // sqlite3VdbeChangeP5(v, zAff[0]); + } else if lhs_reg != rhs_reg { + program.emit_insn(Insn::Ne { + lhs: lhs_reg, + rhs: rhs_reg, + target_pc: condition_metadata.jump_target_when_false, + flags: CmpInsFlags::default(), + collation: program.curr_collation(), + }); } else { - if lhs_reg != rhs_reg { - program.emit_insn(Insn::Ne { - lhs: lhs_reg, - rhs: rhs_reg, - target_pc: condition_metadata.jump_target_when_false, - flags: CmpInsFlags::default(), - collation: program.curr_collation(), - }); - } else { - program.emit_insn(Insn::IsNull { - reg: lhs_reg, - target_pc: condition_metadata.jump_target_when_false, - }); - } + program.emit_insn(Insn::IsNull { + reg: lhs_reg, + target_pc: condition_metadata.jump_target_when_false, + }); } } @@ -400,15 +420,42 @@ pub fn translate_condition_expr( translate_expr(program, Some(referenced_tables), expr, reg, resolver)?; emit_cond_jump(program, condition_metadata, reg); } + ast::Expr::InList { lhs, not, rhs } => { + let ConditionMetadata { + jump_if_condition_is_true, + jump_target_when_true, + jump_target_when_false, + jump_target_when_null, + } = condition_metadata; + + // Adjust targets if `NOT IN` + let adjusted_metadata = if *not { + ConditionMetadata { + jump_if_condition_is_true, + jump_target_when_true, + jump_target_when_false: jump_target_when_true, + jump_target_when_null, + } + } else { + condition_metadata + }; + translate_in_list( program, Some(referenced_tables), lhs, rhs, - condition_metadata, + adjusted_metadata, resolver, )?; + + if *not { + program.emit_insn(Insn::Goto { + target_pc: jump_target_when_false, + }); + program.resolve_label(adjusted_metadata.jump_target_when_false, program.offset()); + } } ast::Expr::Like { not, .. } => { let cur_reg = program.alloc_register(); diff --git a/core/translate/group_by.rs b/core/translate/group_by.rs index 6f891ae2b..19f91c5bc 100644 --- a/core/translate/group_by.rs +++ b/core/translate/group_by.rs @@ -778,7 +778,6 @@ pub fn group_by_emit_row_phase<'a>( if let Some(having) = &group_by.having { for expr in having.iter() { let if_true_target = program.allocate_label(); - let if_null_target = program.allocate_label(); translate_condition_expr( program, &plan.table_references, @@ -787,7 +786,8 @@ pub fn group_by_emit_row_phase<'a>( jump_if_condition_is_true: false, jump_target_when_false: labels.label_group_by_end_without_emitting_row, jump_target_when_true: if_true_target, - jump_target_when_null: if_null_target, + // treat null result has false for now + jump_target_when_null: labels.label_group_by_end_without_emitting_row, }, &t_ctx.resolver, )?; diff --git a/core/translate/index.rs b/core/translate/index.rs index ced75fdf7..5fa07dc0d 100644 --- a/core/translate/index.rs +++ b/core/translate/index.rs @@ -237,7 +237,6 @@ pub fn translate_create_index( let mut skip_row_label = None; if let Some(where_clause) = where_clause { let label = program.allocate_label(); - let null_label = program.allocate_label(); translate_condition_expr( &mut program, &table_references, @@ -246,7 +245,7 @@ pub fn translate_create_index( jump_if_condition_is_true: false, jump_target_when_false: label, jump_target_when_true: BranchOffset::Placeholder, - jump_target_when_null: null_label, + jump_target_when_null: label, }, resolver, )?; diff --git a/core/translate/main_loop.rs b/core/translate/main_loop.rs index a93390d11..1f9d0a069 100644 --- a/core/translate/main_loop.rs +++ b/core/translate/main_loop.rs @@ -384,12 +384,11 @@ pub fn init_loop( .filter(|c| c.should_eval_before_loop(&[JoinOrderMember::default()])) { let jump_target = program.allocate_label(); - let null_jump_target = program.allocate_label(); let meta = ConditionMetadata { jump_if_condition_is_true: false, jump_target_when_true: jump_target, jump_target_when_false: t_ctx.label_main_loop_end.unwrap(), - jump_target_when_null: null_jump_target, + jump_target_when_null: t_ctx.label_main_loop_end.unwrap(), }; translate_condition_expr(program, tables, &cond.expr, meta, &t_ctx.resolver)?; program.preassign_label_to_next_insn(jump_target); @@ -711,12 +710,11 @@ fn emit_conditions( .filter(|cond| cond.should_eval_at_loop(join_index, join_order)) { let jump_target_when_true = program.allocate_label(); - let jump_target_when_null = program.allocate_label(); let condition_metadata = ConditionMetadata { jump_if_condition_is_true: false, jump_target_when_true, jump_target_when_false: next, - jump_target_when_null, + jump_target_when_null: next, }; translate_condition_expr( program, diff --git a/parser/src/parser.rs b/parser/src/parser.rs index b134f9f95..bbab464ff 100644 --- a/parser/src/parser.rs +++ b/parser/src/parser.rs @@ -1736,8 +1736,8 @@ impl<'a> Parser<'a> { } else { Box::new(Expr::InList { lhs: result, - not, rhs: exprs, + not, }) } } diff --git a/testing/select.test b/testing/select.test index f70d0ada7..5dff582f5 100755 --- a/testing/select.test +++ b/testing/select.test @@ -913,6 +913,19 @@ do_execsql_test_on_specific_db {:memory:} select-in-simple { } {1 0} +do_execsql_test_on_specific_db {:memory:} select-in-with-nulls { + SELECT 4 IN (1, 4, null); + SELECT 4 NOT IN (1, 4, null); +} {1 +0} + +# All should be null +do_execsql_test_on_specific_db {:memory:} select-in-with-nulls-2 { +SELECT 1 IN (2, 3, null); +SELECT 1 NOT IN (2, 3, null); +SELECT null in (null); +} {\n\n} + do_execsql_test_on_specific_db {:memory:} select-in-complex { CREATE TABLE test_table (id INTEGER, category TEXT, value INTEGER); INSERT INTO test_table VALUES (1, 'A', 10), (2, 'B', 20), (3, 'A', 30), (4, 'C', 40); From 625403cc2a307f855b80d3ff0b1c29d5012276be Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Tue, 7 Oct 2025 16:54:50 -0300 Subject: [PATCH 120/428] Fix register reuse when called inside a coroutine - On each interaction we assume that the value is NULL, so we need to set it like so for every interaction in the list. So we force to not emit this NULL as constant; - Forces a copy so IN expressions works inside an aggregation expression. Not ideal but it works, we should work more on the query planner for sure. --- core/translate/expr.rs | 42 +++++++++++++++++++++++++++++++++--------- core/vdbe/insn.rs | 4 +++- 2 files changed, 36 insertions(+), 10 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 2ab33a85a..99c0ad4bb 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -43,6 +43,13 @@ pub struct ReturningValueRegisters { pub num_columns: usize, } +/// Emit an instruction that is guaranteed not to be in any constant span. +/// This ensures the instruction won't be hoisted when emit_constant_insns is called. +fn emit_no_constant_insn(program: &mut ProgramBuilder, insn: Insn) { + program.constant_span_end_all(); + program.emit_insn(insn); +} + #[instrument(skip_all, level = Level::DEBUG)] fn emit_cond_jump(program: &mut ProgramBuilder, cond_meta: ConditionMetadata, reg: usize) { if cond_meta.jump_if_condition_is_true { @@ -2073,16 +2080,28 @@ pub fn translate_expr( // but wrap it with appropriate expression context handling let result_reg = target_register; - program.emit_insn(Insn::Null { - dest: result_reg, - dest_end: None, - }); - let dest_if_false = program.allocate_label(); let dest_if_null = program.allocate_label(); // won't use this label :/ let dest_if_true = program.allocate_label(); + let tmp = program.alloc_register(); + emit_no_constant_insn( + program, + Insn::Null { + dest: tmp, + dest_end: None, + }, + ); + translate_expr_no_constant_opt( + program, + referenced_tables, + &ast::Expr::Literal(ast::Literal::Null), + tmp, + resolver, + NoConstantOptReason::RegisterReuse, + )?; + // Call the core InList logic with expression-appropriate condition metadata translate_in_list( program, @@ -2101,22 +2120,27 @@ pub fn translate_expr( // condition true: set result to 1 program.emit_insn(Insn::Integer { value: 1, - dest: result_reg, + dest: tmp, }); // False path: set result to 0 program.resolve_label(dest_if_false, program.offset()); // Force integer conversion with AddImm 0 program.emit_insn(Insn::AddImm { - register: result_reg, + register: tmp, value: 0, }); if *not { program.emit_insn(Insn::Not { - reg: result_reg, - dest: result_reg, + reg: tmp, + dest: tmp, }); } program.resolve_label(dest_if_null, program.offset()); + program.emit_insn(Insn::Copy { + src_reg: tmp, + dst_reg: result_reg, + extra_amount: 0, + }); Ok(result_reg) } ast::Expr::InSelect { .. } => { diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index 9a5bab21c..64bae3947 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -853,10 +853,12 @@ pub enum Insn { db: usize, }, + /// Make a copy of register src..src+extra_amount into dst..dst+extra_amount. Copy { src_reg: usize, dst_reg: usize, - extra_amount: usize, // 0 extra_amount means we include src_reg, dst_reg..=dst_reg+amount = src_reg..=src_reg+amount + /// 0 extra_amount means we include src_reg, dst_reg..=dst_reg+amount = src_reg..=src_reg+amount + extra_amount: usize, }, /// Allocate a new b-tree. From 84e8d117648e0e00f90a5427a641f6ea8da0d3c5 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Wed, 8 Oct 2025 14:42:55 -0300 Subject: [PATCH 121/428] Fix bug when jump_if_true is enabled --- core/translate/expr.rs | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 99c0ad4bb..f1ec8ada0 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -292,13 +292,14 @@ fn translate_in_list( }); } + program.resolve_label(label_ok, program.offset()); + // by default if IN expression is true we just continue to the next instruction if condition_metadata.jump_if_condition_is_true { program.emit_insn(Insn::Goto { target_pc: condition_metadata.jump_target_when_true, }); } - program.resolve_label(label_ok, program.offset()); // todo: deallocate check_null_reg Ok(()) @@ -437,15 +438,21 @@ pub fn translate_condition_expr( } = condition_metadata; // Adjust targets if `NOT IN` - let adjusted_metadata = if *not { - ConditionMetadata { - jump_if_condition_is_true, - jump_target_when_true, - jump_target_when_false: jump_target_when_true, - jump_target_when_null, - } + let (adjusted_metadata, not_true_label, not_false_label) = if *not { + let not_true_label = program.allocate_label(); + let not_false_label = program.allocate_label(); + ( + ConditionMetadata { + jump_if_condition_is_true, + jump_target_when_true: not_true_label, + jump_target_when_false: not_false_label, + jump_target_when_null, + }, + Some(not_true_label), + Some(not_false_label), + ) } else { - condition_metadata + (condition_metadata, None, None) }; translate_in_list( @@ -458,10 +465,17 @@ pub fn translate_condition_expr( )?; if *not { + // When IN is TRUE (match found), NOT IN should be FALSE + program.resolve_label(not_true_label.unwrap(), program.offset()); program.emit_insn(Insn::Goto { target_pc: jump_target_when_false, }); - program.resolve_label(adjusted_metadata.jump_target_when_false, program.offset()); + + // When IN is FALSE (no match), NOT IN should be TRUE + program.resolve_label(not_false_label.unwrap(), program.offset()); + program.emit_insn(Insn::Goto { + target_pc: jump_target_when_true, + }); } } ast::Expr::Like { not, .. } => { From b8f8a870071137345551403275c772c0f4b07414 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Wed, 8 Oct 2025 15:37:49 -0300 Subject: [PATCH 122/428] Refactor bytecode emission - we were redundantly translating tmp - Make emit_constant_insn a method of ProgramBuilder --- core/translate/expr.rs | 26 ++++---------------------- core/vdbe/builder.rs | 8 ++++++++ 2 files changed, 12 insertions(+), 22 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index f1ec8ada0..622cb7a25 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -43,13 +43,6 @@ pub struct ReturningValueRegisters { pub num_columns: usize, } -/// Emit an instruction that is guaranteed not to be in any constant span. -/// This ensures the instruction won't be hoisted when emit_constant_insns is called. -fn emit_no_constant_insn(program: &mut ProgramBuilder, insn: Insn) { - program.constant_span_end_all(); - program.emit_insn(insn); -} - #[instrument(skip_all, level = Level::DEBUG)] fn emit_cond_jump(program: &mut ProgramBuilder, cond_meta: ConditionMetadata, reg: usize) { if cond_meta.jump_if_condition_is_true { @@ -2100,21 +2093,10 @@ pub fn translate_expr( let dest_if_true = program.allocate_label(); let tmp = program.alloc_register(); - emit_no_constant_insn( - program, - Insn::Null { - dest: tmp, - dest_end: None, - }, - ); - translate_expr_no_constant_opt( - program, - referenced_tables, - &ast::Expr::Literal(ast::Literal::Null), - tmp, - resolver, - NoConstantOptReason::RegisterReuse, - )?; + program.emit_no_constant_insn(Insn::Null { + dest: tmp, + dest_end: None, + }); // Call the core InList logic with expression-appropriate condition metadata translate_in_list( diff --git a/core/vdbe/builder.rs b/core/vdbe/builder.rs index 70e98eb00..82710377e 100644 --- a/core/vdbe/builder.rs +++ b/core/vdbe/builder.rs @@ -341,6 +341,14 @@ impl ProgramBuilder { self.insns.push((insn, self.insns.len())); } + /// Emit an instruction that is guaranteed not to be in any constant span. + /// This ensures the instruction won't be hoisted when emit_constant_insns is called. + #[instrument(skip(self), level = Level::DEBUG)] + pub fn emit_no_constant_insn(&mut self, insn: Insn) { + self.constant_span_end_all(); + self.emit_insn(insn); + } + pub fn close_cursors(&mut self, cursors: &[CursorID]) { for cursor in cursors { self.emit_insn(Insn::Close { cursor_id: *cursor }); From d2d265a06f2ce1c2b948d128235a5f2ec8e5b6f3 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Thu, 9 Oct 2025 12:14:20 -0300 Subject: [PATCH 123/428] Small nits and code clean ups --- core/translate/expr.rs | 39 ++++++++++++--------------------------- parser/src/ast.rs | 11 +++++++++++ 2 files changed, 23 insertions(+), 27 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 622cb7a25..7d5f60bc0 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -150,29 +150,6 @@ macro_rules! expect_arguments_even { }}; } -fn expr_vec_size(expr: &ast::Expr) -> usize { - match expr { - Expr::Parenthesized(v) => v.len(), - _ => 1, - } -} - -fn expr_code_vector(program: &mut ProgramBuilder, expr: &ast::Expr) -> usize { - let size = expr_vec_size(expr); - program.alloc_registers(size) -} - -fn expr_can_be_null(expr: &ast::Expr) -> bool { - // todo: better handling columns. Check sqlite3ExprCanBeNull - match expr { - Expr::Literal(literal) => !matches!( - literal, - ast::Literal::Numeric(_) | ast::Literal::String(_) | ast::Literal::Blob(_) - ), - _ => true, - } -} - /// Core implementation of IN expression logic that can be used in both conditional and expression contexts. /// This follows SQLite's approach where a single core function handles all InList cases. /// @@ -213,7 +190,11 @@ fn translate_in_list( // dest if null should be in ConditionMetadata resolver: &Resolver, ) -> Result<()> { - let lhs_reg = expr_code_vector(program, lhs); + let lhs_reg = if let Expr::Parenthesized(v) = lhs { + program.alloc_registers(v.len()) + } else { + program.alloc_register() + }; let _ = translate_expr(program, referenced_tables, lhs, lhs_reg, resolver)?; let mut check_null_reg = 0; let label_ok = program.allocate_label(); @@ -232,7 +213,7 @@ fn translate_in_list( let rhs_reg = program.alloc_register(); let _ = translate_expr(program, referenced_tables, expr, rhs_reg, resolver)?; - if check_null_reg != 0 && expr_can_be_null(expr) { + if check_null_reg != 0 && expr.can_be_null() { program.emit_insn(Insn::BitAnd { lhs: check_null_reg, rhs: rhs_reg, @@ -2089,16 +2070,17 @@ pub fn translate_expr( let dest_if_false = program.allocate_label(); let dest_if_null = program.allocate_label(); - // won't use this label :/ let dest_if_true = program.allocate_label(); + // Ideally we wouldn't need a tmp register, but currently if an IN expression + // is used inside an aggregator the target_register is cleared on every iteration, + // losing the state of the aggregator. let tmp = program.alloc_register(); program.emit_no_constant_insn(Insn::Null { dest: tmp, dest_end: None, }); - // Call the core InList logic with expression-appropriate condition metadata translate_in_list( program, referenced_tables, @@ -2118,13 +2100,16 @@ pub fn translate_expr( value: 1, dest: tmp, }); + // False path: set result to 0 program.resolve_label(dest_if_false, program.offset()); + // Force integer conversion with AddImm 0 program.emit_insn(Insn::AddImm { register: tmp, value: 0, }); + if *not { program.emit_insn(Insn::Not { reg: tmp, diff --git a/parser/src/ast.rs b/parser/src/ast.rs index dae656cc4..9fa714eef 100644 --- a/parser/src/ast.rs +++ b/parser/src/ast.rs @@ -542,6 +542,17 @@ impl Expr { pub fn raise(resolve_type: ResolveType, expr: Option) -> Expr { Expr::Raise(resolve_type, expr.map(Box::new)) } + + pub fn can_be_null(&self) -> bool { + // todo: better handling columns. Check sqlite3ExprCanBeNull + match self { + Expr::Literal(literal) => !matches!( + literal, + Literal::Numeric(_) | Literal::String(_) | Literal::Blob(_) + ), + _ => true, + } + } } /// SQL literal From 84643dc4f221828fc1d3fa820751c684a2977d5d Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 19:19:33 +0400 Subject: [PATCH 124/428] implement sparse vector operations --- core/function.rs | 3 + core/translate/expr.rs | 8 ++ core/vdbe/execute.rs | 6 +- core/vector/mod.rs | 15 ++- core/vector/operations/concat.rs | 8 ++ core/vector/operations/convert.rs | 75 +++++++++++- core/vector/operations/distance_cos.rs | 54 ++++++++- core/vector/operations/distance_l2.rs | 24 +++- core/vector/operations/serialize.rs | 26 ++--- core/vector/operations/slice.rs | 34 +++++- core/vector/operations/text.rs | 155 ++++++++++++++++--------- core/vector/vector_types.rs | 15 ++- 12 files changed, 333 insertions(+), 90 deletions(-) diff --git a/core/function.rs b/core/function.rs index e0e7b345c..f064970c3 100644 --- a/core/function.rs +++ b/core/function.rs @@ -153,6 +153,7 @@ impl Display for JsonFunc { pub enum VectorFunc { Vector, Vector32, + Vector32Sparse, Vector64, VectorExtract, VectorDistanceCos, @@ -172,6 +173,7 @@ impl Display for VectorFunc { let str = match self { Self::Vector => "vector".to_string(), Self::Vector32 => "vector32".to_string(), + Self::Vector32Sparse => "vector32_sparse".to_string(), Self::Vector64 => "vector64".to_string(), Self::VectorExtract => "vector_extract".to_string(), Self::VectorDistanceCos => "vector_distance_cos".to_string(), @@ -864,6 +866,7 @@ impl Func { "printf" => Ok(Self::Scalar(ScalarFunc::Printf)), "vector" => Ok(Self::Vector(VectorFunc::Vector)), "vector32" => Ok(Self::Vector(VectorFunc::Vector32)), + "vector32_sparse" => Ok(Self::Vector(VectorFunc::Vector32Sparse)), "vector64" => Ok(Self::Vector(VectorFunc::Vector64)), "vector_extract" => Ok(Self::Vector(VectorFunc::VectorExtract)), "vector_distance_cos" => Ok(Self::Vector(VectorFunc::VectorDistanceCos)), diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 835f1ea08..0a84e1548 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -879,6 +879,14 @@ pub fn translate_expr( emit_function_call(program, func_ctx, &[start_reg], target_register)?; Ok(target_register) } + VectorFunc::Vector32Sparse => { + let args = expect_arguments_exact!(args, 1, vector_func); + let start_reg = program.alloc_register(); + translate_expr(program, referenced_tables, &args[0], start_reg, resolver)?; + + emit_function_call(program, func_ctx, &[start_reg], target_register)?; + Ok(target_register) + } VectorFunc::Vector64 => { let args = expect_arguments_exact!(args, 1, vector_func); let start_reg = program.alloc_register(); diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 8f74268be..87103b8d4 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -20,7 +20,7 @@ use crate::types::{ use crate::util::normalize_ident; use crate::vdbe::insn::InsertFlags; use crate::vdbe::{registers_to_ref_values, TxnCleanup}; -use crate::vector::{vector_concat, vector_slice}; +use crate::vector::{vector32_sparse, vector_concat, vector_slice}; use crate::{ error::{ LimboError, SQLITE_CONSTRAINT, SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PRIMARYKEY, @@ -5197,6 +5197,10 @@ pub fn op_function( let result = vector32(&state.registers[*start_reg..*start_reg + arg_count])?; state.registers[*dest] = Register::Value(result); } + VectorFunc::Vector32Sparse => { + let result = vector32_sparse(&state.registers[*start_reg..*start_reg + arg_count])?; + state.registers[*dest] = Register::Value(result); + } VectorFunc::Vector64 => { let result = vector64(&state.registers[*start_reg..*start_reg + arg_count])?; state.registers[*dest] = Register::Value(result); diff --git a/core/vector/mod.rs b/core/vector/mod.rs index 5087cacfd..8c77f7e7c 100644 --- a/core/vector/mod.rs +++ b/core/vector/mod.rs @@ -8,10 +8,10 @@ pub mod operations; pub mod vector_types; use vector_types::*; -pub fn parse_vector(value: &Register, vec_ty: Option) -> Result { +pub fn parse_vector(value: &Register, type_hint: Option) -> Result { match value.get_value().value_type() { ValueType::Text => operations::text::vector_from_text( - vec_ty.unwrap_or(VectorType::Float32Dense), + type_hint.unwrap_or(VectorType::Float32Dense), value.get_value().to_text().expect("value must be text"), ), ValueType::Blob => { @@ -39,6 +39,17 @@ pub fn vector32(args: &[Register]) -> Result { Ok(operations::serialize::vector_serialize(vector)) } +pub fn vector32_sparse(args: &[Register]) -> Result { + if args.len() != 1 { + return Err(LimboError::ConversionError( + "vector32_sparse requires exactly one argument".to_string(), + )); + } + let vector = parse_vector(&args[0], Some(VectorType::Float32Sparse))?; + let vector = operations::convert::vector_convert(vector, VectorType::Float32Sparse)?; + Ok(operations::serialize::vector_serialize(vector)) +} + pub fn vector64(args: &[Register]) -> Result { if args.len() != 1 { return Err(LimboError::ConversionError( diff --git a/core/vector/operations/concat.rs b/core/vector/operations/concat.rs index 57d159b26..3504ba5bd 100644 --- a/core/vector/operations/concat.rs +++ b/core/vector/operations/concat.rs @@ -17,6 +17,14 @@ pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { data.extend_from_slice(&v2.data); data } + VectorType::Float32Sparse => { + let mut data = Vec::with_capacity(v1.data.len() + v2.data.len()); + data.extend_from_slice(&v1.data[..v1.data.len() / 2]); + data.extend_from_slice(&v2.data[..v2.data.len() / 2]); + data.extend_from_slice(&v1.data[v1.data.len() / 2..]); + data.extend_from_slice(&v2.data[v2.data.len() / 2..]); + data + } _ => todo!(), }; diff --git a/core/vector/operations/convert.rs b/core/vector/operations/convert.rs index cc8855831..175f7aa0d 100644 --- a/core/vector/operations/convert.rs +++ b/core/vector/operations/convert.rs @@ -6,21 +6,88 @@ use crate::{ pub fn vector_convert(v: Vector, target_type: VectorType) -> Result { match (v.vector_type, target_type) { (VectorType::Float32Dense, VectorType::Float32Dense) - | (VectorType::Float64Dense, VectorType::Float64Dense) => Ok(v), + | (VectorType::Float64Dense, VectorType::Float64Dense) + | (VectorType::Float32Sparse, VectorType::Float32Sparse) => Ok(v), (VectorType::Float32Dense, VectorType::Float64Dense) => { let mut data = Vec::with_capacity(v.dims * 8); for &x in v.as_f32_slice() { data.extend_from_slice(&f64::to_le_bytes(x as f64)); } - Vector::from_data(target_type, data) + Ok(Vector { + vector_type: target_type, + dims: v.dims, + data, + }) } (VectorType::Float64Dense, VectorType::Float32Dense) => { let mut data = Vec::with_capacity(v.dims * 4); for &x in v.as_f32_slice() { data.extend_from_slice(&f64::to_le_bytes(x as f64)); } - Vector::from_data(target_type, data) + Ok(Vector { + vector_type: target_type, + dims: v.dims, + data, + }) + } + (VectorType::Float32Dense, VectorType::Float32Sparse) => { + let (mut idx, mut values) = (Vec::new(), Vec::new()); + for (i, &value) in v.as_f32_slice().iter().enumerate() { + if value == 0.0 { + continue; + } + idx.extend_from_slice(&(i as u32).to_le_bytes()); + values.extend_from_slice(&value.to_le_bytes()); + } + values.extend_from_slice(&idx); + Ok(Vector { + vector_type: target_type, + dims: v.dims, + data: values, + }) + } + (VectorType::Float64Dense, VectorType::Float32Sparse) => { + let (mut idx, mut values) = (Vec::new(), Vec::new()); + for (i, &value) in v.as_f64_slice().iter().enumerate() { + if value == 0.0 { + continue; + } + idx.extend_from_slice(&(i as u32).to_le_bytes()); + values.extend_from_slice(&(value as f32).to_le_bytes()); + } + values.extend_from_slice(&idx); + Ok(Vector { + vector_type: target_type, + dims: v.dims, + data: values, + }) + } + (VectorType::Float32Sparse, VectorType::Float32Dense) => { + let sparse = v.as_f32_sparse(); + let mut data = vec![0u8; v.dims * 4]; + for (&i, &value) in sparse.idx.iter().zip(sparse.values.iter()) { + data.splice((4 * i) as usize..4 * (i + 1) as usize, value.to_le_bytes()); + } + Ok(Vector { + vector_type: target_type, + dims: v.dims, + data, + }) + } + (VectorType::Float32Sparse, VectorType::Float64Dense) => { + let sparse = v.as_f32_sparse(); + let mut data = vec![0u8; v.dims * 8]; + for (&i, &value) in sparse.idx.iter().zip(sparse.values.iter()) { + data.splice( + (8 * i) as usize..8 * (i + 1) as usize, + (value as f64).to_le_bytes(), + ); + } + Ok(Vector { + vector_type: target_type, + dims: v.dims, + data, + }) } - _ => todo!(), } } diff --git a/core/vector/operations/distance_cos.rs b/core/vector/operations/distance_cos.rs index b8ccff451..e211ee0e4 100644 --- a/core/vector/operations/distance_cos.rs +++ b/core/vector/operations/distance_cos.rs @@ -1,5 +1,5 @@ use crate::{ - vector::vector_types::{Vector, VectorType}, + vector::vector_types::{Vector, VectorSparse, VectorType}, LimboError, Result, }; @@ -23,6 +23,10 @@ pub fn vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { v1.as_f64_slice(), v2.as_f64_slice(), )), + VectorType::Float32Sparse => Ok(vector_f32_sparse_distance_cos( + v1.as_f32_sparse(), + v2.as_f32_sparse(), + )), _ => todo!(), } } @@ -67,6 +71,45 @@ fn vector_f64_distance_cos(v1: &[f64], v2: &[f64]) -> f64 { 1.0 - (dot / (norm1 * norm2).sqrt()) } +fn vector_f32_sparse_distance_cos(v1: VectorSparse, v2: VectorSparse) -> f64 { + let mut v1_pos = 0; + let mut v2_pos = 0; + let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); + while v1_pos < v1.idx.len() && v2_pos < v2.idx.len() { + let e1 = v1.values[v1_pos]; + let e2 = v2.values[v2_pos]; + if v1.idx[v1_pos] == v2.idx[v2_pos] { + dot += e1 * e2; + norm1 += e1 * e1; + norm2 += e2 * e2; + v1_pos += 1; + v2_pos += 1; + } else if v1.idx[v1_pos] < v2.idx[v2_pos] { + norm1 += e1 * e1; + v1_pos += 1; + } else { + norm2 += e2 * e2; + v2_pos += 1; + } + } + + while v1_pos < v1.idx.len() { + norm1 += v1.values[v1_pos] * v1.values[v1_pos]; + v1_pos += 1; + } + while v2_pos < v2.idx.len() { + norm1 += v2.values[v2_pos] * v2.values[v2_pos]; + v2_pos += 1; + } + + // Check for zero norms + if norm1 == 0.0f32 || norm2 == 0.0f32 { + return f64::NAN; + } + + (1.0f32 - (dot / (norm1 * norm2).sqrt())) as f64 +} + #[cfg(test)] mod tests { use super::*; @@ -79,4 +122,13 @@ mod tests { assert_eq!(vector_f32_distance_cos(&[1.0, 2.0], &[-1.0, -2.0]), 2.0); assert_eq!(vector_f32_distance_cos(&[1.0, 2.0], &[-2.0, 1.0]), 1.0); } + + #[test] + fn test_distance_cos_f64() { + assert!(vector_f64_distance_cos(&[], &[]).is_nan()); + assert!(vector_f64_distance_cos(&[1.0, 2.0], &[0.0, 0.0]).is_nan()); + assert_eq!(vector_f64_distance_cos(&[1.0, 2.0], &[1.0, 2.0]), 0.0); + assert_eq!(vector_f64_distance_cos(&[1.0, 2.0], &[-1.0, -2.0]), 2.0); + assert_eq!(vector_f64_distance_cos(&[1.0, 2.0], &[-2.0, 1.0]), 1.0); + } } diff --git a/core/vector/operations/distance_l2.rs b/core/vector/operations/distance_l2.rs index 9fa79c6fa..4095632d3 100644 --- a/core/vector/operations/distance_l2.rs +++ b/core/vector/operations/distance_l2.rs @@ -1,5 +1,5 @@ use crate::{ - vector::vector_types::{Vector, VectorType}, + vector::vector_types::{Vector, VectorSparse, VectorType}, LimboError, Result, }; @@ -21,6 +21,10 @@ pub fn vector_distance_l2(v1: &Vector, v2: &Vector) -> Result { VectorType::Float64Dense => { Ok(vector_f64_distance_l2(v1.as_f64_slice(), v2.as_f64_slice())) } + VectorType::Float32Sparse => Ok(vector_f32_sparse_distance_l2( + v1.as_f32_sparse(), + v2.as_f32_sparse(), + )), _ => todo!(), } } @@ -43,6 +47,24 @@ fn vector_f64_distance_l2(v1: &[f64], v2: &[f64]) -> f64 { sum.sqrt() } +fn vector_f32_sparse_distance_l2(v1: VectorSparse, v2: VectorSparse) -> f64 { + let mut v1_pos = 0; + let mut v2_pos = 0; + let mut sum = 0.0; + while v1_pos < v1.idx.len() && v2_pos < v2.idx.len() { + if v1.idx[v1_pos] == v2.idx[v2_pos] { + sum += (v1.values[v1_pos] - v2.values[v2_pos]).powi(2); + v1_pos += 1; + v2_pos += 1; + } else if v1.idx[v1_pos] < v2.idx[v2_pos] { + v1_pos += 1; + } else { + v2_pos += 1; + } + } + sum as f64 +} + #[cfg(test)] mod tests { use super::*; diff --git a/core/vector/operations/serialize.rs b/core/vector/operations/serialize.rs index 8fdd518d4..622819a59 100644 --- a/core/vector/operations/serialize.rs +++ b/core/vector/operations/serialize.rs @@ -3,21 +3,17 @@ use crate::{ Value, }; -pub fn vector_serialize(x: Vector) -> Value { +pub fn vector_serialize(mut x: Vector) -> Value { match x.vector_type { - VectorType::Float32Dense => vector_f32_serialize(x), - VectorType::Float64Dense => vector_f64_serialize(x), - _ => todo!(), + VectorType::Float32Dense => Value::from_blob(x.data), + VectorType::Float64Dense => { + x.data.push(2); + Value::from_blob(x.data) + } + VectorType::Float32Sparse => { + x.data.extend_from_slice(&(x.dims as u32).to_le_bytes()); + x.data.push(9); + Value::from_blob(x.data) + } } } - -fn vector_f64_serialize(x: Vector) -> Value { - let mut blob = Vec::with_capacity(x.dims * 8 + 1); - blob.extend_from_slice(&x.data); - blob.push(2); - Value::from_blob(blob) -} - -fn vector_f32_serialize(x: Vector) -> Value { - Value::from_blob(x.data) -} diff --git a/core/vector/operations/slice.rs b/core/vector/operations/slice.rs index a347e7f1c..a1ee1b2fa 100644 --- a/core/vector/operations/slice.rs +++ b/core/vector/operations/slice.rs @@ -15,13 +15,35 @@ pub fn vector_slice(vector: &Vector, start: usize, end: usize) -> Result )); } match vector.vector_type { - VectorType::Float32Dense => { - Vector::from_data(vector.vector_type, vector.data[start * 4..end * 4].to_vec()) + VectorType::Float32Dense => Ok(Vector { + vector_type: vector.vector_type, + dims: end - start + 1, + data: vector.data[start * 4..end * 4].to_vec(), + }), + VectorType::Float64Dense => Ok(Vector { + vector_type: vector.vector_type, + dims: end - start + 1, + data: vector.data[start * 8..end * 8].to_vec(), + }), + VectorType::Float32Sparse => { + let mut values = Vec::new(); + let mut idx = Vec::new(); + let sparse = vector.as_f32_sparse(); + for (&i, &value) in sparse.idx.iter().zip(sparse.values.iter()) { + let i = i as usize; + if i < start || i >= end { + continue; + } + values.extend_from_slice(&value.to_le_bytes()); + idx.extend_from_slice(&i.to_le_bytes()); + } + values.extend_from_slice(&idx); + Ok(Vector { + vector_type: vector.vector_type, + dims: end - start + 1, + data: values, + }) } - VectorType::Float64Dense => { - Vector::from_data(vector.vector_type, vector.data[start * 8..end * 8].to_vec()) - } - _ => todo!(), } } diff --git a/core/vector/operations/text.rs b/core/vector/operations/text.rs index 13f709499..c6403d812 100644 --- a/core/vector/operations/text.rs +++ b/core/vector/operations/text.rs @@ -4,28 +4,31 @@ use crate::{ }; pub fn vector_to_text(vector: &Vector) -> String { + match vector.vector_type { + VectorType::Float32Dense => format_text(vector.as_f32_slice().iter()), + VectorType::Float64Dense => format_text(vector.as_f64_slice().iter()), + VectorType::Float32Sparse => { + let mut dense = vec![0.0f32; vector.dims]; + let sparse = vector.as_f32_sparse(); + tracing::info!("{:?}", sparse); + for (&idx, &value) in sparse.idx.iter().zip(sparse.values.iter()) { + dense[idx as usize] = value; + } + format_text(dense.iter()) + } + } +} + +fn format_text(values: impl Iterator) -> String { let mut text = String::new(); text.push('['); - match vector.vector_type { - VectorType::Float32Dense => { - let data = vector.as_f32_slice(); - for (i, value) in data.iter().enumerate().take(vector.dims) { - text.push_str(&value.to_string()); - if i < vector.dims - 1 { - text.push(','); - } - } + let mut first = true; + for value in values { + if !first { + text.push(','); } - VectorType::Float64Dense => { - let data = vector.as_f64_slice(); - for (i, value) in data.iter().enumerate().take(vector.dims) { - text.push_str(&value.to_string()); - if i < vector.dims - 1 { - text.push(','); - } - } - } - _ => todo!(), + first = false; + text.push_str(&value.to_string()); } text.push(']'); text @@ -46,48 +49,92 @@ pub fn vector_from_text(vector_type: VectorType, text: &str) -> Result { "Invalid vector value".to_string(), )); } - let mut data: Vec = Vec::new(); let text = &text[1..text.len() - 1]; if text.trim().is_empty() { - return Ok(Vector { - vector_type, - dims: 0, - data, + return Ok(match vector_type { + VectorType::Float32Dense | VectorType::Float64Dense | VectorType::Float32Sparse => { + Vector { + vector_type, + dims: 0, + data: Vec::new(), + } + } }); } - let xs = text.split(','); - for x in xs { - let x = x.trim(); - if x.is_empty() { + let tokens = text.split(',').map(|x| x.trim()); + match vector_type { + VectorType::Float32Dense => vector32_from_text(tokens), + VectorType::Float64Dense => vector64_from_text(tokens), + VectorType::Float32Sparse => vector32_sparse_from_text(tokens), + } +} + +fn vector32_from_text<'a>(tokens: impl Iterator) -> Result { + let mut data = Vec::new(); + for token in tokens { + let value = token + .parse::() + .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; + if !value.is_finite() { return Err(LimboError::ConversionError( "Invalid vector value".to_string(), )); } - match vector_type { - VectorType::Float32Dense => { - let x = x - .parse::() - .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; - if !x.is_finite() { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - data.extend_from_slice(&x.to_le_bytes()); - } - VectorType::Float64Dense => { - let x = x - .parse::() - .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; - if !x.is_finite() { - return Err(LimboError::ConversionError( - "Invalid vector value".to_string(), - )); - } - data.extend_from_slice(&x.to_le_bytes()); - } - _ => todo!(), - }; + data.extend_from_slice(&value.to_le_bytes()); } - Vector::from_data(vector_type, data) + Ok(Vector { + vector_type: VectorType::Float32Dense, + dims: data.len() / 4, + data, + }) +} + +fn vector64_from_text<'a>(tokens: impl Iterator) -> Result { + let mut data = Vec::new(); + for token in tokens { + let value = token + .parse::() + .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; + if !value.is_finite() { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + data.extend_from_slice(&value.to_le_bytes()); + } + Ok(Vector { + vector_type: VectorType::Float64Dense, + dims: data.len() / 8, + data, + }) +} + +fn vector32_sparse_from_text<'a>(tokens: impl Iterator) -> Result { + let mut idx = Vec::new(); + let mut values = Vec::new(); + let mut dims = 0u32; + for token in tokens { + let value = token + .parse::() + .map_err(|_| LimboError::ConversionError("Invalid vector value".to_string()))?; + if !value.is_finite() { + return Err(LimboError::ConversionError( + "Invalid vector value".to_string(), + )); + } + + dims += 1; + if value == 0.0 { + continue; + } + idx.extend_from_slice(&(dims - 1).to_le_bytes()); + values.extend_from_slice(&value.to_le_bytes()); + } + + values.extend_from_slice(&idx); + Ok(Vector { + vector_type: VectorType::Float32Sparse, + dims: dims as usize, + data: values, + }) } diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index 52465038f..114d34edf 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -14,9 +14,10 @@ pub struct Vector { pub data: Vec, } -pub struct VectorSparse<'a, T> { - idx: &'a [u32], - values: &'a [T], +#[derive(Debug)] +pub struct VectorSparse<'a, T: std::fmt::Debug> { + pub idx: &'a [u32], + pub values: &'a [T], } impl Vector { @@ -53,7 +54,7 @@ impl Vector { let (vector_type, data) = Self::vector_type(blob)?; Self::from_data(vector_type, data) } - pub fn from_data(vector_type: VectorType, data: Vec) -> Result { + pub fn from_data(vector_type: VectorType, mut data: Vec) -> Result { match vector_type { VectorType::Float32Dense => { if data.len() % 4 != 0 { @@ -88,7 +89,8 @@ impl Vector { data.len(), ))); } - let dims = u32::from_le_bytes(data[data.len() - 4..].try_into().unwrap()) as usize; + let dims_bytes = data.split_off(data.len() - 4); + let dims = u32::from_le_bytes(dims_bytes.try_into().unwrap()) as usize; let vector = Vector { vector_type, dims, @@ -162,9 +164,10 @@ impl Vector { 0, "data pointer must be aligned to {align} bytes for f32 access" ); - let length = (self.data.len() - 4) / 4 / 2; + let length = self.data.len() / 4 / 2; let values = unsafe { std::slice::from_raw_parts(ptr as *const f32, length) }; let idx = unsafe { std::slice::from_raw_parts((ptr as *const u32).add(length), length) }; + debug_assert!(idx.is_sorted()); VectorSparse { idx, values } } } From 812709cf8e08e35df406066f41daf1db7a2bf40b Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 18:23:47 +0300 Subject: [PATCH 125/428] inline collation comparison functions --- core/translate/collate.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/translate/collate.rs b/core/translate/collate.rs index 721d82944..89804c470 100644 --- a/core/translate/collate.rs +++ b/core/translate/collate.rs @@ -1,6 +1,5 @@ use std::{cmp::Ordering, str::FromStr as _}; -use tracing::Level; use turso_parser::ast::Expr; use crate::{ @@ -37,8 +36,8 @@ impl CollationSeq { }) } + #[inline(always)] pub fn compare_strings(&self, lhs: &str, rhs: &str) -> Ordering { - tracing::event!(Level::DEBUG, collate = %self, lhs, rhs); match self { CollationSeq::Binary => Self::binary_cmp(lhs, rhs), CollationSeq::NoCase => Self::nocase_cmp(lhs, rhs), @@ -46,16 +45,19 @@ impl CollationSeq { } } + #[inline(always)] fn binary_cmp(lhs: &str, rhs: &str) -> Ordering { lhs.cmp(rhs) } + #[inline(always)] fn nocase_cmp(lhs: &str, rhs: &str) -> Ordering { let nocase_lhs = uncased::UncasedStr::new(lhs); let nocase_rhs = uncased::UncasedStr::new(rhs); nocase_lhs.cmp(nocase_rhs) } + #[inline(always)] fn rtrim_cmp(lhs: &str, rhs: &str) -> Ordering { lhs.trim_end().cmp(rhs.trim_end()) } From 27a88b86dc3ae9c37755a4d0d164dee5813263b6 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 18:37:10 +0300 Subject: [PATCH 126/428] Reuse a single RecordCursor per PseudoCursor --- core/pseudo.rs | 33 +++++++++++++++++++++++++++------ core/vdbe/execute.rs | 8 ++------ 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/core/pseudo.rs b/core/pseudo.rs index d55ba0e98..338db2e96 100644 --- a/core/pseudo.rs +++ b/core/pseudo.rs @@ -1,16 +1,37 @@ -use crate::types::ImmutableRecord; +use std::cell::{Ref, RefCell}; + +use crate::{ + types::{ImmutableRecord, RecordCursor}, + Result, Value, +}; -#[derive(Default)] pub struct PseudoCursor { - current: Option, + record_cursor: RecordCursor, + current: RefCell>, } impl PseudoCursor { - pub fn record(&self) -> Option<&ImmutableRecord> { - self.current.as_ref() + pub fn new() -> Self { + Self { + record_cursor: RecordCursor::new(), + current: RefCell::new(None), + } + } + + pub fn record(&self) -> Ref> { + self.current.borrow() } pub fn insert(&mut self, record: ImmutableRecord) { - self.current = Some(record); + self.record_cursor.invalidate(); + self.current.replace(Some(record)); + } + + pub fn get_value(&mut self, column: usize) -> Result { + if let Some(record) = self.current.borrow().as_ref() { + Ok(self.record_cursor.get_value(record, column)?.to_owned()) + } else { + Ok(Value::Null) + } } } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 1c63f54de..83263cffc 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -1389,7 +1389,7 @@ pub fn op_open_pseudo( ); { let cursors = &mut state.cursors; - let cursor = PseudoCursor::default(); + let cursor = PseudoCursor::new(); cursors .get_mut(*cursor_id) .unwrap() @@ -1875,11 +1875,7 @@ pub fn op_column( let value = { let cursor = state.get_cursor(*cursor_id); let cursor = cursor.as_pseudo_mut(); - if let Some(record) = cursor.record() { - record.get_value(*column)?.to_owned() - } else { - Value::Null - } + cursor.get_value(*column)? }; state.registers[*dest] = Register::Value(value); } From edf40cc65bbf8e6b2043a1d7f73b858ee94cab61 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 9 Oct 2025 19:00:40 +0300 Subject: [PATCH 127/428] clippy --- core/pseudo.rs | 6 ++++-- core/vdbe/execute.rs | 2 +- core/vdbe/sorter.rs | 5 ++--- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/core/pseudo.rs b/core/pseudo.rs index 338db2e96..3c43a55ea 100644 --- a/core/pseudo.rs +++ b/core/pseudo.rs @@ -10,14 +10,16 @@ pub struct PseudoCursor { current: RefCell>, } -impl PseudoCursor { - pub fn new() -> Self { +impl Default for PseudoCursor { + fn default() -> Self { Self { record_cursor: RecordCursor::new(), current: RefCell::new(None), } } +} +impl PseudoCursor { pub fn record(&self) -> Ref> { self.current.borrow() } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 83263cffc..019c3abd9 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -1389,7 +1389,7 @@ pub fn op_open_pseudo( ); { let cursors = &mut state.cursors; - let cursor = PseudoCursor::new(); + let cursor = PseudoCursor::default(); cursors .get_mut(*cursor_id) .unwrap() diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index 9af3110e0..ac7e07ed4 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -280,9 +280,8 @@ impl Sorter { // TODO: blocking will be unnecessary here with IO completions let mut completions = vec![]; for chunk_idx in 0..self.chunks.len() { - match self.push_to_chunk_heap(chunk_idx)? { - Some(c) => completions.push(c), - None => (), + if let Some(c) = self.push_to_chunk_heap(chunk_idx)? { + completions.push(c); }; } self.init_chunk_heap_state = InitChunkHeapState::Start; From 585d11b736f2207decc09718a0842bc06f2061dc Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 20:52:58 +0400 Subject: [PATCH 128/428] implement operations for sparse vectors --- core/vector/operations/concat.rs | 1 - core/vector/operations/distance_cos.rs | 21 +++++++- core/vector/operations/distance_l2.rs | 67 +++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 5 deletions(-) diff --git a/core/vector/operations/concat.rs b/core/vector/operations/concat.rs index 3504ba5bd..3e7f6a4f1 100644 --- a/core/vector/operations/concat.rs +++ b/core/vector/operations/concat.rs @@ -25,7 +25,6 @@ pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { data.extend_from_slice(&v2.data[v2.data.len() / 2..]); data } - _ => todo!(), }; Ok(Vector { diff --git a/core/vector/operations/distance_cos.rs b/core/vector/operations/distance_cos.rs index e211ee0e4..19fb17688 100644 --- a/core/vector/operations/distance_cos.rs +++ b/core/vector/operations/distance_cos.rs @@ -27,7 +27,6 @@ pub fn vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { v1.as_f32_sparse(), v2.as_f32_sparse(), )), - _ => todo!(), } } @@ -98,7 +97,7 @@ fn vector_f32_sparse_distance_cos(v1: VectorSparse, v2: VectorSparse) v1_pos += 1; } while v2_pos < v2.idx.len() { - norm1 += v2.values[v2_pos] * v2.values[v2_pos]; + norm2 += v2.values[v2_pos] * v2.values[v2_pos]; v2_pos += 1; } @@ -131,4 +130,22 @@ mod tests { assert_eq!(vector_f64_distance_cos(&[1.0, 2.0], &[-1.0, -2.0]), 2.0); assert_eq!(vector_f64_distance_cos(&[1.0, 2.0], &[-2.0, 1.0]), 1.0); } + + #[test] + fn test_distance_cos_f32_sparse() { + assert!( + (vector_f32_sparse_distance_cos( + VectorSparse { + idx: &[0, 1], + values: &[1.0, 2.0] + }, + VectorSparse { + idx: &[1, 2], + values: &[1.0, 3.0] + }, + ) - vector_f32_distance_cos(&[1.0, 2.0, 0.0], &[0.0, 1.0, 3.0])) + .abs() + < 1e-7 + ); + } } diff --git a/core/vector/operations/distance_l2.rs b/core/vector/operations/distance_l2.rs index 4095632d3..2599fd4d3 100644 --- a/core/vector/operations/distance_l2.rs +++ b/core/vector/operations/distance_l2.rs @@ -25,7 +25,6 @@ pub fn vector_distance_l2(v1: &Vector, v2: &Vector) -> Result { v1.as_f32_sparse(), v2.as_f32_sparse(), )), - _ => todo!(), } } @@ -57,12 +56,22 @@ fn vector_f32_sparse_distance_l2(v1: VectorSparse, v2: VectorSparse) - v1_pos += 1; v2_pos += 1; } else if v1.idx[v1_pos] < v2.idx[v2_pos] { + sum += v1.values[v1_pos].powi(2); v1_pos += 1; } else { + sum += v2.values[v2_pos].powi(2); v2_pos += 1; } } - sum as f64 + while v1_pos < v1.idx.len() { + sum += v1.values[v1_pos].powi(2); + v1_pos += 1; + } + while v2_pos < v2.idx.len() { + sum += v2.values[v2_pos].powi(2); + v2_pos += 1; + } + (sum as f64).sqrt() } #[cfg(test)] @@ -98,4 +107,58 @@ mod tests { let query = (2..7).map(|x| x as f32).collect::>(); assert_eq!(vector_f32_distance_l2(&v, &query), 20.0_f64.sqrt()); } + + #[test] + fn test_distance_l2_f32() { + assert_eq!(vector_f32_distance_l2(&[], &[]), 0.0); + assert_eq!( + vector_f32_distance_l2(&[1.0, 2.0], &[0.0, 0.0]), + (1f64 + 2f64 * 2f64).sqrt() + ); + assert_eq!(vector_f32_distance_l2(&[1.0, 2.0], &[1.0, 2.0]), 0.0); + assert_eq!( + vector_f32_distance_l2(&[1.0, 2.0], &[-1.0, -2.0]), + (2f64 * 2f64 + 4f64 * 4f64).sqrt() + ); + assert_eq!( + vector_f32_distance_l2(&[1.0, 2.0], &[-2.0, 1.0]), + (3f64 * 3f64 + 1f64 * 1f64).sqrt() + ); + } + + #[test] + fn test_distance_l2_f64() { + assert_eq!(vector_f64_distance_l2(&[], &[]), 0.0); + assert_eq!( + vector_f64_distance_l2(&[1.0, 2.0], &[0.0, 0.0]), + (1f64 + 2f64 * 2f64).sqrt() + ); + assert_eq!(vector_f64_distance_l2(&[1.0, 2.0], &[1.0, 2.0]), 0.0); + assert_eq!( + vector_f64_distance_l2(&[1.0, 2.0], &[-1.0, -2.0]), + (2f64 * 2f64 + 4f64 * 4f64).sqrt() + ); + assert_eq!( + vector_f64_distance_l2(&[1.0, 2.0], &[-2.0, 1.0]), + (3f64 * 3f64 + 1f64 * 1f64).sqrt() + ); + } + + #[test] + fn test_distance_l2_f32_sparse() { + assert!( + (vector_f32_sparse_distance_l2( + VectorSparse { + idx: &[0, 1], + values: &[1.0, 2.0] + }, + VectorSparse { + idx: &[1, 2], + values: &[1.0, 3.0] + }, + ) - vector_f32_distance_l2(&[1.0, 2.0, 0.0], &[0.0, 1.0, 3.0])) + .abs() + < 1e-7 + ); + } } From 5336801574ecdf06bd211aa93f2bce9e490680b2 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 20:59:27 +0400 Subject: [PATCH 129/428] add jaccard distance --- core/function.rs | 3 ++ core/translate/expr.rs | 9 ++++ core/vdbe/execute.rs | 7 ++- core/vector/mod.rs | 13 +++++ core/vector/operations/jaccard.rs | 87 +++++++++++++++++++++++++++++++ core/vector/operations/mod.rs | 1 + 6 files changed, 119 insertions(+), 1 deletion(-) create mode 100644 core/vector/operations/jaccard.rs diff --git a/core/function.rs b/core/function.rs index f064970c3..a9b9750d7 100644 --- a/core/function.rs +++ b/core/function.rs @@ -158,6 +158,7 @@ pub enum VectorFunc { VectorExtract, VectorDistanceCos, VectorDistanceL2, + VectorDistanceJaccard, VectorConcat, VectorSlice, } @@ -178,6 +179,7 @@ impl Display for VectorFunc { Self::VectorExtract => "vector_extract".to_string(), Self::VectorDistanceCos => "vector_distance_cos".to_string(), Self::VectorDistanceL2 => "vector_distance_l2".to_string(), + Self::VectorDistanceJaccard => "vector_distance_jaccard".to_string(), Self::VectorConcat => "vector_concat".to_string(), Self::VectorSlice => "vector_slice".to_string(), }; @@ -871,6 +873,7 @@ impl Func { "vector_extract" => Ok(Self::Vector(VectorFunc::VectorExtract)), "vector_distance_cos" => Ok(Self::Vector(VectorFunc::VectorDistanceCos)), "vector_distance_l2" => Ok(Self::Vector(VectorFunc::VectorDistanceL2)), + "vector_distance_jaccard" => Ok(Self::Vector(VectorFunc::VectorDistanceJaccard)), "vector_concat" => Ok(Self::Vector(VectorFunc::VectorConcat)), "vector_slice" => Ok(Self::Vector(VectorFunc::VectorSlice)), _ => crate::bail_parse_error!("no such function: {}", name), diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 0a84e1548..77a464538 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -921,6 +921,15 @@ pub fn translate_expr( emit_function_call(program, func_ctx, &[regs, regs + 1], target_register)?; Ok(target_register) } + VectorFunc::VectorDistanceJaccard => { + let args = expect_arguments_exact!(args, 2, vector_func); + let regs = program.alloc_registers(2); + translate_expr(program, referenced_tables, &args[0], regs, resolver)?; + translate_expr(program, referenced_tables, &args[1], regs + 1, resolver)?; + + emit_function_call(program, func_ctx, &[regs, regs + 1], target_register)?; + Ok(target_register) + } VectorFunc::VectorConcat => { let args = expect_arguments_exact!(args, 2, vector_func); let regs = program.alloc_registers(2); diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 87103b8d4..b68bdf866 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -20,7 +20,7 @@ use crate::types::{ use crate::util::normalize_ident; use crate::vdbe::insn::InsertFlags; use crate::vdbe::{registers_to_ref_values, TxnCleanup}; -use crate::vector::{vector32_sparse, vector_concat, vector_slice}; +use crate::vector::{vector32_sparse, vector_concat, vector_distance_jaccard, vector_slice}; use crate::{ error::{ LimboError, SQLITE_CONSTRAINT, SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PRIMARYKEY, @@ -5219,6 +5219,11 @@ pub fn op_function( vector_distance_l2(&state.registers[*start_reg..*start_reg + arg_count])?; state.registers[*dest] = Register::Value(result); } + VectorFunc::VectorDistanceJaccard => { + let result = + vector_distance_jaccard(&state.registers[*start_reg..*start_reg + arg_count])?; + state.registers[*dest] = Register::Value(result); + } VectorFunc::VectorConcat => { let result = vector_concat(&state.registers[*start_reg..*start_reg + arg_count])?; state.registers[*dest] = Register::Value(result); diff --git a/core/vector/mod.rs b/core/vector/mod.rs index 8c77f7e7c..514780a78 100644 --- a/core/vector/mod.rs +++ b/core/vector/mod.rs @@ -111,6 +111,19 @@ pub fn vector_distance_l2(args: &[Register]) -> Result { Ok(Value::Float(dist)) } +pub fn vector_distance_jaccard(args: &[Register]) -> Result { + if args.len() != 2 { + return Err(LimboError::ConversionError( + "distance_jaccard requires exactly two arguments".to_string(), + )); + } + + let x = parse_vector(&args[0], None)?; + let y = parse_vector(&args[1], None)?; + let dist = operations::jaccard::vector_distance_jaccard(&x, &y)?; + Ok(Value::Float(dist)) +} + pub fn vector_concat(args: &[Register]) -> Result { if args.len() != 2 { return Err(LimboError::InvalidArgument( diff --git a/core/vector/operations/jaccard.rs b/core/vector/operations/jaccard.rs new file mode 100644 index 000000000..f545d82bd --- /dev/null +++ b/core/vector/operations/jaccard.rs @@ -0,0 +1,87 @@ +use crate::{ + vector::vector_types::{Vector, VectorSparse, VectorType}, + LimboError, Result, +}; + +pub fn vector_distance_jaccard(v1: &Vector, v2: &Vector) -> Result { + if v1.dims != v2.dims { + return Err(LimboError::ConversionError( + "Vectors must have the same dimensions".to_string(), + )); + } + if v1.vector_type != v2.vector_type { + return Err(LimboError::ConversionError( + "Vectors must be of the same type".to_string(), + )); + } + match v1.vector_type { + VectorType::Float32Dense => Ok(vector_f32_distance_jaccard( + v1.as_f32_slice(), + v2.as_f32_slice(), + )), + VectorType::Float64Dense => Ok(vector_f64_distance_jaccard( + v1.as_f64_slice(), + v2.as_f64_slice(), + )), + VectorType::Float32Sparse => Ok(vector_f32_sparse_distance_jaccard( + v1.as_f32_sparse(), + v2.as_f32_sparse(), + )), + } +} + +fn vector_f32_distance_jaccard(v1: &[f32], v2: &[f32]) -> f64 { + let (mut min_sum, mut max_sum) = (0.0, 0.0); + for (&a, &b) in v1.iter().zip(v2.iter()) { + min_sum += a.min(b); + max_sum += a.max(b); + } + if max_sum == 0.0 { + return f64::NAN; + } + 1. - (min_sum / min_sum) as f64 +} + +fn vector_f64_distance_jaccard(v1: &[f64], v2: &[f64]) -> f64 { + let (mut min_sum, mut max_sum) = (0.0, 0.0); + for (&a, &b) in v1.iter().zip(v2.iter()) { + min_sum += a.min(b); + max_sum += a.max(b); + } + if max_sum == 0.0 { + return f64::NAN; + } + 1. - min_sum / min_sum +} + +fn vector_f32_sparse_distance_jaccard(v1: VectorSparse, v2: VectorSparse) -> f64 { + let mut v1_pos = 0; + let mut v2_pos = 0; + let (mut min_sum, mut max_sum) = (0.0, 0.0); + while v1_pos < v1.idx.len() && v2_pos < v2.idx.len() { + if v1.idx[v1_pos] == v2.idx[v2_pos] { + min_sum += v1.values[v1_pos].min(v2.values[v2_pos]); + max_sum += v1.values[v1_pos].max(v2.values[v2_pos]); + v1_pos += 1; + v2_pos += 1; + } else if v1.idx[v1_pos] < v2.idx[v2_pos] { + max_sum += v1.values[v1_pos]; + v1_pos += 1; + } else { + max_sum += v2.values[v2_pos]; + v2_pos += 1; + } + } + while v1_pos < v1.idx.len() { + max_sum += v1.values[v1_pos]; + v1_pos += 1; + } + while v2_pos < v2.idx.len() { + max_sum += v2.values[v2_pos]; + v2_pos += 1; + } + if max_sum == 0.0 { + return f64::NAN; + } + 1. - (min_sum / max_sum) as f64 +} diff --git a/core/vector/operations/mod.rs b/core/vector/operations/mod.rs index c0d10a0f0..9b1a20ada 100644 --- a/core/vector/operations/mod.rs +++ b/core/vector/operations/mod.rs @@ -2,6 +2,7 @@ pub mod concat; pub mod convert; pub mod distance_cos; pub mod distance_l2; +pub mod jaccard; pub mod serialize; pub mod slice; pub mod text; From ac9a25a417b62bea6d7818173f5059beba1fa8a9 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 21:16:55 +0400 Subject: [PATCH 130/428] fix clippy --- core/vector/operations/jaccard.rs | 4 ++-- core/vector/vector_types.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/vector/operations/jaccard.rs b/core/vector/operations/jaccard.rs index f545d82bd..e93655033 100644 --- a/core/vector/operations/jaccard.rs +++ b/core/vector/operations/jaccard.rs @@ -39,7 +39,7 @@ fn vector_f32_distance_jaccard(v1: &[f32], v2: &[f32]) -> f64 { if max_sum == 0.0 { return f64::NAN; } - 1. - (min_sum / min_sum) as f64 + 1. - (min_sum / max_sum) as f64 } fn vector_f64_distance_jaccard(v1: &[f64], v2: &[f64]) -> f64 { @@ -51,7 +51,7 @@ fn vector_f64_distance_jaccard(v1: &[f64], v2: &[f64]) -> f64 { if max_sum == 0.0 { return f64::NAN; } - 1. - min_sum / min_sum + 1. - min_sum / max_sum } fn vector_f32_sparse_distance_jaccard(v1: VectorSparse, v2: VectorSparse) -> f64 { diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index 114d34edf..c8c84ddee 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -41,7 +41,7 @@ impl Vector { match vector_type { 1 => Ok((VectorType::Float32Dense, blob)), 2 => Ok((VectorType::Float64Dense, blob)), - 3 | 4 | 5 | 6 => Err(LimboError::ConversionError( + 3..=6 => Err(LimboError::ConversionError( "unsupported vector type from LibSQL".to_string(), )), 9 => Ok((VectorType::Float32Sparse, blob)), @@ -83,7 +83,7 @@ impl Vector { }) } VectorType::Float32Sparse => { - if data.len() == 0 || data.len() % 4 != 0 || (data.len() - 4) % 8 != 0 { + if data.is_empty() || data.len() % 4 != 0 || (data.len() - 4) % 8 != 0 { return Err(LimboError::InvalidArgument(format!( "f32 sparse vector unexpected data length: {}", data.len(), From e18f26a1f1b787c65f9c83791f61069138999b09 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 21:28:46 +0400 Subject: [PATCH 131/428] fix bug after refactoring --- core/vector/operations/slice.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/vector/operations/slice.rs b/core/vector/operations/slice.rs index a1ee1b2fa..003195070 100644 --- a/core/vector/operations/slice.rs +++ b/core/vector/operations/slice.rs @@ -17,12 +17,12 @@ pub fn vector_slice(vector: &Vector, start: usize, end: usize) -> Result match vector.vector_type { VectorType::Float32Dense => Ok(Vector { vector_type: vector.vector_type, - dims: end - start + 1, + dims: end - start, data: vector.data[start * 4..end * 4].to_vec(), }), VectorType::Float64Dense => Ok(Vector { vector_type: vector.vector_type, - dims: end - start + 1, + dims: end - start, data: vector.data[start * 8..end * 8].to_vec(), }), VectorType::Float32Sparse => { @@ -40,7 +40,7 @@ pub fn vector_slice(vector: &Vector, start: usize, end: usize) -> Result values.extend_from_slice(&idx); Ok(Vector { vector_type: vector.vector_type, - dims: end - start + 1, + dims: end - start, data: values, }) } From 10c51c8da0f5d0b946a231eaa88a9f077b4af51a Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 22:14:38 +0400 Subject: [PATCH 132/428] add test for convert operation --- core/vector/operations/convert.rs | 67 ++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 2 deletions(-) diff --git a/core/vector/operations/convert.rs b/core/vector/operations/convert.rs index 175f7aa0d..9b8261418 100644 --- a/core/vector/operations/convert.rs +++ b/core/vector/operations/convert.rs @@ -21,8 +21,8 @@ pub fn vector_convert(v: Vector, target_type: VectorType) -> Result { } (VectorType::Float64Dense, VectorType::Float32Dense) => { let mut data = Vec::with_capacity(v.dims * 4); - for &x in v.as_f32_slice() { - data.extend_from_slice(&f64::to_le_bytes(x as f64)); + for &x in v.as_f64_slice() { + data.extend_from_slice(&f32::to_le_bytes(x as f32)); } Ok(Vector { vector_type: target_type, @@ -91,3 +91,66 @@ pub fn vector_convert(v: Vector, target_type: VectorType) -> Result { } } } + +#[cfg(test)] +mod tests { + use crate::vector::{ + operations::convert::vector_convert, + vector_types::{Vector, VectorType}, + }; + + fn concat(data: &[[u8; N]]) -> Vec { + data.iter().flatten().cloned().collect::>() + } + + fn assert_vectors(v1: &Vector, v2: &Vector) { + assert_eq!(v1.vector_type, v2.vector_type); + assert_eq!(v1.dims, v2.dims); + assert_eq!(v1.data, v2.data); + } + + #[test] + pub fn test_vector_convert() { + let vf32 = Vector { + vector_type: VectorType::Float32Dense, + dims: 3, + data: concat(&[ + 1.0f32.to_le_bytes(), + 0.0f32.to_le_bytes(), + 2.0f32.to_le_bytes(), + ]), + }; + let vf64 = Vector { + vector_type: VectorType::Float64Dense, + dims: 3, + data: concat(&[ + 1.0f64.to_le_bytes(), + 0.0f64.to_le_bytes(), + 2.0f64.to_le_bytes(), + ]), + }; + let vf32_sparse = Vector { + vector_type: VectorType::Float32Sparse, + dims: 3, + data: concat(&[ + 1.0f32.to_le_bytes(), + 2.0f32.to_le_bytes(), + 0u32.to_le_bytes(), + 2u32.to_le_bytes(), + ]), + }; + + let vectors = [vf32, vf64, vf32_sparse]; + for v1 in &vectors { + for v2 in &vectors { + println!("{:?} -> {:?}", v1.vector_type, v2.vector_type); + let v_copy = Vector { + vector_type: v1.vector_type, + dims: v1.dims, + data: v1.data.clone(), + }; + assert_vectors(&vector_convert(v_copy, v2.vector_type).unwrap(), &v2); + } + } + } +} From 7e727d07af5b08783a1abf1e8393b8041478eb0e Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Thu, 9 Oct 2025 23:23:16 +0400 Subject: [PATCH 133/428] fix bugs add tests --- core/vector/operations/convert.rs | 73 ++++++---------------- core/vector/operations/distance_cos.rs | 27 ++++++++- core/vector/operations/distance_l2.rs | 32 ++++++++-- core/vector/operations/jaccard.rs | 83 ++++++++++++++++++++++++-- core/vector/vector_types.rs | 75 ++++++++++++++++++++++- 5 files changed, 220 insertions(+), 70 deletions(-) diff --git a/core/vector/operations/convert.rs b/core/vector/operations/convert.rs index 9b8261418..96401123e 100644 --- a/core/vector/operations/convert.rs +++ b/core/vector/operations/convert.rs @@ -8,43 +8,22 @@ pub fn vector_convert(v: Vector, target_type: VectorType) -> Result { (VectorType::Float32Dense, VectorType::Float32Dense) | (VectorType::Float64Dense, VectorType::Float64Dense) | (VectorType::Float32Sparse, VectorType::Float32Sparse) => Ok(v), - (VectorType::Float32Dense, VectorType::Float64Dense) => { - let mut data = Vec::with_capacity(v.dims * 8); - for &x in v.as_f32_slice() { - data.extend_from_slice(&f64::to_le_bytes(x as f64)); - } - Ok(Vector { - vector_type: target_type, - dims: v.dims, - data, - }) - } - (VectorType::Float64Dense, VectorType::Float32Dense) => { - let mut data = Vec::with_capacity(v.dims * 4); - for &x in v.as_f64_slice() { - data.extend_from_slice(&f32::to_le_bytes(x as f32)); - } - Ok(Vector { - vector_type: target_type, - dims: v.dims, - data, - }) - } + (VectorType::Float32Dense, VectorType::Float64Dense) => Ok(Vector::from_f64( + v.as_f32_slice().iter().map(|&x| x as f64).collect(), + )), + (VectorType::Float64Dense, VectorType::Float32Dense) => Ok(Vector::from_f32( + v.as_f64_slice().iter().map(|&x| x as f32).collect(), + )), (VectorType::Float32Dense, VectorType::Float32Sparse) => { let (mut idx, mut values) = (Vec::new(), Vec::new()); for (i, &value) in v.as_f32_slice().iter().enumerate() { if value == 0.0 { continue; } - idx.extend_from_slice(&(i as u32).to_le_bytes()); - values.extend_from_slice(&value.to_le_bytes()); + idx.push(i as u32); + values.push(value); } - values.extend_from_slice(&idx); - Ok(Vector { - vector_type: target_type, - dims: v.dims, - data: values, - }) + Ok(Vector::from_f32_sparse(v.dims, values, idx)) } (VectorType::Float64Dense, VectorType::Float32Sparse) => { let (mut idx, mut values) = (Vec::new(), Vec::new()); @@ -52,42 +31,26 @@ pub fn vector_convert(v: Vector, target_type: VectorType) -> Result { if value == 0.0 { continue; } - idx.extend_from_slice(&(i as u32).to_le_bytes()); - values.extend_from_slice(&(value as f32).to_le_bytes()); + idx.push(i as u32); + values.push(value as f32); } - values.extend_from_slice(&idx); - Ok(Vector { - vector_type: target_type, - dims: v.dims, - data: values, - }) + Ok(Vector::from_f32_sparse(v.dims, values, idx)) } (VectorType::Float32Sparse, VectorType::Float32Dense) => { let sparse = v.as_f32_sparse(); - let mut data = vec![0u8; v.dims * 4]; + let mut data = vec![0f32; v.dims]; for (&i, &value) in sparse.idx.iter().zip(sparse.values.iter()) { - data.splice((4 * i) as usize..4 * (i + 1) as usize, value.to_le_bytes()); + data[i as usize] = value; } - Ok(Vector { - vector_type: target_type, - dims: v.dims, - data, - }) + Ok(Vector::from_f32(data)) } (VectorType::Float32Sparse, VectorType::Float64Dense) => { let sparse = v.as_f32_sparse(); - let mut data = vec![0u8; v.dims * 8]; + let mut data = vec![0f64; v.dims]; for (&i, &value) in sparse.idx.iter().zip(sparse.values.iter()) { - data.splice( - (8 * i) as usize..8 * (i + 1) as usize, - (value as f64).to_le_bytes(), - ); + data[i as usize] = value as f64; } - Ok(Vector { - vector_type: target_type, - dims: v.dims, - data, - }) + Ok(Vector::from_f64(data)) } } } diff --git a/core/vector/operations/distance_cos.rs b/core/vector/operations/distance_cos.rs index 19fb17688..aaa2c86f6 100644 --- a/core/vector/operations/distance_cos.rs +++ b/core/vector/operations/distance_cos.rs @@ -111,10 +111,15 @@ fn vector_f32_sparse_distance_cos(v1: VectorSparse, v2: VectorSparse) #[cfg(test)] mod tests { + use crate::vector::{ + operations::convert::vector_convert, vector_types::tests::ArbitraryVector, + }; + use super::*; + use quickcheck_macros::quickcheck; #[test] - fn test_distance_cos_f32() { + fn test_vector_distance_cos_f32() { assert!(vector_f32_distance_cos(&[], &[]).is_nan()); assert!(vector_f32_distance_cos(&[1.0, 2.0], &[0.0, 0.0]).is_nan()); assert_eq!(vector_f32_distance_cos(&[1.0, 2.0], &[1.0, 2.0]), 0.0); @@ -123,7 +128,7 @@ mod tests { } #[test] - fn test_distance_cos_f64() { + fn test_vector_distance_cos_f64() { assert!(vector_f64_distance_cos(&[], &[]).is_nan()); assert!(vector_f64_distance_cos(&[1.0, 2.0], &[0.0, 0.0]).is_nan()); assert_eq!(vector_f64_distance_cos(&[1.0, 2.0], &[1.0, 2.0]), 0.0); @@ -132,7 +137,7 @@ mod tests { } #[test] - fn test_distance_cos_f32_sparse() { + fn test_vector_distance_cos_f32_sparse() { assert!( (vector_f32_sparse_distance_cos( VectorSparse { @@ -148,4 +153,20 @@ mod tests { < 1e-7 ); } + + #[quickcheck] + fn prop_vector_distance_cos_dense_vs_sparse( + v1: ArbitraryVector<100>, + v2: ArbitraryVector<100>, + ) -> bool { + let v1 = vector_convert(v1.into(), VectorType::Float32Dense).unwrap(); + let v2 = vector_convert(v2.into(), VectorType::Float32Dense).unwrap(); + let d1 = vector_distance_cos(&v1, &v2).unwrap(); + + let sparse1 = vector_convert(v1, VectorType::Float32Sparse).unwrap(); + let sparse2 = vector_convert(v2, VectorType::Float32Sparse).unwrap(); + let d2 = vector_f32_sparse_distance_cos(sparse1.as_f32_sparse(), sparse2.as_f32_sparse()); + + (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-6 + } } diff --git a/core/vector/operations/distance_l2.rs b/core/vector/operations/distance_l2.rs index 2599fd4d3..68d01857a 100644 --- a/core/vector/operations/distance_l2.rs +++ b/core/vector/operations/distance_l2.rs @@ -76,10 +76,16 @@ fn vector_f32_sparse_distance_l2(v1: VectorSparse, v2: VectorSparse) - #[cfg(test)] mod tests { + use quickcheck_macros::quickcheck; + + use crate::vector::{ + operations::convert::vector_convert, vector_types::tests::ArbitraryVector, + }; + use super::*; #[test] - fn test_euclidean_distance_f32() { + fn test_vector_distance_l2_f32_another() { let vectors = [ (0..8).map(|x| x as f32).collect::>(), (1..9).map(|x| x as f32).collect::>(), @@ -102,14 +108,14 @@ mod tests { } #[test] - fn test_odd_len() { + fn test_vector_distance_l2_odd_len() { let v = (0..5).map(|x| x as f32).collect::>(); let query = (2..7).map(|x| x as f32).collect::>(); assert_eq!(vector_f32_distance_l2(&v, &query), 20.0_f64.sqrt()); } #[test] - fn test_distance_l2_f32() { + fn test_vector_distance_l2_f32() { assert_eq!(vector_f32_distance_l2(&[], &[]), 0.0); assert_eq!( vector_f32_distance_l2(&[1.0, 2.0], &[0.0, 0.0]), @@ -127,7 +133,7 @@ mod tests { } #[test] - fn test_distance_l2_f64() { + fn test_vector_distance_l2_f64() { assert_eq!(vector_f64_distance_l2(&[], &[]), 0.0); assert_eq!( vector_f64_distance_l2(&[1.0, 2.0], &[0.0, 0.0]), @@ -145,7 +151,7 @@ mod tests { } #[test] - fn test_distance_l2_f32_sparse() { + fn test_vector_distance_l2_f32_sparse() { assert!( (vector_f32_sparse_distance_l2( VectorSparse { @@ -161,4 +167,20 @@ mod tests { < 1e-7 ); } + + #[quickcheck] + fn prop_vector_distance_l2_dense_vs_sparse( + v1: ArbitraryVector<100>, + v2: ArbitraryVector<100>, + ) -> bool { + let v1 = vector_convert(v1.into(), VectorType::Float32Dense).unwrap(); + let v2 = vector_convert(v2.into(), VectorType::Float32Dense).unwrap(); + let d1 = vector_distance_l2(&v1, &v2).unwrap(); + + let sparse1 = vector_convert(v1, VectorType::Float32Sparse).unwrap(); + let sparse2 = vector_convert(v2, VectorType::Float32Sparse).unwrap(); + let d2 = vector_f32_sparse_distance_l2(sparse1.as_f32_sparse(), sparse2.as_f32_sparse()); + + (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-6 + } } diff --git a/core/vector/operations/jaccard.rs b/core/vector/operations/jaccard.rs index e93655033..f7f73c2a0 100644 --- a/core/vector/operations/jaccard.rs +++ b/core/vector/operations/jaccard.rs @@ -65,19 +65,23 @@ fn vector_f32_sparse_distance_jaccard(v1: VectorSparse, v2: VectorSparse, v2: VectorSparse, + v2: ArbitraryVector<100>, + ) -> bool { + let v1 = vector_convert(v1.into(), VectorType::Float32Dense).unwrap(); + let v2 = vector_convert(v2.into(), VectorType::Float32Dense).unwrap(); + let d1 = vector_distance_jaccard(&v1, &v2).unwrap(); + println!("v1: {:?}, v2: {:?}", v1.as_f32_slice(), v2.as_f32_slice()); + + let sparse1 = vector_convert(v1, VectorType::Float32Sparse).unwrap(); + let sparse2 = vector_convert(v2, VectorType::Float32Sparse).unwrap(); + let d2 = + vector_f32_sparse_distance_jaccard(sparse1.as_f32_sparse(), sparse2.as_f32_sparse()); + + println!("d1: {}, d2: {}, delta: {}", d1, d2, (d1 - d2).abs()); + (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-6 + } +} diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index c8c84ddee..a5551a453 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -50,6 +50,64 @@ impl Vector { ))), } } + pub fn from_f32(mut values_f32: Vec) -> Self { + let dims = values_f32.len(); + let values = unsafe { + Vec::from_raw_parts( + values_f32.as_mut_ptr() as *mut u8, + values_f32.len() * 4, + values_f32.capacity() * 4, + ) + }; + std::mem::forget(values_f32); + Self { + vector_type: VectorType::Float32Dense, + dims, + data: values, + } + } + pub fn from_f64(mut values_f64: Vec) -> Self { + let dims = values_f64.len(); + let values = unsafe { + Vec::from_raw_parts( + values_f64.as_mut_ptr() as *mut u8, + values_f64.len() * 8, + values_f64.capacity() * 8, + ) + }; + std::mem::forget(values_f64); + Self { + vector_type: VectorType::Float64Dense, + dims, + data: values, + } + } + pub fn from_f32_sparse(dims: usize, mut values_f32: Vec, mut idx_u32: Vec) -> Self { + let mut values = unsafe { + Vec::from_raw_parts( + values_f32.as_mut_ptr() as *mut u8, + values_f32.len() * 4, + values_f32.capacity() * 4, + ) + }; + std::mem::forget(values_f32); + + let idx = unsafe { + Vec::from_raw_parts( + idx_u32.as_mut_ptr() as *mut u8, + idx_u32.len() * 4, + idx_u32.capacity() * 4, + ) + }; + std::mem::forget(idx_u32); + + values.extend_from_slice(&idx); + Self { + vector_type: VectorType::Float32Sparse, + dims, + data: values, + } + } pub fn from_blob(blob: Vec) -> Result { let (vector_type, data) = Self::vector_type(blob)?; Self::from_data(vector_type, data) @@ -107,6 +165,7 @@ impl Vector { /// - The buffer is correctly aligned for `f32` /// - The length of the buffer is exactly `dims * size_of::()` pub fn as_f32_slice(&self) -> &[f32] { + debug_assert!(self.vector_type == VectorType::Float32Dense); if self.dims == 0 { return &[]; } @@ -135,6 +194,7 @@ impl Vector { /// - The buffer is correctly aligned for `f64` /// - The length of the buffer is exactly `dims * size_of::()` pub fn as_f64_slice(&self) -> &[f64] { + debug_assert!(self.vector_type == VectorType::Float64Dense); if self.dims == 0 { return &[]; } @@ -157,6 +217,7 @@ impl Vector { } pub fn as_f32_sparse(&self) -> VectorSparse<'_, f32> { + debug_assert!(self.vector_type == VectorType::Float32Sparse); let ptr = self.data.as_ptr(); let align = std::mem::align_of::(); assert_eq!( @@ -173,7 +234,7 @@ impl Vector { } #[cfg(test)] -mod tests { +pub(crate) mod tests { use crate::vector::operations; use super::*; @@ -182,7 +243,7 @@ mod tests { // Helper to generate arbitrary vectors of specific type and dimensions #[derive(Debug, Clone)] - struct ArbitraryVector { + pub struct ArbitraryVector { vector_type: VectorType, data: Vec, } @@ -193,6 +254,10 @@ mod tests { (0..DIMS) .map(|_| { loop { + // generate zeroes with some probability since we have support for sparse vectors + if bool::arbitrary(g) { + return 0.0; + } let f = f32::arbitrary(g); // f32::arbitrary() can generate "problem values" like NaN, infinity, and very small values // Skip these values @@ -209,6 +274,10 @@ mod tests { (0..DIMS) .map(|_| { loop { + // generate zeroes with some probability since we have support for sparse vectors + if bool::arbitrary(g) { + return 0.0; + } let f = f64::arbitrary(g); // f64::arbitrary() can generate "problem values" like NaN, infinity, and very small values // Skip these values @@ -375,7 +444,7 @@ mod tests { /// - The distance must be between 0 and 2 fn test_vector_distance(v1: &Vector, v2: &Vector) -> bool { match operations::distance_cos::vector_distance_cos(v1, v2) { - Ok(distance) => (0.0..=2.0).contains(&distance), + Ok(distance) => distance.is_nan() || (0.0..=2.0).contains(&distance), Err(_) => true, } } From b6f94b2fa1e27a1080ec32711c7355cceb74ef25 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 9 Oct 2025 17:19:47 -0300 Subject: [PATCH 134/428] remove dead code in sim --- simulator/generation/plan.rs | 28 ----------- simulator/generation/property.rs | 81 ++------------------------------ simulator/main.rs | 3 +- simulator/model/mod.rs | 10 ---- simulator/runner/bugbase.rs | 4 ++ simulator/runner/env.rs | 22 --------- simulator/runner/execution.rs | 1 + simulator/runner/memory/io.rs | 6 +-- simulator/runner/mod.rs | 2 +- 9 files changed, 14 insertions(+), 143 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index c8925cd59..337978569 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -63,11 +63,6 @@ impl InteractionPlan { Self { plan, mvcc, len } } - #[inline] - pub fn plan(&self) -> &[Interactions] { - &self.plan - } - /// Length of interactions that are not transaction statements #[inline] pub fn len(&self) -> usize { @@ -629,14 +624,6 @@ impl InteractionsType { } impl Interactions { - pub(crate) fn name(&self) -> Option<&str> { - match &self.interactions { - InteractionsType::Property(property) => Some(property.name()), - InteractionsType::Query(_) => None, - InteractionsType::Fault(_) => None, - } - } - pub(crate) fn interactions(&self) -> Vec { match &self.interactions { InteractionsType::Property(property) => property.interactions(self.connection_index), @@ -726,17 +713,6 @@ pub(crate) struct InteractionStats { pub(crate) rollback_count: u32, } -impl InteractionStats { - pub fn total_writes(&self) -> u32 { - self.insert_count - + self.delete_count - + self.update_count - + self.create_count - + self.create_index_count - + self.drop_count - } -} - impl Display for InteractionStats { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( @@ -758,10 +734,6 @@ impl Display for InteractionStats { type AssertionFunc = dyn Fn(&Vec, &mut SimulatorEnv) -> Result>; -enum AssertionAST { - Pick(), -} - #[derive(Clone)] pub struct Assertion { pub func: Rc, diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 47b352406..b67026dae 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -8,7 +8,7 @@ use rand::distr::{Distribution, weighted::WeightedIndex}; use serde::{Deserialize, Serialize}; use sql_generation::{ - generation::{Arbitrary, ArbitraryFrom, GenerationContext, Opts, pick, pick_index}, + generation::{Arbitrary, ArbitraryFrom, GenerationContext, pick, pick_index}, model::{ query::{ Create, Delete, Drop, Insert, Select, @@ -17,7 +17,7 @@ use sql_generation::{ transaction::{Begin, Commit, Rollback}, update::Update, }, - table::{SimValue, Table}, + table::SimValue, }, }; use strum::IntoEnumIterator; @@ -27,40 +27,15 @@ use turso_parser::ast::{self, Distinctness}; use crate::{ common::print_diff, generation::{ - Shadow as _, WeightedDistribution, - plan::InteractionType, - query::{QueryDistribution, possible_queries}, + Shadow as _, WeightedDistribution, plan::InteractionType, query::QueryDistribution, }, model::{Query, QueryCapabilities, QueryDiscriminants}, profiles::query::QueryProfile, - runner::env::{ShadowTablesMut, SimulatorEnv}, + runner::env::SimulatorEnv, }; use super::plan::{Assertion, Interaction, InteractionStats, ResultSet}; -#[derive(Debug, Clone, Copy)] -struct PropertyGenContext<'a> { - tables: &'a Vec, - opts: &'a sql_generation::generation::Opts, -} - -impl<'a> PropertyGenContext<'a> { - #[inline] - fn new(tables: &'a Vec
, opts: &'a Opts) -> Self { - Self { tables, opts } - } -} - -impl<'a> GenerationContext for PropertyGenContext<'a> { - fn tables(&self) -> &Vec { - self.tables - } - - fn opts(&self) -> &sql_generation::generation::Opts { - self.opts - } -} - /// Properties are representations of executable specifications /// about the database behavior. #[derive(Debug, Clone, Serialize, Deserialize, strum::EnumDiscriminants)] @@ -1925,11 +1900,6 @@ impl PropertyDiscriminants { } } -pub fn possiple_properties(tables: &[Table]) -> Vec { - let queries = possible_queries(tables); - PropertyDiscriminants::can_generate(queries) -} - pub(super) struct PropertyDistribution<'a> { properties: Vec, weights: WeightedIndex, @@ -1995,49 +1965,6 @@ impl<'a> ArbitraryFrom<&PropertyDistribution<'a>> for Property { } } -fn generate_queries( - rng: &mut R, - ctx: &impl GenerationContext, - amount: usize, - init_queries: &[&Query], - func: F, -) -> Vec -where - F: Fn(&mut R, PropertyGenContext) -> Option, -{ - // Create random queries respecting the constraints - let mut queries = Vec::new(); - - let range = 0..amount; - if !range.is_empty() { - let mut tmp_tables = ctx.tables().clone(); - - for query in init_queries { - tmp_shadow(&mut tmp_tables, query); - } - - for _ in range { - let tmp_ctx = PropertyGenContext::new(&tmp_tables, ctx.opts()); - - let Some(query) = func(rng, tmp_ctx) else { - continue; - }; - - tmp_shadow(&mut tmp_tables, &query); - - queries.push(query); - } - } - queries -} - -fn tmp_shadow(tmp_tables: &mut Vec
, query: &Query) { - let mut tx_tables = None; - let mut tmp_shadow_tables = ShadowTablesMut::new(tmp_tables, &mut tx_tables); - - let _ = query.shadow(&mut tmp_shadow_tables); -} - fn print_row(row: &[SimValue]) -> String { row.iter() .map(|v| match &v.0 { diff --git a/simulator/main.rs b/simulator/main.rs index 15376a13b..6a5d097b8 100644 --- a/simulator/main.rs +++ b/simulator/main.rs @@ -1,4 +1,4 @@ -#![allow(clippy::arc_with_non_send_sync, dead_code)] +#![allow(clippy::arc_with_non_send_sync)] use anyhow::anyhow; use clap::Parser; use generation::plan::{InteractionPlan, InteractionPlanState}; @@ -421,6 +421,7 @@ enum SandboxedResult { error: String, last_execution: Execution, }, + #[expect(dead_code)] FoundBug { error: String, history: ExecutionHistory, diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 9e3d29db2..3f8a4ec9d 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -204,16 +204,6 @@ impl QueryDiscriminants { QueryDiscriminants::Drop, QueryDiscriminants::CreateIndex, ]; - - #[inline] - pub fn is_transaction(&self) -> bool { - matches!(self, Self::Begin | Self::Commit | Self::Rollback) - } - - #[inline] - pub fn is_ddl(&self) -> bool { - matches!(self, Self::Create | Self::CreateIndex | Self::Drop) - } } impl Shadow for Create { diff --git a/simulator/runner/bugbase.rs b/simulator/runner/bugbase.rs index 179c292f1..dd0d6f432 100644 --- a/simulator/runner/bugbase.rs +++ b/simulator/runner/bugbase.rs @@ -49,6 +49,7 @@ pub(crate) struct BugRun { } impl Bug { + #[expect(dead_code)] /// Check if the bug is loaded. pub(crate) fn is_loaded(&self) -> bool { match self { @@ -130,6 +131,7 @@ impl BugBase { Err(anyhow!("failed to create bug base")) } + #[expect(dead_code)] /// Load the bug base from one of the potential paths. pub(crate) fn interactive_load() -> anyhow::Result { let potential_paths = vec![ @@ -338,6 +340,7 @@ impl BugBase { } } + #[expect(dead_code)] pub(crate) fn mark_successful_run( &mut self, seed: u64, @@ -434,6 +437,7 @@ impl BugBase { } impl BugBase { + #[expect(dead_code)] /// Get the path to the bug base directory. pub(crate) fn path(&self) -> &PathBuf { &self.path diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index 300b08c84..79497c38b 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -83,18 +83,6 @@ impl<'a, 'b> ShadowTablesMut<'a> where 'a: 'b, { - /// Creation of [ShadowTablesMut] outside of [SimulatorEnv] should be done sparingly and carefully. - /// Should only need to call this function if we need to do shadowing in a temporary model table - pub fn new( - commited_tables: &'a mut Vec
, - transaction_tables: &'a mut Option, - ) -> Self { - ShadowTablesMut { - commited_tables, - transaction_tables, - } - } - fn tables(&'a self) -> &'a Vec
{ self.transaction_tables .as_ref() @@ -312,7 +300,6 @@ impl SimulatorEnv { seed, ticks: rng .random_range(cli_opts.minimum_tests as usize..=cli_opts.maximum_tests as usize), - max_tables: rng.random_range(0..128), disable_select_optimizer: cli_opts.disable_select_optimizer, disable_insert_values_select: cli_opts.disable_insert_values_select, disable_double_create_failure: cli_opts.disable_double_create_failure, @@ -528,14 +515,6 @@ impl SimulatorEnv { } } -pub trait ConnectionTrait -where - Self: std::marker::Sized + Clone, -{ - fn is_connected(&self) -> bool; - fn disconnect(&mut self); -} - pub(crate) enum SimConnection { LimboConnection(Arc), SQLiteConnection(rusqlite::Connection), @@ -584,7 +563,6 @@ impl Display for SimConnection { pub(crate) struct SimulatorOpts { pub(crate) seed: u64, pub(crate) ticks: usize, - pub(crate) max_tables: usize, pub(crate) disable_select_optimizer: bool, pub(crate) disable_insert_values_select: bool, diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs index e3cfef375..7bc9b40e4 100644 --- a/simulator/runner/execution.rs +++ b/simulator/runner/execution.rs @@ -46,6 +46,7 @@ impl ExecutionHistory { } pub struct ExecutionResult { + #[expect(dead_code)] pub history: ExecutionHistory, pub error: Option, } diff --git a/simulator/runner/memory/io.rs b/simulator/runner/memory/io.rs index 007398a10..557ada9a2 100644 --- a/simulator/runner/memory/io.rs +++ b/simulator/runner/memory/io.rs @@ -1,4 +1,4 @@ -use std::cell::{Cell, RefCell}; +use std::cell::RefCell; use std::sync::Arc; use indexmap::IndexMap; @@ -121,7 +121,7 @@ pub struct MemorySimIO { timeouts: CallbackQueue, pub files: RefCell>>, pub rng: RefCell, - pub nr_run_once_faults: Cell, + #[expect(dead_code)] pub page_size: usize, seed: u64, latency_probability: u8, @@ -141,13 +141,11 @@ impl MemorySimIO { ) -> Self { let files = RefCell::new(IndexMap::new()); let rng = RefCell::new(ChaCha8Rng::seed_from_u64(seed)); - let nr_run_once_faults = Cell::new(0); Self { callbacks: Arc::new(Mutex::new(Vec::new())), timeouts: Arc::new(Mutex::new(Vec::new())), files, rng, - nr_run_once_faults, page_size, seed, latency_probability, diff --git a/simulator/runner/mod.rs b/simulator/runner/mod.rs index 7afbaa720..0f60c95fb 100644 --- a/simulator/runner/mod.rs +++ b/simulator/runner/mod.rs @@ -5,7 +5,7 @@ pub mod differential; pub mod doublecheck; pub mod env; pub mod execution; -#[allow(dead_code)] +#[expect(dead_code)] pub mod file; pub mod io; pub mod memory; From fb6c5ffcff295133963a90b27c6f86abb35ad6bf Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 9 Oct 2025 16:48:57 -0300 Subject: [PATCH 135/428] move SimValue generation to separate files to facilitate generation of new types of values in the future --- sql_generation/generation/mod.rs | 1 + sql_generation/generation/predicate/binary.rs | 2 +- sql_generation/generation/table.rs | 232 +----------------- sql_generation/generation/value/cmp.rs | 146 +++++++++++ sql_generation/generation/value/mod.rs | 58 +++++ sql_generation/generation/value/pattern.rs | 44 ++++ 6 files changed, 252 insertions(+), 231 deletions(-) create mode 100644 sql_generation/generation/value/cmp.rs create mode 100644 sql_generation/generation/value/mod.rs create mode 100644 sql_generation/generation/value/pattern.rs diff --git a/sql_generation/generation/mod.rs b/sql_generation/generation/mod.rs index 1292b3448..e67dc482b 100644 --- a/sql_generation/generation/mod.rs +++ b/sql_generation/generation/mod.rs @@ -8,6 +8,7 @@ pub mod opts; pub mod predicate; pub mod query; pub mod table; +pub mod value; pub use opts::*; diff --git a/sql_generation/generation/predicate/binary.rs b/sql_generation/generation/predicate/binary.rs index 37b2e4e93..e3b52d5ec 100644 --- a/sql_generation/generation/predicate/binary.rs +++ b/sql_generation/generation/predicate/binary.rs @@ -6,7 +6,7 @@ use crate::{ generation::{ backtrack, one_of, pick, predicate::{CompoundPredicate, SimplePredicate}, - table::{GTValue, LTValue, LikeValue}, + value::{GTValue, LTValue, LikeValue}, ArbitraryFrom, ArbitraryFromMaybe as _, GenerationContext, }, model::{ diff --git a/sql_generation/generation/table.rs b/sql_generation/generation/table.rs index 6e55942c3..2ced09fee 100644 --- a/sql_generation/generation/table.rs +++ b/sql_generation/generation/table.rs @@ -2,14 +2,9 @@ use std::sync::atomic::{AtomicU64, Ordering}; use indexmap::IndexSet; use rand::Rng; -use turso_core::Value; -use crate::generation::{ - gen_random_text, pick, readable_name_custom, Arbitrary, ArbitraryFrom, GenerationContext, -}; -use crate::model::table::{Column, ColumnType, Name, SimValue, Table}; - -use super::ArbitraryFromMaybe; +use crate::generation::{pick, readable_name_custom, Arbitrary, GenerationContext}; +use crate::model::table::{Column, ColumnType, Name, Table}; static COUNTER: AtomicU64 = AtomicU64::new(0); @@ -67,226 +62,3 @@ impl Arbitrary for ColumnType { pick(&[Self::Integer, Self::Float, Self::Text, Self::Blob], rng).to_owned() } } - -impl ArbitraryFrom<&Table> for Vec { - fn arbitrary_from( - rng: &mut R, - context: &C, - table: &Table, - ) -> Self { - let mut row = Vec::new(); - for column in table.columns.iter() { - let value = SimValue::arbitrary_from(rng, context, &column.column_type); - row.push(value); - } - row - } -} - -impl ArbitraryFrom<&Vec<&SimValue>> for SimValue { - fn arbitrary_from( - rng: &mut R, - _context: &C, - values: &Vec<&Self>, - ) -> Self { - if values.is_empty() { - return Self(Value::Null); - } - - pick(values, rng).to_owned().clone() - } -} - -impl ArbitraryFrom<&ColumnType> for SimValue { - fn arbitrary_from( - rng: &mut R, - _context: &C, - column_type: &ColumnType, - ) -> Self { - let value = match column_type { - ColumnType::Integer => Value::Integer(rng.random_range(i64::MIN..i64::MAX)), - ColumnType::Float => Value::Float(rng.random_range(-1e10..1e10)), - ColumnType::Text => Value::build_text(gen_random_text(rng)), - ColumnType::Blob => Value::Blob(gen_random_text(rng).as_bytes().to_vec()), - }; - SimValue(value) - } -} - -pub struct LTValue(pub SimValue); - -impl ArbitraryFrom<&Vec<&SimValue>> for LTValue { - fn arbitrary_from( - rng: &mut R, - context: &C, - values: &Vec<&SimValue>, - ) -> Self { - if values.is_empty() { - return Self(SimValue(Value::Null)); - } - - // Get value less than all values - let value = Value::exec_min(values.iter().map(|value| &value.0)); - Self::arbitrary_from(rng, context, &SimValue(value)) - } -} - -impl ArbitraryFrom<&SimValue> for LTValue { - fn arbitrary_from( - rng: &mut R, - _context: &C, - value: &SimValue, - ) -> Self { - let new_value = match &value.0 { - Value::Integer(i) => Value::Integer(rng.random_range(i64::MIN..*i - 1)), - Value::Float(f) => Value::Float(f - rng.random_range(0.0..1e10)), - value @ Value::Text(..) => { - // Either shorten the string, or make at least one character smaller and mutate the rest - let mut t = value.to_string(); - if rng.random_bool(0.01) { - t.pop(); - Value::build_text(t) - } else { - let mut t = t.chars().map(|c| c as u32).collect::>(); - let index = rng.random_range(0..t.len()); - t[index] -= 1; - // Mutate the rest of the string - for val in t.iter_mut().skip(index + 1) { - *val = rng.random_range('a' as u32..='z' as u32); - } - let t = t - .into_iter() - .map(|c| char::from_u32(c).unwrap_or('z')) - .collect::(); - Value::build_text(t) - } - } - Value::Blob(b) => { - // Either shorten the blob, or make at least one byte smaller and mutate the rest - let mut b = b.clone(); - if rng.random_bool(0.01) { - b.pop(); - Value::Blob(b) - } else { - let index = rng.random_range(0..b.len()); - b[index] -= 1; - // Mutate the rest of the blob - for val in b.iter_mut().skip(index + 1) { - *val = rng.random_range(0..=255); - } - Value::Blob(b) - } - } - _ => unreachable!(), - }; - Self(SimValue(new_value)) - } -} - -pub struct GTValue(pub SimValue); - -impl ArbitraryFrom<&Vec<&SimValue>> for GTValue { - fn arbitrary_from( - rng: &mut R, - context: &C, - values: &Vec<&SimValue>, - ) -> Self { - if values.is_empty() { - return Self(SimValue(Value::Null)); - } - // Get value greater than all values - let value = Value::exec_max(values.iter().map(|value| &value.0)); - - Self::arbitrary_from(rng, context, &SimValue(value)) - } -} - -impl ArbitraryFrom<&SimValue> for GTValue { - fn arbitrary_from( - rng: &mut R, - _context: &C, - value: &SimValue, - ) -> Self { - let new_value = match &value.0 { - Value::Integer(i) => Value::Integer(rng.random_range(*i..i64::MAX)), - Value::Float(f) => Value::Float(rng.random_range(*f..1e10)), - value @ Value::Text(..) => { - // Either lengthen the string, or make at least one character smaller and mutate the rest - let mut t = value.to_string(); - if rng.random_bool(0.01) { - t.push(rng.random_range(0..=255) as u8 as char); - Value::build_text(t) - } else { - let mut t = t.chars().map(|c| c as u32).collect::>(); - let index = rng.random_range(0..t.len()); - t[index] += 1; - // Mutate the rest of the string - for val in t.iter_mut().skip(index + 1) { - *val = rng.random_range('a' as u32..='z' as u32); - } - let t = t - .into_iter() - .map(|c| char::from_u32(c).unwrap_or('a')) - .collect::(); - Value::build_text(t) - } - } - Value::Blob(b) => { - // Either lengthen the blob, or make at least one byte smaller and mutate the rest - let mut b = b.clone(); - if rng.random_bool(0.01) { - b.push(rng.random_range(0..=255)); - Value::Blob(b) - } else { - let index = rng.random_range(0..b.len()); - b[index] += 1; - // Mutate the rest of the blob - for val in b.iter_mut().skip(index + 1) { - *val = rng.random_range(0..=255); - } - Value::Blob(b) - } - } - _ => unreachable!(), - }; - Self(SimValue(new_value)) - } -} - -pub struct LikeValue(pub SimValue); - -impl ArbitraryFromMaybe<&SimValue> for LikeValue { - fn arbitrary_from_maybe( - rng: &mut R, - _context: &C, - value: &SimValue, - ) -> Option { - match &value.0 { - value @ Value::Text(..) => { - let t = value.to_string(); - let mut t = t.chars().collect::>(); - // Remove a number of characters, either insert `_` for each character removed, or - // insert one `%` for the whole substring - let mut i = 0; - while i < t.len() { - if rng.random_bool(0.1) { - t[i] = '_'; - } else if rng.random_bool(0.05) { - t[i] = '%'; - // skip a list of characters - for _ in 0..rng.random_range(0..=3.min(t.len() - i - 1)) { - t.remove(i + 1); - } - } - i += 1; - } - let index = rng.random_range(0..t.len()); - t.insert(index, '%'); - Some(Self(SimValue(Value::build_text( - t.into_iter().collect::(), - )))) - } - _ => None, - } - } -} diff --git a/sql_generation/generation/value/cmp.rs b/sql_generation/generation/value/cmp.rs new file mode 100644 index 000000000..567a59a5e --- /dev/null +++ b/sql_generation/generation/value/cmp.rs @@ -0,0 +1,146 @@ +use turso_core::Value; + +use crate::{ + generation::{ArbitraryFrom, GenerationContext}, + model::table::SimValue, +}; + +pub struct LTValue(pub SimValue); + +impl ArbitraryFrom<&Vec<&SimValue>> for LTValue { + fn arbitrary_from( + rng: &mut R, + context: &C, + values: &Vec<&SimValue>, + ) -> Self { + if values.is_empty() { + return Self(SimValue(Value::Null)); + } + + // Get value less than all values + let value = Value::exec_min(values.iter().map(|value| &value.0)); + Self::arbitrary_from(rng, context, &SimValue(value)) + } +} + +impl ArbitraryFrom<&SimValue> for LTValue { + fn arbitrary_from( + rng: &mut R, + _context: &C, + value: &SimValue, + ) -> Self { + let new_value = match &value.0 { + Value::Integer(i) => Value::Integer(rng.random_range(i64::MIN..*i - 1)), + Value::Float(f) => Value::Float(f - rng.random_range(0.0..1e10)), + value @ Value::Text(..) => { + // Either shorten the string, or make at least one character smaller and mutate the rest + let mut t = value.to_string(); + if rng.random_bool(0.01) { + t.pop(); + Value::build_text(t) + } else { + let mut t = t.chars().map(|c| c as u32).collect::>(); + let index = rng.random_range(0..t.len()); + t[index] -= 1; + // Mutate the rest of the string + for val in t.iter_mut().skip(index + 1) { + *val = rng.random_range('a' as u32..='z' as u32); + } + let t = t + .into_iter() + .map(|c| char::from_u32(c).unwrap_or('z')) + .collect::(); + Value::build_text(t) + } + } + Value::Blob(b) => { + // Either shorten the blob, or make at least one byte smaller and mutate the rest + let mut b = b.clone(); + if rng.random_bool(0.01) { + b.pop(); + Value::Blob(b) + } else { + let index = rng.random_range(0..b.len()); + b[index] -= 1; + // Mutate the rest of the blob + for val in b.iter_mut().skip(index + 1) { + *val = rng.random_range(0..=255); + } + Value::Blob(b) + } + } + _ => unreachable!(), + }; + Self(SimValue(new_value)) + } +} + +pub struct GTValue(pub SimValue); + +impl ArbitraryFrom<&Vec<&SimValue>> for GTValue { + fn arbitrary_from( + rng: &mut R, + context: &C, + values: &Vec<&SimValue>, + ) -> Self { + if values.is_empty() { + return Self(SimValue(Value::Null)); + } + // Get value greater than all values + let value = Value::exec_max(values.iter().map(|value| &value.0)); + + Self::arbitrary_from(rng, context, &SimValue(value)) + } +} + +impl ArbitraryFrom<&SimValue> for GTValue { + fn arbitrary_from( + rng: &mut R, + _context: &C, + value: &SimValue, + ) -> Self { + let new_value = match &value.0 { + Value::Integer(i) => Value::Integer(rng.random_range(*i..i64::MAX)), + Value::Float(f) => Value::Float(rng.random_range(*f..1e10)), + value @ Value::Text(..) => { + // Either lengthen the string, or make at least one character smaller and mutate the rest + let mut t = value.to_string(); + if rng.random_bool(0.01) { + t.push(rng.random_range(0..=255) as u8 as char); + Value::build_text(t) + } else { + let mut t = t.chars().map(|c| c as u32).collect::>(); + let index = rng.random_range(0..t.len()); + t[index] += 1; + // Mutate the rest of the string + for val in t.iter_mut().skip(index + 1) { + *val = rng.random_range('a' as u32..='z' as u32); + } + let t = t + .into_iter() + .map(|c| char::from_u32(c).unwrap_or('a')) + .collect::(); + Value::build_text(t) + } + } + Value::Blob(b) => { + // Either lengthen the blob, or make at least one byte smaller and mutate the rest + let mut b = b.clone(); + if rng.random_bool(0.01) { + b.push(rng.random_range(0..=255)); + Value::Blob(b) + } else { + let index = rng.random_range(0..b.len()); + b[index] += 1; + // Mutate the rest of the blob + for val in b.iter_mut().skip(index + 1) { + *val = rng.random_range(0..=255); + } + Value::Blob(b) + } + } + _ => unreachable!(), + }; + Self(SimValue(new_value)) + } +} diff --git a/sql_generation/generation/value/mod.rs b/sql_generation/generation/value/mod.rs new file mode 100644 index 000000000..e0c98ad84 --- /dev/null +++ b/sql_generation/generation/value/mod.rs @@ -0,0 +1,58 @@ +use rand::Rng; +use turso_core::Value; + +use crate::{ + generation::{gen_random_text, pick, ArbitraryFrom, GenerationContext}, + model::table::{ColumnType, SimValue, Table}, +}; + +mod cmp; +mod pattern; + +pub use cmp::{GTValue, LTValue}; +pub use pattern::LikeValue; + +impl ArbitraryFrom<&Table> for Vec { + fn arbitrary_from( + rng: &mut R, + context: &C, + table: &Table, + ) -> Self { + let mut row = Vec::new(); + for column in table.columns.iter() { + let value = SimValue::arbitrary_from(rng, context, &column.column_type); + row.push(value); + } + row + } +} + +impl ArbitraryFrom<&Vec<&SimValue>> for SimValue { + fn arbitrary_from( + rng: &mut R, + _context: &C, + values: &Vec<&Self>, + ) -> Self { + if values.is_empty() { + return Self(Value::Null); + } + + pick(values, rng).to_owned().clone() + } +} + +impl ArbitraryFrom<&ColumnType> for SimValue { + fn arbitrary_from( + rng: &mut R, + _context: &C, + column_type: &ColumnType, + ) -> Self { + let value = match column_type { + ColumnType::Integer => Value::Integer(rng.random_range(i64::MIN..i64::MAX)), + ColumnType::Float => Value::Float(rng.random_range(-1e10..1e10)), + ColumnType::Text => Value::build_text(gen_random_text(rng)), + ColumnType::Blob => Value::Blob(gen_random_text(rng).as_bytes().to_vec()), + }; + SimValue(value) + } +} diff --git a/sql_generation/generation/value/pattern.rs b/sql_generation/generation/value/pattern.rs new file mode 100644 index 000000000..3bf0d7a9f --- /dev/null +++ b/sql_generation/generation/value/pattern.rs @@ -0,0 +1,44 @@ +use turso_core::Value; + +use crate::{ + generation::{ArbitraryFromMaybe, GenerationContext}, + model::table::SimValue, +}; + +pub struct LikeValue(pub SimValue); + +impl ArbitraryFromMaybe<&SimValue> for LikeValue { + fn arbitrary_from_maybe( + rng: &mut R, + _context: &C, + value: &SimValue, + ) -> Option { + match &value.0 { + value @ Value::Text(..) => { + let t = value.to_string(); + let mut t = t.chars().collect::>(); + // Remove a number of characters, either insert `_` for each character removed, or + // insert one `%` for the whole substring + let mut i = 0; + while i < t.len() { + if rng.random_bool(0.1) { + t[i] = '_'; + } else if rng.random_bool(0.05) { + t[i] = '%'; + // skip a list of characters + for _ in 0..rng.random_range(0..=3.min(t.len() - i - 1)) { + t.remove(i + 1); + } + } + i += 1; + } + let index = rng.random_range(0..t.len()); + t.insert(index, '%'); + Some(Self(SimValue(Value::build_text( + t.into_iter().collect::(), + )))) + } + _ => None, + } + } +} From 642ec3032d5b2a5c92409f9b6bdc429d0770408a Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Wed, 8 Oct 2025 12:07:49 -0300 Subject: [PATCH 136/428] use parser's `ColumnDefinition` for Sql Generation `Column` struct --- parser/Cargo.toml | 1 + parser/src/ast.rs | 5 +++++ sql_generation/Cargo.toml | 2 +- sql_generation/generation/table.rs | 3 +-- sql_generation/model/query/create.rs | 15 ++++++++------- sql_generation/model/table.rs | 22 +++++++++++++++++++--- whopper/main.rs | 28 +++++++++++++++++++++++----- 7 files changed, 58 insertions(+), 18 deletions(-) diff --git a/parser/Cargo.toml b/parser/Cargo.toml index 6f9720bc8..a140f4e44 100644 --- a/parser/Cargo.toml +++ b/parser/Cargo.toml @@ -13,6 +13,7 @@ name = "turso_parser" [features] default = [] serde = ["dep:serde", "bitflags/serde"] +simulator = [] [dependencies] bitflags = { workspace = true } diff --git a/parser/src/ast.rs b/parser/src/ast.rs index dae656cc4..81b47a967 100644 --- a/parser/src/ast.rs +++ b/parser/src/ast.rs @@ -1121,6 +1121,11 @@ pub struct NamedColumnConstraint { // https://sqlite.org/syntax/column-constraint.html #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "simulator", derive(strum::EnumDiscriminants))] +#[cfg_attr( + feature = "simulator", + strum_discriminants(derive(strum::VariantArray)) +)] pub enum ColumnConstraint { /// `PRIMARY KEY` PrimaryKey { diff --git a/sql_generation/Cargo.toml b/sql_generation/Cargo.toml index d42668237..5c4de8d6e 100644 --- a/sql_generation/Cargo.toml +++ b/sql_generation/Cargo.toml @@ -13,7 +13,7 @@ path = "lib.rs" hex = { workspace = true } serde = { workspace = true, features = ["derive"] } turso_core = { workspace = true, features = ["simulator"] } -turso_parser = { workspace = true, features = ["serde"] } +turso_parser = { workspace = true, features = ["serde", "simulator"] } rand = { workspace = true } anarchist-readable-name-generator-lib = "0.2.0" itertools = { workspace = true } diff --git a/sql_generation/generation/table.rs b/sql_generation/generation/table.rs index 2ced09fee..21c89a179 100644 --- a/sql_generation/generation/table.rs +++ b/sql_generation/generation/table.rs @@ -51,8 +51,7 @@ impl Arbitrary for Column { Self { name, column_type, - primary: false, - unique: false, + constraints: vec![], // TODO: later implement arbitrary here for ColumnConstraint } } } diff --git a/sql_generation/model/query/create.rs b/sql_generation/model/query/create.rs index 607d5fe8d..ee028e879 100644 --- a/sql_generation/model/query/create.rs +++ b/sql_generation/model/query/create.rs @@ -1,5 +1,6 @@ use std::fmt::Display; +use itertools::Itertools; use serde::{Deserialize, Serialize}; use crate::model::table::Table; @@ -13,13 +14,13 @@ impl Display for Create { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "CREATE TABLE {} (", self.table.name)?; - for (i, column) in self.table.columns.iter().enumerate() { - if i != 0 { - write!(f, ",")?; - } - write!(f, "{} {}", column.name, column.column_type)?; - } + let cols = self + .table + .columns + .iter() + .map(|column| column.to_string()) + .join(", "); - write!(f, ")") + write!(f, "{cols})") } } diff --git a/sql_generation/model/table.rs b/sql_generation/model/table.rs index 87057b42b..1060b8bb8 100644 --- a/sql_generation/model/table.rs +++ b/sql_generation/model/table.rs @@ -1,8 +1,9 @@ use std::{fmt::Display, hash::Hash, ops::Deref}; +use itertools::Itertools; use serde::{Deserialize, Serialize}; use turso_core::{numeric::Numeric, types}; -use turso_parser::ast; +use turso_parser::ast::{self, ColumnConstraint}; use crate::model::query::predicate::Predicate; @@ -63,8 +64,7 @@ impl Table { pub struct Column { pub name: String, pub column_type: ColumnType, - pub primary: bool, - pub unique: bool, + pub constraints: Vec, } // Uniquely defined by name in this case @@ -82,6 +82,22 @@ impl PartialEq for Column { impl Eq for Column {} +impl Display for Column { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let constraints = self + .constraints + .iter() + .map(|constraint| constraint.to_string()) + .join(" "); + let mut col_string = format!("{} {}", self.name, self.column_type); + if !constraints.is_empty() { + col_string.push(' '); + col_string.push_str(&constraints); + } + write!(f, "{col_string}") + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub enum ColumnType { Integer, diff --git a/whopper/main.rs b/whopper/main.rs index 086b4687e..16da0fbf8 100644 --- a/whopper/main.rs +++ b/whopper/main.rs @@ -18,7 +18,7 @@ use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitEx use turso_core::{ CipherMode, Connection, Database, DatabaseOpts, EncryptionOpts, IO, OpenFlags, Statement, }; -use turso_parser::ast::SortOrder; +use turso_parser::ast::{ColumnConstraint, SortOrder}; mod io; use crate::io::FILE_SIZE_SOFT_LIMIT; @@ -332,12 +332,18 @@ fn create_initial_schema(rng: &mut ChaCha8Rng) -> Vec { let num_columns = rng.random_range(2..=8); let mut columns = Vec::new(); + // TODO: there is no proper unique generation yet in whopper, so disable primary keys for now + + // let primary = ColumnConstraint::PrimaryKey { + // order: None, + // conflict_clause: None, + // auto_increment: false, + // }; // Always add an id column as primary key columns.push(Column { name: "id".to_string(), column_type: ColumnType::Integer, - primary: true, - unique: false, + constraints: vec![], }); // Add random columns @@ -348,11 +354,19 @@ fn create_initial_schema(rng: &mut ChaCha8Rng) -> Vec { _ => ColumnType::Float, }; + // FIXME: before sql_generation did not incorporate ColumnConstraint into the sql string + // now it does and it the simulation here fails `whopper` with UNIQUE CONSTRAINT ERROR + // 20% chance of unique + let constraints = if rng.random_bool(0.0) { + vec![ColumnConstraint::Unique(None)] + } else { + Vec::new() + }; + columns.push(Column { name: format!("col_{j}"), column_type: col_type, - primary: false, - unique: rng.random_bool(0.2), // 20% chance of unique + constraints, }); } @@ -366,6 +380,10 @@ fn create_initial_schema(rng: &mut ChaCha8Rng) -> Vec { schema.push(Create { table }); } + for create in &schema { + println!("{create}"); + } + schema } From 6980128a24436012377e15e59f26acc6008b0eda Mon Sep 17 00:00:00 2001 From: rajajisai Date: Thu, 9 Oct 2025 22:50:18 -0400 Subject: [PATCH 137/428] Ignore sqlite_sequence table when dumping tables in .clone --- cli/app.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cli/app.rs b/cli/app.rs index efc2f312f..9551afaf8 100644 --- a/cli/app.rs +++ b/cli/app.rs @@ -1453,6 +1453,10 @@ impl Limbo { StepResult::Row => { let row = rows.row().unwrap(); let name: &str = row.get::<&str>(0)?; + // Skip sqlite_sequence table + if name == "sqlite_sequence" { + continue; + } let ddl: &str = row.get::<&str>(1)?; writeln!(out, "{ddl};")?; Self::dump_table_from_conn(&conn, out, name, &mut progress)?; @@ -1567,7 +1571,6 @@ impl Limbo { if !has_seq { return Ok(()); } - writeln!(out, "DELETE FROM sqlite_sequence;")?; if let Some(mut rows) = conn.query("SELECT name, seq FROM sqlite_sequence")? { loop { From 51122d3e9cddeb59746dae4035b440d78af9c768 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Fri, 10 Oct 2025 11:39:06 +0400 Subject: [PATCH 138/428] fix clippy --- core/vector/operations/convert.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/vector/operations/convert.rs b/core/vector/operations/convert.rs index 96401123e..619c7be26 100644 --- a/core/vector/operations/convert.rs +++ b/core/vector/operations/convert.rs @@ -112,7 +112,7 @@ mod tests { dims: v1.dims, data: v1.data.clone(), }; - assert_vectors(&vector_convert(v_copy, v2.vector_type).unwrap(), &v2); + assert_vectors(&vector_convert(v_copy, v2.vector_type).unwrap(), v2); } } } From 74e04634aa8811d27cc8560fc794a21bb5b6a5d5 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Fri, 10 Oct 2025 13:12:53 +0300 Subject: [PATCH 139/428] Fix incorrectly using an equality constraint twice for index seek Prevents something like `WHERE x = 5 AND x = 5` from becoming a two component index key. Closes #3656 --- core/translate/optimizer/constraints.rs | 28 ++++++++++++++++++------- testing/join.test | 7 +++++++ 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/core/translate/optimizer/constraints.rs b/core/translate/optimizer/constraints.rs index 7e32f17c5..486c36687 100644 --- a/core/translate/optimizer/constraints.rs +++ b/core/translate/optimizer/constraints.rs @@ -486,7 +486,7 @@ pub fn usable_constraints_for_join_order<'a>( .map(|j| j.original_idx), ); let mut usable: Vec = Vec::new(); - let mut last_column_pos = 0; + let mut current_required_column_pos = 0; for cref in refs.iter() { let constraint = &constraints[cref.constraint_vec_pos]; let other_side_refers_to_self = constraint.lhs_mask.contains_table(table_idx); @@ -498,6 +498,7 @@ pub fn usable_constraints_for_join_order<'a>( break; } if Some(cref.index_col_pos) == usable.last().map(|x| x.index_col_pos) { + // Two constraints on the same index column can be combined into a single range constraint. assert_eq!(cref.sort_order, usable.last().unwrap().sort_order); assert_eq!(cref.index_col_pos, usable.last().unwrap().index_col_pos); assert_eq!( @@ -520,15 +521,28 @@ pub fn usable_constraints_for_join_order<'a>( } continue; } - if cref.index_col_pos != last_column_pos { + if cref.index_col_pos != current_required_column_pos { + // Index columns must be consumed contiguously in the order they appear in the index. break; } if usable.last().is_some_and(|x| x.eq.is_none()) { + // Usable index key must have 0-n equalities and then a maximum of 1 range constraint with one or both bounds set. + // If we already have a range constraint before this one, we must not add anything to it break; } - let constraint_group = match constraints[cref.constraint_vec_pos].operator { + let operator = constraints[cref.constraint_vec_pos].operator; + let table_col_pos = constraints[cref.constraint_vec_pos].table_col_pos; + if operator == ast::Operator::Equals + && usable + .last() + .is_some_and(|x| x.table_col_pos == table_col_pos) + { + // If we already have an equality constraint for this column, we can't use it again + continue; + } + let constraint_group = match operator { ast::Operator::Equals => RangeConstraintRef { - table_col_pos: constraints[cref.constraint_vec_pos].table_col_pos, + table_col_pos, index_col_pos: cref.index_col_pos, sort_order: cref.sort_order, eq: Some(cref.constraint_vec_pos), @@ -536,7 +550,7 @@ pub fn usable_constraints_for_join_order<'a>( upper_bound: None, }, ast::Operator::Greater | ast::Operator::GreaterEquals => RangeConstraintRef { - table_col_pos: constraints[cref.constraint_vec_pos].table_col_pos, + table_col_pos, index_col_pos: cref.index_col_pos, sort_order: cref.sort_order, eq: None, @@ -544,7 +558,7 @@ pub fn usable_constraints_for_join_order<'a>( upper_bound: None, }, ast::Operator::Less | ast::Operator::LessEquals => RangeConstraintRef { - table_col_pos: constraints[cref.constraint_vec_pos].table_col_pos, + table_col_pos, index_col_pos: cref.index_col_pos, sort_order: cref.sort_order, eq: None, @@ -554,7 +568,7 @@ pub fn usable_constraints_for_join_order<'a>( _ => continue, }; usable.push(constraint_group); - last_column_pos += 1; + current_required_column_pos += 1; } usable } diff --git a/testing/join.test b/testing/join.test index c14b96ab4..6c6aa7314 100755 --- a/testing/join.test +++ b/testing/join.test @@ -384,3 +384,10 @@ do_execsql_test_on_specific_db {:memory:} left-join-using-null { select a, b from t left join s using (a, b); } {1| 2|} + +# Regression test for: https://github.com/tursodatabase/turso/issues/3656 +do_execsql_test_on_specific_db {:memory:} redundant-join-condition { + create table t(x); + insert into t values ('lol'); + select t1.x from t t1 join t t2 on t1.x=t2.x where t1.x=t2.x; +} {lol} \ No newline at end of file From d0d6db301b58dbec1c1e446941f589ff5289f5f3 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Fri, 10 Oct 2025 12:39:39 +0200 Subject: [PATCH 140/428] core/btree: CursorTrait --- core/storage/btree.rs | 1108 ++++++++++++++++++++++++++++++++++------- 1 file changed, 925 insertions(+), 183 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index f1935c09e..0b7489530 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -40,6 +40,7 @@ use super::{ }; use parking_lot::RwLock; use std::{ + any::Any, cell::{Cell, Ref, RefCell}, cmp::{Ordering, Reverse}, collections::{BinaryHeap, HashMap}, @@ -509,6 +510,57 @@ pub enum CursorSeekState { }, } +pub trait CursorTrait: Any { + /// Move cursor to last entry. + fn last(&mut self) -> Result>; + /// Move cursor to next entry. + fn next(&mut self) -> Result>; + /// Move cursor to previous entry. + fn prev(&mut self) -> Result>; + /// Get the rowid of the entry the cursor is poiting to if any + fn rowid(&self) -> Result>>; + /// Get the record of the entry the cursor is poiting to if any + fn record(&self) -> Result>>>; + /// Move the cursor based on the key and the type of operation (op). + fn seek(&mut self, key: SeekKey<'_>, op: SeekOp) -> Result>; + /// Insert a record in the position the cursor is at. + fn insert(&mut self, key: &BTreeKey) -> Result>; + /// Delete a record in the position the cursor is at. + fn delete(&mut self) -> Result>; + fn set_null_flag(&mut self, flag: bool); + fn get_null_flag(&self) -> bool; + /// Check if a key exists. + fn exists(&mut self, key: &Value) -> Result>; + fn clear_btree(&mut self) -> Result>>; + fn btree_destroy(&mut self) -> Result>>; + /// Count the number of entries in the b-tree + /// + /// Only supposed to be used in the context of a simple Count Select Statement + fn count(&mut self) -> Result>; + fn is_empty(&self) -> bool; + fn root_page(&self) -> i64; + /// Move cursor at the start. + fn rewind(&mut self) -> Result>; + /// Check if cursor is poiting at a valid entry with a record. + fn has_record(&self) -> bool; + fn set_has_record(&self, has_record: bool); + fn get_index_info(&self) -> &IndexInfo; + + fn seek_end(&mut self) -> Result>; + fn seek_to_last(&mut self) -> Result>; + + // --- start: BTreeCursor specific functions ---- + fn invalidate_record(&mut self); + fn has_rowid(&self) -> bool; + fn record_cursor_mut(&self) -> std::cell::RefMut<'_, RecordCursor>; + fn get_pager(&self) -> Arc; + fn get_skip_advance(&self) -> bool; + + // FIXME: remove once we implement trait for mvcc + fn get_mvcc_cursor(&self) -> Arc>; + // --- end: BTreeCursor specific functions ---- +} + pub struct BTreeCursor { /// The multi-version cursor that is used to read and write to the database file. mv_cursor: Option>>, @@ -686,13 +738,6 @@ impl BTreeCursor { cursor } - pub fn has_rowid(&self) -> bool { - match &self.index_info { - Some(index_key_info) => index_key_info.has_rowid, - None => true, // currently we don't support WITHOUT ROWID tables - } - } - pub fn get_index_rowid_from_record(&self) -> Option { if !self.has_rowid() { return None; @@ -4405,181 +4450,6 @@ impl BTreeCursor { self.usable_space_cached } - pub fn seek_end(&mut self) -> Result> { - assert!(self.mv_cursor.is_none()); // unsure about this -_- - loop { - match self.seek_end_state { - SeekEndState::Start => { - let c = self.move_to_root()?; - self.seek_end_state = SeekEndState::ProcessPage; - if let Some(c) = c { - io_yield_one!(c); - } - } - SeekEndState::ProcessPage => { - let mem_page = self.stack.top_ref(); - let contents = mem_page.get_contents(); - if contents.is_leaf() { - // set cursor just past the last cell to append - self.stack.set_cell_index(contents.cell_count() as i32); - self.seek_end_state = SeekEndState::Start; - return Ok(IOResult::Done(())); - } - - match contents.rightmost_pointer() { - Some(right_most_pointer) => { - self.stack.set_cell_index(contents.cell_count() as i32 + 1); // invalid on interior - let (child, c) = self.read_page(right_most_pointer as i64)?; - self.stack.push(child); - if let Some(c) = c { - io_yield_one!(c); - } - } - None => unreachable!("interior page must have rightmost pointer"), - } - } - } - } - } - - #[instrument(skip_all, level = Level::DEBUG)] - pub fn seek_to_last(&mut self) -> Result> { - loop { - match self.seek_to_last_state { - SeekToLastState::Start => { - assert!(self.mv_cursor.is_none()); - let has_record = return_if_io!(self.move_to_rightmost()); - self.invalidate_record(); - self.has_record.replace(has_record); - if !has_record { - self.seek_to_last_state = SeekToLastState::IsEmpty; - continue; - } - return Ok(IOResult::Done(())); - } - SeekToLastState::IsEmpty => { - let is_empty = return_if_io!(self.is_empty_table()); - assert!(is_empty); - self.seek_to_last_state = SeekToLastState::Start; - return Ok(IOResult::Done(())); - } - } - } - } - - pub fn is_empty(&self) -> bool { - !self.has_record.get() - } - - pub fn root_page(&self) -> i64 { - self.root_page - } - - #[instrument(skip_all, level = Level::DEBUG)] - pub fn rewind(&mut self) -> Result> { - if self.valid_state == CursorValidState::Invalid { - return Ok(IOResult::Done(())); - } - self.skip_advance.set(false); - loop { - match self.rewind_state { - RewindState::Start => { - self.rewind_state = RewindState::NextRecord; - if let Some(mv_cursor) = &self.mv_cursor { - let mut mv_cursor = mv_cursor.write(); - mv_cursor.rewind(); - } else { - let c = self.move_to_root()?; - if let Some(c) = c { - io_yield_one!(c); - } - } - } - RewindState::NextRecord => { - let cursor_has_record = return_if_io!(self.get_next_record()); - self.invalidate_record(); - self.has_record.replace(cursor_has_record); - self.rewind_state = RewindState::Start; - return Ok(IOResult::Done(())); - } - } - } - } - - #[instrument(skip_all, level = Level::DEBUG)] - pub fn last(&mut self) -> Result> { - assert!(self.mv_cursor.is_none()); - let cursor_has_record = return_if_io!(self.move_to_rightmost()); - self.has_record.replace(cursor_has_record); - self.invalidate_record(); - Ok(IOResult::Done(())) - } - - #[instrument(skip_all, level = Level::DEBUG)] - pub fn next(&mut self) -> Result> { - if self.valid_state == CursorValidState::Invalid { - return Ok(IOResult::Done(false)); - } - if self.skip_advance.get() { - // See DeleteState::RestoreContextAfterBalancing - self.skip_advance.set(false); - let mem_page = self.stack.top_ref(); - let contents = mem_page.get_contents(); - let cell_idx = self.stack.current_cell_index(); - let cell_count = contents.cell_count(); - let has_record = cell_idx >= 0 && cell_idx < cell_count as i32; - if has_record { - self.has_record.set(true); - // If we are positioned at a record, we stop here without advancing. - return Ok(IOResult::Done(true)); - } - // But: if we aren't currently positioned at a record (for example, we are at the end of a page), - // we need to advance despite the skip_advance flag - // because the intent is to find the next record immediately after the one we just deleted. - } - loop { - match self.advance_state { - AdvanceState::Start => { - return_if_io!(self.restore_context()); - self.advance_state = AdvanceState::Advance; - } - AdvanceState::Advance => { - let cursor_has_record = return_if_io!(self.get_next_record()); - self.has_record.replace(cursor_has_record); - self.invalidate_record(); - return Ok(IOResult::Done(cursor_has_record)); - } - } - } - } - - pub fn invalidate_record(&mut self) { - self.get_immutable_record_or_create() - .as_mut() - .unwrap() - .invalidate(); - self.record_cursor.borrow_mut().invalidate(); - } - - #[instrument(skip_all, level = Level::DEBUG)] - pub fn prev(&mut self) -> Result> { - assert!(self.mv_cursor.is_none()); - loop { - match self.advance_state { - AdvanceState::Start => { - return_if_io!(self.restore_context()); - self.advance_state = AdvanceState::Advance; - } - AdvanceState::Advance => { - let cursor_has_record = return_if_io!(self.get_prev_record()); - self.has_record.replace(cursor_has_record); - self.invalidate_record(); - return Ok(IOResult::Done(cursor_has_record)); - } - } - } - } - #[instrument(skip(self), level = Level::DEBUG)] pub fn rowid(&self) -> Result>> { if let Some(mv_cursor) = &self.mv_cursor { @@ -5737,10 +5607,882 @@ impl BTreeCursor { self.pager .do_allocate_page(page_type, offset, BtreePageAllocMode::Any) } +} - pub fn get_mvcc_cursor(&self) -> Arc> { +impl CursorTrait for BTreeCursor { + #[instrument(skip_all, level = Level::DEBUG)] + fn next(&mut self) -> Result> { + if self.valid_state == CursorValidState::Invalid { + return Ok(IOResult::Done(false)); + } + if self.skip_advance.get() { + // See DeleteState::RestoreContextAfterBalancing + self.skip_advance.set(false); + let mem_page = self.stack.top_ref(); + let contents = mem_page.get_contents(); + let cell_idx = self.stack.current_cell_index(); + let cell_count = contents.cell_count(); + let has_record = cell_idx >= 0 && cell_idx < cell_count as i32; + if has_record { + self.has_record.set(true); + // If we are positioned at a record, we stop here without advancing. + return Ok(IOResult::Done(true)); + } + // But: if we aren't currently positioned at a record (for example, we are at the end of a page), + // we need to advance despite the skip_advance flag + // because the intent is to find the next record immediately after the one we just deleted. + } + loop { + match self.advance_state { + AdvanceState::Start => { + return_if_io!(self.restore_context()); + self.advance_state = AdvanceState::Advance; + } + AdvanceState::Advance => { + let cursor_has_record = return_if_io!(self.get_next_record()); + self.has_record.replace(cursor_has_record); + self.invalidate_record(); + return Ok(IOResult::Done(cursor_has_record)); + } + } + } + } + + #[instrument(skip_all, level = Level::DEBUG)] + fn last(&mut self) -> Result> { + assert!(self.mv_cursor.is_none()); + let cursor_has_record = return_if_io!(self.move_to_rightmost()); + self.has_record.replace(cursor_has_record); + self.invalidate_record(); + Ok(IOResult::Done(())) + } + + #[instrument(skip_all, level = Level::DEBUG)] + fn prev(&mut self) -> Result> { + assert!(self.mv_cursor.is_none()); + loop { + match self.advance_state { + AdvanceState::Start => { + return_if_io!(self.restore_context()); + self.advance_state = AdvanceState::Advance; + } + AdvanceState::Advance => { + let cursor_has_record = return_if_io!(self.get_prev_record()); + self.has_record.replace(cursor_has_record); + self.invalidate_record(); + return Ok(IOResult::Done(cursor_has_record)); + } + } + } + } + + #[instrument(skip(self), level = Level::DEBUG)] + fn rowid(&self) -> Result>> { + if let Some(mv_cursor) = &self.mv_cursor { + let mut mv_cursor = mv_cursor.write(); + let Some(rowid) = mv_cursor.current_row_id() else { + return Ok(IOResult::Done(None)); + }; + return Ok(IOResult::Done(Some(rowid.row_id))); + } + if self.get_null_flag() { + return Ok(IOResult::Done(None)); + } + if self.has_record.get() { + let page = self.stack.top_ref(); + let contents = page.get_contents(); + let page_type = contents.page_type(); + if page_type.is_table() { + let cell_idx = self.stack.current_cell_index(); + let rowid = contents.cell_table_leaf_read_rowid(cell_idx as usize)?; + Ok(IOResult::Done(Some(rowid))) + } else { + let _ = return_if_io!(self.record()); + Ok(IOResult::Done(self.get_index_rowid_from_record())) + } + } else { + Ok(IOResult::Done(None)) + } + } + + #[instrument(skip(self, key), level = Level::DEBUG)] + fn seek(&mut self, key: SeekKey<'_>, op: SeekOp) -> Result> { + if let Some(mv_cursor) = &self.mv_cursor { + let mut mv_cursor = mv_cursor.write(); + return mv_cursor.seek(key, op); + } + self.skip_advance.set(false); + // Empty trace to capture the span information + tracing::trace!(""); + // We need to clear the null flag for the table cursor before seeking, + // because it might have been set to false by an unmatched left-join row during the previous iteration + // on the outer loop. + self.set_null_flag(false); + let seek_result = return_if_io!(self.do_seek(key, op)); + self.invalidate_record(); + // Reset seek state + self.seek_state = CursorSeekState::Start; + self.valid_state = CursorValidState::Valid; + Ok(IOResult::Done(seek_result)) + } + + #[instrument(skip(self), level = Level::DEBUG)] + fn record(&self) -> Result>>> { + if !self.has_record.get() && self.mv_cursor.is_none() { + return Ok(IOResult::Done(None)); + } + let invalidated = self + .reusable_immutable_record + .borrow() + .as_ref() + .is_none_or(|record| record.is_invalidated()); + if !invalidated { + let record_ref = + Ref::filter_map(self.reusable_immutable_record.borrow(), |opt| opt.as_ref()) + .unwrap(); + return Ok(IOResult::Done(Some(record_ref))); + } + if let Some(mv_cursor) = &self.mv_cursor { + let mut mv_cursor = mv_cursor.write(); + let Some(row) = mv_cursor.current_row()? else { + return Ok(IOResult::Done(None)); + }; + self.get_immutable_record_or_create() + .as_mut() + .unwrap() + .invalidate(); + self.get_immutable_record_or_create() + .as_mut() + .unwrap() + .start_serialization(&row.data); + self.record_cursor.borrow_mut().invalidate(); + let record_ref = + Ref::filter_map(self.reusable_immutable_record.borrow(), |opt| opt.as_ref()) + .unwrap(); + return Ok(IOResult::Done(Some(record_ref))); + } + + let page = self.stack.top_ref(); + let contents = page.get_contents(); + let cell_idx = self.stack.current_cell_index(); + let cell = contents.cell_get(cell_idx as usize, self.usable_space())?; + let (payload, payload_size, first_overflow_page) = match cell { + BTreeCell::TableLeafCell(TableLeafCell { + payload, + payload_size, + first_overflow_page, + .. + }) => (payload, payload_size, first_overflow_page), + BTreeCell::IndexInteriorCell(IndexInteriorCell { + payload, + payload_size, + first_overflow_page, + .. + }) => (payload, payload_size, first_overflow_page), + BTreeCell::IndexLeafCell(IndexLeafCell { + payload, + first_overflow_page, + payload_size, + }) => (payload, payload_size, first_overflow_page), + _ => unreachable!("unexpected page_type"), + }; + if let Some(next_page) = first_overflow_page { + return_if_io!(self.process_overflow_read(payload, next_page, payload_size)) + } else { + self.get_immutable_record_or_create() + .as_mut() + .unwrap() + .invalidate(); + self.get_immutable_record_or_create() + .as_mut() + .unwrap() + .start_serialization(payload); + self.record_cursor.borrow_mut().invalidate(); + }; + + let record_ref = + Ref::filter_map(self.reusable_immutable_record.borrow(), |opt| opt.as_ref()).unwrap(); + Ok(IOResult::Done(Some(record_ref))) + } + + #[instrument(skip_all, level = Level::DEBUG)] + fn insert(&mut self, key: &BTreeKey) -> Result> { + tracing::debug!(valid_state = ?self.valid_state, cursor_state = ?self.state, is_write_in_progress = self.is_write_in_progress()); + match &self.mv_cursor { + Some(mv_cursor) => match key.maybe_rowid() { + Some(rowid) => { + let row_id = + crate::mvcc::database::RowID::new(mv_cursor.read().table_id, rowid); + let record_buf = key.get_record().unwrap().get_payload().to_vec(); + let num_columns = match key { + BTreeKey::IndexKey(record) => record.column_count(), + BTreeKey::TableRowId((_, record)) => { + record.as_ref().unwrap().column_count() + } + }; + let row = crate::mvcc::database::Row::new(row_id, record_buf, num_columns); + mv_cursor.write().insert(row)?; + } + None => todo!("Support mvcc inserts with index btrees"), + }, + None => { + return_if_io!(self.insert_into_page(key)); + if key.maybe_rowid().is_some() { + self.has_record.replace(true); + } + } + }; + Ok(IOResult::Done(())) + } + + #[instrument(skip(self), level = Level::DEBUG)] + fn delete(&mut self) -> Result> { + if let Some(mv_cursor) = &self.mv_cursor { + let rowid = mv_cursor.write().current_row_id().unwrap(); + mv_cursor.write().delete(rowid)?; + return Ok(IOResult::Done(())); + } + + if let CursorState::None = &self.state { + self.state = CursorState::Delete(DeleteState::Start); + } + + loop { + let usable_space = self.usable_space(); + let delete_state = match &mut self.state { + CursorState::Delete(x) => x, + _ => unreachable!("expected delete state"), + }; + tracing::debug!(?delete_state); + + match delete_state { + DeleteState::Start => { + let page = self.stack.top_ref(); + self.pager.add_dirty(page); + if matches!( + page.get_contents().page_type(), + PageType::TableLeaf | PageType::TableInterior + ) { + if return_if_io!(self.rowid()).is_none() { + self.state = CursorState::None; + return Ok(IOResult::Done(())); + } + } else if self.reusable_immutable_record.borrow().is_none() { + self.state = CursorState::None; + return Ok(IOResult::Done(())); + } + + self.state = CursorState::Delete(DeleteState::DeterminePostBalancingSeekKey); + } + + DeleteState::DeterminePostBalancingSeekKey => { + // FIXME: skip this work if we determine deletion wont result in balancing + // Right now we calculate the key every time for simplicity/debugging + // since it won't affect correctness which is more important + let page = self.stack.top_ref(); + let target_key = if page.is_index() { + let record = match return_if_io!(self.record()) { + Some(record) => record.clone(), + None => unreachable!("there should've been a record"), + }; + CursorContext { + key: CursorContextKey::IndexKeyRowId(record), + seek_op: SeekOp::GE { eq_only: true }, + } + } else { + let Some(rowid) = return_if_io!(self.rowid()) else { + panic!("cursor should be pointing to a record with a rowid"); + }; + CursorContext { + key: CursorContextKey::TableRowId(rowid), + seek_op: SeekOp::GE { eq_only: true }, + } + }; + + self.state = CursorState::Delete(DeleteState::LoadPage { + post_balancing_seek_key: Some(target_key), + }); + } + + DeleteState::LoadPage { + post_balancing_seek_key, + } => { + self.state = CursorState::Delete(DeleteState::FindCell { + post_balancing_seek_key: post_balancing_seek_key.take(), + }); + } + + DeleteState::FindCell { + post_balancing_seek_key, + } => { + let page = self.stack.top_ref(); + let cell_idx = self.stack.current_cell_index() as usize; + let contents = page.get_contents(); + if cell_idx >= contents.cell_count() { + return_corrupt!(format!( + "Corrupted page: cell index {} is out of bounds for page with {} cells", + cell_idx, + contents.cell_count() + )); + } + + tracing::debug!( + "DeleteState::FindCell: page_id: {}, cell_idx: {}", + page.get().id, + cell_idx + ); + + let cell = contents.cell_get(cell_idx, usable_space)?; + + let original_child_pointer = match &cell { + BTreeCell::TableInteriorCell(interior) => Some(interior.left_child_page), + BTreeCell::IndexInteriorCell(interior) => Some(interior.left_child_page), + _ => None, + }; + + self.state = CursorState::Delete(DeleteState::ClearOverflowPages { + cell_idx, + cell, + original_child_pointer, + post_balancing_seek_key: post_balancing_seek_key.take(), + }); + } + + DeleteState::ClearOverflowPages { cell, .. } => { + let cell = cell.clone(); + return_if_io!(self.clear_overflow_pages(&cell)); + + let CursorState::Delete(DeleteState::ClearOverflowPages { + cell_idx, + original_child_pointer, + ref mut post_balancing_seek_key, + .. + }) = self.state + else { + unreachable!("expected clear overflow pages state"); + }; + + let page = self.stack.top_ref(); + let contents = page.get_contents(); + + if !contents.is_leaf() { + self.state = CursorState::Delete(DeleteState::InteriorNodeReplacement { + page: page.clone(), + btree_depth: self.stack.current(), + cell_idx, + original_child_pointer, + post_balancing_seek_key: post_balancing_seek_key.take(), + }); + } else { + drop_cell(contents, cell_idx, usable_space)?; + + self.state = CursorState::Delete(DeleteState::CheckNeedsBalancing { + btree_depth: self.stack.current(), + post_balancing_seek_key: post_balancing_seek_key.take(), + interior_node_was_replaced: false, + }); + } + } + + DeleteState::InteriorNodeReplacement { .. } => { + // This is an interior node, we need to handle deletion differently. + // 1. Move cursor to the largest key in the left subtree. + // 2. Replace the cell in the interior (parent) node with that key. + // 3. Delete that key from the child page. + + // Step 1: Move cursor to the largest key in the left subtree. + // The largest key is always in a leaf, and so this traversal may involvegoing multiple pages downwards, + // so we store the page we are currently on. + + // avoid calling prev() because it internally calls restore_context() which may cause unintended behavior. + return_if_io!(self.get_prev_record()); + + let CursorState::Delete(DeleteState::InteriorNodeReplacement { + ref page, + btree_depth, + cell_idx, + original_child_pointer, + ref mut post_balancing_seek_key, + .. + }) = self.state + else { + unreachable!("expected interior node replacement state"); + }; + + // Ensure we keep the parent page at the same position as before the replacement. + self.stack + .node_states + .get_mut(btree_depth) + .expect("parent page should be on the stack") + .cell_idx = cell_idx as i32; + let (cell_payload, leaf_cell_idx) = { + let leaf_page = self.stack.top_ref(); + let leaf_contents = leaf_page.get_contents(); + assert!(leaf_contents.is_leaf()); + assert!(leaf_contents.cell_count() > 0); + let leaf_cell_idx = leaf_contents.cell_count() - 1; + let last_cell_on_child_page = + leaf_contents.cell_get(leaf_cell_idx, usable_space)?; + + let mut cell_payload: Vec = Vec::new(); + let child_pointer = + original_child_pointer.expect("there should be a pointer"); + // Rewrite the old leaf cell as an interior cell depending on type. + match last_cell_on_child_page { + BTreeCell::TableLeafCell(leaf_cell) => { + // Table interior cells contain the left child pointer and the rowid as varint. + cell_payload.extend_from_slice(&child_pointer.to_be_bytes()); + write_varint_to_vec(leaf_cell.rowid as u64, &mut cell_payload); + } + BTreeCell::IndexLeafCell(leaf_cell) => { + // Index interior cells contain: + // 1. The left child pointer + // 2. The payload size as varint + // 3. The payload + // 4. The first overflow page as varint, omitted if no overflow. + cell_payload.extend_from_slice(&child_pointer.to_be_bytes()); + write_varint_to_vec(leaf_cell.payload_size, &mut cell_payload); + cell_payload.extend_from_slice(leaf_cell.payload); + if let Some(first_overflow_page) = leaf_cell.first_overflow_page { + cell_payload + .extend_from_slice(&first_overflow_page.to_be_bytes()); + } + } + _ => unreachable!("Expected table leaf cell"), + } + (cell_payload, leaf_cell_idx) + }; + + let leaf_page = self.stack.top_ref(); + + self.pager.add_dirty(page); + self.pager.add_dirty(leaf_page); + + // Step 2: Replace the cell in the parent (interior) page. + { + let parent_contents = page.get_contents(); + let parent_page_id = page.get().id; + let left_child_page = u32::from_be_bytes( + cell_payload[..4].try_into().expect("invalid cell payload"), + ); + turso_assert!( + left_child_page as usize != parent_page_id, + "corrupt: current page and left child page of cell {} are both {}", + left_child_page, + parent_page_id + ); + + // First, drop the old cell that is being replaced. + drop_cell(parent_contents, cell_idx, usable_space)?; + // Then, insert the new cell (the predecessor) in its place. + insert_into_cell(parent_contents, &cell_payload, cell_idx, usable_space)?; + } + + // Step 3: Delete the predecessor cell from the leaf page. + { + let leaf_contents = leaf_page.get_contents(); + drop_cell(leaf_contents, leaf_cell_idx, usable_space)?; + } + + self.state = CursorState::Delete(DeleteState::CheckNeedsBalancing { + btree_depth, + post_balancing_seek_key: post_balancing_seek_key.take(), + interior_node_was_replaced: true, + }); + } + + DeleteState::CheckNeedsBalancing { btree_depth, .. } => { + let page = self.stack.top_ref(); + // Check if either the leaf page we took the replacement cell from underflows, or if the interior page we inserted it into overflows OR underflows. + // If the latter is true, we must always balance that level regardless of whether the leaf page (or any ancestor pages in between) need balancing. + + let leaf_underflows = { + let leaf_contents = page.get_contents(); + let free_space = compute_free_space(leaf_contents, usable_space); + free_space * 3 > usable_space * 2 + }; + + let interior_overflows_or_underflows = { + // Invariant: ancestor pages on the stack are pinned to the page cache, + // so we don't need return_if_locked_maybe_load! any ancestor, + // and we already loaded the current page above. + let interior_page = self + .stack + .get_page_at_level(*btree_depth) + .expect("ancestor page should be on the stack"); + let interior_contents = interior_page.get_contents(); + let overflows = !interior_contents.overflow_cells.is_empty(); + if overflows { + true + } else { + let free_space = compute_free_space(interior_contents, usable_space); + free_space * 3 > usable_space * 2 + } + }; + + let needs_balancing = leaf_underflows || interior_overflows_or_underflows; + + let CursorState::Delete(DeleteState::CheckNeedsBalancing { + btree_depth, + ref mut post_balancing_seek_key, + interior_node_was_replaced, + .. + }) = self.state + else { + unreachable!("expected check needs balancing state"); + }; + + if needs_balancing { + let balance_only_ancestor = + !leaf_underflows && interior_overflows_or_underflows; + if balance_only_ancestor { + // Only need to balance the ancestor page; move there immediately. + while self.stack.current() > btree_depth { + self.stack.pop(); + } + } + let balance_both = leaf_underflows && interior_overflows_or_underflows; + assert!(matches!(self.balance_state.sub_state, BalanceSubState::Start), "There should be no balancing operation in progress when delete state is {:?}, got: {:?}", self.state, self.balance_state.sub_state); + let post_balancing_seek_key = post_balancing_seek_key + .take() + .expect("post_balancing_seek_key should be Some"); + self.save_context(post_balancing_seek_key); + self.state = CursorState::Delete(DeleteState::Balancing { + balance_ancestor_at_depth: if balance_both { + Some(btree_depth) + } else { + None + }, + }); + } else { + // No balancing needed. + if interior_node_was_replaced { + // If we did replace an interior node, we need to advance the cursor once to + // get back at the interior node that now has the replaced content. + // The reason it is important to land here is that the replaced cell was smaller (LT) than the deleted cell, + // so we must ensure we skip over it. I.e., when BTreeCursor::next() is called, it will move past the cell + // that holds the replaced content. + self.state = + CursorState::Delete(DeleteState::PostInteriorNodeReplacement); + } else { + // If we didn't replace an interior node, we are done, + // except we need to retreat, so that the next call to BTreeCursor::next() lands at the next record (because we deleted the current one) + self.stack.retreat(); + self.state = CursorState::None; + return Ok(IOResult::Done(())); + } + } + } + DeleteState::PostInteriorNodeReplacement => { + return_if_io!(self.get_next_record()); + self.state = CursorState::None; + return Ok(IOResult::Done(())); + } + + DeleteState::Balancing { + balance_ancestor_at_depth, + } => { + let balance_ancestor_at_depth = *balance_ancestor_at_depth; + return_if_io!(self.balance(balance_ancestor_at_depth)); + self.state = CursorState::Delete(DeleteState::RestoreContextAfterBalancing); + } + DeleteState::RestoreContextAfterBalancing => { + return_if_io!(self.restore_context()); + + // We deleted key K, and performed a seek to: GE { eq_only: true } K. + // This means that the cursor is now pointing to the next key after K. + // We need to make the next call to BTreeCursor::next() a no-op so that we don't skip over + // a row when deleting rows in a loop. + self.skip_advance.set(true); + self.state = CursorState::None; + return Ok(IOResult::Done(())); + } + } + } + } + + #[inline(always)] + fn set_null_flag(&mut self, flag: bool) { + self.null_flag = flag; + } + + #[inline(always)] + fn get_null_flag(&self) -> bool { + self.null_flag + } + + #[instrument(skip_all, level = Level::DEBUG)] + fn exists(&mut self, key: &Value) -> Result> { + assert!(self.mv_cursor.is_none()); + let int_key = match key { + Value::Integer(i) => i, + _ => unreachable!("btree tables are indexed by integers!"), + }; + let seek_result = + return_if_io!(self.seek(SeekKey::TableRowId(*int_key), SeekOp::GE { eq_only: true })); + let exists = matches!(seek_result, SeekResult::Found); + self.invalidate_record(); + Ok(IOResult::Done(exists)) + } + + fn clear_btree(&mut self) -> Result>> { + self.destroy_btree_contents(true) + } + + #[instrument(skip(self), level = Level::DEBUG)] + fn btree_destroy(&mut self) -> Result>> { + self.destroy_btree_contents(false) + } + + #[instrument(skip(self), level = Level::DEBUG)] + fn count(&mut self) -> Result> { + if let Some(_mv_cursor) = &self.mv_cursor { + todo!("Implement count for mvcc"); + } + + let mut mem_page; + let mut contents; + + 'outer: loop { + let state = self.count_state; + match state { + CountState::Start => { + let c = self.move_to_root()?; + self.count_state = CountState::Loop; + if let Some(c) = c { + io_yield_one!(c); + } + } + CountState::Loop => { + self.stack.advance(); + mem_page = self.stack.top_ref(); + contents = mem_page.get_contents(); + + /* If this is a leaf page or the tree is not an int-key tree, then + ** this page contains countable entries. Increment the entry counter + ** accordingly. + */ + if !matches!(contents.page_type(), PageType::TableInterior) { + self.count += contents.cell_count(); + } + + let cell_idx = self.stack.current_cell_index() as usize; + + // Second condition is necessary in case we return if the page is locked in the loop below + if contents.is_leaf() || cell_idx > contents.cell_count() { + loop { + if !self.stack.has_parent() { + // All pages of the b-tree have been visited. Return successfully + let c = self.move_to_root()?; + self.count_state = CountState::Finish; + if let Some(c) = c { + io_yield_one!(c); + } + continue 'outer; + } + + // Move to parent + self.stack.pop(); + + mem_page = self.stack.top_ref(); + turso_assert!(mem_page.is_loaded(), "page should be loaded"); + contents = mem_page.get_contents(); + + let cell_idx = self.stack.current_cell_index() as usize; + + if cell_idx <= contents.cell_count() { + break; + } + } + } + + let cell_idx = self.stack.current_cell_index() as usize; + + assert!(cell_idx <= contents.cell_count(),); + assert!(!contents.is_leaf()); + + if cell_idx == contents.cell_count() { + // Move to right child + // should be safe as contents is not a leaf page + let right_most_pointer = contents.rightmost_pointer().unwrap(); + self.stack.advance(); + let (mem_page, c) = self.read_page(right_most_pointer as i64)?; + self.stack.push(mem_page); + if let Some(c) = c { + io_yield_one!(c); + } + } else { + // Move to child left page + let cell = contents.cell_get(cell_idx, self.usable_space())?; + + match cell { + BTreeCell::TableInteriorCell(TableInteriorCell { + left_child_page, + .. + }) + | BTreeCell::IndexInteriorCell(IndexInteriorCell { + left_child_page, + .. + }) => { + self.stack.advance(); + let (mem_page, c) = self.read_page(left_child_page as i64)?; + self.stack.push(mem_page); + if let Some(c) = c { + io_yield_one!(c); + } + } + _ => unreachable!(), + } + } + } + CountState::Finish => { + return Ok(IOResult::Done(self.count)); + } + } + } + } + fn is_empty(&self) -> bool { + !self.has_record.get() + } + + fn root_page(&self) -> i64 { + self.root_page + } + + #[instrument(skip_all, level = Level::DEBUG)] + fn rewind(&mut self) -> Result> { + if self.valid_state == CursorValidState::Invalid { + return Ok(IOResult::Done(())); + } + self.skip_advance.set(false); + loop { + match self.rewind_state { + RewindState::Start => { + self.rewind_state = RewindState::NextRecord; + if let Some(mv_cursor) = &self.mv_cursor { + let mut mv_cursor = mv_cursor.write(); + mv_cursor.rewind(); + } else { + let c = self.move_to_root()?; + if let Some(c) = c { + io_yield_one!(c); + } + } + } + RewindState::NextRecord => { + let cursor_has_record = return_if_io!(self.get_next_record()); + self.invalidate_record(); + self.has_record.replace(cursor_has_record); + self.rewind_state = RewindState::Start; + return Ok(IOResult::Done(())); + } + } + } + } + + fn has_rowid(&self) -> bool { + match &self.index_info { + Some(index_key_info) => index_key_info.has_rowid, + None => true, // currently we don't support WITHOUT ROWID tables + } + } + + fn invalidate_record(&mut self) { + self.get_immutable_record_or_create() + .as_mut() + .unwrap() + .invalidate(); + self.record_cursor.borrow_mut().invalidate(); + } + fn record_cursor_mut(&self) -> std::cell::RefMut<'_, RecordCursor> { + self.record_cursor.borrow_mut() + } + + fn get_pager(&self) -> Arc { + self.pager.clone() + } + + fn get_skip_advance(&self) -> bool { + self.skip_advance.get() + } + + fn has_record(&self) -> bool { + self.has_record.get() + } + + fn set_has_record(&self, has_record: bool) { + self.has_record.set(has_record) + } + + fn get_index_info(&self) -> &IndexInfo { + self.index_info.as_ref().unwrap() + } + + fn seek_end(&mut self) -> Result> { + assert!(self.mv_cursor.is_none()); // unsure about this -_- + loop { + match self.seek_end_state { + SeekEndState::Start => { + let c = self.move_to_root()?; + self.seek_end_state = SeekEndState::ProcessPage; + if let Some(c) = c { + io_yield_one!(c); + } + } + SeekEndState::ProcessPage => { + let mem_page = self.stack.top_ref(); + let contents = mem_page.get_contents(); + if contents.is_leaf() { + // set cursor just past the last cell to append + self.stack.set_cell_index(contents.cell_count() as i32); + self.seek_end_state = SeekEndState::Start; + return Ok(IOResult::Done(())); + } + + match contents.rightmost_pointer() { + Some(right_most_pointer) => { + self.stack.set_cell_index(contents.cell_count() as i32 + 1); // invalid on interior + let (child, c) = self.read_page(right_most_pointer as i64)?; + self.stack.push(child); + if let Some(c) = c { + io_yield_one!(c); + } + } + None => unreachable!("interior page must have rightmost pointer"), + } + } + } + } + } + + fn get_mvcc_cursor(&self) -> Arc> { self.mv_cursor.as_ref().unwrap().clone() } + + #[instrument(skip_all, level = Level::DEBUG)] + fn seek_to_last(&mut self) -> Result> { + loop { + match self.seek_to_last_state { + SeekToLastState::Start => { + assert!(self.mv_cursor.is_none()); + let has_record = return_if_io!(self.move_to_rightmost()); + self.invalidate_record(); + self.has_record.replace(has_record); + if !has_record { + self.seek_to_last_state = SeekToLastState::IsEmpty; + continue; + } + return Ok(IOResult::Done(())); + } + SeekToLastState::IsEmpty => { + let is_empty = return_if_io!(self.is_empty_table()); + assert!(is_empty); + self.seek_to_last_state = SeekToLastState::Start; + return Ok(IOResult::Done(())); + } + } + } + } } #[derive(Debug, thiserror::Error)] From b3ab51d66af9bb2508196016de615265ecd13703 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Fri, 10 Oct 2025 15:03:07 +0200 Subject: [PATCH 141/428] core/vdbe: store cursor as a dyn CursorTrait --- core/types.rs | 12 ++++++------ core/vdbe/execute.rs | 44 ++++++++++++++++++++++---------------------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/core/types.rs b/core/types.rs index 6de047f9b..bfcbb004e 100644 --- a/core/types.rs +++ b/core/types.rs @@ -8,7 +8,7 @@ use crate::ext::{ExtValue, ExtValueType}; use crate::numeric::format_float; use crate::pseudo::PseudoCursor; use crate::schema::Index; -use crate::storage::btree::BTreeCursor; +use crate::storage::btree::CursorTrait; use crate::storage::sqlite3_ondisk::{read_integer, read_value, read_varint, write_varint}; use crate::translate::collate::CollationSeq; use crate::translate::plan::IterationDirection; @@ -2269,7 +2269,7 @@ impl Record { } pub enum Cursor { - BTree(Box), + BTree(Box), Pseudo(PseudoCursor), Sorter(Sorter), Virtual(VirtualTableCursor), @@ -2289,8 +2289,8 @@ impl Debug for Cursor { } impl Cursor { - pub fn new_btree(cursor: BTreeCursor) -> Self { - Self::BTree(Box::new(cursor)) + pub fn new_btree(cursor: Box) -> Self { + Self::BTree(cursor) } pub fn new_pseudo(cursor: PseudoCursor) -> Self { @@ -2307,9 +2307,9 @@ impl Cursor { Self::MaterializedView(Box::new(cursor)) } - pub fn as_btree_mut(&mut self) -> &mut BTreeCursor { + pub fn as_btree_mut(&mut self) -> &mut dyn CursorTrait { match self { - Self::BTree(cursor) => cursor, + Self::BTree(cursor) => cursor.as_mut(), _ => panic!("Cursor is not a btree"), } } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 9cb2e6b7e..dcdf37e92 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -6,7 +6,7 @@ use crate::numeric::{NullableInteger, Numeric}; use crate::schema::Table; use crate::state_machine::StateMachine; use crate::storage::btree::{ - integrity_check, IntegrityCheckError, IntegrityCheckState, PageCategory, + integrity_check, CursorTrait, IntegrityCheckError, IntegrityCheckState, PageCategory, }; use crate::storage::database::DatabaseFile; use crate::storage::page_cache::PageCache; @@ -1099,7 +1099,7 @@ pub fn op_open_read( cursors .get_mut(*cursor_id) .unwrap() - .replace(Cursor::new_btree(cursor)); + .replace(Cursor::new_btree(Box::new(cursor))); } CursorType::BTreeIndex(index) => { let cursor = BTreeCursor::new_index( @@ -1112,7 +1112,7 @@ pub fn op_open_read( cursors .get_mut(*cursor_id) .unwrap() - .replace(Cursor::new_btree(cursor)); + .replace(Cursor::new_btree(Box::new(cursor))); } CursorType::Pseudo(_) => { panic!("OpenRead on pseudo cursor"); @@ -1630,7 +1630,7 @@ pub fn op_column( break 'ifnull; }; - let mut record_cursor = cursor.record_cursor.borrow_mut(); + let mut record_cursor = cursor.record_cursor_mut(); if record_cursor.offsets.is_empty() { let (header_size, header_len_bytes) = read_varint_fast(payload)?; @@ -2772,7 +2772,7 @@ pub fn op_row_id( let index_cursor = index_cursor.as_btree_mut(); let record = return_if_io!(index_cursor.record()); let record = record.as_ref().unwrap(); - let mut record_cursor_ref = index_cursor.record_cursor.borrow_mut(); + let mut record_cursor_ref = index_cursor.record_cursor_mut(); let record_cursor = record_cursor_ref.deref_mut(); let rowid = record.last_value(record_cursor).unwrap(); match rowid { @@ -3289,7 +3289,7 @@ pub fn seek_internal( // this same logic applies for indexes, but the next/prev record is expected to be found in the parent page's // divider cell. turso_assert!( - !cursor.skip_advance.get(), + !cursor.get_skip_advance(), "skip_advance should not be true in the middle of a seek operation" ); let result = match op { @@ -3299,7 +3299,7 @@ pub fn seek_internal( }; match result { IOResult::Done(found) => { - cursor.has_record.set(found); + cursor.set_has_record(found); cursor.invalidate_record(); found } @@ -3413,9 +3413,9 @@ pub fn op_idx_ge( registers_to_ref_values(&state.registers[*start_reg..*start_reg + *num_regs]); let tie_breaker = get_tie_breaker_from_idx_comp_op(insn); let ord = compare_records_generic( - &idx_record, // The serialized record from the index - &values, // The record built from registers - cursor.index_info.as_ref().unwrap(), // Sort order flags + &idx_record, // The serialized record from the index + &values, // The record built from registers + cursor.get_index_info(), // Sort order flags 0, tie_breaker, )?; @@ -3483,7 +3483,7 @@ pub fn op_idx_le( let ord = compare_records_generic( &idx_record, &values, - cursor.index_info.as_ref().unwrap(), + cursor.get_index_info(), 0, tie_breaker, )?; @@ -3534,7 +3534,7 @@ pub fn op_idx_gt( let ord = compare_records_generic( &idx_record, &values, - cursor.index_info.as_ref().unwrap(), + cursor.get_index_info(), 0, tie_breaker, )?; @@ -3586,7 +3586,7 @@ pub fn op_idx_lt( let ord = compare_records_generic( &idx_record, &values, - cursor.index_info.as_ref().unwrap(), + cursor.get_index_info(), 0, tie_breaker, )?; @@ -6323,7 +6323,7 @@ pub fn op_idx_insert( // Cursor is pointing at a record; if the index has a rowid, exclude it from the comparison since it's a pointer to the table row; // UNIQUE indexes disallow duplicates like (a=1,b=2,rowid=1) and (a=1,b=2,rowid=2). let existing_key = if cursor.has_rowid() { - let count = cursor.record_cursor.borrow_mut().count(record); + let count = cursor.record_cursor_mut().count(record); &record.get_values()[..count.saturating_sub(1)] } else { &record.get_values()[..] @@ -6336,7 +6336,7 @@ pub fn op_idx_insert( let conflict = compare_immutable( existing_key, inserted_key_vals, - &cursor.index_info.as_ref().unwrap().key_info, + &cursor.get_index_info().key_info, ) == std::cmp::Ordering::Equal; if conflict { if flags.has(IdxInsertFlags::NO_OP_DUPLICATE) { @@ -6833,7 +6833,7 @@ pub fn op_open_write( cursors .get_mut(*cursor_id) .unwrap() - .replace(Cursor::new_btree(cursor)); + .replace(Cursor::new_btree(Box::new(cursor))); } else { let num_columns = match cursor_type { CursorType::BTreeTable(table_rc) => table_rc.columns.len(), @@ -6847,7 +6847,7 @@ pub fn op_open_write( cursors .get_mut(*cursor_id) .unwrap() - .replace(Cursor::new_btree(cursor)); + .replace(Cursor::new_btree(Box::new(cursor))); } state.pc += 1; Ok(InsnFunctionStepResult::Step) @@ -7505,7 +7505,7 @@ pub enum OpOpenEphemeralState { // clippy complains this variant is too big when compared to the rest of the variants // so it says we need to box it here Rewind { - cursor: Box, + cursor: Box, }, } pub fn op_open_ephemeral( @@ -7633,13 +7633,13 @@ pub fn op_open_ephemeral( cursors .get_mut(cursor_id) .unwrap() - .replace(Cursor::new_btree(*cursor)); + .replace(Cursor::new_btree(cursor)); } CursorType::BTreeIndex(_) => { cursors .get_mut(cursor_id) .unwrap() - .replace(Cursor::new_btree(*cursor)); + .replace(Cursor::new_btree(cursor)); } CursorType::Pseudo(_) => { panic!("OpenEphemeral on pseudo cursor"); @@ -7685,7 +7685,7 @@ pub fn op_open_dup( // We use the pager from the original cursor instead of the one attached to // the connection because each ephemeral table creates its own pager (and // a separate database file). - let pager = &original_cursor.pager; + let pager = original_cursor.get_pager(); let mv_cursor = if let Some(tx_id) = program.connection.get_mv_tx_id() { let mv_store = mv_store.unwrap().clone(); @@ -7709,7 +7709,7 @@ pub fn op_open_dup( cursors .get_mut(*new_cursor_id) .unwrap() - .replace(Cursor::new_btree(cursor)); + .replace(Cursor::new_btree(Box::new(cursor))); } CursorType::BTreeIndex(table) => { // In principle, we could implement OpenDup for BTreeIndex, From 160a84250ef288e989bfd852efa4eced0a3ad7bc Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Fri, 10 Oct 2025 15:03:31 +0200 Subject: [PATCH 142/428] core: add CursorTrait imports where needed --- core/incremental/compiler.rs | 2 ++ core/incremental/operator.rs | 1 + core/incremental/persistence.rs | 2 +- core/incremental/view.rs | 4 ++-- core/mvcc/database/mod.rs | 1 + core/schema.rs | 2 +- 6 files changed, 8 insertions(+), 4 deletions(-) diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index 84f50bfc6..15794068b 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -2742,6 +2742,8 @@ mod tests { // This reads the actual persisted data from the BTree #[cfg(test)] fn get_current_state(pager: Arc, circuit: &DbspCircuit) -> Result { + use crate::storage::btree::CursorTrait; + let mut delta = Delta::new(); let main_data_root = circuit.main_data_root; diff --git a/core/incremental/operator.rs b/core/incremental/operator.rs index f4598e1fb..276249fb3 100644 --- a/core/incremental/operator.rs +++ b/core/incremental/operator.rs @@ -254,6 +254,7 @@ mod tests { use super::*; use crate::incremental::aggregate_operator::{AggregateOperator, AGG_TYPE_REGULAR}; use crate::incremental::dbsp::HashableRow; + use crate::storage::btree::CursorTrait; use crate::storage::pager::CreateBTreeFlags; use crate::types::Text; use crate::util::IOExt; diff --git a/core/incremental/persistence.rs b/core/incremental/persistence.rs index 81d0837c2..bba64a282 100644 --- a/core/incremental/persistence.rs +++ b/core/incremental/persistence.rs @@ -1,5 +1,5 @@ use crate::incremental::operator::{AggregateState, DbspStateCursors}; -use crate::storage::btree::{BTreeCursor, BTreeKey}; +use crate::storage::btree::{BTreeCursor, BTreeKey, CursorTrait}; use crate::types::{IOResult, ImmutableRecord, SeekKey, SeekOp, SeekResult}; use crate::{return_if_io, LimboError, Result, Value}; diff --git a/core/incremental/view.rs b/core/incremental/view.rs index fc4a8bba6..b95c8a0ca 100644 --- a/core/incremental/view.rs +++ b/core/incremental/view.rs @@ -2,7 +2,7 @@ use super::compiler::{DbspCircuit, DbspCompiler, DeltaSet}; use super::dbsp::Delta; use super::operator::ComputationTracker; use crate::schema::{BTreeTable, Schema}; -use crate::storage::btree::BTreeCursor; +use crate::storage::btree::CursorTrait; use crate::translate::logical::LogicalPlanBuilder; use crate::types::{IOResult, Value}; use crate::util::{extract_view_columns, ViewColumnSchema}; @@ -1112,7 +1112,7 @@ impl IncrementalView { &mut self, conn: &std::sync::Arc, pager: &std::sync::Arc, - _btree_cursor: &mut BTreeCursor, + _btree_cursor: &mut dyn CursorTrait, ) -> crate::Result> { // Assert that this is a materialized view with a root page assert!( diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 95014cbc2..d6275b947 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -5,6 +5,7 @@ use crate::state_machine::StateTransition; use crate::state_machine::TransitionResult; use crate::storage::btree::BTreeCursor; use crate::storage::btree::BTreeKey; +use crate::storage::btree::CursorTrait; use crate::storage::btree::CursorValidState; use crate::storage::sqlite3_ondisk::DatabaseHeader; use crate::storage::wal::TursoRwLock; diff --git a/core/schema.rs b/core/schema.rs index f81c8fe2e..0bf9f464c 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -72,7 +72,7 @@ impl Clone for View { /// Type alias for regular views collection pub type ViewsMap = HashMap>; -use crate::storage::btree::BTreeCursor; +use crate::storage::btree::{BTreeCursor, CursorTrait}; use crate::translate::collate::CollationSeq; use crate::translate::plan::{SelectPlan, TableReferences}; use crate::util::{ From 2cc79471077e05a824b5dad0f896dc463184c1e5 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 7 Oct 2025 23:34:54 -0300 Subject: [PATCH 143/428] define alter table in sql_generation --- Cargo.lock | 1 + sql_generation/Cargo.toml | 1 + sql_generation/generation/query.rs | 50 ++++++++++++++++++++- sql_generation/model/query/alter_table.rs | 54 +++++++++++++++++++++++ sql_generation/model/query/mod.rs | 1 + 5 files changed, 106 insertions(+), 1 deletion(-) create mode 100644 sql_generation/model/query/alter_table.rs diff --git a/Cargo.lock b/Cargo.lock index 2ddd72015..6d1db41bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3854,6 +3854,7 @@ dependencies = [ "rand_chacha 0.9.0", "schemars 1.0.4", "serde", + "strum", "tracing", "turso_core", "turso_parser", diff --git a/sql_generation/Cargo.toml b/sql_generation/Cargo.toml index 5c4de8d6e..8d1084f24 100644 --- a/sql_generation/Cargo.toml +++ b/sql_generation/Cargo.toml @@ -22,6 +22,7 @@ tracing = { workspace = true } schemars = { workspace = true } garde = { workspace = true, features = ["derive", "serde"] } indexmap = { workspace = true } +strum = { workspace = true } [dev-dependencies] rand_chacha = { workspace = true } diff --git a/sql_generation/generation/query.rs b/sql_generation/generation/query.rs index f2264720e..82d6296df 100644 --- a/sql_generation/generation/query.rs +++ b/sql_generation/generation/query.rs @@ -2,6 +2,7 @@ use crate::generation::{ gen_random_text, pick_n_unique, pick_unique, Arbitrary, ArbitraryFrom, ArbitrarySized, GenerationContext, }; +use crate::model::query::alter_table::{AlterTable, AlterTableType, AlterTableTypeDiscriminants}; use crate::model::query::predicate::Predicate; use crate::model::query::select::{ CompoundOperator, CompoundSelect, Distinctness, FromClause, OrderBy, ResultColumn, SelectBody, @@ -9,9 +10,12 @@ use crate::model::query::select::{ }; use crate::model::query::update::Update; use crate::model::query::{Create, CreateIndex, Delete, Drop, Insert, Select}; -use crate::model::table::{JoinTable, JoinType, JoinedTable, SimValue, Table, TableContext}; +use crate::model::table::{ + Column, JoinTable, JoinType, JoinedTable, Name, SimValue, Table, TableContext, +}; use indexmap::IndexSet; use itertools::Itertools; +use rand::seq::IndexedRandom; use rand::Rng; use turso_parser::ast::{Expr, SortOrder}; @@ -385,3 +389,47 @@ impl Arbitrary for Update { } } } + +impl Arbitrary for AlterTable { + fn arbitrary(rng: &mut R, context: &C) -> Self { + let table = pick(context.tables(), rng); + let choices: &'static [AlterTableTypeDiscriminants] = if table.columns.len() > 1 { + &[ + AlterTableTypeDiscriminants::RenameTo, + AlterTableTypeDiscriminants::AddColumn, + // AlterTableTypeDiscriminants::AlterColumn, + AlterTableTypeDiscriminants::RenameColumn, + AlterTableTypeDiscriminants::DropColumn, + ] + } else { + &[ + AlterTableTypeDiscriminants::RenameTo, + AlterTableTypeDiscriminants::AddColumn, + // AlterTableTypeDiscriminants::AlterColumn, + AlterTableTypeDiscriminants::RenameColumn, + ] + }; + let alter_table_type = match choices.choose(rng).unwrap() { + AlterTableTypeDiscriminants::RenameTo => AlterTableType::RenameTo { + new_name: Name::arbitrary(rng, context).0, + }, + AlterTableTypeDiscriminants::AddColumn => AlterTableType::AddColumn { + column: Column::arbitrary(rng, context), + }, + AlterTableTypeDiscriminants::AlterColumn => { + todo!(); + } + AlterTableTypeDiscriminants::RenameColumn => AlterTableType::RenameColumn { + old: pick(&table.columns, rng).name.clone(), + new: Name::arbitrary(rng, context).0, + }, + AlterTableTypeDiscriminants::DropColumn => AlterTableType::DropColumn { + column_name: pick(&table.columns, rng).name.clone(), + }, + }; + Self { + table_name: table.name.clone(), + alter_table_type, + } + } +} diff --git a/sql_generation/model/query/alter_table.rs b/sql_generation/model/query/alter_table.rs new file mode 100644 index 000000000..684198b35 --- /dev/null +++ b/sql_generation/model/query/alter_table.rs @@ -0,0 +1,54 @@ +use std::fmt::Display; + +use serde::{Deserialize, Serialize}; + +use crate::model::table::Column; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct AlterTable { + pub table_name: String, + pub alter_table_type: AlterTableType, +} + +// TODO: in the future maybe use parser AST's when we test almost the entire SQL spectrum +// so we can repeat less code +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, strum::EnumDiscriminants)] +pub enum AlterTableType { + /// `RENAME TO`: new table name + RenameTo { new_name: String }, + /// `ADD COLUMN` + AddColumn { column: Column }, + /// `ALTER COLUMN` + AlterColumn { old: String, new: Column }, + /// `RENAME COLUMN` + RenameColumn { + /// old name + old: String, + /// new name + new: String, + }, + /// `DROP COLUMN` + DropColumn { column_name: String }, +} + +impl Display for AlterTable { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "ALTER TABLE {} {}", + self.table_name, self.alter_table_type + ) + } +} + +impl Display for AlterTableType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AlterTableType::RenameTo { new_name } => write!(f, "RENAME TO {new_name}"), + AlterTableType::AddColumn { column } => write!(f, "ADD COLUMN {column}"), + AlterTableType::AlterColumn { old, new } => write!(f, "ALTER COLUMN {old} TO {new}"), + AlterTableType::RenameColumn { old, new } => write!(f, "RENAME COLUMN {old} TO {new}"), + AlterTableType::DropColumn { column_name } => write!(f, "DROP COLUMN {column_name}"), + } + } +} diff --git a/sql_generation/model/query/mod.rs b/sql_generation/model/query/mod.rs index 98ec2bdfd..9876ffe54 100644 --- a/sql_generation/model/query/mod.rs +++ b/sql_generation/model/query/mod.rs @@ -6,6 +6,7 @@ pub use drop_index::DropIndex; pub use insert::Insert; pub use select::Select; +pub mod alter_table; pub mod create; pub mod create_index; pub mod delete; From f593080c2ac1d42fa74c914af07e7231f03d941f Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Wed, 8 Oct 2025 13:46:29 -0300 Subject: [PATCH 144/428] add `Query::AlterTable` variant --- simulator/generation/plan.rs | 37 ++++++++++++-------------------- simulator/generation/property.rs | 24 +++++++++++---------- simulator/generation/query.rs | 13 +++++++++-- simulator/model/mod.rs | 34 +++++++++++++++++++++++------ 4 files changed, 66 insertions(+), 42 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 337978569..d1bc9f0d3 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -165,18 +165,7 @@ impl InteractionPlan { } pub(crate) fn stats(&self) -> InteractionStats { - let mut stats = InteractionStats { - select_count: 0, - insert_count: 0, - delete_count: 0, - update_count: 0, - create_count: 0, - create_index_count: 0, - drop_count: 0, - begin_count: 0, - commit_count: 0, - rollback_count: 0, - }; + let mut stats = InteractionStats::default(); fn query_stat(q: &Query, stats: &mut InteractionStats) { match q { @@ -190,6 +179,7 @@ impl InteractionPlan { Query::Begin(_) => stats.begin_count += 1, Query::Commit(_) => stats.commit_count += 1, Query::Rollback(_) => stats.rollback_count += 1, + Query::AlterTable(_) => stats.alter_table_count += 1, Query::Placeholder => {} } } @@ -699,18 +689,19 @@ impl Display for InteractionPlan { } } -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Default)] pub(crate) struct InteractionStats { - pub(crate) select_count: u32, - pub(crate) insert_count: u32, - pub(crate) delete_count: u32, - pub(crate) update_count: u32, - pub(crate) create_count: u32, - pub(crate) create_index_count: u32, - pub(crate) drop_count: u32, - pub(crate) begin_count: u32, - pub(crate) commit_count: u32, - pub(crate) rollback_count: u32, + pub select_count: u32, + pub insert_count: u32, + pub delete_count: u32, + pub update_count: u32, + pub create_count: u32, + pub create_index_count: u32, + pub drop_count: u32, + pub begin_count: u32, + pub commit_count: u32, + pub rollback_count: u32, + pub alter_table_count: u32, } impl Display for InteractionStats { diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index b67026dae..a858a6516 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1352,17 +1352,18 @@ fn assert_all_table_values( } #[derive(Debug)] -pub(crate) struct Remaining { - pub(crate) select: u32, - pub(crate) insert: u32, - pub(crate) create: u32, - pub(crate) create_index: u32, - pub(crate) delete: u32, - pub(crate) update: u32, - pub(crate) drop: u32, +pub(super) struct Remaining { + pub select: u32, + pub insert: u32, + pub create: u32, + pub create_index: u32, + pub delete: u32, + pub update: u32, + pub drop: u32, + pub alter_table: u32, } -pub(crate) fn remaining( +pub(super) fn remaining( max_interactions: u32, opts: &QueryProfile, stats: &InteractionStats, @@ -1417,6 +1418,7 @@ pub(crate) fn remaining( delete: remaining_delete, drop: remaining_drop, update: remaining_update, + alter_table: 0, // TODO: calculate remaining } } @@ -1727,7 +1729,7 @@ fn property_faulty_query( type PropertyGenFunc = fn(&mut R, &QueryDistribution, &G, bool) -> Property; impl PropertyDiscriminants { - pub(super) fn gen_function(&self) -> PropertyGenFunc + fn gen_function(&self) -> PropertyGenFunc where R: rand::Rng + ?Sized, G: GenerationContext, @@ -1756,7 +1758,7 @@ impl PropertyDiscriminants { } } - pub fn weight( + fn weight( &self, env: &SimulatorEnv, remaining: &Remaining, diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index 914b44b35..f0ab68f2a 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -79,6 +79,13 @@ fn random_create_index( Query::CreateIndex(create_index) } +fn random_alter_table( + rng: &mut R, + conn_ctx: &impl GenerationContext, +) -> Query { + todo!() +} + /// Possible queries that can be generated given the table state /// /// Does not take into account transactional statements @@ -93,7 +100,7 @@ pub const fn possible_queries(tables: &[Table]) -> &'static [QueryDiscriminants] type QueryGenFunc = fn(&mut R, &G) -> Query; impl QueryDiscriminants { - pub fn gen_function(&self) -> QueryGenFunc + fn gen_function(&self) -> QueryGenFunc where R: rand::Rng + ?Sized, G: GenerationContext, @@ -106,6 +113,7 @@ impl QueryDiscriminants { QueryDiscriminants::Update => random_update, QueryDiscriminants::Drop => random_drop, QueryDiscriminants::CreateIndex => random_create_index, + QueryDiscriminants::AlterTable => random_alter_table, QueryDiscriminants::Begin | QueryDiscriminants::Commit | QueryDiscriminants::Rollback => { @@ -117,7 +125,7 @@ impl QueryDiscriminants { } } - pub fn weight(&self, remaining: &Remaining) -> u32 { + fn weight(&self, remaining: &Remaining) -> u32 { match self { QueryDiscriminants::Create => remaining.create, // remaining.select / 3 is for the random_expr generation @@ -128,6 +136,7 @@ impl QueryDiscriminants { QueryDiscriminants::Update => remaining.update, QueryDiscriminants::Drop => remaining.drop, QueryDiscriminants::CreateIndex => remaining.create_index, + QueryDiscriminants::AlterTable => remaining.alter_table, QueryDiscriminants::Begin | QueryDiscriminants::Commit | QueryDiscriminants::Rollback => { diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 3f8a4ec9d..24c30eb2d 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize}; use sql_generation::model::{ query::{ Create, CreateIndex, Delete, Drop, Insert, Select, + alter_table::AlterTable, select::{CompoundOperator, FromClause, ResultColumn, SelectInner}, transaction::{Begin, Commit, Rollback}, update::Update, @@ -29,6 +30,7 @@ pub enum Query { Update(Update), Drop(Drop), CreateIndex(CreateIndex), + AlterTable(AlterTable), Begin(Begin), Commit(Commit), Rollback(Rollback), @@ -67,10 +69,13 @@ impl Query { | Query::Insert(Insert::Values { table, .. }) | Query::Delete(Delete { table, .. }) | Query::Update(Update { table, .. }) - | Query::Drop(Drop { table, .. }) => IndexSet::from_iter([table.clone()]), - Query::CreateIndex(CreateIndex { table_name, .. }) => { - IndexSet::from_iter([table_name.clone()]) - } + | Query::Drop(Drop { table, .. }) + | Query::CreateIndex(CreateIndex { + table_name: table, .. + }) + | Query::AlterTable(AlterTable { + table_name: table, .. + }) => IndexSet::from_iter([table.clone()]), Query::Begin(_) | Query::Commit(_) | Query::Rollback(_) => IndexSet::new(), Query::Placeholder => IndexSet::new(), } @@ -83,8 +88,13 @@ impl Query { | Query::Insert(Insert::Values { table, .. }) | Query::Delete(Delete { table, .. }) | Query::Update(Update { table, .. }) - | Query::Drop(Drop { table, .. }) => vec![table.clone()], - Query::CreateIndex(CreateIndex { table_name, .. }) => vec![table_name.clone()], + | Query::Drop(Drop { table, .. }) + | Query::CreateIndex(CreateIndex { + table_name: table, .. + }) + | Query::AlterTable(AlterTable { + table_name: table, .. + }) => vec![table.clone()], Query::Begin(..) | Query::Commit(..) | Query::Rollback(..) => vec![], Query::Placeholder => vec![], } @@ -117,6 +127,7 @@ impl Display for Query { Self::Update(update) => write!(f, "{update}"), Self::Drop(drop) => write!(f, "{drop}"), Self::CreateIndex(create_index) => write!(f, "{create_index}"), + Self::AlterTable(alter_table) => write!(f, "{alter_table}"), Self::Begin(begin) => write!(f, "{begin}"), Self::Commit(commit) => write!(f, "{commit}"), Self::Rollback(rollback) => write!(f, "{rollback}"), @@ -137,6 +148,7 @@ impl Shadow for Query { Query::Update(update) => update.shadow(env), Query::Drop(drop) => drop.shadow(env), Query::CreateIndex(create_index) => Ok(create_index.shadow(env)), + Query::AlterTable(alter_table) => alter_table.shadow(env), Query::Begin(begin) => Ok(begin.shadow(env)), Query::Commit(commit) => Ok(commit.shadow(env)), Query::Rollback(rollback) => Ok(rollback.shadow(env)), @@ -154,6 +166,7 @@ bitflags! { const UPDATE = 1 << 4; const DROP = 1 << 5; const CREATE_INDEX = 1 << 6; + const ALTER_TABLE = 1 << 7; } } @@ -182,6 +195,7 @@ impl From for QueryCapabilities { QueryDiscriminants::Update => Self::UPDATE, QueryDiscriminants::Drop => Self::DROP, QueryDiscriminants::CreateIndex => Self::CREATE_INDEX, + QueryDiscriminants::AlterTable => Self::ALTER_TABLE, QueryDiscriminants::Begin | QueryDiscriminants::Commit | QueryDiscriminants::Rollback => { @@ -522,3 +536,11 @@ impl Shadow for Update { Ok(vec![]) } } + +impl Shadow for AlterTable { + type Result = anyhow::Result>>; + + fn shadow(&self, tables: &mut ShadowTablesMut<'_>) -> Self::Result { + Ok(vec![]) + } +} From fb1042187b472940610c6a3b4b7759ac186efb83 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 10 Oct 2025 19:25:58 +0300 Subject: [PATCH 145/428] Turso 0.3.0-pre.1 --- Cargo.lock | 54 +++++++++---------- Cargo.toml | 34 ++++++------ bindings/javascript/package-lock.json | 36 ++++++------- bindings/javascript/package.json | 2 +- .../javascript/packages/common/package.json | 2 +- .../javascript/packages/native/package.json | 4 +- .../packages/wasm-common/package.json | 2 +- .../javascript/packages/wasm/package.json | 6 +-- .../sync/packages/common/package.json | 4 +- .../sync/packages/native/package.json | 6 +-- .../sync/packages/wasm/package.json | 8 +-- bindings/javascript/yarn.lock | 30 +++++------ 12 files changed, 94 insertions(+), 94 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ddd72015..4df9a828e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -690,7 +690,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_tester" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "anyhow", "assert_cmd", @@ -2278,7 +2278,7 @@ dependencies = [ [[package]] name = "limbo_completion" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "mimalloc", "turso_ext", @@ -2286,7 +2286,7 @@ dependencies = [ [[package]] name = "limbo_crypto" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "blake3", "data-encoding", @@ -2299,7 +2299,7 @@ dependencies = [ [[package]] name = "limbo_csv" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "csv", "mimalloc", @@ -2309,7 +2309,7 @@ dependencies = [ [[package]] name = "limbo_fuzzy" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "mimalloc", "turso_ext", @@ -2317,7 +2317,7 @@ dependencies = [ [[package]] name = "limbo_ipaddr" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "ipnetwork", "mimalloc", @@ -2326,7 +2326,7 @@ dependencies = [ [[package]] name = "limbo_percentile" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "mimalloc", "turso_ext", @@ -2334,7 +2334,7 @@ dependencies = [ [[package]] name = "limbo_regexp" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "mimalloc", "regex", @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "limbo_sim" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "anyhow", "bitflags 2.9.4", @@ -2379,7 +2379,7 @@ dependencies = [ [[package]] name = "limbo_sqlite_test_ext" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "cc", ] @@ -3115,7 +3115,7 @@ dependencies = [ [[package]] name = "py-turso" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "anyhow", "pyo3", @@ -3842,7 +3842,7 @@ checksum = "d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d" [[package]] name = "sql_generation" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "anarchist-readable-name-generator-lib 0.2.0", "anyhow", @@ -4356,7 +4356,7 @@ dependencies = [ [[package]] name = "turso" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "rand 0.9.2", "rand_chacha 0.9.0", @@ -4368,7 +4368,7 @@ dependencies = [ [[package]] name = "turso-java" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "jni", "thiserror 2.0.16", @@ -4377,7 +4377,7 @@ dependencies = [ [[package]] name = "turso_cli" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "anyhow", "cfg-if", @@ -4413,7 +4413,7 @@ dependencies = [ [[package]] name = "turso_core" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "aegis", "aes", @@ -4472,7 +4472,7 @@ dependencies = [ [[package]] name = "turso_dart" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "flutter_rust_bridge", "turso_core", @@ -4480,7 +4480,7 @@ dependencies = [ [[package]] name = "turso_ext" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "chrono", "getrandom 0.3.2", @@ -4489,7 +4489,7 @@ dependencies = [ [[package]] name = "turso_ext_tests" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "env_logger 0.11.7", "lazy_static", @@ -4500,7 +4500,7 @@ dependencies = [ [[package]] name = "turso_macros" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "proc-macro2", "quote", @@ -4509,7 +4509,7 @@ dependencies = [ [[package]] name = "turso_node" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "chrono", "napi", @@ -4522,7 +4522,7 @@ dependencies = [ [[package]] name = "turso_parser" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "bitflags 2.9.4", "criterion", @@ -4538,7 +4538,7 @@ dependencies = [ [[package]] name = "turso_sqlite3" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "env_logger 0.11.7", "libc", @@ -4551,7 +4551,7 @@ dependencies = [ [[package]] name = "turso_stress" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "anarchist-readable-name-generator-lib 0.1.2", "antithesis_sdk", @@ -4567,7 +4567,7 @@ dependencies = [ [[package]] name = "turso_sync_engine" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "base64", "bytes", @@ -4594,7 +4594,7 @@ dependencies = [ [[package]] name = "turso_sync_js" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "genawaiter", "napi", @@ -4609,7 +4609,7 @@ dependencies = [ [[package]] name = "turso_whopper" -version = "0.2.0" +version = "0.3.0-pre.1" dependencies = [ "anyhow", "clap", diff --git a/Cargo.toml b/Cargo.toml index 51998d10a..f62181620 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,29 +39,29 @@ exclude = [ ] [workspace.package] -version = "0.2.0" +version = "0.3.0-pre.1" authors = ["the Limbo authors"] edition = "2021" license = "MIT" repository = "https://github.com/tursodatabase/turso" [workspace.dependencies] -turso = { path = "bindings/rust", version = "0.2.0" } -turso_node = { path = "bindings/javascript", version = "0.2.0" } -limbo_completion = { path = "extensions/completion", version = "0.2.0" } -turso_core = { path = "core", version = "0.2.0" } -turso_sync_engine = { path = "sync/engine", version = "0.2.0" } -limbo_crypto = { path = "extensions/crypto", version = "0.2.0" } -limbo_csv = { path = "extensions/csv", version = "0.2.0" } -turso_ext = { path = "extensions/core", version = "0.2.0" } -turso_ext_tests = { path = "extensions/tests", version = "0.2.0" } -limbo_ipaddr = { path = "extensions/ipaddr", version = "0.2.0" } -turso_macros = { path = "macros", version = "0.2.0" } -limbo_percentile = { path = "extensions/percentile", version = "0.2.0" } -limbo_regexp = { path = "extensions/regexp", version = "0.2.0" } -limbo_uuid = { path = "extensions/uuid", version = "0.2.0" } -turso_parser = { path = "parser", version = "0.2.0" } -limbo_fuzzy = { path = "extensions/fuzzy", version = "0.2.0" } +turso = { path = "bindings/rust", version = "0.3.0-pre.1" } +turso_node = { path = "bindings/javascript", version = "0.3.0-pre.1" } +limbo_completion = { path = "extensions/completion", version = "0.3.0-pre.1" } +turso_core = { path = "core", version = "0.3.0-pre.1" } +turso_sync_engine = { path = "sync/engine", version = "0.3.0-pre.1" } +limbo_crypto = { path = "extensions/crypto", version = "0.3.0-pre.1" } +limbo_csv = { path = "extensions/csv", version = "0.3.0-pre.1" } +turso_ext = { path = "extensions/core", version = "0.3.0-pre.1" } +turso_ext_tests = { path = "extensions/tests", version = "0.3.0-pre.1" } +limbo_ipaddr = { path = "extensions/ipaddr", version = "0.3.0-pre.1" } +turso_macros = { path = "macros", version = "0.3.0-pre.1" } +limbo_percentile = { path = "extensions/percentile", version = "0.3.0-pre.1" } +limbo_regexp = { path = "extensions/regexp", version = "0.3.0-pre.1" } +limbo_uuid = { path = "extensions/uuid", version = "0.3.0-pre.1" } +turso_parser = { path = "parser", version = "0.3.0-pre.1" } +limbo_fuzzy = { path = "extensions/fuzzy", version = "0.3.0-pre.1" } sql_generation = { path = "sql_generation" } strum = { version = "0.26", features = ["derive"] } strum_macros = "0.26" diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json index 34da4b8a3..5a1659760 100644 --- a/bindings/javascript/package-lock.json +++ b/bindings/javascript/package-lock.json @@ -1,11 +1,11 @@ { "name": "javascript", - "version": "0.2.0", + "version": "0.3.0-pre.1", "lockfileVersion": 3, "requires": true, "packages": { "": { - "version": "0.2.0", + "version": "0.3.0-pre.1", "workspaces": [ "packages/common", "packages/wasm-common", @@ -3542,7 +3542,7 @@ }, "packages/common": { "name": "@tursodatabase/database-common", - "version": "0.2.0", + "version": "0.3.0-pre.1", "license": "MIT", "devDependencies": { "typescript": "^5.9.2", @@ -3551,10 +3551,10 @@ }, "packages/native": { "name": "@tursodatabase/database", - "version": "0.2.0", + "version": "0.3.0-pre.1", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3568,11 +3568,11 @@ }, "packages/wasm": { "name": "@tursodatabase/database-wasm", - "version": "0.2.0", + "version": "0.3.0-pre.1", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0", - "@tursodatabase/database-wasm-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.1" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3585,7 +3585,7 @@ }, "packages/wasm-common": { "name": "@tursodatabase/database-wasm-common", - "version": "0.2.0", + "version": "0.3.0-pre.1", "license": "MIT", "dependencies": { "@napi-rs/wasm-runtime": "^1.0.5" @@ -3596,10 +3596,10 @@ }, "sync/packages/common": { "name": "@tursodatabase/sync-common", - "version": "0.2.0", + "version": "0.3.0-pre.1", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1" }, "devDependencies": { "typescript": "^5.9.2" @@ -3607,11 +3607,11 @@ }, "sync/packages/native": { "name": "@tursodatabase/sync", - "version": "0.2.0", + "version": "0.3.0-pre.1", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0", - "@tursodatabase/sync-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1", + "@tursodatabase/sync-common": "^0.3.0-pre.1" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3622,12 +3622,12 @@ }, "sync/packages/wasm": { "name": "@tursodatabase/sync-wasm", - "version": "0.2.0", + "version": "0.3.0-pre.1", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.2.0", - "@tursodatabase/database-wasm-common": "^0.2.0", - "@tursodatabase/sync-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.1", + "@tursodatabase/sync-common": "^0.3.0-pre.1" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json index 7625d4ca2..0722f2c73 100644 --- a/bindings/javascript/package.json +++ b/bindings/javascript/package.json @@ -14,5 +14,5 @@ "sync/packages/native", "sync/packages/wasm" ], - "version": "0.2.0" + "version": "0.3.0-pre.1" } diff --git a/bindings/javascript/packages/common/package.json b/bindings/javascript/packages/common/package.json index 9412bdda0..3f54dcf85 100644 --- a/bindings/javascript/packages/common/package.json +++ b/bindings/javascript/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-common", - "version": "0.2.0", + "version": "0.3.0-pre.1", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json index f04125972..ada031000 100644 --- a/bindings/javascript/packages/native/package.json +++ b/bindings/javascript/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database", - "version": "0.2.0", + "version": "0.3.0-pre.1", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -47,7 +47,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/packages/wasm-common/package.json b/bindings/javascript/packages/wasm-common/package.json index c4f9d3bf4..9ff7af8d9 100644 --- a/bindings/javascript/packages/wasm-common/package.json +++ b/bindings/javascript/packages/wasm-common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-wasm-common", - "version": "0.2.0", + "version": "0.3.0-pre.1", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/wasm/package.json b/bindings/javascript/packages/wasm/package.json index eb7e3a542..638366f51 100644 --- a/bindings/javascript/packages/wasm/package.json +++ b/bindings/javascript/packages/wasm/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-wasm", - "version": "0.2.0", + "version": "0.3.0-pre.1", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -51,7 +51,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0", - "@tursodatabase/database-wasm-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.1" } } diff --git a/bindings/javascript/sync/packages/common/package.json b/bindings/javascript/sync/packages/common/package.json index 72962d9f8..bf2b47219 100644 --- a/bindings/javascript/sync/packages/common/package.json +++ b/bindings/javascript/sync/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-common", - "version": "0.2.0", + "version": "0.3.0-pre.1", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1" } } diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json index be78f1452..ac732db7d 100644 --- a/bindings/javascript/sync/packages/native/package.json +++ b/bindings/javascript/sync/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync", - "version": "0.2.0", + "version": "0.3.0-pre.1", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -44,8 +44,8 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0", - "@tursodatabase/sync-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1", + "@tursodatabase/sync-common": "^0.3.0-pre.1" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/sync/packages/wasm/package.json b/bindings/javascript/sync/packages/wasm/package.json index 88d8ffd04..0f90be806 100644 --- a/bindings/javascript/sync/packages/wasm/package.json +++ b/bindings/javascript/sync/packages/wasm/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-wasm", - "version": "0.2.0", + "version": "0.3.0-pre.1", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -54,8 +54,8 @@ "#index": "./index.js" }, "dependencies": { - "@tursodatabase/database-common": "^0.2.0", - "@tursodatabase/database-wasm-common": "^0.2.0", - "@tursodatabase/sync-common": "^0.2.0" + "@tursodatabase/database-common": "^0.3.0-pre.1", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.1", + "@tursodatabase/sync-common": "^0.3.0-pre.1" } } diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock index 7dcf99767..dfff14ee3 100644 --- a/bindings/javascript/yarn.lock +++ b/bindings/javascript/yarn.lock @@ -1092,13 +1092,13 @@ __metadata: linkType: hard "@napi-rs/wasm-runtime@npm:^1.0.1": - version: 1.0.6 - resolution: "@napi-rs/wasm-runtime@npm:1.0.6" + version: 1.0.7 + resolution: "@napi-rs/wasm-runtime@npm:1.0.7" dependencies: "@emnapi/core": "npm:^1.5.0" "@emnapi/runtime": "npm:^1.5.0" "@tybys/wasm-util": "npm:^0.10.1" - checksum: 10c0/af48168c6e13c970498fda3ce7238234a906bc69dd474dc9abd560cdf8a7dea6410147afec8f0191a1d19767c8347d8ec0125a8a93225312f7ac37e06e8c15ad + checksum: 10c0/2d8635498136abb49d6dbf7395b78c63422292240963bf055f307b77aeafbde57ae2c0ceaaef215601531b36d6eb92a2cdd6f5ba90ed2aa8127c27aff9c4ae55 languageName: node linkType: hard @@ -1586,7 +1586,7 @@ __metadata: languageName: node linkType: hard -"@tursodatabase/database-common@npm:^0.2.0, @tursodatabase/database-common@workspace:packages/common": +"@tursodatabase/database-common@npm:^0.3.0-pre.1, @tursodatabase/database-common@workspace:packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/database-common@workspace:packages/common" dependencies: @@ -1595,7 +1595,7 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/database-wasm-common@npm:^0.2.0, @tursodatabase/database-wasm-common@workspace:packages/wasm-common": +"@tursodatabase/database-wasm-common@npm:^0.3.0-pre.1, @tursodatabase/database-wasm-common@workspace:packages/wasm-common": version: 0.0.0-use.local resolution: "@tursodatabase/database-wasm-common@workspace:packages/wasm-common" dependencies: @@ -1609,8 +1609,8 @@ __metadata: resolution: "@tursodatabase/database-wasm@workspace:packages/wasm" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0" - "@tursodatabase/database-wasm-common": "npm:^0.2.0" + "@tursodatabase/database-common": "npm:^0.3.0-pre.1" + "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.1" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1624,7 +1624,7 @@ __metadata: resolution: "@tursodatabase/database@workspace:packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0" + "@tursodatabase/database-common": "npm:^0.3.0-pre.1" "@types/node": "npm:^24.3.1" better-sqlite3: "npm:^12.2.0" drizzle-kit: "npm:^0.31.4" @@ -1634,11 +1634,11 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/sync-common@npm:^0.2.0, @tursodatabase/sync-common@workspace:sync/packages/common": +"@tursodatabase/sync-common@npm:^0.3.0-pre.1, @tursodatabase/sync-common@workspace:sync/packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/sync-common@workspace:sync/packages/common" dependencies: - "@tursodatabase/database-common": "npm:^0.2.0" + "@tursodatabase/database-common": "npm:^0.3.0-pre.1" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1648,9 +1648,9 @@ __metadata: resolution: "@tursodatabase/sync-wasm@workspace:sync/packages/wasm" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0" - "@tursodatabase/database-wasm-common": "npm:^0.2.0" - "@tursodatabase/sync-common": "npm:^0.2.0" + "@tursodatabase/database-common": "npm:^0.3.0-pre.1" + "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.1" + "@tursodatabase/sync-common": "npm:^0.3.0-pre.1" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1664,8 +1664,8 @@ __metadata: resolution: "@tursodatabase/sync@workspace:sync/packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.2.0" - "@tursodatabase/sync-common": "npm:^0.2.0" + "@tursodatabase/database-common": "npm:^0.3.0-pre.1" + "@tursodatabase/sync-common": "npm:^0.3.0-pre.1" "@types/node": "npm:^24.3.1" typescript: "npm:^5.9.2" vitest: "npm:^3.2.4" From 5153e2aa326715381ce8a254c2475cb8e9ff7c5d Mon Sep 17 00:00:00 2001 From: ultraman <1394466835@qq.com> Date: Sat, 11 Oct 2025 16:30:25 +0800 Subject: [PATCH 146/428] Fix disallow reserved prefixes in ALTER TABLE RENAME TO --- core/translate/alter.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/core/translate/alter.rs b/core/translate/alter.rs index 77a1949ea..4f8de0d99 100644 --- a/core/translate/alter.rs +++ b/core/translate/alter.rs @@ -6,7 +6,7 @@ use turso_parser::{ use crate::{ function::{AlterTableFunc, Func}, - schema::{Column, Table}, + schema::{Column, Table, RESERVED_TABLE_PREFIXES}, translate::{ emitter::Resolver, expr::{walk_expr, WalkControl}, @@ -41,6 +41,17 @@ pub fn translate_alter_table( crate::bail_parse_error!("table {} may not be modified", table_name); } + if let ast::AlterTableBody::RenameTo(new_table_name) = &alter_table { + let normalized_new_name = normalize_ident(new_table_name.as_str()); + + if RESERVED_TABLE_PREFIXES + .iter() + .any(|prefix| normalized_new_name.starts_with(prefix)) + { + crate::bail_parse_error!("Object name reserved for internal use: {}", new_table_name); + } + } + let table_indexes = resolver.schema.get_indices(table_name).collect::>(); if !table_indexes.is_empty() && !resolver.schema.indexes_enabled() { From fafbdbfa9d3003c2d55b4f71dab375f807d8aae2 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sat, 11 Oct 2025 14:18:46 -0300 Subject: [PATCH 147/428] persist files in sim memory io for integrity check --- simulator/main.rs | 3 +++ simulator/runner/io.rs | 5 +++++ simulator/runner/memory/io.rs | 11 +++++++++++ simulator/runner/mod.rs | 2 ++ 4 files changed, 21 insertions(+) diff --git a/simulator/main.rs b/simulator/main.rs index 6a5d097b8..8ef99435e 100644 --- a/simulator/main.rs +++ b/simulator/main.rs @@ -611,6 +611,8 @@ fn run_simulation_default( tracing::info!("Simulation completed"); + env.io.persist_files().unwrap(); + if result.error.is_none() { let ic = integrity_check(&env.get_db_path()); if let Err(err) = ic { @@ -684,6 +686,7 @@ const BANNER: &str = r#" "#; fn integrity_check(db_path: &Path) -> anyhow::Result<()> { + assert!(db_path.exists()); let conn = rusqlite::Connection::open(db_path)?; let mut stmt = conn.prepare("SELECT * FROM pragma_integrity_check;")?; let mut rows = stmt.query(())?; diff --git a/simulator/runner/io.rs b/simulator/runner/io.rs index 8eccd470a..c5c38f928 100644 --- a/simulator/runner/io.rs +++ b/simulator/runner/io.rs @@ -79,6 +79,11 @@ impl SimIO for SimulatorIO { fn close_files(&self) { self.files.borrow_mut().clear() } + + fn persist_files(&self) -> anyhow::Result<()> { + // Files are persisted automatically + Ok(()) + } } impl Clock for SimulatorIO { diff --git a/simulator/runner/memory/io.rs b/simulator/runner/memory/io.rs index 557ada9a2..975f0d7ce 100644 --- a/simulator/runner/memory/io.rs +++ b/simulator/runner/memory/io.rs @@ -190,6 +190,17 @@ impl SimIO for MemorySimIO { file.closed.set(true); } } + + fn persist_files(&self) -> anyhow::Result<()> { + let files = self.files.borrow(); + for (file_path, file) in files.iter() { + if file_path.ends_with(".db") || file_path.ends_with("wal") || file_path.ends_with("lg") + { + std::fs::write(file_path, &*file.buffer.borrow())?; + } + } + Ok(()) + } } impl Clock for MemorySimIO { diff --git a/simulator/runner/mod.rs b/simulator/runner/mod.rs index 0f60c95fb..ed898100a 100644 --- a/simulator/runner/mod.rs +++ b/simulator/runner/mod.rs @@ -20,4 +20,6 @@ pub trait SimIO: turso_core::IO { fn syncing(&self) -> bool; fn close_files(&self); + + fn persist_files(&self) -> anyhow::Result<()>; } From ae005427554331936c9d0df7751b03e2534863bf Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 12 Oct 2025 05:25:22 +0530 Subject: [PATCH 148/428] get em aliases shall they be used --- core/translate/update.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/translate/update.rs b/core/translate/update.rs index 867b919ea..6a32f8ad8 100644 --- a/core/translate/update.rs +++ b/core/translate/update.rs @@ -314,7 +314,12 @@ pub fn prepare_update_plan( Table::BTree(btree_table) => Table::BTree(btree_table.clone()), _ => unreachable!(), }, - identifier: table_name.to_string(), + // get em aliases + identifier: body.tbl_name.alias.as_ref().map_or_else( + || table_name.to_string(), + |alias| alias.as_str().to_string(), + ), + internal_id, op: build_scan_op(&table, iter_dir), join_info: None, From 88d5ee0cf1b6b8d9bd51b25e61472bd6931cc533 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 12 Oct 2025 05:45:59 +0530 Subject: [PATCH 149/428] names shall not be shared between tables,index,vtabs,views --- core/schema.rs | 59 +++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 13 deletions(-) diff --git a/core/schema.rs b/core/schema.rs index 0bf9f464c..a0227a171 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -289,9 +289,11 @@ impl Schema { } /// Add a regular (non-materialized) view - pub fn add_view(&mut self, view: View) { + pub fn add_view(&mut self, view: View) -> Result<()> { + self.check_object_name_conflict(&view.name)?; let name = normalize_ident(&view.name); self.views.insert(name, Arc::new(view)); + Ok(()) } /// Get a regular view by name @@ -300,14 +302,18 @@ impl Schema { self.views.get(&name).cloned() } - pub fn add_btree_table(&mut self, table: Arc) { + pub fn add_btree_table(&mut self, table: Arc) -> Result<()> { + self.check_object_name_conflict(&table.name)?; let name = normalize_ident(&table.name); self.tables.insert(name, Table::BTree(table).into()); + Ok(()) } - pub fn add_virtual_table(&mut self, table: Arc) { + pub fn add_virtual_table(&mut self, table: Arc) -> Result<()> { + self.check_object_name_conflict(&table.name)?; let name = normalize_ident(&table.name); self.tables.insert(name, Table::Virtual(table).into()); + Ok(()) } pub fn get_table(&self, name: &str) -> Option> { @@ -340,7 +346,8 @@ impl Schema { } } - pub fn add_index(&mut self, index: Arc) { + pub fn add_index(&mut self, index: Arc) -> Result<()> { + self.check_object_name_conflict(&index.name)?; let table_name = normalize_ident(&index.table_name); // We must add the new index to the front of the deque, because SQLite stores index definitions as a linked list // where the newest parsed index entry is at the head of list. If we would add it to the back of a regular Vec for example, @@ -350,7 +357,8 @@ impl Schema { self.indexes .entry(table_name) .or_default() - .push_front(index.clone()) + .push_front(index.clone()); + Ok(()) } pub fn get_indices(&self, table_name: &str) -> impl Iterator> { @@ -507,7 +515,7 @@ impl Schema { unparsed_sql_from_index.root_page, table.as_ref(), )?; - self.add_index(Arc::new(index)); + self.add_index(Arc::new(index))?; } } @@ -549,7 +557,7 @@ impl Schema { table.as_ref(), automatic_indexes.pop().unwrap(), 1, - )?)); + )?))?; } else { // Add single column unique index if let Some(autoidx) = automatic_indexes.pop() { @@ -557,7 +565,7 @@ impl Schema { table.as_ref(), autoidx, vec![(pos_in_table, unique_set.columns.first().unwrap().1)], - )?)); + )?))?; } } } @@ -575,7 +583,7 @@ impl Schema { table.as_ref(), automatic_indexes.pop().unwrap(), unique_set.columns.len(), - )?)); + )?))?; } else { // Add composite unique index let mut column_indices_and_sort_orders = @@ -593,7 +601,7 @@ impl Schema { table.as_ref(), automatic_indexes.pop().unwrap(), column_indices_and_sort_orders, - )?)); + )?))?; } } @@ -701,7 +709,7 @@ impl Schema { syms, )? }; - self.add_virtual_table(vtab); + self.add_virtual_table(vtab)?; } else { let table = BTreeTable::from_sql(sql, root_page)?; @@ -735,7 +743,7 @@ impl Schema { } } - self.add_btree_table(Arc::new(table)); + self.add_btree_table(Arc::new(table))?; } } "index" => { @@ -834,7 +842,7 @@ impl Schema { // Create regular view let view = View::new(name.to_string(), sql.to_string(), select, final_columns); - self.add_view(view); + self.add_view(view)?; } _ => {} } @@ -1105,6 +1113,31 @@ impl Schema { .and_then(|t| t.btree()) .is_some_and(|t| !t.foreign_keys.is_empty()) } + + fn check_object_name_conflict(&self, name: &str) -> Result<()> { + let normalized_name = normalize_ident(name); + if self.tables.contains_key(&normalized_name) { + return Err(crate::LimboError::ParseError(format!( + "table \"{}\" already exists", + name + ))); + } + if self.views.contains_key(&normalized_name) { + return Err(crate::LimboError::ParseError(format!( + "view \"{}\" already exists", + name + ))); + } + for index_list in self.indexes.values() { + if index_list.iter().any(|i| i.name.eq_ignore_ascii_case(name)) { + return Err(crate::LimboError::ParseError(format!( + "index \"{}\" already exists", + name + ))); + } + } + Ok(()) + } } impl Clone for Schema { From 6c082660ca6037a07b6f3b7007cbf81b372102a0 Mon Sep 17 00:00:00 2001 From: rajajisai Date: Sat, 11 Oct 2025 21:39:41 -0400 Subject: [PATCH 150/428] convert table name to lower case --- core/translate/schema.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/translate/schema.rs b/core/translate/schema.rs index 7191de03e..53f234c76 100644 --- a/core/translate/schema.rs +++ b/core/translate/schema.rs @@ -646,7 +646,8 @@ pub fn translate_drop_table( let null_reg = program.alloc_register(); // r1 program.emit_null(null_reg, None); let table_name_and_root_page_register = program.alloc_register(); // r2, this register is special because it's first used to track table name and then moved root page - let table_reg = program.emit_string8_new_reg(tbl_name.name.as_str().to_string()); // r3 + let table_reg = + program.emit_string8_new_reg(normalize_ident(tbl_name.name.as_str()).to_string()); // r3 program.mark_last_insn_constant(); let table_type = program.emit_string8_new_reg("trigger".to_string()); // r4 program.mark_last_insn_constant(); From 9061024fad94c0cec3d69c86b73898df2e5fd6d4 Mon Sep 17 00:00:00 2001 From: rajajisai Date: Sat, 11 Oct 2025 21:39:46 -0400 Subject: [PATCH 151/428] add test --- testing/drop_table.test | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/testing/drop_table.test b/testing/drop_table.test index 9365b70d4..d7d02256c 100755 --- a/testing/drop_table.test +++ b/testing/drop_table.test @@ -12,6 +12,15 @@ do_execsql_test_on_specific_db {:memory:} drop-table-basic-1 { SELECT count(*) FROM sqlite_schema WHERE type='table' AND name='t1'; } {0} +# The table should be dropped irrespective of the case of the table name. +do_execsql_test_on_specific_db {:memory:} drop-table-case-insensitive { + CREATE TABLE test (x INTEGER PRIMARY KEY); + INSERT INTO test VALUES (1); + INSERT INTO test VALUES (2); + DROP TABLE TeSt; + SELECT count(*) FROM sqlite_schema WHERE type='table' AND name='test'; +} {0} + # Test DROP TABLE IF EXISTS on existing table do_execsql_test_on_specific_db {:memory:} drop-table-if-exists-1 { CREATE TABLE t2 (x INTEGER PRIMARY KEY); From c59b0ffa650d1fbbdcb6b3d4e006314062a2c3ab Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sat, 11 Oct 2025 20:42:23 +0530 Subject: [PATCH 152/428] fix(core/vdbe):pass largest value from table to op_new_rowid --- core/vdbe/execute.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 0ee15fd91..61d715b83 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -6412,7 +6412,7 @@ pub fn op_new_rowid( NewRowid { cursor, rowid_reg, - .. + prev_largest_reg, }, insn ); @@ -6455,6 +6455,11 @@ pub fn op_new_rowid( return_if_io!(cursor.rowid()) }; + if *prev_largest_reg > 0 { + state.registers[*prev_largest_reg] = + Register::Value(Value::Integer(current_max.unwrap_or(0))); + } + match current_max { Some(rowid) if rowid < MAX_ROWID => { // Can use sequential From bd9ce7c485397d7a66c60f04800221b274337dc3 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sat, 11 Oct 2025 20:51:31 +0530 Subject: [PATCH 153/428] add test --- testing/autoincr.test | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/testing/autoincr.test b/testing/autoincr.test index cb28e8c5d..9b69b36a2 100755 --- a/testing/autoincr.test +++ b/testing/autoincr.test @@ -174,4 +174,17 @@ do_execsql_test_on_specific_db {:memory:} autoinc-conflict-on-nothing { INSERT INTO t (k) VALUES ('a') ON CONFLICT DO NOTHING; INSERT INTO t (k) VALUES ('b'); SELECT * FROM t ORDER BY id; -} {1|a 2|a 4|b} \ No newline at end of file +} {1|a 2|a 4|b} + +# https://github.com/tursodatabase/turso/issues/3664 +do_execsql_test_on_specific_db {:memory:} autoinc-skips-manually-updated-pk { + CREATE TABLE t(a INTEGER PRIMARY KEY AUTOINCREMENT); + INSERT INTO t DEFAULT VALUES; + select * from sqlite_sequence; + UPDATE t SET a = a + 1; + SELECT * FROM sqlite_sequence; + INSERT INTO t DEFAULT VALUES; + SELECT * FROM sqlite_sequence; +} {t|1 +t|1 +t|3} From 00bde0d52a3aea9e75dcc93bc13123cf8a00a8c2 Mon Sep 17 00:00:00 2001 From: Henrik Ingo Date: Sun, 12 Oct 2025 13:46:26 +0300 Subject: [PATCH 154/428] =?UTF-8?q?Nyrki=C3=B6=20nightly:=20Reduce=20frequ?= =?UTF-8?q?ency=20to=201=20per=2024h?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also fix a missed path in sqlite3 tests --- .github/workflows/perf_nightly.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/perf_nightly.yml b/.github/workflows/perf_nightly.yml index 2f6d41cbb..690c37595 100644 --- a/.github/workflows/perf_nightly.yml +++ b/.github/workflows/perf_nightly.yml @@ -4,7 +4,7 @@ on: workflow_dispatch: branches: ["main", "notmain", "master"] schedule: - - cron: '24 6,11,14,20 * * *' + - cron: '24 4 * * *' push: # branches: ["main", "notmain", "master"] branches: ["notmain"] @@ -98,7 +98,7 @@ jobs: - name: Analyze SQLITE3 result with Nyrkiö uses: nyrkio/change-detection@HEAD with: - name: clickbench/sqlite3 + name: nightly/clickbench/sqlite3 tool: time output-file-path: clickbench-sqlite3.txt fail-on-alert: false From 36bf88119f2b51aed14f225d2b823caaef875c64 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 12 Oct 2025 05:55:06 +0530 Subject: [PATCH 155/428] add tests clippy expect err to make clippy happy cleanup --- core/incremental/compiler.rs | 28 +++++++++++++++++++++------- core/incremental/view.rs | 20 ++++++++++++++++---- core/schema.rs | 25 +++++++++++++------------ core/translate/logical.rs | 12 +++++++++--- testing/create_table.test | 35 +++++++++++++++++++++++++++++++++++ 5 files changed, 94 insertions(+), 26 deletions(-) diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index 15794068b..f067515cc 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -2247,7 +2247,9 @@ mod tests { unique_sets: vec![], foreign_keys: vec![], }; - schema.add_btree_table(Arc::new(users_table)); + schema + .add_btree_table(Arc::new(users_table)) + .expect("Test setup: failed to add users table"); // Add products table for join tests let products_table = BTreeTable { @@ -2301,7 +2303,9 @@ mod tests { unique_sets: vec![], foreign_keys: vec![], }; - schema.add_btree_table(Arc::new(products_table)); + schema + .add_btree_table(Arc::new(products_table)) + .expect("Test setup: failed to add products table"); // Add orders table for join tests let orders_table = BTreeTable { @@ -2367,7 +2371,9 @@ mod tests { unique_sets: vec![], foreign_keys: vec![], }; - schema.add_btree_table(Arc::new(orders_table)); + schema + .add_btree_table(Arc::new(orders_table)) + .expect("Test setup: failed to add orders table"); // Add customers table with id and name for testing column ambiguity let customers_table = BTreeTable { @@ -2406,7 +2412,9 @@ mod tests { unique_sets: vec![], foreign_keys: vec![], }; - schema.add_btree_table(Arc::new(customers_table)); + schema + .add_btree_table(Arc::new(customers_table)) + .expect("Test setup: failed to add customers table"); // Add purchases table (junction table for three-way join) let purchases_table = BTreeTable { @@ -2469,7 +2477,9 @@ mod tests { unique_sets: vec![], foreign_keys: vec![], }; - schema.add_btree_table(Arc::new(purchases_table)); + schema + .add_btree_table(Arc::new(purchases_table)) + .expect("Test setup: failed to add purchases table"); // Add vendors table with id, name, and price (ambiguous columns with customers) let vendors_table = BTreeTable { @@ -2520,7 +2530,9 @@ mod tests { unique_sets: vec![], foreign_keys: vec![], }; - schema.add_btree_table(Arc::new(vendors_table)); + schema + .add_btree_table(Arc::new(vendors_table)) + .expect("Test setup: failed to add vendors table"); let sales_table = BTreeTable { name: "sales".to_string(), @@ -2558,7 +2570,9 @@ mod tests { unique_sets: vec![], foreign_keys: vec![], }; - schema.add_btree_table(Arc::new(sales_table)); + schema + .add_btree_table(Arc::new(sales_table)) + .expect("Test setup: failed to add sales table"); schema }}; diff --git a/core/incremental/view.rs b/core/incremental/view.rs index b95c8a0ca..957605d17 100644 --- a/core/incremental/view.rs +++ b/core/incremental/view.rs @@ -1565,10 +1565,22 @@ mod tests { unique_sets: vec![], }; - schema.add_btree_table(Arc::new(customers_table)); - schema.add_btree_table(Arc::new(orders_table)); - schema.add_btree_table(Arc::new(products_table)); - schema.add_btree_table(Arc::new(logs_table)); + schema + .add_btree_table(Arc::new(customers_table)) + .expect("Test setup: failed to add customers table"); + + schema + .add_btree_table(Arc::new(orders_table)) + .expect("Test setup: failed to add orders table"); + + schema + .add_btree_table(Arc::new(products_table)) + .expect("Test setup: failed to add products table"); + + schema + .add_btree_table(Arc::new(logs_table)) + .expect("Test setup: failed to add logs table"); + schema } diff --git a/core/schema.rs b/core/schema.rs index a0227a171..40cbb2d84 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1116,26 +1116,27 @@ impl Schema { fn check_object_name_conflict(&self, name: &str) -> Result<()> { let normalized_name = normalize_ident(name); + if self.tables.contains_key(&normalized_name) { - return Err(crate::LimboError::ParseError(format!( - "table \"{}\" already exists", - name - ))); + return Err(crate::LimboError::ParseError( + ["table \"", name, "\" already exists"].concat().to_string(), + )); } + if self.views.contains_key(&normalized_name) { - return Err(crate::LimboError::ParseError(format!( - "view \"{}\" already exists", - name - ))); + return Err(crate::LimboError::ParseError( + ["view \"", name, "\" already exists"].concat().to_string(), + )); } + for index_list in self.indexes.values() { if index_list.iter().any(|i| i.name.eq_ignore_ascii_case(name)) { - return Err(crate::LimboError::ParseError(format!( - "index \"{}\" already exists", - name - ))); + return Err(crate::LimboError::ParseError( + ["index \"", name, "\" already exists"].concat().to_string(), + )); } } + Ok(()) } } diff --git a/core/translate/logical.rs b/core/translate/logical.rs index 6564e2ba3..bdd29972e 100644 --- a/core/translate/logical.rs +++ b/core/translate/logical.rs @@ -2445,7 +2445,9 @@ mod tests { has_autoincrement: false, unique_sets: vec![], }; - schema.add_btree_table(Arc::new(users_table)); + schema + .add_btree_table(Arc::new(users_table)) + .expect("Test setup: failed to add users table"); // Create orders table let orders_table = BTreeTable { @@ -2508,7 +2510,9 @@ mod tests { unique_sets: vec![], foreign_keys: vec![], }; - schema.add_btree_table(Arc::new(orders_table)); + schema + .add_btree_table(Arc::new(orders_table)) + .expect("Test setup: failed to add orders table"); // Create products table let products_table = BTreeTable { @@ -2571,7 +2575,9 @@ mod tests { unique_sets: vec![], foreign_keys: vec![], }; - schema.add_btree_table(Arc::new(products_table)); + schema + .add_btree_table(Arc::new(products_table)) + .expect("Test setup: failed to add products table"); schema } diff --git a/testing/create_table.test b/testing/create_table.test index 289e0e98f..e17196c9f 100755 --- a/testing/create_table.test +++ b/testing/create_table.test @@ -66,3 +66,38 @@ do_execsql_test_in_memory_any_error create_table_duplicate_column_names_case_ins do_execsql_test_in_memory_any_error create_table_duplicate_column_names_quoted { CREATE TABLE t("a", a); } + +# https://github.com/tursodatabase/turso/issues/3675 +do_execsql_test_in_memory_any_error create_table_view_collision-1 { + CREATE VIEW v_same AS SELECT 1; + CREATE TABLE v_same(x INT); +} + +do_execsql_test_in_memory_any_error create_view_table_collision-1 { + CREATE TABLE t_same(x INT); + CREATE VIEW t_same AS SELECT 1; +} + +do_execsql_test_in_memory_any_error create_index_view_collision-1 { + CREATE VIEW i_same AS SELECT 1; + CREATE TABLE t1(x); + CREATE INDEX i_same ON t1(x); +} + +do_execsql_test_in_memory_any_error create_index_table_collision-1 { + CREATE TABLE i_same(x INT); + CREATE TABLE t2(y); + CREATE INDEX i_same ON t2(y); +} + +do_execsql_test_in_memory_any_error create_table_index_collision-1 { + CREATE TABLE t3(z); + CREATE INDEX ix_same ON t3(z); + CREATE TABLE ix_same(x INT); +} + +do_execsql_test_in_memory_any_error create_view_index_collision-1 { + CREATE TABLE t4(w); + CREATE INDEX ix_same ON t4(w); + CREATE VIEW ix_same AS SELECT 1; +} From 7e8dabaee5f0bbfbe306e1cc0c1404eecba3bfd6 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 12 Oct 2025 18:02:03 +0530 Subject: [PATCH 156/428] make comparison case sensitive --- core/util.rs | 2 +- testing/select.test | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/core/util.rs b/core/util.rs index b35fbdaa6..ea4fca470 100644 --- a/core/util.rs +++ b/core/util.rs @@ -312,7 +312,7 @@ pub fn module_args_from_sql(sql: &str) -> Result> { pub fn check_literal_equivalency(lhs: &Literal, rhs: &Literal) -> bool { match (lhs, rhs) { (Literal::Numeric(n1), Literal::Numeric(n2)) => cmp_numeric_strings(n1, n2), - (Literal::String(s1), Literal::String(s2)) => check_ident_equivalency(s1, s2), + (Literal::String(s1), Literal::String(s2)) => s1 == s2, (Literal::Blob(b1), Literal::Blob(b2)) => b1 == b2, (Literal::Keyword(k1), Literal::Keyword(k2)) => check_ident_equivalency(k1, k2), (Literal::Null, Literal::Null) => true, diff --git a/testing/select.test b/testing/select.test index 5dff582f5..1e6ebe9ce 100755 --- a/testing/select.test +++ b/testing/select.test @@ -999,3 +999,8 @@ do_execsql_test_in_memory_any_error limit-column-reference-error { CREATE TABLE t(a); SELECT * FROM t LIMIT (t.a); } + +do_execsql_test select-binary-collation { + SELECT 'a' = 'A'; + SELECT 'a' = 'a'; +} {0 1} \ No newline at end of file From 3491e1f42e12453ed94f0d6d215877fbe1987924 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 12 Oct 2025 22:17:35 +0530 Subject: [PATCH 157/428] add if alais and allow iff to have more arguments --- core/function.rs | 2 +- core/translate/expr.rs | 99 ++++++++++++++++++++++++------------------ 2 files changed, 58 insertions(+), 43 deletions(-) diff --git a/core/function.rs b/core/function.rs index 265816c8d..0a3236856 100644 --- a/core/function.rs +++ b/core/function.rs @@ -752,7 +752,7 @@ impl Func { "total_changes" => Ok(Self::Scalar(ScalarFunc::TotalChanges)), "glob" => Ok(Self::Scalar(ScalarFunc::Glob)), "ifnull" => Ok(Self::Scalar(ScalarFunc::IfNull)), - "iif" => Ok(Self::Scalar(ScalarFunc::Iif)), + "if" | "iif" => Ok(Self::Scalar(ScalarFunc::Iif)), "instr" => Ok(Self::Scalar(ScalarFunc::Instr)), "like" => Ok(Self::Scalar(ScalarFunc::Like)), "abs" => Ok(Self::Scalar(ScalarFunc::Abs)), diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 7bec98832..bcdd3f2c7 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -1143,51 +1143,66 @@ pub fn translate_expr( Ok(target_register) } ScalarFunc::Iif => { - if args.len() != 3 { - crate::bail_parse_error!( - "{} requires exactly 3 arguments", - srf.to_string() - ); + let args = expect_arguments_min!(args, 2, srf); + + let iif_end_label = program.allocate_label(); + let condition_reg = program.alloc_register(); + + for pair in args.chunks_exact(2) { + let condition_expr = &pair[0]; + let value_expr = &pair[1]; + let next_check_label = program.allocate_label(); + + translate_expr_no_constant_opt( + program, + referenced_tables, + condition_expr, + condition_reg, + resolver, + NoConstantOptReason::RegisterReuse, + )?; + + program.emit_insn(Insn::IfNot { + reg: condition_reg, + target_pc: next_check_label, + jump_if_null: true, + }); + + translate_expr_no_constant_opt( + program, + referenced_tables, + value_expr, + target_register, + resolver, + NoConstantOptReason::RegisterReuse, + )?; + program.emit_insn(Insn::Goto { + target_pc: iif_end_label, + }); + + program.preassign_label_to_next_insn(next_check_label); } - let temp_reg = program.alloc_register(); - translate_expr_no_constant_opt( - program, - referenced_tables, - &args[0], - temp_reg, - resolver, - NoConstantOptReason::RegisterReuse, - )?; - let jump_target_when_false = program.allocate_label(); - program.emit_insn(Insn::IfNot { - reg: temp_reg, - target_pc: jump_target_when_false, - jump_if_null: true, - }); - translate_expr_no_constant_opt( - program, - referenced_tables, - &args[1], - target_register, - resolver, - NoConstantOptReason::RegisterReuse, - )?; - let jump_target_result = program.allocate_label(); - program.emit_insn(Insn::Goto { - target_pc: jump_target_result, - }); - program.preassign_label_to_next_insn(jump_target_when_false); - translate_expr_no_constant_opt( - program, - referenced_tables, - &args[2], - target_register, - resolver, - NoConstantOptReason::RegisterReuse, - )?; - program.preassign_label_to_next_insn(jump_target_result); + + if args.len() % 2 != 0 { + translate_expr_no_constant_opt( + program, + referenced_tables, + args.last().unwrap(), + target_register, + resolver, + NoConstantOptReason::RegisterReuse, + )?; + } else { + program.emit_insn(Insn::Null { + dest: target_register, + dest_end: None, + }); + } + + program.preassign_label_to_next_insn(iif_end_label); Ok(target_register) } + ScalarFunc::Glob | ScalarFunc::Like => { if args.len() < 2 { crate::bail_parse_error!( From e1f23aeb2cf77b01a59945d0ff2b52a497efa2dc Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 12 Oct 2025 22:23:04 +0530 Subject: [PATCH 158/428] fmt and add tests --- core/translate/expr.rs | 2 +- testing/scalar-functions.test | 51 +++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 1 deletion(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index bcdd3f2c7..cb02e2e35 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -1187,7 +1187,7 @@ pub fn translate_expr( translate_expr_no_constant_opt( program, referenced_tables, - args.last().unwrap(), + args.last().unwrap(), target_register, resolver, NoConstantOptReason::RegisterReuse, diff --git a/testing/scalar-functions.test b/testing/scalar-functions.test index 963ccc5be..30ba59b83 100755 --- a/testing/scalar-functions.test +++ b/testing/scalar-functions.test @@ -1023,7 +1023,58 @@ do_execsql_test sum-8 { } {1.2} +# https://github.com/tursodatabase/turso/issues/3689 +do_execsql_test iif-3-args-true { + select iif(1 < 2, 'yes', 'no'); +} {yes} + +do_execsql_test iif-3-args-false { + select iif(1 > 2, 'yes', 'no'); +} {no} + +do_execsql_test iif-2-args-true { + select iif(1 < 2, 'yes'); +} {yes} + +do_execsql_test iif-2-args-false-is-null { + select iif(1 > 2, 'yes'); +} {} + +do_execsql_test iif-multi-args-finds-first-true { + select iif(0, 'a', 1, 'b', 2, 'c', 'default'); +} {b} + +do_execsql_test iif-multi-args-falls-to-else { + select iif(0, 'a', 0, 'b', 0, 'c', 'default'); +} {default} + +do_execsql_test if-alias-3-args-true { + select if(1 < 2, 'yes', 'no'); +} {yes} + +do_execsql_test if-alias-3-args-false { + select if(1 > 2, 'yes', 'no'); +} {no} + +do_execsql_test if-alias-2-args-true { + select if(1 < 2, 'ok'); +} {ok} + +do_execsql_test if-alias-multi-args-finds-first-true { + select if(0, 'a', 1, 'b', 'c'); +} {b} + +do_execsql_test if-alias-multi-args-falls-to-else { + select if(0, 'a', 0, 'b', 'c'); +} {c} + +do_execsql_test if-alias-multi-args-no-else-is-null { + select if(0, 'a', 0, 'b'); +} {} + + # TODO: sqlite seems not enable soundex() by default unless build it with SQLITE_SOUNDEX enabled. # do_execsql_test soundex-text { # select soundex('Pfister'), soundex('husobee'), soundex('Tymczak'), soundex('Ashcraft'), soundex('Robert'), soundex('Rupert'), soundex('Rubin'), soundex('Kant'), soundex('Knuth'), soundex('x'), soundex(''); # } {P236|H210|T522|A261|R163|R163|R150|K530|K530|X000|0000} + From 90615239a0c71ecaf3448be25bc3e8dab3fe530c Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 12 Oct 2025 23:02:21 +0530 Subject: [PATCH 159/428] use update flag conditionally before incrementing changes --- core/vdbe/execute.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 61d715b83..625dca6e4 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -5913,9 +5913,11 @@ pub fn op_insert( if let Some(rowid) = maybe_rowid { program.connection.update_last_rowid(rowid); - program - .n_change - .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + if !flag.has(InsertFlags::UPDATE_ROWID_CHANGE) { + program + .n_change + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + } } let schema = program.connection.schema.read(); let dependent_views = schema.get_dependent_materialized_views(table_name); From 295612feeaddee101c10fa925b5fc339c312cf0f Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 12 Oct 2025 23:11:28 +0530 Subject: [PATCH 160/428] add test --- testing/changes.test | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testing/changes.test b/testing/changes.test index ee03a2168..9f1f2123a 100644 --- a/testing/changes.test +++ b/testing/changes.test @@ -32,3 +32,13 @@ do_execsql_test_on_specific_db {:memory:} changes-doesnt-track-indexes { UPDATE users SET name = 'young' where age < 40; select changes(); } {6} + +# https://github.com/tursodatabase/turso/issues/3688 +do_execsql_test_on_specific_db {:memory:} changes-1.69 { + create table t(id integer primary key, value text); + insert into t values (1, 'a'); + select changes(); + update t set id = id+10 where id = 1; + select changes(); +} {1 +1} \ No newline at end of file From cd763ce3732e034ad423f90bba2619e074613aa2 Mon Sep 17 00:00:00 2001 From: rajajisai Date: Sun, 12 Oct 2025 22:46:25 -0400 Subject: [PATCH 161/428] Fix evalauting expression for limit and offset. --- core/translate/compound_select.rs | 57 +++++++++++------- core/translate/emitter.rs | 99 ++++++++++++++++++------------- 2 files changed, 93 insertions(+), 63 deletions(-) diff --git a/core/translate/compound_select.rs b/core/translate/compound_select.rs index 08a55d099..8066dd459 100644 --- a/core/translate/compound_select.rs +++ b/core/translate/compound_select.rs @@ -3,14 +3,13 @@ use crate::translate::collate::get_collseq_from_expr; use crate::translate::emitter::{emit_query, LimitCtx, Resolver, TranslateCtx}; use crate::translate::expr::translate_expr; use crate::translate::plan::{Plan, QueryDestination, SelectPlan}; -use crate::translate::result_row::try_fold_expr_to_i64; use crate::vdbe::builder::{CursorType, ProgramBuilder}; use crate::vdbe::insn::Insn; use crate::vdbe::BranchOffset; use crate::{emit_explain, QueryMode, SymbolTable}; use std::sync::Arc; use tracing::instrument; -use turso_parser::ast::{CompoundOperator, SortOrder}; +use turso_parser::ast::{CompoundOperator, Expr, Literal, SortOrder}; use tracing::Level; @@ -43,34 +42,46 @@ pub fn emit_program_for_compound_select( // the entire compound select, not just a single subselect. let limit_ctx = limit.as_ref().map(|limit| { let reg = program.alloc_register(); - if let Some(val) = try_fold_expr_to_i64(limit) { - program.emit_insn(Insn::Integer { - value: val, - dest: reg, - }); - } else { - program.add_comment(program.offset(), "OFFSET expr"); - _ = translate_expr(program, None, limit, reg, &right_most_ctx.resolver); - program.emit_insn(Insn::MustBeInt { reg }); + match limit.as_ref() { + Expr::Literal(Literal::Numeric(n)) => { + if let Ok(value) = n.parse::() { + program.add_comment(program.offset(), "LIMIT counter"); + program.emit_insn(Insn::Integer { value, dest: reg }); + } else { + let value = n.parse::().unwrap(); + program.emit_insn(Insn::Real { value, dest: reg }); + program.add_comment(program.offset(), "LIMIT counter"); + program.emit_insn(Insn::MustBeInt { reg }); + } + } + _ => { + _ = translate_expr(program, None, limit, reg, &right_most_ctx.resolver); + program.add_comment(program.offset(), "LIMIT counter"); + program.emit_insn(Insn::MustBeInt { reg }); + } } LimitCtx::new_shared(reg) }); let offset_reg = offset.as_ref().map(|offset_expr| { let reg = program.alloc_register(); - - if let Some(val) = try_fold_expr_to_i64(offset_expr) { - // Compile-time constant offset - program.emit_insn(Insn::Integer { - value: val, - dest: reg, - }); - } else { - program.add_comment(program.offset(), "OFFSET expr"); - _ = translate_expr(program, None, offset_expr, reg, &right_most_ctx.resolver); - program.emit_insn(Insn::MustBeInt { reg }); + match offset_expr.as_ref() { + Expr::Literal(Literal::Numeric(n)) => { + // Compile-time constant offset + if let Ok(value) = n.parse::() { + program.emit_insn(Insn::Integer { value, dest: reg }); + } else { + let value = n.parse::().unwrap(); + program.emit_insn(Insn::Real { value, dest: reg }); + } + } + _ => { + _ = translate_expr(program, None, offset_expr, reg, &right_most_ctx.resolver); + } } - + program.add_comment(program.offset(), "OFFSET counter"); + program.emit_insn(Insn::MustBeInt { reg }); let combined_reg = program.alloc_register(); + program.add_comment(program.offset(), "OFFSET + LIMIT"); program.emit_insn(Insn::OffsetLimit { offset_reg: reg, combined_reg, diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 2dabd4b82..6d635d763 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -6,7 +6,7 @@ use std::num::NonZeroUsize; use std::sync::Arc; use tracing::{instrument, Level}; -use turso_parser::ast::{self, Expr}; +use turso_parser::ast::{self, Expr, Literal}; use super::aggregation::emit_ungrouped_aggregation; use super::expr::translate_expr; @@ -37,7 +37,6 @@ use crate::translate::fkeys::{ }; use crate::translate::plan::{DeletePlan, JoinedTable, Plan, QueryDestination, Search}; use crate::translate::planner::ROWID_STRS; -use crate::translate::result_row::try_fold_expr_to_i64; use crate::translate::values::emit_values; use crate::translate::window::{emit_window_results, init_window, WindowMetadata}; use crate::util::{exprs_are_equivalent, normalize_ident}; @@ -1964,52 +1963,72 @@ fn init_limit( if limit_ctx.initialize_counter { if let Some(expr) = limit { - if let Some(value) = try_fold_expr_to_i64(expr) { - program.emit_insn(Insn::Integer { - value, - dest: limit_ctx.reg_limit, - }); - } else { - let r = limit_ctx.reg_limit; - program.add_comment(program.offset(), "OFFSET expr"); - _ = translate_expr(program, None, expr, r, &t_ctx.resolver); - program.emit_insn(Insn::MustBeInt { reg: r }); + match expr.as_ref() { + Expr::Literal(Literal::Numeric(n)) => { + if let Ok(value) = n.parse::() { + program.add_comment(program.offset(), "LIMIT counter"); + program.emit_insn(Insn::Integer { + value, + dest: limit_ctx.reg_limit, + }); + } else { + program.emit_insn(Insn::Real { + value: n.parse::().unwrap(), + dest: limit_ctx.reg_limit, + }); + program.add_comment(program.offset(), "LIMIT counter"); + program.emit_insn(Insn::MustBeInt { + reg: limit_ctx.reg_limit, + }); + } + } + _ => { + let r = limit_ctx.reg_limit; + + _ = translate_expr(program, None, expr, r, &t_ctx.resolver); + program.emit_insn(Insn::MustBeInt { reg: r }); + } } } } if t_ctx.reg_offset.is_none() { if let Some(expr) = offset { - if let Some(value) = try_fold_expr_to_i64(expr) { - if value != 0 { - let reg = program.alloc_register(); - t_ctx.reg_offset = Some(reg); - program.emit_insn(Insn::Integer { value, dest: reg }); - let combined_reg = program.alloc_register(); - t_ctx.reg_limit_offset_sum = Some(combined_reg); - program.emit_insn(Insn::OffsetLimit { - limit_reg: limit_ctx.reg_limit, - offset_reg: reg, - combined_reg, - }); + let offset_reg = program.alloc_register(); + t_ctx.reg_offset = Some(offset_reg); + match expr.as_ref() { + Expr::Literal(Literal::Numeric(n)) => { + if let Ok(value) = n.parse::() { + program.emit_insn(Insn::Integer { + value, + dest: offset_reg, + }); + } else { + let value = n.parse::().unwrap(); + program.emit_insn(Insn::Real { + value, + dest: limit_ctx.reg_limit, + }); + program.emit_insn(Insn::MustBeInt { + reg: limit_ctx.reg_limit, + }); + } + } + _ => { + _ = translate_expr(program, None, expr, offset_reg, &t_ctx.resolver); } - } else { - let reg = program.alloc_register(); - t_ctx.reg_offset = Some(reg); - let r = reg; - - program.add_comment(program.offset(), "OFFSET expr"); - _ = translate_expr(program, None, expr, r, &t_ctx.resolver); - program.emit_insn(Insn::MustBeInt { reg: r }); - - let combined_reg = program.alloc_register(); - t_ctx.reg_limit_offset_sum = Some(combined_reg); - program.emit_insn(Insn::OffsetLimit { - limit_reg: limit_ctx.reg_limit, - offset_reg: reg, - combined_reg, - }); } + program.add_comment(program.offset(), "OFFSET counter"); + program.emit_insn(Insn::MustBeInt { reg: offset_reg }); + + let combined_reg = program.alloc_register(); + t_ctx.reg_limit_offset_sum = Some(combined_reg); + program.add_comment(program.offset(), "OFFSET + LIMIT"); + program.emit_insn(Insn::OffsetLimit { + limit_reg: limit_ctx.reg_limit, + offset_reg, + combined_reg, + }); } } From f703cc7fa72e08bccca40b51118463539a4d00f7 Mon Sep 17 00:00:00 2001 From: rajajisai Date: Sun, 12 Oct 2025 22:46:41 -0400 Subject: [PATCH 162/428] Remove function --- core/translate/result_row.rs | 35 ----------------------------------- 1 file changed, 35 deletions(-) diff --git a/core/translate/result_row.rs b/core/translate/result_row.rs index c087a0abf..ec43c904f 100644 --- a/core/translate/result_row.rs +++ b/core/translate/result_row.rs @@ -1,5 +1,3 @@ -use turso_parser::ast::{Expr, Literal, Operator, UnaryOperator}; - use crate::{ vdbe::{ builder::ProgramBuilder, @@ -172,36 +170,3 @@ pub fn emit_offset(program: &mut ProgramBuilder, jump_to: BranchOffset, reg_offs decrement_by: 1, }); } - -#[allow(clippy::borrowed_box)] -pub fn try_fold_expr_to_i64(expr: &Box) -> Option { - match expr.as_ref() { - Expr::Literal(Literal::Numeric(n)) => n.parse::().ok(), - Expr::Literal(Literal::Null) => Some(0), - Expr::Id(name) if !name.quoted() => { - let lowered = name.as_str(); - if lowered == "true" { - Some(1) - } else if lowered == "false" { - Some(0) - } else { - None - } - } - Expr::Unary(UnaryOperator::Negative, inner) => try_fold_expr_to_i64(inner).map(|v| -v), - Expr::Unary(UnaryOperator::Positive, inner) => try_fold_expr_to_i64(inner), - Expr::Binary(left, op, right) => { - let l = try_fold_expr_to_i64(left)?; - let r = try_fold_expr_to_i64(right)?; - match op { - Operator::Add => Some(l.saturating_add(r)), - Operator::Subtract => Some(l.saturating_sub(r)), - Operator::Multiply => Some(l.saturating_mul(r)), - Operator::Divide if r != 0 => Some(l.saturating_div(r)), - _ => None, - } - } - - _ => None, - } -} From 9bbf3bb780af4b4639296ac9f84916807a418483 Mon Sep 17 00:00:00 2001 From: rajajisai Date: Sun, 12 Oct 2025 22:46:53 -0400 Subject: [PATCH 163/428] Add tests --- testing/offset.test | 49 ++++++++++++++++++++++++++++++++++++++ testing/select.test | 57 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+) mode change 100644 => 100755 testing/offset.test diff --git a/testing/offset.test b/testing/offset.test old mode 100644 new mode 100755 index 720fbebac..25ba69ce9 --- a/testing/offset.test +++ b/testing/offset.test @@ -64,3 +64,52 @@ do_execsql_test_on_specific_db {:memory:} select-ungrouped-aggregate-with-offset INSERT INTO t VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); SELECT COUNT(a) FROM t LIMIT 1 OFFSET 1; } {} + + +do_execsql_test_on_specific_db {:memory:} offset-expr-can-be-cast-losslessly-1 { + SELECT 1 LIMIT 3 OFFSET 1.1 + 2.9; +} {} + +do_execsql_test_on_specific_db {:memory:} offset-expr-can-be-cast-losslessly-2 { + CREATE TABLE T(a); + INSERT INTO T VALUES (1),(2),(3),(4); + SELECT * FROM T LIMIT 1+'2' OFFSET 1.6/2 + 3.6/3 + 4*0.25; +} {4} + +# Strings are cast to float. Final result is integer losslessly +do_execsql_test_on_specific_db {:memory:} offset-expr-can-be-cast-losslessly-3 { + CREATE TABLE T(a); + INSERT INTO T VALUES (1),(2),(3),(4); + SELECT * FROM T LIMIT 3 OFFSET '0.8' + '1.2' + '4'*'0.25'; +} {4} + +# Strings are cast to 0. Expression still valid. +do_execsql_test_on_specific_db {:memory:} offset-expr-int-and-string { + SELECT 1 LIMIT 3 OFFSET 3/3 + 'test' + 4*'test are best'; +} {} + +do_execsql_test_in_memory_error_content offset-expr-cannot-be-cast-losslessly-1 { + SELECT 1 LIMIT 3 OFFSET 1.1; +} {"the value in register cannot be cast to integer"} + +do_execsql_test_in_memory_error_content offset-expr-cannot-be-cast-losslessly-2 { + SELECT 1 LIMIT 3 OFFSET 1.1 + 2.2 + 1.9/8; +} {"the value in register cannot be cast to integer"} + +# Return error as float in expression cannot be cast losslessly +do_execsql_test_in_memory_error_content offset-expr-cannot-be-cast-losslessly-3 { + SELECT 1 LIMIT 3 OFFSET 1.1 + 'a'; +} {"the value in register cannot be cast to integer"} + +do_execsql_test_in_memory_error_content offset-expr-invalid-data-type-1 { + SELECT 1 LIMIT 3 OFFSET 'a'; +} {"the value in register cannot be cast to integer"} + +do_execsql_test_in_memory_error_content offset-expr-invalid-data-type-2 { + SELECT 1 LIMIT 3 OFFSET NULL; +} {"the value in register cannot be cast to integer"} + +# Expression below evaluates to NULL (string → 0) +do_execsql_test_in_memory_error_content offset-expr-invalid-data-type-3 { + SELECT 1 LIMIT 3 OFFSET 1/'iwillbezero ;-; '; +} {"the value in register cannot be cast to integer"} diff --git a/testing/select.test b/testing/select.test index 5dff582f5..9c0152ffa 100755 --- a/testing/select.test +++ b/testing/select.test @@ -951,6 +951,63 @@ foreach {testname limit ans} { "SELECT id FROM users ORDER BY id LIMIT $limit" $ans } +do_execsql_test_on_specific_db {:memory:} limit-expr-can-be-cast-losslessly-1 { + SELECT 1 LIMIT 1.1 + 2.9; +} {1} + +do_execsql_test_on_specific_db {:memory:} limit-expr-can-be-cast-losslessly-2 { + CREATE TABLE T(a); + INSERT INTO T VALUES (1),(1),(1),(1); + SELECT * FROM T LIMIT 1.6/2 + 3.6/3 + 4*0.25; +} {1 +1 +1} + +# Numeric strings are cast to float. The final evaluation of the expression returns an int losslessly +do_execsql_test_on_specific_db {:memory:} limit-expr-can-be-cast-losslessly-3 { + CREATE TABLE T(a); + INSERT INTO T VALUES (1),(1),(1),(1); + SELECT * FROM T LIMIT '0.8' + '1.2' + 4*0.25; +} {1 +1 +1} + +# Invalid strings are cast to 0. So expression is valid +do_execsql_test_on_specific_db {:memory:} limit-expr-int-and-string { + SELECT 1 LIMIT 3/3 + 'test' + 4*'test are best'; +} {1} + +do_execsql_test_in_memory_error_content limit-expr-cannot-be-cast-losslessly-1 { + SELECT 1 LIMIT 1.1; +} {"the value in register cannot be cast to integer"} + +do_execsql_test_in_memory_error_content limit-expr-cannot-be-cast-losslessly-2 { + SELECT 1 LIMIT 1.1 + 2.2 + 1.9/8; +} {"the value in register cannot be cast to integer"} + +# Return error as float in the expression cannot be cast losslessly +do_execsql_test_in_memory_error_content limit-expr-cannot-be-cast-losslessly-3 { + SELECT 1 LIMIT 1.1 +'a'; +} {"the value in register cannot be cast to integer"} + +do_execsql_test_in_memory_error_content limit-expr-invalid-data-type-1 { + SELECT 1 LIMIT 'a'; +} {"the value in register cannot be cast to integer"} + +do_execsql_test_in_memory_error_content limit-expr-invalid-data-type-2 { + SELECT 1 LIMIT NULL; +} {"the value in register cannot be cast to integer"} + +# The expression below evaluates to NULL as string is cast to 0 +do_execsql_test_in_memory_error_content limit-expr-invalid-data-type-3 { + SELECT 1 LIMIT 1/'iwillbezero ;-; ' ; +} {"the value in register cannot be cast to integer"} + +# Expression is evaluated as NULL +do_execsql_test_in_memory_error_content limit-expr-invalid-data-type-4 { + SELECT 1 LIMIT 4+NULL; +} {"the value in register cannot be cast to integer"} + do_execsql_test_on_specific_db {:memory:} rowid-references { CREATE TABLE test_table (id INTEGER); INSERT INTO test_table VALUES (5),(5); From 230755eb2e9ea3859d3f4cb911898df6985790de Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Wed, 8 Oct 2025 14:09:28 -0300 Subject: [PATCH 164/428] shadow for AlterTable --- simulator/model/mod.rs | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 24c30eb2d..90c04485f 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; use sql_generation::model::{ query::{ Create, CreateIndex, Delete, Drop, Insert, Select, - alter_table::AlterTable, + alter_table::{AlterTable, AlterTableType}, select::{CompoundOperator, FromClause, ResultColumn, SelectInner}, transaction::{Begin, Commit, Rollback}, update::Update, @@ -21,7 +21,6 @@ use crate::{generation::Shadow, runner::env::ShadowTablesMut}; // This type represents the potential queries on the database. #[derive(Debug, Clone, Serialize, Deserialize, strum::EnumDiscriminants)] -#[strum_discriminants(derive(strum::VariantArray, strum::EnumIter))] pub enum Query { Create(Create), Select(Select), @@ -541,6 +540,43 @@ impl Shadow for AlterTable { type Result = anyhow::Result>>; fn shadow(&self, tables: &mut ShadowTablesMut<'_>) -> Self::Result { + let table = tables + .iter_mut() + .find(|t| t.name == self.table_name) + .ok_or_else(|| anyhow::anyhow!("Table {} does not exist", self.table_name))?; + + match &self.alter_table_type { + AlterTableType::RenameTo { new_name } => { + table.name = new_name.clone(); + } + AlterTableType::AddColumn { column } => { + table.columns.push(column.clone()); + table.rows.iter_mut().for_each(|row| { + row.push(SimValue(turso_core::Value::Null)); + }); + } + AlterTableType::AlterColumn { old, new } => { + // TODO: have to see correct behaviour with indexes to see if we should error out + // in case there is some sort of conflict with this change + let col = table.columns.iter_mut().find(|c| c.name == *old).unwrap(); + *col = new.clone(); + } + AlterTableType::RenameColumn { old, new } => { + let col = table.columns.iter_mut().find(|c| c.name == *old).unwrap(); + col.name = new.clone(); + } + AlterTableType::DropColumn { column_name } => { + let col_idx = table + .columns + .iter() + .position(|c| c.name == *column_name) + .unwrap(); + table.columns.remove(col_idx); + table.rows.iter_mut().for_each(|row| { + row.remove(col_idx); + }); + } + }; Ok(vec![]) } } From c072058e4b9d1d276d17f61f6c60058558151d73 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Wed, 8 Oct 2025 16:24:16 -0300 Subject: [PATCH 165/428] add Alter Table query generation in Sim --- simulator/generation/plan.rs | 3 ++- simulator/generation/property.rs | 15 +++++++-------- simulator/generation/query.rs | 7 +++++-- simulator/model/mod.rs | 1 + simulator/profiles/query.rs | 17 +++++++++++++++++ 5 files changed, 32 insertions(+), 11 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index d1bc9f0d3..6285b62f7 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -708,7 +708,7 @@ impl Display for InteractionStats { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, - "Read: {}, Write: {}, Delete: {}, Update: {}, Create: {}, CreateIndex: {}, Drop: {}, Begin: {}, Commit: {}, Rollback: {}", + "Read: {}, Insert: {}, Delete: {}, Update: {}, Create: {}, CreateIndex: {}, Drop: {}, Begin: {}, Commit: {}, Rollback: {}, Alter Table: {}", self.select_count, self.insert_count, self.delete_count, @@ -719,6 +719,7 @@ impl Display for InteractionStats { self.begin_count, self.commit_count, self.rollback_count, + self.alter_table_count, ) } } diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index a858a6516..b4ac73f25 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1369,13 +1369,7 @@ pub(super) fn remaining( stats: &InteractionStats, mvcc: bool, ) -> Remaining { - let total_weight = opts.select_weight - + opts.create_table_weight - + opts.create_index_weight - + opts.insert_weight - + opts.update_weight - + opts.delete_weight - + opts.drop_table_weight; + let total_weight = opts.total_weight(); let total_select = (max_interactions * opts.select_weight) / total_weight; let total_insert = (max_interactions * opts.insert_weight) / total_weight; @@ -1384,6 +1378,7 @@ pub(super) fn remaining( let total_delete = (max_interactions * opts.delete_weight) / total_weight; let total_update = (max_interactions * opts.update_weight) / total_weight; let total_drop = (max_interactions * opts.drop_table_weight) / total_weight; + let total_alter_table = (max_interactions * opts.alter_table_weight) / total_weight; let remaining_select = total_select .checked_sub(stats.select_count) @@ -1405,6 +1400,10 @@ pub(super) fn remaining( .unwrap_or_default(); let remaining_drop = total_drop.checked_sub(stats.drop_count).unwrap_or_default(); + let remaining_alter_table = total_alter_table + .checked_sub(stats.alter_table_count) + .unwrap_or_default(); + if mvcc { // TODO: index not supported yet for mvcc remaining_create_index = 0; @@ -1418,7 +1417,7 @@ pub(super) fn remaining( delete: remaining_delete, drop: remaining_drop, update: remaining_update, - alter_table: 0, // TODO: calculate remaining + alter_table: remaining_alter_table, } } diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index f0ab68f2a..627e5e40e 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -9,7 +9,9 @@ use rand::{ use sql_generation::{ generation::{Arbitrary, ArbitraryFrom, GenerationContext, query::SelectFree}, model::{ - query::{Create, CreateIndex, Delete, Insert, Select, update::Update}, + query::{ + Create, CreateIndex, Delete, Insert, Select, alter_table::AlterTable, update::Update, + }, table::Table, }, }; @@ -83,7 +85,8 @@ fn random_alter_table( rng: &mut R, conn_ctx: &impl GenerationContext, ) -> Query { - todo!() + assert!(!conn_ctx.tables().is_empty()); + Query::AlterTable(AlterTable::arbitrary(rng, conn_ctx)) } /// Possible queries that can be generated given the table state diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 90c04485f..d50e386d7 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -216,6 +216,7 @@ impl QueryDiscriminants { QueryDiscriminants::Delete, QueryDiscriminants::Drop, QueryDiscriminants::CreateIndex, + QueryDiscriminants::AlterTable, ]; } diff --git a/simulator/profiles/query.rs b/simulator/profiles/query.rs index a58c983e0..ee9583596 100644 --- a/simulator/profiles/query.rs +++ b/simulator/profiles/query.rs @@ -22,6 +22,8 @@ pub struct QueryProfile { pub delete_weight: u32, #[garde(skip)] pub drop_table_weight: u32, + #[garde(skip)] + pub alter_table_weight: u32, } impl Default for QueryProfile { @@ -35,10 +37,25 @@ impl Default for QueryProfile { update_weight: 20, delete_weight: 20, drop_table_weight: 2, + alter_table_weight: 2, } } } +impl QueryProfile { + /// Attention: edit this function when another weight is added + pub fn total_weight(&self) -> u32 { + self.select_weight + + self.create_table_weight + + self.create_index_weight + + self.insert_weight + + self.update_weight + + self.delete_weight + + self.drop_table_weight + + self.alter_table_weight + } +} + #[derive(Debug, Clone, strum::VariantArray)] pub enum QueryTypes { CreateTable, From ab152890ddd0243fb2f9150e39a364e197459d82 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 9 Oct 2025 17:33:13 -0300 Subject: [PATCH 166/428] adjust generation of `GTValue` and `LTValue` to accomodate for Null Values --- sql_generation/generation/predicate/binary.rs | 78 ++++++++----------- sql_generation/generation/predicate/mod.rs | 10 --- sql_generation/generation/value/cmp.rs | 50 +++--------- sql_generation/generation/value/mod.rs | 12 ++- sql_generation/model/table.rs | 2 +- 5 files changed, 55 insertions(+), 97 deletions(-) diff --git a/sql_generation/generation/predicate/binary.rs b/sql_generation/generation/predicate/binary.rs index e3b52d5ec..25d813c76 100644 --- a/sql_generation/generation/predicate/binary.rs +++ b/sql_generation/generation/predicate/binary.rs @@ -16,44 +16,6 @@ use crate::{ }; impl Predicate { - /// Generate an [ast::Expr::Binary] [Predicate] from a column and [SimValue] - pub fn from_column_binary( - rng: &mut R, - context: &C, - column_name: &str, - value: &SimValue, - ) -> Predicate { - let expr = one_of( - vec![ - Box::new(|_| { - Expr::Binary( - Box::new(Expr::Id(ast::Name::exact(column_name.to_string()))), - ast::Operator::Equals, - Box::new(Expr::Literal(value.into())), - ) - }), - Box::new(|rng| { - let gt_value = GTValue::arbitrary_from(rng, context, value).0; - Expr::Binary( - Box::new(Expr::Id(ast::Name::exact(column_name.to_string()))), - ast::Operator::Greater, - Box::new(Expr::Literal(gt_value.into())), - ) - }), - Box::new(|rng| { - let lt_value = LTValue::arbitrary_from(rng, context, value).0; - Expr::Binary( - Box::new(Expr::Id(ast::Name::exact(column_name.to_string()))), - ast::Operator::Less, - Box::new(Expr::Literal(lt_value.into())), - ) - }), - ], - rng, - ); - Predicate(expr) - } - /// Produces a true [ast::Expr::Binary] [Predicate] that is true for the provided row in the given table pub fn true_binary( rng: &mut R, @@ -117,7 +79,8 @@ impl Predicate { ( 1, Box::new(|rng| { - let lt_value = LTValue::arbitrary_from(rng, context, value).0; + let lt_value = + LTValue::arbitrary_from(rng, context, (value, column.column_type)).0; Some(Expr::Binary( Box::new(ast::Expr::Qualified( ast::Name::from_string(&table_name), @@ -131,7 +94,8 @@ impl Predicate { ( 1, Box::new(|rng| { - let gt_value = GTValue::arbitrary_from(rng, context, value).0; + let gt_value = + GTValue::arbitrary_from(rng, context, (value, column.column_type)).0; Some(Expr::Binary( Box::new(ast::Expr::Qualified( ast::Name::from_string(&table_name), @@ -223,7 +187,8 @@ impl Predicate { ) }), Box::new(|rng| { - let gt_value = GTValue::arbitrary_from(rng, context, value).0; + let gt_value = + GTValue::arbitrary_from(rng, context, (value, column.column_type)).0; Expr::Binary( Box::new(ast::Expr::Qualified( ast::Name::from_string(&table_name), @@ -234,7 +199,8 @@ impl Predicate { ) }), Box::new(|rng| { - let lt_value = LTValue::arbitrary_from(rng, context, value).0; + let lt_value = + LTValue::arbitrary_from(rng, context, (value, column.column_type)).0; Expr::Binary( Box::new(ast::Expr::Qualified( ast::Name::from_string(&table_name), @@ -283,7 +249,12 @@ impl SimplePredicate { ) }), Box::new(|rng| { - let lt_value = LTValue::arbitrary_from(rng, context, column_value).0; + let lt_value = LTValue::arbitrary_from( + rng, + context, + (column_value, column.column.column_type), + ) + .0; Expr::Binary( Box::new(Expr::Qualified( ast::Name::from_string(table_name), @@ -294,7 +265,12 @@ impl SimplePredicate { ) }), Box::new(|rng| { - let gt_value = GTValue::arbitrary_from(rng, context, column_value).0; + let gt_value = GTValue::arbitrary_from( + rng, + context, + (column_value, column.column.column_type), + ) + .0; Expr::Binary( Box::new(Expr::Qualified( ast::Name::from_string(table_name), @@ -341,7 +317,12 @@ impl SimplePredicate { ) }), Box::new(|rng| { - let gt_value = GTValue::arbitrary_from(rng, context, column_value).0; + let gt_value = GTValue::arbitrary_from( + rng, + context, + (column_value, column.column.column_type), + ) + .0; Expr::Binary( Box::new(ast::Expr::Qualified( ast::Name::from_string(table_name), @@ -352,7 +333,12 @@ impl SimplePredicate { ) }), Box::new(|rng| { - let lt_value = LTValue::arbitrary_from(rng, context, column_value).0; + let lt_value = LTValue::arbitrary_from( + rng, + context, + (column_value, column.column.column_type), + ) + .0; Expr::Binary( Box::new(ast::Expr::Qualified( ast::Name::from_string(table_name), diff --git a/sql_generation/generation/predicate/mod.rs b/sql_generation/generation/predicate/mod.rs index d0dd375bb..75546848a 100644 --- a/sql_generation/generation/predicate/mod.rs +++ b/sql_generation/generation/predicate/mod.rs @@ -76,16 +76,6 @@ impl ArbitraryFrom<(&T, bool)> for Predicate { } } -impl ArbitraryFrom<(&str, &SimValue)> for Predicate { - fn arbitrary_from( - rng: &mut R, - context: &C, - (column_name, value): (&str, &SimValue), - ) -> Self { - Predicate::from_column_binary(rng, context, column_name, value) - } -} - impl ArbitraryFrom<(&Table, &Vec)> for Predicate { fn arbitrary_from( rng: &mut R, diff --git a/sql_generation/generation/value/cmp.rs b/sql_generation/generation/value/cmp.rs index 567a59a5e..31c710acd 100644 --- a/sql_generation/generation/value/cmp.rs +++ b/sql_generation/generation/value/cmp.rs @@ -2,32 +2,16 @@ use turso_core::Value; use crate::{ generation::{ArbitraryFrom, GenerationContext}, - model::table::SimValue, + model::table::{ColumnType, SimValue}, }; pub struct LTValue(pub SimValue); -impl ArbitraryFrom<&Vec<&SimValue>> for LTValue { - fn arbitrary_from( - rng: &mut R, - context: &C, - values: &Vec<&SimValue>, - ) -> Self { - if values.is_empty() { - return Self(SimValue(Value::Null)); - } - - // Get value less than all values - let value = Value::exec_min(values.iter().map(|value| &value.0)); - Self::arbitrary_from(rng, context, &SimValue(value)) - } -} - -impl ArbitraryFrom<&SimValue> for LTValue { +impl ArbitraryFrom<(&SimValue, ColumnType)> for LTValue { fn arbitrary_from( rng: &mut R, _context: &C, - value: &SimValue, + (value, _col_type): (&SimValue, ColumnType), ) -> Self { let new_value = match &value.0 { Value::Integer(i) => Value::Integer(rng.random_range(i64::MIN..*i - 1)), @@ -69,7 +53,8 @@ impl ArbitraryFrom<&SimValue> for LTValue { Value::Blob(b) } } - _ => unreachable!(), + // A value with storage class NULL is considered less than any other value (including another value with storage class NULL) + Value::Null => Value::Null, }; Self(SimValue(new_value)) } @@ -77,27 +62,11 @@ impl ArbitraryFrom<&SimValue> for LTValue { pub struct GTValue(pub SimValue); -impl ArbitraryFrom<&Vec<&SimValue>> for GTValue { +impl ArbitraryFrom<(&SimValue, ColumnType)> for GTValue { fn arbitrary_from( rng: &mut R, context: &C, - values: &Vec<&SimValue>, - ) -> Self { - if values.is_empty() { - return Self(SimValue(Value::Null)); - } - // Get value greater than all values - let value = Value::exec_max(values.iter().map(|value| &value.0)); - - Self::arbitrary_from(rng, context, &SimValue(value)) - } -} - -impl ArbitraryFrom<&SimValue> for GTValue { - fn arbitrary_from( - rng: &mut R, - _context: &C, - value: &SimValue, + (value, col_type): (&SimValue, ColumnType), ) -> Self { let new_value = match &value.0 { Value::Integer(i) => Value::Integer(rng.random_range(*i..i64::MAX)), @@ -139,7 +108,10 @@ impl ArbitraryFrom<&SimValue> for GTValue { Value::Blob(b) } } - _ => unreachable!(), + Value::Null => { + // Any value is greater than NULL, except NULL + SimValue::arbitrary_from(rng, context, col_type).0 + } }; Self(SimValue(new_value)) } diff --git a/sql_generation/generation/value/mod.rs b/sql_generation/generation/value/mod.rs index e0c98ad84..5062c9e57 100644 --- a/sql_generation/generation/value/mod.rs +++ b/sql_generation/generation/value/mod.rs @@ -51,8 +51,18 @@ impl ArbitraryFrom<&ColumnType> for SimValue { ColumnType::Integer => Value::Integer(rng.random_range(i64::MIN..i64::MAX)), ColumnType::Float => Value::Float(rng.random_range(-1e10..1e10)), ColumnType::Text => Value::build_text(gen_random_text(rng)), - ColumnType::Blob => Value::Blob(gen_random_text(rng).as_bytes().to_vec()), + ColumnType::Blob => Value::Blob(gen_random_text(rng).into_bytes()), }; SimValue(value) } } + +impl ArbitraryFrom for SimValue { + fn arbitrary_from( + rng: &mut R, + context: &C, + column_type: ColumnType, + ) -> Self { + SimValue::arbitrary_from(rng, context, &column_type) + } +} diff --git a/sql_generation/model/table.rs b/sql_generation/model/table.rs index 1060b8bb8..ceb650900 100644 --- a/sql_generation/model/table.rs +++ b/sql_generation/model/table.rs @@ -98,7 +98,7 @@ impl Display for Column { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub enum ColumnType { Integer, Float, From 9c2edbb8b7c9d88836f87d02734a58194b7137d2 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 9 Oct 2025 19:05:50 -0300 Subject: [PATCH 167/428] create separate Index struct for sql generation --- simulator/generation/query.rs | 2 +- simulator/model/mod.rs | 12 ++++++--- sql_generation/generation/query.rs | 10 +++++--- sql_generation/model/query/create_index.rs | 30 +++++++++++++++++----- sql_generation/model/table.rs | 11 ++++++-- whopper/main.rs | 18 ++++++++----- 6 files changed, 58 insertions(+), 25 deletions(-) diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index 627e5e40e..45140dfe6 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -73,7 +73,7 @@ fn random_create_index( .expect("table should exist") .indexes .iter() - .any(|i| i == &create_index.index_name) + .any(|i| i.index_name == create_index.index_name) { create_index = CreateIndex::arbitrary(rng, conn_ctx); } diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index d50e386d7..9f125a15b 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -13,7 +13,7 @@ use sql_generation::model::{ transaction::{Begin, Commit, Rollback}, update::Update, }, - table::{JoinTable, JoinType, SimValue, Table, TableContext}, + table::{Index, JoinTable, JoinType, SimValue, Table, TableContext}, }; use turso_parser::ast::Distinctness; @@ -70,7 +70,9 @@ impl Query { | Query::Update(Update { table, .. }) | Query::Drop(Drop { table, .. }) | Query::CreateIndex(CreateIndex { - table_name: table, .. + index: Index { + table_name: table, .. + }, }) | Query::AlterTable(AlterTable { table_name: table, .. @@ -89,7 +91,9 @@ impl Query { | Query::Update(Update { table, .. }) | Query::Drop(Drop { table, .. }) | Query::CreateIndex(CreateIndex { - table_name: table, .. + index: Index { + table_name: table, .. + }, }) | Query::AlterTable(AlterTable { table_name: table, .. @@ -243,7 +247,7 @@ impl Shadow for CreateIndex { .find(|t| t.name == self.table_name) .unwrap() .indexes - .push(self.index_name.clone()); + .push(self.index.clone()); vec![] } } diff --git a/sql_generation/generation/query.rs b/sql_generation/generation/query.rs index 82d6296df..ef035e0be 100644 --- a/sql_generation/generation/query.rs +++ b/sql_generation/generation/query.rs @@ -11,7 +11,7 @@ use crate::model::query::select::{ use crate::model::query::update::Update; use crate::model::query::{Create, CreateIndex, Delete, Drop, Insert, Select}; use crate::model::table::{ - Column, JoinTable, JoinType, JoinedTable, Name, SimValue, Table, TableContext, + Column, Index, JoinTable, JoinType, JoinedTable, Name, SimValue, Table, TableContext, }; use indexmap::IndexSet; use itertools::Itertools; @@ -362,9 +362,11 @@ impl Arbitrary for CreateIndex { ); CreateIndex { - index_name, - table_name: table.name.clone(), - columns, + index: Index { + index_name, + table_name: table.name.clone(), + columns, + }, } } } diff --git a/sql_generation/model/query/create_index.rs b/sql_generation/model/query/create_index.rs index db9d15a04..55548114e 100644 --- a/sql_generation/model/query/create_index.rs +++ b/sql_generation/model/query/create_index.rs @@ -1,11 +1,26 @@ +use std::ops::{Deref, DerefMut}; + use serde::{Deserialize, Serialize}; -use turso_parser::ast::SortOrder; + +use crate::model::table::Index; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct CreateIndex { - pub index_name: String, - pub table_name: String, - pub columns: Vec<(String, SortOrder)>, + pub index: Index, +} + +impl Deref for CreateIndex { + type Target = Index; + + fn deref(&self) -> &Self::Target { + &self.index + } +} + +impl DerefMut for CreateIndex { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.index + } } impl std::fmt::Display for CreateIndex { @@ -13,9 +28,10 @@ impl std::fmt::Display for CreateIndex { write!( f, "CREATE INDEX {} ON {} ({})", - self.index_name, - self.table_name, - self.columns + self.index.index_name, + self.index.table_name, + self.index + .columns .iter() .map(|(name, order)| format!("{name} {order}")) .collect::>() diff --git a/sql_generation/model/table.rs b/sql_generation/model/table.rs index ceb650900..e51ef8172 100644 --- a/sql_generation/model/table.rs +++ b/sql_generation/model/table.rs @@ -3,7 +3,7 @@ use std::{fmt::Display, hash::Hash, ops::Deref}; use itertools::Itertools; use serde::{Deserialize, Serialize}; use turso_core::{numeric::Numeric, types}; -use turso_parser::ast::{self, ColumnConstraint}; +use turso_parser::ast::{self, ColumnConstraint, SortOrder}; use crate::model::query::predicate::Predicate; @@ -46,7 +46,7 @@ pub struct Table { pub name: String, pub columns: Vec, pub rows: Vec>, - pub indexes: Vec, + pub indexes: Vec, } impl Table { @@ -117,6 +117,13 @@ impl Display for ColumnType { } } +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct Index { + pub table_name: String, + pub index_name: String, + pub columns: Vec<(String, SortOrder)>, +} + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JoinedTable { /// table name diff --git a/whopper/main.rs b/whopper/main.rs index 16da0fbf8..67f3f5e6b 100644 --- a/whopper/main.rs +++ b/whopper/main.rs @@ -4,11 +4,13 @@ use rand::{Rng, RngCore, SeedableRng}; use rand_chacha::ChaCha8Rng; use sql_generation::{ generation::{Arbitrary, GenerationContext, Opts}, - model::query::{ - create::Create, create_index::CreateIndex, delete::Delete, drop_index::DropIndex, - insert::Insert, select::Select, update::Update, + model::{ + query::{ + create::Create, create_index::CreateIndex, delete::Delete, drop_index::DropIndex, + insert::Insert, select::Select, update::Update, + }, + table::{Column, ColumnType, Index, Table}, }, - model::table::{Column, ColumnType, Table}, }; use std::cell::RefCell; use std::collections::HashMap; @@ -306,9 +308,11 @@ fn create_initial_indexes(rng: &mut ChaCha8Rng, tables: &[Table]) -> Vec Date: Thu, 9 Oct 2025 19:29:14 -0300 Subject: [PATCH 168/428] fix Drop Column to only be generated if no columns conflict in Indexes --- sql_generation/generation/query.rs | 105 ++++++++++++++++++++++------- 1 file changed, 80 insertions(+), 25 deletions(-) diff --git a/sql_generation/generation/query.rs b/sql_generation/generation/query.rs index ef035e0be..94aafaae9 100644 --- a/sql_generation/generation/query.rs +++ b/sql_generation/generation/query.rs @@ -1,6 +1,6 @@ use crate::generation::{ - gen_random_text, pick_n_unique, pick_unique, Arbitrary, ArbitraryFrom, ArbitrarySized, - GenerationContext, + gen_random_text, pick_index, pick_n_unique, pick_unique, Arbitrary, ArbitraryFrom, + ArbitrarySized, GenerationContext, }; use crate::model::query::alter_table::{AlterTable, AlterTableType, AlterTableTypeDiscriminants}; use crate::model::query::predicate::Predicate; @@ -392,26 +392,52 @@ impl Arbitrary for Update { } } -impl Arbitrary for AlterTable { - fn arbitrary(rng: &mut R, context: &C) -> Self { - let table = pick(context.tables(), rng); - let choices: &'static [AlterTableTypeDiscriminants] = if table.columns.len() > 1 { - &[ - AlterTableTypeDiscriminants::RenameTo, - AlterTableTypeDiscriminants::AddColumn, - // AlterTableTypeDiscriminants::AlterColumn, - AlterTableTypeDiscriminants::RenameColumn, - AlterTableTypeDiscriminants::DropColumn, - ] - } else { - &[ - AlterTableTypeDiscriminants::RenameTo, - AlterTableTypeDiscriminants::AddColumn, - // AlterTableTypeDiscriminants::AlterColumn, - AlterTableTypeDiscriminants::RenameColumn, - ] - }; - let alter_table_type = match choices.choose(rng).unwrap() { +const ALTER_TABLE_ALL: &[AlterTableTypeDiscriminants] = &[ + AlterTableTypeDiscriminants::RenameTo, + AlterTableTypeDiscriminants::AddColumn, + // AlterTableTypeDiscriminants::AlterColumn, + AlterTableTypeDiscriminants::RenameColumn, + AlterTableTypeDiscriminants::DropColumn, +]; +const ALTER_TABLE_NO_DROP: &[AlterTableTypeDiscriminants] = &[ + AlterTableTypeDiscriminants::RenameTo, + AlterTableTypeDiscriminants::AddColumn, + // AlterTableTypeDiscriminants::AlterColumn, + AlterTableTypeDiscriminants::RenameColumn, +]; + +// TODO: Unfortunately this diff strategy allocates a couple of IndexSet's +// in the future maybe change this to be more efficient. This is currently acceptable because this function +// is only called for `DropColumn` +fn get_column_diff(table: &Table) -> IndexSet<&str> { + // Columns that are referenced in INDEXES cannot be dropped + let column_cannot_drop = table + .indexes + .iter() + .flat_map(|index| index.columns.iter().map(|(col_name, _)| col_name.as_str())) + .collect::>(); + if column_cannot_drop.len() == table.columns.len() { + // Optimization: all columns are present in indexes so we do not need to but the table column set + return IndexSet::new(); + } + + let column_set: IndexSet<_, std::hash::RandomState> = + IndexSet::from_iter(table.columns.iter().map(|col| col.name.as_str())); + + let diff = column_set + .difference(&column_cannot_drop) + .copied() + .collect::>(); + diff +} + +impl ArbitraryFrom<(&Table, &[AlterTableTypeDiscriminants])> for AlterTableType { + fn arbitrary_from( + rng: &mut R, + context: &C, + (table, choices): (&Table, &[AlterTableTypeDiscriminants]), + ) -> Self { + match choices.choose(rng).unwrap() { AlterTableTypeDiscriminants::RenameTo => AlterTableType::RenameTo { new_name: Name::arbitrary(rng, context).0, }, @@ -425,10 +451,39 @@ impl Arbitrary for AlterTable { old: pick(&table.columns, rng).name.clone(), new: Name::arbitrary(rng, context).0, }, - AlterTableTypeDiscriminants::DropColumn => AlterTableType::DropColumn { - column_name: pick(&table.columns, rng).name.clone(), - }, + AlterTableTypeDiscriminants::DropColumn => { + let col_diff = get_column_diff(table); + + if col_diff.is_empty() { + // Generate a DropColumn if we can drop a column + return AlterTableType::arbitrary_from( + rng, + context, + (table, ALTER_TABLE_NO_DROP), + ); + } + + let col_idx = pick_index(col_diff.len(), rng); + let col_name = col_diff.get_index(col_idx).unwrap(); + + AlterTableType::DropColumn { + column_name: col_name.to_string(), + } + } + } + } +} + +impl Arbitrary for AlterTable { + fn arbitrary(rng: &mut R, context: &C) -> Self { + let table = pick(context.tables(), rng); + let choices: &'static [AlterTableTypeDiscriminants] = if table.columns.len() > 1 { + ALTER_TABLE_ALL + } else { + ALTER_TABLE_NO_DROP }; + + let alter_table_type = AlterTableType::arbitrary_from(rng, context, (table, choices)); Self { table_name: table.name.clone(), alter_table_type, From 703efaa724fe11c1a1c14e1470c0d59bf5b49b46 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 9 Oct 2025 21:15:31 -0300 Subject: [PATCH 169/428] adjust Properties to skip Alter Table in certain conditions --- simulator/generation/property.rs | 41 +++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index b4ac73f25..cadd42155 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -12,6 +12,7 @@ use sql_generation::{ model::{ query::{ Create, Delete, Drop, Insert, Select, + alter_table::{AlterTable, AlterTableType}, predicate::Predicate, select::{CompoundOperator, CompoundSelect, ResultColumn, SelectBody, SelectInner}, transaction::{Begin, Commit, Rollback}, @@ -283,7 +284,7 @@ impl Property { // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) // - [x] The inserted row will not be deleted. // - [x] The inserted row will not be updated. - // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented) + // - [x] The table `t` will not be renamed, dropped, or altered. |rng: &mut R, ctx: &G, query_distr: &QueryDistribution, property: &Property| { let Property::InsertValuesSelect { insert, row_index, .. @@ -327,6 +328,10 @@ impl Property { // Cannot drop the table we are inserting None } + Query::AlterTable(AlterTable { table_name: t, .. }) if *t == table.name => { + // Cannot alter the table we are inserting + None + } _ => Some(query), } } @@ -334,7 +339,7 @@ impl Property { Property::DoubleCreateFailure { .. } => { // The interactions in the middle has the following constraints; // - [x] There will be no errors in the middle interactions.(best effort) - // - [ ] Table `t` will not be renamed or dropped.(todo: add this constraint once ALTER or DROP is implemented) + // - [x] Table `t` will not be renamed or dropped. |rng: &mut R, ctx: &G, query_distr: &QueryDistribution, property: &Property| { let Property::DoubleCreateFailure { create, .. } = property else { unreachable!() @@ -358,6 +363,10 @@ impl Property { // Cannot Drop the created table None } + Query::AlterTable(AlterTable { table_name: t, .. }) if *t == table.name => { + // Cannot alter the table we created + None + } _ => Some(query), } } @@ -365,7 +374,7 @@ impl Property { Property::DeleteSelect { .. } => { // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) // - [x] A row that holds for the predicate will not be inserted. - // - [ ] The table `t` will not be renamed, dropped, or altered. (todo: add this constraint once ALTER or DROP is implemented) + // - [x] The table `t` will not be renamed, dropped, or altered. |rng, ctx, query_distr, property| { let Property::DeleteSelect { @@ -412,13 +421,17 @@ impl Property { // Cannot Drop the same table None } + Query::AlterTable(AlterTable { table_name: t, .. }) if *t == table.name => { + // Cannot alter the same table + None + } _ => Some(query), } } } Property::DropSelect { .. } => { // - [x] There will be no errors in the middle interactions. (this constraint is impossible to check, so this is just best effort) - // - [-] The table `t` will not be created, no table will be renamed to `t`. (todo: update this constraint once ALTER is implemented) + // - [x] The table `t` will not be created, no table will be renamed to `t`. |rng, ctx, query_distr, property: &Property| { let Property::DropSelect { table: table_name, .. @@ -428,13 +441,19 @@ impl Property { }; let query = Query::arbitrary_from(rng, ctx, query_distr); - if let Query::Create(Create { table: t }) = &query - && t.name == *table_name - { - // - The table `t` will not be created - None - } else { - Some(query) + match &query { + Query::Create(Create { table: t }) if t.name == *table_name => { + // - The table `t` will not be created + None + } + Query::AlterTable(AlterTable { + table_name: t, + alter_table_type: AlterTableType::RenameTo { new_name }, + }) if t == table_name || new_name == table_name => { + // no table will be renamed to `t` + None + } + _ => Some(query), } } } From ca8be11a568770c0b2d38225e4ffb4a16e03ad77 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 9 Oct 2025 22:32:52 -0300 Subject: [PATCH 170/428] fix binary compare in simulator by taking into account NULL for certain compare ops --- sql_generation/model/table.rs | 40 +++++++++++++++++++++++++++++------ 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/sql_generation/model/table.rs b/sql_generation/model/table.rs index e51ef8172..dce2fdddf 100644 --- a/sql_generation/model/table.rs +++ b/sql_generation/model/table.rs @@ -186,19 +186,34 @@ impl Display for SimValue { impl SimValue { pub const FALSE: Self = SimValue(types::Value::Integer(0)); pub const TRUE: Self = SimValue(types::Value::Integer(1)); + pub const NULL: Self = SimValue(types::Value::Null); pub fn as_bool(&self) -> bool { Numeric::from(&self.0).try_into_bool().unwrap_or_default() } + #[inline] + fn is_null(&self) -> bool { + matches!(self.0, types::Value::Null) + } + + // The result of any binary operator is either a numeric value or NULL, except for the || concatenation operator, and the -> and ->> extract operators which can return values of any type. + // All operators generally evaluate to NULL when any operand is NULL, with specific exceptions as stated below. This is in accordance with the SQL92 standard. + // When paired with NULL: + // AND evaluates to 0 (false) when the other operand is false; and + // OR evaluates to 1 (true) when the other operand is true. + // The IS and IS NOT operators work like = and != except when one or both of the operands are NULL. In this case, if both operands are NULL, then the IS operator evaluates to 1 (true) and the IS NOT operator evaluates to 0 (false). If one operand is NULL and the other is not, then the IS operator evaluates to 0 (false) and the IS NOT operator is 1 (true). It is not possible for an IS or IS NOT expression to evaluate to NULL. + // The IS NOT DISTINCT FROM operator is an alternative spelling for the IS operator. Likewise, the IS DISTINCT FROM operator means the same thing as IS NOT. Standard SQL does not support the compact IS and IS NOT notation. Those compact forms are an SQLite extension. You must use the less readable IS NOT DISTINCT FROM and IS DISTINCT FROM operators in most other SQL database engines. + // TODO: support more predicates /// Returns a Result of a Binary Operation /// /// TODO: forget collations for now /// TODO: have the [ast::Operator::Equals], [ast::Operator::NotEquals], [ast::Operator::Greater], /// [ast::Operator::GreaterEquals], [ast::Operator::Less], [ast::Operator::LessEquals] function to be extracted - /// into its functions in turso_core so that it can be used here + /// into its functions in turso_core so that it can be used here. For now we just do the `not_null` check to avoid refactoring code in core pub fn binary_compare(&self, other: &Self, operator: ast::Operator) -> SimValue { + let not_null = !self.is_null() && !other.is_null(); match operator { ast::Operator::Add => self.0.exec_add(&other.0).into(), ast::Operator::And => self.0.exec_and(&other.0).into(), @@ -208,10 +223,10 @@ impl SimValue { ast::Operator::BitwiseOr => self.0.exec_bit_or(&other.0).into(), ast::Operator::BitwiseNot => todo!(), // TODO: Do not see any function usage of this operator in Core ast::Operator::Concat => self.0.exec_concat(&other.0).into(), - ast::Operator::Equals => (self == other).into(), + ast::Operator::Equals => not_null.then(|| self == other).into(), ast::Operator::Divide => self.0.exec_divide(&other.0).into(), - ast::Operator::Greater => (self > other).into(), - ast::Operator::GreaterEquals => (self >= other).into(), + ast::Operator::Greater => not_null.then(|| self > other).into(), + ast::Operator::GreaterEquals => not_null.then(|| self >= other).into(), // TODO: Test these implementations ast::Operator::Is => match (&self.0, &other.0) { (types::Value::Null, types::Value::Null) => true.into(), @@ -223,11 +238,11 @@ impl SimValue { .binary_compare(other, ast::Operator::Is) .unary_exec(ast::UnaryOperator::Not), ast::Operator::LeftShift => self.0.exec_shift_left(&other.0).into(), - ast::Operator::Less => (self < other).into(), - ast::Operator::LessEquals => (self <= other).into(), + ast::Operator::Less => not_null.then(|| self < other).into(), + ast::Operator::LessEquals => not_null.then(|| self <= other).into(), ast::Operator::Modulus => self.0.exec_remainder(&other.0).into(), ast::Operator::Multiply => self.0.exec_multiply(&other.0).into(), - ast::Operator::NotEquals => (self != other).into(), + ast::Operator::NotEquals => not_null.then(|| self != other).into(), ast::Operator::Or => self.0.exec_or(&other.0).into(), ast::Operator::RightShift => self.0.exec_shift_right(&other.0).into(), ast::Operator::Subtract => self.0.exec_subtract(&other.0).into(), @@ -372,7 +387,18 @@ impl From<&SimValue> for ast::Literal { } } +impl From> for SimValue { + #[inline] + fn from(value: Option) -> Self { + if value.is_none() { + return SimValue::NULL; + } + SimValue::from(value.unwrap()) + } +} + impl From for SimValue { + #[inline] fn from(value: bool) -> Self { if value { SimValue::TRUE From a18a4726857fde726d821f5e1792b5283310f809 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 9 Oct 2025 23:25:22 -0300 Subject: [PATCH 171/428] add option to disable `alter column` for differential testing --- simulator/runner/env.rs | 3 +++ sql_generation/generation/opts.rs | 17 +++++++++++++- sql_generation/generation/query.rs | 36 ++++++++++++++++++++++++------ 3 files changed, 48 insertions(+), 8 deletions(-) diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index 79497c38b..56c81fd83 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -351,6 +351,9 @@ impl SimulatorEnv { profile.io.enable = false; // Disable limits due to differences in return order from turso and rusqlite opts.disable_select_limit = true; + + // There is no `ALTER COLUMN` in SQLite + profile.query.gen_opts.query.alter_table.alter_column = false; } profile.validate().unwrap(); diff --git a/sql_generation/generation/opts.rs b/sql_generation/generation/opts.rs index 190033748..e9da87207 100644 --- a/sql_generation/generation/opts.rs +++ b/sql_generation/generation/opts.rs @@ -93,6 +93,8 @@ pub struct QueryOpts { pub from_clause: FromClauseOpts, #[garde(dive)] pub insert: InsertOpts, + #[garde(dive)] + pub alter_table: AlterTableOpts, } #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Validate)] @@ -198,6 +200,19 @@ impl Default for InsertOpts { } } +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Validate)] +#[serde(deny_unknown_fields)] +pub struct AlterTableOpts { + #[garde(skip)] + pub alter_column: bool, +} + +impl Default for AlterTableOpts { + fn default() -> Self { + Self { alter_column: true } + } +} + fn range_struct_min( min: T, ) -> impl FnOnce(&Range, &()) -> garde::Result { @@ -217,7 +232,7 @@ fn range_struct_min( } } -#[allow(dead_code)] +#[expect(dead_code)] fn range_struct_max( max: T, ) -> impl FnOnce(&Range, &()) -> garde::Result { diff --git a/sql_generation/generation/query.rs b/sql_generation/generation/query.rs index 94aafaae9..a21a37231 100644 --- a/sql_generation/generation/query.rs +++ b/sql_generation/generation/query.rs @@ -395,14 +395,25 @@ impl Arbitrary for Update { const ALTER_TABLE_ALL: &[AlterTableTypeDiscriminants] = &[ AlterTableTypeDiscriminants::RenameTo, AlterTableTypeDiscriminants::AddColumn, - // AlterTableTypeDiscriminants::AlterColumn, + AlterTableTypeDiscriminants::AlterColumn, AlterTableTypeDiscriminants::RenameColumn, AlterTableTypeDiscriminants::DropColumn, ]; const ALTER_TABLE_NO_DROP: &[AlterTableTypeDiscriminants] = &[ AlterTableTypeDiscriminants::RenameTo, AlterTableTypeDiscriminants::AddColumn, - // AlterTableTypeDiscriminants::AlterColumn, + AlterTableTypeDiscriminants::AlterColumn, + AlterTableTypeDiscriminants::RenameColumn, +]; +const ALTER_TABLE_NO_ALTER_COL: &[AlterTableTypeDiscriminants] = &[ + AlterTableTypeDiscriminants::RenameTo, + AlterTableTypeDiscriminants::AddColumn, + AlterTableTypeDiscriminants::RenameColumn, + AlterTableTypeDiscriminants::DropColumn, +]; +const ALTER_TABLE_NO_ALTER_COL_NO_DROP: &[AlterTableTypeDiscriminants] = &[ + AlterTableTypeDiscriminants::RenameTo, + AlterTableTypeDiscriminants::AddColumn, AlterTableTypeDiscriminants::RenameColumn, ]; @@ -459,7 +470,14 @@ impl ArbitraryFrom<(&Table, &[AlterTableTypeDiscriminants])> for AlterTableType return AlterTableType::arbitrary_from( rng, context, - (table, ALTER_TABLE_NO_DROP), + ( + table, + if context.opts().query.alter_table.alter_column { + ALTER_TABLE_NO_DROP + } else { + ALTER_TABLE_NO_ALTER_COL_NO_DROP + }, + ), ); } @@ -477,10 +495,14 @@ impl ArbitraryFrom<(&Table, &[AlterTableTypeDiscriminants])> for AlterTableType impl Arbitrary for AlterTable { fn arbitrary(rng: &mut R, context: &C) -> Self { let table = pick(context.tables(), rng); - let choices: &'static [AlterTableTypeDiscriminants] = if table.columns.len() > 1 { - ALTER_TABLE_ALL - } else { - ALTER_TABLE_NO_DROP + let choices = match ( + table.columns.len() > 1, + context.opts().query.alter_table.alter_column, + ) { + (true, true) => ALTER_TABLE_ALL, + (true, false) => ALTER_TABLE_NO_ALTER_COL, + (false, true) => ALTER_TABLE_NO_DROP, + (false, false) => ALTER_TABLE_NO_ALTER_COL_NO_DROP, }; let alter_table_type = AlterTableType::arbitrary_from(rng, context, (table, choices)); From 49e96afd39d5e43c4aec5ef66d20d44c445a4caf Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Fri, 10 Oct 2025 00:04:38 -0300 Subject: [PATCH 172/428] generate `ALTER COLUMN` --- simulator/model/mod.rs | 2 -- simulator/shrink/plan.rs | 2 ++ sql_generation/generation/query.rs | 29 ++++++++++++++++++++++++++--- 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 9f125a15b..a2120cd63 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -561,8 +561,6 @@ impl Shadow for AlterTable { }); } AlterTableType::AlterColumn { old, new } => { - // TODO: have to see correct behaviour with indexes to see if we should error out - // in case there is some sort of conflict with this change let col = table.columns.iter_mut().find(|c| c.name == *old).unwrap(); *col = new.clone(); } diff --git a/simulator/shrink/plan.rs b/simulator/shrink/plan.rs index 6da5d93e8..9f80f78cc 100644 --- a/simulator/shrink/plan.rs +++ b/simulator/shrink/plan.rs @@ -120,6 +120,8 @@ impl InteractionPlan { | Property::DeleteSelect { queries, .. } | Property::DropSelect { queries, .. } | Property::Queries { queries } => { + // Remove placeholder queries + queries.retain(|query| !matches!(query, Query::Placeholder)); extensional_queries.append(queries); } Property::AllTableHaveExpectedContent { tables } => { diff --git a/sql_generation/generation/query.rs b/sql_generation/generation/query.rs index a21a37231..17fe0f843 100644 --- a/sql_generation/generation/query.rs +++ b/sql_generation/generation/query.rs @@ -456,7 +456,31 @@ impl ArbitraryFrom<(&Table, &[AlterTableTypeDiscriminants])> for AlterTableType column: Column::arbitrary(rng, context), }, AlterTableTypeDiscriminants::AlterColumn => { - todo!(); + let col_diff = get_column_diff(table); + + if col_diff.is_empty() { + // Generate a DropColumn if we can drop a column + return AlterTableType::arbitrary_from( + rng, + context, + ( + table, + if choices.contains(&AlterTableTypeDiscriminants::DropColumn) { + ALTER_TABLE_NO_ALTER_COL + } else { + ALTER_TABLE_NO_ALTER_COL_NO_DROP + }, + ), + ); + } + + let col_idx = pick_index(col_diff.len(), rng); + let col_name = col_diff.get_index(col_idx).unwrap(); + + AlterTableType::AlterColumn { + old: col_name.to_string(), + new: Column::arbitrary(rng, context), + } } AlterTableTypeDiscriminants::RenameColumn => AlterTableType::RenameColumn { old: pick(&table.columns, rng).name.clone(), @@ -501,8 +525,7 @@ impl Arbitrary for AlterTable { ) { (true, true) => ALTER_TABLE_ALL, (true, false) => ALTER_TABLE_NO_ALTER_COL, - (false, true) => ALTER_TABLE_NO_DROP, - (false, false) => ALTER_TABLE_NO_ALTER_COL_NO_DROP, + (false, true) | (false, false) => ALTER_TABLE_NO_ALTER_COL_NO_DROP, }; let alter_table_type = AlterTableType::arbitrary_from(rng, context, (table, choices)); From b6c5fee300c722538b6975557d03ffa9ba1e586b Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Fri, 10 Oct 2025 11:24:25 -0300 Subject: [PATCH 173/428] do not count certain interactions in the InteractionPlan and correctly report the length when shrinking --- simulator/generation/plan.rs | 70 ++++++++++++++++++++++++++++++++++-- 1 file changed, 68 insertions(+), 2 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 6285b62f7..b5ff4e475 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -58,11 +58,19 @@ impl InteractionPlan { pub fn new_with(plan: Vec, mvcc: bool) -> Self { let len = plan .iter() - .filter(|interaction| !interaction.is_transaction()) + .filter(|interaction| !interaction.ignore()) .count(); Self { plan, mvcc, len } } + #[inline] + fn new_len(&self) -> usize { + self.plan + .iter() + .filter(|interaction| !interaction.ignore()) + .count() + } + /// Length of interactions that are not transaction statements #[inline] pub fn len(&self) -> usize { @@ -70,12 +78,59 @@ impl InteractionPlan { } pub fn push(&mut self, interactions: Interactions) { - if !interactions.is_transaction() { + if !interactions.ignore() { self.len += 1; } self.plan.push(interactions); } + pub fn remove(&mut self, index: usize) -> Interactions { + let interactions = self.plan.remove(index); + if !interactions.ignore() { + self.len -= 1; + } + interactions + } + + pub fn truncate(&mut self, len: usize) { + self.plan.truncate(len); + self.len = self.new_len(); + } + + pub fn retain_mut(&mut self, mut f: F) + where + F: FnMut(&mut Interactions) -> bool, + { + let f = |t: &mut Interactions| { + let ignore = t.ignore(); + let retain = f(t); + // removed an interaction that was not previously ignored + if !retain && !ignore { + self.len -= 1; + } + retain + }; + self.plan.retain_mut(f); + } + + #[expect(dead_code)] + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&Interactions) -> bool, + { + let f = |t: &Interactions| { + let ignore = t.ignore(); + let retain = f(t); + // removed an interaction that was not previously ignored + if !retain && !ignore { + self.len -= 1; + } + retain + }; + self.plan.retain(f); + self.len = self.new_len(); + } + /// Compute via diff computes a a plan from a given `.plan` file without the need to parse /// sql. This is possible because there are two versions of the plan file, one that is human /// readable and one that is serialized as JSON. Under watch mode, the users will be able to @@ -581,6 +636,17 @@ impl Interactions { InteractionsType::Query(..) | InteractionsType::Fault(..) => false, } } + + /// Interactions that are not counted/ignored in the InteractionPlan. + /// Used in InteractionPlan to not count certain interactions to its length, as they are just auxiliary. This allows more + /// meaningful interactions to be generation + fn ignore(&self) -> bool { + self.is_transaction() + || matches!( + self.interactions, + InteractionsType::Property(Property::AllTableHaveExpectedContent { .. }) + ) + } } impl Deref for Interactions { From 5f651961157f80d5131417dba74188b51d5114ec Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Fri, 10 Oct 2025 12:39:34 -0300 Subject: [PATCH 174/428] fix `load_bug` --- simulator/runner/bugbase.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/simulator/runner/bugbase.rs b/simulator/runner/bugbase.rs index dd0d6f432..8ebcd1bf7 100644 --- a/simulator/runner/bugbase.rs +++ b/simulator/runner/bugbase.rs @@ -293,22 +293,23 @@ impl BugBase { None => anyhow::bail!("No bugs found for seed {}", seed), Some(Bug::Unloaded { .. }) => { let plan = - std::fs::read_to_string(self.path.join(seed.to_string()).join("test.json")) + std::fs::read_to_string(self.path.join(seed.to_string()).join("plan.json")) .with_context(|| { format!( "should be able to read plan file at {}", - self.path.join(seed.to_string()).join("test.json").display() + self.path.join(seed.to_string()).join("plan.json").display() ) })?; let plan: InteractionPlan = serde_json::from_str(&plan) .with_context(|| "should be able to deserialize plan")?; - let shrunk_plan: Option = std::fs::read_to_string( - self.path.join(seed.to_string()).join("shrunk_test.json"), - ) - .with_context(|| "should be able to read shrunk plan file") - .and_then(|shrunk| serde_json::from_str(&shrunk).map_err(|e| anyhow!("{}", e))) - .ok(); + let shrunk_plan: Option = + std::fs::read_to_string(self.path.join(seed.to_string()).join("shrunk.json")) + .with_context(|| "should be able to read shrunk plan file") + .and_then(|shrunk| { + serde_json::from_str(&shrunk).map_err(|e| anyhow!("{}", e)) + }) + .ok(); let shrunk_plan: Option = shrunk_plan.and_then(|shrunk_plan| serde_json::from_str(&shrunk_plan).ok()); From d99e3f590f2c6093931d32a466da6a5e10b13de7 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Fri, 10 Oct 2025 13:06:58 -0300 Subject: [PATCH 175/428] `ALTER TABLE` should be added to `is_ddl` --- simulator/model/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index a2120cd63..20726cbbb 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -115,7 +115,7 @@ impl Query { pub fn is_ddl(&self) -> bool { matches!( self, - Self::Create(..) | Self::CreateIndex(..) | Self::Drop(..) + Self::Create(..) | Self::CreateIndex(..) | Self::Drop(..) | Self::AlterTable(..) ) } } From dca1137f8181ecd67ba2578435b3bfc734850a73 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Fri, 10 Oct 2025 13:08:41 -0300 Subject: [PATCH 176/428] rusqlite stop trying to get rows when we error with `InvalidColumnIndex` --- simulator/runner/execution.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs index 7bc9b40e4..3cc720967 100644 --- a/simulator/runner/execution.rs +++ b/simulator/runner/execution.rs @@ -283,16 +283,13 @@ fn limbo_integrity_check(conn: &Arc) -> Result<()> { Ok(()) } +#[instrument(skip(env, interaction, stack), fields(conn_index = interaction.connection_index, interaction = %interaction))] fn execute_interaction_rusqlite( env: &mut SimulatorEnv, interaction: &Interaction, stack: &mut Vec, ) -> turso_core::Result { - tracing::trace!( - "execute_interaction_rusqlite(connection_index={}, interaction={})", - interaction.connection_index, - interaction - ); + tracing::info!(""); let SimConnection::SQLiteConnection(conn) = &mut env.connections[interaction.connection_index] else { unreachable!() @@ -347,11 +344,19 @@ fn execute_query_rusqlite( match query { Query::Select(select) => { let mut stmt = connection.prepare(select.to_string().as_str())?; - let columns = stmt.column_count(); let rows = stmt.query_map([], |row| { let mut values = vec![]; - for i in 0..columns { - let value = row.get_unwrap(i); + for i in 0.. { + let value = match row.get(i) { + Ok(value) => value, + Err(err) => match err { + rusqlite::Error::InvalidColumnIndex(_) => break, + _ => { + tracing::error!(?err); + panic!("{err}") + } + }, + }; let value = match value { rusqlite::types::Value::Null => Value::Null, rusqlite::types::Value::Integer(i) => Value::Integer(i), From 773fa280631ceb3a1fed61f6776f01ffd76cfa1d Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Fri, 10 Oct 2025 15:51:31 -0300 Subject: [PATCH 177/428] workaround in sqlite for schema changes become visible to other connections --- simulator/runner/execution.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs index 3cc720967..1e30de520 100644 --- a/simulator/runner/execution.rs +++ b/simulator/runner/execution.rs @@ -341,6 +341,9 @@ fn execute_query_rusqlite( connection: &rusqlite::Connection, query: &Query, ) -> rusqlite::Result>> { + // https://sqlite.org/forum/forumpost/9fe5d047f0 + // Due to a bug in sqlite, we need to execute this query to clear the internal stmt cache so that schema changes become visible always to other connections + connection.query_one("SELECT * FROM pragma_user_version()", (), |_| Ok(()))?; match query { Query::Select(select) => { let mut stmt = connection.prepare(select.to_string().as_str())?; From c0f35cc17db0e4d4056b7bdc0d3c61f5878e8dad Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sat, 11 Oct 2025 14:03:53 -0300 Subject: [PATCH 178/428] disable `ALTER COLUMN` due to incompatibility with SQLITE INTEGRITY CHECK --- sql_generation/generation/opts.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sql_generation/generation/opts.rs b/sql_generation/generation/opts.rs index e9da87207..fcc818bbe 100644 --- a/sql_generation/generation/opts.rs +++ b/sql_generation/generation/opts.rs @@ -207,9 +207,12 @@ pub struct AlterTableOpts { pub alter_column: bool, } +#[expect(clippy::derivable_impls)] impl Default for AlterTableOpts { fn default() -> Self { - Self { alter_column: true } + Self { + alter_column: Default::default(), + } } } From 7b1b37095d5a0d32e52ee1d1e872229329b35f10 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 09:09:07 +0300 Subject: [PATCH 179/428] stress: Add busy timeout support with 5 second default Add `--busy-timeout` command-line option to turso-stress with a default value of 5000 ms. This helps prevent spurious database busy errors during concurrent stress testing and ensure that integrity checks are not skipped because of concurrent writes. --- stress/main.rs | 6 ++++++ stress/opts.rs | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/stress/main.rs b/stress/main.rs index 5091bd05b..f2ec5a179 100644 --- a/stress/main.rs +++ b/stress/main.rs @@ -493,6 +493,8 @@ async fn main() -> Result<(), Box> { let plan = plan.clone(); let conn = db.lock().await.connect()?; + conn.busy_timeout(std::time::Duration::from_millis(opts.busy_timeout))?; + conn.execute("PRAGMA data_sync_retry = 1", ()).await?; // Apply each DDL statement individually @@ -525,6 +527,8 @@ async fn main() -> Result<(), Box> { let handle = tokio::spawn(async move { let mut conn = db.lock().await.connect()?; + conn.busy_timeout(std::time::Duration::from_millis(opts.busy_timeout))?; + conn.execute("PRAGMA data_sync_retry = 1", ()).await?; println!("\rExecuting queries..."); @@ -542,6 +546,7 @@ async fn main() -> Result<(), Box> { } *db_guard = builder.build().await?; conn = db_guard.connect()?; + conn.busy_timeout(std::time::Duration::from_millis(opts.busy_timeout))?; } else if gen_bool(0.0) { // disabled // Reconnect to the database @@ -550,6 +555,7 @@ async fn main() -> Result<(), Box> { } let db_guard = db.lock().await; conn = db_guard.connect()?; + conn.busy_timeout(std::time::Duration::from_millis(opts.busy_timeout))?; } let sql = &plan.queries_per_thread[thread][query_index]; if !opts.silent { diff --git a/stress/opts.rs b/stress/opts.rs index fd53d7635..796f85847 100644 --- a/stress/opts.rs +++ b/stress/opts.rs @@ -66,4 +66,12 @@ pub struct Opts { /// Number of tables to use #[clap(long, help = "Select number of tables to create")] pub tables: Option, + + /// Busy timeout in milliseconds + #[clap( + long, + help = "Set busy timeout in milliseconds", + default_value_t = 5000 + )] + pub busy_timeout: u64, } From 2a02cafc737369bb324266902c5e4902a766d7a3 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 09:40:37 +0300 Subject: [PATCH 180/428] core/vdbe: Improve IdxDelete error messages with context We currently return the exact same error from two different IdxDelete paths. Improve the messages with context about what we're doing to make this error more debuggable. --- core/vdbe/execute.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 61d715b83..e56bd4fdb 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -6200,7 +6200,7 @@ pub fn op_idx_delete( .map(|i| &state.registers[i]) .collect::>(); return Err(LimboError::Corrupt(format!( - "IdxDelete: no matching index entry found for key {reg_values:?}" + "IdxDelete: no matching index entry found for key {reg_values:?} while seeking" ))); } state.pc += 1; @@ -6221,7 +6221,7 @@ pub fn op_idx_delete( .map(|i| &state.registers[i]) .collect::>(); return Err(LimboError::Corrupt(format!( - "IdxDelete: no matching index entry found for key {reg_values:?}" + "IdxDelete: no matching index entry found for key while verifying: {reg_values:?}" ))); } state.op_idx_delete_state = Some(OpIdxDeleteState::Deleting); From 171bcd83ec15e3f9b9d77ba4304dddfb65520aa2 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 13 Oct 2025 10:16:13 +0300 Subject: [PATCH 181/428] COMPAT.MD: note about WINDOW functions --- COMPAT.md | 1 + 1 file changed, 1 insertion(+) diff --git a/COMPAT.md b/COMPAT.md index d668c6f20..e14c04005 100644 --- a/COMPAT.md +++ b/COMPAT.md @@ -95,6 +95,7 @@ Turso aims to be fully compatible with SQLite, with opt-in features not supporte | UPDATE | Yes | | | VACUUM | No | | | WITH clause | Partial | No RECURSIVE, no MATERIALIZED, only SELECT supported in CTEs | +| WINDOW functions | Partial | only default frame definition, no window-specific functions (rank() etc) | #### [PRAGMA](https://www.sqlite.org/pragma.html) From 523b155df1314252cb5004dfb2be91ee240580dc Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 13 Oct 2025 10:34:33 +0300 Subject: [PATCH 182/428] Fix another "should have been rewritten" translation panic Closes #2158 --- core/translate/select.rs | 17 ++++++++++++++++- testing/values.test | 13 +++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/core/translate/select.rs b/core/translate/select.rs index 915c64242..ed216133a 100644 --- a/core/translate/select.rs +++ b/core/translate/select.rs @@ -505,7 +505,7 @@ fn prepare_one_select_plan( // Return the unoptimized query plan Ok(plan) } - ast::OneSelect::Values(values) => { + ast::OneSelect::Values(mut values) => { if !order_by.is_empty() { crate::bail_parse_error!("ORDER BY clause is not allowed with VALUES clause"); } @@ -522,6 +522,21 @@ fn prepare_one_select_plan( contains_aggregates: false, }); } + + for value_row in values.iter_mut() { + for value in value_row.iter_mut() { + bind_and_rewrite_expr( + value, + None, + None, + connection, + &mut program.param_ctx, + // Allow sqlite quirk of inserting "double-quoted" literals (which our AST maps as identifiers) + BindingBehavior::AllowUnboundIdentifiers, + )?; + } + } + let plan = SelectPlan { join_order: vec![], table_references: TableReferences::new(vec![], vec![]), diff --git a/testing/values.test b/testing/values.test index 8ebc33978..b0cc2464a 100755 --- a/testing/values.test +++ b/testing/values.test @@ -48,3 +48,16 @@ do_execsql_test_skip_lines_on_specific_db 1 {:memory:} values-double-quotes-subq .dbconfig dqs_dml on SELECT * FROM (VALUES ("subquery_string")); } {subquery_string} + +# regression test for: https://github.com/tursodatabase/turso/issues/2158 +do_execsql_test_on_specific_db {:memory:} values-between { + CREATE TABLE t0 (c0); + INSERT INTO t0 VALUES ((0 BETWEEN 0 AND 0)), (0); + SELECT * FROM t0; +} {1 +0} + +do_execsql_test_in_memory_any_error values-illegal-column-ref { + CREATE TABLE t0 (c0); + INSERT INTO t0 VALUES (c0); +} \ No newline at end of file From ee479d2e5206bffdbe7e596cf5107a2a3d2aed77 Mon Sep 17 00:00:00 2001 From: Avinash Sajjanshetty Date: Mon, 13 Oct 2025 13:47:25 +0530 Subject: [PATCH 183/428] Move all checksum tests behind the feature flag --- core/storage/checksum.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/core/storage/checksum.rs b/core/storage/checksum.rs index a67376048..4fb024f1c 100644 --- a/core/storage/checksum.rs +++ b/core/storage/checksum.rs @@ -94,6 +94,7 @@ impl Default for ChecksumContext { } #[cfg(test)] +#[cfg(feature = "checksum")] mod tests { use super::*; @@ -110,7 +111,6 @@ mod tests { } #[test] - #[cfg(feature = "checksum")] fn test_add_checksum_to_page() { let ctx = ChecksumContext::new(); let mut page = get_random_page(); @@ -139,7 +139,6 @@ mod tests { } #[test] - #[cfg(feature = "checksum")] fn test_verify_and_strip_checksum_mismatch() { let ctx = ChecksumContext::new(); let mut page = get_random_page(); @@ -165,7 +164,6 @@ mod tests { } #[test] - #[cfg(feature = "checksum")] fn test_verify_and_strip_checksum_corrupted_checksum() { let ctx = ChecksumContext::new(); let mut page = get_random_page(); From 4a2969447598704b729d70a58f795161b41c2756 Mon Sep 17 00:00:00 2001 From: Avinash Sajjanshetty Date: Mon, 13 Oct 2025 13:48:07 +0530 Subject: [PATCH 184/428] rename checksums tests appropriately --- core/storage/checksum.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/storage/checksum.rs b/core/storage/checksum.rs index 4fb024f1c..43bd0c5be 100644 --- a/core/storage/checksum.rs +++ b/core/storage/checksum.rs @@ -128,7 +128,7 @@ mod tests { } #[test] - fn test_verify_and_strip_checksum_valid() { + fn test_verify_checksum_valid() { let ctx = ChecksumContext::new(); let mut page = get_random_page(); @@ -139,7 +139,7 @@ mod tests { } #[test] - fn test_verify_and_strip_checksum_mismatch() { + fn test_verify_checksum_mismatch() { let ctx = ChecksumContext::new(); let mut page = get_random_page(); @@ -164,7 +164,7 @@ mod tests { } #[test] - fn test_verify_and_strip_checksum_corrupted_checksum() { + fn test_verify_checksum_corrupted_checksum() { let ctx = ChecksumContext::new(); let mut page = get_random_page(); From bd97c117ed22765fc5b733499c34e36f275b305a Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 12:19:09 +0300 Subject: [PATCH 185/428] whopper: Remove debug printouts --- whopper/main.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/whopper/main.rs b/whopper/main.rs index 16da0fbf8..0192c9020 100644 --- a/whopper/main.rs +++ b/whopper/main.rs @@ -379,11 +379,6 @@ fn create_initial_schema(rng: &mut ChaCha8Rng) -> Vec { schema.push(Create { table }); } - - for create in &schema { - println!("{create}"); - } - schema } From 59a1c2ae2e6ef5e4be3c596fe78504affc810ee4 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 13 Oct 2025 13:12:33 +0300 Subject: [PATCH 186/428] Disallow joining more than 63 tables Returns an error instead of panicing --- core/translate/optimizer/mod.rs | 6 +++++ core/translate/plan.rs | 5 ++++ .../query_processing/test_read_path.rs | 23 +++++++++++++++++++ 3 files changed, 34 insertions(+) diff --git a/core/translate/optimizer/mod.rs b/core/translate/optimizer/mod.rs index f053e7e7e..81dde810c 100644 --- a/core/translate/optimizer/mod.rs +++ b/core/translate/optimizer/mod.rs @@ -187,6 +187,12 @@ fn optimize_table_access( order_by: &mut Vec<(Box, SortOrder)>, group_by: &mut Option, ) -> Result>> { + if table_references.joined_tables().len() > TableReferences::MAX_JOINED_TABLES { + crate::bail_parse_error!( + "Only up to {} tables can be joined", + TableReferences::MAX_JOINED_TABLES + ); + } let access_methods_arena = RefCell::new(Vec::new()); let maybe_order_target = compute_order_target(order_by, group_by.as_mut()); let constraints_per_table = diff --git a/core/translate/plan.rs b/core/translate/plan.rs index 21fa84b69..2aef7d507 100644 --- a/core/translate/plan.rs +++ b/core/translate/plan.rs @@ -583,6 +583,11 @@ pub struct TableReferences { } impl TableReferences { + /// The maximum number of tables that can be joined together in a query. + /// This limit is arbitrary, although we currently use a u128 to represent the [crate::translate::planner::TableMask], + /// which can represent up to 128 tables. + /// Even at 63 tables we currently cannot handle the optimization performantly, hence the arbitrary cap. + pub const MAX_JOINED_TABLES: usize = 63; pub fn new( joined_tables: Vec, outer_query_refs: Vec, diff --git a/tests/integration/query_processing/test_read_path.rs b/tests/integration/query_processing/test_read_path.rs index 98874283b..5044be19d 100644 --- a/tests/integration/query_processing/test_read_path.rs +++ b/tests/integration/query_processing/test_read_path.rs @@ -897,3 +897,26 @@ fn test_multiple_connections_visibility() -> anyhow::Result<()> { assert_eq!(rows, vec![vec![rusqlite::types::Value::Integer(2)]]); Ok(()) } + +#[test] +/// Test that we can only join up to 63 tables, and trying to join more should fail with an error instead of panicing. +fn test_max_joined_tables_limit() { + let tmp_db = TempDatabase::new("test_max_joined_tables_limit", false); + let conn = tmp_db.connect_limbo(); + + // Create 64 tables + for i in 0..64 { + conn.execute(&format!("CREATE TABLE t{} (id INTEGER)", i)).unwrap(); + } + + // Try to join 64 tables - should fail + let mut sql = String::from("SELECT * FROM t0"); + for i in 1..64 { + sql.push_str(&format!(" JOIN t{} ON t{}.id = t0.id", i, i)); + } + + let Err(LimboError::ParseError(result)) = conn.prepare(&sql) else { + panic!("Expected an error but got no error"); + }; + assert!(result.contains("Only up to 63 tables can be joined")); +} From e055ed9a8dc097873e092fd06dcb3f23e0a3002e Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 13 Oct 2025 13:30:26 +0300 Subject: [PATCH 187/428] Allow arbitrarily many columns in a table Use roaring bitmaps because ColumnUsedMask is likely to be sparsely populated. --- Cargo.lock | 1 + core/Cargo.toml | 1 + core/translate/plan.rs | 20 ++---- .../query_processing/test_read_path.rs | 72 ++++++++++++++++++- 4 files changed, 79 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4df9a828e..da473a8fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4450,6 +4450,7 @@ dependencies = [ "rand_chacha 0.9.0", "regex", "regex-syntax", + "roaring", "rstest", "rusqlite", "rustix 1.0.7", diff --git a/core/Cargo.toml b/core/Cargo.toml index 7fa6afb78..a795f9d8c 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -83,6 +83,7 @@ turso_parser = { workspace = true } aegis = "0.9.0" twox-hash = "2.1.1" intrusive-collections = "0.9.7" +roaring = "0.11.2" [build-dependencies] chrono = { workspace = true, default-features = false } diff --git a/core/translate/plan.rs b/core/translate/plan.rs index 2aef7d507..3d61f21c8 100644 --- a/core/translate/plan.rs +++ b/core/translate/plan.rs @@ -757,33 +757,25 @@ impl TableReferences { } } -#[derive(Clone, Debug, Default, PartialEq, Eq)] +#[derive(Clone, Debug, Default, PartialEq)] #[repr(transparent)] -pub struct ColumnUsedMask(u128); +pub struct ColumnUsedMask(roaring::RoaringBitmap); impl ColumnUsedMask { pub fn set(&mut self, index: usize) { - assert!( - index < 128, - "ColumnUsedMask only supports up to 128 columns" - ); - self.0 |= 1 << index; + self.0.insert(index as u32); } pub fn get(&self, index: usize) -> bool { - assert!( - index < 128, - "ColumnUsedMask only supports up to 128 columns" - ); - self.0 & (1 << index) != 0 + self.0.contains(index as u32) } pub fn contains_all_set_bits_of(&self, other: &Self) -> bool { - self.0 & other.0 == other.0 + other.0.is_subset(&self.0) } pub fn is_empty(&self) -> bool { - self.0 == 0 + self.0.is_empty() } } diff --git a/tests/integration/query_processing/test_read_path.rs b/tests/integration/query_processing/test_read_path.rs index 5044be19d..3e3156cf9 100644 --- a/tests/integration/query_processing/test_read_path.rs +++ b/tests/integration/query_processing/test_read_path.rs @@ -1,5 +1,5 @@ use crate::common::{limbo_exec_rows, TempDatabase}; -use turso_core::{StepResult, Value}; +use turso_core::{LimboError, StepResult, Value}; #[test] fn test_statement_reset_bind() -> anyhow::Result<()> { @@ -920,3 +920,73 @@ fn test_max_joined_tables_limit() { }; assert!(result.contains("Only up to 63 tables can be joined")); } + +#[test] +/// Test that we can create and select from a table with 1000 columns. +fn test_many_columns() { + let mut create_sql = String::from("CREATE TABLE test ("); + for i in 0..1000 { + if i > 0 { + create_sql.push_str(", "); + } + create_sql.push_str(&format!("col{} INTEGER", i)); + } + create_sql.push(')'); + + let tmp_db = TempDatabase::new("test_many_columns", false); + let conn = tmp_db.connect_limbo(); + conn.execute(&create_sql).unwrap(); + + // Insert a row with values 0-999 + let mut insert_sql = String::from("INSERT INTO test VALUES ("); + for i in 0..1000 { + if i > 0 { + insert_sql.push_str(", "); + } + insert_sql.push_str(&i.to_string()); + } + insert_sql.push(')'); + conn.execute(&insert_sql).unwrap(); + + // Select every 100th column + let mut select_sql = String::from("SELECT "); + let mut first = true; + for i in (0..1000).step_by(100) { + if !first { + select_sql.push_str(", "); + } + select_sql.push_str(&format!("col{}", i)); + first = false; + } + select_sql.push_str(" FROM test"); + + let mut rows = Vec::new(); + let mut stmt = conn.prepare(&select_sql).unwrap(); + loop { + match stmt.step().unwrap() { + StepResult::Row => { + let row = stmt.row().unwrap(); + rows.push(row.get_values().cloned().collect::>()); + } + StepResult::IO => stmt.run_once().unwrap(), + _ => break, + } + } + + // Verify we got values 0,100,200,...,900 + assert_eq!( + rows, + vec![vec![ + turso_core::Value::Integer(0), + turso_core::Value::Integer(100), + turso_core::Value::Integer(200), + turso_core::Value::Integer(300), + turso_core::Value::Integer(400), + turso_core::Value::Integer(500), + turso_core::Value::Integer(600), + turso_core::Value::Integer(700), + turso_core::Value::Integer(800), + turso_core::Value::Integer(900), + ]] + ); +} From 3669437482cfc1a5e7cec5689fd8c30f27e3d670 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 13 Oct 2025 14:03:34 +0300 Subject: [PATCH 188/428] Add vibecoded tests for ColumnUsedMask --- core/translate/plan.rs | 124 ++++++++++++++++++ .../query_processing/test_read_path.rs | 3 +- 2 files changed, 126 insertions(+), 1 deletion(-) diff --git a/core/translate/plan.rs b/core/translate/plan.rs index 3d61f21c8..98cd8e2e2 100644 --- a/core/translate/plan.rs +++ b/core/translate/plan.rs @@ -1258,3 +1258,127 @@ pub struct WindowFunction { /// The expression from which the function was resolved. pub original_expr: Expr, } + +#[cfg(test)] +mod tests { + use super::*; + use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaCha8Rng, + }; + + #[test] + fn test_column_used_mask_empty() { + let mask = ColumnUsedMask::default(); + assert!(mask.is_empty()); + + let mut mask2 = ColumnUsedMask::default(); + mask2.set(0); + assert!(!mask2.is_empty()); + } + + #[test] + fn test_column_used_mask_set_and_get() { + let mut mask = ColumnUsedMask::default(); + + let max_columns = 10000; + let mut set_indices = Vec::new(); + let mut rng = ChaCha8Rng::seed_from_u64( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + ); + + for i in 0..max_columns { + if rng.next_u32() % 3 == 0 { + set_indices.push(i); + mask.set(i); + } + } + + // Verify set bits are present + for &i in &set_indices { + assert!(mask.get(i), "Expected bit {i} to be set"); + } + + // Verify unset bits are not present + for i in 0..max_columns { + if !set_indices.contains(&i) { + assert!(!mask.get(i), "Expected bit {i} to not be set"); + } + } + } + + #[test] + fn test_column_used_mask_subset_relationship() { + let mut full_mask = ColumnUsedMask::default(); + let mut subset_mask = ColumnUsedMask::default(); + + let max_columns = 5000; + let mut rng = ChaCha8Rng::seed_from_u64( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + ); + + // Create a pattern where subset has fewer bits + for i in 0..max_columns { + if rng.next_u32() % 5 == 0 { + full_mask.set(i); + if i % 2 == 0 { + subset_mask.set(i); + } + } + } + + // full_mask contains all bits of subset_mask + assert!(full_mask.contains_all_set_bits_of(&subset_mask)); + + // subset_mask does not contain all bits of full_mask + assert!(!subset_mask.contains_all_set_bits_of(&full_mask)); + + // A mask contains itself + assert!(full_mask.contains_all_set_bits_of(&full_mask)); + assert!(subset_mask.contains_all_set_bits_of(&subset_mask)); + } + + #[test] + fn test_column_used_mask_empty_subset() { + let mut mask = ColumnUsedMask::default(); + for i in (0..1000).step_by(7) { + mask.set(i); + } + + let empty_mask = ColumnUsedMask::default(); + + // Empty mask is subset of everything + assert!(mask.contains_all_set_bits_of(&empty_mask)); + assert!(empty_mask.contains_all_set_bits_of(&empty_mask)); + } + + #[test] + fn test_column_used_mask_sparse_indices() { + let mut sparse_mask = ColumnUsedMask::default(); + + // Test with very sparse, large indices + let sparse_indices = vec![0, 137, 1042, 5389, 10000, 50000, 100000, 500000, 1000000]; + + for &idx in &sparse_indices { + sparse_mask.set(idx); + } + + for &idx in &sparse_indices { + assert!(sparse_mask.get(idx), "Expected bit {idx} to be set"); + } + + // Check some indices that shouldn't be set + let unset_indices = vec![1, 100, 1000, 5000, 25000, 75000, 250000, 750000]; + for &idx in &unset_indices { + assert!(!sparse_mask.get(idx), "Expected bit {idx} to not be set"); + } + + assert!(!sparse_mask.is_empty()); + } +} diff --git a/tests/integration/query_processing/test_read_path.rs b/tests/integration/query_processing/test_read_path.rs index 3e3156cf9..a0e930713 100644 --- a/tests/integration/query_processing/test_read_path.rs +++ b/tests/integration/query_processing/test_read_path.rs @@ -906,7 +906,8 @@ fn test_max_joined_tables_limit() { // Create 64 tables for i in 0..64 { - conn.execute(&format!("CREATE TABLE t{} (id INTEGER)", i)).unwrap(); + conn.execute(&format!("CREATE TABLE t{} (id INTEGER)", i)) + .unwrap(); } // Try to join 64 tables - should fail From 2baea154b0df1a3f286a84fc45132a6aab5c16db Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 13 Oct 2025 14:07:17 +0300 Subject: [PATCH 189/428] clippy --- tests/integration/query_processing/test_read_path.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/query_processing/test_read_path.rs b/tests/integration/query_processing/test_read_path.rs index a0e930713..4b08df3b1 100644 --- a/tests/integration/query_processing/test_read_path.rs +++ b/tests/integration/query_processing/test_read_path.rs @@ -906,14 +906,14 @@ fn test_max_joined_tables_limit() { // Create 64 tables for i in 0..64 { - conn.execute(&format!("CREATE TABLE t{} (id INTEGER)", i)) + conn.execute(format!("CREATE TABLE t{i} (id INTEGER)")) .unwrap(); } // Try to join 64 tables - should fail let mut sql = String::from("SELECT * FROM t0"); for i in 1..64 { - sql.push_str(&format!(" JOIN t{} ON t{}.id = t0.id", i, i)); + sql.push_str(&format!(" JOIN t{i} ON t{i}.id = t0.id")); } let Err(LimboError::ParseError(result)) = conn.prepare(&sql) else { @@ -930,7 +930,7 @@ fn test_many_columns() { if i > 0 { create_sql.push_str(", "); } - create_sql.push_str(&format!("col{} INTEGER", i)); + create_sql.push_str(&format!("col{i} INTEGER")); } create_sql.push(')'); @@ -956,7 +956,7 @@ fn test_many_columns() { if !first { select_sql.push_str(", "); } - select_sql.push_str(&format!("col{}", i)); + select_sql.push_str(&format!("col{i}")); first = false; } select_sql.push_str(" FROM test"); From 57a06835bf8811ab22d528a7bd99dd564aac3ba1 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 12 Oct 2025 05:26:40 +0530 Subject: [PATCH 190/428] add test and fmt and clippy i was stupid remove comment --- core/translate/update.rs | 12 +++++------- testing/update.test | 10 ++++++++++ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/core/translate/update.rs b/core/translate/update.rs index 6a32f8ad8..a7dc21299 100644 --- a/core/translate/update.rs +++ b/core/translate/update.rs @@ -185,7 +185,10 @@ pub fn prepare_update_plan( Table::BTree(btree_table) => Table::BTree(btree_table.clone()), _ => unreachable!(), }, - identifier: table_name.to_string(), + identifier: body.tbl_name.alias.as_ref().map_or_else( + || table_name.to_string(), + |alias| alias.as_str().to_string(), + ), internal_id: program.table_reference_counter.next(), op: build_scan_op(&table, iter_dir), join_info: None, @@ -314,12 +317,7 @@ pub fn prepare_update_plan( Table::BTree(btree_table) => Table::BTree(btree_table.clone()), _ => unreachable!(), }, - // get em aliases - identifier: body.tbl_name.alias.as_ref().map_or_else( - || table_name.to_string(), - |alias| alias.as_str().to_string(), - ), - + identifier: table_name.to_string(), internal_id, op: build_scan_op(&table, iter_dir), join_info: None, diff --git a/testing/update.test b/testing/update.test index d55d98d0e..258cecde3 100755 --- a/testing/update.test +++ b/testing/update.test @@ -386,3 +386,13 @@ do_execsql_test_on_specific_db {:memory:} can-update-rowid-directly { UPDATE test SET rowid = 5; SELECT rowid, name from test; } {5|test} + +# https://github.com/tursodatabase/turso/issues/3678 +do_execsql_test_on_specific_db {:memory:} update-alias-visibility-in-where-clause { + create table t(a); + insert into t values (0); + insert into t values (5); + update t as tt set a = 1 where tt.a = 0; + select * from t; +} {1 +5} \ No newline at end of file From 6b3fcfd3d414a7f35ec484c2107d27916083c342 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Mon, 13 Oct 2025 19:11:55 +0530 Subject: [PATCH 191/428] explicit column aliase must have preference --- core/translate/expr.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index cb02e2e35..fc57a11e0 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -3446,12 +3446,11 @@ pub fn bind_and_rewrite_expr<'a>( if binding_behavior == BindingBehavior::TryResultColumnsFirst { if let Some(result_columns) = result_columns { for result_column in result_columns.iter() { - if result_column - .name(referenced_tables) - .is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id)) - { - *expr = result_column.expr.clone(); - return Ok(WalkControl::Continue); + if let Some(alias) = &result_column.alias { + if alias.eq_ignore_ascii_case(&normalized_id) { + *expr = result_column.expr.clone(); + return Ok(WalkControl::Continue); + } } } } From 41b7693c1361630360f0f75e572eeb195d714cf8 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Mon, 13 Oct 2025 19:12:35 +0530 Subject: [PATCH 192/428] add test --- testing/orderby.test | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/testing/orderby.test b/testing/orderby.test index e173d946b..13ca8c762 100755 --- a/testing/orderby.test +++ b/testing/orderby.test @@ -239,4 +239,13 @@ do_execsql_test_on_specific_db {:memory:} orderby_alias_precedence { INSERT INTO t VALUES (1,200),(2,100); SELECT x AS y, y AS x FROM t ORDER BY x; } {2|100 -1|200} \ No newline at end of file +1|200} + +# https://github.com/tursodatabase/turso/issues/3684 +do_execsql_test_on_specific_db {:memory:} orderby_alias_shadows_column { + CREATE TABLE t(a, b); + INSERT INTO t VALUES (1, 1), (2, 2), (3, 3); + SELECT a, -b AS a FROM t ORDER BY a; +} {3|-3 +2|-2 +1|-1} From 9d6066381e2a2201b6d047cefde5ac6dc3ffc200 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 13 Oct 2025 14:48:37 +0300 Subject: [PATCH 193/428] sql_generation: Fix implementation of LTValue and GTValue Current implementation can generate strings with control characters like null terminators that result in parse errors. --- sql_generation/generation/value/cmp.rs | 115 +++++++++++++++++++------ 1 file changed, 90 insertions(+), 25 deletions(-) diff --git a/sql_generation/generation/value/cmp.rs b/sql_generation/generation/value/cmp.rs index 31c710acd..400cfc14c 100644 --- a/sql_generation/generation/value/cmp.rs +++ b/sql_generation/generation/value/cmp.rs @@ -23,18 +23,7 @@ impl ArbitraryFrom<(&SimValue, ColumnType)> for LTValue { t.pop(); Value::build_text(t) } else { - let mut t = t.chars().map(|c| c as u32).collect::>(); - let index = rng.random_range(0..t.len()); - t[index] -= 1; - // Mutate the rest of the string - for val in t.iter_mut().skip(index + 1) { - *val = rng.random_range('a' as u32..='z' as u32); - } - let t = t - .into_iter() - .map(|c| char::from_u32(c).unwrap_or('z')) - .collect::(); - Value::build_text(t) + Value::build_text(mutate_string(&t, rng, MutationType::Decrement)) } } Value::Blob(b) => { @@ -75,21 +64,14 @@ impl ArbitraryFrom<(&SimValue, ColumnType)> for GTValue { // Either lengthen the string, or make at least one character smaller and mutate the rest let mut t = value.to_string(); if rng.random_bool(0.01) { - t.push(rng.random_range(0..=255) as u8 as char); + if rng.random_bool(0.5) { + t.push(rng.random_range(UPPERCASE_A..=UPPERCASE_Z) as u8 as char); + } else { + t.push(rng.random_range(LOWERCASE_A..=LOWERCASE_Z) as u8 as char); + } Value::build_text(t) } else { - let mut t = t.chars().map(|c| c as u32).collect::>(); - let index = rng.random_range(0..t.len()); - t[index] += 1; - // Mutate the rest of the string - for val in t.iter_mut().skip(index + 1) { - *val = rng.random_range('a' as u32..='z' as u32); - } - let t = t - .into_iter() - .map(|c| char::from_u32(c).unwrap_or('a')) - .collect::(); - Value::build_text(t) + Value::build_text(mutate_string(&t, rng, MutationType::Increment)) } } Value::Blob(b) => { @@ -116,3 +98,86 @@ impl ArbitraryFrom<(&SimValue, ColumnType)> for GTValue { Self(SimValue(new_value)) } } + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +enum MutationType { + Decrement, + Increment, +} + +const UPPERCASE_A: u32 = 'A' as u32; +const UPPERCASE_Z: u32 = 'Z' as u32; +const LOWERCASE_A: u32 = 'a' as u32; +const LOWERCASE_Z: u32 = 'z' as u32; + +fn mutate_string( + t: &str, + rng: &mut R, + mutation_type: MutationType, +) -> String { + let mut chars = t.chars().map(|c| c as u32).collect::>(); + let mut index; + let mut max_loops = 100; + loop { + index = rng.random_range(0..chars.len()); + if chars[index] > UPPERCASE_A && chars[index] < UPPERCASE_Z + || chars[index] > LOWERCASE_A && chars[index] < LOWERCASE_Z + { + break; + } + max_loops -= 1; + if max_loops == 0 { + panic!("Failed to find a printable character to decrement"); + } + } + + if mutation_type == MutationType::Decrement { + chars[index] -= 1; + } else { + chars[index] += 1; + } + + // Mutate the rest of the string with printable ASCII characters + for val in chars.iter_mut().skip(index + 1) { + if rng.random_bool(0.5) { + *val = rng.random_range(UPPERCASE_A..=UPPERCASE_Z); + } else { + *val = rng.random_range(LOWERCASE_A..=LOWERCASE_Z); + } + } + + chars + .into_iter() + .map(|c| char::from_u32(c).unwrap()) + .collect::() +} + +#[cfg(test)] +mod tests { + use anarchist_readable_name_generator_lib::readable_name; + + use super::*; + + #[test] + fn test_mutate_string_fuzz() { + let mut rng = rand::rng(); + for _ in 0..1000 { + let mut t = readable_name(); + while !t.is_ascii() { + t = readable_name(); + } + let t2 = mutate_string(&t, &mut rng, MutationType::Decrement); + assert!(t2.is_ascii(), "{}", t); + assert!(t2 < t); + } + for _ in 0..1000 { + let mut t = readable_name(); + while !t.is_ascii() { + t = readable_name(); + } + let t2 = mutate_string(&t, &mut rng, MutationType::Increment); + assert!(t2.is_ascii(), "{}", t); + assert!(t2 > t); + } + } +} From 08efce510eccb363651365289d49bc85a54f2cea Mon Sep 17 00:00:00 2001 From: Yevhen Kostryka <55651586+poodbooq@users.noreply.github.com> Date: Mon, 13 Oct 2025 18:45:31 +0300 Subject: [PATCH 194/428] Fix typo in manual.md In `SELECT` section there is `GROU BY` instead of `GROUP BY` --- docs/manual.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/manual.md b/docs/manual.md index c0df2efe8..8472c8696 100644 --- a/docs/manual.md +++ b/docs/manual.md @@ -396,7 +396,7 @@ ROLLBACK [ TRANSACTION ] SELECT expression [ FROM table-or-subquery ] [ WHERE condition ] - [ GROU BY expression ] + [ GROUP BY expression ] ``` **Example:** From 294f842e62b23cfff61757753e76b43604afef51 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 13 Oct 2025 13:09:11 -0300 Subject: [PATCH 195/428] DROP INDEX sql generation --- sql_generation/generation/query.rs | 21 ++++++++++++++++++++- sql_generation/model/query/drop_index.rs | 1 + whopper/main.rs | 19 ++++++++++++++----- 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/sql_generation/generation/query.rs b/sql_generation/generation/query.rs index 17fe0f843..c4be7f2d8 100644 --- a/sql_generation/generation/query.rs +++ b/sql_generation/generation/query.rs @@ -9,7 +9,7 @@ use crate::model::query::select::{ SelectInner, }; use crate::model::query::update::Update; -use crate::model::query::{Create, CreateIndex, Delete, Drop, Insert, Select}; +use crate::model::query::{Create, CreateIndex, Delete, Drop, DropIndex, Insert, Select}; use crate::model::table::{ Column, Index, JoinTable, JoinType, JoinedTable, Name, SimValue, Table, TableContext, }; @@ -535,3 +535,22 @@ impl Arbitrary for AlterTable { } } } + +impl Arbitrary for DropIndex { + fn arbitrary(rng: &mut R, context: &C) -> Self { + let tables_with_indexes = context + .tables() + .iter() + .filter(|table| !table.indexes.is_empty()) + .collect::>(); + + // Cannot DROP INDEX if there is no index to drop + assert!(!tables_with_indexes.is_empty()); + let table = tables_with_indexes.choose(rng).unwrap(); + let index = table.indexes.choose(rng).unwrap(); + Self { + index_name: index.index_name.clone(), + table_name: table.name.clone(), + } + } +} diff --git a/sql_generation/model/query/drop_index.rs b/sql_generation/model/query/drop_index.rs index 18cadb12d..670636efb 100644 --- a/sql_generation/model/query/drop_index.rs +++ b/sql_generation/model/query/drop_index.rs @@ -3,6 +3,7 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct DropIndex { pub index_name: String, + pub table_name: String, } impl std::fmt::Display for DropIndex { diff --git a/whopper/main.rs b/whopper/main.rs index affcdb807..0a18edf2c 100644 --- a/whopper/main.rs +++ b/whopper/main.rs @@ -68,7 +68,7 @@ struct SimulatorFiber { struct SimulatorContext { fibers: Vec, tables: Vec
, - indexes: Vec, + indexes: Vec<(String, String)>, opts: Opts, stats: Stats, disable_indexes: bool, @@ -210,7 +210,10 @@ fn main() -> anyhow::Result<()> { let mut context = SimulatorContext { fibers, tables, - indexes: indexes.iter().map(|idx| idx.index_name.clone()).collect(), + indexes: indexes + .iter() + .map(|idx| (idx.table_name.clone(), idx.index_name.clone())) + .collect(), opts: Opts::default(), stats: Stats::default(), disable_indexes: args.disable_indexes, @@ -567,7 +570,10 @@ fn perform_work( let sql = create_index.to_string(); if let Ok(stmt) = context.fibers[fiber_idx].connection.prepare(&sql) { context.fibers[fiber_idx].statement.replace(Some(stmt)); - context.indexes.push(create_index.index_name.clone()); + context.indexes.push(( + create_index.index.table_name.clone(), + create_index.index_name.clone(), + )); } trace!("{} CREATE INDEX: {}", fiber_idx, sql); } @@ -576,8 +582,11 @@ fn perform_work( // DROP INDEX (2%) if !context.disable_indexes && !context.indexes.is_empty() { let index_idx = rng.random_range(0..context.indexes.len()); - let index_name = context.indexes.remove(index_idx); - let drop_index = DropIndex { index_name }; + let (table_name, index_name) = context.indexes.remove(index_idx); + let drop_index = DropIndex { + table_name, + index_name, + }; let sql = drop_index.to_string(); if let Ok(stmt) = context.fibers[fiber_idx].connection.prepare(&sql) { context.fibers[fiber_idx].statement.replace(Some(stmt)); From b2e54d98163d80ea9ebaab95c1505da7226aa934 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 13 Oct 2025 13:17:42 -0300 Subject: [PATCH 196/428] add Drop Index to simulator model --- simulator/model/mod.rs | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 20726cbbb..8ee2fd14b 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -7,7 +7,7 @@ use itertools::Itertools; use serde::{Deserialize, Serialize}; use sql_generation::model::{ query::{ - Create, CreateIndex, Delete, Drop, Insert, Select, + Create, CreateIndex, Delete, Drop, DropIndex, Insert, Select, alter_table::{AlterTable, AlterTableType}, select::{CompoundOperator, FromClause, ResultColumn, SelectInner}, transaction::{Begin, Commit, Rollback}, @@ -30,6 +30,7 @@ pub enum Query { Drop(Drop), CreateIndex(CreateIndex), AlterTable(AlterTable), + DropIndex(DropIndex), Begin(Begin), Commit(Commit), Rollback(Rollback), @@ -76,6 +77,9 @@ impl Query { }) | Query::AlterTable(AlterTable { table_name: table, .. + }) + | Query::DropIndex(DropIndex { + table_name: table, .. }) => IndexSet::from_iter([table.clone()]), Query::Begin(_) | Query::Commit(_) | Query::Rollback(_) => IndexSet::new(), Query::Placeholder => IndexSet::new(), @@ -97,6 +101,9 @@ impl Query { }) | Query::AlterTable(AlterTable { table_name: table, .. + }) + | Query::DropIndex(DropIndex { + table_name: table, .. }) => vec![table.clone()], Query::Begin(..) | Query::Commit(..) | Query::Rollback(..) => vec![], Query::Placeholder => vec![], @@ -115,7 +122,11 @@ impl Query { pub fn is_ddl(&self) -> bool { matches!( self, - Self::Create(..) | Self::CreateIndex(..) | Self::Drop(..) | Self::AlterTable(..) + Self::Create(..) + | Self::CreateIndex(..) + | Self::Drop(..) + | Self::AlterTable(..) + | Self::DropIndex(..) ) } } @@ -131,6 +142,7 @@ impl Display for Query { Self::Drop(drop) => write!(f, "{drop}"), Self::CreateIndex(create_index) => write!(f, "{create_index}"), Self::AlterTable(alter_table) => write!(f, "{alter_table}"), + Self::DropIndex(drop_index) => write!(f, "{drop_index}"), Self::Begin(begin) => write!(f, "{begin}"), Self::Commit(commit) => write!(f, "{commit}"), Self::Rollback(rollback) => write!(f, "{rollback}"), @@ -152,6 +164,7 @@ impl Shadow for Query { Query::Drop(drop) => drop.shadow(env), Query::CreateIndex(create_index) => Ok(create_index.shadow(env)), Query::AlterTable(alter_table) => alter_table.shadow(env), + Query::DropIndex(drop_index) => drop_index.shadow(env), Query::Begin(begin) => Ok(begin.shadow(env)), Query::Commit(commit) => Ok(commit.shadow(env)), Query::Rollback(rollback) => Ok(rollback.shadow(env)), @@ -170,6 +183,7 @@ bitflags! { const DROP = 1 << 5; const CREATE_INDEX = 1 << 6; const ALTER_TABLE = 1 << 7; + const DROP_INDEX = 1 << 8; } } @@ -199,6 +213,7 @@ impl From for QueryCapabilities { QueryDiscriminants::Drop => Self::DROP, QueryDiscriminants::CreateIndex => Self::CREATE_INDEX, QueryDiscriminants::AlterTable => Self::ALTER_TABLE, + QueryDiscriminants::DropIndex => Self::DROP_INDEX, QueryDiscriminants::Begin | QueryDiscriminants::Commit | QueryDiscriminants::Rollback => { @@ -221,6 +236,7 @@ impl QueryDiscriminants { QueryDiscriminants::Drop, QueryDiscriminants::CreateIndex, QueryDiscriminants::AlterTable, + QueryDiscriminants::DropIndex, ]; } From f7ba97870140cf28e5786c5e3e43da1c48200ce0 Mon Sep 17 00:00:00 2001 From: Kyle Kelley Date: Mon, 13 Oct 2025 09:33:58 -0700 Subject: [PATCH 197/428] Recommend 0.2 in rust bindings Bump version number for crate docs starter setup --- bindings/rust/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/rust/README.md b/bindings/rust/README.md index 88d358c20..3d25c0556 100644 --- a/bindings/rust/README.md +++ b/bindings/rust/README.md @@ -18,7 +18,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -turso = "0.1" +turso = "0.2" tokio = { version = "1.0", features = ["full"] } ``` From bfeccf6543046e906770843ed753689c9491dd54 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 13 Oct 2025 13:32:45 -0300 Subject: [PATCH 198/428] integrate DropIndex in query generator --- simulator/generation/plan.rs | 13 +++++++++---- simulator/generation/property.rs | 18 ++++++++++++++++++ simulator/generation/query.rs | 18 +++++++++++++++++- simulator/model/mod.rs | 16 ++++++++++++++++ simulator/profiles/query.rs | 3 +++ 5 files changed, 63 insertions(+), 5 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index b5ff4e475..6c7e66384 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -235,6 +235,7 @@ impl InteractionPlan { Query::Commit(_) => stats.commit_count += 1, Query::Rollback(_) => stats.rollback_count += 1, Query::AlterTable(_) => stats.alter_table_count += 1, + Query::DropIndex(_) => stats.drop_index_count += 1, Query::Placeholder => {} } } @@ -472,11 +473,14 @@ impl<'a, R: rand::Rng> PlanGenerator<'a, R> { if let InteractionType::Query(Query::Placeholder) = &interaction.interaction { let stats = self.plan.stats(); + let conn_ctx = env.connection_context(interaction.connection_index); + let remaining_ = remaining( env.opts.max_interactions, &env.profile.query, &stats, env.profile.experimental_mvcc, + &conn_ctx, ); let InteractionsType::Property(property) = @@ -485,8 +489,6 @@ impl<'a, R: rand::Rng> PlanGenerator<'a, R> { unreachable!("only properties have extensional queries"); }; - let conn_ctx = env.connection_context(interaction.connection_index); - let queries = possible_queries(conn_ctx.tables()); let query_distr = QueryDistribution::new(queries, &remaining_); @@ -768,13 +770,14 @@ pub(crate) struct InteractionStats { pub commit_count: u32, pub rollback_count: u32, pub alter_table_count: u32, + pub drop_index_count: u32, } impl Display for InteractionStats { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, - "Read: {}, Insert: {}, Delete: {}, Update: {}, Create: {}, CreateIndex: {}, Drop: {}, Begin: {}, Commit: {}, Rollback: {}, Alter Table: {}", + "Read: {}, Insert: {}, Delete: {}, Update: {}, Create: {}, CreateIndex: {}, Drop: {}, Begin: {}, Commit: {}, Rollback: {}, Alter Table: {}, Drop Index: {}", self.select_count, self.insert_count, self.delete_count, @@ -786,6 +789,7 @@ impl Display for InteractionStats { self.commit_count, self.rollback_count, self.alter_table_count, + self.drop_index_count, ) } } @@ -1272,12 +1276,13 @@ impl ArbitraryFrom<(&SimulatorEnv, InteractionStats, usize)> for Interactions { &env.profile.query, &stats, env.profile.experimental_mvcc, + conn_ctx, ); let queries = possible_queries(conn_ctx.tables()); let query_distr = QueryDistribution::new(queries, &remaining_); - #[allow(clippy::type_complexity)] + #[expect(clippy::type_complexity)] let mut choices: Vec<(u32, Box Interactions>)> = vec![ ( query_distr.weights().total_weight(), diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index cadd42155..f2530dfeb 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1380,6 +1380,7 @@ pub(super) struct Remaining { pub update: u32, pub drop: u32, pub alter_table: u32, + pub drop_index: u32, } pub(super) fn remaining( @@ -1387,6 +1388,7 @@ pub(super) fn remaining( opts: &QueryProfile, stats: &InteractionStats, mvcc: bool, + context: &impl GenerationContext, ) -> Remaining { let total_weight = opts.total_weight(); @@ -1398,6 +1400,7 @@ pub(super) fn remaining( let total_update = (max_interactions * opts.update_weight) / total_weight; let total_drop = (max_interactions * opts.drop_table_weight) / total_weight; let total_alter_table = (max_interactions * opts.alter_table_weight) / total_weight; + let total_drop_index = (max_interactions * opts.drop_index) / total_weight; let remaining_select = total_select .checked_sub(stats.select_count) @@ -1423,9 +1426,23 @@ pub(super) fn remaining( .checked_sub(stats.alter_table_count) .unwrap_or_default(); + let mut remaining_drop_index = total_drop_index + .checked_sub(stats.alter_table_count) + .unwrap_or_default(); + if mvcc { // TODO: index not supported yet for mvcc remaining_create_index = 0; + remaining_drop_index = 0; + } + + // if there are no indexes do not allow creation of drop_index + if !context + .tables() + .iter() + .any(|table| !table.indexes.is_empty()) + { + remaining_drop_index = 0; } Remaining { @@ -1437,6 +1454,7 @@ pub(super) fn remaining( drop: remaining_drop, update: remaining_update, alter_table: remaining_alter_table, + drop_index: remaining_drop_index, } } diff --git a/simulator/generation/query.rs b/simulator/generation/query.rs index 45140dfe6..7445bd744 100644 --- a/simulator/generation/query.rs +++ b/simulator/generation/query.rs @@ -10,7 +10,8 @@ use sql_generation::{ generation::{Arbitrary, ArbitraryFrom, GenerationContext, query::SelectFree}, model::{ query::{ - Create, CreateIndex, Delete, Insert, Select, alter_table::AlterTable, update::Update, + Create, CreateIndex, Delete, DropIndex, Insert, Select, alter_table::AlterTable, + update::Update, }, table::Table, }, @@ -89,6 +90,19 @@ fn random_alter_table( Query::AlterTable(AlterTable::arbitrary(rng, conn_ctx)) } +fn random_drop_index( + rng: &mut R, + conn_ctx: &impl GenerationContext, +) -> Query { + assert!( + conn_ctx + .tables() + .iter() + .any(|table| !table.indexes.is_empty()) + ); + Query::DropIndex(DropIndex::arbitrary(rng, conn_ctx)) +} + /// Possible queries that can be generated given the table state /// /// Does not take into account transactional statements @@ -117,6 +131,7 @@ impl QueryDiscriminants { QueryDiscriminants::Drop => random_drop, QueryDiscriminants::CreateIndex => random_create_index, QueryDiscriminants::AlterTable => random_alter_table, + QueryDiscriminants::DropIndex => random_drop_index, QueryDiscriminants::Begin | QueryDiscriminants::Commit | QueryDiscriminants::Rollback => { @@ -140,6 +155,7 @@ impl QueryDiscriminants { QueryDiscriminants::Drop => remaining.drop, QueryDiscriminants::CreateIndex => remaining.create_index, QueryDiscriminants::AlterTable => remaining.alter_table, + QueryDiscriminants::DropIndex => remaining.drop_index, QueryDiscriminants::Begin | QueryDiscriminants::Commit | QueryDiscriminants::Rollback => { diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 8ee2fd14b..dac1ec5cb 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -599,3 +599,19 @@ impl Shadow for AlterTable { Ok(vec![]) } } + +impl Shadow for DropIndex { + type Result = anyhow::Result>>; + + fn shadow(&self, tables: &mut ShadowTablesMut<'_>) -> Self::Result { + let table = tables + .iter_mut() + .find(|t| t.name == self.table_name) + .ok_or_else(|| anyhow::anyhow!("Table {} does not exist", self.table_name))?; + + table + .indexes + .retain(|index| index.index_name != self.index_name); + Ok(vec![]) + } +} diff --git a/simulator/profiles/query.rs b/simulator/profiles/query.rs index ee9583596..95bcf146a 100644 --- a/simulator/profiles/query.rs +++ b/simulator/profiles/query.rs @@ -24,6 +24,8 @@ pub struct QueryProfile { pub drop_table_weight: u32, #[garde(skip)] pub alter_table_weight: u32, + #[garde(skip)] + pub drop_index: u32, } impl Default for QueryProfile { @@ -38,6 +40,7 @@ impl Default for QueryProfile { delete_weight: 20, drop_table_weight: 2, alter_table_weight: 2, + drop_index: 2, } } } From 45567e6837613c50b73175e20b7e2c815fcdcba6 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 13 Oct 2025 13:59:25 -0300 Subject: [PATCH 199/428] fix alter table shadowing to modify index column name on rename and alter --- simulator/model/mod.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/simulator/model/mod.rs b/simulator/model/mod.rs index 20726cbbb..40fbde612 100644 --- a/simulator/model/mod.rs +++ b/simulator/model/mod.rs @@ -563,10 +563,24 @@ impl Shadow for AlterTable { AlterTableType::AlterColumn { old, new } => { let col = table.columns.iter_mut().find(|c| c.name == *old).unwrap(); *col = new.clone(); + table.indexes.iter_mut().for_each(|index| { + index.columns.iter_mut().for_each(|(col_name, _)| { + if col_name == old { + *col_name = new.name.clone(); + } + }); + }); } AlterTableType::RenameColumn { old, new } => { let col = table.columns.iter_mut().find(|c| c.name == *old).unwrap(); col.name = new.clone(); + table.indexes.iter_mut().for_each(|index| { + index.columns.iter_mut().for_each(|(col_name, _)| { + if col_name == old { + *col_name = new.clone(); + } + }); + }); } AlterTableType::DropColumn { column_name } => { let col_idx = table From bc05497d99b6f1dbb6b656383db914a884835a8d Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Mon, 13 Oct 2025 19:26:18 +0200 Subject: [PATCH 200/428] core/mvcc: implement CursorTrait on MVCC cursor --- core/mvcc/cursor.rs | 326 +++++++++++++++++++++++++----------- core/mvcc/database/tests.rs | 72 ++++++-- core/storage/btree.rs | 78 ++++----- 3 files changed, 315 insertions(+), 161 deletions(-) diff --git a/core/mvcc/cursor.rs b/core/mvcc/cursor.rs index 0709b2641..f177181df 100644 --- a/core/mvcc/cursor.rs +++ b/core/mvcc/cursor.rs @@ -1,8 +1,10 @@ use crate::mvcc::clock::LogicalClock; use crate::mvcc::database::{MVTableId, MvStore, Row, RowID}; -use crate::types::{IOResult, SeekKey, SeekOp, SeekResult}; +use crate::storage::btree::{BTreeKey, CursorTrait}; +use crate::types::{IOResult, ImmutableRecord, SeekKey, SeekOp, SeekResult}; use crate::Result; use crate::{Pager, Value}; +use std::cell::{Ref, RefCell}; use std::fmt::Debug; use std::ops::Bound; use std::sync::Arc; @@ -19,12 +21,14 @@ enum CursorPosition { #[derive(Debug)] pub struct MvccLazyCursor { pub db: Arc>, - current_pos: CursorPosition, + current_pos: RefCell, pub table_id: MVTableId, tx_id: u64, + /// Reusable immutable record, used to allow better allocation strategy. + reusable_immutable_record: RefCell>, } -impl MvccLazyCursor { +impl MvccLazyCursor { pub fn new( db: Arc>, tx_id: u64, @@ -36,55 +40,15 @@ impl MvccLazyCursor { let cursor = Self { db, tx_id, - current_pos: CursorPosition::BeforeFirst, + current_pos: RefCell::new(CursorPosition::BeforeFirst), table_id, + reusable_immutable_record: RefCell::new(None), }; Ok(cursor) } - /// Insert a row into the table. - /// Sets the cursor to the inserted row. - pub fn insert(&mut self, row: Row) -> Result<()> { - self.current_pos = CursorPosition::Loaded(row.id); - if self.db.read(self.tx_id, row.id)?.is_some() { - self.db.update(self.tx_id, row).inspect_err(|_| { - self.current_pos = CursorPosition::BeforeFirst; - })?; - } else { - self.db.insert(self.tx_id, row).inspect_err(|_| { - self.current_pos = CursorPosition::BeforeFirst; - })?; - } - Ok(()) - } - - pub fn delete(&mut self, rowid: RowID) -> Result<()> { - self.db.delete(self.tx_id, rowid)?; - Ok(()) - } - - pub fn current_row_id(&mut self) -> Option { - match self.current_pos { - CursorPosition::Loaded(id) => Some(id), - CursorPosition::BeforeFirst => { - // If we are before first, we need to try and find the first row. - let maybe_rowid = - self.db - .get_next_row_id_for_table(self.table_id, i64::MIN, self.tx_id); - if let Some(id) = maybe_rowid { - self.current_pos = CursorPosition::Loaded(id); - Some(id) - } else { - self.current_pos = CursorPosition::BeforeFirst; - None - } - } - CursorPosition::End => None, - } - } - - pub fn current_row(&mut self) -> Result> { - match self.current_pos { + pub fn current_row(&self) -> Result> { + match *self.current_pos.borrow() { CursorPosition::Loaded(id) => self.db.read(self.tx_id, id), CursorPosition::BeforeFirst => { // If we are before first, we need to try and find the first row. @@ -92,7 +56,7 @@ impl MvccLazyCursor { self.db .get_next_row_id_for_table(self.table_id, i64::MIN, self.tx_id); if let Some(id) = maybe_rowid { - self.current_pos = CursorPosition::Loaded(id); + self.current_pos.replace(CursorPosition::Loaded(id)); self.db.read(self.tx_id, id) } else { Ok(None) @@ -106,19 +70,57 @@ impl MvccLazyCursor { Ok(()) } + pub fn get_next_rowid(&mut self) -> i64 { + let _ = self.last(); + match *self.current_pos.borrow() { + CursorPosition::Loaded(id) => id.row_id + 1, + CursorPosition::BeforeFirst => 1, + CursorPosition::End => i64::MAX, + } + } + + fn get_immutable_record_or_create(&self) -> std::cell::RefMut<'_, Option> { + let mut reusable_immutable_record = self.reusable_immutable_record.borrow_mut(); + if reusable_immutable_record.is_none() { + let record = ImmutableRecord::new(1024); + reusable_immutable_record.replace(record); + } + reusable_immutable_record + } + + fn get_current_pos(&self) -> CursorPosition { + *self.current_pos.borrow() + } +} + +impl CursorTrait for MvccLazyCursor { + fn last(&mut self) -> Result> { + let last_rowid = self.db.get_last_rowid(self.table_id); + if let Some(last_rowid) = last_rowid { + self.current_pos.replace(CursorPosition::Loaded(RowID { + table_id: self.table_id, + row_id: last_rowid, + })); + } else { + self.current_pos.replace(CursorPosition::BeforeFirst); + } + Ok(IOResult::Done(())) + } + /// Move the cursor to the next row. Returns true if the cursor moved to the next row, false if the cursor is at the end of the table. - pub fn forward(&mut self) -> bool { - let before_first = matches!(self.current_pos, CursorPosition::BeforeFirst); - let min_id = match self.current_pos { + fn next(&mut self) -> Result> { + let before_first = matches!(self.get_current_pos(), CursorPosition::BeforeFirst); + let min_id = match *self.current_pos.borrow() { CursorPosition::Loaded(id) => id.row_id + 1, // TODO: do we need to forward twice? CursorPosition::BeforeFirst => i64::MIN, // we need to find first row, so we look from the first id, CursorPosition::End => { // let's keep same state, we reached the end so no point in moving forward. - return false; + return Ok(IOResult::Done(false)); } }; - self.current_pos = + + let new_position = match self .db .get_next_row_id_for_table(self.table_id, min_id, self.tx_id) @@ -134,46 +136,59 @@ impl MvccLazyCursor { } } }; - matches!(self.current_pos, CursorPosition::Loaded(_)) + self.current_pos.replace(new_position); + + Ok(IOResult::Done(matches!( + self.get_current_pos(), + CursorPosition::Loaded(_) + ))) } - /// Returns true if the is not pointing to any row. - pub fn is_empty(&self) -> bool { - // If we reached the end of the table, it means we traversed the whole table therefore there must be something in the table. - // If we have loaded a row, it means there is something in the table. - match self.current_pos { - CursorPosition::Loaded(_) => false, - CursorPosition::BeforeFirst => true, - CursorPosition::End => true, + fn prev(&mut self) -> Result> { + todo!() + } + + fn rowid(&self) -> Result>> { + let rowid = match self.get_current_pos() { + CursorPosition::Loaded(id) => Some(id.row_id), + CursorPosition::BeforeFirst => { + // If we are before first, we need to try and find the first row. + let maybe_rowid = + self.db + .get_next_row_id_for_table(self.table_id, i64::MIN, self.tx_id); + if let Some(id) = maybe_rowid { + self.current_pos.replace(CursorPosition::Loaded(id)); + Some(id.row_id) + } else { + self.current_pos.replace(CursorPosition::BeforeFirst); + None + } + } + CursorPosition::End => None, + }; + Ok(IOResult::Done(rowid)) + } + + fn record( + &self, + ) -> Result>>> { + let Some(row) = self.current_row()? else { + return Ok(IOResult::Done(None)); + }; + + { + let mut record = self.get_immutable_record_or_create(); + let record = record.as_mut().unwrap(); + record.invalidate(); + record.start_serialization(&row.data); } + + let record_ref = + Ref::filter_map(self.reusable_immutable_record.borrow(), |opt| opt.as_ref()).unwrap(); + Ok(IOResult::Done(Some(record_ref))) } - pub fn rewind(&mut self) { - self.current_pos = CursorPosition::BeforeFirst; - } - - pub fn last(&mut self) { - let last_rowid = self.db.get_last_rowid(self.table_id); - if let Some(last_rowid) = last_rowid { - self.current_pos = CursorPosition::Loaded(RowID { - table_id: self.table_id, - row_id: last_rowid, - }); - } else { - self.current_pos = CursorPosition::BeforeFirst; - } - } - - pub fn get_next_rowid(&mut self) -> i64 { - self.last(); - match self.current_pos { - CursorPosition::Loaded(id) => id.row_id + 1, - CursorPosition::BeforeFirst => 1, - CursorPosition::End => i64::MAX, - } - } - - pub fn seek(&mut self, seek_key: SeekKey<'_>, op: SeekOp) -> Result> { + fn seek(&mut self, seek_key: SeekKey<'_>, op: SeekOp) -> Result> { let row_id = match seek_key { SeekKey::TableRowId(row_id) => row_id, SeekKey::IndexKey(_) => { @@ -196,7 +211,7 @@ impl MvccLazyCursor { }; let rowid = self.db.seek_rowid(bound, lower_bound, self.tx_id); if let Some(rowid) = rowid { - self.current_pos = CursorPosition::Loaded(rowid); + self.current_pos.replace(CursorPosition::Loaded(rowid)); if op.eq_only() { if rowid.row_id == row_id { Ok(IOResult::Done(SeekResult::Found)) @@ -209,15 +224,59 @@ impl MvccLazyCursor { } else { let forwards = matches!(op, SeekOp::GE { eq_only: _ } | SeekOp::GT); if forwards { - self.last(); + let _ = self.last()?; } else { - self.rewind(); + let _ = self.rewind()?; } Ok(IOResult::Done(SeekResult::NotFound)) } } - pub fn exists(&mut self, key: &Value) -> Result> { + /// Insert a row into the table. + /// Sets the cursor to the inserted row. + fn insert(&mut self, key: &BTreeKey) -> Result> { + let Some(rowid) = key.maybe_rowid() else { + todo!() + }; + let row_id = RowID::new(self.table_id, rowid); + let record_buf = key.get_record().unwrap().get_payload().to_vec(); + let num_columns = match key { + BTreeKey::IndexKey(record) => record.column_count(), + BTreeKey::TableRowId((_, record)) => record.as_ref().unwrap().column_count(), + }; + let row = crate::mvcc::database::Row::new(row_id, record_buf, num_columns); + + self.current_pos.replace(CursorPosition::Loaded(row.id)); + if self.db.read(self.tx_id, row.id)?.is_some() { + self.db.update(self.tx_id, row).inspect_err(|_| { + self.current_pos.replace(CursorPosition::BeforeFirst); + })?; + } else { + self.db.insert(self.tx_id, row).inspect_err(|_| { + self.current_pos.replace(CursorPosition::BeforeFirst); + })?; + } + Ok(IOResult::Done(())) + } + + fn delete(&mut self) -> Result> { + let IOResult::Done(Some(rowid)) = self.rowid()? else { + todo!(); + }; + let rowid = RowID::new(self.table_id, rowid); + self.db.delete(self.tx_id, rowid)?; + Ok(IOResult::Done(())) + } + + fn set_null_flag(&mut self, _flag: bool) { + todo!() + } + + fn get_null_flag(&self) -> bool { + todo!() + } + + fn exists(&mut self, key: &Value) -> Result> { let int_key = match key { Value::Integer(i) => i, _ => unreachable!("btree tables are indexed by integers!"), @@ -234,11 +293,90 @@ impl MvccLazyCursor { ) .is_some(); if exists { - self.current_pos = CursorPosition::Loaded(RowID { + self.current_pos.replace(CursorPosition::Loaded(RowID { table_id: self.table_id, row_id: *int_key, - }); + })); } Ok(IOResult::Done(exists)) } + + fn clear_btree(&mut self) -> Result>> { + todo!() + } + + fn btree_destroy(&mut self) -> Result>> { + todo!() + } + + fn count(&mut self) -> Result> { + todo!() + } + + /// Returns true if the is not pointing to any row. + fn is_empty(&self) -> bool { + // If we reached the end of the table, it means we traversed the whole table therefore there must be something in the table. + // If we have loaded a row, it means there is something in the table. + match self.get_current_pos() { + CursorPosition::Loaded(_) => false, + CursorPosition::BeforeFirst => true, + CursorPosition::End => true, + } + } + + fn root_page(&self) -> i64 { + self.table_id.into() + } + + fn rewind(&mut self) -> Result> { + self.current_pos.replace(CursorPosition::BeforeFirst); + Ok(IOResult::Done(())) + } + + fn has_record(&self) -> bool { + todo!() + } + + fn set_has_record(&self, _has_record: bool) { + todo!() + } + + fn get_index_info(&self) -> &crate::types::IndexInfo { + todo!() + } + + fn seek_end(&mut self) -> Result> { + todo!() + } + + fn seek_to_last(&mut self) -> Result> { + todo!() + } + + fn invalidate_record(&mut self) { + self.get_immutable_record_or_create() + .as_mut() + .unwrap() + .invalidate(); + } + + fn has_rowid(&self) -> bool { + todo!() + } + + fn record_cursor_mut(&self) -> std::cell::RefMut<'_, crate::types::RecordCursor> { + todo!() + } + + fn get_pager(&self) -> Arc { + todo!() + } + + fn get_skip_advance(&self) -> bool { + todo!() + } + + fn get_mvcc_cursor(&self) -> Arc> { + todo!() + } } diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs index 086391064..9198c5825 100644 --- a/core/mvcc/database/tests.rs +++ b/core/mvcc/database/tests.rs @@ -115,6 +115,10 @@ pub(crate) fn generate_simple_string_row(table_id: MVTableId, id: i64, data: &st } } +pub(crate) fn generate_simple_string_record(data: &str) -> ImmutableRecord { + ImmutableRecord::from_values(&[Value::Text(Text::new(data))], 1) +} + #[test] fn test_insert_read() { let db = MvccTestDb::new(); @@ -830,14 +834,21 @@ fn test_lazy_scan_cursor_basic() { .unwrap(); // Check first row - assert!(cursor.forward()); + assert!(matches!(cursor.next().unwrap(), IOResult::Done(true))); assert!(!cursor.is_empty()); let row = cursor.current_row().unwrap().unwrap(); assert_eq!(row.id.row_id, 1); // Iterate through all rows let mut count = 1; - while cursor.forward() { + loop { + let res = cursor.next().unwrap(); + let IOResult::Done(res) = res else { + panic!("unexpected next result {res:?}"); + }; + if !res { + break; + } count += 1; let row = cursor.current_row().unwrap().unwrap(); assert_eq!(row.id.row_id, count); @@ -847,7 +858,7 @@ fn test_lazy_scan_cursor_basic() { assert_eq!(count, 5); // After the last row, is_empty should return true - assert!(!cursor.forward()); + assert!(!matches!(cursor.next().unwrap(), IOResult::Done(true))); assert!(cursor.is_empty()); } @@ -865,7 +876,7 @@ fn test_lazy_scan_cursor_with_gaps() { .unwrap(); // Check first row - assert!(cursor.forward()); + assert!(matches!(cursor.next().unwrap(), IOResult::Done(true))); assert!(!cursor.is_empty()); let row = cursor.current_row().unwrap().unwrap(); assert_eq!(row.id.row_id, 5); @@ -874,12 +885,27 @@ fn test_lazy_scan_cursor_with_gaps() { let expected_ids = [5, 10, 15, 20, 30]; let mut index = 0; - assert_eq!(cursor.current_row_id().unwrap().row_id, expected_ids[index]); + let IOResult::Done(rowid) = cursor.rowid().unwrap() else { + unreachable!(); + }; + let rowid = rowid.unwrap(); + assert_eq!(rowid, expected_ids[index]); - while cursor.forward() { + loop { + let res = cursor.next().unwrap(); + let IOResult::Done(res) = res else { + panic!("unexpected next result {res:?}"); + }; + if !res { + break; + } index += 1; if index < expected_ids.len() { - assert_eq!(cursor.current_row_id().unwrap().row_id, expected_ids[index]); + let IOResult::Done(rowid) = cursor.rowid().unwrap() else { + unreachable!(); + }; + let rowid = rowid.unwrap(); + assert_eq!(rowid, expected_ids[index]); } } @@ -900,7 +926,7 @@ fn test_cursor_basic() { ) .unwrap(); - cursor.forward(); + let _ = cursor.next().unwrap(); // Check first row assert!(!cursor.is_empty()); @@ -909,7 +935,14 @@ fn test_cursor_basic() { // Iterate through all rows let mut count = 1; - while cursor.forward() { + loop { + let res = cursor.next().unwrap(); + let IOResult::Done(res) = res else { + panic!("unexpected next result {res:?}"); + }; + if !res { + break; + } count += 1; let row = cursor.current_row().unwrap().unwrap(); assert_eq!(row.id.row_id, count); @@ -919,7 +952,7 @@ fn test_cursor_basic() { assert_eq!(count, 5); // After the last row, is_empty should return true - assert!(!cursor.forward()); + assert!(!matches!(cursor.next().unwrap(), IOResult::Done(true))); assert!(cursor.is_empty()); } @@ -939,7 +972,7 @@ fn test_cursor_with_empty_table() { let table_id = -1; // Empty table // Test LazyScanCursor with empty table - let mut cursor = MvccLazyCursor::new( + let cursor = MvccLazyCursor::new( db.mvcc_store.clone(), tx_id, table_id, @@ -947,7 +980,8 @@ fn test_cursor_with_empty_table() { ) .unwrap(); assert!(cursor.is_empty()); - assert!(cursor.current_row_id().is_none()); + let rowid = cursor.rowid().unwrap(); + assert!(matches!(rowid, IOResult::Done(None))); } #[test] @@ -964,15 +998,17 @@ fn test_cursor_modification_during_scan() { .unwrap(); // Read first row - assert!(cursor.forward()); + assert!(matches!(cursor.next().unwrap(), IOResult::Done(true))); let first_row = cursor.current_row().unwrap().unwrap(); assert_eq!(first_row.id.row_id, 1); // Insert a new row with ID between existing rows let new_row_id = RowID::new(table_id.into(), 3); - let new_row = generate_simple_string_row(table_id.into(), new_row_id.row_id, "new_row"); + let new_row = generate_simple_string_record("new_row"); - cursor.insert(new_row).unwrap(); + let _ = cursor + .insert(&BTreeKey::TableRowId((new_row_id.row_id, Some(&new_row)))) + .unwrap(); let row = db.mvcc_store.read(tx_id, new_row_id).unwrap().unwrap(); let mut record = ImmutableRecord::new(1024); record.start_serialization(&row.data); @@ -986,7 +1022,7 @@ fn test_cursor_modification_during_scan() { assert_eq!(row.id.row_id, 3); // Continue scanning - the cursor should still work correctly - cursor.forward(); // Move to 4 + let _ = cursor.next().unwrap(); // Move to 4 let row = db .mvcc_store .read(tx_id, RowID::new(table_id.into(), 4)) @@ -994,14 +1030,14 @@ fn test_cursor_modification_during_scan() { .unwrap(); assert_eq!(row.id.row_id, 4); - cursor.forward(); // Move to 5 (our new row) + let _ = cursor.next().unwrap(); // Move to 5 (our new row) let row = db .mvcc_store .read(tx_id, RowID::new(table_id.into(), 5)) .unwrap() .unwrap(); assert_eq!(row.id.row_id, 5); - assert!(!cursor.forward()); + assert!(!matches!(cursor.next().unwrap(), IOResult::Done(true))); assert!(cursor.is_empty()); } diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 0b7489530..633c97599 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -337,7 +337,7 @@ impl BTreeKey<'_> { } /// Get the record, if present. Index will always be present, - fn get_record(&self) -> Option<&'_ ImmutableRecord> { + pub fn get_record(&self) -> Option<&'_ ImmutableRecord> { match self { BTreeKey::TableRowId((_, record)) => *record, BTreeKey::IndexKey(record) => Some(record), @@ -345,7 +345,7 @@ impl BTreeKey<'_> { } /// Get the rowid, if present. Index will never be present. - fn maybe_rowid(&self) -> Option { + pub fn maybe_rowid(&self) -> Option { match self { BTreeKey::TableRowId((rowid, _)) => Some(*rowid), BTreeKey::IndexKey(_) => None, @@ -1297,8 +1297,10 @@ impl BTreeCursor { pub fn get_next_record(&mut self) -> Result> { if let Some(mv_cursor) = &self.mv_cursor { let mut mv_cursor = mv_cursor.write(); - mv_cursor.forward(); - let rowid = mv_cursor.current_row_id(); + assert!(matches!(mv_cursor.next()?, IOResult::Done(_))); + let IOResult::Done(rowid) = mv_cursor.rowid()? else { + todo!() + }; match rowid { Some(_rowid) => { return Ok(IOResult::Done(true)); @@ -4453,11 +4455,14 @@ impl BTreeCursor { #[instrument(skip(self), level = Level::DEBUG)] pub fn rowid(&self) -> Result>> { if let Some(mv_cursor) = &self.mv_cursor { - let mut mv_cursor = mv_cursor.write(); - let Some(rowid) = mv_cursor.current_row_id() else { + let mv_cursor = mv_cursor.write(); + let IOResult::Done(rowid) = mv_cursor.rowid()? else { + todo!() + }; + let Some(rowid) = rowid else { return Ok(IOResult::Done(None)); }; - return Ok(IOResult::Done(Some(rowid.row_id))); + return Ok(IOResult::Done(Some(rowid))); } if self.get_null_flag() { return Ok(IOResult::Done(None)); @@ -4520,7 +4525,7 @@ impl BTreeCursor { return Ok(IOResult::Done(Some(record_ref))); } if let Some(mv_cursor) = &self.mv_cursor { - let mut mv_cursor = mv_cursor.write(); + let mv_cursor = mv_cursor.write(); let Some(row) = mv_cursor.current_row()? else { return Ok(IOResult::Done(None)); }; @@ -4586,22 +4591,9 @@ impl BTreeCursor { pub fn insert(&mut self, key: &BTreeKey) -> Result> { tracing::debug!(valid_state = ?self.valid_state, cursor_state = ?self.state, is_write_in_progress = self.is_write_in_progress()); match &self.mv_cursor { - Some(mv_cursor) => match key.maybe_rowid() { - Some(rowid) => { - let row_id = - crate::mvcc::database::RowID::new(mv_cursor.read().table_id, rowid); - let record_buf = key.get_record().unwrap().get_payload().to_vec(); - let num_columns = match key { - BTreeKey::IndexKey(record) => record.column_count(), - BTreeKey::TableRowId((_, record)) => { - record.as_ref().unwrap().column_count() - } - }; - let row = crate::mvcc::database::Row::new(row_id, record_buf, num_columns); - mv_cursor.write().insert(row)?; - } - None => todo!("Support mvcc inserts with index btrees"), - }, + Some(mv_cursor) => { + return_if_io!(mv_cursor.write().insert(key)); + } None => { return_if_io!(self.insert_into_page(key)); if key.maybe_rowid().is_some() { @@ -4627,8 +4619,7 @@ impl BTreeCursor { #[instrument(skip(self), level = Level::DEBUG)] pub fn delete(&mut self) -> Result> { if let Some(mv_cursor) = &self.mv_cursor { - let rowid = mv_cursor.write().current_row_id().unwrap(); - mv_cursor.write().delete(rowid)?; + return_if_io!(mv_cursor.write().delete()); return Ok(IOResult::Done(())); } @@ -5679,11 +5670,14 @@ impl CursorTrait for BTreeCursor { #[instrument(skip(self), level = Level::DEBUG)] fn rowid(&self) -> Result>> { if let Some(mv_cursor) = &self.mv_cursor { - let mut mv_cursor = mv_cursor.write(); - let Some(rowid) = mv_cursor.current_row_id() else { + let mv_cursor = mv_cursor.write(); + let IOResult::Done(rowid) = mv_cursor.rowid()? else { + todo!(); + }; + let Some(rowid) = rowid else { return Ok(IOResult::Done(None)); }; - return Ok(IOResult::Done(Some(rowid.row_id))); + return Ok(IOResult::Done(Some(rowid))); } if self.get_null_flag() { return Ok(IOResult::Done(None)); @@ -5743,7 +5737,7 @@ impl CursorTrait for BTreeCursor { return Ok(IOResult::Done(Some(record_ref))); } if let Some(mv_cursor) = &self.mv_cursor { - let mut mv_cursor = mv_cursor.write(); + let mv_cursor = mv_cursor.write(); let Some(row) = mv_cursor.current_row()? else { return Ok(IOResult::Done(None)); }; @@ -5809,22 +5803,9 @@ impl CursorTrait for BTreeCursor { fn insert(&mut self, key: &BTreeKey) -> Result> { tracing::debug!(valid_state = ?self.valid_state, cursor_state = ?self.state, is_write_in_progress = self.is_write_in_progress()); match &self.mv_cursor { - Some(mv_cursor) => match key.maybe_rowid() { - Some(rowid) => { - let row_id = - crate::mvcc::database::RowID::new(mv_cursor.read().table_id, rowid); - let record_buf = key.get_record().unwrap().get_payload().to_vec(); - let num_columns = match key { - BTreeKey::IndexKey(record) => record.column_count(), - BTreeKey::TableRowId((_, record)) => { - record.as_ref().unwrap().column_count() - } - }; - let row = crate::mvcc::database::Row::new(row_id, record_buf, num_columns); - mv_cursor.write().insert(row)?; - } - None => todo!("Support mvcc inserts with index btrees"), - }, + Some(mv_cursor) => { + return_if_io!(mv_cursor.write().insert(key)); + } None => { return_if_io!(self.insert_into_page(key)); if key.maybe_rowid().is_some() { @@ -5838,8 +5819,7 @@ impl CursorTrait for BTreeCursor { #[instrument(skip(self), level = Level::DEBUG)] fn delete(&mut self) -> Result> { if let Some(mv_cursor) = &self.mv_cursor { - let rowid = mv_cursor.write().current_row_id().unwrap(); - mv_cursor.write().delete(rowid)?; + return_if_io!(mv_cursor.write().delete()); return Ok(IOResult::Done(())); } @@ -6361,7 +6341,7 @@ impl CursorTrait for BTreeCursor { self.rewind_state = RewindState::NextRecord; if let Some(mv_cursor) = &self.mv_cursor { let mut mv_cursor = mv_cursor.write(); - mv_cursor.rewind(); + return_if_io!(mv_cursor.rewind()); } else { let c = self.move_to_root()?; if let Some(c) = c { From bd62c805360a0adda9d2476b89a43c22d4e9a436 Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Sun, 12 Oct 2025 23:08:17 -0500 Subject: [PATCH 201/428] Implement generic file lock/unlock as a noop --- core/io/generic.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/io/generic.rs b/core/io/generic.rs index 8eef59d3b..c75702ae7 100644 --- a/core/io/generic.rs +++ b/core/io/generic.rs @@ -59,12 +59,12 @@ pub struct GenericFile { impl File for GenericFile { #[instrument(err, skip_all, level = Level::TRACE)] fn lock_file(&self, exclusive: bool) -> Result<()> { - unimplemented!() + Ok(()) } #[instrument(err, skip_all, level = Level::TRACE)] fn unlock_file(&self) -> Result<()> { - unimplemented!() + Ok(()) } #[instrument(skip(self, c), level = Level::TRACE)] From 2798fafa6ce364af916b3850b71acf31ed29a2b6 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 13 Oct 2025 14:45:51 -0300 Subject: [PATCH 202/428] proof issue 1454 --- testing/vector.test | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/testing/vector.test b/testing/vector.test index 7cae2ca5e..12aaef940 100755 --- a/testing/vector.test +++ b/testing/vector.test @@ -12,3 +12,45 @@ do_execsql_test vector-functions-valid { {[1,2,3]} {[-1000000000000000000]} } + +do_execsql_test_on_specific_db {:memory:} vector-insert { + CREATE TABLE IF NOT EXISTS vector_test ( + id INTEGER PRIMARY KEY, + format TEXT NOT NULL, + vec_data F32_BLOB(3) -- 3-dimensional vector + ); + INSERT INTO vector_test (id, format, vec_data) + VALUES (2, 'Bracketed_comma_separated', vector('[4.000000,5.000000,6.000000]')); + SELECT id, format, vector_extract(vec_data) from vector_test; +} {2|Bracketed_comma_separated|[4,5,6]} + +do_execsql_test_on_specific_db {:memory:} vector-insert { + CREATE TABLE IF NOT EXISTS vector_test ( + id INTEGER PRIMARY KEY, + format TEXT NOT NULL, + vec_data F32_BLOB(3) -- 3-dimensional vector + ); + INSERT INTO vector_test (id, format, vec_data) + VALUES (2, 'Bracketed_comma_separated', vector('[4.000000,5.000000,6.000000]')); + SELECT id, format, vector_extract(vec_data) from vector_test; +} {2|Bracketed_comma_separated|[4,5,6]} + +do_execsql_test_in_memory_error vector-insert-no-quotes { + CREATE TABLE IF NOT EXISTS vector_test ( + id INTEGER PRIMARY KEY, + format TEXT NOT NULL, + vec_data F32_BLOB(3) -- 3-dimensional vector + ); + INSERT INTO vector_test (id, format, vec_data) + VALUES (2, 'Bracketed_comma_separated', vector([4.000000,5.000000,6.000000])); +} { × Parse error: no such column: [4.000000,5.000000,6.000000]} + +do_execsql_test_in_memory_error_content vector-insert-double-quotes { + CREATE TABLE IF NOT EXISTS vector_test ( + id INTEGER PRIMARY KEY, + format TEXT NOT NULL, + vec_data F32_BLOB(3) -- 3-dimensional vector + ); + INSERT INTO vector_test (id, format, vec_data) + VALUES (2, 'Bracketed_comma_separated', vector("[4.000000,5.000000,6.000000]")); +} {no such column: [4.000000,5.000000,6.000000]} \ No newline at end of file From cd56f52bd69ad6a58c1dd4282e0d26ca0e4a92bb Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 13 Oct 2025 12:57:13 -0500 Subject: [PATCH 203/428] Add cfg attributes for running under Miri --- cli/main.rs | 2 +- core/ext/mod.rs | 4 ++-- core/io/mod.rs | 6 +++--- core/lib.rs | 6 +++--- core/storage/buffer_pool.rs | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cli/main.rs b/cli/main.rs index 5b5f98e2f..a9a4eeb89 100644 --- a/cli/main.rs +++ b/cli/main.rs @@ -16,7 +16,7 @@ use std::{ sync::{atomic::Ordering, LazyLock}, }; -#[cfg(not(target_family = "wasm"))] +#[cfg(all(not(target_family = "wasm"), not(miri)))] #[global_allocator] static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; diff --git a/core/ext/mod.rs b/core/ext/mod.rs index 1d73c3ba2..d58c86909 100644 --- a/core/ext/mod.rs +++ b/core/ext/mod.rs @@ -2,7 +2,7 @@ mod dynamic; mod vtab_xconnect; use crate::schema::{Schema, Table}; -#[cfg(all(target_os = "linux", feature = "io_uring"))] +#[cfg(all(target_os = "linux", feature = "io_uring", not(miri)))] use crate::UringIO; use crate::{function::ExternalFunc, Connection, Database}; use crate::{vtab::VirtualTable, SymbolTable}; @@ -146,7 +146,7 @@ impl Database { let io: Arc = match vfs { "memory" => Arc::new(MemoryIO::new()), "syscall" => Arc::new(SyscallIO::new()?), - #[cfg(all(target_os = "linux", feature = "io_uring"))] + #[cfg(all(target_os = "linux", feature = "io_uring", not(miri)))] "io_uring" => Arc::new(UringIO::new()?), other => match get_vfs_modules().iter().find(|v| v.0 == vfs) { Some((_, vfs)) => vfs.clone(), diff --git a/core/io/mod.rs b/core/io/mod.rs index c1940e191..0c0baa807 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -717,13 +717,13 @@ impl TempBufferCache { } cfg_block! { - #[cfg(all(target_os = "linux", feature = "io_uring"))] { + #[cfg(all(target_os = "linux", feature = "io_uring", not(miri)))] { mod io_uring; #[cfg(feature = "fs")] pub use io_uring::UringIO; } - #[cfg(target_family = "unix")] { + #[cfg(all(target_family = "unix", not(miri)))] { mod unix; #[cfg(feature = "fs")] pub use unix::UnixIO; @@ -731,7 +731,7 @@ cfg_block! { pub use PlatformIO as SyscallIO; } - #[cfg(not(any(target_family = "unix", target_os = "android", target_os = "ios")))] { + #[cfg(any(not(any(target_family = "unix", target_os = "android", target_os = "ios")), miri))] { mod generic; pub use generic::GenericIO as PlatformIO; pub use PlatformIO as SyscallIO; diff --git a/core/lib.rs b/core/lib.rs index 8f6d91d4e..2a0a558cf 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -53,9 +53,9 @@ use crate::{incremental::view::AllViewsTxState, translate::emitter::TransactionM use core::str; pub use error::{CompletionError, LimboError}; pub use io::clock::{Clock, Instant}; -#[cfg(all(feature = "fs", target_family = "unix"))] +#[cfg(all(feature = "fs", target_family = "unix", not(miri)))] pub use io::UnixIO; -#[cfg(all(feature = "fs", target_os = "linux", feature = "io_uring"))] +#[cfg(all(feature = "fs", target_os = "linux", feature = "io_uring", not(miri)))] pub use io::UringIO; pub use io::{ Buffer, Completion, CompletionType, File, GroupCompletion, MemoryIO, OpenFlags, PlatformIO, @@ -791,7 +791,7 @@ impl Database { None => match vfs.as_ref() { "memory" => Arc::new(MemoryIO::new()), "syscall" => Arc::new(SyscallIO::new()?), - #[cfg(all(target_os = "linux", feature = "io_uring"))] + #[cfg(all(target_os = "linux", feature = "io_uring", not(miri)))] "io_uring" => Arc::new(UringIO::new()?), other => { return Err(LimboError::InvalidArgument(format!("no such VFS: {other}"))); diff --git a/core/storage/buffer_pool.rs b/core/storage/buffer_pool.rs index 05fe77d6a..106c9bc8c 100644 --- a/core/storage/buffer_pool.rs +++ b/core/storage/buffer_pool.rs @@ -427,7 +427,7 @@ impl Arena { } } -#[cfg(unix)] +#[cfg(all(unix, not(miri)))] mod arena { #[cfg(target_vendor = "apple")] use libc::MAP_ANON as MAP_ANONYMOUS; @@ -463,7 +463,7 @@ mod arena { } } -#[cfg(not(unix))] +#[cfg(any(not(unix), miri))] mod arena { pub fn alloc(len: usize) -> *mut u8 { let layout = std::alloc::Layout::from_size_align(len, std::mem::size_of::()).unwrap(); From ce2f286df010b730ccebaaaf8639c4cd0878e9fe Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 13 Oct 2025 12:57:26 -0500 Subject: [PATCH 204/428] Replace git shell commands with std shims gix doesn't work here, since while it's pure Rust, it has a non-configurable dependency on crates using inline assembly, which Miri does not support. This commit is a bit of a hack, and only works in non-bare git repos without e.g packed-refs. --- simulator/runner/bugbase.rs | 69 ++++++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 23 deletions(-) diff --git a/simulator/runner/bugbase.rs b/simulator/runner/bugbase.rs index dd0d6f432..cefed3740 100644 --- a/simulator/runner/bugbase.rs +++ b/simulator/runner/bugbase.rs @@ -1,8 +1,9 @@ use std::{ collections::HashMap, - io::{self, Write}, - path::PathBuf, - process::Command, + env::current_dir, + fs::File, + io::{self, Read, Write}, + path::{Path, PathBuf}, time::SystemTime, }; @@ -452,28 +453,50 @@ impl BugBase { impl BugBase { pub(crate) fn get_current_commit_hash() -> anyhow::Result { - let output = Command::new("git") - .args(["rev-parse", "HEAD"]) - .output() - .with_context(|| "should be able to get the commit hash")?; - let commit_hash = String::from_utf8(output.stdout) - .with_context(|| "commit hash should be valid utf8")? - .trim() - .to_string(); - Ok(commit_hash) + let git_dir = find_git_dir(current_dir()?).with_context(|| "should be a git repo")?; + let hash = + resolve_head(&git_dir).with_context(|| "should be able to get the commit hash")?; + Ok(hash) } pub(crate) fn get_limbo_project_dir() -> anyhow::Result { - Ok(PathBuf::from( - String::from_utf8( - Command::new("git") - .args(["rev-parse", "--show-toplevel"]) - .output() - .with_context(|| "should be able to get the git path")? - .stdout, - ) - .with_context(|| "commit hash should be valid utf8")? - .trim(), - )) + let git_dir = find_git_dir(current_dir()?).with_context(|| "should be a git repo")?; + let workdir = git_dir + .parent() + .with_context(|| "work tree should be parent of .git")?; + Ok(workdir.to_path_buf()) } } + +fn find_git_dir(start_path: impl AsRef) -> Option { + // HACK ignores stuff like bare repo, worktree, etc. + let mut current = start_path.as_ref().to_path_buf(); + loop { + let git_path = current.join(".git"); + if git_path.is_dir() { + return Some(git_path); + } + if !current.pop() { + return None; + } + } +} + +fn resolve_head(git_dir: impl AsRef) -> anyhow::Result { + // HACK ignores stuff like packed-refs + let head_path = git_dir.as_ref().join("HEAD"); + let head_contents = read_to_string(&head_path)?; + if let Some(ref_path) = head_contents.strip_prefix("ref: ") { + let ref_file = git_dir.as_ref().join(ref_path); + read_to_string(&ref_file) + } else { + Ok(head_contents) + } +} + +fn read_to_string(path: impl AsRef) -> anyhow::Result { + let mut file = File::open(path)?; + let mut contents = String::new(); + file.read_to_string(&mut contents)?; + Ok(contents.trim().to_string()) +} From 74ef9ad5cab5a96d6d89293223a83b5aa8750ae1 Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 13 Oct 2025 13:04:42 -0500 Subject: [PATCH 205/428] Drop weak in TursoRwLock::read's compare_exchange compare_exchange_weak can spuriously fail, which Miri obliges us with, causing a read deadlock --- core/storage/wal.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/storage/wal.rs b/core/storage/wal.rs index 070de2c52..fed9f15ad 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -171,7 +171,7 @@ impl TursoRwLock { // for success, Acquire establishes happens-before relationship with the previous Release from unlock // for failure we only care about reading it for the next iteration so we can use Relaxed. self.0 - .compare_exchange_weak(cur, desired, Ordering::Acquire, Ordering::Relaxed) + .compare_exchange(cur, desired, Ordering::Acquire, Ordering::Relaxed) .is_ok() } From dfc77b035068e8f4135d137371edbadb6b0b0f2e Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 13 Oct 2025 13:53:07 -0500 Subject: [PATCH 206/428] Non-Unix arena: use zeroed alloc to avoid UB Reads to the arena were flagged by Miri as UB since it contained uninitialized memory --- core/storage/buffer_pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/storage/buffer_pool.rs b/core/storage/buffer_pool.rs index 106c9bc8c..a38e878a8 100644 --- a/core/storage/buffer_pool.rs +++ b/core/storage/buffer_pool.rs @@ -467,7 +467,7 @@ mod arena { mod arena { pub fn alloc(len: usize) -> *mut u8 { let layout = std::alloc::Layout::from_size_align(len, std::mem::size_of::()).unwrap(); - unsafe { std::alloc::alloc(layout) } + unsafe { std::alloc::alloc_zeroed(layout) } } pub fn dealloc(ptr: *mut u8, len: usize) { let layout = std::alloc::Layout::from_size_align(len, std::mem::size_of::()).unwrap(); From 3d4c10df40a0dffc694061a7c7cf8bd94b100cb3 Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 13 Oct 2025 13:55:48 -0500 Subject: [PATCH 207/428] Document using Miri to run the simulator --- simulator/README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/simulator/README.md b/simulator/README.md index 3de0afb99..b3c93e00a 100644 --- a/simulator/README.md +++ b/simulator/README.md @@ -118,6 +118,17 @@ For development purposes, you can run `make sim-schema` to generate a JsonSchema } ``` +## Run simulator using the Miri interpreter + +Miri is a deterministic Rust interpreter designed to identify undefined behavior. To run the simulator under Miri, use +```bash +MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-disable-stacked-borrows" RUST_LOG=limbo_sim=debug cargo +nightly miri run --bin limbo_sim -- --disable-integrity-check +```` +Notes: +- `-Zmiri-disable-isolation` is needed for host access (like opening a file) +- `-Zmiri-disable-stacked-borrows` this alias checking is experimental, so disabled for now +- `--disable-integrity-check` is needed since we can't run sqlite via the FFI in Miri + ## Resources - [(reading) TigerBeetle Deterministic Simulation Testing](https://docs.tigerbeetle.com/about/vopr/) From 4d843804b74ae4191c77522f311fb5f921e9c62b Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 13 Oct 2025 14:53:08 -0500 Subject: [PATCH 208/428] Add --disable-integrity-check option to simulator Miri can't execute sqlite via the FFI, so this needs to be configurable --- simulator/main.rs | 14 +++++++++----- simulator/runner/cli.rs | 5 +++++ simulator/runner/env.rs | 2 ++ 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/simulator/main.rs b/simulator/main.rs index 6a5d097b8..c60e9c5ce 100644 --- a/simulator/main.rs +++ b/simulator/main.rs @@ -612,12 +612,16 @@ fn run_simulation_default( tracing::info!("Simulation completed"); if result.error.is_none() { - let ic = integrity_check(&env.get_db_path()); - if let Err(err) = ic { - tracing::error!("integrity check failed: {}", err); - result.error = Some(turso_core::LimboError::InternalError(err.to_string())); + if env.opts.disable_integrity_check { + tracing::info!("skipping integrity check (disabled by configuration)"); } else { - tracing::info!("integrity check passed"); + let ic = integrity_check(&env.get_db_path()); + if let Err(err) = ic { + tracing::error!("integrity check failed: {}", err); + result.error = Some(turso_core::LimboError::InternalError(err.to_string())); + } else { + tracing::info!("integrity check passed"); + } } } diff --git a/simulator/runner/cli.rs b/simulator/runner/cli.rs index 6dec4b46d..8a941dde1 100644 --- a/simulator/runner/cli.rs +++ b/simulator/runner/cli.rs @@ -147,6 +147,11 @@ pub struct SimulatorCLI { default_value_t = false )] pub keep_files: bool, + #[clap( + long, + help = "Disable the SQLite integrity check at the end of a simulation" + )] + pub disable_integrity_check: bool, #[clap( long, help = "Use memory IO for complex simulations", diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index 79497c38b..fd960fba7 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -315,6 +315,7 @@ impl SimulatorEnv { max_interactions: rng.random_range(cli_opts.minimum_tests..=cli_opts.maximum_tests), max_time_simulation: cli_opts.maximum_time, disable_reopen_database: cli_opts.disable_reopen_database, + disable_integrity_check: cli_opts.disable_integrity_check, }; // Remove existing database file if it exists @@ -575,6 +576,7 @@ pub(crate) struct SimulatorOpts { pub(crate) disable_fsync_no_wait: bool, pub(crate) disable_faulty_query: bool, pub(crate) disable_reopen_database: bool, + pub(crate) disable_integrity_check: bool, pub(crate) max_interactions: u32, pub(crate) page_size: usize, From 0ef5ec007c91f17d76ca0ca4152a8accd08cc59b Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 13 Oct 2025 18:04:51 -0300 Subject: [PATCH 209/428] remove cfg for `MAP_ANONYMOUS` --- core/storage/buffer_pool.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/core/storage/buffer_pool.rs b/core/storage/buffer_pool.rs index 05fe77d6a..5b252e117 100644 --- a/core/storage/buffer_pool.rs +++ b/core/storage/buffer_pool.rs @@ -429,9 +429,6 @@ impl Arena { #[cfg(unix)] mod arena { - #[cfg(target_vendor = "apple")] - use libc::MAP_ANON as MAP_ANONYMOUS; - #[cfg(target_os = "linux")] use libc::MAP_ANONYMOUS; use libc::{mmap, munmap, MAP_PRIVATE, PROT_READ, PROT_WRITE}; use std::ffi::c_void; From 83dde9b55cf68c58e40012e53e62002eeb113388 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 13 Oct 2025 20:26:07 -0300 Subject: [PATCH 210/428] fix backwards compatible rowid alias behaviour --- core/schema.rs | 13 ++++++++++++- testing/insert.test | 18 +++++++++++++++++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/core/schema.rs b/core/schema.rs index 0bf9f464c..2bfe9dfc8 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1553,6 +1553,12 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R foreign_keys.push(Arc::new(fk)); } } + + // Due to a bug in SQLite, this check is needed to maintain backwards compatibility with rowid alias + // SQLite docs: https://sqlite.org/lang_createtable.html#rowids_and_the_integer_primary_key + // Issue: https://github.com/tursodatabase/turso/issues/3665 + let mut primary_key_desc_columns_constraint = false; + for ast::ColumnDefinition { col_name, col_type, @@ -1690,6 +1696,9 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R if primary_key { primary_key_columns.push((name.clone(), order)); + if order == SortOrder::Desc { + primary_key_desc_columns_constraint = true; + } } else if primary_key_columns .iter() .any(|(col_name, _)| col_name == &name) @@ -1702,7 +1711,9 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R ty, ty_str, primary_key, - is_rowid_alias: typename_exactly_integer && primary_key, + is_rowid_alias: typename_exactly_integer + && primary_key + && !primary_key_desc_columns_constraint, notnull, default, unique, diff --git a/testing/insert.test b/testing/insert.test index 4419bcafb..05c03329f 100755 --- a/testing/insert.test +++ b/testing/insert.test @@ -678,4 +678,20 @@ do_execsql_test_on_specific_db {:memory:} insert-rowid-select-rowid-success { INSERT INTO t(a) SELECT rowid FROM t; SELECT * FROM t; } {2 -1} \ No newline at end of file +1} + + +# Due to a bug in SQLite, this check is needed to maintain backwards compatibility with rowid alias +# SQLite docs: https://sqlite.org/lang_createtable.html#rowids_and_the_integer_primary_key +# Issue: https://github.com/tursodatabase/turso/issues/3665 +do_execsql_test_on_specific_db {:memory:} insert-rowid-backwards-compability { + CREATE TABLE t(a INTEGER PRIMARY KEY DESC); + INSERT INTO t(a) VALUES (123); + SELECT rowid, * FROM t; +} {1|123} + +do_execsql_test_on_specific_db {:memory:} insert-rowid-backwards-compability-2 { + CREATE TABLE t(a INTEGER, PRIMARY KEY (a DESC)); + INSERT INTO t(a) VALUES (123); + SELECT rowid, * FROM t; +} {123|123} \ No newline at end of file From 2e722af93c6afe9d251a3417509ea7aaf8447894 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 13 Oct 2025 15:18:38 -0300 Subject: [PATCH 211/428] proof issue 1710 --- testing/insert.test | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/testing/insert.test b/testing/insert.test index 4419bcafb..7321d1ec2 100755 --- a/testing/insert.test +++ b/testing/insert.test @@ -529,6 +529,13 @@ do_execsql_test_on_specific_db {:memory:} null-value-insert-null-type-column { SELECT * FROM test; } {1|} +# https://github.com/tursodatabase/turso/issues/1710 +do_execsql_test_in_memory_error_content uniq_constraint { + CREATE TABLE test (id INTEGER unique); + insert into test values (1); + insert into test values (1); +} {UNIQUE constraint failed: test.id (19)} + do_execsql_test_in_memory_error_content insert-explicit-rowid-conflict { create table t (x); insert into t(rowid, x) values (1, 1); From 5b2cce946acd2975f0e77af959b81345602a55b3 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 13 Oct 2025 21:04:43 -0300 Subject: [PATCH 212/428] do not reference workspace package by path --- bindings/python/Cargo.toml | 2 +- cli/Cargo.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bindings/python/Cargo.toml b/bindings/python/Cargo.toml index 2ffc62f8a..0a9f99e84 100644 --- a/bindings/python/Cargo.toml +++ b/bindings/python/Cargo.toml @@ -18,7 +18,7 @@ tracing_release = ["turso_core/tracing_release"] [dependencies] anyhow = "1.0" -turso_core = { path = "../../core", features = ["io_uring"] } +turso_core = { workspace = true, features = ["io_uring"] } pyo3 = { version = "0.24.1", features = ["anyhow"] } [build-dependencies] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index c1c60f928..35f691042 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -28,8 +28,8 @@ ctrlc = "3.4.4" dirs = "5.0.1" env_logger = { workspace = true } libc = "0.2.172" -turso_core = { path = "../core", default-features = true, features = ["cli_only"] } -limbo_completion = { path = "../extensions/completion", features = ["static"] } +turso_core = { workspace = true , default-features = true, features = ["cli_only"] } +limbo_completion = { workspace = true, features = ["static"] } miette = { workspace = true, features = ["fancy"] } nu-ansi-term = {version = "0.50.1", features = ["serde", "derive_serde_style"]} rustyline = { version = "15.0.0", default-features = true, features = [ From 29770382f99504259c356daceb8b5c5f04f47fb9 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 12:14:15 +0300 Subject: [PATCH 213/428] temporarily remove ephemeral plan construction from prepare_update_plan the decision to use an ephemeral table in UPDATE will be made after the optimizer has made the decision about which index to use. this will be implemented in a later commit. --- core/translate/update.rs | 135 ++++----------------------------------- 1 file changed, 14 insertions(+), 121 deletions(-) diff --git a/core/translate/update.rs b/core/translate/update.rs index a7dc21299..3849e0bdf 100644 --- a/core/translate/update.rs +++ b/core/translate/update.rs @@ -1,15 +1,13 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use crate::schema::{BTreeTable, Column, Type, ROWID_SENTINEL}; +use crate::schema::ROWID_SENTINEL; use crate::translate::emitter::Resolver; use crate::translate::expr::{ bind_and_rewrite_expr, walk_expr, BindingBehavior, ParamState, WalkControl, }; -use crate::translate::optimizer::optimize_select_plan; -use crate::translate::plan::{Operation, QueryDestination, Scan, Search, SelectPlan}; +use crate::translate::plan::{Operation, Scan}; use crate::translate::planner::{parse_limit, ROWID_STRS}; -use crate::vdbe::builder::CursorType; use crate::{ bail_parse_error, schema::{Schema, Table}, @@ -22,8 +20,7 @@ use super::emitter::emit_program; use super::expr::process_returning_clause; use super::optimizer::optimize_plan; use super::plan::{ - ColumnUsedMask, IterationDirection, JoinedTable, Plan, ResultSetColumn, TableReferences, - UpdatePlan, + ColumnUsedMask, IterationDirection, JoinedTable, Plan, TableReferences, UpdatePlan, }; use super::planner::parse_where; /* @@ -301,120 +298,16 @@ pub fn prepare_update_plan( // https://github.com/sqlite/sqlite/blob/master/src/update.c#L395 // https://github.com/sqlite/sqlite/blob/master/src/update.c#L670 let columns = table.columns(); - - let rowid_alias_used = set_clauses.iter().fold(false, |accum, (idx, _)| { - accum || (*idx != ROWID_SENTINEL && columns[*idx].is_rowid_alias) - }); - let direct_rowid_update = set_clauses.iter().any(|(idx, _)| *idx == ROWID_SENTINEL); - - let (ephemeral_plan, mut where_clause) = if rowid_alias_used || direct_rowid_update { - let mut where_clause = vec![]; - let internal_id = program.table_reference_counter.next(); - - let joined_tables = vec![JoinedTable { - table: match table.as_ref() { - Table::Virtual(vtab) => Table::Virtual(vtab.clone()), - Table::BTree(btree_table) => Table::BTree(btree_table.clone()), - _ => unreachable!(), - }, - identifier: table_name.to_string(), - internal_id, - op: build_scan_op(&table, iter_dir), - join_info: None, - col_used_mask: ColumnUsedMask::default(), - database_id: 0, - }]; - let mut table_references = TableReferences::new(joined_tables, vec![]); - - // Parse the WHERE clause - parse_where( - body.where_clause.as_deref(), - &mut table_references, - Some(&result_columns), - &mut where_clause, - connection, - &mut program.param_ctx, - )?; - - let table = Arc::new(BTreeTable { - root_page: 0, // Not relevant for ephemeral table definition - name: "ephemeral_scratch".to_string(), - has_rowid: true, - has_autoincrement: false, - primary_key_columns: vec![], - columns: vec![Column { - name: Some("rowid".to_string()), - ty: Type::Integer, - ty_str: "INTEGER".to_string(), - primary_key: true, - is_rowid_alias: false, - notnull: true, - default: None, - unique: false, - collation: None, - hidden: false, - }], - is_strict: false, - unique_sets: vec![], - foreign_keys: vec![], - }); - - let temp_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(table.clone())); - - let mut ephemeral_plan = SelectPlan { - table_references, - result_columns: vec![ResultSetColumn { - expr: Expr::RowId { - database: None, - table: internal_id, - }, - alias: None, - contains_aggregates: false, - }], - where_clause, // original WHERE terms from the UPDATE clause - group_by: None, // N/A - order_by: vec![], // N/A - aggregates: vec![], // N/A - limit: None, // N/A - query_destination: QueryDestination::EphemeralTable { - cursor_id: temp_cursor_id, - table, - }, - join_order: vec![], - offset: None, - contains_constant_false_condition: false, - distinctness: super::plan::Distinctness::NonDistinct, - values: vec![], - window: None, - }; - - optimize_select_plan(&mut ephemeral_plan, schema)?; - let table = ephemeral_plan - .table_references - .joined_tables() - .first() - .unwrap(); - // We do not need to emit an ephemeral plan if we are not going to loop over the table values - if matches!(table.op, Operation::Search(Search::RowidEq { .. })) { - (None, vec![]) - } else { - (Some(ephemeral_plan), vec![]) - } - } else { - (None, vec![]) - }; - - if ephemeral_plan.is_none() { - // Parse the WHERE clause - parse_where( - body.where_clause.as_deref(), - &mut table_references, - Some(&result_columns), - &mut where_clause, - connection, - &mut program.param_ctx, - )?; - }; + let mut where_clause = vec![]; + // Parse the WHERE clause + parse_where( + body.where_clause.as_deref(), + &mut table_references, + Some(&result_columns), + &mut where_clause, + connection, + &mut program.param_ctx, + )?; // Parse the LIMIT/OFFSET clause let (limit, offset) = body.limit.as_mut().map_or(Ok((None, None)), |l| { @@ -485,7 +378,7 @@ pub fn prepare_update_plan( offset, contains_constant_false_condition: false, indexes_to_update, - ephemeral_plan, + ephemeral_plan: None, cdc_update_alter_statement: None, })) } From bc80ac175452e48ba40dab671a76af24b7598016 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 12:18:13 +0300 Subject: [PATCH 214/428] require &mut ProgramBuilder argument in optimize_plan() this will be used for ephemeral plan construction for UPDATE in a later commit. --- core/translate/delete.rs | 2 +- core/translate/optimizer/mod.rs | 6 +++++- core/translate/select.rs | 2 +- core/translate/update.rs | 4 ++-- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/core/translate/delete.rs b/core/translate/delete.rs index 4d2dbdbff..e9d49da49 100644 --- a/core/translate/delete.rs +++ b/core/translate/delete.rs @@ -54,7 +54,7 @@ pub fn translate_delete( result_columns, connection, )?; - optimize_plan(&mut delete_plan, resolver.schema)?; + optimize_plan(&mut program, &mut delete_plan, resolver.schema)?; let Plan::Delete(ref delete) = delete_plan else { panic!("delete_plan is not a DeletePlan"); }; diff --git a/core/translate/optimizer/mod.rs b/core/translate/optimizer/mod.rs index 81dde810c..afe952b0c 100644 --- a/core/translate/optimizer/mod.rs +++ b/core/translate/optimizer/mod.rs @@ -115,7 +115,11 @@ fn optimize_delete_plan(plan: &mut DeletePlan, schema: &Schema) -> Result<()> { Ok(()) } -fn optimize_update_plan(plan: &mut UpdatePlan, schema: &Schema) -> Result<()> { +fn optimize_update_plan( + program: &mut ProgramBuilder, + plan: &mut UpdatePlan, + schema: &Schema, +) -> Result<()> { lift_common_subexpressions_from_binary_or_terms(&mut plan.where_clause)?; if let ConstantConditionEliminationResult::ImpossibleCondition = eliminate_constant_conditions(&mut plan.where_clause)? diff --git a/core/translate/select.rs b/core/translate/select.rs index 915c64242..2ed2daa6d 100644 --- a/core/translate/select.rs +++ b/core/translate/select.rs @@ -43,7 +43,7 @@ pub fn translate_select( query_destination, connection, )?; - optimize_plan(&mut select_plan, resolver.schema)?; + optimize_plan(&mut program, &mut select_plan, resolver.schema)?; let num_result_cols; let opts = match &select_plan { Plan::Select(select) => { diff --git a/core/translate/update.rs b/core/translate/update.rs index 3849e0bdf..1aac4c745 100644 --- a/core/translate/update.rs +++ b/core/translate/update.rs @@ -59,7 +59,7 @@ pub fn translate_update( connection: &Arc, ) -> crate::Result { let mut plan = prepare_update_plan(&mut program, resolver.schema, body, connection, false)?; - optimize_plan(&mut plan, resolver.schema)?; + optimize_plan(&mut program, &mut plan, resolver.schema)?; let opts = ProgramBuilderOpts { num_cursors: 1, approx_num_insns: 20, @@ -86,7 +86,7 @@ pub fn translate_update_for_schema_change( } } - optimize_plan(&mut plan, resolver.schema)?; + optimize_plan(&mut program, &mut plan, resolver.schema)?; let opts = ProgramBuilderOpts { num_cursors: 1, approx_num_insns: 20, From c2fe13ad4f0dc2ff80d32cf51127b06894d9865f Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 12:18:53 +0300 Subject: [PATCH 215/428] Update documentation of UpdatePlan::ephemeral_plan It now better reflects when it is used. --- core/translate/plan.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/translate/plan.rs b/core/translate/plan.rs index 98cd8e2e2..d197cb443 100644 --- a/core/translate/plan.rs +++ b/core/translate/plan.rs @@ -440,7 +440,10 @@ pub struct UpdatePlan { // whether the WHERE clause is always false pub contains_constant_false_condition: bool, pub indexes_to_update: Vec>, - // If the table's rowid alias is used, gather all the target rowids into an ephemeral table, and then use that table as the single JoinedTable for the actual UPDATE loop. + // If the UPDATE modifies any column that is present in the key of the btree used to iterate over the table (either the table itself or an index), + // gather all the target rowids into an ephemeral table, and then use that table as the single JoinedTable for the actual UPDATE loop. + // This ensures the keys of the btree used to iterate cannot be changed during the UPDATE loop itself, ensuring all the intended rows actually get + // updated and none are skipped. pub ephemeral_plan: Option, // For ALTER TABLE turso-db emits appropriate DDL statement in the "updates" cell of CDC table // This field is present only for update plan created for ALTER TABLE when CDC mode has "updates" values From 691dce6b8ac4bb582596e41800c60dfa90e0f64b Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 12:19:25 +0300 Subject: [PATCH 216/428] Make decision about UpdatePlan::ephemeral_plan _after_ optimizer An ephemeral table is required if the b-tree key of the table (rowid) or the index (index key) is affected by the UPDATE. --- core/translate/expr.rs | 7 +- core/translate/optimizer/mod.rs | 186 ++++++++++++++++++++++++++++---- core/translate/plan.rs | 5 + 3 files changed, 177 insertions(+), 21 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index cb02e2e35..4f546205e 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -1951,7 +1951,12 @@ pub fn translate_expr( let table = referenced_tables .unwrap() .find_table_by_internal_id(*table_ref_id) - .expect("table reference should be found"); + .unwrap_or_else(|| { + unreachable!( + "table reference should be found: {} (referenced_tables: {:?})", + table_ref_id, referenced_tables + ) + }); let Some(table_column) = table.get_column_at(*column) else { crate::bail_parse_error!("column index out of bounds"); diff --git a/core/translate/optimizer/mod.rs b/core/translate/optimizer/mod.rs index afe952b0c..798cd50f3 100644 --- a/core/translate/optimizer/mod.rs +++ b/core/translate/optimizer/mod.rs @@ -16,15 +16,19 @@ use turso_ext::{ConstraintInfo, ConstraintUsage}; use turso_parser::ast::{self, Expr, SortOrder}; use crate::{ - schema::{Index, IndexColumn, Schema, Table}, + schema::{BTreeTable, Column, Index, IndexColumn, Schema, Table, Type, ROWID_SENTINEL}, translate::{ optimizer::{ access_method::AccessMethodParams, constraints::{RangeConstraintRef, SeekRangeConstraint, TableConstraints}, }, - plan::{Scan, SeekKeyComponent}, + plan::{ + ColumnUsedMask, OuterQueryReference, QueryDestination, ResultSetColumn, Scan, + SeekKeyComponent, + }, }, types::SeekOp, + vdbe::builder::{CursorKey, CursorType, ProgramBuilder}, LimboError, Result, }; @@ -44,11 +48,11 @@ pub(crate) mod lift_common_subexpressions; pub(crate) mod order; #[tracing::instrument(skip_all, level = tracing::Level::DEBUG)] -pub fn optimize_plan(plan: &mut Plan, schema: &Schema) -> Result<()> { +pub fn optimize_plan(program: &mut ProgramBuilder, plan: &mut Plan, schema: &Schema) -> Result<()> { match plan { Plan::Select(plan) => optimize_select_plan(plan, schema)?, Plan::Delete(plan) => optimize_delete_plan(plan, schema)?, - Plan::Update(plan) => optimize_update_plan(plan, schema)?, + Plan::Update(plan) => optimize_update_plan(program, plan, schema)?, Plan::CompoundSelect { left, right_most, .. } => { @@ -136,28 +140,170 @@ fn optimize_update_plan( &mut None, )?; - // It is not safe to use an index that is going to be updated as the iteration index for a table. - // In these cases, we will fall back to a table scan. - // FIXME: this should probably be incorporated into the optimizer itself, but it's a smaller fix this way. let table_ref = &mut plan.table_references.joined_tables_mut()[0]; - // No index, OK. - let Some(index) = table_ref.op.index() else { - return Ok(()); + // An ephemeral table is required if the UPDATE modifies any column that is present in the key of the + // btree used to iterate over the table. + // For regular table scans or seeks, this is just the rowid or the rowid alias column (INTEGER PRIMARY KEY) + // For index scans and seeks, this is any column in the index used. + let requires_ephemeral_table = 'requires: { + let Some(btree_table) = table_ref.table.btree() else { + break 'requires false; + }; + let Some(index) = table_ref.op.index() else { + let rowid_alias_used = plan.set_clauses.iter().fold(false, |accum, (idx, _)| { + accum || (*idx != ROWID_SENTINEL && btree_table.columns[*idx].is_rowid_alias) + }); + if rowid_alias_used { + break 'requires true; + } + let direct_rowid_update = plan + .set_clauses + .iter() + .any(|(idx, _)| *idx == ROWID_SENTINEL); + if direct_rowid_update { + break 'requires true; + } + break 'requires false; + }; + + plan.set_clauses + .iter() + .any(|(idx, _)| index.columns.iter().any(|c| c.pos_in_table == *idx)) }; - // Iteration index not affected by update, OK. - if !plan.indexes_to_update.iter().any(|i| Arc::ptr_eq(index, i)) { + + if !requires_ephemeral_table { return Ok(()); } - // Otherwise, fall back to a table scan. - table_ref.op = Operation::Scan(Scan::BTreeTable { - iter_dir: IterationDirection::Forwards, - index: None, + + add_ephemeral_table_to_update_plan(program, plan) +} + +/// An ephemeral table is required if the UPDATE modifies any column that is present in the key of the +/// btree used to iterate over the table. +/// For regular table scans or seeks, the key is the rowid or the rowid alias column (INTEGER PRIMARY KEY). +/// For index scans and seeks, the key is any column in the index used. +/// +/// The ephemeral table will accumulate all the rowids of the rows that are affected by the UPDATE, +/// and then the temp table will be iterated over and the actual row updates performed. +/// +/// This is necessary because an UPDATE is implemented as a DELETE-then-INSERT operation, which could +/// mess up the iteration order of the rows by changing the keys in the table/index that the iteration +/// is performed over. The ephemeral table ensures stable iteration because it is not modified during +/// the UPDATE loop. +fn add_ephemeral_table_to_update_plan( + program: &mut ProgramBuilder, + plan: &mut UpdatePlan, +) -> Result<()> { + let internal_id = program.table_reference_counter.next(); + let ephemeral_table = Arc::new(BTreeTable { + root_page: 0, // Not relevant for ephemeral table definition + name: "ephemeral_scratch".to_string(), + has_rowid: true, + has_autoincrement: false, + primary_key_columns: vec![], + columns: vec![Column { + name: Some("rowid".to_string()), + ty: Type::Integer, + ty_str: "INTEGER".to_string(), + primary_key: true, + is_rowid_alias: false, + notnull: true, + default: None, + unique: false, + collation: None, + hidden: false, + }], + is_strict: false, + unique_sets: vec![], + foreign_keys: vec![], }); - // Revert the decision to use a WHERE clause term as an index constraint. - plan.where_clause - .iter_mut() - .for_each(|term| term.consumed = false); + + let temp_cursor_id = program.alloc_cursor_id_keyed( + CursorKey::table(internal_id), + CursorType::BTreeTable(ephemeral_table.clone()), + ); + + // The actual update loop will use the ephemeral table as the single [JoinedTable] which it then loops over. + let table_references_update = TableReferences::new( + vec![JoinedTable { + table: Table::BTree(ephemeral_table.clone()), + identifier: "ephemeral_scratch".to_string(), + internal_id, + op: Operation::Scan(Scan::BTreeTable { + iter_dir: IterationDirection::Forwards, + index: None, + }), + join_info: None, + col_used_mask: ColumnUsedMask::default(), + database_id: 0, + }], + vec![], + ); + + // Building the ephemeral table will use the TableReferences from the original plan -- i.e. if we chose an index scan originally, + // we will build the ephemeral table by using the same index scan and using the same WHERE filters. + let table_references_ephemeral_select = + std::mem::replace(&mut plan.table_references, table_references_update); + + for table in table_references_ephemeral_select.joined_tables() { + // The update loop needs to reference columns from the original source table, so we add it as an outer query reference. + plan.table_references + .add_outer_query_reference(OuterQueryReference { + identifier: table.identifier.clone(), + internal_id: table.internal_id, + table: table.table.clone(), + col_used_mask: table.col_used_mask.clone(), + }); + } + + let join_order = table_references_ephemeral_select + .joined_tables() + .iter() + .enumerate() + .map(|(i, t)| JoinOrderMember { + table_id: t.internal_id, + original_idx: i, + is_outer: t + .join_info + .as_ref() + .is_some_and(|join_info| join_info.outer), + }) + .collect(); + let rowid_internal_id = table_references_ephemeral_select + .joined_tables() + .first() + .unwrap() + .internal_id; + + let ephemeral_plan = SelectPlan { + table_references: table_references_ephemeral_select, + result_columns: vec![ResultSetColumn { + expr: Expr::RowId { + database: None, + table: rowid_internal_id, + }, + alias: None, + contains_aggregates: false, + }], + where_clause: plan.where_clause.drain(..).collect(), + group_by: None, // N/A + order_by: vec![], // N/A + aggregates: vec![], // N/A + limit: None, // N/A + query_destination: QueryDestination::EphemeralTable { + cursor_id: temp_cursor_id, + table: ephemeral_table, + }, + join_order, + offset: None, + contains_constant_false_condition: false, + distinctness: super::plan::Distinctness::NonDistinct, + values: vec![], + window: None, + }; + + plan.ephemeral_plan = Some(ephemeral_plan); Ok(()) } diff --git a/core/translate/plan.rs b/core/translate/plan.rs index d197cb443..6782607b1 100644 --- a/core/translate/plan.rs +++ b/core/translate/plan.rs @@ -616,6 +616,11 @@ impl TableReferences { self.joined_tables.push(joined_table); } + /// Add a new [OuterQueryReference] to the query plan. + pub fn add_outer_query_reference(&mut self, outer_query_reference: OuterQueryReference) { + self.outer_query_refs.push(outer_query_reference); + } + /// Returns an immutable reference to the [JoinedTable]s in the query plan. pub fn joined_tables(&self) -> &[JoinedTable] { &self.joined_tables From f5ee4807dafdd622038c0c5c40d8a5b4b37f3596 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 13:47:16 +0300 Subject: [PATCH 217/428] Properly differentiate between source and target in UPDATE - Encode information about ephemeral source table in OperationMode::UPDATE if present - Use OperationMode information to correctly resolve cursors in UPDATE --- core/translate/emitter.rs | 192 +++++++++++++++++++++++++----------- core/translate/main_loop.rs | 88 ++++++++++++----- core/translate/plan.rs | 64 +++++++++--- core/translate/select.rs | 4 +- core/translate/upsert.rs | 3 +- 5 files changed, 250 insertions(+), 101 deletions(-) diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 2dabd4b82..67ead10dc 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -43,7 +43,6 @@ use crate::translate::window::{emit_window_results, init_window, WindowMetadata} use crate::util::{exprs_are_equivalent, normalize_ident}; use crate::vdbe::builder::{CursorKey, CursorType, ProgramBuilder}; use crate::vdbe::insn::{CmpInsFlags, IdxInsertFlags, InsertFlags, RegisterOrLiteral}; -use crate::vdbe::CursorID; use crate::vdbe::{insn::Insn, BranchOffset}; use crate::Connection; use crate::{bail_parse_error, Result, SymbolTable}; @@ -190,13 +189,30 @@ impl<'a> TranslateCtx<'a> { } } +#[derive(Debug, Clone)] +/// Update row source for UPDATE statements +/// `Normal` is the default mode, it will iterate either the table itself or an index on the table. +/// `PrebuiltEphemeralTable` is used when an ephemeral table containing the target rowids to update has +/// been built and it is being used for iteration. +pub enum UpdateRowSource { + /// Iterate over the table itself or an index on the table + Normal, + /// Iterate over an ephemeral table containing the target rowids to update + PrebuiltEphemeralTable { + /// The cursor id of the ephemeral table that is being used to iterate the target rowids to update. + ephemeral_table_cursor_id: usize, + /// The table that is being updated. + target_table: Arc, + }, +} + /// Used to distinguish database operations #[allow(clippy::upper_case_acronyms, dead_code)] -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone)] pub enum OperationMode { SELECT, INSERT, - UPDATE, + UPDATE(UpdateRowSource), DELETE, } @@ -363,6 +379,7 @@ pub fn emit_query<'a>( &plan.join_order, &plan.where_clause, None, + OperationMode::SELECT, )?; // Process result columns and expressions in the inner loop @@ -374,7 +391,7 @@ pub fn emit_query<'a>( t_ctx, &plan.table_references, &plan.join_order, - None, + OperationMode::SELECT, )?; program.preassign_label_to_next_insn(after_main_loop_label); @@ -456,6 +473,7 @@ fn emit_program_for_delete( &[JoinOrderMember::default()], &plan.where_clause, None, + OperationMode::DELETE, )?; emit_delete_insns( @@ -472,7 +490,7 @@ fn emit_program_for_delete( &mut t_ctx, &plan.table_references, &[JoinOrderMember::default()], - None, + OperationMode::DELETE, )?; program.preassign_label_to_next_insn(after_main_loop_label); // Finalize program @@ -896,7 +914,15 @@ fn emit_program_for_update( }; *cursor_id }); - if let Some(ephemeral_plan) = ephemeral_plan { + let has_ephemeral_table = ephemeral_plan.is_some(); + + let target_table = if let Some(ephemeral_plan) = ephemeral_plan { + let table = ephemeral_plan + .table_references + .joined_tables() + .first() + .unwrap() + .clone(); program.emit_insn(Insn::OpenEphemeral { cursor_id: temp_cursor_id.unwrap(), is_table: true, @@ -904,7 +930,27 @@ fn emit_program_for_update( program.incr_nesting(); emit_program_for_select(program, resolver, ephemeral_plan)?; program.decr_nesting(); - } + Arc::new(table) + } else { + Arc::new( + plan.table_references + .joined_tables() + .first() + .unwrap() + .clone(), + ) + }; + + let mode = OperationMode::UPDATE(if has_ephemeral_table { + UpdateRowSource::PrebuiltEphemeralTable { + ephemeral_table_cursor_id: temp_cursor_id.expect( + "ephemeral table cursor id is always allocated if has_ephemeral_table is true", + ), + target_table: target_table.clone(), + } + } else { + UpdateRowSource::Normal + }); // Initialize the main loop init_loop( @@ -913,7 +959,7 @@ fn emit_program_for_update( &plan.table_references, &mut [], None, - OperationMode::UPDATE, + mode.clone(), &plan.where_clause, )?; @@ -950,8 +996,18 @@ fn emit_program_for_update( &[JoinOrderMember::default()], &plan.where_clause, temp_cursor_id, + mode.clone(), )?; + let target_table_cursor_id = + program.resolve_cursor_id(&CursorKey::table(target_table.internal_id)); + + let iteration_cursor_id = if has_ephemeral_table { + temp_cursor_id.unwrap() + } else { + target_table_cursor_id + }; + // Emit update instructions emit_update_insns( connection, @@ -959,7 +1015,9 @@ fn emit_program_for_update( &t_ctx, program, index_cursors, - temp_cursor_id, + iteration_cursor_id, + target_table_cursor_id, + target_table, )?; // Close the main loop @@ -968,7 +1026,7 @@ fn emit_program_for_update( &mut t_ctx, &plan.table_references, &[JoinOrderMember::default()], - temp_cursor_id, + mode.clone(), )?; program.preassign_label_to_next_insn(after_main_loop_label); @@ -980,20 +1038,28 @@ fn emit_program_for_update( } #[instrument(skip_all, level = Level::DEBUG)] +#[allow(clippy::too_many_arguments)] +/// Emits the instructions for the UPDATE loop. +/// +/// `iteration_cursor_id` is the cursor id of the table that is being iterated over. This can be either the table itself, an index, or an ephemeral table (see [crate::translate::plan::UpdatePlan]). +/// +/// `target_table_cursor_id` is the cursor id of the table that is being updated. +/// +/// `target_table` is the table that is being updated. fn emit_update_insns( connection: &Arc, plan: &mut UpdatePlan, t_ctx: &TranslateCtx, program: &mut ProgramBuilder, index_cursors: Vec<(usize, usize)>, - temp_cursor_id: Option, + iteration_cursor_id: usize, + target_table_cursor_id: usize, + target_table: Arc, ) -> crate::Result<()> { - // we can either use this obviously safe raw pointer or we can clone it - let table_ref: *const JoinedTable = plan.table_references.joined_tables().first().unwrap(); - let internal_id = unsafe { (*table_ref).internal_id }; + let internal_id = target_table.internal_id; let loop_labels = t_ctx.labels_main_loop.first().unwrap(); - let cursor_id = program.resolve_cursor_id(&CursorKey::table(internal_id)); - let (index, is_virtual) = match &unsafe { &*table_ref }.op { + let source_table = plan.table_references.joined_tables().first().unwrap(); + let (index, is_virtual) = match &source_table.op { Operation::Scan(Scan::BTreeTable { index, .. }) => ( index.as_ref().map(|index| { ( @@ -1003,7 +1069,7 @@ fn emit_update_insns( }), false, ), - Operation::Scan(_) => (None, unsafe { &*table_ref }.virtual_table().is_some()), + Operation::Scan(_) => (None, target_table.virtual_table().is_some()), Operation::Search(search) => match search { &Search::RowidEq { .. } | Search::Seek { index: None, .. } => (None, false), Search::Seek { @@ -1019,7 +1085,7 @@ fn emit_update_insns( }; let beg = program.alloc_registers( - unsafe { &*table_ref }.table.columns().len() + target_table.table.columns().len() + if is_virtual { 2 // two args before the relevant columns for VUpdate } else { @@ -1027,12 +1093,13 @@ fn emit_update_insns( }, ); program.emit_insn(Insn::RowId { - cursor_id: temp_cursor_id.unwrap_or(cursor_id), + cursor_id: iteration_cursor_id, dest: beg, }); // Check if rowid was provided (through INTEGER PRIMARY KEY as a rowid alias) - let rowid_alias_index = unsafe { &*table_ref } + let rowid_alias_index = target_table + .table .columns() .iter() .position(|c| c.is_rowid_alias); @@ -1054,15 +1121,18 @@ fn emit_update_insns( None }; - let check_rowid_not_exists_label = if has_user_provided_rowid { + let not_exists_check_required = + has_user_provided_rowid || iteration_cursor_id != target_table_cursor_id; + + let check_rowid_not_exists_label = if not_exists_check_required { Some(program.allocate_label()) } else { None }; - if has_user_provided_rowid { + if not_exists_check_required { program.emit_insn(Insn::NotExists { - cursor: cursor_id, + cursor: target_table_cursor_id, rowid_reg: beg, target_pc: check_rowid_not_exists_label.unwrap(), }); @@ -1089,7 +1159,7 @@ fn emit_update_insns( decrement_by: 1, }); } - let col_len = unsafe { &*table_ref }.columns().len(); + let col_len = target_table.table.columns().len(); // we scan a column at a time, loading either the column's values, or the new value // from the Set expression, into registers so we can emit a MakeRecord and update the row. @@ -1101,7 +1171,7 @@ fn emit_update_insns( } else { None }; - let table_name = unsafe { &*table_ref }.table.get_name(); + let table_name = target_table.table.get_name(); let start = if is_virtual { beg + 2 } else { beg + 1 }; @@ -1120,7 +1190,7 @@ fn emit_update_insns( }); } } - for (idx, table_column) in unsafe { &*table_ref }.columns().iter().enumerate() { + for (idx, table_column) in target_table.table.columns().iter().enumerate() { let target_reg = start + idx; if let Some((col_idx, expr)) = plan.set_clauses.iter().find(|(i, _)| *i == idx) { // Skip if this is the sentinel value @@ -1203,7 +1273,7 @@ fn emit_update_insns( program.emit_null(target_reg, None); } else if is_virtual { program.emit_insn(Insn::VColumn { - cursor_id, + cursor_id: target_table_cursor_id, column: idx, dest: target_reg, }); @@ -1217,7 +1287,7 @@ fn emit_update_insns( None } }) - .unwrap_or(&cursor_id); + .unwrap_or(&target_table_cursor_id); program.emit_column_or_rowid( cursor_id, column_idx_in_index.unwrap_or(idx), @@ -1238,12 +1308,12 @@ fn emit_update_insns( if connection.foreign_keys_enabled() { let rowid_new_reg = rowid_set_clause_reg.unwrap_or(beg); - if let Some(table_btree) = unsafe { &*table_ref }.btree() { + if let Some(table_btree) = target_table.table.btree() { stabilize_new_row_for_fk( program, &table_btree, &plan.set_clauses, - cursor_id, + target_table_cursor_id, start, rowid_new_reg, )?; @@ -1255,7 +1325,7 @@ fn emit_update_insns( &t_ctx.resolver, &table_btree, table_name, - cursor_id, + target_table_cursor_id, start, rowid_new_reg, &plan @@ -1278,7 +1348,7 @@ fn emit_update_insns( program, &t_ctx.resolver, &table_btree, - cursor_id, + target_table_cursor_id, beg, start, rowid_new_reg, @@ -1320,7 +1390,7 @@ fn emit_update_insns( // to refer to the new values, which are already loaded into registers starting at `start`. rewrite_where_for_update_registers( &mut new_where, - unsafe { &*table_ref }.columns(), + target_table.table.columns(), start, rowid_set_clause_reg.unwrap_or(beg), )?; @@ -1362,13 +1432,13 @@ fn emit_update_insns( let delete_start_reg = program.alloc_registers(num_regs); for (reg_offset, column_index) in index.columns.iter().enumerate() { program.emit_column_or_rowid( - cursor_id, + target_table_cursor_id, column_index.pos_in_table, delete_start_reg + reg_offset, ); } program.emit_insn(Insn::RowId { - cursor_id, + cursor_id: target_table_cursor_id, dest: delete_start_reg + num_regs - 1, }); program.emit_insn(Insn::IdxDelete { @@ -1400,7 +1470,8 @@ fn emit_update_insns( let rowid_reg = rowid_set_clause_reg.unwrap_or(beg); for (i, col) in index.columns.iter().enumerate() { - let col_in_table = unsafe { &*table_ref } + let col_in_table = target_table + .table .columns() .get(col.pos_in_table) .expect("column index out of bounds"); @@ -1435,7 +1506,7 @@ fn emit_update_insns( .columns .iter() .map(|ic| { - unsafe { &*table_ref }.columns()[ic.pos_in_table] + target_table.table.columns()[ic.pos_in_table] .affinity() .aff_mask() }) @@ -1505,7 +1576,7 @@ fn emit_update_insns( } } - if let Some(btree_table) = unsafe { &*table_ref }.btree() { + if let Some(btree_table) = target_table.table.btree() { if btree_table.is_strict { program.emit_insn(Insn::TypeCheck { start_reg: start, @@ -1528,7 +1599,7 @@ fn emit_update_insns( }); program.emit_insn(Insn::NotExists { - cursor: cursor_id, + cursor: target_table_cursor_id, rowid_reg: target_reg, target_pc: record_label, }); @@ -1536,7 +1607,8 @@ fn emit_update_insns( let description = if let Some(idx) = rowid_alias_index { String::from(table_name) + "." - + unsafe { &*table_ref } + + target_table + .table .columns() .get(idx) .unwrap() @@ -1557,7 +1629,8 @@ fn emit_update_insns( let record_reg = program.alloc_register(); - let affinity_str = unsafe { &*table_ref } + let affinity_str = target_table + .table .columns() .iter() .map(|col| col.affinity().aff_mask()) @@ -1571,9 +1644,9 @@ fn emit_update_insns( affinity_str: Some(affinity_str), }); - if has_user_provided_rowid { + if not_exists_check_required { program.emit_insn(Insn::NotExists { - cursor: cursor_id, + cursor: target_table_cursor_id, rowid_reg: beg, target_pc: check_rowid_not_exists_label.unwrap(), }); @@ -1587,7 +1660,7 @@ fn emit_update_insns( let cdc_rowid_before_reg = program.alloc_register(); if has_user_provided_rowid { program.emit_insn(Insn::RowId { - cursor_id, + cursor_id: target_table_cursor_id, dest: cdc_rowid_before_reg, }); Some(cdc_rowid_before_reg) @@ -1602,8 +1675,8 @@ fn emit_update_insns( let cdc_before_reg = if program.capture_data_changes_mode().has_before() { Some(emit_cdc_full_record( program, - unsafe { &*table_ref }.table.columns(), - cursor_id, + target_table.table.columns(), + target_table_cursor_id, cdc_rowid_before_reg.expect("cdc_rowid_before_reg must be set"), )) } else { @@ -1613,25 +1686,25 @@ fn emit_update_insns( // If we are updating the rowid, we cannot rely on overwrite on the // Insert instruction to update the cell. We need to first delete the current cell // and later insert the updated record - if has_user_provided_rowid { + if not_exists_check_required { program.emit_insn(Insn::Delete { - cursor_id, + cursor_id: target_table_cursor_id, table_name: table_name.to_string(), }); } program.emit_insn(Insn::Insert { - cursor: cursor_id, + cursor: target_table_cursor_id, key_reg: rowid_set_clause_reg.unwrap_or(beg), record_reg, - flag: if has_user_provided_rowid { + flag: if not_exists_check_required { // The previous Insn::NotExists and Insn::Delete seek to the old rowid, // so to insert a new user-provided rowid, we need to seek to the correct place. InsertFlags::new().require_seek().update_rowid_change() } else { InsertFlags::new() }, - table_name: unsafe { &*table_ref }.identifier.clone(), + table_name: target_table.identifier.clone(), }); // Emit RETURNING results if specified @@ -1651,7 +1724,7 @@ fn emit_update_insns( let cdc_after_reg = if program.capture_data_changes_mode().has_after() { Some(emit_cdc_patch_record( program, - &unsafe { &*table_ref }.table, + &target_table.table, start, record_reg, cdc_rowid_after_reg, @@ -1705,7 +1778,14 @@ fn emit_update_insns( emit_cdc_insns( program, &t_ctx.resolver, - OperationMode::UPDATE, + OperationMode::UPDATE(if plan.ephemeral_plan.is_some() { + UpdateRowSource::PrebuiltEphemeralTable { + ephemeral_table_cursor_id: iteration_cursor_id, + target_table: target_table.clone(), + } + } else { + UpdateRowSource::Normal + }), cdc_cursor_id, cdc_rowid_before_reg, cdc_before_reg, @@ -1715,10 +1795,10 @@ fn emit_update_insns( )?; } } - } else if unsafe { &*table_ref }.virtual_table().is_some() { + } else if target_table.virtual_table().is_some() { let arg_count = col_len + 2; program.emit_insn(Insn::VUpdate { - cursor_id, + cursor_id: target_table_cursor_id, arg_count, start_reg: beg, conflict_action: 0u16, @@ -1873,7 +1953,7 @@ pub fn emit_cdc_insns( let change_type = match operation_mode { OperationMode::INSERT => 1, - OperationMode::UPDATE | OperationMode::SELECT => 0, + OperationMode::UPDATE { .. } | OperationMode::SELECT => 0, OperationMode::DELETE => -1, }; program.emit_int(change_type, turso_cdc_registers + 2); diff --git a/core/translate/main_loop.rs b/core/translate/main_loop.rs index 1f9d0a069..32afc1c21 100644 --- a/core/translate/main_loop.rs +++ b/core/translate/main_loop.rs @@ -18,7 +18,9 @@ use super::{ Search, SeekDef, SelectPlan, TableReferences, WhereTerm, }, }; -use crate::translate::{collate::get_collseq_from_expr, window::emit_window_loop_source}; +use crate::translate::{ + collate::get_collseq_from_expr, emitter::UpdateRowSource, window::emit_window_loop_source, +}; use crate::{ schema::{Affinity, Index, IndexColumn, Table}, translate::{ @@ -129,8 +131,8 @@ pub fn init_loop( ); if matches!( - mode, - OperationMode::INSERT | OperationMode::UPDATE | OperationMode::DELETE + &mode, + OperationMode::INSERT | OperationMode::UPDATE { .. } | OperationMode::DELETE ) { assert!(tables.joined_tables().len() == 1); let changed_table = &tables.joined_tables()[0].table; @@ -202,9 +204,9 @@ pub fn init_loop( } } let (table_cursor_id, index_cursor_id) = - table.open_cursors(program, mode, t_ctx.resolver.schema)?; + table.open_cursors(program, mode.clone(), t_ctx.resolver.schema)?; match &table.op { - Operation::Scan(Scan::BTreeTable { index, .. }) => match (mode, &table.table) { + Operation::Scan(Scan::BTreeTable { index, .. }) => match (&mode, &table.table) { (OperationMode::SELECT, Table::BTree(btree)) => { let root_page = btree.root_page; if let Some(cursor_id) = table_cursor_id { @@ -259,14 +261,28 @@ pub fn init_loop( } } } - (OperationMode::UPDATE, Table::BTree(btree)) => { + (OperationMode::UPDATE(update_mode), Table::BTree(btree)) => { let root_page = btree.root_page; - program.emit_insn(Insn::OpenWrite { - cursor_id: table_cursor_id - .expect("table cursor is always opened in OperationMode::UPDATE"), - root_page: root_page.into(), - db: table.database_id, - }); + match &update_mode { + UpdateRowSource::Normal => { + program.emit_insn(Insn::OpenWrite { + cursor_id: table_cursor_id.expect( + "table cursor is always opened in OperationMode::UPDATE", + ), + root_page: root_page.into(), + db: table.database_id, + }); + } + UpdateRowSource::PrebuiltEphemeralTable { target_table, .. } => { + let target_table_cursor_id = program + .resolve_cursor_id(&CursorKey::table(target_table.internal_id)); + program.emit_insn(Insn::OpenWrite { + cursor_id: target_table_cursor_id, + root_page: target_table.btree().unwrap().root_page.into(), + db: table.database_id, + }); + } + } if let Some(index_cursor_id) = index_cursor_id { program.emit_insn(Insn::OpenWrite { cursor_id: index_cursor_id, @@ -281,7 +297,9 @@ pub fn init_loop( if let Table::Virtual(tbl) = &table.table { let is_write = matches!( mode, - OperationMode::INSERT | OperationMode::UPDATE | OperationMode::DELETE + OperationMode::INSERT + | OperationMode::UPDATE { .. } + | OperationMode::DELETE ); if is_write && tbl.readonly() { return Err(crate::LimboError::ReadOnly); @@ -303,7 +321,7 @@ pub fn init_loop( }); } } - OperationMode::DELETE | OperationMode::UPDATE => { + OperationMode::DELETE | OperationMode::UPDATE { .. } => { let table_cursor_id = table_cursor_id.expect( "table cursor is always opened in OperationMode::DELETE or OperationMode::UPDATE", ); @@ -316,7 +334,7 @@ pub fn init_loop( // For DELETE, we need to open all the indexes for writing // UPDATE opens these in emit_program_for_update() separately - if mode == OperationMode::DELETE { + if matches!(mode, OperationMode::DELETE) { if let Some(indexes) = t_ctx.resolver.schema.indexes.get(table.table.get_name()) { @@ -361,7 +379,7 @@ pub fn init_loop( db: table.database_id, }); } - OperationMode::UPDATE | OperationMode::DELETE => { + OperationMode::UPDATE { .. } | OperationMode::DELETE => { program.emit_insn(Insn::OpenWrite { cursor_id: index_cursor_id .expect("index cursor is always opened in Seek with index"), @@ -407,6 +425,7 @@ pub fn open_loop( join_order: &[JoinOrderMember], predicates: &[WhereTerm], temp_cursor_id: Option, + mode: OperationMode, ) -> Result<()> { for (join_index, join) in join_order.iter().enumerate() { let joined_table_index = join.original_idx; @@ -433,7 +452,7 @@ pub fn open_loop( } } - let (table_cursor_id, index_cursor_id) = table.resolve_cursors(program)?; + let (table_cursor_id, index_cursor_id) = table.resolve_cursors(program, mode.clone())?; match &table.op { Operation::Scan(scan) => { @@ -987,7 +1006,7 @@ pub fn close_loop( t_ctx: &mut TranslateCtx, tables: &TableReferences, join_order: &[JoinOrderMember], - temp_cursor_id: Option, + mode: OperationMode, ) -> Result<()> { // We close the loops for all tables in reverse order, i.e. innermost first. // OPEN t1 @@ -1005,20 +1024,28 @@ pub fn close_loop( .get(table_index) .expect("source has no loop labels"); - let (table_cursor_id, index_cursor_id) = table.resolve_cursors(program)?; + let (table_cursor_id, index_cursor_id) = table.resolve_cursors(program, mode.clone())?; match &table.op { Operation::Scan(scan) => { program.resolve_label(loop_labels.next, program.offset()); match scan { Scan::BTreeTable { iter_dir, .. } => { - let iteration_cursor_id = temp_cursor_id.unwrap_or_else(|| { + let iteration_cursor_id = if let OperationMode::UPDATE( + UpdateRowSource::PrebuiltEphemeralTable { + ephemeral_table_cursor_id, + .. + }, + ) = &mode + { + *ephemeral_table_cursor_id + } else { index_cursor_id.unwrap_or_else(|| { table_cursor_id.expect( "Either ephemeral or index or table cursor must be opened", ) }) - }); + }; if *iter_dir == IterationDirection::Backwards { program.emit_insn(Insn::Prev { cursor_id: iteration_cursor_id, @@ -1055,12 +1082,19 @@ pub fn close_loop( "Subqueries do not support index seeks" ); program.resolve_label(loop_labels.next, program.offset()); - let iteration_cursor_id = temp_cursor_id.unwrap_or_else(|| { - index_cursor_id.unwrap_or_else(|| { - table_cursor_id - .expect("Either ephemeral or index or table cursor must be opened") - }) - }); + let iteration_cursor_id = + if let OperationMode::UPDATE(UpdateRowSource::PrebuiltEphemeralTable { + ephemeral_table_cursor_id, + .. + }) = &mode + { + *ephemeral_table_cursor_id + } else { + index_cursor_id.unwrap_or_else(|| { + table_cursor_id + .expect("Either ephemeral or index or table cursor must be opened") + }) + }; // Rowid equality point lookups are handled with a SeekRowid instruction which does not loop, so there is no need to emit a Next instruction. if !matches!(search, Search::RowidEq { .. }) { let iter_dir = match search { diff --git a/core/translate/plan.rs b/core/translate/plan.rs index 6782607b1..1bc9a91d5 100644 --- a/core/translate/plan.rs +++ b/core/translate/plan.rs @@ -4,7 +4,10 @@ use turso_parser::ast::{self, FrameBound, FrameClause, FrameExclude, FrameMode, use crate::{ function::AggFunc, schema::{BTreeTable, Column, FromClauseSubquery, Index, Schema, Table}, - translate::{collate::get_collseq_from_expr, optimizer::constraints::SeekRangeConstraint}, + translate::{ + collate::get_collseq_from_expr, emitter::UpdateRowSource, + optimizer::constraints::SeekRangeConstraint, + }, vdbe::{ builder::{CursorKey, CursorType, ProgramBuilder}, insn::{IdxInsertFlags, Insn}, @@ -910,22 +913,43 @@ impl JoinedTable { Table::BTree(btree) => { let use_covering_index = self.utilizes_covering_index(); let index_is_ephemeral = index.is_some_and(|index| index.ephemeral); - let table_not_required = - OperationMode::SELECT == mode && use_covering_index && !index_is_ephemeral; + let table_not_required = matches!(mode, OperationMode::SELECT) + && use_covering_index + && !index_is_ephemeral; let table_cursor_id = if table_not_required { None } else { - // Check if this is a materialized view - let cursor_type = - if let Some(view_mutex) = schema.get_materialized_view(&btree.name) { - CursorType::MaterializedView(btree.clone(), view_mutex) - } else { - CursorType::BTreeTable(btree.clone()) - }; - Some( - program - .alloc_cursor_id_keyed(CursorKey::table(self.internal_id), cursor_type), - ) + if let OperationMode::UPDATE(UpdateRowSource::PrebuiltEphemeralTable { + target_table, + .. + }) = &mode + { + // The cursor for the ephemeral table was already allocated earlier. Let's allocate one for the target table though. + Some(program.alloc_cursor_id_keyed( + CursorKey::table(target_table.internal_id), + match &target_table.table { + Table::BTree(btree) => CursorType::BTreeTable(btree.clone()), + Table::Virtual(virtual_table) => { + CursorType::VirtualTable(virtual_table.clone()) + } + _ => unreachable!("target table must be a btree or virtual table"), + }, + )) + } else { + // Check if this is a materialized view + let cursor_type = + if let Some(view_mutex) = schema.get_materialized_view(&btree.name) { + CursorType::MaterializedView(btree.clone(), view_mutex) + } else { + CursorType::BTreeTable(btree.clone()) + }; + Some( + program.alloc_cursor_id_keyed( + CursorKey::table(self.internal_id), + cursor_type, + ), + ) + } }; let index_cursor_id = index.map(|index| { program.alloc_cursor_id_keyed( @@ -951,9 +975,19 @@ impl JoinedTable { pub fn resolve_cursors( &self, program: &mut ProgramBuilder, + mode: OperationMode, ) -> Result<(Option, Option)> { let index = self.op.index(); - let table_cursor_id = program.resolve_cursor_id_safe(&CursorKey::table(self.internal_id)); + let table_cursor_id = + if let OperationMode::UPDATE(UpdateRowSource::PrebuiltEphemeralTable { + target_table, + .. + }) = &mode + { + program.resolve_cursor_id_safe(&CursorKey::table(target_table.internal_id)) + } else { + program.resolve_cursor_id_safe(&CursorKey::table(self.internal_id)) + }; let index_cursor_id = index.map(|index| { program.resolve_cursor_id(&CursorKey::index(self.internal_id, index.clone())) }); diff --git a/core/translate/select.rs b/core/translate/select.rs index 2ed2daa6d..afcdc4726 100644 --- a/core/translate/select.rs +++ b/core/translate/select.rs @@ -4,7 +4,7 @@ use super::plan::{ Search, TableReferences, WhereTerm, Window, }; use crate::schema::Table; -use crate::translate::emitter::Resolver; +use crate::translate::emitter::{OperationMode, Resolver}; use crate::translate::expr::{bind_and_rewrite_expr, BindingBehavior, ParamState}; use crate::translate::group_by::compute_group_by_sort_order; use crate::translate::optimizer::optimize_plan; @@ -674,7 +674,7 @@ pub fn emit_simple_count( .joined_tables() .first() .unwrap() - .resolve_cursors(program)?; + .resolve_cursors(program, OperationMode::SELECT)?; let cursor_id = { match cursors { diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs index 868f3a933..a3df77bc6 100644 --- a/core/translate/upsert.rs +++ b/core/translate/upsert.rs @@ -6,6 +6,7 @@ use turso_parser::ast::{self, Upsert}; use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY; use crate::schema::ROWID_SENTINEL; +use crate::translate::emitter::UpdateRowSource; use crate::translate::expr::{walk_expr, WalkControl}; use crate::translate::fkeys::{emit_fk_child_update_counters, emit_parent_pk_change_checks}; use crate::translate::insert::format_unique_violation_desc; @@ -814,7 +815,7 @@ pub fn emit_upsert( emit_cdc_insns( program, resolver, - OperationMode::UPDATE, + OperationMode::UPDATE(UpdateRowSource::Normal), cdc_id, conflict_rowid_reg, before_rec, From 3465a01bf5d4812f6d169a9c0fa8176fd3dedbb4 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 15:31:43 +0300 Subject: [PATCH 218/428] fuzz: sometimes make UPDATEd value a function of the old value --- tests/integration/fuzz/mod.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index e9b81cd33..803b3046d 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -1952,7 +1952,16 @@ mod tests { }; let query = if do_update { - let new_y = rng.random_range(0..1000); + let new_y = if rng.random_bool(0.5) { + // Update to a constant value + rng.random_range(0..1000).to_string() + } else { + let source_col = rng.random_range(0..num_cols); + // Update to a value that is a function of the another column + let operator = *["+", "-"].choose(&mut rng).unwrap(); + let amount = rng.random_range(0..1000); + format!("c{source_col} {operator} {amount}") + }; format!("UPDATE t SET c{affected_col} = {new_y} {where_clause}") } else { format!("DELETE FROM t {where_clause}") From 495e66e12bc590079e9f67eac3bd9a9f95e9cad2 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 15:32:22 +0300 Subject: [PATCH 219/428] fuzz: run rusqlite integrity check after each DML operation --- tests/integration/fuzz/mod.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 803b3046d..1cc99e8ff 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -2000,6 +2000,19 @@ mod tests { "Different results after mutation! limbo: {limbo_rows:?}, sqlite: {sqlite_rows:?}, seed: {seed}, query: {query}", ); + // Run integrity check on limbo db using rusqlite + if let Err(e) = rusqlite_integrity_check(&limbo_db.path) { + println!("{table_def};"); + for t in indexes.iter() { + println!("{t};"); + } + for t in dml_statements.iter() { + println!("{t};"); + } + println!("{query};"); + panic!("seed: {seed}, error: {e}"); + } + if sqlite_rows.is_empty() { break; } From 4b80678898cb3b43bdd744825c5a21252308bbe8 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 15:32:48 +0300 Subject: [PATCH 220/428] Allow case where cursor for btree is already opened When populating an ephemeral table for UPDATE, it may open a cursor on the (permanent) table - in this case we don't need to open it again in the UPDATE loop --- core/translate/plan.rs | 5 +++-- core/vdbe/builder.rs | 12 ++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/core/translate/plan.rs b/core/translate/plan.rs index 1bc9a91d5..21653d8af 100644 --- a/core/translate/plan.rs +++ b/core/translate/plan.rs @@ -924,8 +924,9 @@ impl JoinedTable { .. }) = &mode { - // The cursor for the ephemeral table was already allocated earlier. Let's allocate one for the target table though. - Some(program.alloc_cursor_id_keyed( + // The cursor for the ephemeral table was already allocated earlier. Let's allocate one for the target table, + // in case it wasn't already allocated when populating the ephemeral table. + Some(program.alloc_cursor_id_keyed_if_not_exists( CursorKey::table(target_table.internal_id), match &target_table.table { Table::BTree(btree) => CursorType::BTreeTable(btree.clone()), diff --git a/core/vdbe/builder.rs b/core/vdbe/builder.rs index 82710377e..667fa6b9d 100644 --- a/core/vdbe/builder.rs +++ b/core/vdbe/builder.rs @@ -311,6 +311,18 @@ impl ProgramBuilder { self._alloc_cursor_id(Some(key), cursor_type) } + pub fn alloc_cursor_id_keyed_if_not_exists( + &mut self, + key: CursorKey, + cursor_type: CursorType, + ) -> usize { + if let Some(cursor_id) = self.resolve_cursor_id_safe(&key) { + cursor_id + } else { + self._alloc_cursor_id(Some(key), cursor_type) + } + } + pub fn alloc_cursor_id(&mut self, cursor_type: CursorType) -> usize { self._alloc_cursor_id(None, cursor_type) } From 0173d31c042f896dc270d300fa10dbb5af0ce4b1 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 15:51:31 +0300 Subject: [PATCH 221/428] clippy: collapse nested if --- core/translate/plan.rs | 61 ++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/core/translate/plan.rs b/core/translate/plan.rs index 21653d8af..aa506ca76 100644 --- a/core/translate/plan.rs +++ b/core/translate/plan.rs @@ -918,40 +918,37 @@ impl JoinedTable { && !index_is_ephemeral; let table_cursor_id = if table_not_required { None + } else if let OperationMode::UPDATE(UpdateRowSource::PrebuiltEphemeralTable { + target_table, + .. + }) = &mode + { + // The cursor for the ephemeral table was already allocated earlier. Let's allocate one for the target table, + // in case it wasn't already allocated when populating the ephemeral table. + Some(program.alloc_cursor_id_keyed_if_not_exists( + CursorKey::table(target_table.internal_id), + match &target_table.table { + Table::BTree(btree) => CursorType::BTreeTable(btree.clone()), + Table::Virtual(virtual_table) => { + CursorType::VirtualTable(virtual_table.clone()) + } + _ => unreachable!("target table must be a btree or virtual table"), + }, + )) } else { - if let OperationMode::UPDATE(UpdateRowSource::PrebuiltEphemeralTable { - target_table, - .. - }) = &mode - { - // The cursor for the ephemeral table was already allocated earlier. Let's allocate one for the target table, - // in case it wasn't already allocated when populating the ephemeral table. - Some(program.alloc_cursor_id_keyed_if_not_exists( - CursorKey::table(target_table.internal_id), - match &target_table.table { - Table::BTree(btree) => CursorType::BTreeTable(btree.clone()), - Table::Virtual(virtual_table) => { - CursorType::VirtualTable(virtual_table.clone()) - } - _ => unreachable!("target table must be a btree or virtual table"), - }, - )) - } else { - // Check if this is a materialized view - let cursor_type = - if let Some(view_mutex) = schema.get_materialized_view(&btree.name) { - CursorType::MaterializedView(btree.clone(), view_mutex) - } else { - CursorType::BTreeTable(btree.clone()) - }; - Some( - program.alloc_cursor_id_keyed( - CursorKey::table(self.internal_id), - cursor_type, - ), - ) - } + // Check if this is a materialized view + let cursor_type = + if let Some(view_mutex) = schema.get_materialized_view(&btree.name) { + CursorType::MaterializedView(btree.clone(), view_mutex) + } else { + CursorType::BTreeTable(btree.clone()) + }; + Some( + program + .alloc_cursor_id_keyed(CursorKey::table(self.internal_id), cursor_type), + ) }; + let index_cursor_id = index.map(|index| { program.alloc_cursor_id_keyed( CursorKey::index(self.internal_id, index.clone()), From 87434b8a727867d90d60942355e52ca9f2246fb2 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 16:11:43 +0300 Subject: [PATCH 222/428] Do not count DELETEs occuring in an UPDATE stmt as separate changes --- core/translate/analyze.rs | 1 + core/translate/emitter.rs | 3 +++ core/translate/index.rs | 1 + core/translate/schema.rs | 3 +++ core/translate/upsert.rs | 1 + core/translate/view.rs | 2 ++ core/vdbe/execute.rs | 13 +++++++++---- core/vdbe/explain.rs | 2 +- core/vdbe/insn.rs | 2 ++ 9 files changed, 23 insertions(+), 5 deletions(-) diff --git a/core/translate/analyze.rs b/core/translate/analyze.rs index ece9a558b..665e43e0f 100644 --- a/core/translate/analyze.rs +++ b/core/translate/analyze.rs @@ -97,6 +97,7 @@ pub fn translate_analyze( program.emit_insn(Insn::Delete { cursor_id, table_name: "sqlite_stat1".to_string(), + is_part_of_update: false, }); program.emit_insn(Insn::Next { cursor_id, diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 67ead10dc..0d82c8965 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -859,6 +859,7 @@ fn emit_delete_insns( program.emit_insn(Insn::Delete { cursor_id: main_table_cursor_id, table_name: table_name.to_string(), + is_part_of_update: false, }); if let Some(index) = iteration_index { @@ -867,6 +868,7 @@ fn emit_delete_insns( program.emit_insn(Insn::Delete { cursor_id: iteration_index_cursor, table_name: index.name.clone(), + is_part_of_update: false, }); } } @@ -1690,6 +1692,7 @@ fn emit_update_insns( program.emit_insn(Insn::Delete { cursor_id: target_table_cursor_id, table_name: table_name.to_string(), + is_part_of_update: true, }); } diff --git a/core/translate/index.rs b/core/translate/index.rs index 5fa07dc0d..b680f560c 100644 --- a/core/translate/index.rs +++ b/core/translate/index.rs @@ -595,6 +595,7 @@ pub fn translate_drop_index( program.emit_insn(Insn::Delete { cursor_id: sqlite_schema_cursor_id, table_name: "sqlite_schema".to_string(), + is_part_of_update: false, }); program.resolve_label(next_label, program.offset()); diff --git a/core/translate/schema.rs b/core/translate/schema.rs index 53f234c76..5bf0e3682 100644 --- a/core/translate/schema.rs +++ b/core/translate/schema.rs @@ -745,6 +745,7 @@ pub fn translate_drop_table( program.emit_insn(Insn::Delete { cursor_id: sqlite_schema_cursor_id_0, table_name: SQLITE_TABLEID.to_string(), + is_part_of_update: false, }); program.resolve_label(next_label, program.offset()); @@ -945,6 +946,7 @@ pub fn translate_drop_table( program.emit_insn(Insn::Delete { cursor_id: sqlite_schema_cursor_id_1, table_name: SQLITE_TABLEID.to_string(), + is_part_of_update: false, }); program.emit_insn(Insn::Insert { cursor: sqlite_schema_cursor_id_1, @@ -1005,6 +1007,7 @@ pub fn translate_drop_table( program.emit_insn(Insn::Delete { cursor_id: seq_cursor_id, table_name: "sqlite_sequence".to_string(), + is_part_of_update: false, }); program.resolve_label(continue_loop_label, program.offset()); diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs index a3df77bc6..0418402dd 100644 --- a/core/translate/upsert.rs +++ b/core/translate/upsert.rs @@ -726,6 +726,7 @@ pub fn emit_upsert( program.emit_insn(Insn::Delete { cursor_id: tbl_cursor_id, table_name: table.get_name().to_string(), + is_part_of_update: true, }); program.emit_insn(Insn::Insert { cursor: tbl_cursor_id, diff --git a/core/translate/view.rs b/core/translate/view.rs index b9b5ddcc0..47f0822d7 100644 --- a/core/translate/view.rs +++ b/core/translate/view.rs @@ -113,6 +113,7 @@ pub fn translate_create_materialized_view( program.emit_insn(Insn::Delete { cursor_id: view_cursor_id, table_name: normalized_view_name.clone(), + is_part_of_update: false, }); program.emit_insn(Insn::Next { cursor_id: view_cursor_id, @@ -409,6 +410,7 @@ pub fn translate_drop_view( program.emit_insn(Insn::Delete { cursor_id: sqlite_schema_cursor_id, table_name: "sqlite_schema".to_string(), + is_part_of_update: false, }); program.resolve_label(skip_delete_label, program.offset()); diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index e56bd4fdb..82b850ec1 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -6046,7 +6046,8 @@ pub fn op_delete( load_insn!( Delete { cursor_id, - table_name + table_name, + is_part_of_update, }, insn ); @@ -6131,9 +6132,13 @@ pub fn op_delete( } state.op_delete_state.sub_state = OpDeleteSubState::MaybeCaptureRecord; - program - .n_change - .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + if !is_part_of_update { + // DELETEs do not count towards the total changes if they are part of an UPDATE statement, + // i.e. the DELETE and subsequent INSERT of a row are the same "change". + program + .n_change + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + } state.pc += 1; Ok(InsnFunctionStepResult::Step) } diff --git a/core/vdbe/explain.rs b/core/vdbe/explain.rs index ca5b74ef3..69434de17 100644 --- a/core/vdbe/explain.rs +++ b/core/vdbe/explain.rs @@ -1141,7 +1141,7 @@ pub fn insn_to_row( flag.0 as u16, format!("intkey=r[{key_reg}] data=r[{record_reg}]"), ), - Insn::Delete { cursor_id, table_name } => ( + Insn::Delete { cursor_id, table_name, .. } => ( "Delete", *cursor_id as i32, 0, diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index 64bae3947..a774edad5 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -791,6 +791,8 @@ pub enum Insn { Delete { cursor_id: CursorID, table_name: String, + /// Whether the DELETE is part of an UPDATE statement. If so, it doesn't count towards the change counter. + is_part_of_update: bool, }, /// If P5 is not zero, then raise an SQLITE_CORRUPT_INDEX error if no matching index entry From b3be21f472421e581f589ed659f8ddef4ceb365a Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 16:15:20 +0300 Subject: [PATCH 223/428] Do not count ephemeral table INSERTs as changes --- core/translate/result_row.rs | 4 +++- core/vdbe/execute.rs | 5 ++++- core/vdbe/insn.rs | 6 ++++++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/core/translate/result_row.rs b/core/translate/result_row.rs index c087a0abf..2ec60e641 100644 --- a/core/translate/result_row.rs +++ b/core/translate/result_row.rs @@ -133,7 +133,9 @@ pub fn emit_result_row_and_limit( key_reg: result_columns_start_reg + (plan.result_columns.len() - 1), // Rowid reg is the last register record_reg, // since we are not doing an Insn::NewRowid or an Insn::NotExists here, we need to seek to ensure the insertion happens in the correct place. - flag: InsertFlags::new().require_seek(), + flag: InsertFlags::new() + .require_seek() + .is_ephemeral_table_insert(), table_name: table.name.clone(), }); } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 82b850ec1..2c4e159d7 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -5892,7 +5892,10 @@ pub fn op_insert( let cursor = cursor.as_btree_mut(); cursor.root_page() }; - if root_page != 1 && table_name != "sqlite_sequence" { + if root_page != 1 + && table_name != "sqlite_sequence" + && !flag.has(InsertFlags::EPHEMERAL_TABLE_INSERT) + { state.op_insert_state.sub_state = OpInsertSubState::UpdateLastRowid; } else { let schema = program.connection.schema.read(); diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index a774edad5..bf7bf89b3 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -112,6 +112,7 @@ pub struct InsertFlags(pub u8); impl InsertFlags { pub const UPDATE_ROWID_CHANGE: u8 = 0x01; // Flag indicating this is part of an UPDATE statement where the row's rowid is changed pub const REQUIRE_SEEK: u8 = 0x02; // Flag indicating that a seek is required to insert the row + pub const EPHEMERAL_TABLE_INSERT: u8 = 0x04; // Flag indicating that this is an insert into an ephemeral table pub fn new() -> Self { InsertFlags(0) @@ -130,6 +131,11 @@ impl InsertFlags { self.0 |= InsertFlags::UPDATE_ROWID_CHANGE; self } + + pub fn is_ephemeral_table_insert(mut self) -> Self { + self.0 |= InsertFlags::EPHEMERAL_TABLE_INSERT; + self + } } #[derive(Clone, Copy, Debug)] From b3b07252dc7d63393caf614dbf961daec189ad25 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 16:25:05 +0300 Subject: [PATCH 224/428] Add TCL smoke tests for UPDATEs affecting indexes --- testing/update.test | 104 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 103 insertions(+), 1 deletion(-) diff --git a/testing/update.test b/testing/update.test index 258cecde3..964dfba29 100755 --- a/testing/update.test +++ b/testing/update.test @@ -395,4 +395,106 @@ do_execsql_test_on_specific_db {:memory:} update-alias-visibility-in-where-claus update t as tt set a = 1 where tt.a = 0; select * from t; } {1 -5} \ No newline at end of file +5} + +# Basic UPDATE tests with indexes +do_execsql_test_on_specific_db {:memory:} update-non-indexed-column { + CREATE TABLE t (a INTEGER, b INTEGER); + CREATE INDEX idx_a ON t(a); + INSERT INTO t VALUES (1, 10), (2, 20), (3, 30); + UPDATE t SET b = 100 WHERE a = 2; + SELECT * FROM t ORDER BY a; +} {1|10 +2|100 +3|30} + +do_execsql_test_on_specific_db {:memory:} update-indexed-column { + CREATE TABLE t (a INTEGER, b INTEGER); + CREATE INDEX idx_a ON t(a); + INSERT INTO t VALUES (1, 10), (2, 20), (3, 30); + UPDATE t SET a = 5 WHERE a = 2; + SELECT * FROM t ORDER BY a; +} {1|10 +3|30 +5|20} + +do_execsql_test_on_specific_db {:memory:} update-both-indexed-and-non-indexed { + CREATE TABLE t (a INTEGER, b INTEGER, c INTEGER); + CREATE INDEX idx_a ON t(a); + INSERT INTO t VALUES (1, 10, 100), (2, 20, 200), (3, 30, 300); + UPDATE t SET a = 5, b = 50, c = 500 WHERE a = 2; + SELECT * FROM t ORDER BY a; +} {1|10|100 +3|30|300 +5|50|500} + +do_execsql_test_on_specific_db {:memory:} update-multiple-indexes { + CREATE TABLE t (a INTEGER, b INTEGER, c INTEGER); + CREATE INDEX idx_a ON t(a); + CREATE INDEX idx_b ON t(b); + INSERT INTO t VALUES (1, 10, 100), (2, 20, 200), (3, 30, 300); + UPDATE t SET a = 5, b = 50 WHERE c = 200; + SELECT * FROM t ORDER BY a; +} {1|10|100 +3|30|300 +5|50|200} + +do_execsql_test_on_specific_db {:memory:} update-all-rows-with-index { + CREATE TABLE t (a INTEGER, b INTEGER); + CREATE INDEX idx_a ON t(a); + INSERT INTO t VALUES (1, 10), (2, 20), (3, 30); + UPDATE t SET a = a + 10; + SELECT * FROM t ORDER BY a; +} {11|10 +12|20 +13|30} + +# Range update tests +do_execsql_test_on_specific_db {:memory:} update-range-non-indexed { + CREATE TABLE t (a INTEGER, b INTEGER); + CREATE INDEX idx_a ON t(a); + INSERT INTO t VALUES (1, 10), (2, 20), (3, 30), (4, 40), (5, 50); + UPDATE t SET b = 999 WHERE a >= 2 AND a <= 4; + SELECT * FROM t ORDER BY a; +} {1|10 +2|999 +3|999 +4|999 +5|50} + +do_execsql_test_on_specific_db {:memory:} update-range-indexed-column { + CREATE TABLE t (a INTEGER, b INTEGER); + CREATE INDEX idx_a ON t(a); + INSERT INTO t VALUES (1, 10), (2, 20), (3, 30), (4, 40), (5, 50); + UPDATE t SET a = a + 100 WHERE a >= 2 AND a < 4; + SELECT * FROM t ORDER BY a; +} {1|10 +4|40 +5|50 +102|20 +103|30} + +do_execsql_test_on_specific_db {:memory:} update-range-both-columns { + CREATE TABLE t (a INTEGER, b INTEGER, c INTEGER); + CREATE INDEX idx_a ON t(a); + INSERT INTO t VALUES (1, 10, 100), (2, 20, 200), (3, 30, 300), (4, 40, 400), (5, 50, 500); + UPDATE t SET a = a * 10, b = b * 2 WHERE a > 1 AND a < 5; + SELECT * FROM t ORDER BY a; +} {1|10|100 +5|50|500 +20|40|200 +30|60|300 +40|80|400} + +do_execsql_test_on_specific_db {:memory:} update-range-multiple-indexes { + CREATE TABLE t (a INTEGER, b INTEGER, c INTEGER); + CREATE INDEX idx_a ON t(a); + CREATE INDEX idx_b ON t(b); + INSERT INTO t VALUES (1, 10, 100), (2, 20, 200), (3, 30, 300), (4, 40, 400); + UPDATE t SET a = a + 10, b = b + 100 WHERE a BETWEEN 2 AND 3; + SELECT * FROM t ORDER BY a; +} {1|10|100 +4|40|400 +12|120|200 +13|130|300} + From 0ae4425e4c4caa3873e69a9f561f1fd5c1342a82 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 17:23:21 +0300 Subject: [PATCH 225/428] fuzz: create multi-column indices in table_index_mutation_fuzz --- tests/integration/fuzz/mod.rs | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 1cc99e8ff..dddf288d7 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -1844,7 +1844,7 @@ mod tests { pub fn table_index_mutation_fuzz() { let _ = env_logger::try_init(); let (mut rng, seed) = rng_from_time(); - println!("index_scan_single_key_mutation_fuzz seed: {seed}"); + println!("table_index_mutation_fuzz seed: {seed}"); const OUTER_ITERATIONS: usize = 100; for i in 0..OUTER_ITERATIONS { @@ -1866,9 +1866,33 @@ mod tests { let table_def = format!("CREATE TABLE t ({table_def})"); let num_indexes = rng.random_range(0..=num_cols); - let indexes = (0..num_indexes) - .map(|i| format!("CREATE INDEX idx_{i} ON t(c{i})")) - .collect::>(); + let mut indexes = Vec::new(); + for i in 0..num_indexes { + // Decide if this should be a single-column or multi-column index + let is_multi_column = rng.random_bool(0.5) && num_cols > 1; + + if is_multi_column { + // Create a multi-column index with 2-3 columns + let num_index_cols = rng.random_range(2..=3.min(num_cols)); + let mut index_cols = Vec::new(); + let mut available_cols: Vec = (0..num_cols).collect(); + + for _ in 0..num_index_cols { + let idx = rng.random_range(0..available_cols.len()); + let col = available_cols.remove(idx); + index_cols.push(format!("c{col}")); + } + + indexes.push(format!( + "CREATE INDEX idx_{i} ON t({})", + index_cols.join(", ") + )); + } else { + // Single-column index + let col = rng.random_range(0..num_cols); + indexes.push(format!("CREATE INDEX idx_{i} ON t(c{col})")); + } + } // Create tables and indexes in both databases let limbo_conn = limbo_db.connect_limbo(); From 3cbdf433a9c2db477aaea1a6f763832a1fa497b1 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 14 Oct 2025 17:26:21 +0300 Subject: [PATCH 226/428] fuzz: update multiple columns in table_index_mutation_fuzz --- tests/integration/fuzz/mod.rs | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index dddf288d7..428b2638c 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -1976,17 +1976,22 @@ mod tests { }; let query = if do_update { - let new_y = if rng.random_bool(0.5) { - // Update to a constant value - rng.random_range(0..1000).to_string() - } else { - let source_col = rng.random_range(0..num_cols); - // Update to a value that is a function of the another column - let operator = *["+", "-"].choose(&mut rng).unwrap(); - let amount = rng.random_range(0..1000); - format!("c{source_col} {operator} {amount}") - }; - format!("UPDATE t SET c{affected_col} = {new_y} {where_clause}") + let num_updates = rng.random_range(1..=num_cols); + let mut values = Vec::new(); + for _ in 0..num_updates { + let new_y = if rng.random_bool(0.5) { + // Update to a constant value + rng.random_range(0..1000).to_string() + } else { + let source_col = rng.random_range(0..num_cols); + // Update to a value that is a function of the another column + let operator = *["+", "-"].choose(&mut rng).unwrap(); + let amount = rng.random_range(0..1000); + format!("c{source_col} {operator} {amount}") + }; + values.push(format!("c{affected_col} = {new_y}")); + } + format!("UPDATE t SET {} {where_clause}", values.join(", ")) } else { format!("DELETE FROM t {where_clause}") }; From 796ff4b2ac60409089a271c1163eb543e6551825 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Tue, 14 Oct 2025 20:44:04 +0530 Subject: [PATCH 227/428] resolve explicit aliases for cannonical col binding --- core/translate/expr.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index fc57a11e0..cf98d3d9c 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -3543,12 +3543,11 @@ pub fn bind_and_rewrite_expr<'a>( if binding_behavior == BindingBehavior::TryCanonicalColumnsFirst { if let Some(result_columns) = result_columns { for result_column in result_columns.iter() { - if result_column - .name(referenced_tables) - .is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id)) - { - *expr = result_column.expr.clone(); - return Ok(WalkControl::Continue); + if let Some(alias) = &result_column.alias { + if alias.eq_ignore_ascii_case(&normalized_id) { + *expr = result_column.expr.clone(); + return Ok(WalkControl::Continue); + } } } } From e64aa5d0142a253b6e892834a153a7b8925bb553 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 2 Oct 2025 11:56:35 -0300 Subject: [PATCH 228/428] add tokio console to write-throughput test --- Cargo.lock | 484 +++++++++++++++++++++++++++++- Cargo.toml | 1 + perf/throughput/turso/Cargo.toml | 4 + perf/throughput/turso/src/main.rs | 17 +- 4 files changed, 497 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dd15bab8d..9e0a26c13 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -255,18 +255,104 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "atomic" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.74" @@ -291,6 +377,12 @@ dependencies = [ "backtrace", ] +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + [[package]] name = "base64" version = "0.22.1" @@ -639,6 +731,45 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "console-api" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857" +dependencies = [ + "futures-core", + "prost 0.13.5", + "prost-types", + "tonic", + "tracing-core", +] + +[[package]] +name = "console-subscriber" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01" +dependencies = [ + "console-api", + "crossbeam-channel", + "crossbeam-utils", + "futures-task", + "hdrhistogram", + "humantime", + "hyper-util", + "prost 0.13.5", + "prost-types", + "serde", + "serde_json", + "thread_local", + "tokio", + "tokio-stream", + "tonic", + "tracing", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "console_error_panic_hook" version = "0.1.7" @@ -1666,6 +1797,25 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.11.1", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "half" version = "2.5.0" @@ -1700,6 +1850,19 @@ dependencies = [ "hashbrown 0.15.2", ] +[[package]] +name = "hdrhistogram" +version = "7.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" +dependencies = [ + "base64 0.21.7", + "byteorder", + "flate2", + "nom", + "num-traits", +] + [[package]] name = "heck" version = "0.5.0" @@ -1744,6 +1907,104 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "hyper" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "libc", + "pin-project-lite", + "socket2 0.6.0", + "tokio", + "tower-service", + "tracing", +] + [[package]] name = "iana-time-zone" version = "0.1.62" @@ -2459,6 +2720,12 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "md-5" version = "0.10.6" @@ -2548,6 +2815,12 @@ dependencies = [ "libmimalloc-sys", ] +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "minimad" version = "0.13.1" @@ -2557,6 +2830,12 @@ dependencies = [ "once_cell", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.8.5" @@ -2672,6 +2951,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "notify" version = "8.0.0" @@ -2902,6 +3191,26 @@ dependencies = [ "sha2", ] +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "pin-project-lite" version = "0.2.16" @@ -2926,7 +3235,7 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eac26e981c03a6e53e0aee43c113e3202f5581d5360dae7bd2c70e800dd0451d" dependencies = [ - "base64", + "base64 0.22.1", "indexmap 2.11.1", "quick-xml 0.32.0", "serde", @@ -3090,6 +3399,16 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive 0.13.5", +] + [[package]] name = "prost" version = "0.14.1" @@ -3097,7 +3416,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.14.1", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.100", ] [[package]] @@ -3113,6 +3445,15 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost 0.13.5", +] + [[package]] name = "py-turso" version = "0.3.0-pre.1" @@ -3818,6 +4159,16 @@ dependencies = [ "serde", ] +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "socket2" version = "0.6.0" @@ -3984,6 +4335,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + [[package]] name = "synstructure" version = "0.13.1" @@ -4224,8 +4581,9 @@ dependencies = [ "pin-project-lite", "signal-hook-registry", "slab", - "socket2", + "socket2 0.6.0", "tokio-macros", + "tracing", "windows-sys 0.59.0", ] @@ -4240,6 +4598,30 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + [[package]] name = "toml" version = "0.8.22" @@ -4282,6 +4664,82 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost 0.13.5", + "socket2 0.5.10", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + [[package]] name = "tracing" version = "0.1.41" @@ -4355,6 +4813,12 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + [[package]] name = "turso" version = "0.3.0-pre.1" @@ -4571,13 +5035,13 @@ dependencies = [ name = "turso_sync_engine" version = "0.3.0-pre.1" dependencies = [ - "base64", + "base64 0.22.1", "bytes", "ctor 0.4.2", "futures", "genawaiter", "http", - "prost", + "prost 0.14.1", "rand 0.9.2", "rand_chacha 0.9.0", "roaring", @@ -4822,6 +5286,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -5201,6 +5674,7 @@ name = "write-throughput" version = "0.1.0" dependencies = [ "clap", + "console-subscriber", "futures", "tokio", "tracing-subscriber", diff --git a/Cargo.toml b/Cargo.toml index f62181620..b46ff6619 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -99,6 +99,7 @@ regex-syntax = { version = "0.8.5", default-features = false } similar = { version = "2.7.0" } similar-asserts = { version = "1.7.0" } bitmaps = { version = "3.2.1", default-features = false } +console-subscriber = { version = "0.4.1" } [profile.dev.package.similar] opt-level = 3 diff --git a/perf/throughput/turso/Cargo.toml b/perf/throughput/turso/Cargo.toml index d275cc223..8f958add1 100644 --- a/perf/throughput/turso/Cargo.toml +++ b/perf/throughput/turso/Cargo.toml @@ -7,9 +7,13 @@ edition = "2021" name = "write-throughput" path = "src/main.rs" +[features] +console = ["dep:console-subscriber" ,"tokio/tracing"] + [dependencies] turso = { workspace = true } clap = { workspace = true, features = ["derive"] } tokio = { workspace = true, default-features = true, features = ["full"] } futures = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } +console-subscriber = { workspace = true, optional = true } diff --git a/perf/throughput/turso/src/main.rs b/perf/throughput/turso/src/main.rs index dbe2318f1..6f7ec4a0e 100644 --- a/perf/throughput/turso/src/main.rs +++ b/perf/throughput/turso/src/main.rs @@ -2,7 +2,9 @@ use clap::{Parser, ValueEnum}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Barrier}; use std::time::{Duration, Instant}; -use tracing_subscriber::EnvFilter; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; +use tracing_subscriber::{EnvFilter, Layer}; use turso::{Builder, Database, Result}; #[derive(Debug, Clone, Copy, ValueEnum)] @@ -53,11 +55,18 @@ struct Args { } fn main() -> Result<()> { - tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) + #[cfg(feature = "console")] + let console_layer = console_subscriber::spawn(); + let fmt_layer = tracing_subscriber::fmt::layer() .with_ansi(false) .with_thread_ids(true) - .init(); + .with_filter(EnvFilter::from_default_env()); + let registry = tracing_subscriber::registry(); + #[cfg(feature = "console")] + let registry = registry.with(console_layer); + let registry = registry.with(fmt_layer); + + registry.init(); let args = Args::parse(); let rt = tokio::runtime::Builder::new_multi_thread() From 0d95a2924a5c132de5ea39fc809c73d105cc50ac Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Wed, 1 Oct 2025 13:02:09 -0300 Subject: [PATCH 229/428] pass optional waker to step --- core/lib.rs | 16 ++++++++++++++-- core/vdbe/mod.rs | 5 ++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 2a0a558cf..d233c9c26 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -63,6 +63,7 @@ pub use io::{ }; use parking_lot::RwLock; use schema::Schema; +use std::task::Waker; use std::{ borrow::Cow, cell::{Cell, RefCell}, @@ -2437,7 +2438,7 @@ impl BusyTimeout { } } - self.iteration += 1; + self.iteration = self.iteration.saturating_add(1); self.timeout = now + delay; } } @@ -2509,7 +2510,7 @@ impl Statement { self.state.interrupt(); } - pub fn step(&mut self) -> Result { + fn _step(&mut self, waker: Option<&Waker>) -> Result { if let Some(busy_timeout) = self.busy_timeout.as_ref() { if self.pager.io.now() < busy_timeout.timeout { // Yield the query as the timeout has not been reached yet @@ -2523,6 +2524,7 @@ impl Statement { self.mv_store.as_ref(), self.pager.clone(), self.query_mode, + waker, ) } else { const MAX_SCHEMA_RETRY: usize = 50; @@ -2531,6 +2533,7 @@ impl Statement { self.mv_store.as_ref(), self.pager.clone(), self.query_mode, + waker, ); for attempt in 0..MAX_SCHEMA_RETRY { // Only reprepare if we still need to update schema @@ -2544,6 +2547,7 @@ impl Statement { self.mv_store.as_ref(), self.pager.clone(), self.query_mode, + waker, ); } res @@ -2581,6 +2585,14 @@ impl Statement { res } + pub fn step(&mut self) -> Result { + self._step(None) + } + + pub fn step_with_waker(&mut self, waker: &Waker) -> Result { + self._step(Some(waker)) + } + pub(crate) fn run_ignore_rows(&mut self) -> Result<()> { loop { match self.step()? { diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index bcb4372d5..1728058e5 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -69,6 +69,7 @@ use std::{ atomic::{AtomicI64, Ordering}, Arc, }, + task::Waker, }; use tracing::{instrument, Level}; @@ -530,9 +531,10 @@ impl Program { mv_store: Option<&Arc>, pager: Arc, query_mode: QueryMode, + waker: Option<&Waker>, ) -> Result { match query_mode { - QueryMode::Normal => self.normal_step(state, mv_store, pager), + QueryMode::Normal => self.normal_step(state, mv_store, pager, waker), QueryMode::Explain => self.explain_step(state, mv_store, pager), QueryMode::ExplainQueryPlan => self.explain_query_plan_step(state, mv_store, pager), } @@ -645,6 +647,7 @@ impl Program { state: &mut ProgramState, mv_store: Option<&Arc>, pager: Arc, + waker: Option<&Waker>, ) -> Result { let enable_tracing = tracing::enabled!(tracing::Level::TRACE); loop { From 943ade7293d78814f79b1b8e3569eaa69d6b8613 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Wed, 1 Oct 2025 13:02:09 -0300 Subject: [PATCH 230/428] pass waker to completion for more efficient task scheduling --- bindings/rust/src/lib.rs | 74 +++++++++++++++++++------------ core/io/mod.rs | 94 +++++++++++++++++++++++++++++++++++----- core/types.rs | 12 +++++ core/vdbe/mod.rs | 2 + 4 files changed, 143 insertions(+), 39 deletions(-) diff --git a/bindings/rust/src/lib.rs b/bindings/rust/src/lib.rs index be95b4333..5d87ad5f0 100644 --- a/bindings/rust/src/lib.rs +++ b/bindings/rust/src/lib.rs @@ -46,10 +46,13 @@ pub use params::params_from_iter; pub use params::IntoParams; use std::fmt::Debug; +use std::future::Future; use std::num::NonZero; use std::sync::{Arc, Mutex}; +use std::task::Poll; pub use turso_core::EncryptionOpts; use turso_core::OpenFlags; + // Re-exports rows pub use crate::rows::{Row, Rows}; @@ -464,6 +467,45 @@ impl Clone for Statement { unsafe impl Send for Statement {} unsafe impl Sync for Statement {} +struct Execute { + stmt: Arc>, +} + +unsafe impl Send for Execute {} +unsafe impl Sync for Execute {} + +impl Future for Execute { + type Output = Result; + + fn poll( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let mut stmt = self.stmt.lock().unwrap(); + match stmt.step_with_waker(cx.waker()) { + Ok(turso_core::StepResult::Row) => Poll::Ready(Err(Error::SqlExecutionFailure( + "unexpected row during execution".to_string(), + ))), + Ok(turso_core::StepResult::Done) => { + let changes = stmt.n_change(); + assert!(changes >= 0); + Poll::Ready(Ok(changes as u64)) + } + Ok(turso_core::StepResult::IO) => { + stmt.run_once()?; + Poll::Pending + } + Ok(turso_core::StepResult::Busy) => Poll::Ready(Err(Error::SqlExecutionFailure( + "database is locked".to_string(), + ))), + Ok(turso_core::StepResult::Interrupt) => { + Poll::Ready(Err(Error::SqlExecutionFailure("interrupted".to_string()))) + } + Err(err) => Poll::Ready(Err(err.into())), + } + } +} + impl Statement { /// Query the database with this prepared statement. pub async fn query(&mut self, params: impl IntoParams) -> Result { @@ -514,33 +556,11 @@ impl Statement { } } } - loop { - let mut stmt = self.inner.lock().unwrap(); - match stmt.step() { - Ok(turso_core::StepResult::Row) => { - return Err(Error::SqlExecutionFailure( - "unexpected row during execution".to_string(), - )); - } - Ok(turso_core::StepResult::Done) => { - let changes = stmt.n_change(); - assert!(changes >= 0); - return Ok(changes as u64); - } - Ok(turso_core::StepResult::IO) => { - stmt.run_once()?; - } - Ok(turso_core::StepResult::Busy) => { - return Err(Error::SqlExecutionFailure("database is locked".to_string())); - } - Ok(turso_core::StepResult::Interrupt) => { - return Err(Error::SqlExecutionFailure("interrupted".to_string())); - } - Err(err) => { - return Err(err.into()); - } - } - } + + let execute = Execute { + stmt: self.inner.clone(), + }; + execute.await } /// Returns columns of the result of this prepared statement. diff --git a/core/io/mod.rs b/core/io/mod.rs index 0c0baa807..35f7d4786 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -3,11 +3,13 @@ use crate::storage::sqlite3_ondisk::WAL_FRAME_HEADER_SIZE; use crate::{BufferPool, CompletionError, Result}; use bitflags::bitflags; use cfg_block::cfg_block; +use parking_lot::Mutex; use std::cell::RefCell; use std::fmt; use std::ptr::NonNull; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, OnceLock}; +use std::task::Waker; use std::{fmt::Debug, pin::Pin}; pub trait File: Send + Sync { @@ -139,12 +141,64 @@ pub struct Completion { inner: Option>, } +#[derive(Debug, Default)] +struct ContextInner { + waker: Option, + // TODO: add abort signal +} + +#[derive(Debug, Clone)] +pub struct Context { + inner: Arc>, +} + +impl ContextInner { + pub fn new() -> Self { + Self { waker: None } + } + + pub fn wake(&mut self) { + if let Some(waker) = self.waker.take() { + waker.wake(); + } + } + + pub fn set_waker(&mut self, waker: &Waker) { + if let Some(curr_waker) = self.waker.as_mut() { + // only call and change waker if it would awake a different task + if !curr_waker.will_wake(waker) { + let prev_waker = std::mem::replace(curr_waker, waker.clone()); + prev_waker.wake(); + } + } else { + self.waker = Some(waker.clone()); + } + } +} + +impl Context { + pub fn new() -> Self { + Self { + inner: Arc::new(Mutex::new(ContextInner::new())), + } + } + + pub fn wake(&self) { + self.inner.lock().wake(); + } + + pub fn set_waker(&self, waker: &Waker) { + self.inner.lock().set_waker(waker); + } +} + struct CompletionInner { completion_type: CompletionType, /// None means we completed successfully // Thread safe with OnceLock result: std::sync::OnceLock>, needs_link: bool, + context: Context, /// Optional parent group this completion belongs to parent: OnceLock>, } @@ -293,26 +347,28 @@ pub enum CompletionType { Yield, } +impl CompletionInner { + fn new(completion_type: CompletionType, needs_link: bool) -> Self { + Self { + completion_type, + result: OnceLock::new(), + needs_link, + context: Context::new(), + parent: OnceLock::new(), + } + } +} + impl Completion { pub fn new(completion_type: CompletionType) -> Self { Self { - inner: Some(Arc::new(CompletionInner { - completion_type, - result: OnceLock::new(), - needs_link: false, - parent: OnceLock::new(), - })), + inner: Some(Arc::new(CompletionInner::new(completion_type, false))), } } pub fn new_linked(completion_type: CompletionType) -> Self { Self { - inner: Some(Arc::new(CompletionInner { - completion_type, - result: OnceLock::new(), - needs_link: true, - parent: OnceLock::new(), - })), + inner: Some(Arc::new(CompletionInner::new(completion_type, true))), } } @@ -375,6 +431,18 @@ impl Completion { Self { inner: None } } + pub fn wake(&self) { + self.get_inner().context.wake(); + } + + pub fn set_waker(&self, waker: &Waker) { + if self.finished() || self.inner.is_none() { + waker.wake_by_ref(); + } else { + self.get_inner().context.set_waker(waker); + } + } + pub fn succeeded(&self) -> bool { match &self.inner { Some(inner) => match &inner.completion_type { @@ -465,6 +533,8 @@ impl Completion { result.err() }); + // call the waker regardless + inner.context.wake(); } /// only call this method if you are sure that the completion is diff --git a/core/types.rs b/core/types.rs index bfcbb004e..085aaff1a 100644 --- a/core/types.rs +++ b/core/types.rs @@ -17,6 +17,7 @@ use crate::vdbe::Register; use crate::vtab::VirtualTableCursor; use crate::{turso_assert, Completion, CompletionError, Result, IO}; use std::fmt::{Debug, Display}; +use std::task::Waker; /// SQLite by default uses 2000 as maximum numbers in a row. /// It controlld by the constant called SQLITE_MAX_COLUMN @@ -2394,6 +2395,17 @@ impl IOCompletions { IOCompletions::Many(completions) => completions.iter().find_map(|c| c.get_error()), } } + + pub fn set_waker(&self, waker: Option<&Waker>) { + if let Some(waker) = waker { + match self { + IOCompletions::Single(c) => c.set_waker(waker), + IOCompletions::Many(completions) => { + completions.iter().for_each(|c| c.set_waker(waker)) + } + } + } + } } #[derive(Debug)] diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 1728058e5..584b62da4 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -665,6 +665,7 @@ impl Program { } if let Some(io) = &state.io_completions { if !io.finished() { + io.set_waker(waker); return Ok(StepResult::IO); } if let Some(err) = io.get_error() { @@ -697,6 +698,7 @@ impl Program { } Ok(InsnFunctionStepResult::IO(io)) => { // Instruction not complete - waiting for I/O, will resume at same PC + io.set_waker(waker); state.io_completions = Some(io); return Ok(StepResult::IO); } From ff955aeee98e612d7b76dba6309aa8f5e77aa41b Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 2 Oct 2025 00:38:40 -0300 Subject: [PATCH 231/428] simplify clock code by using a common struct --- core/io/clock.rs | 12 ++++++++++++ core/io/generic.rs | 10 ++++------ core/io/io_uring.rs | 8 ++------ core/io/memory.rs | 8 ++------ core/io/unix.rs | 8 ++------ core/io/vfs.rs | 8 ++------ core/io/windows.rs | 10 ++++------ 7 files changed, 28 insertions(+), 36 deletions(-) diff --git a/core/io/clock.rs b/core/io/clock.rs index d522ac278..06edc65e3 100644 --- a/core/io/clock.rs +++ b/core/io/clock.rs @@ -87,3 +87,15 @@ impl std::ops::Sub for Instant { pub trait Clock { fn now(&self) -> Instant; } + +pub struct DefaultClock; + +impl Clock for DefaultClock { + fn now(&self) -> Instant { + let now = chrono::Local::now(); + Instant { + secs: now.timestamp(), + micros: now.timestamp_subsec_micros(), + } + } +} diff --git a/core/io/generic.rs b/core/io/generic.rs index c75702ae7..b465a24cb 100644 --- a/core/io/generic.rs +++ b/core/io/generic.rs @@ -1,4 +1,6 @@ -use crate::{Clock, Completion, File, Instant, LimboError, OpenFlags, Result, IO}; +use crate::{ + io::clock::DefaultClock, Clock, Completion, File, Instant, LimboError, OpenFlags, Result, IO, +}; use parking_lot::RwLock; use std::io::{Read, Seek, Write}; use std::sync::Arc; @@ -44,11 +46,7 @@ impl IO for GenericIO { impl Clock for GenericIO { fn now(&self) -> Instant { - let now = chrono::Local::now(); - Instant { - secs: now.timestamp(), - micros: now.timestamp_subsec_micros(), - } + DefaultClock.now() } } diff --git a/core/io/io_uring.rs b/core/io/io_uring.rs index d9f79b874..c681649eb 100644 --- a/core/io/io_uring.rs +++ b/core/io/io_uring.rs @@ -1,7 +1,7 @@ #![allow(clippy::arc_with_non_send_sync)] use super::{common, Completion, CompletionInner, File, OpenFlags, IO}; -use crate::io::clock::{Clock, Instant}; +use crate::io::clock::{Clock, DefaultClock, Instant}; use crate::storage::wal::CKPT_BATCH_PAGES; use crate::{turso_assert, CompletionError, LimboError, Result}; use parking_lot::Mutex; @@ -697,11 +697,7 @@ impl IO for UringIO { impl Clock for UringIO { fn now(&self) -> Instant { - let now = chrono::Local::now(); - Instant { - secs: now.timestamp(), - micros: now.timestamp_subsec_micros(), - } + DefaultClock.now() } } diff --git a/core/io/memory.rs b/core/io/memory.rs index fc0549ca7..31c78a4b1 100644 --- a/core/io/memory.rs +++ b/core/io/memory.rs @@ -1,5 +1,5 @@ use super::{Buffer, Clock, Completion, File, OpenFlags, IO}; -use crate::Result; +use crate::{io::clock::DefaultClock, Result}; use crate::io::clock::Instant; use std::{ @@ -35,11 +35,7 @@ impl Default for MemoryIO { impl Clock for MemoryIO { fn now(&self) -> Instant { - let now = chrono::Local::now(); - Instant { - secs: now.timestamp(), - micros: now.timestamp_subsec_micros(), - } + DefaultClock.now() } } diff --git a/core/io/unix.rs b/core/io/unix.rs index bb17765f2..f95c9b95d 100644 --- a/core/io/unix.rs +++ b/core/io/unix.rs @@ -1,6 +1,6 @@ use super::{Completion, File, OpenFlags, IO}; use crate::error::LimboError; -use crate::io::clock::{Clock, Instant}; +use crate::io::clock::{Clock, DefaultClock, Instant}; use crate::io::common; use crate::Result; use parking_lot::Mutex; @@ -27,11 +27,7 @@ impl UnixIO { impl Clock for UnixIO { fn now(&self) -> Instant { - let now = chrono::Local::now(); - Instant { - secs: now.timestamp(), - micros: now.timestamp_subsec_micros(), - } + DefaultClock.now() } } diff --git a/core/io/vfs.rs b/core/io/vfs.rs index 52722a82e..b2ce62424 100644 --- a/core/io/vfs.rs +++ b/core/io/vfs.rs @@ -1,6 +1,6 @@ use super::{Buffer, Completion, File, OpenFlags, IO}; use crate::ext::VfsMod; -use crate::io::clock::{Clock, Instant}; +use crate::io::clock::{Clock, DefaultClock, Instant}; use crate::io::CompletionInner; use crate::{LimboError, Result}; use std::ffi::{c_void, CString}; @@ -10,11 +10,7 @@ use turso_ext::{BufferRef, IOCallback, SendPtr, VfsFileImpl, VfsImpl}; impl Clock for VfsMod { fn now(&self) -> Instant { - let now = chrono::Local::now(); - Instant { - secs: now.timestamp(), - micros: now.timestamp_subsec_micros(), - } + DefaultClock.now() } } diff --git a/core/io/windows.rs b/core/io/windows.rs index a884cc922..3431e5454 100644 --- a/core/io/windows.rs +++ b/core/io/windows.rs @@ -1,4 +1,6 @@ -use crate::{Clock, Completion, File, Instant, LimboError, OpenFlags, Result, IO}; +use crate::{ + io::clock::DefaultClock, Clock, Completion, File, Instant, LimboError, OpenFlags, Result, IO, +}; use parking_lot::RwLock; use std::io::{Read, Seek, Write}; use std::sync::Arc; @@ -44,11 +46,7 @@ impl IO for WindowsIO { impl Clock for WindowsIO { fn now(&self) -> Instant { - let now = chrono::Local::now(); - Instant { - secs: now.timestamp(), - micros: now.timestamp_subsec_micros(), - } + DefaultClock.now() } } From 23380a58d754ea13957a49777d11a8984c1b04b9 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 2 Oct 2025 11:34:05 -0300 Subject: [PATCH 232/428] make next truly async and non blocking --- bindings/rust/src/rows.rs | 65 +++++++++++++++++++++++++-------------- core/lib.rs | 6 ++++ 2 files changed, 48 insertions(+), 23 deletions(-) diff --git a/bindings/rust/src/rows.rs b/bindings/rust/src/rows.rs index 9102baaf3..d13193edc 100644 --- a/bindings/rust/src/rows.rs +++ b/bindings/rust/src/rows.rs @@ -2,7 +2,9 @@ use turso_core::types::FromValue; use crate::{Error, Result, Value}; use std::fmt::Debug; +use std::future::Future; use std::sync::{Arc, Mutex}; +use std::task::Poll; /// Results of a prepared statement query. pub struct Rows { @@ -28,33 +30,50 @@ impl Rows { } /// Fetch the next row of this result set. pub async fn next(&mut self) -> Result> { - loop { - let mut stmt = self - .inner - .lock() - .map_err(|e| Error::MutexError(e.to_string()))?; - match stmt.step()? { - turso_core::StepResult::Row => { - let row = stmt.row().unwrap(); - return Ok(Some(Row { - values: row.get_values().map(|v| v.to_owned()).collect(), - })); - } - turso_core::StepResult::Done => return Ok(None), - turso_core::StepResult::IO => { - if let Err(e) = stmt.run_once() { - return Err(e.into()); + struct Next { + stmt: Arc>, + } + + impl Future for Next { + type Output = Result>; + + fn poll( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let mut stmt = self + .stmt + .lock() + .map_err(|e| Error::MutexError(e.to_string()))?; + match stmt.step_with_waker(cx.waker())? { + turso_core::StepResult::Row => { + let row = stmt.row().unwrap(); + Poll::Ready(Ok(Some(Row { + values: row.get_values().map(|v| v.to_owned()).collect(), + }))) + } + turso_core::StepResult::Done => Poll::Ready(Ok(None)), + turso_core::StepResult::IO => { + stmt.run_once()?; + Poll::Pending + } + turso_core::StepResult::Busy => Poll::Ready(Err(Error::SqlExecutionFailure( + "database is locked".to_string(), + ))), + turso_core::StepResult::Interrupt => { + Poll::Ready(Err(Error::SqlExecutionFailure("interrupted".to_string()))) } - continue; - } - turso_core::StepResult::Busy => { - return Err(Error::SqlExecutionFailure("database is locked".to_string())) - } - turso_core::StepResult::Interrupt => { - return Err(Error::SqlExecutionFailure("interrupted".to_string())) } } } + + unsafe impl Send for Next {} + + let next = Next { + stmt: self.inner.clone(), + }; + + next.await } } diff --git a/core/lib.rs b/core/lib.rs index d233c9c26..25ed3114c 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -2514,6 +2514,9 @@ impl Statement { if let Some(busy_timeout) = self.busy_timeout.as_ref() { if self.pager.io.now() < busy_timeout.timeout { // Yield the query as the timeout has not been reached yet + if let Some(waker) = waker { + waker.wake_by_ref(); + } return Ok(StepResult::IO); } } @@ -2578,6 +2581,9 @@ impl Statement { }; if now < self.busy_timeout.as_ref().unwrap().timeout { + if let Some(waker) = waker { + waker.wake_by_ref(); + } res = Ok(StepResult::IO); } } From 818a68b3dd31eca107847fc7cd2f94e506cf4a22 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 2 Oct 2025 13:39:59 -0300 Subject: [PATCH 233/428] ignore busy errors for `test_concurrent_unique_constraint_regression` --- bindings/rust/tests/integration_tests.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bindings/rust/tests/integration_tests.rs b/bindings/rust/tests/integration_tests.rs index 19514532d..c0b25244d 100644 --- a/bindings/rust/tests/integration_tests.rs +++ b/bindings/rust/tests/integration_tests.rs @@ -402,7 +402,8 @@ async fn test_concurrent_unique_constraint_regression() { match result { Ok(_) => (), Err(Error::SqlExecutionFailure(e)) - if e.contains("UNIQUE constraint failed") => {} + if e.contains("UNIQUE constraint failed") + | e.contains("database is locked") => {} Err(e) => { panic!("Error executing statement: {e:?}"); } From 22e98964cc07f85feae166184acd0081c68d6483 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 14 Oct 2025 12:48:34 -0400 Subject: [PATCH 234/428] Refactor INSERT translation to a modular setup with emitter context --- core/translate/insert.rs | 1595 +++++++++++++++++++++----------------- core/translate/mod.rs | 26 +- core/translate/upsert.rs | 85 +- 3 files changed, 956 insertions(+), 750 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index a8339c6e3..c1fab97f1 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -2,7 +2,6 @@ use std::num::NonZeroUsize; use std::sync::Arc; use turso_parser::ast::{ self, Expr, InsertBody, OneSelect, QualifiedName, ResolveType, ResultColumn, Upsert, UpsertDo, - With, }; use crate::error::{ @@ -20,7 +19,7 @@ use crate::translate::fkeys::{ build_index_affinity_string, emit_fk_violation, emit_guarded_fk_decrement, index_probe, open_read_index, open_read_table, }; -use crate::translate::plan::TableReferences; +use crate::translate::plan::{ResultSetColumn, TableReferences}; use crate::translate::planner::ROWID_STRS; use crate::translate::upsert::{ collect_set_clauses_for_upsert, emit_upsert, resolve_upsert_target, ResolvedUpsertTarget, @@ -36,22 +35,136 @@ use crate::{ insn::Insn, }, }; -use crate::{Result, VirtualTable}; +use crate::{Connection, Result, VirtualTable}; use super::emitter::Resolver; use super::expr::{translate_expr, translate_expr_no_constant_opt, NoConstantOptReason}; use super::plan::QueryDestination; use super::select::translate_select; -struct TempTableCtx { +pub struct TempTableCtx { cursor_id: usize, loop_start_label: BranchOffset, loop_end_label: BranchOffset, } +/// Validate anything with this insert statement that should throw an early parse error +fn validate(table_name: &str, resolver: &Resolver, table: &Table) -> Result<()> { + // Check if this is a system table that should be protected from direct writes + if crate::schema::is_system_table(table_name) { + crate::bail_parse_error!("table {} may not be modified", table_name); + } + // Check if this table has any incompatible dependent views + let incompatible_views = resolver.schema.has_incompatible_dependent_views(table_name); + if !incompatible_views.is_empty() { + use crate::incremental::compiler::DBSP_CIRCUIT_VERSION; + crate::bail_parse_error!( + "Cannot INSERT into table '{}' because it has incompatible dependent materialized view(s): {}. \n\ + These views were created with a different DBSP version than the current version ({}). \n\ + Please DROP and recreate the view(s) before modifying this table.", + table_name, + incompatible_views.join(", "), + DBSP_CIRCUIT_VERSION + ); + } + + // Check if this is a materialized view + if resolver.schema.is_materialized_view(table_name) { + crate::bail_parse_error!("cannot modify materialized view {}", table_name); + } + if resolver.schema.table_has_indexes(table_name) && !resolver.schema.indexes_enabled() { + // Let's disable altering a table with indices altogether instead of checking column by + // column to be extra safe. + crate::bail_parse_error!( + "INSERT to table with indexes is disabled. Omit the `--experimental-indexes=false` flag to enable this feature." + ); + } + if table.btree().is_some_and(|t| !t.has_rowid) { + crate::bail_parse_error!("INSERT into WITHOUT ROWID table is not supported"); + } + + Ok(()) +} + +#[allow(dead_code)] +pub struct InsertEmitCtx<'a> { + pub table: &'a Arc, + pub idx_cursors: Vec<(&'a String, i64, usize)>, + pub temp_table_ctx: Option, + pub on_conflict: ResolveType, + pub num_values: usize, + pub yield_reg_opt: Option, + pub conflict_rowid_reg: usize, + pub cursor_id: usize, + + /// Labels + pub halt_label: BranchOffset, + pub row_done_label: BranchOffset, + pub stmt_epilogue: BranchOffset, + pub loop_start_label: BranchOffset, + pub key_ready_for_uniqueness_check_label: BranchOffset, + pub key_generation_label: BranchOffset, + pub select_exhausted_label: Option, + + /// CDC table info + pub cdc_table: Option<(usize, Arc)>, + /// Autoincrement sequence table info + pub autoincrement_meta: Option, +} + +impl<'a> InsertEmitCtx<'a> { + fn new( + program: &mut ProgramBuilder, + resolver: &'a Resolver, + table: &'a Arc, + on_conflict: Option, + cdc_table: Option<(usize, Arc)>, + num_values: usize, + temp_table_ctx: Option, + ) -> Self { + // allocate cursor id's for each btree index cursor we'll need to populate the indexes + // (idx name, root_page, idx cursor id) + let idx_cursors = resolver + .schema + .get_indices(table.name.as_str()) + .map(|idx| { + ( + &idx.name, + idx.root_page, + program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone())), + ) + }) + .collect::>(); + let halt_label = program.allocate_label(); + let loop_start_label = program.allocate_label(); + let row_done_label = program.allocate_label(); + let stmt_epilogue = program.allocate_label(); + let key_ready_for_uniqueness_check_label = program.allocate_label(); + let key_generation_label = program.allocate_label(); + Self { + table, + idx_cursors, + temp_table_ctx, + on_conflict: on_conflict.unwrap_or(ResolveType::Abort), + yield_reg_opt: None, + conflict_rowid_reg: program.alloc_register(), + select_exhausted_label: None, + cursor_id: 0, // set later in emit_source_emission + halt_label, + row_done_label, + stmt_epilogue, + loop_start_label, + cdc_table, + num_values, + key_ready_for_uniqueness_check_label, + key_generation_label, + autoincrement_meta: None, + } + } +} + #[allow(clippy::too_many_arguments)] pub fn translate_insert( - with: Option, resolver: &Resolver, on_conflict: Option, tbl_name: QualifiedName, @@ -67,59 +180,15 @@ pub fn translate_insert( approx_num_labels: 5, }; program.extend(&opts); - if with.is_some() { - crate::bail_parse_error!("WITH clause is not supported"); - } - if on_conflict.is_some() { - crate::bail_parse_error!("ON CONFLICT clause is not supported"); - } - - if resolver - .schema - .table_has_indexes(&tbl_name.name.to_string()) - && !resolver.schema.indexes_enabled() - { - // Let's disable altering a table with indices altogether instead of checking column by - // column to be extra safe. - crate::bail_parse_error!( - "INSERT to table with indexes is disabled. Omit the `--experimental-indexes=false` flag to enable this feature." - ); - } let table_name = &tbl_name.name; - - // Check if this is a system table that should be protected from direct writes - if crate::schema::is_system_table(table_name.as_str()) { - crate::bail_parse_error!("table {} may not be modified", table_name); - } - let table = match resolver.schema.get_table(table_name.as_str()) { Some(table) => table, None => crate::bail_parse_error!("no such table: {}", table_name), }; + validate(table_name.as_str(), resolver, &table)?; + let fk_enabled = connection.foreign_keys_enabled(); - - // Check if this is a materialized view - if resolver.schema.is_materialized_view(table_name.as_str()) { - crate::bail_parse_error!("cannot modify materialized view {}", table_name); - } - - // Check if this table has any incompatible dependent views - let incompatible_views = resolver - .schema - .has_incompatible_dependent_views(table_name.as_str()); - if !incompatible_views.is_empty() { - use crate::incremental::compiler::DBSP_CIRCUIT_VERSION; - crate::bail_parse_error!( - "Cannot INSERT into table '{}' because it has incompatible dependent materialized view(s): {}. \n\ - These views were created with a different DBSP version than the current version ({}). \n\ - Please DROP and recreate the view(s) before modifying this table.", - table_name, - incompatible_views.join(", "), - DBSP_CIRCUIT_VERSION - ); - } - if let Some(virtual_table) = &table.virtual_table() { program = translate_virtual_table_insert( program, @@ -135,101 +204,19 @@ pub fn translate_insert( let Some(btree_table) = table.btree() else { crate::bail_parse_error!("no such table: {}", table_name); }; - if !btree_table.has_rowid { - crate::bail_parse_error!("INSERT into WITHOUT ROWID table is not supported"); - } let root_page = btree_table.root_page; - let mut values: Option>> = None; - let mut upsert_actions: Vec<(ResolvedUpsertTarget, BranchOffset, Box)> = Vec::new(); - - let mut inserting_multiple_rows = false; - if let InsertBody::Select(select, upsert_opt) = &mut body { - match &mut select.body.select { - // TODO see how to avoid clone - OneSelect::Values(values_expr) if values_expr.len() <= 1 => { - if values_expr.is_empty() { - crate::bail_parse_error!("no values to insert"); - } - for expr in values_expr.iter_mut().flat_map(|v| v.iter_mut()) { - match expr.as_mut() { - Expr::Id(name) => { - if name.quoted_with('"') { - *expr = - Expr::Literal(ast::Literal::String(name.as_literal())).into(); - } else { - // an INSERT INTO ... VALUES (...) cannot reference columns - crate::bail_parse_error!("no such column: {name}"); - } - } - Expr::Qualified(first_name, second_name) => { - // an INSERT INTO ... VALUES (...) cannot reference columns - crate::bail_parse_error!("no such column: {first_name}.{second_name}"); - } - _ => {} - } - bind_and_rewrite_expr( - expr, - None, - None, - connection, - &mut program.param_ctx, - BindingBehavior::ResultColumnsNotAllowed, - )?; - } - values = values_expr.pop(); - } - _ => inserting_multiple_rows = true, - } - while let Some(mut upsert) = upsert_opt.take() { - if let UpsertDo::Set { - ref mut sets, - ref mut where_clause, - } = &mut upsert.do_clause - { - for set in sets.iter_mut() { - bind_and_rewrite_expr( - &mut set.expr, - None, - None, - connection, - &mut program.param_ctx, - BindingBehavior::AllowUnboundIdentifiers, - )?; - } - if let Some(ref mut where_expr) = where_clause { - bind_and_rewrite_expr( - where_expr, - None, - None, - connection, - &mut program.param_ctx, - BindingBehavior::AllowUnboundIdentifiers, - )?; - } - } - let next = upsert.next.take(); - upsert_actions.push(( - // resolve the constrained target for UPSERT in the chain - resolve_upsert_target(resolver.schema, &table, &upsert)?, - program.allocate_label(), - upsert, - )); - *upsert_opt = next; - } - } + let BoundInsertResult { + mut values, + mut upsert_actions, + inserting_multiple_rows, + } = bind_insert(&mut program, resolver, &table, &mut body, connection)?; if inserting_multiple_rows && btree_table.has_autoincrement { ensure_sequence_initialized(&mut program, resolver.schema, &btree_table)?; } - let halt_label = program.allocate_label(); - let loop_start_label = program.allocate_label(); - let row_done_label = program.allocate_label(); - let stmt_epilogue = program.allocate_label(); - let mut select_exhausted_label: Option = None; - let cdc_table = prepare_cdc_if_necessary(&mut program, resolver.schema, table.get_name())?; // Process RETURNING clause using shared module @@ -240,234 +227,64 @@ pub fn translate_insert( &mut program, connection, )?; - let has_fks = fk_enabled && (resolver.schema.has_child_fks(table_name.as_str()) || resolver .schema .any_resolved_fks_referencing(table_name.as_str())); - let mut yield_reg_opt = None; - let mut temp_table_ctx = None; - let (num_values, cursor_id) = match body { - InsertBody::Select(select, _) => { - // Simple Common case of INSERT INTO
VALUES (...) - if matches!(&select.body.select, OneSelect::Values(values) if values.len() <= 1) { - ( - values.as_ref().unwrap().len(), - program.alloc_cursor_id(CursorType::BTreeTable(btree_table.clone())), - ) - } else { - // Multiple rows - use coroutine for value population - let yield_reg = program.alloc_register(); - let jump_on_definition_label = program.allocate_label(); - let start_offset_label = program.allocate_label(); - program.emit_insn(Insn::InitCoroutine { - yield_reg, - jump_on_definition: jump_on_definition_label, - start_offset: start_offset_label, - }); - program.preassign_label_to_next_insn(start_offset_label); - let query_destination = QueryDestination::CoroutineYield { - yield_reg, - coroutine_implementation_start: halt_label, - }; - program.incr_nesting(); - let result = - translate_select(select, resolver, program, query_destination, connection)?; - program = result.program; - program.decr_nesting(); + let mut ctx = InsertEmitCtx::new( + &mut program, + resolver, + &btree_table, + on_conflict, + cdc_table, + values.len(), + None, + ); - program.emit_insn(Insn::EndCoroutine { yield_reg }); - program.preassign_label_to_next_insn(jump_on_definition_label); - - let cursor_id = - program.alloc_cursor_id(CursorType::BTreeTable(btree_table.clone())); - - // From SQLite - /* Set useTempTable to TRUE if the result of the SELECT statement - ** should be written into a temporary table (template 4). Set to - ** FALSE if each output row of the SELECT can be written directly into - ** the destination table (template 3). - ** - ** A temp table must be used if the table being updated is also one - ** of the tables being read by the SELECT statement. Also use a - ** temp table in the case of row triggers. - */ - if program.is_table_open(&table) { - let temp_cursor_id = - program.alloc_cursor_id(CursorType::BTreeTable(btree_table.clone())); - temp_table_ctx = Some(TempTableCtx { - cursor_id: temp_cursor_id, - loop_start_label: program.allocate_label(), - loop_end_label: program.allocate_label(), - }); - - program.emit_insn(Insn::OpenEphemeral { - cursor_id: temp_cursor_id, - is_table: true, - }); - - // Main loop - program.preassign_label_to_next_insn(loop_start_label); - let yield_label = program.allocate_label(); - program.emit_insn(Insn::Yield { - yield_reg, - end_offset: yield_label, // stays local, we’ll route at loop end - }); - - let record_reg = program.alloc_register(); - let affinity_str = if columns.is_empty() { - btree_table - .columns - .iter() - .filter(|col| !col.hidden) - .map(|col| col.affinity().aff_mask()) - .collect::() - } else { - columns - .iter() - .map(|col_name| { - let column_name = normalize_ident(col_name.as_str()); - if ROWID_STRS - .iter() - .any(|s| s.eq_ignore_ascii_case(&column_name)) - { - return Affinity::Integer.aff_mask(); - } - table - .get_column_by_name(&column_name) - .unwrap() - .1 - .affinity() - .aff_mask() - }) - .collect::() - }; - - program.emit_insn(Insn::MakeRecord { - start_reg: program.reg_result_cols_start.unwrap_or(yield_reg + 1), - count: result.num_result_cols, - dest_reg: record_reg, - index_name: None, - affinity_str: Some(affinity_str), - }); - - let rowid_reg = program.alloc_register(); - program.emit_insn(Insn::NewRowid { - cursor: temp_cursor_id, - rowid_reg, - prev_largest_reg: 0, - }); - program.emit_insn(Insn::Insert { - cursor: temp_cursor_id, - key_reg: rowid_reg, - record_reg, - // since we are not doing an Insn::NewRowid or an Insn::NotExists here, we need to seek to ensure the insertion happens in the correct place. - flag: InsertFlags::new().require_seek(), - table_name: "".to_string(), - }); - // loop back - program.emit_insn(Insn::Goto { - target_pc: loop_start_label, - }); - program.preassign_label_to_next_insn(yield_label); - - program.emit_insn(Insn::OpenWrite { - cursor_id, - root_page: RegisterOrLiteral::Literal(root_page), - db: 0, - }); - } else { - program.emit_insn(Insn::OpenWrite { - cursor_id, - root_page: RegisterOrLiteral::Literal(root_page), - db: 0, - }); - - program.preassign_label_to_next_insn(loop_start_label); - - // on EOF, jump to select_exhausted to check FK constraints - let select_exhausted = program.allocate_label(); - select_exhausted_label = Some(select_exhausted); - program.emit_insn(Insn::Yield { - yield_reg, - end_offset: select_exhausted, - }); - } - - yield_reg_opt = Some(yield_reg); - (result.num_result_cols, cursor_id) - } - } - InsertBody::DefaultValues => { - let num_values = table.columns().len(); - values = Some( - table - .columns() - .iter() - .map(|c| { - c.default - .clone() - .unwrap_or(Box::new(ast::Expr::Literal(ast::Literal::Null))) - }) - .collect(), - ); - ( - num_values, - program.alloc_cursor_id(CursorType::BTreeTable(btree_table.clone())), - ) - } - }; + program = init_source_emission( + program, + &table, + connection, + &mut ctx, + resolver, + &mut values, + body, + &columns, + )?; let has_upsert = !upsert_actions.is_empty(); // Set up the program to return result columns if RETURNING is specified if !result_columns.is_empty() { program.result_columns = result_columns.clone(); } - - // allocate cursor id's for each btree index cursor we'll need to populate the indexes - // (idx name, root_page, idx cursor id) - let idx_cursors = resolver - .schema - .get_indices(table_name.as_str()) - .map(|idx| { - ( - &idx.name, - idx.root_page, - program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone())), - ) - }) - .collect::>(); - - let insertion = build_insertion(&mut program, &table, &columns, num_values)?; - - let conflict_rowid_reg = program.alloc_register(); + let insertion = build_insertion(&mut program, &table, &columns, ctx.num_values)?; if inserting_multiple_rows { let select_result_start_reg = program .reg_result_cols_start - .unwrap_or(yield_reg_opt.unwrap() + 1); + .unwrap_or(ctx.yield_reg_opt.unwrap() + 1); translate_rows_multiple( &mut program, &insertion, select_result_start_reg, resolver, - &temp_table_ctx, + &ctx.temp_table_ctx, )?; } else { // Single row - populate registers directly program.emit_insn(Insn::OpenWrite { - cursor_id, + cursor_id: ctx.cursor_id, root_page: RegisterOrLiteral::Literal(root_page), db: 0, }); - translate_rows_single(&mut program, &values.unwrap(), &insertion, resolver)?; + translate_rows_single(&mut program, &values, &insertion, resolver)?; } // Open all the index btrees for writing - for idx_cursor in idx_cursors.iter() { + for idx_cursor in ctx.idx_cursors.iter() { program.emit_insn(Insn::OpenWrite { cursor_id: idx_cursor.2, root_page: idx_cursor.1.into(), @@ -476,11 +293,6 @@ pub fn translate_insert( } let has_user_provided_rowid = insertion.key.is_provided_by_user(); - let key_ready_for_uniqueness_check_label = program.allocate_label(); - let key_generation_label = program.allocate_label(); - - let mut autoincrement_meta = None; - if btree_table.has_autoincrement { let seq_table = resolver .schema @@ -500,7 +312,13 @@ pub fn translate_insert( let table_name_reg = program.emit_string8_new_reg(btree_table.name.clone()); let r_seq = program.alloc_register(); let r_seq_rowid = program.alloc_register(); - autoincrement_meta = Some((seq_cursor_id, r_seq, r_seq_rowid, table_name_reg)); + + ctx.autoincrement_meta = Some(AutoincMeta { + seq_cursor_id, + r_seq, + r_seq_rowid, + table_name_reg, + }); program.emit_insn(Insn::Integer { dest: r_seq, @@ -557,7 +375,7 @@ pub fn translate_insert( }); program.emit_insn(Insn::Goto { - target_pc: key_generation_label, + target_pc: ctx.key_generation_label, }); program.preassign_label_to_next_insn(must_be_int_label); @@ -566,18 +384,18 @@ pub fn translate_insert( }); program.emit_insn(Insn::Goto { - target_pc: key_ready_for_uniqueness_check_label, + target_pc: ctx.key_ready_for_uniqueness_check_label, }); } - program.preassign_label_to_next_insn(key_generation_label); - if let Some((_, r_seq, _, _)) = autoincrement_meta { + program.preassign_label_to_next_insn(ctx.key_generation_label); + if let Some(AutoincMeta { r_seq, .. }) = ctx.autoincrement_meta { let r_max = program.alloc_register(); let dummy_reg = program.alloc_register(); program.emit_insn(Insn::NewRowid { - cursor: cursor_id, + cursor: ctx.cursor_id, rowid_reg: dummy_reg, prev_largest_reg: r_max, }); @@ -618,7 +436,13 @@ pub fn translate_insert( value: 1, }); - if let Some((seq_cursor_id, _, r_seq_rowid, table_name_reg)) = autoincrement_meta { + if let Some(AutoincMeta { + seq_cursor_id, + r_seq_rowid, + table_name_reg, + .. + }) = ctx.autoincrement_meta + { emit_update_sqlite_sequence( &mut program, resolver.schema, @@ -630,319 +454,45 @@ pub fn translate_insert( } } else { program.emit_insn(Insn::NewRowid { - cursor: cursor_id, + cursor: ctx.cursor_id, rowid_reg: insertion.key_register(), prev_largest_reg: 0, }); } - program.preassign_label_to_next_insn(key_ready_for_uniqueness_check_label); + program.preassign_label_to_next_insn(ctx.key_ready_for_uniqueness_check_label); - match table.btree() { - Some(t) if t.is_strict => { - program.emit_insn(Insn::TypeCheck { - start_reg: insertion.first_col_register(), - count: insertion.col_mappings.len(), - check_generated: true, - table_reference: Arc::clone(&t), - }); - } - _ => (), + if ctx.table.is_strict { + program.emit_insn(Insn::TypeCheck { + start_reg: insertion.first_col_register(), + count: insertion.col_mappings.len(), + check_generated: true, + table_reference: Arc::clone(ctx.table), + }); } - let mut constraints_to_check = Vec::new(); - if has_user_provided_rowid { - // Check uniqueness constraint for rowid if it was provided by user. - // When the DB allocates it there are no need for separate uniqueness checks. - let position = upsert_actions - .iter() - .position(|(target, ..)| matches!(target, ResolvedUpsertTarget::PrimaryKey)); - constraints_to_check.push((ResolvedUpsertTarget::PrimaryKey, position)); - } - for index in resolver.schema.get_indices(table_name.as_str()) { - let position = upsert_actions - .iter() - .position(|(target, ..)| matches!(target, ResolvedUpsertTarget::Index(x) if Arc::ptr_eq(x, index))); - constraints_to_check.push((ResolvedUpsertTarget::Index(index.clone()), position)); - } - - constraints_to_check.sort_by(|(_, p1), (_, p2)| match (p1, p2) { - (Some(p1), Some(p2)) => p1.cmp(p2), - (Some(_), None) => std::cmp::Ordering::Less, - (None, Some(_)) => std::cmp::Ordering::Greater, - (None, None) => std::cmp::Ordering::Equal, - }); - - let upsert_catch_all_position = - if let Some((ResolvedUpsertTarget::CatchAll, ..)) = upsert_actions.last() { - Some(upsert_actions.len() - 1) - } else { - None - }; + let (constraints_to_check, upsert_catch_all_position) = build_constraints_to_check( + resolver, + table_name.as_str(), + &upsert_actions, + has_user_provided_rowid, + ); // We need to separate index handling and insertion into a `preflight` and a // `commit` phase, because in UPSERT mode we might need to skip the actual insertion, as we can // have a naked ON CONFLICT DO NOTHING, so if we eagerly insert any indexes, we could insert // invalid index entries before we hit a conflict down the line. - // - // Preflight phase: evaluate each applicable UNIQUE constraint and probe with NoConflict. - // If any probe hits: - // DO NOTHING -> jump to row_done_label. - // - // DO UPDATE (matching target) -> fetch conflicting rowid and jump to `upsert_entry`. - // - // otherwise, raise SQLITE_CONSTRAINT_UNIQUE - for (constraint, position) in constraints_to_check { - match constraint { - ResolvedUpsertTarget::PrimaryKey => { - let make_record_label = program.allocate_label(); - program.emit_insn(Insn::NotExists { - cursor: cursor_id, - rowid_reg: insertion.key_register(), - target_pc: make_record_label, - }); - let rowid_column_name = insertion.key.column_name(); + emit_preflight_constraint_checks( + &mut program, + &ctx, + resolver, + &insertion, + &upsert_actions, + &constraints_to_check, + upsert_catch_all_position, + )?; - // Conflict on rowid: attempt to route through UPSERT if it targets the PK, otherwise raise constraint. - // emit Halt for every case *except* when upsert handles the conflict - 'emit_halt: { - if let Some(position) = position.or(upsert_catch_all_position) { - // PK conflict: the conflicting rowid is exactly the attempted key - program.emit_insn(Insn::Copy { - src_reg: insertion.key_register(), - dst_reg: conflict_rowid_reg, - extra_amount: 0, - }); - program.emit_insn(Insn::Goto { - target_pc: upsert_actions[position].1, - }); - break 'emit_halt; - } - let mut description = String::with_capacity( - table_name.as_str().len() + rowid_column_name.len() + 2, - ); - description.push_str(table_name.as_str()); - description.push('.'); - description.push_str(rowid_column_name); - program.emit_insn(Insn::Halt { - err_code: SQLITE_CONSTRAINT_PRIMARYKEY, - description, - }); - } - program.preassign_label_to_next_insn(make_record_label); - } - ResolvedUpsertTarget::Index(index) => { - let column_mappings = index - .columns - .iter() - .map(|idx_col| insertion.get_col_mapping_by_name(&idx_col.name)); - // find which cursor we opened earlier for this index - let idx_cursor_id = idx_cursors - .iter() - .find(|(name, _, _)| *name == &index.name) - .map(|(_, _, c_id)| *c_id) - .expect("no cursor found for index"); - - let maybe_skip_probe_label = if let Some(where_clause) = &index.where_clause { - let mut where_for_eval = where_clause.as_ref().clone(); - rewrite_partial_index_where(&mut where_for_eval, &insertion)?; - let reg = program.alloc_register(); - translate_expr_no_constant_opt( - &mut program, - Some(&TableReferences::new_empty()), - &where_for_eval, - reg, - resolver, - NoConstantOptReason::RegisterReuse, - )?; - let lbl = program.allocate_label(); - program.emit_insn(Insn::IfNot { - reg, - target_pc: lbl, - jump_if_null: true, - }); - Some(lbl) - } else { - None - }; - - let num_cols = index.columns.len(); - // allocate scratch registers for the index columns plus rowid - let idx_start_reg = program.alloc_registers(num_cols + 1); - - // build unpacked key [idx_start_reg .. idx_start_reg+num_cols-1], and rowid in last reg, - // copy each index column from the table's column registers into these scratch regs - for (i, column_mapping) in column_mappings.clone().enumerate() { - // copy from the table's column register over to the index's scratch register - let Some(col_mapping) = column_mapping else { - return Err(crate::LimboError::PlanningError( - "Column not found in INSERT".to_string(), - )); - }; - program.emit_insn(Insn::Copy { - src_reg: col_mapping.register, - dst_reg: idx_start_reg + i, - extra_amount: 0, - }); - } - // last register is the rowid - program.emit_insn(Insn::Copy { - src_reg: insertion.key_register(), - dst_reg: idx_start_reg + num_cols, - extra_amount: 0, - }); - - if index.unique { - let aff = index - .columns - .iter() - .map(|ic| table.columns()[ic.pos_in_table].affinity().aff_mask()) - .collect::(); - program.emit_insn(Insn::Affinity { - start_reg: idx_start_reg, - count: NonZeroUsize::new(num_cols).expect("nonzero col count"), - affinities: aff, - }); - - if has_upsert { - let next_check = program.allocate_label(); - program.emit_insn(Insn::NoConflict { - cursor_id: idx_cursor_id, - target_pc: next_check, - record_reg: idx_start_reg, - num_regs: num_cols, - }); - - // Conflict detected, figure out if this UPSERT handles the conflict - if let Some(position) = position.or(upsert_catch_all_position) { - match &upsert_actions[position].2.do_clause { - UpsertDo::Nothing => { - // Bail out without writing anything - program.emit_insn(Insn::Goto { - target_pc: row_done_label, - }); - } - UpsertDo::Set { .. } => { - // Route to DO UPDATE: capture conflicting rowid then jump - program.emit_insn(Insn::IdxRowId { - cursor_id: idx_cursor_id, - dest: conflict_rowid_reg, - }); - program.emit_insn(Insn::Goto { - target_pc: upsert_actions[position].1, - }); - } - } - } - // No matching UPSERT handler so we emit constraint error - // (if conflict clause matched - VM will jump to later instructions and skip halt) - program.emit_insn(Insn::Halt { - err_code: SQLITE_CONSTRAINT_UNIQUE, - description: format_unique_violation_desc(table_name.as_str(), &index), - }); - - // continue preflight with next constraint - program.preassign_label_to_next_insn(next_check); - } else { - // No UPSERT fast-path: probe and immediately insert - let ok = program.allocate_label(); - program.emit_insn(Insn::NoConflict { - cursor_id: idx_cursor_id, - target_pc: ok, - record_reg: idx_start_reg, - num_regs: num_cols, - }); - // Unique violation without ON CONFLICT clause -> error - program.emit_insn(Insn::Halt { - err_code: SQLITE_CONSTRAINT_UNIQUE, - description: format_unique_violation_desc(table_name.as_str(), &index), - }); - program.preassign_label_to_next_insn(ok); - - // In the non-UPSERT case, we insert the index - let record_reg = program.alloc_register(); - program.emit_insn(Insn::MakeRecord { - start_reg: idx_start_reg, - count: num_cols + 1, - dest_reg: record_reg, - index_name: Some(index.name.clone()), - affinity_str: None, - }); - program.emit_insn(Insn::IdxInsert { - cursor_id: idx_cursor_id, - record_reg, - unpacked_start: Some(idx_start_reg), - unpacked_count: Some((num_cols + 1) as u16), - flags: IdxInsertFlags::new().nchange(true), - }); - } - } else { - // Non-unique index: in UPSERT mode we postpone writes to commit phase. - if !has_upsert { - // eager insert for non-unique, no UPSERT - let record_reg = program.alloc_register(); - program.emit_insn(Insn::MakeRecord { - start_reg: idx_start_reg, - count: num_cols + 1, - dest_reg: record_reg, - index_name: Some(index.name.clone()), - affinity_str: None, - }); - program.emit_insn(Insn::IdxInsert { - cursor_id: idx_cursor_id, - record_reg, - unpacked_start: Some(idx_start_reg), - unpacked_count: Some((num_cols + 1) as u16), - flags: IdxInsertFlags::new().nchange(true), - }); - } - } - - // Close the partial-index skip (preflight) - if let Some(lbl) = maybe_skip_probe_label { - program.resolve_label(lbl, program.offset()); - } - } - ResolvedUpsertTarget::CatchAll => unreachable!(), - } - } - - for column_mapping in insertion - .col_mappings - .iter() - .filter(|column_mapping| column_mapping.column.notnull) - { - // if this is rowid alias - turso-db will emit NULL as a column value and always use rowid for the row as a column value - if column_mapping.column.is_rowid_alias { - continue; - } - program.emit_insn(Insn::HaltIfNull { - target_reg: column_mapping.register, - err_code: SQLITE_CONSTRAINT_NOTNULL, - description: { - let mut description = String::with_capacity( - table_name.as_str().len() - + column_mapping - .column - .name - .as_ref() - .expect("Column name must be present") - .len() - + 2, - ); - description.push_str(table_name.as_str()); - description.push('.'); - description.push_str( - column_mapping - .column - .name - .as_ref() - .expect("Column name must be present"), - ); - description - }, - }); - } + emit_notnulls(&mut program, &ctx, &insertion); // Create and insert the record let affinity_str = insertion @@ -950,7 +500,6 @@ pub fn translate_insert( .iter() .map(|col_mapping| col_mapping.column.affinity().aff_mask()) .collect::(); - program.emit_insn(Insn::MakeRecord { start_reg: insertion.first_col_register(), count: insertion.col_mappings.len(), @@ -965,7 +514,8 @@ pub fn translate_insert( // and insert into all applicable indexes, we do not re-probe uniqueness here, as preflight // already guaranteed non-conflict. for index in resolver.schema.get_indices(table_name.as_str()) { - let idx_cursor_id = idx_cursors + let idx_cursor_id = ctx + .idx_cursors .iter() .find(|(name, _, _)| *name == &index.name) .map(|(_, _, c_id)| *c_id) @@ -1050,7 +600,7 @@ pub fn translate_insert( } program.emit_insn(Insn::Insert { - cursor: cursor_id, + cursor: ctx.cursor_id, key_reg: insertion.key_register(), record_reg: insertion.record_register(), flag: InsertFlags::new(), @@ -1062,7 +612,13 @@ pub fn translate_insert( emit_parent_side_fk_decrement_on_insert(&mut program, resolver, &btree_table, &insertion)?; } - if let Some((seq_cursor_id, r_seq, r_seq_rowid, table_name_reg)) = autoincrement_meta { + if let Some(AutoincMeta { + seq_cursor_id, + r_seq, + r_seq_rowid, + table_name_reg, + }) = ctx.autoincrement_meta + { let no_update_needed_label = program.allocate_label(); program.emit_insn(Insn::Le { lhs: insertion.key_register(), @@ -1088,7 +644,7 @@ pub fn translate_insert( } // Emit update in the CDC table if necessary (after the INSERT updated the table) - if let Some((cdc_cursor_id, _)) = &cdc_table { + if let Some((cdc_cursor_id, _)) = &ctx.cdc_table { let cdc_has_after = program.capture_data_changes_mode().has_after(); let after_record_reg = if cdc_has_after { Some(emit_cdc_patch_record( @@ -1125,46 +681,23 @@ pub fn translate_insert( emit_returning_results(&mut program, &result_columns, &value_registers)?; } program.emit_insn(Insn::Goto { - target_pc: row_done_label, + target_pc: ctx.row_done_label, }); - for (_, label, mut upsert) in upsert_actions { - program.preassign_label_to_next_insn(label); - - if let UpsertDo::Set { - ref mut sets, - ref mut where_clause, - } = upsert.do_clause - { - // Normalize SET pairs once - let mut rewritten_sets = collect_set_clauses_for_upsert(&table, sets)?; - - emit_upsert( - &mut program, - &table, - &insertion, - cursor_id, - conflict_rowid_reg, - &mut rewritten_sets, - where_clause, - resolver, - &idx_cursors, - &mut result_columns, - cdc_table.as_ref().map(|c| c.0), - row_done_label, - connection, - )?; - } else { - // UpsertDo::Nothing case - program.emit_insn(Insn::Goto { - target_pc: row_done_label, - }); - } - } + resolve_upserts( + &mut program, + resolver, + &mut upsert_actions, + &ctx, + &insertion, + &table, + &mut result_columns, + connection, + )?; if inserting_multiple_rows { - if let Some(temp_table_ctx) = temp_table_ctx { - program.resolve_label(row_done_label, program.offset()); + if let Some(temp_table_ctx) = ctx.temp_table_ctx { + program.resolve_label(ctx.row_done_label, program.offset()); program.emit_insn(Insn::Next { cursor_id: temp_table_ctx.cursor_id, @@ -1176,35 +709,432 @@ pub fn translate_insert( cursor_id: temp_table_ctx.cursor_id, }); program.emit_insn(Insn::Goto { - target_pc: stmt_epilogue, + target_pc: ctx.stmt_epilogue, }); } else { // For multiple rows which not require a temp table, loop back - program.resolve_label(row_done_label, program.offset()); + program.resolve_label(ctx.row_done_label, program.offset()); program.emit_insn(Insn::Goto { - target_pc: loop_start_label, + target_pc: ctx.loop_start_label, }); - if let Some(sel_eof) = select_exhausted_label { + if let Some(sel_eof) = ctx.select_exhausted_label { program.preassign_label_to_next_insn(sel_eof); program.emit_insn(Insn::Goto { - target_pc: stmt_epilogue, + target_pc: ctx.stmt_epilogue, }); } } } else { - program.resolve_label(row_done_label, program.offset()); + program.resolve_label(ctx.row_done_label, program.offset()); // single-row falls through to epilogue program.emit_insn(Insn::Goto { - target_pc: stmt_epilogue, + target_pc: ctx.stmt_epilogue, }); } - program.preassign_label_to_next_insn(stmt_epilogue); - program.resolve_label(halt_label, program.offset()); + program.preassign_label_to_next_insn(ctx.stmt_epilogue); + program.resolve_label(ctx.halt_label, program.offset()); Ok(program) } +#[allow(clippy::too_many_arguments)] +fn resolve_upserts( + program: &mut ProgramBuilder, + resolver: &Resolver, + upsert_actions: &mut [(ResolvedUpsertTarget, BranchOffset, Box)], + ctx: &InsertEmitCtx, + insertion: &Insertion, + table: &Table, + result_columns: &mut [ResultSetColumn], + connection: &Arc, +) -> Result<()> { + for (_, label, upsert) in upsert_actions { + program.preassign_label_to_next_insn(*label); + + if let UpsertDo::Set { + ref mut sets, + ref mut where_clause, + } = upsert.do_clause + { + // Normalize SET pairs once + let mut rewritten_sets = collect_set_clauses_for_upsert(table, sets)?; + + emit_upsert( + program, + table, + ctx, + insertion, + &mut rewritten_sets, + where_clause, + resolver, + result_columns, + connection, + )?; + } else { + // UpsertDo::Nothing case + program.emit_insn(Insn::Goto { + target_pc: ctx.row_done_label, + }); + } + } + Ok(()) +} + +fn emit_notnulls(program: &mut ProgramBuilder, ctx: &InsertEmitCtx, insertion: &Insertion) { + for column_mapping in insertion + .col_mappings + .iter() + .filter(|column_mapping| column_mapping.column.notnull) + { + // if this is rowid alias - turso-db will emit NULL as a column value and always use rowid for the row as a column value + if column_mapping.column.is_rowid_alias { + continue; + } + program.emit_insn(Insn::HaltIfNull { + target_reg: column_mapping.register, + err_code: SQLITE_CONSTRAINT_NOTNULL, + description: { + let mut description = String::with_capacity( + ctx.table.name.as_str().len() + + column_mapping + .column + .name + .as_ref() + .expect("Column name must be present") + .len() + + 2, + ); + description.push_str(ctx.table.name.as_str()); + description.push('.'); + description.push_str( + column_mapping + .column + .name + .as_ref() + .expect("Column name must be present"), + ); + description + }, + }); + } +} + +struct BoundInsertResult { + values: Vec>, + upsert_actions: Vec<(ResolvedUpsertTarget, BranchOffset, Box)>, + inserting_multiple_rows: bool, +} + +fn bind_insert( + program: &mut ProgramBuilder, + resolver: &Resolver, + table: &Table, + body: &mut InsertBody, + connection: &Arc, +) -> Result { + let mut values: Vec> = vec![]; + let mut upsert_actions: Vec<(ResolvedUpsertTarget, BranchOffset, Box)> = Vec::new(); + let mut inserting_multiple_rows = false; + match body { + InsertBody::DefaultValues => { + // Generate default values for the table + values = table + .columns() + .iter() + .filter(|c| !c.hidden) + .map(|c| { + c.default + .clone() + .unwrap_or(Box::new(ast::Expr::Literal(ast::Literal::Null))) + }) + .collect(); + } + InsertBody::Select(select, upsert_opt) => { + match &mut select.body.select { + // TODO see how to avoid clone + OneSelect::Values(values_expr) if values_expr.len() <= 1 => { + if values_expr.is_empty() { + crate::bail_parse_error!("no values to insert"); + } + for expr in values_expr.iter_mut().flat_map(|v| v.iter_mut()) { + match expr.as_mut() { + Expr::Id(name) => { + if name.quoted_with('"') { + *expr = Expr::Literal(ast::Literal::String(name.as_literal())) + .into(); + } else { + // an INSERT INTO ... VALUES (...) cannot reference columns + crate::bail_parse_error!("no such column: {name}"); + } + } + Expr::Qualified(first_name, second_name) => { + // an INSERT INTO ... VALUES (...) cannot reference columns + crate::bail_parse_error!( + "no such column: {first_name}.{second_name}" + ); + } + _ => {} + } + bind_and_rewrite_expr( + expr, + None, + None, + connection, + &mut program.param_ctx, + BindingBehavior::ResultColumnsNotAllowed, + )?; + } + values = values_expr.pop().unwrap_or_else(Vec::new); + } + _ => inserting_multiple_rows = true, + } + while let Some(mut upsert) = upsert_opt.take() { + if let UpsertDo::Set { + ref mut sets, + ref mut where_clause, + } = &mut upsert.do_clause + { + for set in sets.iter_mut() { + bind_and_rewrite_expr( + &mut set.expr, + None, + None, + connection, + &mut program.param_ctx, + BindingBehavior::AllowUnboundIdentifiers, + )?; + } + if let Some(ref mut where_expr) = where_clause { + bind_and_rewrite_expr( + where_expr, + None, + None, + connection, + &mut program.param_ctx, + BindingBehavior::AllowUnboundIdentifiers, + )?; + } + } + let next = upsert.next.take(); + upsert_actions.push(( + // resolve the constrained target for UPSERT in the chain + resolve_upsert_target(resolver.schema, table, &upsert)?, + program.allocate_label(), + upsert, + )); + *upsert_opt = next; + } + } + } + Ok(BoundInsertResult { + values, + upsert_actions, + inserting_multiple_rows, + }) +} + +/// Depending on the InsertBody, we begin to initialize the source of the insert values +/// into registers using the following methods: +/// +/// Values with a single row, expressions are directly evaluated into registers, so nothing +/// is emitted here, we simply allocate the cursor ID and store the arity. +/// +/// Values with multiple rows, we use a coroutine to yield each row into registers directly. +/// +/// Select, we use a coroutine to yield each row from the SELECT into registers, +/// materializing into a temporary table if the target table is also read by the SELECT. +/// +/// For DefaultValues, we allocate the cursor and extend the empty values vector with either the +/// default expressions registered for the columns, or NULLs, so they can be translated into +/// registers later. +#[allow(clippy::too_many_arguments)] +fn init_source_emission<'a>( + mut program: ProgramBuilder, + table: &Table, + connection: &Arc, + ctx: &mut InsertEmitCtx<'a>, + resolver: &Resolver, + values: &mut Vec>, + body: InsertBody, + columns: &'a [ast::Name], +) -> Result { + let (num_values, cursor_id) = match body { + InsertBody::Select(select, _) => { + // Simple Common case of INSERT INTO
VALUES (...) + if matches!(&select.body.select, OneSelect::Values(values) if values.len() <= 1) { + ( + values.len(), + program.alloc_cursor_id(CursorType::BTreeTable(ctx.table.clone())), + ) + } else { + // Multiple rows - use coroutine for value population + let yield_reg = program.alloc_register(); + let jump_on_definition_label = program.allocate_label(); + let start_offset_label = program.allocate_label(); + program.emit_insn(Insn::InitCoroutine { + yield_reg, + jump_on_definition: jump_on_definition_label, + start_offset: start_offset_label, + }); + program.preassign_label_to_next_insn(start_offset_label); + + let query_destination = QueryDestination::CoroutineYield { + yield_reg, + coroutine_implementation_start: ctx.halt_label, + }; + program.incr_nesting(); + let result = + translate_select(select, resolver, program, query_destination, connection)?; + program = result.program; + program.decr_nesting(); + + program.emit_insn(Insn::EndCoroutine { yield_reg }); + program.preassign_label_to_next_insn(jump_on_definition_label); + + let cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(ctx.table.clone())); + + // From SQLite + /* Set useTempTable to TRUE if the result of the SELECT statement + ** should be written into a temporary table (template 4). Set to + ** FALSE if each output row of the SELECT can be written directly into + ** the destination table (template 3). + ** + ** A temp table must be used if the table being updated is also one + ** of the tables being read by the SELECT statement. Also use a + ** temp table in the case of row triggers. + */ + if program.is_table_open(table) { + let temp_cursor_id = + program.alloc_cursor_id(CursorType::BTreeTable(ctx.table.clone())); + ctx.temp_table_ctx = Some(TempTableCtx { + cursor_id: temp_cursor_id, + loop_start_label: program.allocate_label(), + loop_end_label: program.allocate_label(), + }); + + program.emit_insn(Insn::OpenEphemeral { + cursor_id: temp_cursor_id, + is_table: true, + }); + + // Main loop + program.preassign_label_to_next_insn(ctx.loop_start_label); + let yield_label = program.allocate_label(); + program.emit_insn(Insn::Yield { + yield_reg, + end_offset: yield_label, // stays local, we’ll route at loop end + }); + + let record_reg = program.alloc_register(); + let affinity_str = if columns.is_empty() { + ctx.table + .columns + .iter() + .filter(|col| !col.hidden) + .map(|col| col.affinity().aff_mask()) + .collect::() + } else { + columns + .iter() + .map(|col_name| { + let column_name = normalize_ident(col_name.as_str()); + if ROWID_STRS + .iter() + .any(|s| s.eq_ignore_ascii_case(&column_name)) + { + return Affinity::Integer.aff_mask(); + } + table + .get_column_by_name(&column_name) + .unwrap() + .1 + .affinity() + .aff_mask() + }) + .collect::() + }; + + program.emit_insn(Insn::MakeRecord { + start_reg: program.reg_result_cols_start.unwrap_or(yield_reg + 1), + count: result.num_result_cols, + dest_reg: record_reg, + index_name: None, + affinity_str: Some(affinity_str), + }); + + let rowid_reg = program.alloc_register(); + program.emit_insn(Insn::NewRowid { + cursor: temp_cursor_id, + rowid_reg, + prev_largest_reg: 0, + }); + program.emit_insn(Insn::Insert { + cursor: temp_cursor_id, + key_reg: rowid_reg, + record_reg, + // since we are not doing an Insn::NewRowid or an Insn::NotExists here, we need to seek to ensure the insertion happens in the correct place. + flag: InsertFlags::new().require_seek(), + table_name: "".to_string(), + }); + // loop back + program.emit_insn(Insn::Goto { + target_pc: ctx.loop_start_label, + }); + program.preassign_label_to_next_insn(yield_label); + + program.emit_insn(Insn::OpenWrite { + cursor_id, + root_page: RegisterOrLiteral::Literal(ctx.table.root_page), + db: 0, + }); + } else { + program.emit_insn(Insn::OpenWrite { + cursor_id, + root_page: RegisterOrLiteral::Literal(ctx.table.root_page), + db: 0, + }); + + program.preassign_label_to_next_insn(ctx.loop_start_label); + + // on EOF, jump to select_exhausted to check FK constraints + let select_exhausted = program.allocate_label(); + ctx.select_exhausted_label = Some(select_exhausted); + program.emit_insn(Insn::Yield { + yield_reg, + end_offset: select_exhausted, + }); + } + + ctx.yield_reg_opt = Some(yield_reg); + (result.num_result_cols, cursor_id) + } + } + InsertBody::DefaultValues => { + let num_values = table.columns().len(); + values.extend(table.columns().into_iter().map(|c| { + c.default + .clone() + .unwrap_or(Box::new(ast::Expr::Literal(ast::Literal::Null))) + })); + ( + num_values, + program.alloc_cursor_id(CursorType::BTreeTable(ctx.table.clone())), + ) + } + }; + ctx.num_values = num_values; + ctx.cursor_id = cursor_id; + Ok(program) +} + +pub struct AutoincMeta { + seq_cursor_id: usize, + r_seq: usize, + r_seq_rowid: usize, + table_name_reg: usize, +} + pub const ROWID_COLUMN: &Column = &Column { name: None, ty: schema::Type::Integer, @@ -1591,6 +1521,245 @@ fn translate_column( Ok(()) } +// Preflight phase: evaluate each applicable UNIQUE constraint and probe with NoConflict. +// If any probe hits: +// DO NOTHING -> jump to row_done_label. +// +// DO UPDATE (matching target) -> fetch conflicting rowid and jump to `upsert_entry`. +// +// otherwise, raise SQLITE_CONSTRAINT_UNIQUE +fn emit_preflight_constraint_checks( + program: &mut ProgramBuilder, + ctx: &InsertEmitCtx, + resolver: &Resolver, + insertion: &Insertion, + upsert_actions: &[(ResolvedUpsertTarget, BranchOffset, Box)], + constraints_to_check: &[(ResolvedUpsertTarget, Option)], + upsert_catch_all_position: Option, +) -> Result<()> { + for (constraint, position) in constraints_to_check { + match constraint { + ResolvedUpsertTarget::PrimaryKey => { + let make_record_label = program.allocate_label(); + program.emit_insn(Insn::NotExists { + cursor: ctx.cursor_id, + rowid_reg: insertion.key_register(), + target_pc: make_record_label, + }); + let rowid_column_name = insertion.key.column_name(); + + // Conflict on rowid: attempt to route through UPSERT if it targets the PK, otherwise raise constraint. + // emit Halt for every case *except* when upsert handles the conflict + 'emit_halt: { + if let Some(position) = position.or(upsert_catch_all_position) { + // PK conflict: the conflicting rowid is exactly the attempted key + program.emit_insn(Insn::Copy { + src_reg: insertion.key_register(), + dst_reg: ctx.conflict_rowid_reg, + extra_amount: 0, + }); + program.emit_insn(Insn::Goto { + target_pc: upsert_actions[position].1, + }); + break 'emit_halt; + } + let mut description = + String::with_capacity(ctx.table.name.len() + rowid_column_name.len() + 2); + description.push_str(ctx.table.name.as_str()); + description.push('.'); + description.push_str(rowid_column_name); + program.emit_insn(Insn::Halt { + err_code: SQLITE_CONSTRAINT_PRIMARYKEY, + description, + }); + } + program.preassign_label_to_next_insn(make_record_label); + } + ResolvedUpsertTarget::Index(index) => { + let column_mappings = index + .columns + .iter() + .map(|idx_col| insertion.get_col_mapping_by_name(&idx_col.name)); + // find which cursor we opened earlier for this index + let idx_cursor_id = ctx + .idx_cursors + .iter() + .find(|(name, _, _)| *name == &index.name) + .map(|(_, _, c_id)| *c_id) + .expect("no cursor found for index"); + + let maybe_skip_probe_label = if let Some(where_clause) = &index.where_clause { + let mut where_for_eval = where_clause.as_ref().clone(); + rewrite_partial_index_where(&mut where_for_eval, insertion)?; + let reg = program.alloc_register(); + translate_expr_no_constant_opt( + program, + Some(&TableReferences::new_empty()), + &where_for_eval, + reg, + resolver, + NoConstantOptReason::RegisterReuse, + )?; + let lbl = program.allocate_label(); + program.emit_insn(Insn::IfNot { + reg, + target_pc: lbl, + jump_if_null: true, + }); + Some(lbl) + } else { + None + }; + + let num_cols = index.columns.len(); + // allocate scratch registers for the index columns plus rowid + let idx_start_reg = program.alloc_registers(num_cols + 1); + + // build unpacked key [idx_start_reg .. idx_start_reg+num_cols-1], and rowid in last reg, + // copy each index column from the table's column registers into these scratch regs + for (i, column_mapping) in column_mappings.clone().enumerate() { + // copy from the table's column register over to the index's scratch register + let Some(col_mapping) = column_mapping else { + return Err(crate::LimboError::PlanningError( + "Column not found in INSERT".to_string(), + )); + }; + program.emit_insn(Insn::Copy { + src_reg: col_mapping.register, + dst_reg: idx_start_reg + i, + extra_amount: 0, + }); + } + // last register is the rowid + program.emit_insn(Insn::Copy { + src_reg: insertion.key_register(), + dst_reg: idx_start_reg + num_cols, + extra_amount: 0, + }); + + if index.unique { + let aff = index + .columns + .iter() + .map(|ic| ctx.table.columns[ic.pos_in_table].affinity().aff_mask()) + .collect::(); + program.emit_insn(Insn::Affinity { + start_reg: idx_start_reg, + count: NonZeroUsize::new(num_cols).expect("nonzero col count"), + affinities: aff, + }); + + if !upsert_actions.is_empty() { + let next_check = program.allocate_label(); + program.emit_insn(Insn::NoConflict { + cursor_id: idx_cursor_id, + target_pc: next_check, + record_reg: idx_start_reg, + num_regs: num_cols, + }); + + // Conflict detected, figure out if this UPSERT handles the conflict + if let Some(position) = position.or(upsert_catch_all_position) { + match &upsert_actions[position].2.do_clause { + UpsertDo::Nothing => { + // Bail out without writing anything + program.emit_insn(Insn::Goto { + target_pc: ctx.row_done_label, + }); + } + UpsertDo::Set { .. } => { + // Route to DO UPDATE: capture conflicting rowid then jump + program.emit_insn(Insn::IdxRowId { + cursor_id: idx_cursor_id, + dest: ctx.conflict_rowid_reg, + }); + program.emit_insn(Insn::Goto { + target_pc: upsert_actions[position].1, + }); + } + } + } + // No matching UPSERT handler so we emit constraint error + // (if conflict clause matched - VM will jump to later instructions and skip halt) + program.emit_insn(Insn::Halt { + err_code: SQLITE_CONSTRAINT_UNIQUE, + description: format_unique_violation_desc( + ctx.table.name.as_str(), + &index, + ), + }); + + // continue preflight with next constraint + program.preassign_label_to_next_insn(next_check); + } else { + // No UPSERT fast-path: probe and immediately insert + let ok = program.allocate_label(); + program.emit_insn(Insn::NoConflict { + cursor_id: idx_cursor_id, + target_pc: ok, + record_reg: idx_start_reg, + num_regs: num_cols, + }); + // Unique violation without ON CONFLICT clause -> error + program.emit_insn(Insn::Halt { + err_code: SQLITE_CONSTRAINT_UNIQUE, + description: format_unique_violation_desc( + ctx.table.name.as_str(), + &index, + ), + }); + program.preassign_label_to_next_insn(ok); + + // In the non-UPSERT case, we insert the index + let record_reg = program.alloc_register(); + program.emit_insn(Insn::MakeRecord { + start_reg: idx_start_reg, + count: num_cols + 1, + dest_reg: record_reg, + index_name: Some(index.name.clone()), + affinity_str: None, + }); + program.emit_insn(Insn::IdxInsert { + cursor_id: idx_cursor_id, + record_reg, + unpacked_start: Some(idx_start_reg), + unpacked_count: Some((num_cols + 1) as u16), + flags: IdxInsertFlags::new().nchange(true), + }); + } + } else { + // Non-unique index: in UPSERT mode we postpone writes to commit phase. + if upsert_actions.is_empty() { + // eager insert for non-unique, no UPSERT + let record_reg = program.alloc_register(); + program.emit_insn(Insn::MakeRecord { + start_reg: idx_start_reg, + count: num_cols + 1, + dest_reg: record_reg, + index_name: Some(index.name.clone()), + affinity_str: None, + }); + program.emit_insn(Insn::IdxInsert { + cursor_id: idx_cursor_id, + record_reg, + unpacked_start: Some(idx_start_reg), + unpacked_count: Some((num_cols + 1) as u16), + flags: IdxInsertFlags::new().nchange(true), + }); + } + } + + // Close the partial-index skip (preflight) + if let Some(lbl) = maybe_skip_probe_label { + program.resolve_label(lbl, program.offset()); + } + } + ResolvedUpsertTarget::CatchAll => unreachable!(), + } + } + Ok(()) +} + // TODO: comeback here later to apply the same improvements on select fn translate_virtual_table_insert( mut program: ProgramBuilder, @@ -1821,6 +1990,44 @@ pub fn rewrite_partial_index_where( ) } +fn build_constraints_to_check( + resolver: &Resolver, + table_name: &str, + upsert_actions: &[(ResolvedUpsertTarget, BranchOffset, Box)], + has_user_provided_rowid: bool, +) -> (Vec<(ResolvedUpsertTarget, Option)>, Option) { + let mut constraints_to_check = Vec::new(); + if has_user_provided_rowid { + // Check uniqueness constraint for rowid if it was provided by user. + // When the DB allocates it there are no need for separate uniqueness checks. + let position = upsert_actions + .iter() + .position(|(target, ..)| matches!(target, ResolvedUpsertTarget::PrimaryKey)); + constraints_to_check.push((ResolvedUpsertTarget::PrimaryKey, position)); + } + for index in resolver.schema.get_indices(table_name) { + let position = upsert_actions + .iter() + .position(|(target, ..)| matches!(target, ResolvedUpsertTarget::Index(x) if Arc::ptr_eq(x, index))); + constraints_to_check.push((ResolvedUpsertTarget::Index(index.clone()), position)); + } + + constraints_to_check.sort_by(|(_, p1), (_, p2)| match (p1, p2) { + (Some(p1), Some(p2)) => p1.cmp(p2), + (Some(_), None) => std::cmp::Ordering::Less, + (None, Some(_)) => std::cmp::Ordering::Greater, + (None, None) => std::cmp::Ordering::Equal, + }); + + let upsert_catch_all_position = + if let Some((ResolvedUpsertTarget::CatchAll, ..)) = upsert_actions.last() { + Some(upsert_actions.len() - 1) + } else { + None + }; + (constraints_to_check, upsert_catch_all_position) +} + fn emit_update_sqlite_sequence( program: &mut ProgramBuilder, schema: &Schema, diff --git a/core/translate/mod.rs b/core/translate/mod.rs index d51d89dea..758031544 100644 --- a/core/translate/mod.rs +++ b/core/translate/mod.rs @@ -284,17 +284,21 @@ pub fn translate_inner( columns, body, returning, - } => translate_insert( - with, - resolver, - or_conflict, - tbl_name, - columns, - body, - returning, - program, - connection, - )?, + } => { + if with.is_some() { + crate::bail_parse_error!("WITH clause is not supported"); + } + translate_insert( + resolver, + or_conflict, + tbl_name, + columns, + body, + returning, + program, + connection, + )? + } }; // Indicate write operations so that in the epilogue we can emit the correct type of transaction diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs index 868f3a933..3fb3db43a 100644 --- a/core/translate/upsert.rs +++ b/core/translate/upsert.rs @@ -8,7 +8,7 @@ use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY; use crate::schema::ROWID_SENTINEL; use crate::translate::expr::{walk_expr, WalkControl}; use crate::translate::fkeys::{emit_fk_child_update_counters, emit_parent_pk_change_checks}; -use crate::translate::insert::format_unique_violation_desc; +use crate::translate::insert::{format_unique_violation_desc, InsertEmitCtx}; use crate::translate::planner::ROWID_STRS; use crate::vdbe::insn::CmpInsFlags; use crate::Connection; @@ -31,7 +31,6 @@ use crate::{ vdbe::{ builder::ProgramBuilder, insn::{IdxInsertFlags, InsertFlags, Insn}, - BranchOffset, }, }; @@ -339,35 +338,31 @@ pub fn resolve_upsert_target( pub fn emit_upsert( program: &mut ProgramBuilder, table: &Table, + ctx: &InsertEmitCtx, insertion: &Insertion, - tbl_cursor_id: usize, - conflict_rowid_reg: usize, set_pairs: &mut [(usize, Box)], where_clause: &mut Option>, resolver: &Resolver, - idx_cursors: &[(&String, i64, usize)], returning: &mut [ResultSetColumn], - cdc_cursor_id: Option, - row_done_label: BranchOffset, connection: &Arc, ) -> crate::Result<()> { // Seek & snapshot CURRENT program.emit_insn(Insn::SeekRowid { - cursor_id: tbl_cursor_id, - src_reg: conflict_rowid_reg, - target_pc: row_done_label, + cursor_id: ctx.cursor_id, + src_reg: ctx.conflict_rowid_reg, + target_pc: ctx.row_done_label, }); - let num_cols = table.columns().len(); + let num_cols = ctx.table.columns.len(); let current_start = program.alloc_registers(num_cols); - for (i, col) in table.columns().iter().enumerate() { + for (i, col) in ctx.table.columns.iter().enumerate() { if col.is_rowid_alias { program.emit_insn(Insn::RowId { - cursor_id: tbl_cursor_id, + cursor_id: ctx.cursor_id, dest: current_start + i, }); } else { program.emit_insn(Insn::Column { - cursor_id: tbl_cursor_id, + cursor_id: ctx.cursor_id, column: i, dest: current_start + i, default: None, @@ -376,7 +371,7 @@ pub fn emit_upsert( } // BEFORE for index maintenance / CDC - let before_start = if cdc_cursor_id.is_some() || !idx_cursors.is_empty() { + let before_start = if ctx.cdc_table.is_some() || !ctx.idx_cursors.is_empty() { let s = program.alloc_registers(num_cols); program.emit_insn(Insn::Copy { src_reg: current_start, @@ -402,7 +397,7 @@ pub fn emit_upsert( pred, table, current_start, - conflict_rowid_reg, + ctx.conflict_rowid_reg, Some(table.get_name()), Some(insertion), true, @@ -411,7 +406,7 @@ pub fn emit_upsert( translate_expr(program, None, pred, pr, resolver)?; program.emit_insn(Insn::IfNot { reg: pr, - target_pc: row_done_label, + target_pc: ctx.row_done_label, jump_if_null: true, }); } @@ -423,7 +418,7 @@ pub fn emit_upsert( expr, table, current_start, - conflict_rowid_reg, + ctx.conflict_rowid_reg, Some(table.get_name()), Some(insertion), true, @@ -480,13 +475,13 @@ pub fn emit_upsert( }; let rowid_set_clause_reg = if has_user_provided_rowid { - Some(new_rowid_reg.unwrap_or(conflict_rowid_reg)) + Some(new_rowid_reg.unwrap_or(ctx.conflict_rowid_reg)) } else { None }; if let Some(bt) = table.btree() { if connection.foreign_keys_enabled() { - let rowid_new_reg = new_rowid_reg.unwrap_or(conflict_rowid_reg); + let rowid_new_reg = new_rowid_reg.unwrap_or(ctx.conflict_rowid_reg); // Child-side checks if resolver.schema.has_child_fks(bt.name.as_str()) { @@ -495,7 +490,7 @@ pub fn emit_upsert( resolver, &bt, table.get_name(), - tbl_cursor_id, + ctx.cursor_id, new_start, rowid_new_reg, &changed_cols, @@ -505,10 +500,10 @@ pub fn emit_upsert( program, resolver, &bt, - tbl_cursor_id, - conflict_rowid_reg, + ctx.cursor_id, + ctx.conflict_rowid_reg, new_start, - new_rowid_reg.unwrap_or(conflict_rowid_reg), + new_rowid_reg.unwrap_or(ctx.conflict_rowid_reg), rowid_set_clause_reg, set_pairs, )?; @@ -517,7 +512,7 @@ pub fn emit_upsert( // Index rebuild (DELETE old, INSERT new), honoring partial-index WHEREs if let Some(before) = before_start { - for (idx_name, _root, idx_cid) in idx_cursors { + for (idx_name, _root, idx_cid) in &ctx.idx_cursors { let idx_meta = resolver .schema .get_index(table.get_name(), idx_name) @@ -533,10 +528,10 @@ pub fn emit_upsert( table, idx_meta, before, - conflict_rowid_reg, + ctx.conflict_rowid_reg, resolver, ); - let new_rowid = new_rowid_reg.unwrap_or(conflict_rowid_reg); + let new_rowid = new_rowid_reg.unwrap_or(ctx.conflict_rowid_reg); let new_pred_reg = eval_partial_pred_for_row_image( program, table, idx_meta, new_start, new_rowid, resolver, ); @@ -563,7 +558,7 @@ pub fn emit_upsert( }); } program.emit_insn(Insn::Copy { - src_reg: conflict_rowid_reg, + src_reg: ctx.conflict_rowid_reg, dst_reg: del + k, extra_amount: 0, }); @@ -694,7 +689,7 @@ pub fn emit_upsert( // If equal to old rowid, skip uniqueness probe program.emit_insn(Insn::Eq { lhs: rnew, - rhs: conflict_rowid_reg, + rhs: ctx.conflict_rowid_reg, target_pc: ok, flags: CmpInsFlags::default(), collation: program.curr_collation(), @@ -702,7 +697,7 @@ pub fn emit_upsert( // If another row already has rnew -> constraint program.emit_insn(Insn::NotExists { - cursor: tbl_cursor_id, + cursor: ctx.cursor_id, rowid_reg: rnew, target_pc: ok, }); @@ -723,11 +718,11 @@ pub fn emit_upsert( // Now replace the row program.emit_insn(Insn::Delete { - cursor_id: tbl_cursor_id, + cursor_id: ctx.cursor_id, table_name: table.get_name().to_string(), }); program.emit_insn(Insn::Insert { - cursor: tbl_cursor_id, + cursor: ctx.cursor_id, key_reg: rnew, record_reg: rec, flag: InsertFlags::new().require_seek().update_rowid_change(), @@ -735,8 +730,8 @@ pub fn emit_upsert( }); } else { program.emit_insn(Insn::Insert { - cursor: tbl_cursor_id, - key_reg: conflict_rowid_reg, + cursor: ctx.cursor_id, + key_reg: ctx.conflict_rowid_reg, record_reg: rec, flag: InsertFlags::new(), table_name: table.get_name().to_string(), @@ -744,16 +739,16 @@ pub fn emit_upsert( } // emit CDC instructions - if let Some(cdc_id) = cdc_cursor_id { - let new_rowid = new_rowid_reg.unwrap_or(conflict_rowid_reg); + if let Some((cdc_id, _)) = ctx.cdc_table { + let new_rowid = new_rowid_reg.unwrap_or(ctx.conflict_rowid_reg); if new_rowid_reg.is_some() { // DELETE (before) let before_rec = if program.capture_data_changes_mode().has_before() { Some(emit_cdc_full_record( program, table.columns(), - tbl_cursor_id, - conflict_rowid_reg, + ctx.cursor_id, + ctx.conflict_rowid_reg, )) } else { None @@ -763,7 +758,7 @@ pub fn emit_upsert( resolver, OperationMode::DELETE, cdc_id, - conflict_rowid_reg, + ctx.conflict_rowid_reg, before_rec, None, None, @@ -796,7 +791,7 @@ pub fn emit_upsert( table, new_start, rec, - conflict_rowid_reg, + ctx.conflict_rowid_reg, )) } else { None @@ -805,8 +800,8 @@ pub fn emit_upsert( Some(emit_cdc_full_record( program, table.columns(), - tbl_cursor_id, - conflict_rowid_reg, + ctx.cursor_id, + ctx.conflict_rowid_reg, )) } else { None @@ -816,7 +811,7 @@ pub fn emit_upsert( resolver, OperationMode::UPDATE, cdc_id, - conflict_rowid_reg, + ctx.conflict_rowid_reg, before_rec, after_rec, None, @@ -828,7 +823,7 @@ pub fn emit_upsert( // RETURNING from NEW image + final rowid if !returning.is_empty() { let regs = ReturningValueRegisters { - rowid_register: new_rowid_reg.unwrap_or(conflict_rowid_reg), + rowid_register: new_rowid_reg.unwrap_or(ctx.conflict_rowid_reg), columns_start_register: new_start, num_columns: num_cols, }; @@ -836,7 +831,7 @@ pub fn emit_upsert( } program.emit_insn(Insn::Goto { - target_pc: row_done_label, + target_pc: ctx.row_done_label, }); Ok(()) } From d3bb8beb1706fdcf90db21c97fe02adf2e3760c6 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 14 Oct 2025 13:45:37 -0300 Subject: [PATCH 235/428] Run SQLite integrity check after stress test run --- Cargo.lock | 1 + stress/Cargo.toml | 1 + stress/main.rs | 30 ++++++++++++++++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index dd15bab8d..58da1ef9b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4559,6 +4559,7 @@ dependencies = [ "antithesis_sdk", "clap", "hex", + "rusqlite", "tempfile", "tokio", "tracing", diff --git a/stress/Cargo.toml b/stress/Cargo.toml index 2e51f003c..077f5f003 100644 --- a/stress/Cargo.toml +++ b/stress/Cargo.toml @@ -30,3 +30,4 @@ tracing = { workspace = true } tracing-appender = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } turso = { workspace = true } +rusqlite = { workspace = true } diff --git a/stress/main.rs b/stress/main.rs index f2ec5a179..313098b3b 100644 --- a/stress/main.rs +++ b/stress/main.rs @@ -450,6 +450,31 @@ pub fn init_tracing() -> Result { Ok(guard) } +fn integrity_check( + db_path: &std::path::Path, +) -> Result<(), Box> { + assert!(db_path.exists()); + let conn = rusqlite::Connection::open(db_path)?; + let mut stmt = conn.prepare("SELECT * FROM pragma_integrity_check;")?; + let mut rows = stmt.query(())?; + let mut result: Vec = Vec::new(); + + while let Some(row) = rows.next()? { + result.push(row.get(0)?); + } + if result.is_empty() { + return Err( + "simulation failed: integrity_check should return `ok` or a list of problems".into(), + ); + } + if !result[0].eq_ignore_ascii_case("ok") { + // Build a list of problems + result.iter_mut().for_each(|row| *row = format!("- {row}")); + return Err(format!("simulation failed: {}", result.join("\n")).into()); + } + Ok(()) +} + #[tokio::main] async fn main() -> Result<(), Box> { let _g = init_tracing()?; @@ -614,5 +639,10 @@ async fn main() -> Result<(), Box> { } println!("Done. SQL statements written to {}", opts.log_file); println!("Database file: {db_file}"); + + println!("Running SQLite Integrity check"); + + integrity_check(std::path::Path::new(&db_file))?; + Ok(()) } From 20bdb1133dfed9fb35fe091ededc64a68ab017d1 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 14 Oct 2025 13:00:31 -0400 Subject: [PATCH 236/428] fix clippy warnings --- core/translate/insert.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index c1fab97f1..ce6e1b8bb 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -821,6 +821,7 @@ fn emit_notnulls(program: &mut ProgramBuilder, ctx: &InsertEmitCtx, insertion: & } struct BoundInsertResult { + #[allow(clippy::vec_box)] values: Vec>, upsert_actions: Vec<(ResolvedUpsertTarget, BranchOffset, Box)>, inserting_multiple_rows: bool, @@ -948,7 +949,7 @@ fn bind_insert( /// For DefaultValues, we allocate the cursor and extend the empty values vector with either the /// default expressions registered for the columns, or NULLs, so they can be translated into /// registers later. -#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_arguments, clippy::vec_box)] fn init_source_emission<'a>( mut program: ProgramBuilder, table: &Table, @@ -1112,7 +1113,7 @@ fn init_source_emission<'a>( } InsertBody::DefaultValues => { let num_values = table.columns().len(); - values.extend(table.columns().into_iter().map(|c| { + values.extend(table.columns().iter().map(|c| { c.default .clone() .unwrap_or(Box::new(ast::Expr::Literal(ast::Literal::Null))) @@ -1685,7 +1686,7 @@ fn emit_preflight_constraint_checks( err_code: SQLITE_CONSTRAINT_UNIQUE, description: format_unique_violation_desc( ctx.table.name.as_str(), - &index, + index, ), }); @@ -1705,7 +1706,7 @@ fn emit_preflight_constraint_checks( err_code: SQLITE_CONSTRAINT_UNIQUE, description: format_unique_violation_desc( ctx.table.name.as_str(), - &index, + index, ), }); program.preassign_label_to_next_insn(ok); From 792877d421f8db1e669007f5772ae8a7f13576b7 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 14 Oct 2025 13:22:32 -0400 Subject: [PATCH 237/428] add doc comments to InsertEmitCtx --- core/translate/insert.rs | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index ce6e1b8bb..2fa5b8eec 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -42,12 +42,6 @@ use super::expr::{translate_expr, translate_expr_no_constant_opt, NoConstantOptR use super::plan::QueryDestination; use super::select::translate_select; -pub struct TempTableCtx { - cursor_id: usize, - loop_start_label: BranchOffset, - loop_end_label: BranchOffset, -} - /// Validate anything with this insert statement that should throw an early parse error fn validate(table_name: &str, resolver: &Resolver, table: &Table) -> Result<()> { // Check if this is a system table that should be protected from direct writes @@ -86,24 +80,48 @@ fn validate(table_name: &str, resolver: &Resolver, table: &Table) -> Result<()> Ok(()) } +pub struct TempTableCtx { + cursor_id: usize, + loop_start_label: BranchOffset, + loop_end_label: BranchOffset, +} + #[allow(dead_code)] pub struct InsertEmitCtx<'a> { + /// Parent table being inserted into pub table: &'a Arc, + + /// Index cursors we need to populate for this table + /// (idx name, root_page, idx cursor id) pub idx_cursors: Vec<(&'a String, i64, usize)>, + + /// Context for if the insert values are materialized first + /// into a temporary table pub temp_table_ctx: Option, + /// on conflict, default to ABORT pub on_conflict: ResolveType, + /// Arity of the insert values pub num_values: usize, + /// The yield register, if a coroutine is used to yield multiple rows pub yield_reg_opt: Option, + /// The register to hold the rowid of a conflicting row pub conflict_rowid_reg: usize, + /// The cursor id of the table being inserted into pub cursor_id: usize, - /// Labels + /// Label to jump to on HALT pub halt_label: BranchOffset, + /// Label to jump to when a row is done processing (either inserted or upserted) pub row_done_label: BranchOffset, + /// Jump here at the complete end of the statement pub stmt_epilogue: BranchOffset, + /// Beginning of the loop for multiple-row inserts pub loop_start_label: BranchOffset, + /// Label to jump to when a generated key is ready for uniqueness check pub key_ready_for_uniqueness_check_label: BranchOffset, + /// Label to jump to when no key is provided and one must be generated pub key_generation_label: BranchOffset, + /// Jump here when the insert value SELECT source has been fully exhausted pub select_exhausted_label: Option, /// CDC table info @@ -123,7 +141,6 @@ impl<'a> InsertEmitCtx<'a> { temp_table_ctx: Option, ) -> Self { // allocate cursor id's for each btree index cursor we'll need to populate the indexes - // (idx name, root_page, idx cursor id) let idx_cursors = resolver .schema .get_indices(table.name.as_str()) From 9dac7e00ba1b1c25ba50f401a87d01a1719afd68 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 22:19:19 +0400 Subject: [PATCH 238/428] relax check in the vector test - fixes https://github.com/tursodatabase/turso/issues/3732 --- core/vector/vector_types.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index a5551a453..15d015b5d 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -444,11 +444,26 @@ pub(crate) mod tests { /// - The distance must be between 0 and 2 fn test_vector_distance(v1: &Vector, v2: &Vector) -> bool { match operations::distance_cos::vector_distance_cos(v1, v2) { - Ok(distance) => distance.is_nan() || (0.0..=2.0).contains(&distance), + Ok(distance) => distance.is_nan() || (0.0 - 1e-6..=2.0 + 1e-6).contains(&distance), Err(_) => true, } } + #[test] + fn test_vector_some_cosine_dist() { + let a = Vector { + vector_type: VectorType::Float32Dense, + dims: 2, + data: vec![0, 0, 0, 0, 52, 208, 106, 63], + }; + let b = Vector { + vector_type: VectorType::Float32Dense, + dims: 2, + data: vec![0, 0, 0, 0, 58, 100, 45, 192], + }; + assert!((operations::distance_cos::vector_distance_cos(&a, &b).unwrap() - 2.0).abs() <= 1e-6); + } + #[test] fn parse_string_vector_zero_length() { let vector = operations::text::vector_from_text(VectorType::Float32Dense, "[]").unwrap(); From 427a1456632ad83e57b5ec1c79e80603776b302b Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 22:22:14 +0400 Subject: [PATCH 239/428] fmt --- core/vector/vector_types.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index 15d015b5d..dafa2ca23 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -461,7 +461,9 @@ pub(crate) mod tests { dims: 2, data: vec![0, 0, 0, 0, 58, 100, 45, 192], }; - assert!((operations::distance_cos::vector_distance_cos(&a, &b).unwrap() - 2.0).abs() <= 1e-6); + assert!( + (operations::distance_cos::vector_distance_cos(&a, &b).unwrap() - 2.0).abs() <= 1e-6 + ); } #[test] From aabc7b87a4e974096c0d1dc91fc6951af9013d6b Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 14 Oct 2025 15:46:01 -0300 Subject: [PATCH 240/428] perf/throughput force sqlite to use fullfsync --- perf/throughput/rusqlite/src/main.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/perf/throughput/rusqlite/src/main.rs b/perf/throughput/rusqlite/src/main.rs index c926f2a44..1e1d5c5f5 100644 --- a/perf/throughput/rusqlite/src/main.rs +++ b/perf/throughput/rusqlite/src/main.rs @@ -94,6 +94,7 @@ fn setup_database(db_path: &str) -> Result { conn.pragma_update(None, "journal_mode", "WAL")?; conn.pragma_update(None, "synchronous", "FULL")?; + conn.pragma_update(None, "fullfsync", "true")?; conn.execute( "CREATE TABLE IF NOT EXISTS test_table ( @@ -116,6 +117,9 @@ fn worker_thread( ) -> Result { let conn = Connection::open(&db_path)?; + conn.pragma_update(None, "synchronous", "FULL")?; + conn.pragma_update(None, "fullfsync", "true")?; + conn.busy_timeout(std::time::Duration::from_secs(30))?; start_barrier.wait(); From 2791f2f4793286bce789fae187f165d3239fe48b Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Wed, 15 Oct 2025 08:51:27 +0300 Subject: [PATCH 241/428] Fix change counter incrementation We merged two concurrent fixes to `nchange` handling last night and AFAICT the fix in #3692 was incorrect because it doesn't count UPDATEs in cases where the original row was DELETEd as part of the UPDATE statement. The correct fix was in 87434b8 --- core/vdbe/execute.rs | 9 +++---- testing/changes.test | 63 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 7 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index cbc5556a5..56b1961d7 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -5915,12 +5915,9 @@ pub fn op_insert( }; if let Some(rowid) = maybe_rowid { program.connection.update_last_rowid(rowid); - - if !flag.has(InsertFlags::UPDATE_ROWID_CHANGE) { - program - .n_change - .fetch_add(1, std::sync::atomic::Ordering::SeqCst); - } + program + .n_change + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); } let schema = program.connection.schema.read(); let dependent_views = schema.get_dependent_materialized_views(table_name); diff --git a/testing/changes.test b/testing/changes.test index 9f1f2123a..74d7833b2 100644 --- a/testing/changes.test +++ b/testing/changes.test @@ -41,4 +41,65 @@ do_execsql_test_on_specific_db {:memory:} changes-1.69 { update t set id = id+10 where id = 1; select changes(); } {1 -1} \ No newline at end of file +1} + +do_execsql_test_on_specific_db {:memory:} changes-on-delete { + create table temp (t1 integer, primary key (t1)); + insert into temp values (1), (2), (3), (4), (5); + delete from temp where t1 > 2; + select changes(); +} {3} + +do_execsql_test_on_specific_db {:memory:} changes-on-update { + create table temp (t1 integer, t2 text, primary key (t1)); + insert into temp values (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'); + update temp set t2 = 'updated' where t1 <= 3; + select changes(); +} {3} + +do_execsql_test_on_specific_db {:memory:} changes-on-update-rowid { + create table temp (t1 integer primary key, t2 text); + insert into temp values (1, 'a'), (2, 'b'), (3, 'c'); + update temp set t1 = t1 + 10 where t1 = 2; + select changes(); +} {1} + + +do_execsql_test_on_specific_db {:memory:} changes-resets-after-select { + create table temp (t1 integer, primary key (t1)); + insert into temp values (1), (2), (3); + select * from temp; + select changes(); +} {1 +2 +3 +3} + +do_execsql_test_on_specific_db {:memory:} changes-on-delete-no-match { + create table temp (t1 integer, primary key (t1)); + insert into temp values (1), (2), (3); + delete from temp where t1 > 100; + select changes(); +} {0} + +do_execsql_test_on_specific_db {:memory:} changes-on-update-no-match { + create table temp (t1 integer, t2 text, primary key (t1)); + insert into temp values (1, 'a'), (2, 'b'); + update temp set t2 = 'updated' where t1 > 100; + select changes(); +} {0} + +do_execsql_test_on_specific_db {:memory:} changes-on-delete-all { + create table temp (t1 integer, primary key (t1)); + insert into temp values (1), (2), (3), (4), (5), (6); + delete from temp; + select changes(); +} {6} + +do_execsql_test_on_specific_db {:memory:} changes-mixed-operations { + create table temp (t1 integer, t2 text, primary key (t1)); + insert into temp values (1, 'a'), (2, 'b'), (3, 'c'); + update temp set t2 = 'updated' where t1 <= 2; + delete from temp where t1 = 1; + select changes(); +} {1} From 25cf56b8e805fa1de0dcc44ae1215589cbd46630 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Wed, 15 Oct 2025 09:41:44 +0300 Subject: [PATCH 242/428] Fix expected error message --- core/vdbe/execute.rs | 10 +++------- testing/offset.test | 12 ++++++------ testing/select.test | 14 +++++++------- 3 files changed, 16 insertions(+), 20 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 0ee15fd91..7cc8281c2 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -6547,21 +6547,17 @@ pub fn op_must_be_int( Value::Integer(_) => {} Value::Float(f) => match cast_real_to_integer(*f) { Ok(i) => state.registers[*reg] = Register::Value(Value::Integer(i)), - Err(_) => crate::bail_parse_error!( - "MustBeInt: the value in register cannot be cast to integer" - ), + Err(_) => crate::bail_parse_error!("datatype mismatch"), }, Value::Text(text) => match checked_cast_text_to_numeric(text.as_str()) { Ok(Value::Integer(i)) => state.registers[*reg] = Register::Value(Value::Integer(i)), Ok(Value::Float(f)) => { state.registers[*reg] = Register::Value(Value::Integer(f as i64)) } - _ => crate::bail_parse_error!( - "MustBeInt: the value in register cannot be cast to integer" - ), + _ => crate::bail_parse_error!("datatype mismatch"), }, _ => { - crate::bail_parse_error!("MustBeInt: the value in register cannot be cast to integer"); + crate::bail_parse_error!("datatype mismatch"); } }; state.pc += 1; diff --git a/testing/offset.test b/testing/offset.test index 25ba69ce9..935070de7 100755 --- a/testing/offset.test +++ b/testing/offset.test @@ -90,26 +90,26 @@ do_execsql_test_on_specific_db {:memory:} offset-expr-int-and-string { do_execsql_test_in_memory_error_content offset-expr-cannot-be-cast-losslessly-1 { SELECT 1 LIMIT 3 OFFSET 1.1; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} do_execsql_test_in_memory_error_content offset-expr-cannot-be-cast-losslessly-2 { SELECT 1 LIMIT 3 OFFSET 1.1 + 2.2 + 1.9/8; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} # Return error as float in expression cannot be cast losslessly do_execsql_test_in_memory_error_content offset-expr-cannot-be-cast-losslessly-3 { SELECT 1 LIMIT 3 OFFSET 1.1 + 'a'; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} do_execsql_test_in_memory_error_content offset-expr-invalid-data-type-1 { SELECT 1 LIMIT 3 OFFSET 'a'; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} do_execsql_test_in_memory_error_content offset-expr-invalid-data-type-2 { SELECT 1 LIMIT 3 OFFSET NULL; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} # Expression below evaluates to NULL (string → 0) do_execsql_test_in_memory_error_content offset-expr-invalid-data-type-3 { SELECT 1 LIMIT 3 OFFSET 1/'iwillbezero ;-; '; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} diff --git a/testing/select.test b/testing/select.test index 9c0152ffa..700403ff7 100755 --- a/testing/select.test +++ b/testing/select.test @@ -979,34 +979,34 @@ do_execsql_test_on_specific_db {:memory:} limit-expr-int-and-string { do_execsql_test_in_memory_error_content limit-expr-cannot-be-cast-losslessly-1 { SELECT 1 LIMIT 1.1; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} do_execsql_test_in_memory_error_content limit-expr-cannot-be-cast-losslessly-2 { SELECT 1 LIMIT 1.1 + 2.2 + 1.9/8; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} # Return error as float in the expression cannot be cast losslessly do_execsql_test_in_memory_error_content limit-expr-cannot-be-cast-losslessly-3 { SELECT 1 LIMIT 1.1 +'a'; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} do_execsql_test_in_memory_error_content limit-expr-invalid-data-type-1 { SELECT 1 LIMIT 'a'; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} do_execsql_test_in_memory_error_content limit-expr-invalid-data-type-2 { SELECT 1 LIMIT NULL; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} # The expression below evaluates to NULL as string is cast to 0 do_execsql_test_in_memory_error_content limit-expr-invalid-data-type-3 { SELECT 1 LIMIT 1/'iwillbezero ;-; ' ; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} # Expression is evaluated as NULL do_execsql_test_in_memory_error_content limit-expr-invalid-data-type-4 { SELECT 1 LIMIT 4+NULL; -} {"the value in register cannot be cast to integer"} +} {"datatype mismatch"} do_execsql_test_on_specific_db {:memory:} rowid-references { CREATE TABLE test_table (id INTEGER); From bae33cb52cab424dbf455125d70d00572526d29f Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Wed, 15 Oct 2025 09:47:10 +0300 Subject: [PATCH 243/428] Avoid unwrapping failed f64 parsing attempts --- core/translate/compound_select.rs | 98 +++++++++++++++++-------------- core/translate/emitter.rs | 14 +++-- 2 files changed, 62 insertions(+), 50 deletions(-) diff --git a/core/translate/compound_select.rs b/core/translate/compound_select.rs index 8066dd459..619d07585 100644 --- a/core/translate/compound_select.rs +++ b/core/translate/compound_select.rs @@ -6,7 +6,7 @@ use crate::translate::plan::{Plan, QueryDestination, SelectPlan}; use crate::vdbe::builder::{CursorType, ProgramBuilder}; use crate::vdbe::insn::Insn; use crate::vdbe::BranchOffset; -use crate::{emit_explain, QueryMode, SymbolTable}; +use crate::{emit_explain, LimboError, QueryMode, SymbolTable}; use std::sync::Arc; use tracing::instrument; use turso_parser::ast::{CompoundOperator, Expr, Literal, SortOrder}; @@ -40,56 +40,66 @@ pub fn emit_program_for_compound_select( // Each subselect shares the same limit_ctx and offset, because the LIMIT, OFFSET applies to // the entire compound select, not just a single subselect. - let limit_ctx = limit.as_ref().map(|limit| { - let reg = program.alloc_register(); - match limit.as_ref() { - Expr::Literal(Literal::Numeric(n)) => { - if let Ok(value) = n.parse::() { - program.add_comment(program.offset(), "LIMIT counter"); - program.emit_insn(Insn::Integer { value, dest: reg }); - } else { - let value = n.parse::().unwrap(); - program.emit_insn(Insn::Real { value, dest: reg }); + let limit_ctx = limit + .as_ref() + .map(|limit| { + let reg = program.alloc_register(); + match limit.as_ref() { + Expr::Literal(Literal::Numeric(n)) => { + if let Ok(value) = n.parse::() { + program.add_comment(program.offset(), "LIMIT counter"); + program.emit_insn(Insn::Integer { value, dest: reg }); + } else { + let value = n + .parse::() + .map_err(|_| LimboError::ParseError("invalid limit".to_string()))?; + program.emit_insn(Insn::Real { value, dest: reg }); + program.add_comment(program.offset(), "LIMIT counter"); + program.emit_insn(Insn::MustBeInt { reg }); + } + } + _ => { + _ = translate_expr(program, None, limit, reg, &right_most_ctx.resolver); program.add_comment(program.offset(), "LIMIT counter"); program.emit_insn(Insn::MustBeInt { reg }); } } - _ => { - _ = translate_expr(program, None, limit, reg, &right_most_ctx.resolver); - program.add_comment(program.offset(), "LIMIT counter"); - program.emit_insn(Insn::MustBeInt { reg }); - } - } - LimitCtx::new_shared(reg) - }); - let offset_reg = offset.as_ref().map(|offset_expr| { - let reg = program.alloc_register(); - match offset_expr.as_ref() { - Expr::Literal(Literal::Numeric(n)) => { - // Compile-time constant offset - if let Ok(value) = n.parse::() { - program.emit_insn(Insn::Integer { value, dest: reg }); - } else { - let value = n.parse::().unwrap(); - program.emit_insn(Insn::Real { value, dest: reg }); + Ok::<_, LimboError>(LimitCtx::new_shared(reg)) + }) + .transpose()?; + let offset_reg = offset + .as_ref() + .map(|offset_expr| { + let reg = program.alloc_register(); + match offset_expr.as_ref() { + Expr::Literal(Literal::Numeric(n)) => { + // Compile-time constant offset + if let Ok(value) = n.parse::() { + program.emit_insn(Insn::Integer { value, dest: reg }); + } else { + let value = n + .parse::() + .map_err(|_| LimboError::ParseError("invalid offset".to_string()))?; + program.emit_insn(Insn::Real { value, dest: reg }); + } + } + _ => { + _ = translate_expr(program, None, offset_expr, reg, &right_most_ctx.resolver); } } - _ => { - _ = translate_expr(program, None, offset_expr, reg, &right_most_ctx.resolver); - } - } - program.add_comment(program.offset(), "OFFSET counter"); - program.emit_insn(Insn::MustBeInt { reg }); - let combined_reg = program.alloc_register(); - program.add_comment(program.offset(), "OFFSET + LIMIT"); - program.emit_insn(Insn::OffsetLimit { - offset_reg: reg, - combined_reg, - limit_reg: limit_ctx.as_ref().unwrap().reg_limit, - }); + program.add_comment(program.offset(), "OFFSET counter"); + program.emit_insn(Insn::MustBeInt { reg }); + let combined_reg = program.alloc_register(); + program.add_comment(program.offset(), "OFFSET + LIMIT"); + program.emit_insn(Insn::OffsetLimit { + offset_reg: reg, + combined_reg, + limit_reg: limit_ctx.as_ref().unwrap().reg_limit, + }); - reg - }); + Ok::<_, LimboError>(reg) + }) + .transpose()?; // When a compound SELECT is part of a query that yields results to a coroutine (e.g. within an INSERT clause), // we must allocate registers for the result columns to be yielded. Each subselect will then yield to diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 6d635d763..0564927bd 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -256,7 +256,7 @@ pub fn emit_query<'a>( let after_main_loop_label = program.allocate_label(); t_ctx.label_main_loop_end = Some(after_main_loop_label); - init_limit(program, t_ctx, &plan.limit, &plan.offset); + init_limit(program, t_ctx, &plan.limit, &plan.offset)?; if !plan.values.is_empty() { let reg_result_cols_start = emit_values(program, plan, t_ctx)?; @@ -427,7 +427,7 @@ fn emit_program_for_delete( let after_main_loop_label = program.allocate_label(); t_ctx.label_main_loop_end = Some(after_main_loop_label); - init_limit(program, &mut t_ctx, &plan.limit, &None); + init_limit(program, &mut t_ctx, &plan.limit, &None)?; // No rows will be read from source table loops if there is a constant false condition eg. WHERE 0 if plan.contains_constant_false_condition { @@ -879,7 +879,7 @@ fn emit_program_for_update( let after_main_loop_label = program.allocate_label(); t_ctx.label_main_loop_end = Some(after_main_loop_label); - init_limit(program, &mut t_ctx, &plan.limit, &plan.offset); + init_limit(program, &mut t_ctx, &plan.limit, &plan.offset)?; // No rows will be read from source table loops if there is a constant false condition eg. WHERE 0 if plan.contains_constant_false_condition { @@ -1953,12 +1953,12 @@ fn init_limit( t_ctx: &mut TranslateCtx, limit: &Option>, offset: &Option>, -) { +) -> Result<()> { if t_ctx.limit_ctx.is_none() && limit.is_some() { t_ctx.limit_ctx = Some(LimitCtx::new(program)); } let Some(limit_ctx) = &t_ctx.limit_ctx else { - return; + return Ok(()); }; if limit_ctx.initialize_counter { @@ -2004,7 +2004,7 @@ fn init_limit( dest: offset_reg, }); } else { - let value = n.parse::().unwrap(); + let value = n.parse::()?; program.emit_insn(Insn::Real { value, dest: limit_ctx.reg_limit, @@ -2041,6 +2041,8 @@ fn init_limit( target_pc: main_loop_end, jump_if_null: false, }); + + Ok(()) } /// We have `Expr`s which have *not* had column references bound to them, From d3e8285d93e152a12a233dd794d05e81d64de8a1 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 15:30:31 +0300 Subject: [PATCH 244/428] core/io: Never skip a completion in CompletionGroup::add() The previous implementation of CompletionGroup::add() would filter out successfully-finished completions: if !completion.finished() || completion.failed() { self.completions.push(completion.clone()); } This caused a problem when combined with drain() in the calling code. Completions that were already finished would be removed from the source vector by drain() but not added to the group, effectively losing track of them. This breaks the invariant that all completions passed to a group must be tracked, regardless of their state. The build() method already handles finished completions correctly by not including them in the outstanding count. The fix is to always add all completions and let build() handle their state appropriately, matching the behavior of the old io_yield_many!() macro. --- core/io/mod.rs | 100 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 96 insertions(+), 4 deletions(-) diff --git a/core/io/mod.rs b/core/io/mod.rs index 0c0baa807..54e045d68 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -176,10 +176,7 @@ impl CompletionGroup { } pub fn add(&mut self, completion: &Completion) { - if !completion.finished() || completion.failed() { - self.completions.push(completion.clone()); - } - // Skip successfully finished completions + self.completions.push(completion.clone()); } pub fn build(self) -> Completion { @@ -962,4 +959,99 @@ mod tests { assert!(!group.succeeded()); assert_eq!(group.get_error(), Some(CompletionError::Aborted)); } + + #[test] + fn test_completion_group_tracks_all_completions() { + // This test verifies the fix for the bug where CompletionGroup::add() + // would skip successfully-finished completions. This caused problems + // when code used drain() to move completions into a group, because + // finished completions would be removed from the source but not tracked + // by the group, effectively losing them. + use std::sync::atomic::{AtomicUsize, Ordering}; + + let callback_count = Arc::new(AtomicUsize::new(0)); + let callback_count_clone = callback_count.clone(); + + // Simulate the pattern: create multiple completions, complete some, + // then add ALL of them to a group (like drain() would do) + let mut completions = Vec::new(); + + // Create 4 completions + for _ in 0..4 { + completions.push(Completion::new_write(|_| {})); + } + + // Complete 2 of them before adding to group (simulate async completion) + completions[0].complete(0); + completions[2].complete(0); + + // Now create a group and add ALL completions (like drain() would do) + let mut group = CompletionGroup::new(move |_| { + callback_count_clone.fetch_add(1, Ordering::SeqCst); + }); + + // Add all completions to the group + for c in &completions { + group.add(c); + } + + let group = group.build(); + + // The group should track all 4 completions: + // - c[0] and c[2] are already finished + // - c[1] and c[3] are still pending + // So the group should not be finished yet + assert!(!group.finished()); + assert_eq!(callback_count.load(Ordering::SeqCst), 0); + + // Complete the first pending completion + completions[1].complete(0); + assert!(!group.finished()); + assert_eq!(callback_count.load(Ordering::SeqCst), 0); + + // Complete the last pending completion - now group should finish + completions[3].complete(0); + assert!(group.finished()); + assert!(group.succeeded()); + assert_eq!(callback_count.load(Ordering::SeqCst), 1); + + // Verify no errors + assert!(group.get_error().is_none()); + } + + #[test] + fn test_completion_group_with_all_finished_successfully() { + // Edge case: all completions are already successfully finished + // when added to the group. The group should complete immediately. + use std::sync::atomic::{AtomicBool, Ordering}; + + let callback_called = Arc::new(AtomicBool::new(false)); + let callback_called_clone = callback_called.clone(); + + let mut completions = Vec::new(); + + // Create and immediately complete 3 completions + for _ in 0..3 { + let c = Completion::new_write(|_| {}); + c.complete(0); + completions.push(c); + } + + // Add all already-completed completions to group + let mut group = CompletionGroup::new(move |_| { + callback_called_clone.store(true, Ordering::SeqCst); + }); + + for c in &completions { + group.add(c); + } + + let group = group.build(); + + // Group should be immediately finished since all completions were done + assert!(group.finished()); + assert!(group.succeeded()); + assert!(callback_called.load(Ordering::SeqCst)); + assert!(group.get_error().is_none()); + } } From cdd6f6a45d470bff9edb65daed38935001f43afd Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 14 Oct 2025 09:56:10 +0300 Subject: [PATCH 245/428] core/io: Fix completion group parent notification The previous implementation of CompletionGroup would call the group's callback function directly when the last completion finished: if prev == 1 { let group_result = group.result.get().and_then(|e| *e); (group.complete)(group_result.map_or(Ok(0), Err)); } This broke nested completion groups because parent groups track their children via the Completion::callback() method. By calling the function pointer directly, we bypassed the completion chain and parent groups never received notification that their child had completed. The fix stores a reference to the group's own Completion object in self_completion during build(). When the last child finishes, we call group_completion.callback() instead of invoking the function directly. This properly propagates through the completion hierarchy, ensuring parent groups decrement their outstanding count and eventually complete. This matches the behavior of individual completions and maintains the invariant that all completions notify their parents through the unified callback() mechanism. --- core/io/mod.rs | 152 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 148 insertions(+), 4 deletions(-) diff --git a/core/io/mod.rs b/core/io/mod.rs index 54e045d68..bfed8f512 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -188,6 +188,11 @@ impl CompletionGroup { let group_completion = GroupCompletion::new(self.callback, total); let group = Completion::new(CompletionType::Group(group_completion)); + // Store the group completion reference for later callback + if let CompletionType::Group(ref g) = group.get_inner().completion_type { + let _ = g.inner.self_completion.set(group.clone()); + } + for mut c in self.completions { // If the completion has not completed, link it to the group. if !c.finished() { @@ -242,6 +247,8 @@ struct GroupCompletionInner { complete: Box) + Send + Sync>, /// Cached result after all completions finish result: OnceLock>, + /// Reference to the group's own Completion for notifying parents + self_completion: OnceLock, } impl GroupCompletion { @@ -254,6 +261,7 @@ impl GroupCompletion { outstanding: AtomicUsize::new(outstanding), complete: Box::new(complete), result: OnceLock::new(), + self_completion: OnceLock::new(), }), } } @@ -452,12 +460,14 @@ impl Completion { } let prev = group.outstanding.fetch_sub(1, Ordering::SeqCst); - // If this was the last completion, call the group callback + // If this was the last completion in the group, trigger the group's callback + // which will recursively call this same callback() method to notify parents if prev == 1 { - let group_result = group.result.get().and_then(|e| *e); - (group.complete)(group_result.map_or(Ok(0), Err)); + if let Some(group_completion) = group.self_completion.get() { + let group_result = group.result.get().and_then(|e| *e); + group_completion.callback(group_result.map_or(Ok(0), Err)); + } } - // TODO: remove self from parent group } result.err() @@ -1054,4 +1064,138 @@ mod tests { assert!(callback_called.load(Ordering::SeqCst)); assert!(group.get_error().is_none()); } + + #[test] + fn test_completion_group_nested() { + use std::sync::atomic::{AtomicUsize, Ordering}; + + // Track callbacks at different levels + let parent_called = Arc::new(AtomicUsize::new(0)); + let child1_called = Arc::new(AtomicUsize::new(0)); + let child2_called = Arc::new(AtomicUsize::new(0)); + + // Create child group 1 with 2 completions + let child1_called_clone = child1_called.clone(); + let mut child_group1 = CompletionGroup::new(move |_| { + child1_called_clone.fetch_add(1, Ordering::SeqCst); + }); + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + child_group1.add(&c1); + child_group1.add(&c2); + let child_group1 = child_group1.build(); + + // Create child group 2 with 2 completions + let child2_called_clone = child2_called.clone(); + let mut child_group2 = CompletionGroup::new(move |_| { + child2_called_clone.fetch_add(1, Ordering::SeqCst); + }); + let c3 = Completion::new_write(|_| {}); + let c4 = Completion::new_write(|_| {}); + child_group2.add(&c3); + child_group2.add(&c4); + let child_group2 = child_group2.build(); + + // Create parent group containing both child groups + let parent_called_clone = parent_called.clone(); + let mut parent_group = CompletionGroup::new(move |_| { + parent_called_clone.fetch_add(1, Ordering::SeqCst); + }); + parent_group.add(&child_group1); + parent_group.add(&child_group2); + let parent_group = parent_group.build(); + + // Initially nothing should be finished + assert!(!parent_group.finished()); + assert!(!child_group1.finished()); + assert!(!child_group2.finished()); + assert_eq!(parent_called.load(Ordering::SeqCst), 0); + assert_eq!(child1_called.load(Ordering::SeqCst), 0); + assert_eq!(child2_called.load(Ordering::SeqCst), 0); + + // Complete first completion in child group 1 + c1.complete(0); + assert!(!child_group1.finished()); + assert!(!parent_group.finished()); + assert_eq!(child1_called.load(Ordering::SeqCst), 0); + assert_eq!(parent_called.load(Ordering::SeqCst), 0); + + // Complete second completion in child group 1 - should finish child group 1 + c2.complete(0); + assert!(child_group1.finished()); + assert!(child_group1.succeeded()); + assert_eq!(child1_called.load(Ordering::SeqCst), 1); + + // Parent should not be finished yet because child group 2 is still pending + assert!(!parent_group.finished()); + assert_eq!(parent_called.load(Ordering::SeqCst), 0); + + // Complete first completion in child group 2 + c3.complete(0); + assert!(!child_group2.finished()); + assert!(!parent_group.finished()); + assert_eq!(child2_called.load(Ordering::SeqCst), 0); + assert_eq!(parent_called.load(Ordering::SeqCst), 0); + + // Complete second completion in child group 2 - should finish everything + c4.complete(0); + assert!(child_group2.finished()); + assert!(child_group2.succeeded()); + assert_eq!(child2_called.load(Ordering::SeqCst), 1); + + // Parent should now be finished + assert!(parent_group.finished()); + assert!(parent_group.succeeded()); + assert_eq!(parent_called.load(Ordering::SeqCst), 1); + assert!(parent_group.get_error().is_none()); + } + + #[test] + fn test_completion_group_nested_with_error() { + use std::sync::atomic::{AtomicBool, Ordering}; + + let parent_called = Arc::new(AtomicBool::new(false)); + let child_called = Arc::new(AtomicBool::new(false)); + + // Create child group with 2 completions + let child_called_clone = child_called.clone(); + let mut child_group = CompletionGroup::new(move |_| { + child_called_clone.store(true, Ordering::SeqCst); + }); + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + child_group.add(&c1); + child_group.add(&c2); + let child_group = child_group.build(); + + // Create parent group containing child group and another completion + let parent_called_clone = parent_called.clone(); + let mut parent_group = CompletionGroup::new(move |_| { + parent_called_clone.store(true, Ordering::SeqCst); + }); + let c3 = Completion::new_write(|_| {}); + parent_group.add(&child_group); + parent_group.add(&c3); + let parent_group = parent_group.build(); + + // Complete child group with success + c1.complete(0); + c2.complete(0); + assert!(child_group.finished()); + assert!(child_group.succeeded()); + assert!(child_called.load(Ordering::SeqCst)); + + // Parent still pending + assert!(!parent_group.finished()); + assert!(!parent_called.load(Ordering::SeqCst)); + + // Complete c3 with error + c3.error(CompletionError::Aborted); + + // Parent should finish with error + assert!(parent_group.finished()); + assert!(!parent_group.succeeded()); + assert_eq!(parent_group.get_error(), Some(CompletionError::Aborted)); + assert!(parent_called.load(Ordering::SeqCst)); + } } From 7ff427f9d48bfcb98d41fc9a1e6d55567a633971 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 15 Oct 2025 11:47:50 +0300 Subject: [PATCH 246/428] core/io: Fix completion group callback invocation for empty groups Spotted by @pedrocarlo. --- core/io/mod.rs | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/core/io/mod.rs b/core/io/mod.rs index bfed8f512..b571740d8 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -182,8 +182,8 @@ impl CompletionGroup { pub fn build(self) -> Completion { let total = self.completions.len(); if total == 0 { - let group_completion = GroupCompletion::new(self.callback, 0); - return Completion::new(CompletionType::Group(group_completion)); + (self.callback)(Ok(0)); + return Completion::new_yield(); } let group_completion = GroupCompletion::new(self.callback, total); let group = Completion::new(CompletionType::Group(group_completion)); @@ -759,11 +759,24 @@ mod tests { #[test] fn test_completion_group_empty() { - let group = CompletionGroup::new(|_| {}); + use std::sync::atomic::{AtomicBool, Ordering}; + + let callback_called = Arc::new(AtomicBool::new(false)); + let callback_called_clone = callback_called.clone(); + + let group = CompletionGroup::new(move |_| { + callback_called_clone.store(true, Ordering::SeqCst); + }); let group = group.build(); assert!(group.finished()); assert!(group.succeeded()); assert!(group.get_error().is_none()); + + // Verify the callback was actually called + assert!( + callback_called.load(Ordering::SeqCst), + "callback should be called for empty group" + ); } #[test] From 80876148cf9e51332f0b0b25055386a9b703e0b1 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 10:42:58 +0300 Subject: [PATCH 247/428] core/io: Add cancel() method to CompletionGroup --- core/io/mod.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/io/mod.rs b/core/io/mod.rs index b571740d8..e98192f9f 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -179,6 +179,12 @@ impl CompletionGroup { self.completions.push(completion.clone()); } + pub fn cancel(&self) { + for c in &self.completions { + c.abort(); + } + } + pub fn build(self) -> Completion { let total = self.completions.len(); if total == 0 { From 07ba7276b2f23f8f0ee71f1fe5b05da39ce1a24b Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 10:38:20 +0300 Subject: [PATCH 248/428] core/vdbe/sorter: Replace io_yield_many with completion groups --- core/vdbe/sorter.rs | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index ac7e07ed4..0e9eec00b 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -9,14 +9,14 @@ use tempfile; use crate::types::IOCompletions; use crate::{ error::LimboError, - io::{Buffer, Completion, File, OpenFlags, IO}, + io::{Buffer, Completion, CompletionGroup, File, OpenFlags, IO}, storage::sqlite3_ondisk::{read_varint, varint_len, write_varint}, translate::collate::CollationSeq, turso_assert, types::{IOResult, ImmutableRecord, KeyInfo, RecordCursor, ValueRef}, Result, }; -use crate::{io_yield_many, io_yield_one, return_if_io, CompletionError}; +use crate::{io_yield_one, return_if_io, CompletionError}; #[derive(Debug, Clone, Copy)] enum SortState { @@ -252,20 +252,21 @@ impl Sorter { fn init_chunk_heap(&mut self) -> Result> { match self.init_chunk_heap_state { InitChunkHeapState::Start => { - let mut completions: Vec = Vec::with_capacity(self.chunks.len()); + let mut group = CompletionGroup::new(|_| {}); for chunk in self.chunks.iter_mut() { match chunk.read() { Err(e) => { tracing::error!("Failed to read chunk: {e}"); - self.io.cancel(&completions)?; + group.cancel(); self.io.drain()?; return Err(e); } - Ok(c) => completions.push(c), + Ok(c) => group.add(&c), }; } self.init_chunk_heap_state = InitChunkHeapState::PushChunk; - io_yield_many!(completions); + let completion = group.build(); + io_yield_one!(completion); } InitChunkHeapState::PushChunk => { // Make sure all chunks read at least one record into their buffer. @@ -278,17 +279,19 @@ impl Sorter { ); self.chunk_heap.reserve(self.chunks.len()); // TODO: blocking will be unnecessary here with IO completions - let mut completions = vec![]; + let mut group = CompletionGroup::new(|_| {}); for chunk_idx in 0..self.chunks.len() { if let Some(c) = self.push_to_chunk_heap(chunk_idx)? { - completions.push(c); + group.add(&c); }; } self.init_chunk_heap_state = InitChunkHeapState::Start; - if !completions.is_empty() { - io_yield_many!(completions); + let completion = group.build(); + if completion.finished() { + Ok(IOResult::Done(())) + } else { + io_yield_one!(completion); } - Ok(IOResult::Done(())) } } } From 986faa42da65ddb644f6366c9cc0cb2a46b7b3b3 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 10:47:41 +0300 Subject: [PATCH 249/428] core/storage/pager: Replace io_yield_many with completion groups --- core/storage/pager.rs | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 021071134..258188c03 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -10,11 +10,11 @@ use crate::storage::{ }; use crate::types::{IOCompletions, WalState}; use crate::util::IOExt as _; -use crate::{io_yield_many, io_yield_one, IOContext}; use crate::{ - return_if_io, turso_assert, types::WalFrameInfo, Completion, Connection, IOResult, LimboError, - Result, TransactionState, + io::CompletionGroup, return_if_io, turso_assert, types::WalFrameInfo, Completion, Connection, + IOResult, LimboError, Result, TransactionState, }; +use crate::{io_yield_one, IOContext}; use parking_lot::RwLock; use std::cell::{RefCell, UnsafeCell}; use std::collections::HashSet; @@ -1627,7 +1627,7 @@ impl Pager { CommitState::Checkpoint => { match self.checkpoint()? { IOResult::IO(cmp) => { - let completions = { + let completion = { let mut commit_info = self.commit_info.write(); match cmp { IOCompletions::Single(c) => { @@ -1637,10 +1637,14 @@ impl Pager { commit_info.completions.extend(c); } } - std::mem::take(&mut commit_info.completions) + let mut group = CompletionGroup::new(|_| {}); + for c in commit_info.completions.drain(..) { + group.add(&c); + } + group.build() }; // TODO: remove serialization of checkpoint path - io_yield_many!(completions); + io_yield_one!(completion); } IOResult::Done(res) => { let mut commit_info = self.commit_info.write(); @@ -1679,21 +1683,25 @@ impl Pager { .unwrap() .as_millis() ); - let (should_finish, result, completions) = { + let (should_finish, result, completion) = { let mut commit_info = self.commit_info.write(); if commit_info.completions.iter().all(|c| c.succeeded()) { commit_info.completions.clear(); commit_info.state = CommitState::PrepareWal; - (true, commit_info.result.take(), Vec::new()) + (true, commit_info.result.take(), Completion::new_yield()) } else { - (false, None, std::mem::take(&mut commit_info.completions)) + let mut group = CompletionGroup::new(|_| {}); + for c in commit_info.completions.drain(..) { + group.add(&c); + } + (false, None, group.build()) } }; if should_finish { wal.borrow_mut().finish_append_frames_commit()?; return Ok(IOResult::Done(result.expect("commit result should be set"))); } - io_yield_many!(completions); + io_yield_one!(completion); } } } From 840d6a0df535eb089af370bf6ad9303ab3a8a1d0 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 10:52:35 +0300 Subject: [PATCH 250/428] core/storage/btree: Replace io_yield_many with completion group in B-Tree --- core/storage/btree.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 633c97599..23860a4d6 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -1,7 +1,8 @@ use tracing::{instrument, Level}; use crate::{ - io_yield_many, io_yield_one, + io::CompletionGroup, + io_yield_one, schema::Index, storage::{ pager::{BtreePageAllocMode, Pager}, @@ -2858,12 +2859,12 @@ impl BTreeCursor { // start loading right page first let mut pgno: u32 = unsafe { right_pointer.cast::().read().swap_bytes() }; let current_sibling = sibling_pointer; - let mut completions: Vec = Vec::with_capacity(current_sibling + 1); + let mut group = CompletionGroup::new(|_| {}); for i in (0..=current_sibling).rev() { match btree_read_page(&self.pager, pgno as i64) { Err(e) => { tracing::error!("error reading page {}: {}", pgno, e); - self.pager.io.cancel(&completions)?; + group.cancel(); self.pager.io.drain()?; return Err(e); } @@ -2872,7 +2873,7 @@ impl BTreeCursor { self.pager.add_dirty(&page); pages_to_balance[i].replace(page); if let Some(c) = c { - completions.push(c); + group.add(&c); } } } @@ -2939,8 +2940,9 @@ impl BTreeCursor { first_divider_cell: first_cell_divider, }); *sub_state = BalanceSubState::NonRootDoBalancing; - if !completions.is_empty() { - io_yield_many!(completions); + let completion = group.build(); + if !completion.finished() { + io_yield_one!(completion); } } BalanceSubState::NonRootDoBalancing => { From ff7f4629812cfd651c5d01694346b9adfb5b512e Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 10:57:30 +0300 Subject: [PATCH 251/428] core/vdbe/sorter: Replace IOCompletions::Many with completion group --- core/vdbe/sorter.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index 0e9eec00b..4f1f190f6 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -298,9 +298,11 @@ impl Sorter { fn next_from_chunk_heap(&mut self) -> Result>> { if !self.pending_completions.is_empty() { - return Ok(IOResult::IO(IOCompletions::Many( - self.pending_completions.drain(..).collect(), - ))); + let mut group = CompletionGroup::new(|_| {}); + for c in self.pending_completions.drain(..) { + group.add(&c); + } + return Ok(IOResult::IO(IOCompletions::Single(group.build()))); } // Make sure all chunks read at least one record into their buffer. if let Some((next_record, next_chunk_idx)) = self.chunk_heap.pop() { From 5fb93b8780cb47ed9014475d12b6973c0c8c3de4 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 10:58:20 +0300 Subject: [PATCH 252/428] core: Kill io_yield_many macro --- core/util.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/core/util.rs b/core/util.rs index ea4fca470..1093c61ba 100644 --- a/core/util.rs +++ b/core/util.rs @@ -29,12 +29,6 @@ macro_rules! io_yield_one { return Ok(IOResult::IO(IOCompletions::Single($c))); }; } -#[macro_export] -macro_rules! io_yield_many { - ($v:expr) => { - return Ok(IOResult::IO(IOCompletions::Many($v))); - }; -} #[macro_export] macro_rules! eq_ignore_ascii_case { From af3a90bf4b2bccd3637635d84d6dcf9861d2e070 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 13 Oct 2025 10:58:52 +0300 Subject: [PATCH 253/428] core: Kill Many variant from IOCompletions enum --- core/storage/pager.rs | 3 --- core/types.rs | 17 ----------------- 2 files changed, 20 deletions(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 258188c03..da22387cf 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1633,9 +1633,6 @@ impl Pager { IOCompletions::Single(c) => { commit_info.completions.push(c); } - IOCompletions::Many(c) => { - commit_info.completions.extend(c); - } } let mut group = CompletionGroup::new(|_| {}); for c in commit_info.completions.drain(..) { diff --git a/core/types.rs b/core/types.rs index bfcbb004e..00f89118f 100644 --- a/core/types.rs +++ b/core/types.rs @@ -2349,7 +2349,6 @@ impl Cursor { #[must_use] pub enum IOCompletions { Single(Completion), - Many(Vec), } impl IOCompletions { @@ -2357,26 +2356,12 @@ impl IOCompletions { pub fn wait(self, io: &I) -> Result<()> { match self { IOCompletions::Single(c) => io.wait_for_completion(c), - IOCompletions::Many(completions) => { - let mut completions = completions.into_iter(); - while let Some(c) = completions.next() { - let res = io.wait_for_completion(c); - if res.is_err() { - for c in completions { - c.abort(); - } - return res; - } - } - Ok(()) - } } } pub fn finished(&self) -> bool { match self { IOCompletions::Single(c) => c.finished(), - IOCompletions::Many(completions) => completions.iter().all(|c| c.finished()), } } @@ -2384,14 +2369,12 @@ impl IOCompletions { pub fn abort(&self) { match self { IOCompletions::Single(c) => c.abort(), - IOCompletions::Many(completions) => completions.iter().for_each(|c| c.abort()), } } pub fn get_error(&self) -> Option { match self { IOCompletions::Single(c) => c.get_error(), - IOCompletions::Many(completions) => completions.iter().find_map(|c| c.get_error()), } } } From d7a719418e38fe0ce08ae0b331183039cdeba921 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Wed, 15 Oct 2025 15:15:55 +0300 Subject: [PATCH 254/428] Fix: outer CTEs should be available in subqueries --- core/translate/planner.rs | 17 ++++++++++++++++- testing/subquery.test | 14 ++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/core/translate/planner.rs b/core/translate/planner.rs index 0cf355c01..08936abf3 100644 --- a/core/translate/planner.rs +++ b/core/translate/planner.rs @@ -317,11 +317,26 @@ fn parse_from_clause_table( ) } ast::SelectTable::Select(subselect, maybe_alias) => { + let outer_query_refs_for_subquery = table_references + .outer_query_refs() + .iter() + .cloned() + .chain( + ctes.iter() + .cloned() + .map(|t: JoinedTable| OuterQueryReference { + identifier: t.identifier, + internal_id: t.internal_id, + table: t.table, + col_used_mask: ColumnUsedMask::default(), + }), + ) + .collect::>(); let Plan::Select(subplan) = prepare_select_plan( subselect, resolver, program, - table_references.outer_query_refs(), + &outer_query_refs_for_subquery, QueryDestination::placeholder_for_subquery(), connection, )? diff --git a/testing/subquery.test b/testing/subquery.test index 98ecec001..3a909df34 100644 --- a/testing/subquery.test +++ b/testing/subquery.test @@ -433,3 +433,17 @@ do_execsql_test subquery-count-all { where u.id < 100 ); } {1089} + +do_execsql_test_on_specific_db {:memory:} subquery-cte-available-in-arbitrary-depth { + with cte as (select 1 as one) + select onehundredandeleven+1 as onehundredandtwelve + from ( + with cte2 as (select 10 as ten) + select onehundredandone+ten as onehundredandeleven + from ( + with cte3 as (select 100 as hundred) + select one+hundred as onehundredandone + from cte join cte3 + ) join cte2 + ); +} {112} \ No newline at end of file From 8e107ab18e5197e2d4d4ffcc7e0774d81e59c6e9 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 15 Oct 2025 14:53:36 +0400 Subject: [PATCH 255/428] slight reorder of operations --- core/storage/btree.rs | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 0b7489530..909bb5de3 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -5773,30 +5773,26 @@ impl CursorTrait for BTreeCursor { first_overflow_page, .. }) => (payload, payload_size, first_overflow_page), + BTreeCell::IndexLeafCell(IndexLeafCell { + payload, + payload_size, + first_overflow_page, + }) => (payload, payload_size, first_overflow_page), BTreeCell::IndexInteriorCell(IndexInteriorCell { payload, payload_size, first_overflow_page, .. }) => (payload, payload_size, first_overflow_page), - BTreeCell::IndexLeafCell(IndexLeafCell { - payload, - first_overflow_page, - payload_size, - }) => (payload, payload_size, first_overflow_page), _ => unreachable!("unexpected page_type"), }; if let Some(next_page) = first_overflow_page { return_if_io!(self.process_overflow_read(payload, next_page, payload_size)) } else { - self.get_immutable_record_or_create() - .as_mut() - .unwrap() - .invalidate(); - self.get_immutable_record_or_create() - .as_mut() - .unwrap() - .start_serialization(payload); + let mut record = self.get_immutable_record_or_create(); + let record = record.as_mut().unwrap(); + record.invalidate(); + record.start_serialization(payload); self.record_cursor.borrow_mut().invalidate(); }; From ae8adc044958145dfc3f1224c55f1b1b3be45422 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 15 Oct 2025 14:53:53 +0400 Subject: [PATCH 256/428] faster extend_from_slice --- core/types.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/core/types.rs b/core/types.rs index bfcbb004e..f61d7f371 100644 --- a/core/types.rs +++ b/core/types.rs @@ -1060,7 +1060,15 @@ impl ImmutableRecord { } pub fn start_serialization(&mut self, payload: &[u8]) { - self.as_blob_mut().extend_from_slice(payload); + let blob = self.as_blob_mut(); + blob.reserve(payload.len()); + + let len = blob.len(); + unsafe { + let dst = blob.as_mut_ptr().add(len); + std::ptr::copy_nonoverlapping(payload.as_ptr(), dst, payload.len()); + blob.set_len(len + payload.len()); + } } pub fn invalidate(&mut self) { From dba195bdfa331ab3e3533031f96c1632517d24eb Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 15 Oct 2025 15:43:37 +0400 Subject: [PATCH 257/428] avoid allocations --- core/types.rs | 34 +++++++++++++++++++++++----------- core/vdbe/execute.rs | 23 +++++++---------------- core/vdbe/mod.rs | 12 ++++++------ 3 files changed, 36 insertions(+), 33 deletions(-) diff --git a/core/types.rs b/core/types.rs index f61d7f371..bd0f8ff06 100644 --- a/core/types.rs +++ b/core/types.rs @@ -218,6 +218,12 @@ pub enum ValueRef<'a> { Blob(&'a [u8]), } +impl<'a, 'b> From<&'b ValueRef<'a>> for ValueRef<'a> { + fn from(value: &'b ValueRef<'a>) -> Self { + *value + } +} + impl Debug for ValueRef<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -1813,12 +1819,15 @@ fn compare_records_int( /// 4. **Length comparison**: If strings are equal, compares lengths /// 5. **Remaining fields**: If first field is equal and more fields exist, /// delegates to `compare_records_generic()` with `skip=1` -fn compare_records_string( +fn compare_records_string<'a, T>( serialized: &ImmutableRecord, - unpacked: &[ValueRef], + unpacked: &'a [T], index_info: &IndexInfo, tie_breaker: std::cmp::Ordering, -) -> Result { +) -> Result +where + ValueRef<'a>: From<&'a T>, +{ turso_assert!( index_info.key_info.len() >= unpacked.len(), "index_info.key_info.len() < unpacked.len()" @@ -1846,7 +1855,7 @@ fn compare_records_string( return compare_records_generic(serialized, unpacked, index_info, 0, tie_breaker); } - let ValueRef::Text(rhs_text, _) = &unpacked[0] else { + let ValueRef::Text(rhs_text, _) = (&unpacked[0]).into() else { return compare_records_generic(serialized, unpacked, index_info, 0, tie_breaker); }; @@ -1925,13 +1934,16 @@ fn compare_records_string( /// The serialized and unpacked records do not have to contain the same number /// of fields. If all fields that appear in both records are equal, then /// `tie_breaker` is returned. -pub fn compare_records_generic( +pub fn compare_records_generic<'a, T>( serialized: &ImmutableRecord, - unpacked: &[ValueRef], + unpacked: &'a [T], index_info: &IndexInfo, skip: usize, tie_breaker: std::cmp::Ordering, -) -> Result { +) -> Result +where + ValueRef<'a>: From<&'a T>, +{ turso_assert!( index_info.key_info.len() >= unpacked.len(), "index_info.key_info.len() < unpacked.len()" @@ -1971,7 +1983,7 @@ pub fn compare_records_generic( header_pos += bytes_read; let serial_type = SerialType::try_from(serial_type_raw)?; - let rhs_value = &unpacked[field_idx]; + let rhs_value = (&unpacked[field_idx]).into(); let lhs_value = match serial_type.kind() { SerialTypeKind::ConstInt0 => ValueRef::Integer(0), @@ -1993,14 +2005,14 @@ pub fn compare_records_generic( } (ValueRef::Integer(lhs_int), ValueRef::Float(rhs_float)) => { - sqlite_int_float_compare(*lhs_int, *rhs_float) + sqlite_int_float_compare(*lhs_int, rhs_float) } (ValueRef::Float(lhs_float), ValueRef::Integer(rhs_int)) => { - sqlite_int_float_compare(*rhs_int, *lhs_float).reverse() + sqlite_int_float_compare(rhs_int, *lhs_float).reverse() } - _ => lhs_value.partial_cmp(rhs_value).unwrap(), + _ => lhs_value.partial_cmp(&rhs_value).unwrap(), }; let final_comparison = match index_info.key_info[field_idx].sort_order { diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index e56bd4fdb..fa0878c64 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -19,7 +19,7 @@ use crate::types::{ }; use crate::util::normalize_ident; use crate::vdbe::insn::InsertFlags; -use crate::vdbe::{registers_to_ref_values, TxnCleanup}; +use crate::vdbe::TxnCleanup; use crate::vector::{vector32_sparse, vector_concat, vector_distance_jaccard, vector_slice}; use crate::{ error::{ @@ -3405,13 +3405,11 @@ pub fn op_idx_ge( let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { // Create the comparison record from registers - let values = - registers_to_ref_values(&state.registers[*start_reg..*start_reg + *num_regs]); let tie_breaker = get_tie_breaker_from_idx_comp_op(insn); let ord = compare_records_generic( - &idx_record, // The serialized record from the index - &values, // The record built from registers - cursor.get_index_info(), // Sort order flags + &idx_record, // The serialized record from the index + &state.registers[*start_reg..*start_reg + *num_regs], // The record built from registers + cursor.get_index_info(), // Sort order flags 0, tie_breaker, )?; @@ -3473,12 +3471,10 @@ pub fn op_idx_le( let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { - let values = - registers_to_ref_values(&state.registers[*start_reg..*start_reg + *num_regs]); let tie_breaker = get_tie_breaker_from_idx_comp_op(insn); let ord = compare_records_generic( &idx_record, - &values, + &state.registers[*start_reg..*start_reg + *num_regs], cursor.get_index_info(), 0, tie_breaker, @@ -3524,12 +3520,10 @@ pub fn op_idx_gt( let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { - let values = - registers_to_ref_values(&state.registers[*start_reg..*start_reg + *num_regs]); let tie_breaker = get_tie_breaker_from_idx_comp_op(insn); let ord = compare_records_generic( &idx_record, - &values, + &state.registers[*start_reg..*start_reg + *num_regs], cursor.get_index_info(), 0, tie_breaker, @@ -3575,13 +3569,10 @@ pub fn op_idx_lt( let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { - let values = - registers_to_ref_values(&state.registers[*start_reg..*start_reg + *num_regs]); - let tie_breaker = get_tie_breaker_from_idx_comp_op(insn); let ord = compare_records_generic( &idx_record, - &values, + &state.registers[*start_reg..*start_reg + *num_regs], cursor.get_index_info(), 0, tie_breaker, diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index bcb4372d5..a7b5cbd46 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -250,6 +250,12 @@ pub enum Register { Record(ImmutableRecord), } +impl<'a> From<&'a Register> for ValueRef<'a> { + fn from(value: &'a Register) -> Self { + value.get_value().as_ref() + } +} + impl Register { #[inline] pub fn is_null(&self) -> bool { @@ -1002,12 +1008,6 @@ fn make_record(registers: &[Register], start_reg: &usize, count: &usize) -> Immu ImmutableRecord::from_registers(regs, regs.len()) } -pub fn registers_to_ref_values<'a>(registers: &'a [Register]) -> Vec> { - registers - .iter() - .map(|reg| reg.get_value().as_ref()) - .collect() -} #[instrument(skip(program), level = Level::DEBUG)] fn trace_insn(program: &Program, addr: InsnReference, insn: &Insn) { From a6a5ffd8212495fee5369a716b26eba18780a62f Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 15 Oct 2025 15:44:25 +0400 Subject: [PATCH 258/428] move read_varint_fast closer to the read_varint impl --- core/storage/sqlite3_ondisk.rs | 38 ++++++++++++++++++++++++++++++++ core/vdbe/execute.rs | 40 +--------------------------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs index 08f7addc3..f7e440fda 100644 --- a/core/storage/sqlite3_ondisk.rs +++ b/core/storage/sqlite3_ondisk.rs @@ -1486,6 +1486,44 @@ pub fn read_integer(buf: &[u8], serial_type: u8) -> Result { } } +/// Fast varint reader optimized for the common cases of 1-byte and 2-byte varints. +/// +/// This function is a performance-optimized version of `read_varint()` that handles +/// the most common varint cases inline before falling back to the full implementation. +/// It follows the same varint encoding as SQLite. +/// +/// # Optimized Cases +/// +/// - **Single-byte case**: Values 0-127 (0x00-0x7F) are returned immediately +/// - **Two-byte case**: Values 128-16383 (0x80-0x3FFF) are handled inline +/// - **Multi-byte case**: Larger values fall back to the full `read_varint()` implementation +/// +/// This function is similar to `sqlite3GetVarint32` +#[inline(always)] +pub fn read_varint_fast(buf: &[u8]) -> Result<(u64, usize)> { + // Fast path: Single-byte varint + if let Some(&first_byte) = buf.first() { + if first_byte & 0x80 == 0 { + return Ok((first_byte as u64, 1)); + } + } else { + crate::bail_corrupt_error!("Invalid varint"); + } + + // Fast path: Two-byte varint + if let Some(&second_byte) = buf.get(1) { + if second_byte & 0x80 == 0 { + let v = (((buf[0] & 0x7f) as u64) << 7) + (second_byte as u64); + return Ok((v, 2)); + } + } else { + crate::bail_corrupt_error!("Invalid varint"); + } + + //Fallback: Multi-byte varint + read_varint(buf) +} + #[inline(always)] pub fn read_varint(buf: &[u8]) -> Result<(u64, usize)> { let mut v: u64 = 0; diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index fa0878c64..4677c48e7 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -11,7 +11,7 @@ use crate::storage::btree::{ use crate::storage::database::DatabaseFile; use crate::storage::page_cache::PageCache; use crate::storage::pager::{AtomicDbState, CreateBTreeFlags, DbState}; -use crate::storage::sqlite3_ondisk::{read_varint, DatabaseHeader, PageSize}; +use crate::storage::sqlite3_ondisk::{read_varint_fast, DatabaseHeader, PageSize}; use crate::translate::collate::CollationSeq; use crate::types::{ compare_immutable, compare_records_generic, Extendable, IOCompletions, ImmutableRecord, @@ -1469,44 +1469,6 @@ pub fn op_last( Ok(InsnFunctionStepResult::Step) } -/// Fast varint reader optimized for the common cases of 1-byte and 2-byte varints. -/// -/// This function is a performance-optimized version of `read_varint()` that handles -/// the most common varint cases inline before falling back to the full implementation. -/// It follows the same varint encoding as SQLite. -/// -/// # Optimized Cases -/// -/// - **Single-byte case**: Values 0-127 (0x00-0x7F) are returned immediately -/// - **Two-byte case**: Values 128-16383 (0x80-0x3FFF) are handled inline -/// - **Multi-byte case**: Larger values fall back to the full `read_varint()` implementation -/// -/// This function is similar to `sqlite3GetVarint32` -#[inline(always)] -fn read_varint_fast(buf: &[u8]) -> Result<(u64, usize)> { - // Fast path: Single-byte varint - if let Some(&first_byte) = buf.first() { - if first_byte & 0x80 == 0 { - return Ok((first_byte as u64, 1)); - } - } else { - crate::bail_corrupt_error!("Invalid varint"); - } - - // Fast path: Two-byte varint - if let Some(&second_byte) = buf.get(1) { - if second_byte & 0x80 == 0 { - let v = (((buf[0] & 0x7f) as u64) << 7) + (second_byte as u64); - return Ok((v, 2)); - } - } else { - crate::bail_corrupt_error!("Invalid varint"); - } - - //Fallback: Multi-byte varint - read_varint(buf) -} - #[derive(Debug, Clone, Copy)] pub enum OpColumnState { Start, From f19c73822ee9afbb9c859fe6b7f3f9e0ee3bc576 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 15 Oct 2025 17:26:23 +0400 Subject: [PATCH 259/428] simplify serial_type size calculation --- core/vdbe/execute.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 4677c48e7..46cc68d79 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -1642,17 +1642,14 @@ pub fn op_column( 8 => 0, // CONST_INT1 9 => 0, - // BLOB - n if n >= 12 && n & 1 == 0 => (n - 12) >> 1, - // TEXT - n if n >= 13 && n & 1 == 1 => (n - 13) >> 1, // Reserved 10 | 11 => { return Err(LimboError::Corrupt(format!( "Reserved serial type: {serial_type}" ))) } - _ => unreachable!("Invalid serial type: {serial_type}"), + // BLOB or TEXT + n => (n - 12) / 2, } as usize; data_offset += data_size; record_cursor.offsets.push(data_offset); From c0fdaeb4755fa49b8fab928a566cd536b476c55f Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 15 Oct 2025 17:26:35 +0400 Subject: [PATCH 260/428] move more possible option higher --- core/vdbe/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index a7b5cbd46..2d60d854f 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -133,8 +133,8 @@ impl BranchOffset { /// Returns the offset value. Panics if the branch offset is a label or placeholder. pub fn as_offset_int(&self) -> InsnReference { match self { - BranchOffset::Label(v) => unreachable!("Unresolved label: {}", v), BranchOffset::Offset(v) => *v, + BranchOffset::Label(v) => unreachable!("Unresolved label: {}", v), BranchOffset::Placeholder => unreachable!("Unresolved placeholder"), } } From 68650cf594799d4c5a97de3670bfe157b5229698 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 15 Oct 2025 17:25:57 +0400 Subject: [PATCH 261/428] alternative read_variant implementation - it faster in benchmark (who knows why) - also seems bit faster for some my query - let's test on CI --- core/storage/sqlite3_ondisk.rs | 85 +++++++++++----------------------- core/vdbe/execute.rs | 7 ++- 2 files changed, 30 insertions(+), 62 deletions(-) diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs index f7e440fda..8e82ffa13 100644 --- a/core/storage/sqlite3_ondisk.rs +++ b/core/storage/sqlite3_ondisk.rs @@ -1486,71 +1486,40 @@ pub fn read_integer(buf: &[u8], serial_type: u8) -> Result { } } -/// Fast varint reader optimized for the common cases of 1-byte and 2-byte varints. -/// -/// This function is a performance-optimized version of `read_varint()` that handles -/// the most common varint cases inline before falling back to the full implementation. -/// It follows the same varint encoding as SQLite. -/// -/// # Optimized Cases -/// -/// - **Single-byte case**: Values 0-127 (0x00-0x7F) are returned immediately -/// - **Two-byte case**: Values 128-16383 (0x80-0x3FFF) are handled inline -/// - **Multi-byte case**: Larger values fall back to the full `read_varint()` implementation -/// -/// This function is similar to `sqlite3GetVarint32` -#[inline(always)] -pub fn read_varint_fast(buf: &[u8]) -> Result<(u64, usize)> { - // Fast path: Single-byte varint - if let Some(&first_byte) = buf.first() { - if first_byte & 0x80 == 0 { - return Ok((first_byte as u64, 1)); - } - } else { - crate::bail_corrupt_error!("Invalid varint"); - } - - // Fast path: Two-byte varint - if let Some(&second_byte) = buf.get(1) { - if second_byte & 0x80 == 0 { - let v = (((buf[0] & 0x7f) as u64) << 7) + (second_byte as u64); - return Ok((v, 2)); - } - } else { - crate::bail_corrupt_error!("Invalid varint"); - } - - //Fallback: Multi-byte varint - read_varint(buf) -} - #[inline(always)] pub fn read_varint(buf: &[u8]) -> Result<(u64, usize)> { let mut v: u64 = 0; - for i in 0..8 { - match buf.get(i) { - Some(c) => { - v = (v << 7) + (c & 0x7f) as u64; - if (c & 0x80) == 0 { - return Ok((v, i + 1)); - } - } - None => { - crate::bail_corrupt_error!("Invalid varint"); - } + let mut i = 0; + let chunks = buf.chunks_exact(2); + for chunk in chunks { + let c1 = chunk[0]; + v = (v << 7) + (c1 & 0x7f) as u64; + i += 1; + if (c1 & 0x80) == 0 { + return Ok((v, i)); + } + let c2 = chunk[1]; + v = (v << 7) + (c2 & 0x7f) as u64; + i += 1; + if (c2 & 0x80) == 0 { + return Ok((v, i)); + } + if i == 8 { + break; } } - match buf.get(8) { + match buf.get(i) { Some(&c) => { - // Values requiring 9 bytes must have non-zero in the top 8 bits (value >= 1<<56). - // Since the final value is `(v<<8) + c`, the top 8 bits (v >> 48) must not be 0. - // If those are zero, this should be treated as corrupt. - // Perf? the comparison + branching happens only in parsing 9-byte varint which is rare. - if (v >> 48) == 0 { - bail_corrupt_error!("Invalid varint"); + if i < 8 && (c & 0x80) == 0 { + return Ok(((v << 7) + c as u64, i + 1)); + } else if i == 8 && (v >> 48) > 0 { + // Values requiring 9 bytes must have non-zero in the top 8 bits (value >= 1<<56). + // Since the final value is `(v<<8) + c`, the top 8 bits (v >> 48) must not be 0. + // If those are zero, this should be treated as corrupt. + // Perf? the comparison + branching happens only in parsing 9-byte varint which is rare. + return Ok(((v << 8) + c as u64, i + 1)); } - v = (v << 8) + c as u64; - Ok((v, 9)) + bail_corrupt_error!("Invalid varint"); } None => { bail_corrupt_error!("Invalid varint"); diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 46cc68d79..55c37e0a9 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -11,7 +11,7 @@ use crate::storage::btree::{ use crate::storage::database::DatabaseFile; use crate::storage::page_cache::PageCache; use crate::storage::pager::{AtomicDbState, CreateBTreeFlags, DbState}; -use crate::storage::sqlite3_ondisk::{read_varint_fast, DatabaseHeader, PageSize}; +use crate::storage::sqlite3_ondisk::{read_varint, DatabaseHeader, PageSize}; use crate::translate::collate::CollationSeq; use crate::types::{ compare_immutable, compare_records_generic, Extendable, IOCompletions, ImmutableRecord, @@ -1595,7 +1595,7 @@ pub fn op_column( let mut record_cursor = cursor.record_cursor_mut(); if record_cursor.offsets.is_empty() { - let (header_size, header_len_bytes) = read_varint_fast(payload)?; + let (header_size, header_len_bytes) = read_varint(payload)?; let header_size = header_size as usize; debug_assert!(header_size <= payload.len() && header_size <= 98307, "header_size: {header_size}, header_len_bytes: {header_len_bytes}, payload.len(): {}", payload.len()); @@ -1617,8 +1617,7 @@ pub fn op_column( while record_cursor.serial_types.len() <= target_column && parse_pos < record_cursor.header_size { - let (serial_type, varint_len) = - read_varint_fast(&payload[parse_pos..])?; + let (serial_type, varint_len) = read_varint(&payload[parse_pos..])?; record_cursor.serial_types.push(serial_type); parse_pos += varint_len; let data_size = match serial_type { From 7c919314a99778f11f842454ee70bb3cd29e82ef Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 12:27:57 +0400 Subject: [PATCH 262/428] use heap-sort style algorithm for order by ... limit k queries --- core/translate/emitter.rs | 1 + core/translate/order_by.rs | 246 +++++++++++++++++++++++++++---------- 2 files changed, 184 insertions(+), 63 deletions(-) diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 2dabd4b82..5ae59fc57 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -304,6 +304,7 @@ pub fn emit_query<'a>( &plan.order_by, &plan.table_references, plan.group_by.is_some(), + plan.distinctness != Distinctness::NonDistinct, &plan.aggregates, )?; } diff --git a/core/translate/order_by.rs b/core/translate/order_by.rs index aafb32a21..09de9c3d3 100644 --- a/core/translate/order_by.rs +++ b/core/translate/order_by.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use turso_parser::ast::{self, SortOrder}; use crate::{ emit_explain, - schema::PseudoCursorType, + schema::{Index, IndexColumn, PseudoCursorType}, translate::{ collate::{get_collseq_from_expr, CollationSeq}, group_by::is_orderby_agg_or_const, @@ -11,7 +13,7 @@ use crate::{ util::exprs_are_equivalent, vdbe::{ builder::{CursorType, ProgramBuilder}, - insn::Insn, + insn::{IdxInsertFlags, Insn}, }, QueryMode, Result, }; @@ -39,6 +41,8 @@ pub struct SortMetadata { /// aggregates/constants, so that rows that tie on ORDER BY terms are output in /// the same relative order the underlying row stream produced them. pub has_sequence: bool, + /// Whether to use heap-sort with BTreeIndex instead of full-collection sort through Sorter + pub use_heap_sort: bool, } /// Initialize resources needed for ORDER BY processing @@ -49,54 +53,106 @@ pub fn init_order_by( order_by: &[(Box, SortOrder)], referenced_tables: &TableReferences, has_group_by: bool, + has_distinct: bool, aggregates: &[Aggregate], ) -> Result<()> { - let sort_cursor = program.alloc_cursor_id(CursorType::Sorter); let only_aggs = order_by .iter() .all(|(e, _)| is_orderby_agg_or_const(&t_ctx.resolver, e, aggregates)); // only emit sequence column if we have GROUP BY and ORDER BY is not only aggregates or constants let has_sequence = has_group_by && !only_aggs; + + let use_heap_sort = !has_distinct && !has_group_by && t_ctx.limit_ctx.is_some(); + if use_heap_sort { + assert!(!has_sequence); + } + let remappings = order_by_deduplicate_result_columns(order_by, result_columns, has_sequence); + let sort_cursor = if use_heap_sort { + let index_name = format!("heap_sort_{}", program.offset().as_offset_int()); // we don't really care about the name that much, just enough that we don't get name collisions + let mut index_columns = Vec::with_capacity(order_by.len() + result_columns.len()); + for (column, order) in order_by { + let collation = get_collseq_from_expr(column, referenced_tables)?; + let pos_in_table = index_columns.len(); + index_columns.push(IndexColumn { + name: pos_in_table.to_string(), + order: *order, + pos_in_table, + collation, + default: None, + }) + } + for _ in remappings.iter().filter(|r| !r.deduplicated) { + let pos_in_table = index_columns.len(); + index_columns.push(IndexColumn { + name: pos_in_table.to_string(), + order: SortOrder::Asc, + pos_in_table, + collation: None, + default: None, + }) + } + let index = Arc::new(Index { + name: index_name.clone(), + table_name: String::new(), + ephemeral: true, + root_page: 0, + columns: index_columns, + unique: false, + has_rowid: false, + where_clause: None, + }); + program.alloc_cursor_id(CursorType::BTreeIndex(index)) + } else { + program.alloc_cursor_id(CursorType::Sorter) + }; t_ctx.meta_sort = Some(SortMetadata { sort_cursor, reg_sorter_data: program.alloc_register(), - remappings: order_by_deduplicate_result_columns(order_by, result_columns, has_sequence), + remappings, has_sequence, + use_heap_sort, }); - /* - * Terms of the ORDER BY clause that is part of a SELECT statement may be assigned a collating sequence using the COLLATE operator, - * in which case the specified collating function is used for sorting. - * Otherwise, if the expression sorted by an ORDER BY clause is a column, - * then the collating sequence of the column is used to determine sort order. - * If the expression is not a column and has no COLLATE clause, then the BINARY collating sequence is used. - */ - let mut collations = order_by - .iter() - .map(|(expr, _)| get_collseq_from_expr(expr, referenced_tables)) - .collect::>>()?; + if use_heap_sort { + program.emit_insn(Insn::OpenEphemeral { + cursor_id: sort_cursor, + is_table: false, + }); + } else { + /* + * Terms of the ORDER BY clause that is part of a SELECT statement may be assigned a collating sequence using the COLLATE operator, + * in which case the specified collating function is used for sorting. + * Otherwise, if the expression sorted by an ORDER BY clause is a column, + * then the collating sequence of the column is used to determine sort order. + * If the expression is not a column and has no COLLATE clause, then the BINARY collating sequence is used. + */ + let mut collations = order_by + .iter() + .map(|(expr, _)| get_collseq_from_expr(expr, referenced_tables)) + .collect::>>()?; - if has_sequence { - // sequence column uses BINARY collation - collations.push(Some(CollationSeq::default())); + if has_sequence { + // sequence column uses BINARY collation + collations.push(Some(CollationSeq::default())); + } + + let key_len = order_by.len() + if has_sequence { 1 } else { 0 }; + + program.emit_insn(Insn::SorterOpen { + cursor_id: sort_cursor, + columns: key_len, + order: { + let mut ord: Vec = order_by.iter().map(|(_, d)| *d).collect(); + if has_sequence { + // sequence is ascending tiebreaker + ord.push(SortOrder::Asc); + } + ord + }, + collations, + }); } - - let key_len = order_by.len() + if has_sequence { 1 } else { 0 }; - - program.emit_insn(Insn::SorterOpen { - cursor_id: sort_cursor, - columns: key_len, - order: { - let mut ord: Vec = order_by.iter().map(|(_, d)| *d).collect(); - if has_sequence { - // sequence is ascending tiebreaker - ord.push(SortOrder::Asc); - } - ord - }, - collations, - }); Ok(()) } @@ -118,6 +174,7 @@ pub fn emit_order_by( reg_sorter_data, ref remappings, has_sequence, + use_heap_sort, } = *t_ctx.meta_sort.as_ref().unwrap(); let sorter_column_count = order_by.len() @@ -128,33 +185,44 @@ pub fn emit_order_by( // to emit correct explain output. emit_explain!(program, false, "USE TEMP B-TREE FOR ORDER BY".to_owned()); - let pseudo_cursor = program.alloc_cursor_id(CursorType::Pseudo(PseudoCursorType { - column_count: sorter_column_count, - })); + let cursor_id = if !use_heap_sort { + let pseudo_cursor = program.alloc_cursor_id(CursorType::Pseudo(PseudoCursorType { + column_count: sorter_column_count, + })); - program.emit_insn(Insn::OpenPseudo { - cursor_id: pseudo_cursor, - content_reg: reg_sorter_data, - num_fields: sorter_column_count, - }); + program.emit_insn(Insn::OpenPseudo { + cursor_id: pseudo_cursor, + content_reg: reg_sorter_data, + num_fields: sorter_column_count, + }); + + program.emit_insn(Insn::SorterSort { + cursor_id: sort_cursor, + pc_if_empty: sort_loop_end_label, + }); + pseudo_cursor + } else { + program.emit_insn(Insn::Rewind { + cursor_id: sort_cursor, + pc_if_empty: sort_loop_end_label, + }); + sort_cursor + }; - program.emit_insn(Insn::SorterSort { - cursor_id: sort_cursor, - pc_if_empty: sort_loop_end_label, - }); program.preassign_label_to_next_insn(sort_loop_start_label); emit_offset(program, sort_loop_next_label, t_ctx.reg_offset); - program.emit_insn(Insn::SorterData { - cursor_id: sort_cursor, - dest_reg: reg_sorter_data, - pseudo_cursor, - }); + if !use_heap_sort { + program.emit_insn(Insn::SorterData { + cursor_id: sort_cursor, + dest_reg: reg_sorter_data, + pseudo_cursor: cursor_id, + }); + } // We emit the columns in SELECT order, not sorter order (sorter always has the sort keys first). // This is tracked in sort_metadata.remappings. - let cursor_id = pseudo_cursor; let start_reg = t_ctx.reg_result_cols_start.unwrap(); for i in 0..result_columns.len() { let reg = start_reg + i; @@ -175,10 +243,17 @@ pub fn emit_order_by( )?; program.resolve_label(sort_loop_next_label, program.offset()); - program.emit_insn(Insn::SorterNext { - cursor_id: sort_cursor, - pc_if_next: sort_loop_start_label, - }); + if !use_heap_sort { + program.emit_insn(Insn::SorterNext { + cursor_id: sort_cursor, + pc_if_next: sort_loop_start_label, + }); + } else { + program.emit_insn(Insn::Next { + cursor_id: sort_cursor, + pc_if_next: sort_loop_start_label, + }); + } program.preassign_label_to_next_insn(sort_loop_end_label); Ok(()) @@ -333,16 +408,61 @@ pub fn order_by_sorter_insert( let SortMetadata { sort_cursor, reg_sorter_data, + use_heap_sort, .. } = sort_metadata; - sorter_insert( - program, - start_reg, - orderby_sorter_column_count, - *sort_cursor, - *reg_sorter_data, - ); + if *use_heap_sort { + // maintain top-k records in the index instead of materializing the whole sequence + let insert_label = program.allocate_label(); + let skip_label = program.allocate_label(); + let limit = t_ctx.limit_ctx.as_ref().expect("limit must be set"); + program.emit_insn(Insn::IfPos { + reg: limit.reg_limit, + target_pc: insert_label, + decrement_by: 1, + }); + program.emit_insn(Insn::Last { + cursor_id: *sort_cursor, + pc_if_empty: insert_label, + }); + program.emit_insn(Insn::IdxLE { + cursor_id: *sort_cursor, + start_reg: start_reg, + num_regs: orderby_sorter_column_count, + target_pc: skip_label, + }); + + program.emit_insn(Insn::Delete { + cursor_id: *sort_cursor, + table_name: "".to_string(), + }); + + program.preassign_label_to_next_insn(insert_label); + program.emit_insn(Insn::MakeRecord { + start_reg, + count: orderby_sorter_column_count, + dest_reg: *reg_sorter_data, + index_name: None, + affinity_str: None, + }); + program.emit_insn(Insn::IdxInsert { + cursor_id: *sort_cursor, + record_reg: *reg_sorter_data, + unpacked_start: None, + unpacked_count: None, + flags: IdxInsertFlags::new(), + }); + program.preassign_label_to_next_insn(skip_label); + } else { + sorter_insert( + program, + start_reg, + orderby_sorter_column_count, + *sort_cursor, + *reg_sorter_data, + ); + } Ok(()) } From 1a24139359e6b65dcf66696ae49fb1294babe160 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 13:23:22 +0400 Subject: [PATCH 263/428] fix limit for order by queries with heap-sort style execution --- core/translate/order_by.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/translate/order_by.rs b/core/translate/order_by.rs index 09de9c3d3..7a91bc160 100644 --- a/core/translate/order_by.rs +++ b/core/translate/order_by.rs @@ -239,7 +239,11 @@ pub fn emit_order_by( plan, start_reg, t_ctx.limit_ctx, - Some(sort_loop_end_label), + if !use_heap_sort { + Some(sort_loop_end_label) + } else { + None + }, )?; program.resolve_label(sort_loop_next_label, program.offset()); From 5868270b0644818769468083aa9443391d535fcd Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 13:29:49 +0400 Subject: [PATCH 264/428] fix clippy --- core/translate/order_by.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/translate/order_by.rs b/core/translate/order_by.rs index 7a91bc160..dfd218ef7 100644 --- a/core/translate/order_by.rs +++ b/core/translate/order_by.rs @@ -46,6 +46,7 @@ pub struct SortMetadata { } /// Initialize resources needed for ORDER BY processing +#[allow(clippy::too_many_arguments)] pub fn init_order_by( program: &mut ProgramBuilder, t_ctx: &mut TranslateCtx, @@ -432,7 +433,7 @@ pub fn order_by_sorter_insert( }); program.emit_insn(Insn::IdxLE { cursor_id: *sort_cursor, - start_reg: start_reg, + start_reg, num_regs: orderby_sorter_column_count, target_pc: skip_label, }); From b065e7d3801beda895294b7fefb7adc3f82a316f Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 14:40:01 +0400 Subject: [PATCH 265/428] emit Sequence column for heap-sort in order to distinguish between rows with same order by key and result columns --- core/translate/order_by.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/core/translate/order_by.rs b/core/translate/order_by.rs index dfd218ef7..e4c72191d 100644 --- a/core/translate/order_by.rs +++ b/core/translate/order_by.rs @@ -61,13 +61,11 @@ pub fn init_order_by( .iter() .all(|(e, _)| is_orderby_agg_or_const(&t_ctx.resolver, e, aggregates)); - // only emit sequence column if we have GROUP BY and ORDER BY is not only aggregates or constants - let has_sequence = has_group_by && !only_aggs; - let use_heap_sort = !has_distinct && !has_group_by && t_ctx.limit_ctx.is_some(); - if use_heap_sort { - assert!(!has_sequence); - } + + // only emit sequence column if (we have GROUP BY and ORDER BY is not only aggregates or constants) OR (we decided to use heap-sort) + let has_sequence = (has_group_by && !only_aggs) || use_heap_sort; + let remappings = order_by_deduplicate_result_columns(order_by, result_columns, has_sequence); let sort_cursor = if use_heap_sort { let index_name = format!("heap_sort_{}", program.offset().as_offset_int()); // we don't really care about the name that much, just enough that we don't get name collisions @@ -83,6 +81,15 @@ pub fn init_order_by( default: None, }) } + let pos_in_table = index_columns.len(); + // add sequence number between ORDER BY columns and result column + index_columns.push(IndexColumn { + name: pos_in_table.to_string(), + order: SortOrder::Asc, + pos_in_table, + collation: Some(CollationSeq::Binary), + default: None, + }); for _ in remappings.iter().filter(|r| !r.deduplicated) { let pos_in_table = index_columns.len(); index_columns.push(IndexColumn { From a1260ca8c7030c3634babf0582efe3f92868cb91 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 14:49:09 +0400 Subject: [PATCH 266/428] implement Sequence opcodes for any type of cursors --- core/vdbe/execute.rs | 11 +++++++---- core/vdbe/mod.rs | 4 ++++ core/vdbe/sorter.rs | 17 ----------------- 3 files changed, 11 insertions(+), 21 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 55c37e0a9..dcfa26046 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -5553,8 +5553,9 @@ pub fn op_sequence( }, insn ); - let cursor = state.get_cursor(*cursor_id).as_sorter_mut(); - let seq_num = cursor.next_sequence(); + let cursor_seq = state.cursor_seqs.get_mut(*cursor_id).unwrap(); + let seq_num = *cursor_seq; + *cursor_seq += 1; state.registers[*target_reg] = Register::Value(Value::Integer(seq_num)); state.pc += 1; Ok(InsnFunctionStepResult::Step) @@ -5575,8 +5576,10 @@ pub fn op_sequence_test( }, insn ); - let cursor = state.get_cursor(*cursor_id).as_sorter_mut(); - state.pc = if cursor.seq_beginning() { + let cursor_seq = state.cursor_seqs.get_mut(*cursor_id).unwrap(); + let was_zero = *cursor_seq == 0; + *cursor_seq += 1; + state.pc = if was_zero { target_pc.as_offset_int() } else { state.pc + 1 diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 2d60d854f..94ae80c41 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -282,6 +282,7 @@ pub struct ProgramState { pub io_completions: Option, pub pc: InsnReference, cursors: Vec>, + cursor_seqs: Vec, registers: Vec, pub(crate) result_row: Option, last_compare: Option, @@ -325,11 +326,13 @@ pub struct ProgramState { impl ProgramState { pub fn new(max_registers: usize, max_cursors: usize) -> Self { let cursors: Vec> = (0..max_cursors).map(|_| None).collect(); + let cursor_seqs = vec![0i64; max_cursors]; let registers = vec![Register::Value(Value::Null); max_registers]; Self { io_completions: None, pc: 0, cursors, + cursor_seqs, registers, result_row: None, last_compare: None, @@ -411,6 +414,7 @@ impl ProgramState { if let Some(max_cursors) = max_cursors { self.cursors.resize_with(max_cursors, || None); + self.cursor_seqs.resize(max_cursors, 0); } if let Some(max_resgisters) = max_registers { self.registers diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index ac7e07ed4..ef0864bec 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -86,7 +86,6 @@ pub struct Sorter { insert_state: InsertState, /// State machine for [Sorter::init_chunk_heap] init_chunk_heap_state: InitChunkHeapState, - seq_count: i64, pending_completions: Vec, } @@ -125,7 +124,6 @@ impl Sorter { sort_state: SortState::Start, insert_state: InsertState::Start, init_chunk_heap_state: InitChunkHeapState::Start, - seq_count: 0, pending_completions: Vec::new(), } } @@ -138,21 +136,6 @@ impl Sorter { self.current.is_some() } - /// Get current sequence count and increment it - pub fn next_sequence(&mut self) -> i64 { - let current = self.seq_count; - self.seq_count += 1; - current - } - - /// Test if at beginning of sequence (count == 0) and increment - /// Returns true if this was the first call (seq_count was 0) - pub fn seq_beginning(&mut self) -> bool { - let was_zero = self.seq_count == 0; - self.seq_count += 1; - was_zero - } - // We do the sorting here since this is what is called by the SorterSort instruction pub fn sort(&mut self) -> Result> { loop { From a2dbaafe69385d1849af646d6941cf986d812380 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 14:49:27 +0400 Subject: [PATCH 267/428] add explicit test for multiple rows heap-sort bug --- testing/orderby.test | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/testing/orderby.test b/testing/orderby.test index e173d946b..ca929978a 100755 --- a/testing/orderby.test +++ b/testing/orderby.test @@ -239,4 +239,16 @@ do_execsql_test_on_specific_db {:memory:} orderby_alias_precedence { INSERT INTO t VALUES (1,200),(2,100); SELECT x AS y, y AS x FROM t ORDER BY x; } {2|100 -1|200} \ No newline at end of file +1|200} + +# Check that ORDER BY with heap-sort properly handle multiple rows with same order key + result values +do_execsql_test_on_specific_db {:memory:} orderby_same_rows { + CREATE TABLE t(x,y,z); + INSERT INTO t VALUES (1,2,3),(1,2,6),(1,2,9),(1,2,10),(1,3,-1),(1,3,-2); + SELECT x, y FROM t ORDER BY x, y LIMIT 10; +} {1|2 +1|2 +1|2 +1|2 +1|3 +1|3} \ No newline at end of file From af4c1e8bd4a1203b66e664e47797bcde3ba64ff1 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 15:14:01 +0400 Subject: [PATCH 268/428] use proper register for limit --- core/translate/order_by.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/translate/order_by.rs b/core/translate/order_by.rs index e4c72191d..bd419e016 100644 --- a/core/translate/order_by.rs +++ b/core/translate/order_by.rs @@ -429,8 +429,9 @@ pub fn order_by_sorter_insert( let insert_label = program.allocate_label(); let skip_label = program.allocate_label(); let limit = t_ctx.limit_ctx.as_ref().expect("limit must be set"); + let limit_reg = t_ctx.reg_limit_offset_sum.unwrap_or(limit.reg_limit); program.emit_insn(Insn::IfPos { - reg: limit.reg_limit, + reg: limit_reg, target_pc: insert_label, decrement_by: 1, }); From dd34f7fd504fbbd57e7802da4e76447c4911ab09 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 14 Oct 2025 22:19:01 +0400 Subject: [PATCH 269/428] wip --- core/lib.rs | 1 + core/storage/btree.rs | 12 +++++++----- core/vdbe/execute.rs | 2 +- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 2a0a558cf..9b93d64ad 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -15,6 +15,7 @@ mod json; pub mod mvcc; mod parameters; mod pragma; +pub mod primitives; mod pseudo; mod schema; #[cfg(feature = "series")] diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 909bb5de3..5a37ba568 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -41,7 +41,7 @@ use super::{ use parking_lot::RwLock; use std::{ any::Any, - cell::{Cell, Ref, RefCell}, + cell::Cell, cmp::{Ordering, Reverse}, collections::{BinaryHeap, HashMap}, fmt::Debug, @@ -50,6 +50,8 @@ use std::{ sync::Arc, }; +use crate::primitives::{Ref, RefCell, RefMut}; + /// The B-Tree page header is 12 bytes for interior pages and 8 bytes for leaf pages. /// /// +--------+-----------------+-----------------+-----------------+--------+----- ..... ----+ @@ -552,7 +554,7 @@ pub trait CursorTrait: Any { // --- start: BTreeCursor specific functions ---- fn invalidate_record(&mut self); fn has_rowid(&self) -> bool; - fn record_cursor_mut(&self) -> std::cell::RefMut<'_, RecordCursor>; + fn record_cursor_mut(&self) -> RefMut<'_, RecordCursor>; fn get_pager(&self) -> Arc; fn get_skip_advance(&self) -> bool; @@ -5422,7 +5424,7 @@ impl BTreeCursor { Ok(IOResult::Done(())) } - fn get_immutable_record_or_create(&self) -> std::cell::RefMut<'_, Option> { + fn get_immutable_record_or_create(&self) -> RefMut<'_, Option> { let mut reusable_immutable_record = self.reusable_immutable_record.borrow_mut(); if reusable_immutable_record.is_none() { let page_size = self.pager.get_page_size_unchecked().get(); @@ -5432,7 +5434,7 @@ impl BTreeCursor { reusable_immutable_record } - fn get_immutable_record(&self) -> std::cell::RefMut<'_, Option> { + fn get_immutable_record(&self) -> RefMut<'_, Option> { self.reusable_immutable_record.borrow_mut() } @@ -6390,7 +6392,7 @@ impl CursorTrait for BTreeCursor { .invalidate(); self.record_cursor.borrow_mut().invalidate(); } - fn record_cursor_mut(&self) -> std::cell::RefMut<'_, RecordCursor> { + fn record_cursor_mut(&self) -> RefMut<'_, RecordCursor> { self.record_cursor.borrow_mut() } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index dcfa26046..e74f64a1c 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -110,7 +110,7 @@ macro_rules! load_insn { }; #[cfg(not(debug_assertions))] let Insn::$variant { $($field $(: $binding)?),*} = $insn else { - // this will optimize away the branch + // this will optimize away the branch unsafe { std::hint::unreachable_unchecked() }; }; }; From 4b3689e9e7d0263410112e4fd7420abf1d22b398 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 15 Oct 2025 16:27:02 +0400 Subject: [PATCH 270/428] avoid doing work in case of heap-sort optimization --- core/translate/order_by.rs | 66 +++++++++++++++++++++----------------- 1 file changed, 36 insertions(+), 30 deletions(-) diff --git a/core/translate/order_by.rs b/core/translate/order_by.rs index bd419e016..781482df5 100644 --- a/core/translate/order_by.rs +++ b/core/translate/order_by.rs @@ -324,6 +324,40 @@ pub fn order_by_sorter_insert( )?; } } + + let SortMetadata { + sort_cursor, + reg_sorter_data, + use_heap_sort, + .. + } = sort_metadata; + + let (insert_label, skip_label) = if *use_heap_sort { + // skip records which greater than current top-k maintained in a separate BTreeIndex + let insert_label = program.allocate_label(); + let skip_label = program.allocate_label(); + let limit = t_ctx.limit_ctx.as_ref().expect("limit must be set"); + let limit_reg = t_ctx.reg_limit_offset_sum.unwrap_or(limit.reg_limit); + program.emit_insn(Insn::IfPos { + reg: limit_reg, + target_pc: insert_label, + decrement_by: 1, + }); + program.emit_insn(Insn::Last { + cursor_id: *sort_cursor, + pc_if_empty: insert_label, + }); + program.emit_insn(Insn::IdxLE { + cursor_id: *sort_cursor, + start_reg, + num_regs: orderby_sorter_column_count, + target_pc: skip_label, + }); + (Some(insert_label), Some(skip_label)) + } else { + (None, None) + }; + let mut cur_reg = start_reg + order_by_len; if sort_metadata.has_sequence { program.emit_insn(Insn::Sequence { @@ -417,41 +451,13 @@ pub fn order_by_sorter_insert( } } - let SortMetadata { - sort_cursor, - reg_sorter_data, - use_heap_sort, - .. - } = sort_metadata; - if *use_heap_sort { - // maintain top-k records in the index instead of materializing the whole sequence - let insert_label = program.allocate_label(); - let skip_label = program.allocate_label(); - let limit = t_ctx.limit_ctx.as_ref().expect("limit must be set"); - let limit_reg = t_ctx.reg_limit_offset_sum.unwrap_or(limit.reg_limit); - program.emit_insn(Insn::IfPos { - reg: limit_reg, - target_pc: insert_label, - decrement_by: 1, - }); - program.emit_insn(Insn::Last { - cursor_id: *sort_cursor, - pc_if_empty: insert_label, - }); - program.emit_insn(Insn::IdxLE { - cursor_id: *sort_cursor, - start_reg, - num_regs: orderby_sorter_column_count, - target_pc: skip_label, - }); - program.emit_insn(Insn::Delete { cursor_id: *sort_cursor, table_name: "".to_string(), }); - program.preassign_label_to_next_insn(insert_label); + program.preassign_label_to_next_insn(insert_label.unwrap()); program.emit_insn(Insn::MakeRecord { start_reg, count: orderby_sorter_column_count, @@ -466,7 +472,7 @@ pub fn order_by_sorter_insert( unpacked_count: None, flags: IdxInsertFlags::new(), }); - program.preassign_label_to_next_insn(skip_label); + program.preassign_label_to_next_insn(skip_label.unwrap()); } else { sorter_insert( program, From 08f010b9692889e33b177ef82aae5bb6c5d86e8f Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 15 Oct 2025 18:16:31 +0300 Subject: [PATCH 271/428] Document ThreadSanitizer in CONTRIBUTING.md --- CONTRIBUTING.md | 65 +++++++++++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 27 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9a33c7319..46a751d35 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,6 +88,44 @@ echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid cargo bench --bench benchmark -- --profile-time=5 ``` +## Debugging bugs + +### Query execution debugging + +Turso aims towards SQLite compatibility. If you find a query that has different behavior than SQLite, the first step is to check what the generated bytecode looks like. + +To do that, first run the `EXPLAIN` command in `sqlite3` shell: + +``` +sqlite> EXPLAIN SELECT first_name FROM users; +addr opcode p1 p2 p3 p4 p5 comment +---- ------------- ---- ---- ---- ------------- -- ------------- +0 Init 0 7 0 0 Start at 7 +1 OpenRead 0 2 0 2 0 root=2 iDb=0; users +2 Rewind 0 6 0 0 +3 Column 0 1 1 0 r[1]= cursor 0 column 1 +4 ResultRow 1 1 0 0 output=r[1] +5 Next 0 3 0 1 +6 Halt 0 0 0 0 +7 Transaction 0 0 1 0 1 usesStmtJournal=0 +8 Goto 0 1 0 0 +``` + +and then run the same command in Turso's shell. + +If the bytecode is different, that's the bug -- work towards fixing code generation. +If the bytecode is the same, but query results are different, then the bug is somewhere in the virtual machine interpreter or storage layer. + +### Stress testing with sanitizers + +If you suspect a multi-threading issue, you can run the stress test with ThreadSanitizer enabled as follows: + +```console +rustup toolchain install nightly +rustup override set nightly +cargo run -Zbuild-std --target x86_64-unknown-linux-gnu -p turso_stress -- --vfs syscall --nr-threads 4 --nr-iterations 1000 +``` + ## Finding things to work on The issue tracker has issues tagged with [good first issue](https://github.com/tursodatabase/limbo/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22), @@ -115,33 +153,6 @@ To produce pull requests like this, you should learn how to use Git's interactiv For a longer discussion on good commits, see Al Tenhundfeld's [What makes a good git commit](https://www.simplethread.com/what-makes-a-good-git-commit/), for example. - -## Debugging query execution - -Turso aims towards SQLite compatibility. If you find a query that has different behavior than SQLite, the first step is to check what the generated bytecode looks like. - -To do that, first run the `EXPLAIN` command in `sqlite3` shell: - -``` -sqlite> EXPLAIN SELECT first_name FROM users; -addr opcode p1 p2 p3 p4 p5 comment ----- ------------- ---- ---- ---- ------------- -- ------------- -0 Init 0 7 0 0 Start at 7 -1 OpenRead 0 2 0 2 0 root=2 iDb=0; users -2 Rewind 0 6 0 0 -3 Column 0 1 1 0 r[1]= cursor 0 column 1 -4 ResultRow 1 1 0 0 output=r[1] -5 Next 0 3 0 1 -6 Halt 0 0 0 0 -7 Transaction 0 0 1 0 1 usesStmtJournal=0 -8 Goto 0 1 0 0 -``` - -and then run the same command in Turso's shell. - -If the bytecode is different, that's the bug -- work towards fixing code generation. -If the bytecode is the same, but query results are different, then the bug is somewhere in the virtual machine interpreter or storage layer. - ## Compatibility tests The `testing/test.all` is a starting point for adding functional tests using a similar syntax to SQLite. From 2df3d3ee0c9e654efc5c12a8a6f511ca2b5b2684 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Wed, 15 Oct 2025 21:51:52 +0530 Subject: [PATCH 272/428] add more tests --- testing/orderby.test | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/testing/orderby.test b/testing/orderby.test index 13ca8c762..0fac87856 100755 --- a/testing/orderby.test +++ b/testing/orderby.test @@ -249,3 +249,19 @@ do_execsql_test_on_specific_db {:memory:} orderby_alias_shadows_column { } {3|-3 2|-2 1|-1} + + do_execsql_test_in_memory_any_error order_by_ambiguous_column { + CREATE TABLE a(id INT, value INT); +INSERT INTO a VALUES (1, 10), (2, 20); + +CREATE TABLE b(id INT, value INT); +INSERT INTO b VALUES (1, 100), (2, 200); + +SELECT + a.id, + b.value +FROM + a JOIN b ON a.id = b.id +ORDER BY +value; + } \ No newline at end of file From 48eb456a12b5d03f8ec4bcad36f82d2d15912bf8 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 15 Oct 2025 20:04:17 -0400 Subject: [PATCH 273/428] Accept SEED env var for all fuzz tests --- tests/integration/fuzz/mod.rs | 70 +++++++++++++---------------------- 1 file changed, 25 insertions(+), 45 deletions(-) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 428b2638c..0e09c1c2c 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -3,7 +3,7 @@ pub mod grammar_generator; #[cfg(test)] mod tests { use rand::seq::{IndexedRandom, IteratorRandom, SliceRandom}; - use rand::{Rng, SeedableRng}; + use rand::Rng; use rand_chacha::ChaCha8Rng; use rusqlite::{params, types::Value}; use std::{collections::HashSet, io::Write}; @@ -12,8 +12,8 @@ mod tests { use crate::{ common::{ do_flush, limbo_exec_rows, limbo_exec_rows_fallible, limbo_stmt_get_column_names, - maybe_setup_tracing, rng_from_time, rng_from_time_or_env, rusqlite_integrity_check, - sqlite_exec_rows, TempDatabase, + maybe_setup_tracing, rng_from_time_or_env, rusqlite_integrity_check, sqlite_exec_rows, + TempDatabase, }, fuzz::grammar_generator::{const_str, rand_int, rand_str, GrammarGenerator}, }; @@ -221,12 +221,7 @@ mod tests { /// A test for verifying that index seek+scan works correctly for compound keys /// on indexes with various column orderings. pub fn index_scan_compound_key_fuzz() { - let (mut rng, seed) = if std::env::var("SEED").is_ok() { - let seed = std::env::var("SEED").unwrap().parse::().unwrap(); - (ChaCha8Rng::seed_from_u64(seed), seed) - } else { - rng_from_time() - }; + let (mut rng, seed) = rng_from_time_or_env(); let table_defs: [&str; 8] = [ "CREATE TABLE t (x, y, z, nonindexed_col, PRIMARY KEY (x, y, z))", "CREATE TABLE t (x, y, z, nonindexed_col, PRIMARY KEY (x desc, y, z))", @@ -516,12 +511,7 @@ mod tests { #[test] pub fn collation_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = if std::env::var("SEED").is_ok() { - let seed = std::env::var("SEED").unwrap().parse::().unwrap(); - (ChaCha8Rng::seed_from_u64(seed), seed) - } else { - rng_from_time() - }; + let (mut rng, seed) = rng_from_time_or_env(); println!("collation_fuzz seed: {seed}"); // Build six table variants that assign BINARY/NOCASE/RTRIM across (a,b,c) @@ -614,12 +604,7 @@ mod tests { // Fuzz WHERE clauses with and without explicit COLLATE on a/b/c let columns = ["a", "b", "c"]; let collates = [None, Some("BINARY"), Some("NOCASE"), Some("RTRIM")]; - let (mut rng, seed) = if std::env::var("SEED").is_ok() { - let seed = std::env::var("SEED").unwrap().parse::().unwrap(); - (ChaCha8Rng::seed_from_u64(seed), seed) - } else { - rng_from_time() - }; + let (mut rng, seed) = rng_from_time_or_env(); println!("collation_fuzz seed: {seed}"); const ITERS: usize = 3000; @@ -671,7 +656,7 @@ mod tests { #[ignore] // ignoring because every error I can find is due to sqlite sub-transaction behavior pub fn fk_deferred_constraints_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); println!("fk_deferred_constraints_fuzz seed: {seed}"); const OUTER_ITERS: usize = 10; @@ -987,7 +972,7 @@ mod tests { #[test] pub fn fk_single_pk_mutation_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); println!("fk_single_pk_mutation_fuzz seed: {seed}"); const OUTER_ITERS: usize = 20; @@ -1275,7 +1260,7 @@ mod tests { #[test] pub fn fk_edgecases_fuzzing() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); println!("fk_edgecases_minifuzz seed: {seed}"); const OUTER_ITERS: usize = 20; @@ -1630,7 +1615,7 @@ mod tests { #[test] pub fn fk_composite_pk_mutation_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); println!("fk_composite_pk_mutation_fuzz seed: {seed}"); const OUTER_ITERS: usize = 10; @@ -1843,7 +1828,7 @@ mod tests { /// Verify that the results are the same for SQLite and Turso. pub fn table_index_mutation_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); println!("table_index_mutation_fuzz seed: {seed}"); const OUTER_ITERATIONS: usize = 100; @@ -2064,12 +2049,7 @@ mod tests { const OUTER_ITERS: usize = 5; const INNER_ITERS: usize = 500; - let (mut rng, seed) = if std::env::var("SEED").is_ok() { - let seed = std::env::var("SEED").unwrap().parse::().unwrap(); - (ChaCha8Rng::seed_from_u64(seed), seed) - } else { - rng_from_time() - }; + let (mut rng, seed) = rng_from_time_or_env(); println!("partial_index_mutation_and_upsert_fuzz seed: {seed}"); // we want to hit unique constraints fairly often so limit the insert values const K_POOL: [&str; 35] = [ @@ -2400,7 +2380,7 @@ mod tests { #[test] pub fn compound_select_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("compound_select_fuzz seed: {seed}"); // Constants for fuzzing parameters @@ -2536,7 +2516,7 @@ mod tests { #[test] pub fn ddl_compatibility_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); const ITERATIONS: usize = 1000; for i in 0..ITERATIONS { let db = TempDatabase::new_empty(true); @@ -2705,7 +2685,7 @@ mod tests { let limbo_conn = db.connect_limbo(); let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); for _ in 0..1024 { let query = g.generate(&mut rng, sql, 50); @@ -2824,7 +2804,7 @@ mod tests { let limbo_conn = db.connect_limbo(); let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); for _ in 0..1024 { let query = g.generate(&mut rng, sql, 50); @@ -2984,7 +2964,7 @@ mod tests { let limbo_conn = db.connect_limbo(); let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); for _ in 0..1024 { let query = g.generate(&mut rng, sql, 50); @@ -3353,7 +3333,7 @@ mod tests { let limbo_conn = db.connect_limbo(); let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); for _ in 0..1024 { let query = g.generate(&mut rng, sql, 50); @@ -3402,7 +3382,7 @@ mod tests { let _ = env_logger::try_init(); let datatypes = ["INTEGER", "TEXT", "REAL", "BLOB"]; - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); for _ in 0..1000 { @@ -3457,7 +3437,7 @@ mod tests { pub fn affinity_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("affinity_fuzz seed: {seed}"); for iteration in 0..500 { @@ -3558,7 +3538,7 @@ mod tests { pub fn sum_agg_fuzz_floats() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); for _ in 0..100 { @@ -3604,7 +3584,7 @@ mod tests { pub fn sum_agg_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); for _ in 0..100 { @@ -3648,7 +3628,7 @@ mod tests { fn concat_ws_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); for _ in 0..100 { @@ -3694,7 +3674,7 @@ mod tests { pub fn total_agg_fuzz() { let _ = env_logger::try_init(); - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); for _ in 0..100 { @@ -3770,7 +3750,7 @@ mod tests { ); } - let (mut rng, seed) = rng_from_time(); + let (mut rng, seed) = rng_from_time_or_env(); log::info!("seed: {seed}"); let mut i = 0; From 41d2a0af77983f701f2d8b2a2d6836812b931f63 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 15 Oct 2025 22:39:41 -0400 Subject: [PATCH 274/428] Add INSERT OR IGNORE handling and refactor INSERT further --- core/translate/insert.rs | 682 ++++++++++++++++++++++----------------- 1 file changed, 383 insertions(+), 299 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 2fa5b8eec..102d7541d 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -222,13 +222,18 @@ pub fn translate_insert( crate::bail_parse_error!("no such table: {}", table_name); }; - let root_page = btree_table.root_page; - let BoundInsertResult { mut values, mut upsert_actions, inserting_multiple_rows, - } = bind_insert(&mut program, resolver, &table, &mut body, connection)?; + } = bind_insert( + &mut program, + resolver, + &table, + &mut body, + connection, + on_conflict.unwrap_or(ResolveType::Abort), + )?; if inserting_multiple_rows && btree_table.has_autoincrement { ensure_sequence_initialized(&mut program, resolver.schema, &btree_table)?; @@ -278,110 +283,18 @@ pub fn translate_insert( } let insertion = build_insertion(&mut program, &table, &columns, ctx.num_values)?; - if inserting_multiple_rows { - let select_result_start_reg = program - .reg_result_cols_start - .unwrap_or(ctx.yield_reg_opt.unwrap() + 1); - translate_rows_multiple( - &mut program, - &insertion, - select_result_start_reg, - resolver, - &ctx.temp_table_ctx, - )?; - } else { - // Single row - populate registers directly - program.emit_insn(Insn::OpenWrite { - cursor_id: ctx.cursor_id, - root_page: RegisterOrLiteral::Literal(root_page), - db: 0, - }); - - translate_rows_single(&mut program, &values, &insertion, resolver)?; - } - - // Open all the index btrees for writing - for idx_cursor in ctx.idx_cursors.iter() { - program.emit_insn(Insn::OpenWrite { - cursor_id: idx_cursor.2, - root_page: idx_cursor.1.into(), - db: 0, - }); - } + translate_rows_and_open_tables( + &mut program, + resolver, + &insertion, + &ctx, + &values, + inserting_multiple_rows, + )?; let has_user_provided_rowid = insertion.key.is_provided_by_user(); - if btree_table.has_autoincrement { - let seq_table = resolver - .schema - .get_btree_table("sqlite_sequence") - .ok_or_else(|| { - crate::error::LimboError::InternalError( - "sqlite_sequence table not found".to_string(), - ) - })?; - let seq_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(seq_table.clone())); - program.emit_insn(Insn::OpenWrite { - cursor_id: seq_cursor_id, - root_page: seq_table.root_page.into(), - db: 0, - }); - let table_name_reg = program.emit_string8_new_reg(btree_table.name.clone()); - let r_seq = program.alloc_register(); - let r_seq_rowid = program.alloc_register(); - - ctx.autoincrement_meta = Some(AutoincMeta { - seq_cursor_id, - r_seq, - r_seq_rowid, - table_name_reg, - }); - - program.emit_insn(Insn::Integer { - dest: r_seq, - value: 0, - }); - program.emit_insn(Insn::Null { - dest: r_seq_rowid, - dest_end: None, - }); - - let loop_start_label = program.allocate_label(); - let loop_end_label = program.allocate_label(); - let found_label = program.allocate_label(); - - program.emit_insn(Insn::Rewind { - cursor_id: seq_cursor_id, - pc_if_empty: loop_end_label, - }); - program.preassign_label_to_next_insn(loop_start_label); - - let name_col_reg = program.alloc_register(); - program.emit_column_or_rowid(seq_cursor_id, 0, name_col_reg); - program.emit_insn(Insn::Ne { - lhs: table_name_reg, - rhs: name_col_reg, - target_pc: found_label, - flags: Default::default(), - collation: None, - }); - - program.emit_column_or_rowid(seq_cursor_id, 1, r_seq); - program.emit_insn(Insn::RowId { - cursor_id: seq_cursor_id, - dest: r_seq_rowid, - }); - program.emit_insn(Insn::Goto { - target_pc: loop_end_label, - }); - - program.preassign_label_to_next_insn(found_label); - program.emit_insn(Insn::Next { - cursor_id: seq_cursor_id, - pc_if_next: loop_start_label, - }); - program.preassign_label_to_next_insn(loop_end_label); - } + init_autoincrement(&mut program, &mut ctx, resolver)?; if has_user_provided_rowid { let must_be_int_label = program.allocate_label(); @@ -406,76 +319,8 @@ pub fn translate_insert( } program.preassign_label_to_next_insn(ctx.key_generation_label); - if let Some(AutoincMeta { r_seq, .. }) = ctx.autoincrement_meta { - let r_max = program.alloc_register(); - let dummy_reg = program.alloc_register(); - - program.emit_insn(Insn::NewRowid { - cursor: ctx.cursor_id, - rowid_reg: dummy_reg, - prev_largest_reg: r_max, - }); - - program.emit_insn(Insn::Copy { - src_reg: r_seq, - dst_reg: insertion.key_register(), - extra_amount: 0, - }); - program.emit_insn(Insn::MemMax { - dest_reg: insertion.key_register(), - src_reg: r_max, - }); - - let no_overflow_label = program.allocate_label(); - let max_i64_reg = program.alloc_register(); - program.emit_insn(Insn::Integer { - dest: max_i64_reg, - value: i64::MAX, - }); - program.emit_insn(Insn::Ne { - lhs: insertion.key_register(), - rhs: max_i64_reg, - target_pc: no_overflow_label, - flags: Default::default(), - collation: None, - }); - - program.emit_insn(Insn::Halt { - err_code: crate::error::SQLITE_FULL, - description: "database or disk is full".to_string(), - }); - - program.preassign_label_to_next_insn(no_overflow_label); - - program.emit_insn(Insn::AddImm { - register: insertion.key_register(), - value: 1, - }); - - if let Some(AutoincMeta { - seq_cursor_id, - r_seq_rowid, - table_name_reg, - .. - }) = ctx.autoincrement_meta - { - emit_update_sqlite_sequence( - &mut program, - resolver.schema, - seq_cursor_id, - r_seq_rowid, - table_name_reg, - insertion.key_register(), - )?; - } - } else { - program.emit_insn(Insn::NewRowid { - cursor: ctx.cursor_id, - rowid_reg: insertion.key_register(), - prev_largest_reg: 0, - }); - } + emit_rowid_generation(&mut program, resolver, &ctx, &insertion)?; program.preassign_label_to_next_insn(ctx.key_ready_for_uniqueness_check_label); @@ -488,7 +333,9 @@ pub fn translate_insert( }); } - let (constraints_to_check, upsert_catch_all_position) = build_constraints_to_check( + // Build a list of upsert constraints/indexes we need to run preflight + // checks against, in the proper order of evaluation, + let constraints = build_constraints_to_check( resolver, table_name.as_str(), &upsert_actions, @@ -505,8 +352,7 @@ pub fn translate_insert( resolver, &insertion, &upsert_actions, - &constraints_to_check, - upsert_catch_all_position, + &constraints, )?; emit_notnulls(&mut program, &ctx, &insertion); @@ -526,85 +372,9 @@ pub fn translate_insert( }); if has_upsert { - // COMMIT PHASE: no preflight jumps happened; emit the actual index writes now - // We re-check partial-index predicates against the NEW image, produce packed records, - // and insert into all applicable indexes, we do not re-probe uniqueness here, as preflight - // already guaranteed non-conflict. - for index in resolver.schema.get_indices(table_name.as_str()) { - let idx_cursor_id = ctx - .idx_cursors - .iter() - .find(|(name, _, _)| *name == &index.name) - .map(|(_, _, c_id)| *c_id) - .expect("no cursor found for index"); - - // Re-evaluate partial predicate on the would-be inserted image - let commit_skip_label = if let Some(where_clause) = &index.where_clause { - let mut where_for_eval = where_clause.as_ref().clone(); - rewrite_partial_index_where(&mut where_for_eval, &insertion)?; - let reg = program.alloc_register(); - translate_expr_no_constant_opt( - &mut program, - Some(&TableReferences::new_empty()), - &where_for_eval, - reg, - resolver, - NoConstantOptReason::RegisterReuse, - )?; - let lbl = program.allocate_label(); - program.emit_insn(Insn::IfNot { - reg, - target_pc: lbl, - jump_if_null: true, - }); - Some(lbl) - } else { - None - }; - - let num_cols = index.columns.len(); - let idx_start_reg = program.alloc_registers(num_cols + 1); - - // Build [key cols..., rowid] from insertion registers - for (i, idx_col) in index.columns.iter().enumerate() { - let Some(cm) = insertion.get_col_mapping_by_name(&idx_col.name) else { - return Err(crate::LimboError::PlanningError( - "Column not found in INSERT (commit phase)".to_string(), - )); - }; - program.emit_insn(Insn::Copy { - src_reg: cm.register, - dst_reg: idx_start_reg + i, - extra_amount: 0, - }); - } - program.emit_insn(Insn::Copy { - src_reg: insertion.key_register(), - dst_reg: idx_start_reg + num_cols, - extra_amount: 0, - }); - - let record_reg = program.alloc_register(); - program.emit_insn(Insn::MakeRecord { - start_reg: idx_start_reg, - count: num_cols + 1, - dest_reg: record_reg, - index_name: Some(index.name.clone()), - affinity_str: None, - }); - program.emit_insn(Insn::IdxInsert { - cursor_id: idx_cursor_id, - record_reg, - unpacked_start: Some(idx_start_reg), - unpacked_count: Some((num_cols + 1) as u16), - flags: IdxInsertFlags::new().nchange(true), - }); - - if let Some(lbl) = commit_skip_label { - program.resolve_label(lbl, program.offset()); - } - } + emit_commit_phase(&mut program, resolver, &insertion, &ctx)?; } + if has_fks { // Child-side check must run before Insert (may HALT or increment deferred counter) emit_fk_child_insert_checks( @@ -712,8 +482,13 @@ pub fn translate_insert( connection, )?; + emit_epilogue(&mut program, &ctx, inserting_multiple_rows); + Ok(program) +} + +fn emit_epilogue(program: &mut ProgramBuilder, ctx: &InsertEmitCtx, inserting_multiple_rows: bool) { if inserting_multiple_rows { - if let Some(temp_table_ctx) = ctx.temp_table_ctx { + if let Some(temp_table_ctx) = &ctx.temp_table_ctx { program.resolve_label(ctx.row_done_label, program.offset()); program.emit_insn(Insn::Next { @@ -748,11 +523,214 @@ pub fn translate_insert( target_pc: ctx.stmt_epilogue, }); } - program.preassign_label_to_next_insn(ctx.stmt_epilogue); program.resolve_label(ctx.halt_label, program.offset()); +} - Ok(program) +// COMMIT PHASE: no preflight jumps happened; emit the actual index writes now +// We re-check partial-index predicates against the NEW image, produce packed records, +// and insert into all applicable indexes, we do not re-probe uniqueness here, as preflight +// already guaranteed non-conflict. +fn emit_commit_phase( + program: &mut ProgramBuilder, + resolver: &Resolver, + insertion: &Insertion, + ctx: &InsertEmitCtx, +) -> Result<()> { + for index in resolver.schema.get_indices(ctx.table.name.as_str()) { + let idx_cursor_id = ctx + .idx_cursors + .iter() + .find(|(name, _, _)| *name == &index.name) + .map(|(_, _, c_id)| *c_id) + .expect("no cursor found for index"); + + // Re-evaluate partial predicate on the would-be inserted image + let commit_skip_label = if let Some(where_clause) = &index.where_clause { + let mut where_for_eval = where_clause.as_ref().clone(); + rewrite_partial_index_where(&mut where_for_eval, insertion)?; + let reg = program.alloc_register(); + translate_expr_no_constant_opt( + program, + Some(&TableReferences::new_empty()), + &where_for_eval, + reg, + resolver, + NoConstantOptReason::RegisterReuse, + )?; + let lbl = program.allocate_label(); + program.emit_insn(Insn::IfNot { + reg, + target_pc: lbl, + jump_if_null: true, + }); + Some(lbl) + } else { + None + }; + + let num_cols = index.columns.len(); + let idx_start_reg = program.alloc_registers(num_cols + 1); + + // Build [key cols..., rowid] from insertion registers + for (i, idx_col) in index.columns.iter().enumerate() { + let Some(cm) = insertion.get_col_mapping_by_name(&idx_col.name) else { + return Err(crate::LimboError::PlanningError( + "Column not found in INSERT (commit phase)".to_string(), + )); + }; + program.emit_insn(Insn::Copy { + src_reg: cm.register, + dst_reg: idx_start_reg + i, + extra_amount: 0, + }); + } + program.emit_insn(Insn::Copy { + src_reg: insertion.key_register(), + dst_reg: idx_start_reg + num_cols, + extra_amount: 0, + }); + + let record_reg = program.alloc_register(); + program.emit_insn(Insn::MakeRecord { + start_reg: idx_start_reg, + count: num_cols + 1, + dest_reg: record_reg, + index_name: Some(index.name.clone()), + affinity_str: None, + }); + program.emit_insn(Insn::IdxInsert { + cursor_id: idx_cursor_id, + record_reg, + unpacked_start: Some(idx_start_reg), + unpacked_count: Some((num_cols + 1) as u16), + flags: IdxInsertFlags::new().nchange(true), + }); + + if let Some(lbl) = commit_skip_label { + program.resolve_label(lbl, program.offset()); + } + } + Ok(()) +} + +fn translate_rows_and_open_tables( + program: &mut ProgramBuilder, + resolver: &Resolver, + insertion: &Insertion, + ctx: &InsertEmitCtx, + values: &[Box], + inserting_multiple_rows: bool, +) -> Result<()> { + if inserting_multiple_rows { + let select_result_start_reg = program + .reg_result_cols_start + .unwrap_or(ctx.yield_reg_opt.unwrap() + 1); + translate_rows_multiple( + program, + insertion, + select_result_start_reg, + resolver, + &ctx.temp_table_ctx, + )?; + } else { + // Single row - populate registers directly + program.emit_insn(Insn::OpenWrite { + cursor_id: ctx.cursor_id, + root_page: RegisterOrLiteral::Literal(ctx.table.root_page), + db: 0, + }); + + translate_rows_single(program, values, insertion, resolver)?; + } + + // Open all the index btrees for writing + for idx_cursor in ctx.idx_cursors.iter() { + program.emit_insn(Insn::OpenWrite { + cursor_id: idx_cursor.2, + root_page: idx_cursor.1.into(), + db: 0, + }); + } + Ok(()) +} + +fn emit_rowid_generation( + program: &mut ProgramBuilder, + resolver: &Resolver, + ctx: &InsertEmitCtx, + insertion: &Insertion, +) -> Result<()> { + if let Some(AutoincMeta { + r_seq, + seq_cursor_id, + r_seq_rowid, + table_name_reg, + .. + }) = ctx.autoincrement_meta + { + let r_max = program.alloc_register(); + + let dummy_reg = program.alloc_register(); + + program.emit_insn(Insn::NewRowid { + cursor: ctx.cursor_id, + rowid_reg: dummy_reg, + prev_largest_reg: r_max, + }); + + program.emit_insn(Insn::Copy { + src_reg: r_seq, + dst_reg: insertion.key_register(), + extra_amount: 0, + }); + program.emit_insn(Insn::MemMax { + dest_reg: insertion.key_register(), + src_reg: r_max, + }); + + let no_overflow_label = program.allocate_label(); + let max_i64_reg = program.alloc_register(); + program.emit_insn(Insn::Integer { + dest: max_i64_reg, + value: i64::MAX, + }); + program.emit_insn(Insn::Ne { + lhs: insertion.key_register(), + rhs: max_i64_reg, + target_pc: no_overflow_label, + flags: Default::default(), + collation: None, + }); + + program.emit_insn(Insn::Halt { + err_code: crate::error::SQLITE_FULL, + description: "database or disk is full".to_string(), + }); + + program.preassign_label_to_next_insn(no_overflow_label); + + program.emit_insn(Insn::AddImm { + register: insertion.key_register(), + value: 1, + }); + + emit_update_sqlite_sequence( + program, + resolver.schema, + seq_cursor_id, + r_seq_rowid, + table_name_reg, + insertion.key_register(), + )?; + } else { + program.emit_insn(Insn::NewRowid { + cursor: ctx.cursor_id, + rowid_reg: insertion.key_register(), + prev_largest_reg: 0, + }); + } + Ok(()) } #[allow(clippy::too_many_arguments)] @@ -798,6 +776,86 @@ fn resolve_upserts( Ok(()) } +fn init_autoincrement( + program: &mut ProgramBuilder, + ctx: &mut InsertEmitCtx, + resolver: &Resolver, +) -> Result<()> { + if ctx.table.has_autoincrement { + let seq_table = resolver + .schema + .get_btree_table("sqlite_sequence") + .ok_or_else(|| { + crate::error::LimboError::InternalError( + "sqlite_sequence table not found".to_string(), + ) + })?; + let seq_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(seq_table.clone())); + program.emit_insn(Insn::OpenWrite { + cursor_id: seq_cursor_id, + root_page: seq_table.root_page.into(), + db: 0, + }); + + let table_name_reg = program.emit_string8_new_reg(ctx.table.name.clone()); + let r_seq = program.alloc_register(); + let r_seq_rowid = program.alloc_register(); + + ctx.autoincrement_meta = Some(AutoincMeta { + seq_cursor_id, + r_seq, + r_seq_rowid, + table_name_reg, + }); + + program.emit_insn(Insn::Integer { + dest: r_seq, + value: 0, + }); + program.emit_insn(Insn::Null { + dest: r_seq_rowid, + dest_end: None, + }); + + let loop_start_label = program.allocate_label(); + let loop_end_label = program.allocate_label(); + let found_label = program.allocate_label(); + + program.emit_insn(Insn::Rewind { + cursor_id: seq_cursor_id, + pc_if_empty: loop_end_label, + }); + program.preassign_label_to_next_insn(loop_start_label); + + let name_col_reg = program.alloc_register(); + program.emit_column_or_rowid(seq_cursor_id, 0, name_col_reg); + program.emit_insn(Insn::Ne { + lhs: table_name_reg, + rhs: name_col_reg, + target_pc: found_label, + flags: Default::default(), + collation: None, + }); + + program.emit_column_or_rowid(seq_cursor_id, 1, r_seq); + program.emit_insn(Insn::RowId { + cursor_id: seq_cursor_id, + dest: r_seq_rowid, + }); + program.emit_insn(Insn::Goto { + target_pc: loop_end_label, + }); + + program.preassign_label_to_next_insn(found_label); + program.emit_insn(Insn::Next { + cursor_id: seq_cursor_id, + pc_if_next: loop_start_label, + }); + program.preassign_label_to_next_insn(loop_end_label); + } + Ok(()) +} + fn emit_notnulls(program: &mut ProgramBuilder, ctx: &InsertEmitCtx, insertion: &Insertion) { for column_mapping in insertion .col_mappings @@ -850,8 +908,10 @@ fn bind_insert( table: &Table, body: &mut InsertBody, connection: &Arc, + on_conflict: ResolveType, ) -> Result { let mut values: Vec> = vec![]; + let mut upsert: Option> = None; let mut upsert_actions: Vec<(ResolvedUpsertTarget, BranchOffset, Box)> = Vec::new(); let mut inserting_multiple_rows = false; match body { @@ -907,43 +967,60 @@ fn bind_insert( } _ => inserting_multiple_rows = true, } - while let Some(mut upsert) = upsert_opt.take() { - if let UpsertDo::Set { - ref mut sets, - ref mut where_clause, - } = &mut upsert.do_clause - { - for set in sets.iter_mut() { - bind_and_rewrite_expr( - &mut set.expr, - None, - None, - connection, - &mut program.param_ctx, - BindingBehavior::AllowUnboundIdentifiers, - )?; - } - if let Some(ref mut where_expr) = where_clause { - bind_and_rewrite_expr( - where_expr, - None, - None, - connection, - &mut program.param_ctx, - BindingBehavior::AllowUnboundIdentifiers, - )?; - } - } - let next = upsert.next.take(); - upsert_actions.push(( - // resolve the constrained target for UPSERT in the chain - resolve_upsert_target(resolver.schema, table, &upsert)?, - program.allocate_label(), - upsert, - )); - *upsert_opt = next; + upsert = upsert_opt.take(); + } + } + match on_conflict { + ResolveType::Ignore => { + upsert.replace(Box::new(ast::Upsert { + do_clause: UpsertDo::Nothing, + index: None, + next: None, + })); + } + ResolveType::Abort => {} + _ => { + crate::bail_parse_error!( + "INSERT OR {} is only supported with UPSERT", + on_conflict.to_string() + ); + } + } + while let Some(mut upsert_opt) = upsert.take() { + if let UpsertDo::Set { + ref mut sets, + ref mut where_clause, + } = &mut upsert_opt.do_clause + { + for set in sets.iter_mut() { + bind_and_rewrite_expr( + &mut set.expr, + None, + None, + connection, + &mut program.param_ctx, + BindingBehavior::AllowUnboundIdentifiers, + )?; + } + if let Some(ref mut where_expr) = where_clause { + bind_and_rewrite_expr( + where_expr, + None, + None, + connection, + &mut program.param_ctx, + BindingBehavior::AllowUnboundIdentifiers, + )?; } } + let next = upsert_opt.next.take(); + upsert_actions.push(( + // resolve the constrained target for UPSERT in the chain + resolve_upsert_target(resolver.schema, table, &upsert_opt)?, + program.allocate_label(), + upsert_opt, + )); + upsert = next; } Ok(BoundInsertResult { values, @@ -1552,10 +1629,9 @@ fn emit_preflight_constraint_checks( resolver: &Resolver, insertion: &Insertion, upsert_actions: &[(ResolvedUpsertTarget, BranchOffset, Box)], - constraints_to_check: &[(ResolvedUpsertTarget, Option)], - upsert_catch_all_position: Option, + constraints: &CheckConstraints, ) -> Result<()> { - for (constraint, position) in constraints_to_check { + for (constraint, position) in &constraints.constraints_to_check { match constraint { ResolvedUpsertTarget::PrimaryKey => { let make_record_label = program.allocate_label(); @@ -1569,7 +1645,7 @@ fn emit_preflight_constraint_checks( // Conflict on rowid: attempt to route through UPSERT if it targets the PK, otherwise raise constraint. // emit Halt for every case *except* when upsert handles the conflict 'emit_halt: { - if let Some(position) = position.or(upsert_catch_all_position) { + if let Some(position) = position.or(constraints.upsert_catch_all_position) { // PK conflict: the conflicting rowid is exactly the attempted key program.emit_insn(Insn::Copy { src_reg: insertion.key_register(), @@ -1677,7 +1753,7 @@ fn emit_preflight_constraint_checks( }); // Conflict detected, figure out if this UPSERT handles the conflict - if let Some(position) = position.or(upsert_catch_all_position) { + if let Some(position) = position.or(constraints.upsert_catch_all_position) { match &upsert_actions[position].2.do_clause { UpsertDo::Nothing => { // Bail out without writing anything @@ -2008,12 +2084,17 @@ pub fn rewrite_partial_index_where( ) } +struct CheckConstraints { + constraints_to_check: Vec<(ResolvedUpsertTarget, Option)>, + upsert_catch_all_position: Option, +} + fn build_constraints_to_check( resolver: &Resolver, table_name: &str, upsert_actions: &[(ResolvedUpsertTarget, BranchOffset, Box)], has_user_provided_rowid: bool, -) -> (Vec<(ResolvedUpsertTarget, Option)>, Option) { +) -> CheckConstraints { let mut constraints_to_check = Vec::new(); if has_user_provided_rowid { // Check uniqueness constraint for rowid if it was provided by user. @@ -2043,7 +2124,10 @@ fn build_constraints_to_check( } else { None }; - (constraints_to_check, upsert_catch_all_position) + CheckConstraints { + constraints_to_check, + upsert_catch_all_position, + } } fn emit_update_sqlite_sequence( From 3112f55e05a0465cef42cb8296028e160c71f9db Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 15 Oct 2025 22:46:55 -0400 Subject: [PATCH 275/428] Add TCL tests for INSERT OR IGNORE handling --- testing/insert.test | 75 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/testing/insert.test b/testing/insert.test index 26e5974bf..75b8630b3 100755 --- a/testing/insert.test +++ b/testing/insert.test @@ -701,4 +701,77 @@ do_execsql_test_on_specific_db {:memory:} insert-rowid-backwards-compability-2 { CREATE TABLE t(a INTEGER, PRIMARY KEY (a DESC)); INSERT INTO t(a) VALUES (123); SELECT rowid, * FROM t; -} {123|123} \ No newline at end of file +} {123|123} + + +do_execsql_test_on_specific_db {:memory:} ignore-pk-conflict { + CREATE TABLE t(a INTEGER PRIMARY KEY); + INSERT INTO t VALUES (1),(2),(3); + INSERT OR IGNORE INTO t VALUES (2); + SELECT a FROM t ORDER BY a; +} {1 +2 +3} + +do_execsql_test_on_specific_db {:memory:} ignore-unique-conflict { + CREATE TABLE t(a INTEGER, b TEXT UNIQUE); + INSERT INTO t VALUES (1,'x'),(2,'y'); + INSERT OR IGNORE INTO t VALUES (3,'y'); + SELECT a,b FROM t ORDER BY a; +} {1|x +2|y} + +do_execsql_test_on_specific_db {:memory:} ignore-multi-unique-conflict { + CREATE TABLE t(a UNIQUE, b UNIQUE, c); + INSERT INTO t VALUES (1,10,100),(2,20,200); + INSERT OR IGNORE INTO t VALUES (1,30,300); -- conflicts on a + INSERT OR IGNORE INTO t VALUES (3,20,300); -- conflicts on b + INSERT OR IGNORE INTO t VALUES (1,20,300); -- conflicts on both + SELECT a,b,c FROM t ORDER BY a; +} {1|10|100 +2|20|200} + +do_execsql_test_on_specific_db {:memory:} ignore-some-conflicts-multirow { + CREATE TABLE t(a INTEGER UNIQUE); + INSERT INTO t VALUES (2),(4); + INSERT OR IGNORE INTO t VALUES (1),(2),(3),(4),(5); + SELECT a FROM t ORDER BY a; +} {1 +2 +3 +4 +5} + +do_execsql_test_on_specific_db {:memory:} ignore-from-select { + CREATE TABLE src(x); + INSERT INTO src VALUES (1),(2),(2),(3); + CREATE TABLE dst(a INTEGER UNIQUE); + INSERT INTO dst VALUES (2); + INSERT OR IGNORE INTO dst SELECT x FROM src; + SELECT a FROM dst ORDER BY a; +} {1 +2 +3} + +do_execsql_test_on_specific_db {:memory:} ignore-null-in-unique { + CREATE TABLE t(a INTEGER UNIQUE); + INSERT INTO t VALUES (1),(NULL),(NULL); + INSERT OR IGNORE INTO t VALUES (1),(NULL); + SELECT COUNT(*) FROM t WHERE a IS NULL; +} {3} + +do_execsql_test_on_specific_db {:memory:} ignore-preserves-rowid { + CREATE TABLE t(data TEXT UNIQUE); + INSERT INTO t VALUES ('x'),('y'),('z'); + SELECT rowid, data FROM t WHERE data='y'; + INSERT OR IGNORE INTO t VALUES ('y'); + SELECT rowid, data FROM t WHERE data='y'; +} {2|y +2|y} + +do_execsql_test_on_specific_db {:memory:} ignore-intra-statement-dups { + CREATE TABLE t(a INTEGER PRIMARY KEY, b TEXT); + INSERT OR IGNORE INTO t VALUES (5,'first'),(6,'x'),(5,'second'),(5,'third'); + SELECT a,b FROM t ORDER BY a; +} {5|first +6|x} From 2a1be48f3a19c9a1478574f24150c676379eeab8 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Thu, 16 Oct 2025 01:10:33 -0300 Subject: [PATCH 276/428] do not run build.rs on debug mode --- core/build.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/core/build.rs b/core/build.rs index 50afee6bf..270fae925 100644 --- a/core/build.rs +++ b/core/build.rs @@ -1,7 +1,13 @@ -use std::fs; use std::path::PathBuf; +use std::{env, fs}; fn main() { + let profile = env::var("PROFILE").unwrap_or_else(|_| "debug".to_string()); + + if profile == "debug" { + println!("cargo::rerun-if-changed=build.rs"); + } + let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap()); let built_file = out_dir.join("built.rs"); From 25339a52006c685ded92b786185e3ccc631dff70 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 09:30:41 +0300 Subject: [PATCH 277/428] rename: CheckConstraints -> ConstraintsToCheck CHECK constraints is a separate SQL concept, so let's remove some potential confusion from the naming. --- core/translate/insert.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 102d7541d..74f3bc321 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -1629,7 +1629,7 @@ fn emit_preflight_constraint_checks( resolver: &Resolver, insertion: &Insertion, upsert_actions: &[(ResolvedUpsertTarget, BranchOffset, Box)], - constraints: &CheckConstraints, + constraints: &ConstraintsToCheck, ) -> Result<()> { for (constraint, position) in &constraints.constraints_to_check { match constraint { @@ -2084,7 +2084,7 @@ pub fn rewrite_partial_index_where( ) } -struct CheckConstraints { +struct ConstraintsToCheck { constraints_to_check: Vec<(ResolvedUpsertTarget, Option)>, upsert_catch_all_position: Option, } @@ -2094,7 +2094,7 @@ fn build_constraints_to_check( table_name: &str, upsert_actions: &[(ResolvedUpsertTarget, BranchOffset, Box)], has_user_provided_rowid: bool, -) -> CheckConstraints { +) -> ConstraintsToCheck { let mut constraints_to_check = Vec::new(); if has_user_provided_rowid { // Check uniqueness constraint for rowid if it was provided by user. @@ -2124,7 +2124,7 @@ fn build_constraints_to_check( } else { None }; - CheckConstraints { + ConstraintsToCheck { constraints_to_check, upsert_catch_all_position, } From 95f375791b282a84194dbb05ebebe7ea24e2d5ed Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 09:34:13 +0300 Subject: [PATCH 278/428] refactor: move condition outside init_autoincrement --- core/translate/insert.rs | 128 +++++++++++++++++++-------------------- 1 file changed, 63 insertions(+), 65 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 74f3bc321..f989953f6 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -294,7 +294,9 @@ pub fn translate_insert( let has_user_provided_rowid = insertion.key.is_provided_by_user(); - init_autoincrement(&mut program, &mut ctx, resolver)?; + if ctx.table.has_autoincrement { + init_autoincrement(&mut program, &mut ctx, resolver)?; + } if has_user_provided_rowid { let must_be_int_label = program.allocate_label(); @@ -781,78 +783,74 @@ fn init_autoincrement( ctx: &mut InsertEmitCtx, resolver: &Resolver, ) -> Result<()> { - if ctx.table.has_autoincrement { - let seq_table = resolver - .schema - .get_btree_table("sqlite_sequence") - .ok_or_else(|| { - crate::error::LimboError::InternalError( - "sqlite_sequence table not found".to_string(), - ) - })?; - let seq_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(seq_table.clone())); - program.emit_insn(Insn::OpenWrite { - cursor_id: seq_cursor_id, - root_page: seq_table.root_page.into(), - db: 0, - }); + let seq_table = resolver + .schema + .get_btree_table("sqlite_sequence") + .ok_or_else(|| { + crate::error::LimboError::InternalError("sqlite_sequence table not found".to_string()) + })?; + let seq_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(seq_table.clone())); + program.emit_insn(Insn::OpenWrite { + cursor_id: seq_cursor_id, + root_page: seq_table.root_page.into(), + db: 0, + }); - let table_name_reg = program.emit_string8_new_reg(ctx.table.name.clone()); - let r_seq = program.alloc_register(); - let r_seq_rowid = program.alloc_register(); + let table_name_reg = program.emit_string8_new_reg(ctx.table.name.clone()); + let r_seq = program.alloc_register(); + let r_seq_rowid = program.alloc_register(); - ctx.autoincrement_meta = Some(AutoincMeta { - seq_cursor_id, - r_seq, - r_seq_rowid, - table_name_reg, - }); + ctx.autoincrement_meta = Some(AutoincMeta { + seq_cursor_id, + r_seq, + r_seq_rowid, + table_name_reg, + }); - program.emit_insn(Insn::Integer { - dest: r_seq, - value: 0, - }); - program.emit_insn(Insn::Null { - dest: r_seq_rowid, - dest_end: None, - }); + program.emit_insn(Insn::Integer { + dest: r_seq, + value: 0, + }); + program.emit_insn(Insn::Null { + dest: r_seq_rowid, + dest_end: None, + }); - let loop_start_label = program.allocate_label(); - let loop_end_label = program.allocate_label(); - let found_label = program.allocate_label(); + let loop_start_label = program.allocate_label(); + let loop_end_label = program.allocate_label(); + let found_label = program.allocate_label(); - program.emit_insn(Insn::Rewind { - cursor_id: seq_cursor_id, - pc_if_empty: loop_end_label, - }); - program.preassign_label_to_next_insn(loop_start_label); + program.emit_insn(Insn::Rewind { + cursor_id: seq_cursor_id, + pc_if_empty: loop_end_label, + }); + program.preassign_label_to_next_insn(loop_start_label); - let name_col_reg = program.alloc_register(); - program.emit_column_or_rowid(seq_cursor_id, 0, name_col_reg); - program.emit_insn(Insn::Ne { - lhs: table_name_reg, - rhs: name_col_reg, - target_pc: found_label, - flags: Default::default(), - collation: None, - }); + let name_col_reg = program.alloc_register(); + program.emit_column_or_rowid(seq_cursor_id, 0, name_col_reg); + program.emit_insn(Insn::Ne { + lhs: table_name_reg, + rhs: name_col_reg, + target_pc: found_label, + flags: Default::default(), + collation: None, + }); - program.emit_column_or_rowid(seq_cursor_id, 1, r_seq); - program.emit_insn(Insn::RowId { - cursor_id: seq_cursor_id, - dest: r_seq_rowid, - }); - program.emit_insn(Insn::Goto { - target_pc: loop_end_label, - }); + program.emit_column_or_rowid(seq_cursor_id, 1, r_seq); + program.emit_insn(Insn::RowId { + cursor_id: seq_cursor_id, + dest: r_seq_rowid, + }); + program.emit_insn(Insn::Goto { + target_pc: loop_end_label, + }); - program.preassign_label_to_next_insn(found_label); - program.emit_insn(Insn::Next { - cursor_id: seq_cursor_id, - pc_if_next: loop_start_label, - }); - program.preassign_label_to_next_insn(loop_end_label); - } + program.preassign_label_to_next_insn(found_label); + program.emit_insn(Insn::Next { + cursor_id: seq_cursor_id, + pc_if_next: loop_start_label, + }); + program.preassign_label_to_next_insn(loop_end_label); Ok(()) } From bf5de920f2567c2cb10993ebd97dae189380762d Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 16 Oct 2025 11:10:43 +0300 Subject: [PATCH 279/428] core: Unsafe Send and Sync pushdown This patch pushes unsafe Send and Sync to individual components instead of doing it at Database level. This makes it easier for us to incrementally fix thread-safety, but avoid developers adding more thread unsafe code. --- core/incremental/compiler.rs | 10 ++++++++++ core/incremental/operator.rs | 4 +++- core/incremental/view.rs | 15 +++++++++++++++ core/io/mod.rs | 5 +++++ core/lib.rs | 7 +++++++ core/storage/pager.rs | 5 +++++ core/vdbe/mod.rs | 10 ++++++++++ 7 files changed, 55 insertions(+), 1 deletion(-) diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index f067515cc..b62795fab 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -329,6 +329,11 @@ pub struct DbspNode { pub executable: Box, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for DbspNode {} +unsafe impl Sync for DbspNode {} + impl std::fmt::Debug for DbspNode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DbspNode") @@ -395,6 +400,11 @@ pub struct DbspCircuit { pub(super) internal_state_index_root: i64, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for DbspCircuit {} +unsafe impl Sync for DbspCircuit {} + impl DbspCircuit { /// Create a new empty circuit with initial empty schema /// The actual output schema will be set when the root node is established diff --git a/core/incremental/operator.rs b/core/incremental/operator.rs index 276249fb3..70ab72d74 100644 --- a/core/incremental/operator.rs +++ b/core/incremental/operator.rs @@ -218,7 +218,9 @@ pub enum QueryOperator { /// Operator DAG (Directed Acyclic Graph) /// Base trait for incremental operators -pub trait IncrementalOperator: Debug { +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +pub trait IncrementalOperator: Debug + Send { /// Evaluate the operator with a state, without modifying internal state /// This is used during query execution to compute results /// May need to read from storage to get current state (e.g., for aggregates) diff --git a/core/incremental/view.rs b/core/incremental/view.rs index 957605d17..a82a1188b 100644 --- a/core/incremental/view.rs +++ b/core/incremental/view.rs @@ -40,6 +40,11 @@ pub enum PopulateState { Done, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for PopulateState {} +unsafe impl Sync for PopulateState {} + /// State machine for merge_delta to handle I/O operations impl fmt::Debug for PopulateState { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -130,6 +135,11 @@ pub struct AllViewsTxState { states: Rc>>>, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for AllViewsTxState {} +unsafe impl Sync for AllViewsTxState {} + impl AllViewsTxState { /// Create a new container for view transaction states pub fn new() -> Self { @@ -210,6 +220,11 @@ pub struct IncrementalView { root_page: i64, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for IncrementalView {} +unsafe impl Sync for IncrementalView {} + impl IncrementalView { /// Try to compile the SELECT statement into a DBSP circuit fn try_compile_circuit( diff --git a/core/io/mod.rs b/core/io/mod.rs index 35f7d4786..77ff1b807 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -67,6 +67,11 @@ pub trait File: Send + Sync { #[derive(Debug, Copy, Clone, PartialEq)] pub struct OpenFlags(i32); +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for OpenFlags {} +unsafe impl Sync for OpenFlags {} + bitflags! { impl OpenFlags: i32 { const None = 0b00000000; diff --git a/core/lib.rs b/core/lib.rs index 25ed3114c..637e91c92 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -225,6 +225,8 @@ pub struct Database { n_connections: AtomicUsize, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 unsafe impl Send for Database {} unsafe impl Sync for Database {} @@ -1107,6 +1109,11 @@ pub struct Connection { fk_deferred_violations: AtomicIsize, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for Connection {} +unsafe impl Sync for Connection {} + impl Drop for Connection { fn drop(&mut self) { if !self.is_closed() { diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 021071134..2c8490a1d 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -548,6 +548,11 @@ pub struct Pager { enable_encryption: AtomicBool, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for Pager {} +unsafe impl Sync for Pager {} + #[cfg(not(feature = "omit_autovacuum"))] pub struct VacuumState { /// State machine for [Pager::ptrmap_get] diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 584b62da4..ed8190092 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -266,6 +266,11 @@ pub struct Row { count: usize, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for Row {} +unsafe impl Sync for Row {} + #[derive(Debug, Clone, Copy, PartialEq)] pub enum TxnCleanup { None, @@ -317,6 +322,11 @@ pub struct ProgramState { fk_scope_counter: isize, } +// SAFETY: This needs to be audited for thread safety. +// See: https://github.com/tursodatabase/turso/issues/1552 +unsafe impl Send for ProgramState {} +unsafe impl Sync for ProgramState {} + impl ProgramState { pub fn new(max_registers: usize, max_cursors: usize) -> Self { let cursors: Vec> = (0..max_cursors).map(|_| None).collect(); From a450a43d6d89033a23dbf5f86df45c4a432bba1d Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 16 Oct 2025 11:30:30 +0300 Subject: [PATCH 280/428] dist: Add Linux/arm64 target for install package --- dist-workspace.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dist-workspace.toml b/dist-workspace.toml index 71bd31d3f..daa295ace 100644 --- a/dist-workspace.toml +++ b/dist-workspace.toml @@ -10,7 +10,7 @@ ci = "github" # The installers to generate for each app installers = ["shell", "powershell"] # Target platforms to build apps for (Rust target-triple syntax) -targets = ["aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu", "x86_64-pc-windows-msvc"] +targets = ["aarch64-apple-darwin", "aarch64-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu", "x86_64-pc-windows-msvc"] # Which actions to run on pull requests pr-run-mode = "plan" # Path that installers should place binaries in From e9c0fdcb4b4f3b50c89b149bb5b98118a43f20a2 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 16 Oct 2025 11:31:30 +0300 Subject: [PATCH 281/428] Turso 0.3.0-pre.2 --- Cargo.lock | 54 +++++++++---------- Cargo.toml | 34 ++++++------ bindings/javascript/package-lock.json | 36 ++++++------- bindings/javascript/package.json | 2 +- .../javascript/packages/common/package.json | 2 +- .../javascript/packages/native/package.json | 4 +- .../packages/wasm-common/package.json | 2 +- .../javascript/packages/wasm/package.json | 6 +-- .../sync/packages/common/package.json | 4 +- .../sync/packages/native/package.json | 6 +-- .../sync/packages/wasm/package.json | 8 +-- bindings/javascript/yarn.lock | 36 ++++++------- 12 files changed, 97 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 664c2d283..ace2430ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -821,7 +821,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_tester" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "anyhow", "assert_cmd", @@ -2539,7 +2539,7 @@ dependencies = [ [[package]] name = "limbo_completion" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "mimalloc", "turso_ext", @@ -2547,7 +2547,7 @@ dependencies = [ [[package]] name = "limbo_crypto" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "blake3", "data-encoding", @@ -2560,7 +2560,7 @@ dependencies = [ [[package]] name = "limbo_csv" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "csv", "mimalloc", @@ -2570,7 +2570,7 @@ dependencies = [ [[package]] name = "limbo_fuzzy" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "mimalloc", "turso_ext", @@ -2578,7 +2578,7 @@ dependencies = [ [[package]] name = "limbo_ipaddr" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "ipnetwork", "mimalloc", @@ -2587,7 +2587,7 @@ dependencies = [ [[package]] name = "limbo_percentile" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "mimalloc", "turso_ext", @@ -2595,7 +2595,7 @@ dependencies = [ [[package]] name = "limbo_regexp" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "mimalloc", "regex", @@ -2604,7 +2604,7 @@ dependencies = [ [[package]] name = "limbo_sim" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "anyhow", "bitflags 2.9.4", @@ -2640,7 +2640,7 @@ dependencies = [ [[package]] name = "limbo_sqlite_test_ext" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "cc", ] @@ -3456,7 +3456,7 @@ dependencies = [ [[package]] name = "py-turso" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "anyhow", "pyo3", @@ -4193,7 +4193,7 @@ checksum = "d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d" [[package]] name = "sql_generation" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "anarchist-readable-name-generator-lib 0.2.0", "anyhow", @@ -4821,7 +4821,7 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "turso" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "rand 0.9.2", "rand_chacha 0.9.0", @@ -4833,7 +4833,7 @@ dependencies = [ [[package]] name = "turso-java" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "jni", "thiserror 2.0.16", @@ -4842,7 +4842,7 @@ dependencies = [ [[package]] name = "turso_cli" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "anyhow", "cfg-if", @@ -4878,7 +4878,7 @@ dependencies = [ [[package]] name = "turso_core" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "aegis", "aes", @@ -4938,7 +4938,7 @@ dependencies = [ [[package]] name = "turso_dart" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "flutter_rust_bridge", "turso_core", @@ -4946,7 +4946,7 @@ dependencies = [ [[package]] name = "turso_ext" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "chrono", "getrandom 0.3.2", @@ -4955,7 +4955,7 @@ dependencies = [ [[package]] name = "turso_ext_tests" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "env_logger 0.11.7", "lazy_static", @@ -4966,7 +4966,7 @@ dependencies = [ [[package]] name = "turso_macros" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "proc-macro2", "quote", @@ -4975,7 +4975,7 @@ dependencies = [ [[package]] name = "turso_node" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "chrono", "napi", @@ -4988,7 +4988,7 @@ dependencies = [ [[package]] name = "turso_parser" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "bitflags 2.9.4", "criterion", @@ -5004,7 +5004,7 @@ dependencies = [ [[package]] name = "turso_sqlite3" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "env_logger 0.11.7", "libc", @@ -5017,7 +5017,7 @@ dependencies = [ [[package]] name = "turso_stress" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "anarchist-readable-name-generator-lib 0.1.2", "antithesis_sdk", @@ -5034,7 +5034,7 @@ dependencies = [ [[package]] name = "turso_sync_engine" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "base64 0.22.1", "bytes", @@ -5061,7 +5061,7 @@ dependencies = [ [[package]] name = "turso_sync_js" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "genawaiter", "napi", @@ -5076,7 +5076,7 @@ dependencies = [ [[package]] name = "turso_whopper" -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" dependencies = [ "anyhow", "clap", diff --git a/Cargo.toml b/Cargo.toml index b46ff6619..6c82c91d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,29 +39,29 @@ exclude = [ ] [workspace.package] -version = "0.3.0-pre.1" +version = "0.3.0-pre.2" authors = ["the Limbo authors"] edition = "2021" license = "MIT" repository = "https://github.com/tursodatabase/turso" [workspace.dependencies] -turso = { path = "bindings/rust", version = "0.3.0-pre.1" } -turso_node = { path = "bindings/javascript", version = "0.3.0-pre.1" } -limbo_completion = { path = "extensions/completion", version = "0.3.0-pre.1" } -turso_core = { path = "core", version = "0.3.0-pre.1" } -turso_sync_engine = { path = "sync/engine", version = "0.3.0-pre.1" } -limbo_crypto = { path = "extensions/crypto", version = "0.3.0-pre.1" } -limbo_csv = { path = "extensions/csv", version = "0.3.0-pre.1" } -turso_ext = { path = "extensions/core", version = "0.3.0-pre.1" } -turso_ext_tests = { path = "extensions/tests", version = "0.3.0-pre.1" } -limbo_ipaddr = { path = "extensions/ipaddr", version = "0.3.0-pre.1" } -turso_macros = { path = "macros", version = "0.3.0-pre.1" } -limbo_percentile = { path = "extensions/percentile", version = "0.3.0-pre.1" } -limbo_regexp = { path = "extensions/regexp", version = "0.3.0-pre.1" } -limbo_uuid = { path = "extensions/uuid", version = "0.3.0-pre.1" } -turso_parser = { path = "parser", version = "0.3.0-pre.1" } -limbo_fuzzy = { path = "extensions/fuzzy", version = "0.3.0-pre.1" } +turso = { path = "bindings/rust", version = "0.3.0-pre.2" } +turso_node = { path = "bindings/javascript", version = "0.3.0-pre.2" } +limbo_completion = { path = "extensions/completion", version = "0.3.0-pre.2" } +turso_core = { path = "core", version = "0.3.0-pre.2" } +turso_sync_engine = { path = "sync/engine", version = "0.3.0-pre.2" } +limbo_crypto = { path = "extensions/crypto", version = "0.3.0-pre.2" } +limbo_csv = { path = "extensions/csv", version = "0.3.0-pre.2" } +turso_ext = { path = "extensions/core", version = "0.3.0-pre.2" } +turso_ext_tests = { path = "extensions/tests", version = "0.3.0-pre.2" } +limbo_ipaddr = { path = "extensions/ipaddr", version = "0.3.0-pre.2" } +turso_macros = { path = "macros", version = "0.3.0-pre.2" } +limbo_percentile = { path = "extensions/percentile", version = "0.3.0-pre.2" } +limbo_regexp = { path = "extensions/regexp", version = "0.3.0-pre.2" } +limbo_uuid = { path = "extensions/uuid", version = "0.3.0-pre.2" } +turso_parser = { path = "parser", version = "0.3.0-pre.2" } +limbo_fuzzy = { path = "extensions/fuzzy", version = "0.3.0-pre.2" } sql_generation = { path = "sql_generation" } strum = { version = "0.26", features = ["derive"] } strum_macros = "0.26" diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json index 5a1659760..a07550035 100644 --- a/bindings/javascript/package-lock.json +++ b/bindings/javascript/package-lock.json @@ -1,11 +1,11 @@ { "name": "javascript", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "lockfileVersion": 3, "requires": true, "packages": { "": { - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "workspaces": [ "packages/common", "packages/wasm-common", @@ -3542,7 +3542,7 @@ }, "packages/common": { "name": "@tursodatabase/database-common", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "license": "MIT", "devDependencies": { "typescript": "^5.9.2", @@ -3551,10 +3551,10 @@ }, "packages/native": { "name": "@tursodatabase/database", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3568,11 +3568,11 @@ }, "packages/wasm": { "name": "@tursodatabase/database-wasm", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.2" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3585,7 +3585,7 @@ }, "packages/wasm-common": { "name": "@tursodatabase/database-wasm-common", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "license": "MIT", "dependencies": { "@napi-rs/wasm-runtime": "^1.0.5" @@ -3596,10 +3596,10 @@ }, "sync/packages/common": { "name": "@tursodatabase/sync-common", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2" }, "devDependencies": { "typescript": "^5.9.2" @@ -3607,11 +3607,11 @@ }, "sync/packages/native": { "name": "@tursodatabase/sync", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1", - "@tursodatabase/sync-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2", + "@tursodatabase/sync-common": "^0.3.0-pre.2" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3622,12 +3622,12 @@ }, "sync/packages/wasm": { "name": "@tursodatabase/sync-wasm", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.1", - "@tursodatabase/sync-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.2", + "@tursodatabase/sync-common": "^0.3.0-pre.2" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json index 0722f2c73..be0db69b9 100644 --- a/bindings/javascript/package.json +++ b/bindings/javascript/package.json @@ -14,5 +14,5 @@ "sync/packages/native", "sync/packages/wasm" ], - "version": "0.3.0-pre.1" + "version": "0.3.0-pre.2" } diff --git a/bindings/javascript/packages/common/package.json b/bindings/javascript/packages/common/package.json index 3f54dcf85..10b10fff2 100644 --- a/bindings/javascript/packages/common/package.json +++ b/bindings/javascript/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-common", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json index ada031000..4a90b7fbd 100644 --- a/bindings/javascript/packages/native/package.json +++ b/bindings/javascript/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -47,7 +47,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/packages/wasm-common/package.json b/bindings/javascript/packages/wasm-common/package.json index 9ff7af8d9..9761b71bd 100644 --- a/bindings/javascript/packages/wasm-common/package.json +++ b/bindings/javascript/packages/wasm-common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-wasm-common", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/wasm/package.json b/bindings/javascript/packages/wasm/package.json index 638366f51..3382d804b 100644 --- a/bindings/javascript/packages/wasm/package.json +++ b/bindings/javascript/packages/wasm/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-wasm", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -51,7 +51,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.2" } } diff --git a/bindings/javascript/sync/packages/common/package.json b/bindings/javascript/sync/packages/common/package.json index bf2b47219..deee9f3fc 100644 --- a/bindings/javascript/sync/packages/common/package.json +++ b/bindings/javascript/sync/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-common", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2" } } diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json index ac732db7d..f64edbefc 100644 --- a/bindings/javascript/sync/packages/native/package.json +++ b/bindings/javascript/sync/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -44,8 +44,8 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1", - "@tursodatabase/sync-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2", + "@tursodatabase/sync-common": "^0.3.0-pre.2" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/sync/packages/wasm/package.json b/bindings/javascript/sync/packages/wasm/package.json index 0f90be806..51e9b2d48 100644 --- a/bindings/javascript/sync/packages/wasm/package.json +++ b/bindings/javascript/sync/packages/wasm/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-wasm", - "version": "0.3.0-pre.1", + "version": "0.3.0-pre.2", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -54,8 +54,8 @@ "#index": "./index.js" }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.1", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.1", - "@tursodatabase/sync-common": "^0.3.0-pre.1" + "@tursodatabase/database-common": "^0.3.0-pre.2", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.2", + "@tursodatabase/sync-common": "^0.3.0-pre.2" } } diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock index dfff14ee3..02323dc19 100644 --- a/bindings/javascript/yarn.lock +++ b/bindings/javascript/yarn.lock @@ -1586,7 +1586,7 @@ __metadata: languageName: node linkType: hard -"@tursodatabase/database-common@npm:^0.3.0-pre.1, @tursodatabase/database-common@workspace:packages/common": +"@tursodatabase/database-common@npm:^0.3.0-pre.2, @tursodatabase/database-common@workspace:packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/database-common@workspace:packages/common" dependencies: @@ -1595,7 +1595,7 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/database-wasm-common@npm:^0.3.0-pre.1, @tursodatabase/database-wasm-common@workspace:packages/wasm-common": +"@tursodatabase/database-wasm-common@npm:^0.3.0-pre.2, @tursodatabase/database-wasm-common@workspace:packages/wasm-common": version: 0.0.0-use.local resolution: "@tursodatabase/database-wasm-common@workspace:packages/wasm-common" dependencies: @@ -1609,8 +1609,8 @@ __metadata: resolution: "@tursodatabase/database-wasm@workspace:packages/wasm" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.1" - "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.1" + "@tursodatabase/database-common": "npm:^0.3.0-pre.2" + "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.2" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1624,7 +1624,7 @@ __metadata: resolution: "@tursodatabase/database@workspace:packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.1" + "@tursodatabase/database-common": "npm:^0.3.0-pre.2" "@types/node": "npm:^24.3.1" better-sqlite3: "npm:^12.2.0" drizzle-kit: "npm:^0.31.4" @@ -1634,11 +1634,11 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/sync-common@npm:^0.3.0-pre.1, @tursodatabase/sync-common@workspace:sync/packages/common": +"@tursodatabase/sync-common@npm:^0.3.0-pre.2, @tursodatabase/sync-common@workspace:sync/packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/sync-common@workspace:sync/packages/common" dependencies: - "@tursodatabase/database-common": "npm:^0.3.0-pre.1" + "@tursodatabase/database-common": "npm:^0.3.0-pre.2" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1648,9 +1648,9 @@ __metadata: resolution: "@tursodatabase/sync-wasm@workspace:sync/packages/wasm" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.1" - "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.1" - "@tursodatabase/sync-common": "npm:^0.3.0-pre.1" + "@tursodatabase/database-common": "npm:^0.3.0-pre.2" + "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.2" + "@tursodatabase/sync-common": "npm:^0.3.0-pre.2" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1664,8 +1664,8 @@ __metadata: resolution: "@tursodatabase/sync@workspace:sync/packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.1" - "@tursodatabase/sync-common": "npm:^0.3.0-pre.1" + "@tursodatabase/database-common": "npm:^0.3.0-pre.2" + "@tursodatabase/sync-common": "npm:^0.3.0-pre.2" "@types/node": "npm:^24.3.1" typescript: "npm:^5.9.2" vitest: "npm:^3.2.4" @@ -2573,9 +2573,9 @@ __metadata: linkType: hard "exponential-backoff@npm:^3.1.1": - version: 3.1.2 - resolution: "exponential-backoff@npm:3.1.2" - checksum: 10c0/d9d3e1eafa21b78464297df91f1776f7fbaa3d5e3f7f0995648ca5b89c069d17055033817348d9f4a43d1c20b0eab84f75af6991751e839df53e4dfd6f22e844 + version: 3.1.3 + resolution: "exponential-backoff@npm:3.1.3" + checksum: 10c0/77e3ae682b7b1f4972f563c6dbcd2b0d54ac679e62d5d32f3e5085feba20483cf28bd505543f520e287a56d4d55a28d7874299941faf637e779a1aa5994d1267 languageName: node linkType: hard @@ -3089,8 +3089,8 @@ __metadata: linkType: hard "node-gyp@npm:latest": - version: 11.4.2 - resolution: "node-gyp@npm:11.4.2" + version: 11.5.0 + resolution: "node-gyp@npm:11.5.0" dependencies: env-paths: "npm:^2.2.0" exponential-backoff: "npm:^3.1.1" @@ -3104,7 +3104,7 @@ __metadata: which: "npm:^5.0.0" bin: node-gyp: bin/node-gyp.js - checksum: 10c0/0bfd3e96770ed70f07798d881dd37b4267708966d868a0e585986baac487d9cf5831285579fd629a83dc4e434f53e6416ce301097f2ee464cb74d377e4d8bdbe + checksum: 10c0/31ff49586991b38287bb15c3d529dd689cfc32f992eed9e6997b9d712d5d21fe818a8b1bbfe3b76a7e33765c20210c5713212f4aa329306a615b87d8a786da3a languageName: node linkType: hard From 56d570217675c1f729fb390b9a36bfc85bcd527e Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 12:05:53 +0300 Subject: [PATCH 282/428] Fix: rolling back tx should set autocommit to true Rolling back a transaction should result in `connection.auto_commit` being set back to true. Added a regression test for this where a UNIQUE constraint violation rolls back the transaction and trying to COMMIT will fail. Currently, our default conflict resolution strategy is ROLLBACK, which ends the transaction. In SQLite, the default is ABORT, which rolls back the current statement but allows the transaction to continue. We should migrate to default ABORT once we support subtransactions. --- core/vdbe/mod.rs | 1 + .../query_processing/test_transactions.rs | 32 +++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 584b62da4..962064fcd 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -992,6 +992,7 @@ impl Program { } } else { pager.rollback_tx(&self.connection); + self.connection.auto_commit.store(true, Ordering::SeqCst); } self.connection.set_tx_state(TransactionState::None); } diff --git a/tests/integration/query_processing/test_transactions.rs b/tests/integration/query_processing/test_transactions.rs index a96235153..36df15f9d 100644 --- a/tests/integration/query_processing/test_transactions.rs +++ b/tests/integration/query_processing/test_transactions.rs @@ -176,6 +176,38 @@ fn test_transaction_visibility() { } } +#[test] +/// Currently, our default conflict resolution strategy is ROLLBACK, which ends the transaction. +/// In SQLite, the default is ABORT, which rolls back the current statement but allows the transaction to continue. +/// We should migrate to default ABORT once we support subtransactions. +fn test_constraint_error_aborts_transaction() { + let tmp_db = TempDatabase::new("test_constraint_error_aborts_transaction.db", true); + let conn = tmp_db.connect_limbo(); + + // Create table succeeds + conn.execute("CREATE TABLE t (a INTEGER PRIMARY KEY)") + .unwrap(); + + // Begin succeeds + conn.execute("BEGIN").unwrap(); + + // First insert succeeds + conn.execute("INSERT INTO t VALUES (1),(2)").unwrap(); + + // Second insert fails due to UNIQUE constraint + let result = conn.execute("INSERT INTO t VALUES (2),(3)"); + assert!(matches!(result, Err(LimboError::Constraint(_)))); + + // Commit fails because the transaction was aborted by the constraint error + let result = conn.execute("COMMIT"); + assert!(matches!(result, Err(LimboError::TxError(_)))); + + // Make sure table is empty + let stmt = conn.query("SELECT COUNT(*) FROM t").unwrap().unwrap(); + let row = helper_read_single_row(stmt); + assert_eq!(row, vec![Value::Integer(0)]); +} + #[test] fn test_mvcc_transactions_autocommit() { let tmp_db = TempDatabase::new_with_opts( From 4de36d28e6b8c1a6d09932818956f1613fde3b49 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 14:00:26 +0300 Subject: [PATCH 283/428] deps: add tracing to rust bindings --- Cargo.lock | 2 ++ bindings/rust/Cargo.toml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 9e0a26c13..c39f5ddaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4828,6 +4828,8 @@ dependencies = [ "tempfile", "thiserror 2.0.16", "tokio", + "tracing", + "tracing-subscriber", "turso_core", ] diff --git a/bindings/rust/Cargo.toml b/bindings/rust/Cargo.toml index d799b5320..42bce00cd 100644 --- a/bindings/rust/Cargo.toml +++ b/bindings/rust/Cargo.toml @@ -19,6 +19,8 @@ tracing_release = ["turso_core/tracing_release"] [dependencies] turso_core = { workspace = true, features = ["io_uring"] } thiserror = { workspace = true } +tracing-subscriber.workspace = true +tracing.workspace = true [dev-dependencies] tempfile = { workspace = true } From 6f1bda14380816c4e4936def1d4a4cff698809ee Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 14:01:54 +0300 Subject: [PATCH 284/428] Instrument test_drop() with tracing --- bindings/rust/src/transaction.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/bindings/rust/src/transaction.rs b/bindings/rust/src/transaction.rs index b68cc1fab..6da5c133d 100644 --- a/bindings/rust/src/transaction.rs +++ b/bindings/rust/src/transaction.rs @@ -329,6 +329,7 @@ mod test { #[tokio::test] async fn test_drop() -> Result<()> { + let _ = tracing_subscriber::fmt::try_init(); let mut conn = checked_memory_handle().await?; { let tx = conn.transaction().await?; From 213af28cf3c24fb6d69be6b4f99e70facd546518 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 14:02:07 +0300 Subject: [PATCH 285/428] rust bindings: make Statement::query:row() finish execution otherwise the statement will be considered to be in progress, and its Drop implementation will roll back the transaction it is in. --- bindings/rust/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bindings/rust/src/lib.rs b/bindings/rust/src/lib.rs index 5d87ad5f0..94a6556c9 100644 --- a/bindings/rust/src/lib.rs +++ b/bindings/rust/src/lib.rs @@ -596,7 +596,11 @@ impl Statement { pub async fn query_row(&mut self, params: impl IntoParams) -> Result { let mut rows = self.query(params).await?; - rows.next().await?.ok_or(Error::QueryReturnedNoRows) + let first_row = rows.next().await?.ok_or(Error::QueryReturnedNoRows)?; + // Discard remaining rows so that the statement is executed to completion + // Otherwise Drop of the statement will cause transaction rollback + while rows.next().await?.is_some() {} + Ok(first_row) } } From e8e583ace650b4368e231c70c7f3c9857bfe7446 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 14:28:18 +0300 Subject: [PATCH 286/428] Default ON CONFLICT behavior should be ROLLBACK --- core/translate/insert.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index f989953f6..64bd29ed6 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -232,7 +232,7 @@ pub fn translate_insert( &table, &mut body, connection, - on_conflict.unwrap_or(ResolveType::Abort), + on_conflict.unwrap_or(ResolveType::Rollback), )?; if inserting_multiple_rows && btree_table.has_autoincrement { @@ -976,7 +976,10 @@ fn bind_insert( next: None, })); } - ResolveType::Abort => {} + ResolveType::Rollback => { + // This is the current default behavior for INSERT in tursodb - the transaction will be rolled back if the insert fails. + // In SQLite, the default is ABORT and we should use that one once we support subtransactions. + } _ => { crate::bail_parse_error!( "INSERT OR {} is only supported with UPSERT", From d77dd8400d948e1640e424105af18a334eb4aa04 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 15:42:51 +0300 Subject: [PATCH 287/428] bindings/rust: rollback dangling tx on next access of DB, instead of panicing Closes #3748 Right now if any error happens during an interactive tx that causes the `Transaction` to drop, the program will panic. I don't know how good this solution is, but we can at least prevent a panic by storing whether the connection has a dangling transaction and roll it back automatically the next time the connection tries to do something. --- bindings/rust/src/lib.rs | 27 +++++++++++ bindings/rust/src/transaction.rs | 77 +++++++++++++++++++++++++++----- 2 files changed, 94 insertions(+), 10 deletions(-) diff --git a/bindings/rust/src/lib.rs b/bindings/rust/src/lib.rs index 5d87ad5f0..4f35728dc 100644 --- a/bindings/rust/src/lib.rs +++ b/bindings/rust/src/lib.rs @@ -48,6 +48,8 @@ pub use params::IntoParams; use std::fmt::Debug; use std::future::Future; use std::num::NonZero; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; use std::task::Poll; pub use turso_core::EncryptionOpts; @@ -222,6 +224,11 @@ impl Database { pub struct Connection { inner: Arc>>, transaction_behavior: TransactionBehavior, + /// Whether there is a dangling transaction after it was dropped without being finished. + /// We cannot rollback the transaction on Drop because drop is not async. + /// Instead, we roll back the dangling transaction whenever a new transaction is requested + /// or the connection queries/executes. + dangling_tx: AtomicBool, } impl Clone for Connection { @@ -229,6 +236,7 @@ impl Clone for Connection { Self { inner: Arc::clone(&self.inner), transaction_behavior: self.transaction_behavior, + dangling_tx: AtomicBool::new(self.dangling_tx.load(Ordering::SeqCst)), } } } @@ -242,17 +250,34 @@ impl Connection { let connection = Connection { inner: Arc::new(Mutex::new(conn)), transaction_behavior: TransactionBehavior::Deferred, + dangling_tx: AtomicBool::new(false), }; connection } + + fn has_dangling_tx(&self) -> bool { + self.dangling_tx.load(Ordering::SeqCst) + } + + async fn maybe_rollback_dangling_tx(&self) -> Result<()> { + if self.has_dangling_tx() { + let mut stmt = self.prepare("ROLLBACK").await?; + stmt.execute(()).await?; + self.dangling_tx.store(false, Ordering::SeqCst); + } + Ok(()) + } + /// Query the database with SQL. pub async fn query(&self, sql: &str, params: impl IntoParams) -> Result { + self.maybe_rollback_dangling_tx().await?; let mut stmt = self.prepare(sql).await?; stmt.query(params).await } /// Execute SQL statement on the database. pub async fn execute(&self, sql: &str, params: impl IntoParams) -> Result { + self.maybe_rollback_dangling_tx().await?; let mut stmt = self.prepare(sql).await?; stmt.execute(params).await } @@ -337,6 +362,7 @@ impl Connection { /// Execute a batch of SQL statements on the database. pub async fn execute_batch(&self, sql: &str) -> Result<()> { + self.maybe_rollback_dangling_tx().await?; self.prepare_execute_batch(sql).await?; Ok(()) } @@ -358,6 +384,7 @@ impl Connection { } async fn prepare_execute_batch(&self, sql: impl AsRef) -> Result<()> { + self.maybe_rollback_dangling_tx().await?; let conn = self .inner .lock() diff --git a/bindings/rust/src/transaction.rs b/bindings/rust/src/transaction.rs index b68cc1fab..9780856a5 100644 --- a/bindings/rust/src/transaction.rs +++ b/bindings/rust/src/transaction.rs @@ -1,4 +1,4 @@ -use std::ops::Deref; +use std::{ops::Deref, sync::atomic::Ordering}; use crate::{Connection, Result}; @@ -63,7 +63,7 @@ pub enum DropBehavior { pub struct Transaction<'conn> { conn: &'conn Connection, drop_behavior: DropBehavior, - must_finish: bool, + in_progress: bool, } impl Transaction<'_> { @@ -99,7 +99,7 @@ impl Transaction<'_> { conn.execute(query, ()).await.map(move |_| Transaction { conn, drop_behavior: DropBehavior::Rollback, - must_finish: true, + in_progress: true, }) } @@ -126,7 +126,7 @@ impl Transaction<'_> { #[inline] async fn _commit(&mut self) -> Result<()> { - self.must_finish = false; + self.in_progress = false; self.conn.execute("COMMIT", ()).await?; Ok(()) } @@ -139,7 +139,7 @@ impl Transaction<'_> { #[inline] async fn _rollback(&mut self) -> Result<()> { - self.must_finish = false; + self.in_progress = false; self.conn.execute("ROLLBACK", ()).await?; Ok(()) } @@ -186,8 +186,10 @@ impl Deref for Transaction<'_> { impl Drop for Transaction<'_> { #[inline] fn drop(&mut self) { - if self.must_finish { - panic!("Transaction dropped without finish()") + if self.in_progress { + self.conn.dangling_tx.store(true, Ordering::SeqCst); + } else { + self.conn.dangling_tx.store(false, Ordering::SeqCst); } } } @@ -221,7 +223,8 @@ impl Connection { /// Will return `Err` if the call fails. #[inline] pub async fn transaction(&mut self) -> Result> { - Transaction::new(self, self.transaction_behavior).await + self.transaction_with_behavior(self.transaction_behavior) + .await } /// Begin a new transaction with a specified behavior. @@ -236,6 +239,7 @@ impl Connection { &mut self, behavior: TransactionBehavior, ) -> Result> { + self.maybe_rollback_dangling_tx().await?; Transaction::new(self, behavior).await } @@ -318,13 +322,66 @@ mod test { } #[tokio::test] - #[should_panic(expected = "Transaction dropped without finish()")] - async fn test_drop_panic() { + async fn test_drop_rollback_on_new_transaction() { let mut conn = checked_memory_handle().await.unwrap(); { let tx = conn.transaction().await.unwrap(); tx.execute("INSERT INTO foo VALUES(?)", &[1]).await.unwrap(); + // Drop without finish - should be rolled back when next transaction starts } + + // Start a new transaction - this should rollback the dangling one + let tx = conn.transaction().await.unwrap(); + tx.execute("INSERT INTO foo VALUES(?)", &[2]).await.unwrap(); + let result = tx + .prepare("SELECT SUM(x) FROM foo") + .await + .unwrap() + .query_row(()) + .await + .unwrap(); + + // The insert from the dropped transaction should have been rolled back + assert_eq!(2, result.get::(0).unwrap()); + tx.finish().await.unwrap(); + } + + #[tokio::test] + async fn test_drop_rollback_on_query() { + let mut conn = checked_memory_handle().await.unwrap(); + { + let tx = conn.transaction().await.unwrap(); + tx.execute("INSERT INTO foo VALUES(?)", &[1]).await.unwrap(); + // Drop without finish - should be rolled back when conn.query is called + } + + // Using conn.query should rollback the dangling transaction + let mut rows = conn.query("SELECT count(*) FROM foo", ()).await.unwrap(); + let result = rows.next().await.unwrap().unwrap(); + + // The insert from the dropped transaction should have been rolled back + assert_eq!(0, result.get::(0).unwrap()); + } + + #[tokio::test] + async fn test_drop_rollback_on_execute() { + let mut conn = checked_memory_handle().await.unwrap(); + { + let tx = conn.transaction().await.unwrap(); + tx.execute("INSERT INTO foo VALUES(?)", &[1]).await.unwrap(); + // Drop without finish - should be rolled back when conn.execute is called + } + + // Using conn.execute should rollback the dangling transaction + conn.execute("INSERT INTO foo VALUES(?)", &[2]) + .await + .unwrap(); + + let mut rows = conn.query("SELECT count(*) FROM foo", ()).await.unwrap(); + let result = rows.next().await.unwrap().unwrap(); + + // The insert from the dropped transaction should have been rolled back + assert_eq!(1, result.get::(0).unwrap()); } #[tokio::test] From 57eb63cee06f623585f56c8d494cf91f7d3c2698 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Thu, 16 Oct 2025 14:50:08 +0200 Subject: [PATCH 288/428] core/bree: remove duplicated code in BTreeCursor --- core/incremental/aggregate_operator.rs | 1 + core/incremental/compiler.rs | 2 +- core/incremental/cursor.rs | 2 +- core/incremental/join_operator.rs | 1 + .../mvcc/database/checkpoint_state_machine.rs | 2 +- core/storage/btree.rs | 716 +----------------- 6 files changed, 34 insertions(+), 690 deletions(-) diff --git a/core/incremental/aggregate_operator.rs b/core/incremental/aggregate_operator.rs index e577f05e2..e0ed53d9f 100644 --- a/core/incremental/aggregate_operator.rs +++ b/core/incremental/aggregate_operator.rs @@ -7,6 +7,7 @@ use crate::incremental::operator::{ generate_storage_id, ComputationTracker, DbspStateCursors, EvalState, IncrementalOperator, }; use crate::incremental::persistence::{ReadRecord, WriteRow}; +use crate::storage::btree::CursorTrait; use crate::types::{IOResult, ImmutableRecord, SeekKey, SeekOp, SeekResult, ValueRef}; use crate::{return_and_restore_if_io, return_if_io, LimboError, Result, Value}; use std::collections::{BTreeMap, HashMap, HashSet}; diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index b62795fab..98cc29896 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -12,7 +12,7 @@ use crate::incremental::operator::{ IncrementalOperator, InputOperator, JoinOperator, JoinType, ProjectOperator, }; use crate::schema::Type; -use crate::storage::btree::{BTreeCursor, BTreeKey}; +use crate::storage::btree::{BTreeCursor, BTreeKey, CursorTrait}; // Note: logical module must be made pub(crate) in translate/mod.rs use crate::translate::logical::{ BinaryOperator, Column, ColumnInfo, JoinType as LogicalJoinType, LogicalExpr, LogicalPlan, diff --git a/core/incremental/cursor.rs b/core/incremental/cursor.rs index 22070f29d..a33734c72 100644 --- a/core/incremental/cursor.rs +++ b/core/incremental/cursor.rs @@ -5,7 +5,7 @@ use crate::{ view::{IncrementalView, ViewTransactionState}, }, return_if_io, - storage::btree::BTreeCursor, + storage::btree::{BTreeCursor, CursorTrait}, types::{IOResult, SeekKey, SeekOp, SeekResult, Value}, LimboError, Pager, Result, }; diff --git a/core/incremental/join_operator.rs b/core/incremental/join_operator.rs index 722274559..982545ca9 100644 --- a/core/incremental/join_operator.rs +++ b/core/incremental/join_operator.rs @@ -6,6 +6,7 @@ use crate::incremental::operator::{ generate_storage_id, ComputationTracker, DbspStateCursors, EvalState, IncrementalOperator, }; use crate::incremental::persistence::WriteRow; +use crate::storage::btree::CursorTrait; use crate::types::{IOResult, ImmutableRecord, SeekKey, SeekOp, SeekResult}; use crate::{return_and_restore_if_io, return_if_io, Result, Value}; use std::sync::{Arc, Mutex}; diff --git a/core/mvcc/database/checkpoint_state_machine.rs b/core/mvcc/database/checkpoint_state_machine.rs index 16b5d3a4a..778431adc 100644 --- a/core/mvcc/database/checkpoint_state_machine.rs +++ b/core/mvcc/database/checkpoint_state_machine.rs @@ -4,7 +4,7 @@ use crate::mvcc::database::{ SQLITE_SCHEMA_MVCC_TABLE_ID, }; use crate::state_machine::{StateMachine, StateTransition, TransitionResult}; -use crate::storage::btree::BTreeCursor; +use crate::storage::btree::{BTreeCursor, CursorTrait}; use crate::storage::pager::CreateBTreeFlags; use crate::storage::wal::{CheckpointMode, TursoRwLock}; use crate::types::{IOCompletions, IOResult, ImmutableRecord, RecordCursor}; diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 633c97599..6b1441538 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -4452,562 +4452,6 @@ impl BTreeCursor { self.usable_space_cached } - #[instrument(skip(self), level = Level::DEBUG)] - pub fn rowid(&self) -> Result>> { - if let Some(mv_cursor) = &self.mv_cursor { - let mv_cursor = mv_cursor.write(); - let IOResult::Done(rowid) = mv_cursor.rowid()? else { - todo!() - }; - let Some(rowid) = rowid else { - return Ok(IOResult::Done(None)); - }; - return Ok(IOResult::Done(Some(rowid))); - } - if self.get_null_flag() { - return Ok(IOResult::Done(None)); - } - if self.has_record.get() { - let page = self.stack.top_ref(); - let contents = page.get_contents(); - let page_type = contents.page_type(); - if page_type.is_table() { - let cell_idx = self.stack.current_cell_index(); - let rowid = contents.cell_table_leaf_read_rowid(cell_idx as usize)?; - Ok(IOResult::Done(Some(rowid))) - } else { - let _ = return_if_io!(self.record()); - Ok(IOResult::Done(self.get_index_rowid_from_record())) - } - } else { - Ok(IOResult::Done(None)) - } - } - - #[instrument(skip(self, key), level = Level::DEBUG)] - pub fn seek(&mut self, key: SeekKey<'_>, op: SeekOp) -> Result> { - if let Some(mv_cursor) = &self.mv_cursor { - let mut mv_cursor = mv_cursor.write(); - return mv_cursor.seek(key, op); - } - self.skip_advance.set(false); - // Empty trace to capture the span information - tracing::trace!(""); - // We need to clear the null flag for the table cursor before seeking, - // because it might have been set to false by an unmatched left-join row during the previous iteration - // on the outer loop. - self.set_null_flag(false); - let seek_result = return_if_io!(self.do_seek(key, op)); - self.invalidate_record(); - // Reset seek state - self.seek_state = CursorSeekState::Start; - self.valid_state = CursorValidState::Valid; - Ok(IOResult::Done(seek_result)) - } - - /// Return a reference to the record the cursor is currently pointing to. - /// If record was not parsed yet, then we have to parse it and in case of I/O we yield control - /// back. - #[instrument(skip(self), level = Level::DEBUG)] - pub fn record(&self) -> Result>>> { - if !self.has_record.get() && self.mv_cursor.is_none() { - return Ok(IOResult::Done(None)); - } - let invalidated = self - .reusable_immutable_record - .borrow() - .as_ref() - .is_none_or(|record| record.is_invalidated()); - if !invalidated { - let record_ref = - Ref::filter_map(self.reusable_immutable_record.borrow(), |opt| opt.as_ref()) - .unwrap(); - return Ok(IOResult::Done(Some(record_ref))); - } - if let Some(mv_cursor) = &self.mv_cursor { - let mv_cursor = mv_cursor.write(); - let Some(row) = mv_cursor.current_row()? else { - return Ok(IOResult::Done(None)); - }; - self.get_immutable_record_or_create() - .as_mut() - .unwrap() - .invalidate(); - self.get_immutable_record_or_create() - .as_mut() - .unwrap() - .start_serialization(&row.data); - self.record_cursor.borrow_mut().invalidate(); - let record_ref = - Ref::filter_map(self.reusable_immutable_record.borrow(), |opt| opt.as_ref()) - .unwrap(); - return Ok(IOResult::Done(Some(record_ref))); - } - - let page = self.stack.top_ref(); - let contents = page.get_contents(); - let cell_idx = self.stack.current_cell_index(); - let cell = contents.cell_get(cell_idx as usize, self.usable_space())?; - let (payload, payload_size, first_overflow_page) = match cell { - BTreeCell::TableLeafCell(TableLeafCell { - payload, - payload_size, - first_overflow_page, - .. - }) => (payload, payload_size, first_overflow_page), - BTreeCell::IndexInteriorCell(IndexInteriorCell { - payload, - payload_size, - first_overflow_page, - .. - }) => (payload, payload_size, first_overflow_page), - BTreeCell::IndexLeafCell(IndexLeafCell { - payload, - first_overflow_page, - payload_size, - }) => (payload, payload_size, first_overflow_page), - _ => unreachable!("unexpected page_type"), - }; - if let Some(next_page) = first_overflow_page { - return_if_io!(self.process_overflow_read(payload, next_page, payload_size)) - } else { - self.get_immutable_record_or_create() - .as_mut() - .unwrap() - .invalidate(); - self.get_immutable_record_or_create() - .as_mut() - .unwrap() - .start_serialization(payload); - self.record_cursor.borrow_mut().invalidate(); - }; - - let record_ref = - Ref::filter_map(self.reusable_immutable_record.borrow(), |opt| opt.as_ref()).unwrap(); - Ok(IOResult::Done(Some(record_ref))) - } - - #[instrument(skip_all, level = Level::DEBUG)] - pub fn insert(&mut self, key: &BTreeKey) -> Result> { - tracing::debug!(valid_state = ?self.valid_state, cursor_state = ?self.state, is_write_in_progress = self.is_write_in_progress()); - match &self.mv_cursor { - Some(mv_cursor) => { - return_if_io!(mv_cursor.write().insert(key)); - } - None => { - return_if_io!(self.insert_into_page(key)); - if key.maybe_rowid().is_some() { - self.has_record.replace(true); - } - } - }; - Ok(IOResult::Done(())) - } - - /// Delete state machine flow: - /// 1. Start -> check if the rowid to be delete is present in the page or not. If not we early return - /// 2. DeterminePostBalancingSeekKey -> determine the key to seek to after balancing. - /// 3. LoadPage -> load the page. - /// 4. FindCell -> find the cell to be deleted in the page. - /// 5. ClearOverflowPages -> Clear the overflow pages if there are any before dropping the cell, then if we are in a leaf page we just drop the cell in place. - /// if we are in interior page, we need to rotate keys in order to replace current cell (InteriorNodeReplacement). - /// 6. InteriorNodeReplacement -> we copy the left subtree leaf node into the deleted interior node's place. - /// 7. Balancing -> perform balancing - /// 8. PostInteriorNodeReplacement -> if an interior node was replaced, we need to advance the cursor once. - /// 9. SeekAfterBalancing -> adjust the cursor to a node that is closer to the deleted value. go to Finish - /// 10. Finish -> Delete operation is done. Return CursorResult(Ok()) - #[instrument(skip(self), level = Level::DEBUG)] - pub fn delete(&mut self) -> Result> { - if let Some(mv_cursor) = &self.mv_cursor { - return_if_io!(mv_cursor.write().delete()); - return Ok(IOResult::Done(())); - } - - if let CursorState::None = &self.state { - self.state = CursorState::Delete(DeleteState::Start); - } - - loop { - let usable_space = self.usable_space(); - let delete_state = match &mut self.state { - CursorState::Delete(x) => x, - _ => unreachable!("expected delete state"), - }; - tracing::debug!(?delete_state); - - match delete_state { - DeleteState::Start => { - let page = self.stack.top_ref(); - self.pager.add_dirty(page); - if matches!( - page.get_contents().page_type(), - PageType::TableLeaf | PageType::TableInterior - ) { - if return_if_io!(self.rowid()).is_none() { - self.state = CursorState::None; - return Ok(IOResult::Done(())); - } - } else if self.reusable_immutable_record.borrow().is_none() { - self.state = CursorState::None; - return Ok(IOResult::Done(())); - } - - self.state = CursorState::Delete(DeleteState::DeterminePostBalancingSeekKey); - } - - DeleteState::DeterminePostBalancingSeekKey => { - // FIXME: skip this work if we determine deletion wont result in balancing - // Right now we calculate the key every time for simplicity/debugging - // since it won't affect correctness which is more important - let page = self.stack.top_ref(); - let target_key = if page.is_index() { - let record = match return_if_io!(self.record()) { - Some(record) => record.clone(), - None => unreachable!("there should've been a record"), - }; - CursorContext { - key: CursorContextKey::IndexKeyRowId(record), - seek_op: SeekOp::GE { eq_only: true }, - } - } else { - let Some(rowid) = return_if_io!(self.rowid()) else { - panic!("cursor should be pointing to a record with a rowid"); - }; - CursorContext { - key: CursorContextKey::TableRowId(rowid), - seek_op: SeekOp::GE { eq_only: true }, - } - }; - - self.state = CursorState::Delete(DeleteState::LoadPage { - post_balancing_seek_key: Some(target_key), - }); - } - - DeleteState::LoadPage { - post_balancing_seek_key, - } => { - self.state = CursorState::Delete(DeleteState::FindCell { - post_balancing_seek_key: post_balancing_seek_key.take(), - }); - } - - DeleteState::FindCell { - post_balancing_seek_key, - } => { - let page = self.stack.top_ref(); - let cell_idx = self.stack.current_cell_index() as usize; - let contents = page.get_contents(); - if cell_idx >= contents.cell_count() { - return_corrupt!(format!( - "Corrupted page: cell index {} is out of bounds for page with {} cells", - cell_idx, - contents.cell_count() - )); - } - - tracing::debug!( - "DeleteState::FindCell: page_id: {}, cell_idx: {}", - page.get().id, - cell_idx - ); - - let cell = contents.cell_get(cell_idx, usable_space)?; - - let original_child_pointer = match &cell { - BTreeCell::TableInteriorCell(interior) => Some(interior.left_child_page), - BTreeCell::IndexInteriorCell(interior) => Some(interior.left_child_page), - _ => None, - }; - - self.state = CursorState::Delete(DeleteState::ClearOverflowPages { - cell_idx, - cell, - original_child_pointer, - post_balancing_seek_key: post_balancing_seek_key.take(), - }); - } - - DeleteState::ClearOverflowPages { cell, .. } => { - let cell = cell.clone(); - return_if_io!(self.clear_overflow_pages(&cell)); - - let CursorState::Delete(DeleteState::ClearOverflowPages { - cell_idx, - original_child_pointer, - ref mut post_balancing_seek_key, - .. - }) = self.state - else { - unreachable!("expected clear overflow pages state"); - }; - - let page = self.stack.top_ref(); - let contents = page.get_contents(); - - if !contents.is_leaf() { - self.state = CursorState::Delete(DeleteState::InteriorNodeReplacement { - page: page.clone(), - btree_depth: self.stack.current(), - cell_idx, - original_child_pointer, - post_balancing_seek_key: post_balancing_seek_key.take(), - }); - } else { - drop_cell(contents, cell_idx, usable_space)?; - - self.state = CursorState::Delete(DeleteState::CheckNeedsBalancing { - btree_depth: self.stack.current(), - post_balancing_seek_key: post_balancing_seek_key.take(), - interior_node_was_replaced: false, - }); - } - } - - DeleteState::InteriorNodeReplacement { .. } => { - // This is an interior node, we need to handle deletion differently. - // 1. Move cursor to the largest key in the left subtree. - // 2. Replace the cell in the interior (parent) node with that key. - // 3. Delete that key from the child page. - - // Step 1: Move cursor to the largest key in the left subtree. - // The largest key is always in a leaf, and so this traversal may involvegoing multiple pages downwards, - // so we store the page we are currently on. - - // avoid calling prev() because it internally calls restore_context() which may cause unintended behavior. - return_if_io!(self.get_prev_record()); - - let CursorState::Delete(DeleteState::InteriorNodeReplacement { - ref page, - btree_depth, - cell_idx, - original_child_pointer, - ref mut post_balancing_seek_key, - .. - }) = self.state - else { - unreachable!("expected interior node replacement state"); - }; - - // Ensure we keep the parent page at the same position as before the replacement. - self.stack - .node_states - .get_mut(btree_depth) - .expect("parent page should be on the stack") - .cell_idx = cell_idx as i32; - let (cell_payload, leaf_cell_idx) = { - let leaf_page = self.stack.top_ref(); - let leaf_contents = leaf_page.get_contents(); - assert!(leaf_contents.is_leaf()); - assert!(leaf_contents.cell_count() > 0); - let leaf_cell_idx = leaf_contents.cell_count() - 1; - let last_cell_on_child_page = - leaf_contents.cell_get(leaf_cell_idx, usable_space)?; - - let mut cell_payload: Vec = Vec::new(); - let child_pointer = - original_child_pointer.expect("there should be a pointer"); - // Rewrite the old leaf cell as an interior cell depending on type. - match last_cell_on_child_page { - BTreeCell::TableLeafCell(leaf_cell) => { - // Table interior cells contain the left child pointer and the rowid as varint. - cell_payload.extend_from_slice(&child_pointer.to_be_bytes()); - write_varint_to_vec(leaf_cell.rowid as u64, &mut cell_payload); - } - BTreeCell::IndexLeafCell(leaf_cell) => { - // Index interior cells contain: - // 1. The left child pointer - // 2. The payload size as varint - // 3. The payload - // 4. The first overflow page as varint, omitted if no overflow. - cell_payload.extend_from_slice(&child_pointer.to_be_bytes()); - write_varint_to_vec(leaf_cell.payload_size, &mut cell_payload); - cell_payload.extend_from_slice(leaf_cell.payload); - if let Some(first_overflow_page) = leaf_cell.first_overflow_page { - cell_payload - .extend_from_slice(&first_overflow_page.to_be_bytes()); - } - } - _ => unreachable!("Expected table leaf cell"), - } - (cell_payload, leaf_cell_idx) - }; - - let leaf_page = self.stack.top_ref(); - - self.pager.add_dirty(page); - self.pager.add_dirty(leaf_page); - - // Step 2: Replace the cell in the parent (interior) page. - { - let parent_contents = page.get_contents(); - let parent_page_id = page.get().id; - let left_child_page = u32::from_be_bytes( - cell_payload[..4].try_into().expect("invalid cell payload"), - ); - turso_assert!( - left_child_page as usize != parent_page_id, - "corrupt: current page and left child page of cell {} are both {}", - left_child_page, - parent_page_id - ); - - // First, drop the old cell that is being replaced. - drop_cell(parent_contents, cell_idx, usable_space)?; - // Then, insert the new cell (the predecessor) in its place. - insert_into_cell(parent_contents, &cell_payload, cell_idx, usable_space)?; - } - - // Step 3: Delete the predecessor cell from the leaf page. - { - let leaf_contents = leaf_page.get_contents(); - drop_cell(leaf_contents, leaf_cell_idx, usable_space)?; - } - - self.state = CursorState::Delete(DeleteState::CheckNeedsBalancing { - btree_depth, - post_balancing_seek_key: post_balancing_seek_key.take(), - interior_node_was_replaced: true, - }); - } - - DeleteState::CheckNeedsBalancing { btree_depth, .. } => { - let page = self.stack.top_ref(); - // Check if either the leaf page we took the replacement cell from underflows, or if the interior page we inserted it into overflows OR underflows. - // If the latter is true, we must always balance that level regardless of whether the leaf page (or any ancestor pages in between) need balancing. - - let leaf_underflows = { - let leaf_contents = page.get_contents(); - let free_space = compute_free_space(leaf_contents, usable_space); - free_space * 3 > usable_space * 2 - }; - - let interior_overflows_or_underflows = { - // Invariant: ancestor pages on the stack are pinned to the page cache, - // so we don't need return_if_locked_maybe_load! any ancestor, - // and we already loaded the current page above. - let interior_page = self - .stack - .get_page_at_level(*btree_depth) - .expect("ancestor page should be on the stack"); - let interior_contents = interior_page.get_contents(); - let overflows = !interior_contents.overflow_cells.is_empty(); - if overflows { - true - } else { - let free_space = compute_free_space(interior_contents, usable_space); - free_space * 3 > usable_space * 2 - } - }; - - let needs_balancing = leaf_underflows || interior_overflows_or_underflows; - - let CursorState::Delete(DeleteState::CheckNeedsBalancing { - btree_depth, - ref mut post_balancing_seek_key, - interior_node_was_replaced, - .. - }) = self.state - else { - unreachable!("expected check needs balancing state"); - }; - - if needs_balancing { - let balance_only_ancestor = - !leaf_underflows && interior_overflows_or_underflows; - if balance_only_ancestor { - // Only need to balance the ancestor page; move there immediately. - while self.stack.current() > btree_depth { - self.stack.pop(); - } - } - let balance_both = leaf_underflows && interior_overflows_or_underflows; - assert!(matches!(self.balance_state.sub_state, BalanceSubState::Start), "There should be no balancing operation in progress when delete state is {:?}, got: {:?}", self.state, self.balance_state.sub_state); - let post_balancing_seek_key = post_balancing_seek_key - .take() - .expect("post_balancing_seek_key should be Some"); - self.save_context(post_balancing_seek_key); - self.state = CursorState::Delete(DeleteState::Balancing { - balance_ancestor_at_depth: if balance_both { - Some(btree_depth) - } else { - None - }, - }); - } else { - // No balancing needed. - if interior_node_was_replaced { - // If we did replace an interior node, we need to advance the cursor once to - // get back at the interior node that now has the replaced content. - // The reason it is important to land here is that the replaced cell was smaller (LT) than the deleted cell, - // so we must ensure we skip over it. I.e., when BTreeCursor::next() is called, it will move past the cell - // that holds the replaced content. - self.state = - CursorState::Delete(DeleteState::PostInteriorNodeReplacement); - } else { - // If we didn't replace an interior node, we are done, - // except we need to retreat, so that the next call to BTreeCursor::next() lands at the next record (because we deleted the current one) - self.stack.retreat(); - self.state = CursorState::None; - return Ok(IOResult::Done(())); - } - } - } - DeleteState::PostInteriorNodeReplacement => { - return_if_io!(self.get_next_record()); - self.state = CursorState::None; - return Ok(IOResult::Done(())); - } - - DeleteState::Balancing { - balance_ancestor_at_depth, - } => { - let balance_ancestor_at_depth = *balance_ancestor_at_depth; - return_if_io!(self.balance(balance_ancestor_at_depth)); - self.state = CursorState::Delete(DeleteState::RestoreContextAfterBalancing); - } - DeleteState::RestoreContextAfterBalancing => { - return_if_io!(self.restore_context()); - - // We deleted key K, and performed a seek to: GE { eq_only: true } K. - // This means that the cursor is now pointing to the next key after K. - // We need to make the next call to BTreeCursor::next() a no-op so that we don't skip over - // a row when deleting rows in a loop. - self.skip_advance.set(true); - self.state = CursorState::None; - return Ok(IOResult::Done(())); - } - } - } - } - - /// In outer joins, whenever the right-side table has no matching row, the query must still return a row - /// for each left-side row. In order to achieve this, we set the null flag on the right-side table cursor - /// so that it returns NULL for all columns until cleared. - #[inline(always)] - pub fn set_null_flag(&mut self, flag: bool) { - self.null_flag = flag; - } - - #[inline(always)] - pub fn get_null_flag(&self) -> bool { - self.null_flag - } - - #[instrument(skip_all, level = Level::DEBUG)] - pub fn exists(&mut self, key: &Value) -> Result> { - assert!(self.mv_cursor.is_none()); - let int_key = match key { - Value::Integer(i) => i, - _ => unreachable!("btree tables are indexed by integers!"), - }; - let seek_result = - return_if_io!(self.seek(SeekKey::TableRowId(*int_key), SeekOp::GE { eq_only: true })); - let exists = matches!(seek_result, SeekResult::Found); - self.invalidate_record(); - Ok(IOResult::Done(exists)) - } - /// Clear the overflow pages linked to a specific page provided by the leaf cell /// Uses a state machine to keep track of it's operations so that traversal can be /// resumed from last point after IO interruption @@ -5088,26 +4532,6 @@ impl BTreeCursor { } } - /// Deletes all content from the B-Tree but preserves the root page. - /// - /// Unlike [`btree_destroy`], which frees all pages including the root, - /// this method only clears the tree’s contents. The root page remains - /// allocated and is reset to an empty leaf page. - pub fn clear_btree(&mut self) -> Result>> { - self.destroy_btree_contents(true) - } - - /// Destroys the entire B-Tree, including the root page. - /// - /// All pages belonging to the tree are freed, leaving no trace of the B-Tree. - /// Use this when the structure itself is no longer needed. - /// - /// For cases where the B-Tree should remain allocated but emptied, see [`btree_clear`]. - #[instrument(skip(self), level = Level::DEBUG)] - pub fn btree_destroy(&mut self) -> Result>> { - self.destroy_btree_contents(false) - } - /// Deletes all contents of the B-tree by freeing all its pages in an iterative depth-first order. /// This ensures child pages are freed before their parents /// Uses a state machine to keep track of the operation to ensure IO doesn't cause repeated traversals @@ -5431,117 +4855,6 @@ impl BTreeCursor { matches!(self.state, CursorState::Write(_)) } - /// Count the number of entries in the b-tree - /// - /// Only supposed to be used in the context of a simple Count Select Statement - #[instrument(skip(self), level = Level::DEBUG)] - pub fn count(&mut self) -> Result> { - if let Some(_mv_cursor) = &self.mv_cursor { - todo!("Implement count for mvcc"); - } - - let mut mem_page; - let mut contents; - - 'outer: loop { - let state = self.count_state; - match state { - CountState::Start => { - let c = self.move_to_root()?; - self.count_state = CountState::Loop; - if let Some(c) = c { - io_yield_one!(c); - } - } - CountState::Loop => { - self.stack.advance(); - mem_page = self.stack.top_ref(); - contents = mem_page.get_contents(); - - /* If this is a leaf page or the tree is not an int-key tree, then - ** this page contains countable entries. Increment the entry counter - ** accordingly. - */ - if !matches!(contents.page_type(), PageType::TableInterior) { - self.count += contents.cell_count(); - } - - let cell_idx = self.stack.current_cell_index() as usize; - - // Second condition is necessary in case we return if the page is locked in the loop below - if contents.is_leaf() || cell_idx > contents.cell_count() { - loop { - if !self.stack.has_parent() { - // All pages of the b-tree have been visited. Return successfully - let c = self.move_to_root()?; - self.count_state = CountState::Finish; - if let Some(c) = c { - io_yield_one!(c); - } - continue 'outer; - } - - // Move to parent - self.stack.pop(); - - mem_page = self.stack.top_ref(); - turso_assert!(mem_page.is_loaded(), "page should be loaded"); - contents = mem_page.get_contents(); - - let cell_idx = self.stack.current_cell_index() as usize; - - if cell_idx <= contents.cell_count() { - break; - } - } - } - - let cell_idx = self.stack.current_cell_index() as usize; - - assert!(cell_idx <= contents.cell_count(),); - assert!(!contents.is_leaf()); - - if cell_idx == contents.cell_count() { - // Move to right child - // should be safe as contents is not a leaf page - let right_most_pointer = contents.rightmost_pointer().unwrap(); - self.stack.advance(); - let (mem_page, c) = self.read_page(right_most_pointer as i64)?; - self.stack.push(mem_page); - if let Some(c) = c { - io_yield_one!(c); - } - } else { - // Move to child left page - let cell = contents.cell_get(cell_idx, self.usable_space())?; - - match cell { - BTreeCell::TableInteriorCell(TableInteriorCell { - left_child_page, - .. - }) - | BTreeCell::IndexInteriorCell(IndexInteriorCell { - left_child_page, - .. - }) => { - self.stack.advance(); - let (mem_page, c) = self.read_page(left_child_page as i64)?; - self.stack.push(mem_page); - if let Some(c) = c { - io_yield_one!(c); - } - } - _ => unreachable!(), - } - } - } - CountState::Finish => { - return Ok(IOResult::Done(self.count)); - } - } - } - } - // Save cursor context, to be restored later pub fn save_context(&mut self, cursor_context: CursorContext) { self.valid_state = CursorValidState::RequireSeek; @@ -5817,6 +5130,18 @@ impl CursorTrait for BTreeCursor { } #[instrument(skip(self), level = Level::DEBUG)] + /// Delete state machine flow: + /// 1. Start -> check if the rowid to be delete is present in the page or not. If not we early return + /// 2. DeterminePostBalancingSeekKey -> determine the key to seek to after balancing. + /// 3. LoadPage -> load the page. + /// 4. FindCell -> find the cell to be deleted in the page. + /// 5. ClearOverflowPages -> Clear the overflow pages if there are any before dropping the cell, then if we are in a leaf page we just drop the cell in place. + /// if we are in interior page, we need to rotate keys in order to replace current cell (InteriorNodeReplacement). + /// 6. InteriorNodeReplacement -> we copy the left subtree leaf node into the deleted interior node's place. + /// 7. Balancing -> perform balancing + /// 8. PostInteriorNodeReplacement -> if an interior node was replaced, we need to advance the cursor once. + /// 9. SeekAfterBalancing -> adjust the cursor to a node that is closer to the deleted value. go to Finish + /// 10. Finish -> Delete operation is done. Return CursorResult(Ok()) fn delete(&mut self) -> Result> { if let Some(mv_cursor) = &self.mv_cursor { return_if_io!(mv_cursor.write().delete()); @@ -6182,6 +5507,9 @@ impl CursorTrait for BTreeCursor { } #[inline(always)] + /// In outer joins, whenever the right-side table has no matching row, the query must still return a row + /// for each left-side row. In order to achieve this, we set the null flag on the right-side table cursor + /// so that it returns NULL for all columns until cleared. fn set_null_flag(&mut self, flag: bool) { self.null_flag = flag; } @@ -6205,16 +5533,30 @@ impl CursorTrait for BTreeCursor { Ok(IOResult::Done(exists)) } + /// Deletes all content from the B-Tree but preserves the root page. + /// + /// Unlike [`btree_destroy`], which frees all pages including the root, + /// this method only clears the tree’s contents. The root page remains + /// allocated and is reset to an empty leaf page. fn clear_btree(&mut self) -> Result>> { self.destroy_btree_contents(true) } + /// Destroys the entire B-Tree, including the root page. + /// + /// All pages belonging to the tree are freed, leaving no trace of the B-Tree. + /// Use this when the structure itself is no longer needed. + /// + /// For cases where the B-Tree should remain allocated but emptied, see [`btree_clear`]. #[instrument(skip(self), level = Level::DEBUG)] fn btree_destroy(&mut self) -> Result>> { self.destroy_btree_contents(false) } #[instrument(skip(self), level = Level::DEBUG)] + /// Count the number of entries in the b-tree + /// + /// Only supposed to be used in the context of a simple Count Select Statement fn count(&mut self) -> Result> { if let Some(_mv_cursor) = &self.mv_cursor { todo!("Implement count for mvcc"); From 455f0fbc461a4fa222b9387da34482af26e8fd9f Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 15:53:58 +0300 Subject: [PATCH 289/428] Set in_progress to false AFTER executing the statement --- bindings/rust/src/transaction.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bindings/rust/src/transaction.rs b/bindings/rust/src/transaction.rs index 9780856a5..c265a0de8 100644 --- a/bindings/rust/src/transaction.rs +++ b/bindings/rust/src/transaction.rs @@ -126,8 +126,8 @@ impl Transaction<'_> { #[inline] async fn _commit(&mut self) -> Result<()> { - self.in_progress = false; self.conn.execute("COMMIT", ()).await?; + self.in_progress = false; Ok(()) } @@ -139,8 +139,8 @@ impl Transaction<'_> { #[inline] async fn _rollback(&mut self) -> Result<()> { - self.in_progress = false; self.conn.execute("ROLLBACK", ()).await?; + self.in_progress = false; Ok(()) } From 392932206182f8ae8214e275bd7c1330b91b985b Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 16:38:42 +0300 Subject: [PATCH 290/428] Propagate DropBehavior to dangling_tx so DropBehavior makes sense --- bindings/rust/src/lib.rs | 74 +++++++++++++++++++++++--------- bindings/rust/src/transaction.rs | 36 +++++++++++++--- 2 files changed, 84 insertions(+), 26 deletions(-) diff --git a/bindings/rust/src/lib.rs b/bindings/rust/src/lib.rs index 4f35728dc..491acb926 100644 --- a/bindings/rust/src/lib.rs +++ b/bindings/rust/src/lib.rs @@ -48,7 +48,7 @@ pub use params::IntoParams; use std::fmt::Debug; use std::future::Future; use std::num::NonZero; -use std::sync::atomic::AtomicBool; +use std::sync::atomic::AtomicU8; use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; use std::task::Poll; @@ -57,6 +57,7 @@ use turso_core::OpenFlags; // Re-exports rows pub use crate::rows::{Row, Rows}; +use crate::transaction::DropBehavior; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -220,15 +221,39 @@ impl Database { } } +/// Atomic wrapper for [DropBehavior] +struct AtomicDropBehavior { + inner: AtomicU8, +} + +impl AtomicDropBehavior { + fn new(behavior: DropBehavior) -> Self { + Self { + inner: AtomicU8::new(behavior.into()), + } + } + + fn load(&self, ordering: Ordering) -> DropBehavior { + self.inner.load(ordering).into() + } + + fn store(&self, behavior: DropBehavior, ordering: Ordering) { + self.inner.store(behavior.into(), ordering); + } +} + /// A database connection. pub struct Connection { inner: Arc>>, transaction_behavior: TransactionBehavior, - /// Whether there is a dangling transaction after it was dropped without being finished. - /// We cannot rollback the transaction on Drop because drop is not async. - /// Instead, we roll back the dangling transaction whenever a new transaction is requested + /// If there is a dangling transaction after it was dropped without being finished, + /// [Connection::dangling_tx] will be set to the [DropBehavior] of the dangling transaction, + /// and the corresponding action will be taken when a new transaction is requested /// or the connection queries/executes. - dangling_tx: AtomicBool, + /// We cannot do this eagerly on Drop because drop is not async. + /// + /// By default, the value is [DropBehavior::Ignore] which effectively does nothing. + dangling_tx: AtomicDropBehavior, } impl Clone for Connection { @@ -236,7 +261,7 @@ impl Clone for Connection { Self { inner: Arc::clone(&self.inner), transaction_behavior: self.transaction_behavior, - dangling_tx: AtomicBool::new(self.dangling_tx.load(Ordering::SeqCst)), + dangling_tx: AtomicDropBehavior::new(self.dangling_tx.load(Ordering::SeqCst)), } } } @@ -250,34 +275,43 @@ impl Connection { let connection = Connection { inner: Arc::new(Mutex::new(conn)), transaction_behavior: TransactionBehavior::Deferred, - dangling_tx: AtomicBool::new(false), + dangling_tx: AtomicDropBehavior::new(DropBehavior::Ignore), }; connection } - fn has_dangling_tx(&self) -> bool { - self.dangling_tx.load(Ordering::SeqCst) - } - - async fn maybe_rollback_dangling_tx(&self) -> Result<()> { - if self.has_dangling_tx() { - let mut stmt = self.prepare("ROLLBACK").await?; - stmt.execute(()).await?; - self.dangling_tx.store(false, Ordering::SeqCst); + async fn maybe_handle_dangling_tx(&self) -> Result<()> { + match self.dangling_tx.load(Ordering::SeqCst) { + DropBehavior::Rollback => { + let mut stmt = self.prepare("ROLLBACK").await?; + stmt.execute(()).await?; + self.dangling_tx + .store(DropBehavior::Ignore, Ordering::SeqCst); + } + DropBehavior::Commit => { + let mut stmt = self.prepare("COMMIT").await?; + stmt.execute(()).await?; + self.dangling_tx + .store(DropBehavior::Ignore, Ordering::SeqCst); + } + DropBehavior::Ignore => {} + DropBehavior::Panic => { + panic!("Transaction dropped unexpectedly."); + } } Ok(()) } /// Query the database with SQL. pub async fn query(&self, sql: &str, params: impl IntoParams) -> Result { - self.maybe_rollback_dangling_tx().await?; + self.maybe_handle_dangling_tx().await?; let mut stmt = self.prepare(sql).await?; stmt.query(params).await } /// Execute SQL statement on the database. pub async fn execute(&self, sql: &str, params: impl IntoParams) -> Result { - self.maybe_rollback_dangling_tx().await?; + self.maybe_handle_dangling_tx().await?; let mut stmt = self.prepare(sql).await?; stmt.execute(params).await } @@ -362,7 +396,7 @@ impl Connection { /// Execute a batch of SQL statements on the database. pub async fn execute_batch(&self, sql: &str) -> Result<()> { - self.maybe_rollback_dangling_tx().await?; + self.maybe_handle_dangling_tx().await?; self.prepare_execute_batch(sql).await?; Ok(()) } @@ -384,7 +418,7 @@ impl Connection { } async fn prepare_execute_batch(&self, sql: impl AsRef) -> Result<()> { - self.maybe_rollback_dangling_tx().await?; + self.maybe_handle_dangling_tx().await?; let conn = self .inner .lock() diff --git a/bindings/rust/src/transaction.rs b/bindings/rust/src/transaction.rs index c265a0de8..75d0b69c5 100644 --- a/bindings/rust/src/transaction.rs +++ b/bindings/rust/src/transaction.rs @@ -36,6 +36,29 @@ pub enum DropBehavior { Panic, } +impl From for u8 { + fn from(behavior: DropBehavior) -> Self { + match behavior { + DropBehavior::Rollback => 0, + DropBehavior::Commit => 1, + DropBehavior::Ignore => 2, + DropBehavior::Panic => 3, + } + } +} + +impl From for DropBehavior { + fn from(value: u8) -> Self { + match value { + 0 => DropBehavior::Rollback, + 1 => DropBehavior::Commit, + 2 => DropBehavior::Ignore, + 3 => DropBehavior::Panic, + _ => panic!("Invalid drop behavior: {value}"), + } + } +} + /// Represents a transaction on a database connection. /// /// ## Note @@ -187,9 +210,13 @@ impl Drop for Transaction<'_> { #[inline] fn drop(&mut self) { if self.in_progress { - self.conn.dangling_tx.store(true, Ordering::SeqCst); + self.conn + .dangling_tx + .store(self.drop_behavior(), Ordering::SeqCst); } else { - self.conn.dangling_tx.store(false, Ordering::SeqCst); + self.conn + .dangling_tx + .store(DropBehavior::Ignore, Ordering::SeqCst); } } } @@ -239,7 +266,7 @@ impl Connection { &mut self, behavior: TransactionBehavior, ) -> Result> { - self.maybe_rollback_dangling_tx().await?; + self.maybe_handle_dangling_tx().await?; Transaction::new(self, behavior).await } @@ -390,14 +417,12 @@ mod test { { let tx = conn.transaction().await?; tx.execute("INSERT INTO foo VALUES(?)", &[1]).await?; - tx.finish().await?; // default: rollback } { let mut tx = conn.transaction().await?; tx.execute("INSERT INTO foo VALUES(?)", &[2]).await?; tx.set_drop_behavior(DropBehavior::Commit); - tx.finish().await?; } { let tx = conn.transaction().await?; @@ -408,7 +433,6 @@ mod test { .await?; assert_eq!(2, result.get::(0)?); - tx.finish().await?; } Ok(()) } From 7728f3ab58bb7c14063bbb7cf1b86fa64b8ff51f Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 16:40:02 +0300 Subject: [PATCH 291/428] Update DropBehavior related documentation to reflect reality --- bindings/rust/src/transaction.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bindings/rust/src/transaction.rs b/bindings/rust/src/transaction.rs index 75d0b69c5..5e0b214de 100644 --- a/bindings/rust/src/transaction.rs +++ b/bindings/rust/src/transaction.rs @@ -65,7 +65,7 @@ impl From for DropBehavior { /// /// Transactions will roll back by default. Use `commit` method to explicitly /// commit the transaction, or use `set_drop_behavior` to change what happens -/// when the transaction is dropped. +/// on the next access to the connection after the transaction is dropped. /// /// ## Example /// @@ -224,7 +224,8 @@ impl Drop for Transaction<'_> { impl Connection { /// Begin a new transaction with the default behavior (DEFERRED). /// - /// The transaction defaults to rolling back when it is dropped. If you + /// The transaction defaults to rolling back on the next access to the connection + /// if it is not finished when the transaction is dropped. If you /// want the transaction to commit, you must call /// [`commit`](Transaction::commit) or /// [`set_drop_behavior(DropBehavior::Commit)`](Transaction::set_drop_behavior). From 6e34c505e1e0f9e96efda572dd6100b53601ef3b Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 16 Oct 2025 17:19:16 +0300 Subject: [PATCH 292/428] Fix build after I/O many completions removal --- core/types.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/core/types.rs b/core/types.rs index 11592556d..a7259f8b1 100644 --- a/core/types.rs +++ b/core/types.rs @@ -2383,9 +2383,6 @@ impl IOCompletions { if let Some(waker) = waker { match self { IOCompletions::Single(c) => c.set_waker(waker), - IOCompletions::Many(completions) => { - completions.iter().for_each(|c| c.set_waker(waker)) - } } } } From 9d4813df04ee473b57c779cacd252ab6de673955 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 16 Oct 2025 19:00:40 +0300 Subject: [PATCH 293/428] Reduce Antithesis runtime to 4 hours --- scripts/antithesis/launch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/antithesis/launch.sh b/scripts/antithesis/launch.sh index 2f95c3975..83ec5caf6 100755 --- a/scripts/antithesis/launch.sh +++ b/scripts/antithesis/launch.sh @@ -3,7 +3,7 @@ curl --fail -u "$ANTITHESIS_USER:$ANTITHESIS_PASSWD" \ -X POST https://$ANTITHESIS_TENANT.antithesis.com/api/v1/launch/limbo \ -d "{\"params\": { \"antithesis.description\":\"basic_test on main\", - \"custom.duration\":\"8\", + \"custom.duration\":\"4\", \"antithesis.config_image\":\"$ANTITHESIS_DOCKER_REPO/limbo-config:antithesis-latest\", \"antithesis.images\":\"$ANTITHESIS_DOCKER_REPO/limbo-workload:antithesis-latest\", \"antithesis.report.recipients\":\"$ANTITHESIS_EMAIL\" From bd33b3fa839d0d73a16e6dfae367f3d7223d7bbc Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 13:07:12 -0400 Subject: [PATCH 294/428] Throw parse error on CHECK constraint in create table --- core/translate/schema.rs | 59 +++++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/core/translate/schema.rs b/core/translate/schema.rs index 5bf0e3682..152cca652 100644 --- a/core/translate/schema.rs +++ b/core/translate/schema.rs @@ -26,6 +26,39 @@ use crate::{bail_parse_error, Result}; use turso_ext::VTabKind; +fn validate(body: &ast::CreateTableBody, connection: &Connection) -> Result<()> { + if let ast::CreateTableBody::ColumnsAndConstraints { + options, columns, .. + } = &body + { + if options.contains(ast::TableOptions::STRICT) && !connection.experimental_strict_enabled() + { + bail_parse_error!( + "STRICT tables are an experimental feature. Enable them with --experimental-strict flag" + ); + } + for i in 0..columns.len() { + let col_i = &columns[i]; + for constraint in &col_i.constraints { + // don't silently ignore CHECK constraints, throw parse error for now + if let ast::ColumnConstraint::Check { .. } = constraint.constraint { + bail_parse_error!("CHECK constraints are not supported yet"); + } + } + for j in &columns[(i + 1)..] { + if col_i + .col_name + .as_str() + .eq_ignore_ascii_case(j.col_name.as_str()) + { + bail_parse_error!("duplicate column name: {}", j.col_name.as_str()); + } + } + } + } + Ok(()) +} + pub fn translate_create_table( tbl_name: ast::QualifiedName, resolver: &Resolver, @@ -39,32 +72,8 @@ pub fn translate_create_table( if temporary { bail_parse_error!("TEMPORARY table not supported yet"); } + validate(&body, connection)?; - if let ast::CreateTableBody::ColumnsAndConstraints { columns, .. } = &body { - for i in 0..columns.len() { - let col_i = &columns[i]; - - for j in &columns[(i + 1)..] { - if col_i - .col_name - .as_str() - .eq_ignore_ascii_case(j.col_name.as_str()) - { - bail_parse_error!("duplicate column name: {}", j.col_name.as_str()); - } - } - } - } - - // Check for STRICT mode without experimental flag - if let ast::CreateTableBody::ColumnsAndConstraints { options, .. } = &body { - if options.contains(ast::TableOptions::STRICT) && !connection.experimental_strict_enabled() - { - bail_parse_error!( - "STRICT tables are an experimental feature. Enable them with --experimental-strict flag" - ); - } - } let opts = ProgramBuilderOpts { num_cursors: 1, approx_num_insns: 30, From f033af4a93f92e11ef1126c8e594869c9e8ef3db Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 13:30:05 -0400 Subject: [PATCH 295/428] Throw parse error on CHECK constraint in create table when opening new db --- core/schema.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/schema.rs b/core/schema.rs index d92c6e612..b744a99a5 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1632,6 +1632,9 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R let mut collation = None; for c_def in constraints { match &c_def.constraint { + ast::ColumnConstraint::Check { .. } => { + crate::bail_parse_error!("CHECK constraints are not yet supported"); + } ast::ColumnConstraint::PrimaryKey { order: o, auto_increment, From e417188cb25e8488fe4c2e0a9846f50389c680de Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 12:32:56 -0400 Subject: [PATCH 296/428] Fix panic when selecting explicit rowid from FROM clause subquery --- core/translate/expr.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 4f546205e..e66400cb0 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -3495,14 +3495,18 @@ pub fn bind_and_rewrite_expr<'a>( )); } // only if we haven't found a match, check for explicit rowid reference - } else if let Some(row_id_expr) = parse_row_id( - &normalized_id, - referenced_tables.joined_tables()[0].internal_id, - || referenced_tables.joined_tables().len() != 1, - )? { - *expr = row_id_expr; - - return Ok(WalkControl::Continue); + } else { + let is_btree_table = matches!(joined_table.table, Table::BTree(_)); + if is_btree_table { + if let Some(row_id_expr) = parse_row_id( + &normalized_id, + referenced_tables.joined_tables()[0].internal_id, + || referenced_tables.joined_tables().len() != 1, + )? { + *expr = row_id_expr; + return Ok(WalkControl::Continue); + } + } } } From 959df4c4bc2e42f7c09867f5878ce4523e769592 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 12:50:26 -0400 Subject: [PATCH 297/428] Add TCL test for rowid from clause subquery panic --- testing/select.test | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/testing/select.test b/testing/select.test index d21e10c1a..72688913c 100755 --- a/testing/select.test +++ b/testing/select.test @@ -1060,4 +1060,17 @@ do_execsql_test_in_memory_any_error limit-column-reference-error { do_execsql_test select-binary-collation { SELECT 'a' = 'A'; SELECT 'a' = 'a'; -} {0 1} \ No newline at end of file +} {0 1} + +# https://github.com/tursodatabase/turso/issues/3667 regression test +do_execsql_test_in_memory_error_content rowid-select-from-clause-subquery { + CREATE TABLE t(a); + SELECT rowid FROM (SELECT * FROM t); +} {"no such column: rowid"} + +do_execsql_test_on_specific_db {:memory:} rowid-select-from-clause-subquery-explicit-works { + CREATE TABLE t(a); + INSERT INTO t values ('abc'); + SELECT rowid,a FROM (SELECT rowid,a FROM t); +} {1|abc} + From 75e86b2c20e41d43c75713f0d9200394d19e615b Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 14:26:58 -0400 Subject: [PATCH 298/428] Throw parse error on GENERATED constraint in create table when opening new db --- core/schema.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/schema.rs b/core/schema.rs index b744a99a5..a006b85ab 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1635,6 +1635,9 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R ast::ColumnConstraint::Check { .. } => { crate::bail_parse_error!("CHECK constraints are not yet supported"); } + ast::ColumnConstraint::Generated { .. } => { + crate::bail_parse_error!("GENERATED columns are not yet supported"); + } ast::ColumnConstraint::PrimaryKey { order: o, auto_increment, From 04c9eee4f1d6204b25bc6d1535c684bbb259c3cc Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 14:27:22 -0400 Subject: [PATCH 299/428] Throw parse error on GENERATED constraint when creating new table --- core/translate/schema.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/core/translate/schema.rs b/core/translate/schema.rs index 152cca652..120ad5ce9 100644 --- a/core/translate/schema.rs +++ b/core/translate/schema.rs @@ -41,8 +41,14 @@ fn validate(body: &ast::CreateTableBody, connection: &Connection) -> Result<()> let col_i = &columns[i]; for constraint in &col_i.constraints { // don't silently ignore CHECK constraints, throw parse error for now - if let ast::ColumnConstraint::Check { .. } = constraint.constraint { - bail_parse_error!("CHECK constraints are not supported yet"); + match constraint.constraint { + ast::ColumnConstraint::Check { .. } => { + bail_parse_error!("CHECK constraints are not supported yet"); + } + ast::ColumnConstraint::Generated { .. } => { + bail_parse_error!("GENERATED columns are not supported yet"); + } + _ => {} } } for j in &columns[(i + 1)..] { From 9de9927b524f68569a641ecf4466790f101aad18 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 14:47:40 -0400 Subject: [PATCH 300/428] fix clippy warning --- core/schema.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/schema.rs b/core/schema.rs index a006b85ab..c708b2f4d 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1730,7 +1730,6 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R }; foreign_keys.push(Arc::new(fk)); } - _ => {} } } From 2ca388d78dd27a60d4a175006cecc4867afe31f7 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Thu, 16 Oct 2025 22:00:01 +0300 Subject: [PATCH 301/428] WAL: don't hold shared lock across IO operations Without this change and running: ``` cd stress cargo run -- --nr-threads=4 -i 1000 --verbose --busy-timeout=0 ``` I can produce a deadlock quite reliably. With this change, I can't. Even with 5 second busy timeout (the default), the run makes progress although it is slow as hell because of the busy timeout. --- core/storage/wal.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/core/storage/wal.rs b/core/storage/wal.rs index fed9f15ad..1fd114a94 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -1322,10 +1322,12 @@ impl Wal for WalFile { tracing::debug!("wal_sync finish"); syncing.store(false, Ordering::SeqCst); }); - let shared = self.get_shared(); + let file = { + let shared = self.get_shared(); + assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); + shared.file.as_ref().unwrap().clone() + }; self.syncing.store(true, Ordering::SeqCst); - assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); - let file = shared.file.as_ref().unwrap(); let c = file.sync(completion)?; Ok(c) } @@ -1575,9 +1577,11 @@ impl Wal for WalFile { let c = Completion::new_write_linked(cmp); - let shared = self.get_shared(); - assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); - let file = shared.file.as_ref().unwrap(); + let file = { + let shared = self.get_shared(); + assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); + shared.file.as_ref().unwrap().clone() + }; let c = file.pwritev(start_off, iovecs, c)?; Ok(c) } From edaa1b675e1a23cca2bc50ada43c97c6a6c48fc2 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 15:45:20 -0400 Subject: [PATCH 302/428] Prevent column definitions on CREATE TABLE or opening DB with ON CONFLICT on column def --- core/schema.rs | 25 +++++++++++++++++++++---- core/translate/schema.rs | 10 ++++++++++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/core/schema.rs b/core/schema.rs index c708b2f4d..ce30d3950 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1641,8 +1641,14 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R ast::ColumnConstraint::PrimaryKey { order: o, auto_increment, + conflict_clause, .. } => { + if conflict_clause.is_some() { + crate::bail_parse_error!( + "ON CONFLICT not implemented for column definition" + ); + } if !primary_key_columns.is_empty() { crate::bail_parse_error!( "table \"{}\" has more than one primary key", @@ -1661,7 +1667,16 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R is_primary_key: true, }); } - ast::ColumnConstraint::NotNull { nullable, .. } => { + ast::ColumnConstraint::NotNull { + nullable, + conflict_clause, + .. + } => { + if conflict_clause.is_some() { + crate::bail_parse_error!( + "ON CONFLICT not implemented for column definition" + ); + } notnull = !nullable; } ast::ColumnConstraint::Default(ref expr) => { @@ -1670,9 +1685,11 @@ pub fn create_table(tbl_name: &str, body: &CreateTableBody, root_page: i64) -> R ); } // TODO: for now we don't check Resolve type of unique - ast::ColumnConstraint::Unique(on_conflict) => { - if on_conflict.is_some() { - unimplemented!("ON CONFLICT not implemented"); + ast::ColumnConstraint::Unique(conflict) => { + if conflict.is_some() { + crate::bail_parse_error!( + "ON CONFLICT not implemented for column definition" + ); } unique = true; unique_sets.push(UniqueSet { diff --git a/core/translate/schema.rs b/core/translate/schema.rs index 120ad5ce9..0442b5542 100644 --- a/core/translate/schema.rs +++ b/core/translate/schema.rs @@ -48,6 +48,16 @@ fn validate(body: &ast::CreateTableBody, connection: &Connection) -> Result<()> ast::ColumnConstraint::Generated { .. } => { bail_parse_error!("GENERATED columns are not supported yet"); } + ast::ColumnConstraint::NotNull { + conflict_clause, .. + } + | ast::ColumnConstraint::PrimaryKey { + conflict_clause, .. + } if conflict_clause.is_some() => { + bail_parse_error!( + "ON CONFLICT clauses are not supported yet in column definitions" + ); + } _ => {} } } From 10c69b910ec3fbb74036b282b7b16bd3b68bc1b3 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 16:39:10 -0400 Subject: [PATCH 303/428] Prevent ambiguous self-join table reference --- core/translate/select.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/core/translate/select.rs b/core/translate/select.rs index 59b6ff6cb..376745f77 100644 --- a/core/translate/select.rs +++ b/core/translate/select.rs @@ -225,6 +225,16 @@ fn prepare_one_select_plan( &mut table_references, connection, )?; + // Validate that all table references have unique identifiers + let mut seen_identifiers = std::collections::HashSet::new(); + for table in table_references.joined_tables().iter() { + if !seen_identifiers.insert(&table.identifier) { + crate::bail_parse_error!( + "table name {} specified more than once - use aliases to distinguish multiple references", + table.identifier + ); + } + } // Preallocate space for the result columns let result_columns = Vec::with_capacity( From 79c5234122ccb4c22e49930c28d1a8ca7bbc74fa Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 16:43:08 -0400 Subject: [PATCH 304/428] Add TCL test for self ambiguous join --- testing/select.test | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/testing/select.test b/testing/select.test index 72688913c..c42e38e42 100755 --- a/testing/select.test +++ b/testing/select.test @@ -1074,3 +1074,23 @@ do_execsql_test_on_specific_db {:memory:} rowid-select-from-clause-subquery-expl SELECT rowid,a FROM (SELECT rowid,a FROM t); } {1|abc} +# https://github.com/tursodatabase/turso/issues/3505 regression test +do_execsql_test_in_memory_any_error ambiguous-self-join { + CREATE TABLE T(a); + INSERT INTO t VALUES (1), (2), (3); + SELECT * fROM t JOIN t; +} + +do_execsql_test_on_specific_db {:memory:} unambiguous-self-join { + CREATE TABLE T(a); + INSERT INTO t VALUES (1), (2), (3); + SELECT * fROM t as ta JOIN t order by ta.a; +} {1|1 +1|2 +1|3 +2|1 +2|2 +2|3 +3|1 +3|2 +3|3} From ddd674c34012e1a587a4c44006c4fd97e254775e Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 16 Oct 2025 18:32:48 -0400 Subject: [PATCH 305/428] Move duplicate table identifier checking to parse_join to allow for natural joins --- core/translate/planner.rs | 22 +++++++++++++++++++++- core/translate/select.rs | 10 ---------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/core/translate/planner.rs b/core/translate/planner.rs index 08936abf3..f7d930bd6 100644 --- a/core/translate/planner.rs +++ b/core/translate/planner.rs @@ -986,9 +986,29 @@ fn parse_join( crate::bail_parse_error!("NATURAL JOIN cannot be combined with ON or USING clause"); } + // this is called once for each join, so we only need to check the rightmost table + // against all previous tables for duplicates + let rightmost_table = table_references.joined_tables().last().unwrap(); + let has_duplicate = table_references + .joined_tables() + .iter() + .take(table_references.joined_tables().len() - 1) + .any(|t| t.identifier == rightmost_table.identifier); + + if has_duplicate + && !natural + && constraint + .as_ref() + .is_none_or(|c| !matches!(c, ast::JoinConstraint::Using(_))) + { + // Duplicate table names are only allowed for NATURAL or USING joins + crate::bail_parse_error!( + "table name {} specified more than once - use an alias to disambiguate", + rightmost_table.identifier + ); + } let constraint = if natural { assert!(table_references.joined_tables().len() >= 2); - let rightmost_table = table_references.joined_tables().last().unwrap(); // NATURAL JOIN is first transformed into a USING join with the common columns let mut distinct_names: Vec = vec![]; // TODO: O(n^2) maybe not great for large tables or big multiway joins diff --git a/core/translate/select.rs b/core/translate/select.rs index 376745f77..59b6ff6cb 100644 --- a/core/translate/select.rs +++ b/core/translate/select.rs @@ -225,16 +225,6 @@ fn prepare_one_select_plan( &mut table_references, connection, )?; - // Validate that all table references have unique identifiers - let mut seen_identifiers = std::collections::HashSet::new(); - for table in table_references.joined_tables().iter() { - if !seen_identifiers.insert(&table.identifier) { - crate::bail_parse_error!( - "table name {} specified more than once - use aliases to distinguish multiple references", - table.identifier - ); - } - } // Preallocate space for the result columns let result_columns = Vec::with_capacity( From bae3a42564f5b098cad5c820ee5a6cb309ea4fff Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Fri, 17 Oct 2025 08:47:40 +0300 Subject: [PATCH 306/428] stress: prevent thread from holding write lock and then stopping When a stress thread runs out of work, execute COMMIT and ignore the result. This prevents the currently extremely common scenario where: - Thread x does BEGIN - Thread x does e.g. INSERT ... Then runs out of iterations and stops. No other threads can write anything and they just wait for 5 seconds (busy timeout) and then try again, forever. --- stress/main.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stress/main.rs b/stress/main.rs index 313098b3b..7c899e0ab 100644 --- a/stress/main.rs +++ b/stress/main.rs @@ -629,6 +629,8 @@ async fn main() -> Result<(), Box> { } } } + // In case this thread is running an exclusive transaction, commit it so that it doesn't block other threads. + let _ = conn.execute("COMMIT", ()).await; Ok::<_, Box>(()) }); handles.push(handle); From d187be74a52e85966bd6a22b93500f266a5a88d5 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Fri, 17 Oct 2025 09:25:06 +0300 Subject: [PATCH 307/428] antithesis-tests: don't create CHECK constraints we don't support them, and we just started returning parse errors for trying to do so -> failures in antithesis runs --- .../stress-composer/parallel_driver_create_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/antithesis-tests/stress-composer/parallel_driver_create_table.py b/antithesis-tests/stress-composer/parallel_driver_create_table.py index 5d786d2da..7f3d2ba3a 100755 --- a/antithesis-tests/stress-composer/parallel_driver_create_table.py +++ b/antithesis-tests/stress-composer/parallel_driver_create_table.py @@ -49,7 +49,7 @@ print(f"Creating new table: tbl_{next_table_num}") # Define possible data types and constraints data_types = ["INTEGER", "REAL", "TEXT", "BLOB", "NUMERIC"] -constraints = ["", "NOT NULL", "DEFAULT 0", "DEFAULT ''", "UNIQUE", "CHECK (col_0 > 0)"] +constraints = ["", "NOT NULL", "DEFAULT 0", "DEFAULT ''", "UNIQUE"] # Generate random number of columns (2-10) col_count = 2 + (get_random() % 9) From e3ec4f7ea25c6b331a4fec38f65b9309ac2bb139 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 8 Sep 2025 11:11:57 +0300 Subject: [PATCH 308/428] sql_generation: Add support for predefined columns --- sql_generation/generation/table.rs | 31 ++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/sql_generation/generation/table.rs b/sql_generation/generation/table.rs index 21c89a179..e8b722180 100644 --- a/sql_generation/generation/table.rs +++ b/sql_generation/generation/table.rs @@ -16,21 +16,33 @@ impl Arbitrary for Name { } } -impl Arbitrary for Table { - fn arbitrary(rng: &mut R, context: &C) -> Self { +impl Table { + /// Generate a table with some predefined columns + pub fn arbitrary_with_columns( + rng: &mut R, + context: &C, + name: String, + predefined_columns: Vec, + ) -> Self { let opts = context.opts().table.clone(); - let name = Name::arbitrary(rng, context).0; let large_table = opts.large_table.enable && rng.random_bool(opts.large_table.large_table_prob); - let column_size = if large_table { + let target_column_size = if large_table { rng.random_range(opts.large_table.column_range) } else { rng.random_range(opts.column_range) } as usize; - let mut column_set = IndexSet::with_capacity(column_size); + + // Start with predefined columns + let mut column_set = IndexSet::with_capacity(target_column_size); + for col in predefined_columns { + column_set.insert(col); + } + + // Generate additional columns to reach target size for col in std::iter::repeat_with(|| Column::arbitrary(rng, context)) { column_set.insert(col); - if column_set.len() == column_size { + if column_set.len() >= target_column_size { break; } } @@ -44,6 +56,13 @@ impl Arbitrary for Table { } } +impl Arbitrary for Table { + fn arbitrary(rng: &mut R, context: &C) -> Self { + let name = Name::arbitrary(rng, context).0; + Table::arbitrary_with_columns(rng, context, name, vec![]) + } +} + impl Arbitrary for Column { fn arbitrary(rng: &mut R, context: &C) -> Self { let name = Name::arbitrary(rng, context).0; From 0fcb0b889aff3fcee2a7b25f066618af96ea24fa Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 8 Sep 2025 18:22:42 +0300 Subject: [PATCH 309/428] sql_generation: Fix predicate evaluation for JOIN operations --- sql_generation/model/query/select.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/sql_generation/model/query/select.rs b/sql_generation/model/query/select.rs index 8add21bfb..3ba71c06f 100644 --- a/sql_generation/model/query/select.rs +++ b/sql_generation/model/query/select.rs @@ -243,28 +243,24 @@ impl FromClause { match join.join_type { JoinType::Inner => { // Implement inner join logic - let join_rows = joined_table - .rows - .iter() - .filter(|row| join.on.test(row, joined_table)) - .cloned() - .collect::>(); // take a cartesian product of the rows let all_row_pairs = join_table .rows .clone() .into_iter() - .cartesian_product(join_rows.iter()); + .cartesian_product(joined_table.rows.iter()); + let mut new_rows = Vec::new(); for (row1, row2) in all_row_pairs { let row = row1.iter().chain(row2.iter()).cloned().collect::>(); let is_in = join.on.test(&row, &join_table); if is_in { - join_table.rows.push(row); + new_rows.push(row); } } + join_table.rows = new_rows; } _ => todo!(), } From 6f71059f071f43ca09ba8a2f7216afbb3610535f Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 7 Sep 2025 14:33:18 +0300 Subject: [PATCH 310/428] tests: Add rowid alias fuzz test case This adds a new fuzz test case to verify that any query returns the same results with and without a rowid alias. --- Cargo.lock | 2 + tests/Cargo.toml | 2 + tests/integration/fuzz/mod.rs | 1 + tests/integration/fuzz/rowid_alias.rs | 210 ++++++++++++++++++++++++++ 4 files changed, 215 insertions(+) create mode 100644 tests/integration/fuzz/rowid_alias.rs diff --git a/Cargo.lock b/Cargo.lock index 69d059d0e..f3d6ced1d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -831,6 +831,7 @@ dependencies = [ "rand 0.9.2", "rand_chacha 0.9.0", "rusqlite", + "sql_generation", "tempfile", "test-log", "tokio", @@ -838,6 +839,7 @@ dependencies = [ "tracing-subscriber", "turso", "turso_core", + "turso_parser", "twox-hash", "zerocopy 0.8.26", ] diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 837635850..416d26768 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -29,6 +29,8 @@ rand = { workspace = true } zerocopy = "0.8.26" ctor = "0.5.0" twox-hash = "2.1.1" +sql_generation = { path = "../sql_generation" } +turso_parser = { workspace = true } [dev-dependencies] test-log = { version = "0.2.17", features = ["trace"] } diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 0e09c1c2c..caffe5582 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -1,4 +1,5 @@ pub mod grammar_generator; +pub mod rowid_alias; #[cfg(test)] mod tests { diff --git a/tests/integration/fuzz/rowid_alias.rs b/tests/integration/fuzz/rowid_alias.rs new file mode 100644 index 000000000..e9de059cc --- /dev/null +++ b/tests/integration/fuzz/rowid_alias.rs @@ -0,0 +1,210 @@ +use crate::common::{limbo_exec_rows, TempDatabase}; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaCha8Rng; +use sql_generation::{ + generation::{Arbitrary, GenerationContext, Opts}, + model::{ + query::{Create, Insert, Select}, + table::{Column, ColumnType, Table}, + }, +}; +use turso_parser::ast::ColumnConstraint; + +fn rng_from_time_or_env() -> (ChaCha8Rng, u64) { + let seed = if let Ok(seed_str) = std::env::var("FUZZ_SEED") { + seed_str.parse::().expect("Invalid FUZZ_SEED value") + } else { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() + }; + let rng = ChaCha8Rng::seed_from_u64(seed); + (rng, seed) +} + +// Our test context that implements GenerationContext +#[derive(Debug, Clone)] +struct FuzzTestContext { + opts: Opts, + tables: Vec
, +} + +impl FuzzTestContext { + fn new() -> Self { + Self { + opts: Opts::default(), + tables: Vec::new(), + } + } + + fn add_table(&mut self, table: Table) { + self.tables.push(table); + } +} + +impl GenerationContext for FuzzTestContext { + fn tables(&self) -> &Vec
{ + &self.tables + } + + fn opts(&self) -> &Opts { + &self.opts + } +} + +// Convert a table's CREATE statement to use INTEGER PRIMARY KEY (rowid alias) +fn convert_to_rowid_alias(create_sql: &str) -> String { + // Since we always generate INTEGER PRIMARY KEY, just return as-is + create_sql.to_string() +} + +// Convert a table's CREATE statement to NOT use rowid alias +fn convert_to_no_rowid_alias(create_sql: &str) -> String { + // Replace INTEGER PRIMARY KEY with INT PRIMARY KEY to disable rowid alias + create_sql.replace("INTEGER PRIMARY KEY", "INT PRIMARY KEY") +} + +#[test] +pub fn rowid_alias_differential_fuzz() { + let (mut rng, seed) = rng_from_time_or_env(); + tracing::info!("rowid_alias_differential_fuzz seed: {}", seed); + + // Number of queries to test + let num_queries = if let Ok(num) = std::env::var("FUZZ_NUM_QUERIES") { + num.parse::().unwrap_or(1000) + } else { + 1000 + }; + + // Create two Limbo databases with indexes enabled + let db_with_alias = TempDatabase::new_empty(true); + let db_without_alias = TempDatabase::new_empty(true); + + // Connect to both databases + let conn_with_alias = db_with_alias.connect_limbo(); + let conn_without_alias = db_without_alias.connect_limbo(); + + // Create our test context + let mut context = FuzzTestContext::new(); + + let mut successful_queries = 0; + let mut skipped_queries = 0; + + for iteration in 0..num_queries { + // Decide whether to create a new table, insert data, or generate a query + let action = + if context.tables.is_empty() || (context.tables.len() < 5 && rng.random_bool(0.1)) { + 0 // Create a new table + } else if rng.random_bool(0.3) { + 1 // Insert data + } else { + 2 // Generate a SELECT query + }; + + match action { + 0 => { + // Generate a new table with an integer primary key + let primary_key = Column { + name: "id".to_string(), + column_type: ColumnType::Integer, + constraints: vec![ColumnConstraint::PrimaryKey { + order: None, + conflict_clause: None, + auto_increment: false, + }], + }; + let table_name = format!("table_{}", context.tables.len()); + let table = Table::arbitrary_with_columns( + &mut rng, + &context, + table_name, + vec![primary_key], + ); + let create = Create { + table: table.clone(), + }; + + // Create table with rowid alias in first database + let create_with_alias = convert_to_rowid_alias(&create.to_string()); + let _ = limbo_exec_rows(&db_with_alias, &conn_with_alias, &create_with_alias); + + // Create table without rowid alias in second database + let create_without_alias = convert_to_no_rowid_alias(&create.to_string()); + let _ = limbo_exec_rows( + &db_without_alias, + &conn_without_alias, + &create_without_alias, + ); + + // Add table to context for future query generation + context.add_table(table); + + skipped_queries += 1; + continue; + } + 1 => { + // Generate and execute an INSERT statement + let insert = Insert::arbitrary(&mut rng, &context); + let insert_str = insert.to_string(); + + // Execute the insert in both databases + let _ = limbo_exec_rows(&db_with_alias, &conn_with_alias, &insert_str); + let _ = limbo_exec_rows(&db_without_alias, &conn_without_alias, &insert_str); + + // Update the table's rows in the context so predicate generation knows about the data + if let Insert::Values { + table: table_name, + values, + } = &insert + { + for table in &mut context.tables { + if table.name == *table_name { + table.rows.extend(values.clone()); + break; + } + } + } + + skipped_queries += 1; + continue; + } + _ => { + // Continue to generate SELECT query below + } + } + + let select = Select::arbitrary(&mut rng, &context); + let query_str = select.to_string(); + + tracing::debug!("Comparing query {}: {}", iteration, query_str); + + let with_alias_results = limbo_exec_rows(&db_with_alias, &conn_with_alias, &query_str); + let without_alias_results = + limbo_exec_rows(&db_without_alias, &conn_without_alias, &query_str); + + let mut sorted_with_alias = with_alias_results; + let mut sorted_without_alias = without_alias_results; + + // Sort results to handle different row ordering + sorted_with_alias.sort_by(|a, b| format!("{a:?}").cmp(&format!("{b:?}"))); + sorted_without_alias.sort_by(|a, b| format!("{a:?}").cmp(&format!("{b:?}"))); + + assert_eq!( + sorted_with_alias, sorted_without_alias, + "Query produced different results with and without rowid alias!\n\ + Query: {query_str}\n\ + With rowid alias: {sorted_with_alias:?}\n\ + Without rowid alias: {sorted_without_alias:?}\n\ + Seed: {seed}" + ); + + successful_queries += 1; + } + + tracing::info!( + "Rowid alias differential fuzz test completed: {} queries tested successfully, {} queries skipped", + successful_queries, + skipped_queries + ); +} From 3020966fbdc56f4aa8e3bf0a87a31db4be78bc3f Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 17 Oct 2025 11:08:36 +0300 Subject: [PATCH 311/428] Turso 0.3.0-pre.3 --- Cargo.lock | 54 +++++++++---------- Cargo.toml | 34 ++++++------ bindings/javascript/package-lock.json | 36 ++++++------- bindings/javascript/package.json | 2 +- .../javascript/packages/common/package.json | 2 +- .../javascript/packages/native/package.json | 4 +- .../packages/wasm-common/package.json | 2 +- .../javascript/packages/wasm/package.json | 6 +-- .../sync/packages/common/package.json | 4 +- .../sync/packages/native/package.json | 6 +-- .../sync/packages/wasm/package.json | 8 +-- bindings/javascript/yarn.lock | 24 ++++----- 12 files changed, 91 insertions(+), 91 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 69d059d0e..0e0690948 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -821,7 +821,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_tester" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "anyhow", "assert_cmd", @@ -2539,7 +2539,7 @@ dependencies = [ [[package]] name = "limbo_completion" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "mimalloc", "turso_ext", @@ -2547,7 +2547,7 @@ dependencies = [ [[package]] name = "limbo_crypto" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "blake3", "data-encoding", @@ -2560,7 +2560,7 @@ dependencies = [ [[package]] name = "limbo_csv" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "csv", "mimalloc", @@ -2570,7 +2570,7 @@ dependencies = [ [[package]] name = "limbo_fuzzy" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "mimalloc", "turso_ext", @@ -2578,7 +2578,7 @@ dependencies = [ [[package]] name = "limbo_ipaddr" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "ipnetwork", "mimalloc", @@ -2587,7 +2587,7 @@ dependencies = [ [[package]] name = "limbo_percentile" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "mimalloc", "turso_ext", @@ -2595,7 +2595,7 @@ dependencies = [ [[package]] name = "limbo_regexp" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "mimalloc", "regex", @@ -2604,7 +2604,7 @@ dependencies = [ [[package]] name = "limbo_sim" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "anyhow", "bitflags 2.9.4", @@ -2640,7 +2640,7 @@ dependencies = [ [[package]] name = "limbo_sqlite_test_ext" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "cc", ] @@ -3456,7 +3456,7 @@ dependencies = [ [[package]] name = "py-turso" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "anyhow", "pyo3", @@ -4193,7 +4193,7 @@ checksum = "d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d" [[package]] name = "sql_generation" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "anarchist-readable-name-generator-lib 0.2.0", "anyhow", @@ -4821,7 +4821,7 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "turso" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "rand 0.9.2", "rand_chacha 0.9.0", @@ -4835,7 +4835,7 @@ dependencies = [ [[package]] name = "turso-java" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "jni", "thiserror 2.0.16", @@ -4844,7 +4844,7 @@ dependencies = [ [[package]] name = "turso_cli" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "anyhow", "cfg-if", @@ -4880,7 +4880,7 @@ dependencies = [ [[package]] name = "turso_core" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "aegis", "aes", @@ -4940,7 +4940,7 @@ dependencies = [ [[package]] name = "turso_dart" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "flutter_rust_bridge", "turso_core", @@ -4948,7 +4948,7 @@ dependencies = [ [[package]] name = "turso_ext" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "chrono", "getrandom 0.3.2", @@ -4957,7 +4957,7 @@ dependencies = [ [[package]] name = "turso_ext_tests" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "env_logger 0.11.7", "lazy_static", @@ -4968,7 +4968,7 @@ dependencies = [ [[package]] name = "turso_macros" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "proc-macro2", "quote", @@ -4977,7 +4977,7 @@ dependencies = [ [[package]] name = "turso_node" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "chrono", "napi", @@ -4990,7 +4990,7 @@ dependencies = [ [[package]] name = "turso_parser" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "bitflags 2.9.4", "criterion", @@ -5006,7 +5006,7 @@ dependencies = [ [[package]] name = "turso_sqlite3" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "env_logger 0.11.7", "libc", @@ -5019,7 +5019,7 @@ dependencies = [ [[package]] name = "turso_stress" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "anarchist-readable-name-generator-lib 0.1.2", "antithesis_sdk", @@ -5036,7 +5036,7 @@ dependencies = [ [[package]] name = "turso_sync_engine" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "base64 0.22.1", "bytes", @@ -5063,7 +5063,7 @@ dependencies = [ [[package]] name = "turso_sync_js" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "genawaiter", "napi", @@ -5078,7 +5078,7 @@ dependencies = [ [[package]] name = "turso_whopper" -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" dependencies = [ "anyhow", "clap", diff --git a/Cargo.toml b/Cargo.toml index 6c82c91d4..328c86c66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,29 +39,29 @@ exclude = [ ] [workspace.package] -version = "0.3.0-pre.2" +version = "0.3.0-pre.3" authors = ["the Limbo authors"] edition = "2021" license = "MIT" repository = "https://github.com/tursodatabase/turso" [workspace.dependencies] -turso = { path = "bindings/rust", version = "0.3.0-pre.2" } -turso_node = { path = "bindings/javascript", version = "0.3.0-pre.2" } -limbo_completion = { path = "extensions/completion", version = "0.3.0-pre.2" } -turso_core = { path = "core", version = "0.3.0-pre.2" } -turso_sync_engine = { path = "sync/engine", version = "0.3.0-pre.2" } -limbo_crypto = { path = "extensions/crypto", version = "0.3.0-pre.2" } -limbo_csv = { path = "extensions/csv", version = "0.3.0-pre.2" } -turso_ext = { path = "extensions/core", version = "0.3.0-pre.2" } -turso_ext_tests = { path = "extensions/tests", version = "0.3.0-pre.2" } -limbo_ipaddr = { path = "extensions/ipaddr", version = "0.3.0-pre.2" } -turso_macros = { path = "macros", version = "0.3.0-pre.2" } -limbo_percentile = { path = "extensions/percentile", version = "0.3.0-pre.2" } -limbo_regexp = { path = "extensions/regexp", version = "0.3.0-pre.2" } -limbo_uuid = { path = "extensions/uuid", version = "0.3.0-pre.2" } -turso_parser = { path = "parser", version = "0.3.0-pre.2" } -limbo_fuzzy = { path = "extensions/fuzzy", version = "0.3.0-pre.2" } +turso = { path = "bindings/rust", version = "0.3.0-pre.3" } +turso_node = { path = "bindings/javascript", version = "0.3.0-pre.3" } +limbo_completion = { path = "extensions/completion", version = "0.3.0-pre.3" } +turso_core = { path = "core", version = "0.3.0-pre.3" } +turso_sync_engine = { path = "sync/engine", version = "0.3.0-pre.3" } +limbo_crypto = { path = "extensions/crypto", version = "0.3.0-pre.3" } +limbo_csv = { path = "extensions/csv", version = "0.3.0-pre.3" } +turso_ext = { path = "extensions/core", version = "0.3.0-pre.3" } +turso_ext_tests = { path = "extensions/tests", version = "0.3.0-pre.3" } +limbo_ipaddr = { path = "extensions/ipaddr", version = "0.3.0-pre.3" } +turso_macros = { path = "macros", version = "0.3.0-pre.3" } +limbo_percentile = { path = "extensions/percentile", version = "0.3.0-pre.3" } +limbo_regexp = { path = "extensions/regexp", version = "0.3.0-pre.3" } +limbo_uuid = { path = "extensions/uuid", version = "0.3.0-pre.3" } +turso_parser = { path = "parser", version = "0.3.0-pre.3" } +limbo_fuzzy = { path = "extensions/fuzzy", version = "0.3.0-pre.3" } sql_generation = { path = "sql_generation" } strum = { version = "0.26", features = ["derive"] } strum_macros = "0.26" diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json index a07550035..68feab117 100644 --- a/bindings/javascript/package-lock.json +++ b/bindings/javascript/package-lock.json @@ -1,11 +1,11 @@ { "name": "javascript", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "lockfileVersion": 3, "requires": true, "packages": { "": { - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "workspaces": [ "packages/common", "packages/wasm-common", @@ -3542,7 +3542,7 @@ }, "packages/common": { "name": "@tursodatabase/database-common", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "license": "MIT", "devDependencies": { "typescript": "^5.9.2", @@ -3551,10 +3551,10 @@ }, "packages/native": { "name": "@tursodatabase/database", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3568,11 +3568,11 @@ }, "packages/wasm": { "name": "@tursodatabase/database-wasm", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.3" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3585,7 +3585,7 @@ }, "packages/wasm-common": { "name": "@tursodatabase/database-wasm-common", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "license": "MIT", "dependencies": { "@napi-rs/wasm-runtime": "^1.0.5" @@ -3596,10 +3596,10 @@ }, "sync/packages/common": { "name": "@tursodatabase/sync-common", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3" }, "devDependencies": { "typescript": "^5.9.2" @@ -3607,11 +3607,11 @@ }, "sync/packages/native": { "name": "@tursodatabase/sync", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2", - "@tursodatabase/sync-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3", + "@tursodatabase/sync-common": "^0.3.0-pre.3" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3622,12 +3622,12 @@ }, "sync/packages/wasm": { "name": "@tursodatabase/sync-wasm", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.2", - "@tursodatabase/sync-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.3", + "@tursodatabase/sync-common": "^0.3.0-pre.3" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json index be0db69b9..0c19f4697 100644 --- a/bindings/javascript/package.json +++ b/bindings/javascript/package.json @@ -14,5 +14,5 @@ "sync/packages/native", "sync/packages/wasm" ], - "version": "0.3.0-pre.2" + "version": "0.3.0-pre.3" } diff --git a/bindings/javascript/packages/common/package.json b/bindings/javascript/packages/common/package.json index 10b10fff2..188f5bbfa 100644 --- a/bindings/javascript/packages/common/package.json +++ b/bindings/javascript/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-common", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json index 4a90b7fbd..7150ff058 100644 --- a/bindings/javascript/packages/native/package.json +++ b/bindings/javascript/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -47,7 +47,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/packages/wasm-common/package.json b/bindings/javascript/packages/wasm-common/package.json index 9761b71bd..0b328856a 100644 --- a/bindings/javascript/packages/wasm-common/package.json +++ b/bindings/javascript/packages/wasm-common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-wasm-common", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/wasm/package.json b/bindings/javascript/packages/wasm/package.json index 3382d804b..e6214d967 100644 --- a/bindings/javascript/packages/wasm/package.json +++ b/bindings/javascript/packages/wasm/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-wasm", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -51,7 +51,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.3" } } diff --git a/bindings/javascript/sync/packages/common/package.json b/bindings/javascript/sync/packages/common/package.json index deee9f3fc..3f308e4d9 100644 --- a/bindings/javascript/sync/packages/common/package.json +++ b/bindings/javascript/sync/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-common", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3" } } diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json index f64edbefc..822f8adf7 100644 --- a/bindings/javascript/sync/packages/native/package.json +++ b/bindings/javascript/sync/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -44,8 +44,8 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2", - "@tursodatabase/sync-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3", + "@tursodatabase/sync-common": "^0.3.0-pre.3" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/sync/packages/wasm/package.json b/bindings/javascript/sync/packages/wasm/package.json index 51e9b2d48..31543a6ca 100644 --- a/bindings/javascript/sync/packages/wasm/package.json +++ b/bindings/javascript/sync/packages/wasm/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-wasm", - "version": "0.3.0-pre.2", + "version": "0.3.0-pre.3", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -54,8 +54,8 @@ "#index": "./index.js" }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.2", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.2", - "@tursodatabase/sync-common": "^0.3.0-pre.2" + "@tursodatabase/database-common": "^0.3.0-pre.3", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.3", + "@tursodatabase/sync-common": "^0.3.0-pre.3" } } diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock index 02323dc19..de25e12a3 100644 --- a/bindings/javascript/yarn.lock +++ b/bindings/javascript/yarn.lock @@ -1586,7 +1586,7 @@ __metadata: languageName: node linkType: hard -"@tursodatabase/database-common@npm:^0.3.0-pre.2, @tursodatabase/database-common@workspace:packages/common": +"@tursodatabase/database-common@npm:^0.3.0-pre.3, @tursodatabase/database-common@workspace:packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/database-common@workspace:packages/common" dependencies: @@ -1595,7 +1595,7 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/database-wasm-common@npm:^0.3.0-pre.2, @tursodatabase/database-wasm-common@workspace:packages/wasm-common": +"@tursodatabase/database-wasm-common@npm:^0.3.0-pre.3, @tursodatabase/database-wasm-common@workspace:packages/wasm-common": version: 0.0.0-use.local resolution: "@tursodatabase/database-wasm-common@workspace:packages/wasm-common" dependencies: @@ -1609,8 +1609,8 @@ __metadata: resolution: "@tursodatabase/database-wasm@workspace:packages/wasm" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.2" - "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.2" + "@tursodatabase/database-common": "npm:^0.3.0-pre.3" + "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.3" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1624,7 +1624,7 @@ __metadata: resolution: "@tursodatabase/database@workspace:packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.2" + "@tursodatabase/database-common": "npm:^0.3.0-pre.3" "@types/node": "npm:^24.3.1" better-sqlite3: "npm:^12.2.0" drizzle-kit: "npm:^0.31.4" @@ -1634,11 +1634,11 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/sync-common@npm:^0.3.0-pre.2, @tursodatabase/sync-common@workspace:sync/packages/common": +"@tursodatabase/sync-common@npm:^0.3.0-pre.3, @tursodatabase/sync-common@workspace:sync/packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/sync-common@workspace:sync/packages/common" dependencies: - "@tursodatabase/database-common": "npm:^0.3.0-pre.2" + "@tursodatabase/database-common": "npm:^0.3.0-pre.3" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1648,9 +1648,9 @@ __metadata: resolution: "@tursodatabase/sync-wasm@workspace:sync/packages/wasm" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.2" - "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.2" - "@tursodatabase/sync-common": "npm:^0.3.0-pre.2" + "@tursodatabase/database-common": "npm:^0.3.0-pre.3" + "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.3" + "@tursodatabase/sync-common": "npm:^0.3.0-pre.3" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1664,8 +1664,8 @@ __metadata: resolution: "@tursodatabase/sync@workspace:sync/packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.2" - "@tursodatabase/sync-common": "npm:^0.3.0-pre.2" + "@tursodatabase/database-common": "npm:^0.3.0-pre.3" + "@tursodatabase/sync-common": "npm:^0.3.0-pre.3" "@types/node": "npm:^24.3.1" typescript: "npm:^5.9.2" vitest: "npm:^3.2.4" From e03f6dbf94bb27fcb6155a64d20ff42ad2958f0e Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 17 Oct 2025 20:09:00 +0300 Subject: [PATCH 312/428] core/storage: Reduce logging level --- core/storage/sqlite3_ondisk.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs index 08f7addc3..1e5827198 100644 --- a/core/storage/sqlite3_ondisk.rs +++ b/core/storage/sqlite3_ondisk.rs @@ -1912,7 +1912,7 @@ impl StreamingWalReader { wfs.loaded.store(true, Ordering::SeqCst); self.done.store(true, Ordering::Release); - tracing::info!( + tracing::debug!( "WAL loading complete: {} frames processed, last commit at frame {}", st.frame_idx - 1, max_frame From b837232b13022e5f52295f4520745318975a9cf9 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 18 Oct 2025 12:05:33 -0400 Subject: [PATCH 313/428] Remove tests that alter testing.db from views.test --- testing/views.test | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/views.test b/testing/views.test index e7ca6a938..f5829abaa 100755 --- a/testing/views.test +++ b/testing/views.test @@ -230,13 +230,13 @@ do_execsql_test_on_specific_db {:memory:} view-with-having { } {C|380 A|250} -do_execsql_test_error view-self-circle-detection { +do_execsql_test_in_memory_error_content view-self-circle-detection { CREATE VIEW v AS SELECT * FROM v; SELECT * FROM v; } {view v is circularly defined} -do_execsql_test_error view-mutual-circle-detection { +do_execsql_test_in_memory_error_content view-mutual-circle-detection { CREATE VIEW v AS SELECT * FROM vv; CREATE VIEW vv AS SELECT * FROM v; SELECT * FROM v; From 43681379a0cde9968a138d74c0aa13693f1ad945 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 18 Oct 2025 13:19:29 -0400 Subject: [PATCH 314/428] Add couple small helper utilities to rewrite column fk definition --- core/util.rs | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/core/util.rs b/core/util.rs index 1093c61ba..6caad2e05 100644 --- a/core/util.rs +++ b/core/util.rs @@ -1331,6 +1331,38 @@ pub fn extract_view_columns( Ok(ViewColumnSchema { tables, columns }) } +pub fn rewrite_fk_parent_cols_if_self_ref( + clause: &mut ast::ForeignKeyClause, + table: &str, + from: &str, + to: &str, +) { + if normalize_ident(clause.tbl_name.as_str()) == normalize_ident(table) { + for c in &mut clause.columns { + if normalize_ident(c.col_name.as_str()) == normalize_ident(from) { + c.col_name = ast::Name::exact(to.to_owned()); + } + } + } +} +/// Update a column-level REFERENCES (col,...) constraint too. +pub fn rewrite_column_references_if_needed( + col: &mut ast::ColumnDefinition, + table: &str, + from: &str, + to: &str, +) { + for cc in &mut col.constraints { + if let ast::NamedColumnConstraint { + constraint: ast::ColumnConstraint::ForeignKey { clause, .. }, + .. + } = cc + { + rewrite_fk_parent_cols_if_self_ref(clause, table, from, to); + } + } +} + #[cfg(test)] pub mod tests { use super::*; From 4dcabf37f16adb958a41dbfce845bbb25eda5b75 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 18 Oct 2025 13:20:18 -0400 Subject: [PATCH 315/428] Fix to_sql method on BTreeTable to include foreign keys --- core/schema.rs | 67 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 61 insertions(+), 6 deletions(-) diff --git a/core/schema.rs b/core/schema.rs index ce30d3950..a367e47a8 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1358,12 +1358,13 @@ impl BTreeTable { /// `CREATE TABLE t (x)`, whereas sqlite stores it with the original extra whitespace. pub fn to_sql(&self) -> String { let mut sql = format!("CREATE TABLE {} (", self.name); + + // Add columns for (i, column) in self.columns.iter().enumerate() { if i > 0 { sql.push_str(", "); } - - // we need to wrap the column name in square brackets if it contains special characters + // Wrap column name in square brackets if it contains special characters let column_name = column.name.as_ref().expect("column name is None"); if identifier_contains_special_chars(column_name) { sql.push('['); @@ -1372,7 +1373,6 @@ impl BTreeTable { } else { sql.push_str(column_name); } - if !column.ty_str.is_empty() { sql.push(' '); sql.push_str(&column.ty_str); @@ -1380,20 +1380,75 @@ impl BTreeTable { if column.notnull { sql.push_str(" NOT NULL"); } - if column.unique { sql.push_str(" UNIQUE"); } - if column.primary_key { sql.push_str(" PRIMARY KEY"); } - if let Some(default) = &column.default { sql.push_str(" DEFAULT "); sql.push_str(&default.to_string()); } } + + let has_table_pk = !self.primary_key_columns.is_empty(); + // Add table-level PRIMARY KEY constraint if exists + if has_table_pk { + sql.push_str(", PRIMARY KEY ("); + for (i, col) in self.primary_key_columns.iter().enumerate() { + if i > 0 { + sql.push_str(", "); + } + sql.push_str(&col.0); + } + sql.push(')'); + } + + for fk in &self.foreign_keys { + sql.push_str(", FOREIGN KEY ("); + for (i, col) in fk.child_columns.iter().enumerate() { + if i > 0 { + sql.push_str(", "); + } + sql.push_str(col); + } + sql.push_str(") REFERENCES "); + sql.push_str(&fk.parent_table); + sql.push('('); + for (i, col) in fk.parent_columns.iter().enumerate() { + if i > 0 { + sql.push_str(", "); + } + sql.push_str(col); + } + sql.push(')'); + + // Add ON DELETE/UPDATE actions, NoAction is default so just make empty in that case + if fk.on_delete != RefAct::NoAction { + sql.push_str(" ON DELETE "); + sql.push_str(match fk.on_delete { + RefAct::SetNull => "SET NULL", + RefAct::SetDefault => "SET DEFAULT", + RefAct::Cascade => "CASCADE", + RefAct::Restrict => "RESTRICT", + _ => "", + }); + } + if fk.on_update != RefAct::NoAction { + sql.push_str(" ON UPDATE "); + sql.push_str(match fk.on_update { + RefAct::SetNull => "SET NULL", + RefAct::SetDefault => "SET DEFAULT", + RefAct::Cascade => "CASCADE", + RefAct::Restrict => "RESTRICT", + _ => "", + }); + } + if fk.deferred { + sql.push_str(" DEFERRABLE INITIALLY DEFERRED"); + } + } sql.push(')'); sql } From 25aa2b7190972160fc78febc4e3e56efb7bc8fbf Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 18 Oct 2025 13:21:59 -0400 Subject: [PATCH 316/428] Properly reparse and revalidate parent and child foreign keys when altering columns --- core/vdbe/execute.rs | 270 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 224 insertions(+), 46 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 962f1b3bc..a268e1d73 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -17,7 +17,9 @@ use crate::types::{ compare_immutable, compare_records_generic, Extendable, IOCompletions, ImmutableRecord, SeekResult, Text, }; -use crate::util::normalize_ident; +use crate::util::{ + normalize_ident, rewrite_column_references_if_needed, rewrite_fk_parent_cols_if_self_ref, +}; use crate::vdbe::insn::InsertFlags; use crate::vdbe::{registers_to_ref_values, TxnCleanup}; use crate::vector::{vector32_sparse, vector_concat, vector_distance_jaccard, vector_slice}; @@ -73,7 +75,7 @@ use super::{ }; use parking_lot::RwLock; use rand::{thread_rng, Rng}; -use turso_parser::ast::{self, Name, SortOrder}; +use turso_parser::ast::{self, ForeignKeyClause, Name, SortOrder}; use turso_parser::parser::Parser; use super::{ @@ -5463,11 +5465,9 @@ pub fn op_function( .parse_column_definition(true) .unwrap(); - let new_sql = 'sql: { - if table != tbl_name { - break 'sql None; - } + let rename_to = normalize_ident(column_def.col_name.as_str()); + let new_sql = 'sql: { let Value::Text(sql) = sql else { break 'sql None; }; @@ -5521,32 +5521,159 @@ pub fn op_function( temporary, if_not_exists, } => { - if table != normalize_ident(tbl_name.name.as_str()) { - break 'sql None; - } - let ast::CreateTableBody::ColumnsAndConstraints { mut columns, - constraints, + mut constraints, options, } = body else { todo!() }; - let column = columns - .iter_mut() - .find(|column| { - column.col_name.as_str() == original_rename_from.as_str() - }) - .expect("column being renamed should be present"); + let normalized_tbl_name = normalize_ident(tbl_name.name.as_str()); - match alter_func { - AlterTableFunc::AlterColumn => *column = column_def, - AlterTableFunc::RenameColumn => { - column.col_name = column_def.col_name + if normalized_tbl_name == table { + // This is the table being altered - update its column + let column = columns + .iter_mut() + .find(|column| { + column.col_name.as_str() + == original_rename_from.as_str() + }) + .expect("column being renamed should be present"); + + match alter_func { + AlterTableFunc::AlterColumn => *column = column_def.clone(), + AlterTableFunc::RenameColumn => { + column.col_name = column_def.col_name.clone() + } + _ => unreachable!(), + } + + // Update table-level constraints (PRIMARY KEY, UNIQUE, FOREIGN KEY) + for constraint in &mut constraints { + match &mut constraint.constraint { + ast::TableConstraint::PrimaryKey { + columns: pk_cols, + .. + } => { + for col in pk_cols { + let (ast::Expr::Name(ref name) + | ast::Expr::Id(ref name)) = *col.expr + else { + return Err(LimboError::ParseError("Unexpected expression in PRIMARY KEY constraint".to_string())); + }; + if normalize_ident(name.as_str()) == rename_from + { + *col.expr = ast::Expr::Name(Name::exact( + column_def.col_name.as_str().to_owned(), + )); + } + } + } + ast::TableConstraint::Unique { + columns: uniq_cols, + .. + } => { + for col in uniq_cols { + let (ast::Expr::Name(ref name) + | ast::Expr::Id(ref name)) = *col.expr + else { + return Err(LimboError::ParseError("Unexpected expression in PRIMARY KEY constraint".to_string())); + }; + if normalize_ident(name.as_str()) == rename_from + { + *col.expr = ast::Expr::Name(Name::exact( + column_def.col_name.as_str().to_owned(), + )); + } + } + } + ast::TableConstraint::ForeignKey { + columns: child_cols, + clause, + .. + } => { + // Update child columns in this table's FK definitions + for child_col in child_cols { + if normalize_ident(child_col.col_name.as_str()) + == rename_from + { + child_col.col_name = Name::exact( + column_def.col_name.as_str().to_owned(), + ); + } + } + rewrite_fk_parent_cols_if_self_ref( + clause, + &normalized_tbl_name, + &rename_from, + column_def.col_name.as_str(), + ); + } + _ => {} + } + + for col in &mut columns { + rewrite_column_references_if_needed( + col, + &normalized_tbl_name, + &rename_from, + column_def.col_name.as_str(), + ); + } + } + } else { + // This is a different table, check if it has FKs referencing the renamed column + let mut fk_updated = false; + + for constraint in &mut constraints { + if let ast::TableConstraint::ForeignKey { + columns: _, + clause: + ForeignKeyClause { + tbl_name, + columns: parent_cols, + .. + }, + .. + } = &mut constraint.constraint + { + // Check if this FK references the table being altered + if normalize_ident(tbl_name.as_str()) == table { + // Update parent column references if they match the renamed column + for parent_col in parent_cols { + if normalize_ident(parent_col.col_name.as_str()) + == rename_from + { + parent_col.col_name = Name::exact( + column_def.col_name.as_str().to_owned(), + ); + fk_updated = true; + } + } + } + } + } + for col in &mut columns { + let before = fk_updated; + let mut local_col = col.clone(); + rewrite_column_references_if_needed( + &mut local_col, + &table, + &rename_from, + column_def.col_name.as_str(), + ); + if local_col != *col { + *col = local_col; + fk_updated = true; + } + } + + // Only return updated SQL if we actually changed something + if !fk_updated { + break 'sql None; } - _ => unreachable!(), } Some( @@ -5563,7 +5690,7 @@ pub fn op_function( .to_string(), ) } - _ => todo!(), + _ => None, } }; @@ -8238,43 +8365,94 @@ pub fn op_alter_column( .clone() }; let new_column = crate::schema::Column::from(definition); + let new_name = definition.col_name.as_str().to_owned(); conn.with_schema_mut(|schema| { - let table = schema + let table_arc = schema .tables .get_mut(&normalized_table_name) - .expect("table being renamed should be in schema"); + .expect("table being ALTERed should be in schema"); + let table = Arc::make_mut(table_arc); - let table = Arc::make_mut(table); - - let Table::BTree(btree) = table else { - panic!("only btree tables can be renamed"); + let Table::BTree(ref mut btree_arc) = table else { + panic!("only btree tables can be altered"); }; - - let btree = Arc::make_mut(btree); - - let column = btree + let btree = Arc::make_mut(btree_arc); + let col = btree .columns .get_mut(*column_index) - .expect("renamed column should be in schema"); + .expect("column being ALTERed should be in schema"); - if let Some(indexes) = schema.indexes.get_mut(&normalized_table_name) { - for index in indexes { - let index = Arc::make_mut(index); - for index_column in &mut index.columns { - if index_column.name - == *column.name.as_ref().expect("btree column should be named") - { - index_column.name = definition.col_name.as_str().to_owned(); + // Update indexes on THIS table that name the old column (you already had this) + if let Some(idxs) = schema.indexes.get_mut(&normalized_table_name) { + for idx in idxs { + let idx = Arc::make_mut(idx); + for ic in &mut idx.columns { + if ic.name.eq_ignore_ascii_case( + col.name.as_ref().expect("btree column should be named"), + ) { + ic.name = new_name.clone(); + } + } + } + } + if *rename { + col.name = Some(new_name.clone()); + } else { + *col = new_column.clone(); + } + + // Keep primary_key_columns consistent (names may change on rename) + for (pk_name, _ord) in &mut btree.primary_key_columns { + if pk_name.eq_ignore_ascii_case(&old_column_name) { + *pk_name = new_name.clone(); + } + } + + // Maintain rowid-alias bit after change/rename (INTEGER PRIMARY KEY) + if !*rename { + // recompute alias from `new_column` + btree.columns[*column_index].is_rowid_alias = new_column.is_rowid_alias; + } + + // Update this table’s OWN foreign keys + for fk_arc in &mut btree.foreign_keys { + let fk = Arc::make_mut(fk_arc); + // child side: rename child column if it matches + for cc in &mut fk.child_columns { + if cc.eq_ignore_ascii_case(&old_column_name) { + *cc = new_name.clone(); + } + } + // parent side: if self-referencing, rename parent column too + if normalize_ident(&fk.parent_table) == normalized_table_name { + for pc in &mut fk.parent_columns { + if pc.eq_ignore_ascii_case(&old_column_name) { + *pc = new_name.clone(); } } } } - if *rename { - column.name = new_column.name; - } else { - *column = new_column; + // fix OTHER tables that reference this table as parent + for (tname, t_arc) in schema.tables.iter_mut() { + if normalize_ident(tname) == normalized_table_name { + continue; + } + if let Table::BTree(ref mut child_btree_arc) = Arc::make_mut(t_arc) { + let child_btree = Arc::make_mut(child_btree_arc); + for fk_arc in &mut child_btree.foreign_keys { + if normalize_ident(&fk_arc.parent_table) != normalized_table_name { + continue; + } + let fk = Arc::make_mut(fk_arc); + for pc in &mut fk.parent_columns { + if pc.eq_ignore_ascii_case(&old_column_name) { + *pc = new_name.clone(); + } + } + } + } } }); From fe3a4de0ab9932d6761e6b04367b4de3ed7c8b45 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Sat, 18 Oct 2025 13:22:18 -0400 Subject: [PATCH 317/428] Add TCL tests for altering columns that have foreign keys --- core/vdbe/execute.rs | 3 +- testing/alter_column.test | 216 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 217 insertions(+), 2 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index a268e1d73..9176f8781 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -5579,7 +5579,7 @@ pub fn op_function( let (ast::Expr::Name(ref name) | ast::Expr::Id(ref name)) = *col.expr else { - return Err(LimboError::ParseError("Unexpected expression in PRIMARY KEY constraint".to_string())); + return Err(LimboError::ParseError("Unexpected expression in UNIQUE constraint".to_string())); }; if normalize_ident(name.as_str()) == rename_from { @@ -5675,7 +5675,6 @@ pub fn op_function( break 'sql None; } } - Some( ast::Stmt::CreateTable { tbl_name, diff --git a/testing/alter_column.test b/testing/alter_column.test index 3672497ab..1b4da6dd0 100755 --- a/testing/alter_column.test +++ b/testing/alter_column.test @@ -22,3 +22,219 @@ do_execsql_test_in_memory_any_error fail-alter-column-unique { CREATE TABLE t (a); ALTER TABLE t ALTER COLUMN a TO a UNIQUE; } + +do_execsql_test_on_specific_db {:memory:} alter-table-rename-pk-column { + CREATE TABLE customers (cust_id INTEGER PRIMARY KEY, cust_name TEXT); + INSERT INTO customers VALUES (1, 'Alice'), (2, 'Bob'); + + ALTER TABLE customers RENAME COLUMN cust_id TO customer_id; + + SELECT sql FROM sqlite_schema WHERE name = 'customers'; + SELECT customer_id, cust_name FROM customers ORDER BY customer_id; +} { + "CREATE TABLE customers (customer_id INTEGER PRIMARY KEY, cust_name TEXT)" + "1|Alice" + "2|Bob" +} + +do_execsql_test_on_specific_db {:memory:} alter-table-rename-composite-pk { + CREATE TABLE products (category TEXT, prod_code TEXT, name TEXT, PRIMARY KEY (category, prod_code)); + INSERT INTO products VALUES ('Electronics', 'E001', 'Laptop'); + + ALTER TABLE products RENAME COLUMN prod_code TO product_code; + + SELECT sql FROM sqlite_schema WHERE name = 'products'; + SELECT category, product_code, name FROM products; +} { + "CREATE TABLE products (category TEXT, product_code TEXT, name TEXT, PRIMARY KEY (category, product_code))" + "Electronics|E001|Laptop" +} + +# Foreign key child column rename +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-child { + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child (cid INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent(id)); + INSERT INTO parent VALUES (1); + INSERT INTO child VALUES (1, 1); + + ALTER TABLE child RENAME COLUMN pid TO parent_id; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (cid INTEGER PRIMARY KEY, parent_id INTEGER, FOREIGN KEY (parent_id) REFERENCES parent (id))" +} + +# Foreign key parent column rename - critical test +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-parent { + CREATE TABLE orders (order_id INTEGER PRIMARY KEY, date TEXT); + CREATE TABLE items (item_id INTEGER PRIMARY KEY, oid INTEGER, FOREIGN KEY (oid) REFERENCES orders(order_id)); + + ALTER TABLE orders RENAME COLUMN order_id TO ord_id; + + SELECT sql FROM sqlite_schema WHERE name = 'orders'; + SELECT sql FROM sqlite_schema WHERE name = 'items'; +} { + "CREATE TABLE orders (ord_id INTEGER PRIMARY KEY, date TEXT)" + "CREATE TABLE items (item_id INTEGER PRIMARY KEY, oid INTEGER, FOREIGN KEY (oid) REFERENCES orders (ord_id))" +} + +# Composite foreign key parent rename +do_execsql_test_on_specific_db {:memory:} alter-table-rename-composite-fk-parent { + CREATE TABLE products (cat TEXT, code TEXT, PRIMARY KEY (cat, code)); + CREATE TABLE inventory (id INTEGER PRIMARY KEY, cat TEXT, code TEXT, FOREIGN KEY (cat, code) REFERENCES products(cat, code)); + + ALTER TABLE products RENAME COLUMN code TO sku; + + SELECT sql FROM sqlite_schema WHERE name = 'products'; + SELECT sql FROM sqlite_schema WHERE name = 'inventory'; +} { + "CREATE TABLE products (cat TEXT, sku TEXT, PRIMARY KEY (cat, sku))" + "CREATE TABLE inventory (id INTEGER PRIMARY KEY, cat TEXT, code TEXT, FOREIGN KEY (cat, code) REFERENCES products (cat, sku))" +} + +# Multiple foreign keys to same parent +do_execsql_test_on_specific_db {:memory:} alter-table-rename-multiple-fks { + CREATE TABLE users (uid INTEGER PRIMARY KEY); + CREATE TABLE messages (mid INTEGER PRIMARY KEY, sender INTEGER, receiver INTEGER, + FOREIGN KEY (sender) REFERENCES users(uid), + FOREIGN KEY (receiver) REFERENCES users(uid)); + + ALTER TABLE users RENAME COLUMN uid TO user_id; + + SELECT sql FROM sqlite_schema WHERE name = 'messages'; +} { + "CREATE TABLE messages (mid INTEGER PRIMARY KEY, sender INTEGER, receiver INTEGER, FOREIGN KEY (sender) REFERENCES users (user_id), FOREIGN KEY (receiver) REFERENCES users (user_id))" +} + +# Self-referencing foreign key +do_execsql_test_on_specific_db {:memory:} alter-table-rename-self-ref-fk { + CREATE TABLE employees (emp_id INTEGER PRIMARY KEY, manager_id INTEGER, + FOREIGN KEY (manager_id) REFERENCES employees(emp_id)); + + ALTER TABLE employees RENAME COLUMN emp_id TO employee_id; + + SELECT sql FROM sqlite_schema WHERE name = 'employees'; +} { + "CREATE TABLE employees (employee_id INTEGER PRIMARY KEY, manager_id INTEGER, FOREIGN KEY (manager_id) REFERENCES employees (employee_id))" +} + +# Chain of FK renames - parent is both PK and referenced +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-chain { + CREATE TABLE t1 (a INTEGER PRIMARY KEY); + CREATE TABLE t2 (b INTEGER PRIMARY KEY, a_ref INTEGER, FOREIGN KEY (a_ref) REFERENCES t1(a)); + CREATE TABLE t3 (c INTEGER PRIMARY KEY, b_ref INTEGER, FOREIGN KEY (b_ref) REFERENCES t2(b)); + + ALTER TABLE t1 RENAME COLUMN a TO a_new; + ALTER TABLE t2 RENAME COLUMN b TO b_new; + + SELECT sql FROM sqlite_schema WHERE name = 't2'; + SELECT sql FROM sqlite_schema WHERE name = 't3'; +} { + "CREATE TABLE t2 (b_new INTEGER PRIMARY KEY, a_ref INTEGER, FOREIGN KEY (a_ref) REFERENCES t1 (a_new))" + "CREATE TABLE t3 (c INTEGER PRIMARY KEY, b_ref INTEGER, FOREIGN KEY (b_ref) REFERENCES t2 (b_new))" +} + +# FK with ON DELETE/UPDATE actions +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-actions { + CREATE TABLE parent (pid INTEGER PRIMARY KEY); + CREATE TABLE child (cid INTEGER PRIMARY KEY, pid INTEGER, + FOREIGN KEY (pid) REFERENCES parent(pid) ON DELETE CASCADE ON UPDATE RESTRICT); + + ALTER TABLE parent RENAME COLUMN pid TO parent_id; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (cid INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent (parent_id) ON DELETE CASCADE ON UPDATE RESTRICT)" +} + +# FK with DEFERRABLE +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-deferrable { + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, + FOREIGN KEY (pid) REFERENCES parent(id) DEFERRABLE INITIALLY DEFERRED); + + ALTER TABLE parent RENAME COLUMN id TO parent_id; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent (parent_id) DEFERRABLE INITIALLY DEFERRED)" +} + +# Rename with quoted identifiers in FK +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-quoted { + CREATE TABLE "parent table" ("parent id" INTEGER PRIMARY KEY); + CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, + FOREIGN KEY (pid) REFERENCES "parent table"("parent id")); + + ALTER TABLE "parent table" RENAME COLUMN "parent id" TO "new id"; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES \"parent table\" (\"new id\"))" +} + +# Verify FK constraint still works after rename +do_execsql_test_on_specific_db {:memory:} alter-table-fk-constraint-after-rename { + PRAGMA foreign_keys = ON; + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent(id)); + INSERT INTO parent VALUES (1); + INSERT INTO child VALUES (1, 1); + + ALTER TABLE parent RENAME COLUMN id TO parent_id; + + -- This should work + INSERT INTO child VALUES (2, 1); + SELECT COUNT(*) FROM child; +} { + "2" +} + +# FK constraint violation after rename should still fail +do_execsql_test_in_memory_any_error alter-table-fk-violation-after-rename { + PRAGMA foreign_keys = ON; + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent(id)); + INSERT INTO parent VALUES (1); + + ALTER TABLE parent RENAME COLUMN id TO parent_id; + + -- This should fail with FK violation + INSERT INTO child VALUES (1, 999); +} + +# Complex scenario with multiple table constraints +do_execsql_test_on_specific_db {:memory:} alter-table-rename-complex-constraints { + CREATE TABLE t ( + a INTEGER, + b TEXT, + c REAL, + PRIMARY KEY (a, b), + UNIQUE (b, c), + FOREIGN KEY (a) REFERENCES t(a) + ); + + ALTER TABLE t RENAME COLUMN a TO x; + ALTER TABLE t RENAME COLUMN b TO y; + + SELECT sql FROM sqlite_schema WHERE name = 't'; +} { + "CREATE TABLE t (x INTEGER, y TEXT, c REAL, PRIMARY KEY (x, y), UNIQUE (y, c), FOREIGN KEY (x) REFERENCES t (x))" +} + +# Rename column that appears in both PK and FK +do_execsql_test_on_specific_db {:memory:} alter-table-rename-pk-and-fk { + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child ( + id INTEGER PRIMARY KEY, + parent_ref INTEGER, + FOREIGN KEY (id) REFERENCES parent(id), + FOREIGN KEY (parent_ref) REFERENCES parent(id) + ); + + ALTER TABLE parent RENAME COLUMN id TO pid; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (id INTEGER PRIMARY KEY, parent_ref INTEGER, FOREIGN KEY (id) REFERENCES parent (pid), FOREIGN KEY (parent_ref) REFERENCES parent (pid))" +} From ba2570d0b8bf8afbc46306a8201321b0c1bea0af Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 19 Oct 2025 13:17:25 +0530 Subject: [PATCH 318/428] cli:scrolling and enable suggestion for wrong commands --- cli/manual.rs | 151 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 114 insertions(+), 37 deletions(-) diff --git a/cli/manual.rs b/cli/manual.rs index b6a459696..96b7dbff3 100644 --- a/cli/manual.rs +++ b/cli/manual.rs @@ -1,7 +1,15 @@ use include_dir::{include_dir, Dir}; use rand::seq::SliceRandom; -use std::io::{IsTerminal, Write}; -use termimad::MadSkin; +use std::io::{stdout, IsTerminal, Write}; + +use termimad::{ + crossterm::{ + event::{read, Event, KeyCode}, + queue, + terminal::{enable_raw_mode, disable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, + }, + Area, MadSkin, MadView, +}; static MANUAL_DIR: Dir = include_dir!("$CARGO_MANIFEST_DIR/manuals"); @@ -63,36 +71,81 @@ fn strip_frontmatter(content: &str) -> &str { } } +fn levenshtein(a: &str, b: &str) -> usize { + let a_chars: Vec<_> = a.chars().collect(); + let b_chars: Vec<_> = b.chars().collect(); + let (a_len, b_len) = (a_chars.len(), b_chars.len()); + if a_len == 0 { return b_len; } + if b_len == 0 { return a_len; } + let mut prev_row: Vec = (0..=b_len).collect(); + let mut current_row = vec![0; b_len + 1]; + for i in 1..=a_len { + current_row[0] = i; + for j in 1..=b_len { + let substitution_cost = if a_chars[i - 1] == b_chars[j - 1] { 0 } else { 1 }; + current_row[j] = (prev_row[j] + 1) + .min(current_row[j - 1] + 1) + .min(prev_row[j - 1] + substitution_cost); + } + prev_row.clone_from_slice(¤t_row); + } + prev_row[b_len] +} + +fn find_closest_manual_page<'a>( + page_name: &str, + available_pages: impl Iterator, +) -> Option<&'a str> { + + const RELATIVE_SIMILARITY_THRESHOLD: f64 = 0.4; + + available_pages + .filter_map(|candidate| { + let distance = levenshtein(page_name, candidate); + let longer_len = std::cmp::max(page_name.chars().count(), candidate.chars().count()); + if longer_len == 0 { return None; } + let relative_distance = distance as f64 / longer_len as f64; + + if relative_distance < RELATIVE_SIMILARITY_THRESHOLD { + Some((candidate, distance)) + } else { + None + } + }) + .min_by_key(|&(_, score)| score) + .map(|(name, _)| name) +} + + pub fn display_manual(page: Option<&str>, writer: &mut dyn Write) -> anyhow::Result<()> { let page_name = page.unwrap_or("index"); let file_name = format!("{page_name}.md"); - // Try to find the manual page - let content = if let Some(file) = MANUAL_DIR.get_file(&file_name) { - file.contents_utf8() - .ok_or_else(|| anyhow::anyhow!("Failed to read manual page: {}", page_name))? + if let Some(file) = MANUAL_DIR.get_file(&file_name) { + let content = file.contents_utf8().ok_or_else(|| anyhow::anyhow!("Failed to read manual page: {}", page_name))?; + let content = strip_frontmatter(content); + if IsTerminal::is_terminal(&std::io::stdout()) { + render_in_terminal(content)?; + } else { + writeln!(writer, "{content}")?; + } + Ok(()) } else if page.is_none() { // If no page specified, list available pages return list_available_manuals(writer); } else { - return Err(anyhow::anyhow!("Manual page not found: {}", page_name)); - }; - - // Strip frontmatter before displaying - let content = strip_frontmatter(content); - - // Check if we're in a terminal or piped output - if IsTerminal::is_terminal(&std::io::stdout()) { - // Use termimad for nice terminal rendering - render_in_terminal(content)?; - } else { - // Plain output for pipes/redirects - writeln!(writer, "{content}")?; + let available_pages = MANUAL_DIR.files().filter_map(|file| { + file.path().file_stem().and_then(|stem| stem.to_str()) + }); + let mut error_message = format!("Manual page not found: {}", page_name); + if let Some(suggestion) = find_closest_manual_page(page_name, available_pages) { + error_message.push_str(&format!("\n\nDid you mean '.manual {}'?", suggestion)); + } + Err(anyhow::anyhow!(error_message)) } - - Ok(()) } + fn render_in_terminal(content: &str) -> anyhow::Result<()> { // Create a skin with nice styling let mut skin = MadSkin::default(); @@ -107,8 +160,40 @@ fn render_in_terminal(content: &str) -> anyhow::Result<()> { skin.code_block .set_fg(termimad::crossterm::style::Color::Green); - // Just print the formatted content - skin.print_text(content); + let mut w = stdout(); + queue!(w, EnterAlternateScreen)?; + enable_raw_mode()?; + + let area = Area::full_screen(); + let mut view = MadView::from(content.to_string(), area, skin); + + loop { + view.write_on(&mut w)?; + w.flush()?; + + match read()? { + Event::Key(key) => match key.code { + + KeyCode::Up | KeyCode::Char('k') => view.try_scroll_lines(-1), + KeyCode::Down | KeyCode::Char('j') => view.try_scroll_lines(1), + KeyCode::PageUp => view.try_scroll_pages(-1), + KeyCode::PageDown => view.try_scroll_pages(1), + + KeyCode::Esc | KeyCode::Char('q') | KeyCode::Enter => break, + + _ => {} + }, + Event::Resize(width, height) => { + let new_area = Area::new(0, 0, width, height); + view.resize(&new_area); + } + _ => {} + } + } + + disable_raw_mode()?; + queue!(w, LeaveAlternateScreen)?; + w.flush()?; Ok(()) } @@ -116,29 +201,21 @@ fn render_in_terminal(content: &str) -> anyhow::Result<()> { fn list_available_manuals(writer: &mut dyn Write) -> anyhow::Result<()> { writeln!(writer, "Available manual pages:")?; writeln!(writer)?; - - let mut pages: Vec = Vec::new(); - - for file in MANUAL_DIR.files() { - if let Some(name) = file.path().file_stem() { - if let Some(name_str) = name.to_str() { - pages.push(name_str.to_string()); - } - } - } - + let mut pages: Vec = MANUAL_DIR.files() + .filter_map(|file| file.path().file_stem()?.to_str().map(String::from)) + .collect(); pages.sort(); - for page in pages { + for page in &pages { writeln!(writer, " .manual {page} # or .man {page}")?; } - if MANUAL_DIR.files().count() == 0 { + if pages.is_empty() { writeln!(writer, " (No manual pages found)")?; } - writeln!(writer)?; writeln!(writer, "Usage: .manual or .man ")?; Ok(()) } + From 3658a94f06be4c0a7b1166db1107b09646354791 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 19 Oct 2025 13:20:40 +0530 Subject: [PATCH 319/428] fmt --- cli/manual.rs | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/cli/manual.rs b/cli/manual.rs index 96b7dbff3..af0af2f8c 100644 --- a/cli/manual.rs +++ b/cli/manual.rs @@ -6,7 +6,7 @@ use termimad::{ crossterm::{ event::{read, Event, KeyCode}, queue, - terminal::{enable_raw_mode, disable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, + terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, }, Area, MadSkin, MadView, }; @@ -75,14 +75,22 @@ fn levenshtein(a: &str, b: &str) -> usize { let a_chars: Vec<_> = a.chars().collect(); let b_chars: Vec<_> = b.chars().collect(); let (a_len, b_len) = (a_chars.len(), b_chars.len()); - if a_len == 0 { return b_len; } - if b_len == 0 { return a_len; } + if a_len == 0 { + return b_len; + } + if b_len == 0 { + return a_len; + } let mut prev_row: Vec = (0..=b_len).collect(); let mut current_row = vec![0; b_len + 1]; for i in 1..=a_len { current_row[0] = i; for j in 1..=b_len { - let substitution_cost = if a_chars[i - 1] == b_chars[j - 1] { 0 } else { 1 }; + let substitution_cost = if a_chars[i - 1] == b_chars[j - 1] { + 0 + } else { + 1 + }; current_row[j] = (prev_row[j] + 1) .min(current_row[j - 1] + 1) .min(prev_row[j - 1] + substitution_cost); @@ -96,14 +104,15 @@ fn find_closest_manual_page<'a>( page_name: &str, available_pages: impl Iterator, ) -> Option<&'a str> { - const RELATIVE_SIMILARITY_THRESHOLD: f64 = 0.4; available_pages .filter_map(|candidate| { let distance = levenshtein(page_name, candidate); let longer_len = std::cmp::max(page_name.chars().count(), candidate.chars().count()); - if longer_len == 0 { return None; } + if longer_len == 0 { + return None; + } let relative_distance = distance as f64 / longer_len as f64; if relative_distance < RELATIVE_SIMILARITY_THRESHOLD { @@ -116,13 +125,14 @@ fn find_closest_manual_page<'a>( .map(|(name, _)| name) } - pub fn display_manual(page: Option<&str>, writer: &mut dyn Write) -> anyhow::Result<()> { let page_name = page.unwrap_or("index"); let file_name = format!("{page_name}.md"); if let Some(file) = MANUAL_DIR.get_file(&file_name) { - let content = file.contents_utf8().ok_or_else(|| anyhow::anyhow!("Failed to read manual page: {}", page_name))?; + let content = file + .contents_utf8() + .ok_or_else(|| anyhow::anyhow!("Failed to read manual page: {}", page_name))?; let content = strip_frontmatter(content); if IsTerminal::is_terminal(&std::io::stdout()) { render_in_terminal(content)?; @@ -134,9 +144,9 @@ pub fn display_manual(page: Option<&str>, writer: &mut dyn Write) -> anyhow::Res // If no page specified, list available pages return list_available_manuals(writer); } else { - let available_pages = MANUAL_DIR.files().filter_map(|file| { - file.path().file_stem().and_then(|stem| stem.to_str()) - }); + let available_pages = MANUAL_DIR + .files() + .filter_map(|file| file.path().file_stem().and_then(|stem| stem.to_str())); let mut error_message = format!("Manual page not found: {}", page_name); if let Some(suggestion) = find_closest_manual_page(page_name, available_pages) { error_message.push_str(&format!("\n\nDid you mean '.manual {}'?", suggestion)); @@ -145,7 +155,6 @@ pub fn display_manual(page: Option<&str>, writer: &mut dyn Write) -> anyhow::Res } } - fn render_in_terminal(content: &str) -> anyhow::Result<()> { // Create a skin with nice styling let mut skin = MadSkin::default(); @@ -173,7 +182,6 @@ fn render_in_terminal(content: &str) -> anyhow::Result<()> { match read()? { Event::Key(key) => match key.code { - KeyCode::Up | KeyCode::Char('k') => view.try_scroll_lines(-1), KeyCode::Down | KeyCode::Char('j') => view.try_scroll_lines(1), KeyCode::PageUp => view.try_scroll_pages(-1), @@ -201,7 +209,8 @@ fn render_in_terminal(content: &str) -> anyhow::Result<()> { fn list_available_manuals(writer: &mut dyn Write) -> anyhow::Result<()> { writeln!(writer, "Available manual pages:")?; writeln!(writer)?; - let mut pages: Vec = MANUAL_DIR.files() + let mut pages: Vec = MANUAL_DIR + .files() .filter_map(|file| file.path().file_stem()?.to_str().map(String::from)) .collect(); pages.sort(); @@ -218,4 +227,3 @@ fn list_available_manuals(writer: &mut dyn Write) -> anyhow::Result<()> { Ok(()) } - From 055b38787d880572a49169563b5938639ed42621 Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 19 Oct 2025 13:22:35 +0530 Subject: [PATCH 320/428] more keybindings --- cli/manual.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cli/manual.rs b/cli/manual.rs index af0af2f8c..ad95f754d 100644 --- a/cli/manual.rs +++ b/cli/manual.rs @@ -186,6 +186,8 @@ fn render_in_terminal(content: &str) -> anyhow::Result<()> { KeyCode::Down | KeyCode::Char('j') => view.try_scroll_lines(1), KeyCode::PageUp => view.try_scroll_pages(-1), KeyCode::PageDown => view.try_scroll_pages(1), + KeyCode::Char('g') => view.scroll = 0, + KeyCode::Char('G') => view.try_scroll_lines(i32::MAX), KeyCode::Esc | KeyCode::Char('q') | KeyCode::Enter => break, From aad3c00e57b58f6da3d3d05115ef65c72b22281a Mon Sep 17 00:00:00 2001 From: Pavan-Nambi Date: Sun, 19 Oct 2025 13:25:40 +0530 Subject: [PATCH 321/428] clippy --- cli/manual.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cli/manual.rs b/cli/manual.rs index ad95f754d..b1266a38e 100644 --- a/cli/manual.rs +++ b/cli/manual.rs @@ -71,6 +71,7 @@ fn strip_frontmatter(content: &str) -> &str { } } +// not ideal but enough for our usecase , probably overkill maybe. fn levenshtein(a: &str, b: &str) -> usize { let a_chars: Vec<_> = a.chars().collect(); let b_chars: Vec<_> = b.chars().collect(); @@ -147,9 +148,9 @@ pub fn display_manual(page: Option<&str>, writer: &mut dyn Write) -> anyhow::Res let available_pages = MANUAL_DIR .files() .filter_map(|file| file.path().file_stem().and_then(|stem| stem.to_str())); - let mut error_message = format!("Manual page not found: {}", page_name); + let mut error_message = format!("Manual page not found: {page_name}"); if let Some(suggestion) = find_closest_manual_page(page_name, available_pages) { - error_message.push_str(&format!("\n\nDid you mean '.manual {}'?", suggestion)); + error_message.push_str(&format!("\n\nDid you mean '.manual {suggestion}'?")); } Err(anyhow::anyhow!(error_message)) } From 591b43634efb951e8b799ea51077218bdfae8753 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 20 Oct 2025 12:30:32 +0300 Subject: [PATCH 322/428] tests/integration: Disable rowid alias differential fuzz test case The fuzz test seems to find something that causes the tests to hang. Let's disable it for now. --- tests/integration/fuzz/rowid_alias.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/fuzz/rowid_alias.rs b/tests/integration/fuzz/rowid_alias.rs index e9de059cc..52570e9ec 100644 --- a/tests/integration/fuzz/rowid_alias.rs +++ b/tests/integration/fuzz/rowid_alias.rs @@ -66,6 +66,7 @@ fn convert_to_no_rowid_alias(create_sql: &str) -> String { } #[test] +#[ignore] pub fn rowid_alias_differential_fuzz() { let (mut rng, seed) = rng_from_time_or_env(); tracing::info!("rowid_alias_differential_fuzz seed: {}", seed); From bebe230b05f860ea89b88f3c5a7fb615fffc6fa8 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 20 Oct 2025 13:59:02 +0300 Subject: [PATCH 323/428] Regression test: deferred FK violations are checked before commit --- .../query_processing/test_transactions.rs | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tests/integration/query_processing/test_transactions.rs b/tests/integration/query_processing/test_transactions.rs index 36df15f9d..89f102ae5 100644 --- a/tests/integration/query_processing/test_transactions.rs +++ b/tests/integration/query_processing/test_transactions.rs @@ -208,6 +208,37 @@ fn test_constraint_error_aborts_transaction() { assert_eq!(row, vec![Value::Integer(0)]); } +#[test] +/// Regression test for https://github.com/tursodatabase/turso/issues/3784 where dirty pages +/// were flushed to WAL _before_ deferred FK violations were checked. This resulted in the +/// violations being persisted to the database, even though the transaction was aborted. +/// This test ensures that dirty pages are not flushed to WAL until after deferred violations are checked. +fn test_deferred_fk_violation_rollback_in_autocommit() { + let tmp_db = TempDatabase::new("test_deferred_fk_violation_rollback.db", true); + let conn = tmp_db.connect_limbo(); + + // Enable foreign keys + conn.execute("PRAGMA foreign_keys = ON").unwrap(); + + // Create parent and child tables with deferred FK constraint + conn.execute("CREATE TABLE parent(a PRIMARY KEY)").unwrap(); + conn.execute("CREATE TABLE child(a, b, FOREIGN KEY(b) REFERENCES parent(a) DEFERRABLE INITIALLY DEFERRED)") + .unwrap(); + + // This insert should fail because parent(1) doesn't exist + // and the deferred FK violation should be caught at statement end in autocommit mode + let result = conn.execute("INSERT INTO child VALUES(1,1)"); + assert!(matches!(result, Err(LimboError::Constraint(_)))); + + // Do a truncating checkpoint + conn.execute("PRAGMA wal_checkpoint(TRUNCATE)").unwrap(); + + // Verify that the child table is empty (the insert was rolled back) + let stmt = conn.query("SELECT COUNT(*) FROM child").unwrap().unwrap(); + let row = helper_read_single_row(stmt); + assert_eq!(row, vec![Value::Integer(0)]); +} + #[test] fn test_mvcc_transactions_autocommit() { let tmp_db = TempDatabase::new_with_opts( From 10532544dc2e569586f4ef593556883e007f193f Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Mon, 20 Oct 2025 14:00:49 +0300 Subject: [PATCH 324/428] Fix: check deferred FK violations before committing to WAL DEFERRED was a bit too deferred - it allowed the dirty pages to be written out to WAL before checking for violations, resulting in the violations effectively being committed even though the transaction ended up aborting --- core/vdbe/execute.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 962f1b3bc..974538de0 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2165,21 +2165,21 @@ pub fn halt( let auto_commit = program.connection.auto_commit.load(Ordering::SeqCst); tracing::trace!("halt(auto_commit={})", auto_commit); if auto_commit { - let res = program.commit_txn(pager.clone(), state, mv_store, false); - if res.is_ok() - && program.connection.foreign_keys_enabled() + // In autocommit mode, a statement that leaves deferred violations must fail here. + if program.connection.foreign_keys_enabled() && program .connection .fk_deferred_violations .swap(0, Ordering::AcqRel) > 0 { - // In autocommit mode, a statement that leaves deferred violations must fail here. return Err(LimboError::Constraint( "foreign key constraint failed".to_string(), )); } - res.map(Into::into) + program + .commit_txn(pager.clone(), state, mv_store, false) + .map(Into::into) } else { Ok(InsnFunctionStepResult::Done) } From b00a27696004f0ccb98dd20cc6b39429ebcfdaeb Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 19 Oct 2025 18:28:11 -0300 Subject: [PATCH 325/428] add scoped locking for SharedWalFile to avoid holding locks for longer than needed --- core/storage/wal.rs | 250 +++++++++++++++++++++++--------------------- 1 file changed, 133 insertions(+), 117 deletions(-) diff --git a/core/storage/wal.rs b/core/storage/wal.rs index 1fd114a94..87863b1a3 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -822,16 +822,16 @@ impl Wal for WalFile { self.max_frame_read_lock_index.load(Ordering::Acquire), NO_LOCK_HELD ); - let (shared_max, nbackfills, last_checksum, checkpoint_seq, transaction_count) = { - let shared = self.get_shared(); - let mx = shared.max_frame.load(Ordering::Acquire); - let nb = shared.nbackfills.load(Ordering::Acquire); - let ck = shared.last_checksum; - let checkpoint_seq = shared.wal_header.lock().checkpoint_seq; - let transaction_count = shared.transaction_count.load(Ordering::Acquire); - (mx, nb, ck, checkpoint_seq, transaction_count) - }; - let db_changed = self.db_changed(&self.get_shared()); + let (shared_max, nbackfills, last_checksum, checkpoint_seq, transaction_count) = self + .with_shared(|shared| { + let mx = shared.max_frame.load(Ordering::Acquire); + let nb = shared.nbackfills.load(Ordering::Acquire); + let ck = shared.last_checksum; + let checkpoint_seq = shared.wal_header.lock().checkpoint_seq; + let transaction_count = shared.transaction_count.load(Ordering::Acquire); + (mx, nb, ck, checkpoint_seq, transaction_count) + }); + let db_changed = self.with_shared(|shared| self.db_changed(shared)); // WAL is already fully back‑filled into the main DB image // (mxFrame == nBackfill). Readers can therefore ignore the @@ -840,7 +840,7 @@ impl Wal for WalFile { if shared_max == nbackfills { tracing::debug!("begin_read_tx: WAL is already fully back‑filled into the main DB image, shared_max={}, nbackfills={}", shared_max, nbackfills); let lock_0_idx = 0; - if !self.get_shared().read_locks[lock_0_idx].read() { + if !self.with_shared(|shared| shared.read_locks[lock_0_idx].read()) { tracing::debug!("begin_read_tx: read lock 0 is already held, returning Busy"); return Err(LimboError::Busy); } @@ -864,13 +864,15 @@ impl Wal for WalFile { // Find largest mark <= mx among slots 1..N let mut best_idx: i64 = -1; let mut best_mark: u32 = 0; - for (idx, lock) in self.get_shared().read_locks.iter().enumerate().skip(1) { - let m = lock.get_value(); - if m != READMARK_NOT_USED && m <= shared_max as u32 && m > best_mark { - best_mark = m; - best_idx = idx as i64; + self.with_shared(|shared| { + for (idx, lock) in shared.read_locks.iter().enumerate().skip(1) { + let m = lock.get_value(); + if m != READMARK_NOT_USED && m <= shared_max as u32 && m > best_mark { + best_mark = m; + best_idx = idx as i64; + } } - } + }); // If none found or lagging, try to claim/update a slot if best_idx == -1 || (best_mark as u64) < shared_max { @@ -901,20 +903,19 @@ impl Wal for WalFile { // Now take a shared read on that slot, and if we are successful, // grab another snapshot of the shared state. - let (mx2, nb2, cksm2, ckpt_seq2) = { - let shared = self.get_shared(); + let (mx2, nb2, cksm2, ckpt_seq2) = self.with_shared(|shared| { if !shared.read_locks[best_idx as usize].read() { // TODO: we should retry here instead of always returning Busy return Err(LimboError::Busy); } let checkpoint_seq = shared.wal_header.lock().checkpoint_seq; - ( + Ok(( shared.max_frame.load(Ordering::Acquire), shared.nbackfills.load(Ordering::Acquire), shared.last_checksum, checkpoint_seq, - ) - }; + )) + })?; // sqlite/src/wal.c 3225 // Now that the read-lock has been obtained, check that neither the @@ -1008,7 +1009,7 @@ impl Wal for WalFile { #[instrument(skip_all, level = Level::DEBUG)] fn end_write_tx(&self) { tracing::debug!("end_write_txn"); - self.get_shared().write_lock.unlock(); + self.with_shared(|shared| shared.write_lock.unlock()); } /// Find the latest frame containing a page. @@ -1029,10 +1030,13 @@ impl Wal for WalFile { // // if it's not, than pages from WAL range [frame_watermark..nBackfill] are already in the DB file, // and in case if page first occurrence in WAL was after frame_watermark - we will be unable to read proper previous version of the page - turso_assert!( - frame_watermark.is_none() || frame_watermark.unwrap() >= self.get_shared().nbackfills.load(Ordering::Acquire), - "frame_watermark must be >= than current WAL backfill amount: frame_watermark={:?}, nBackfill={}", frame_watermark, self.get_shared().nbackfills.load(Ordering::Acquire) - ); + self.with_shared(|shared| { + let nbackfills = shared.nbackfills.load(Ordering::Acquire); + turso_assert!( + frame_watermark.is_none() || frame_watermark.unwrap() >= nbackfills, + "frame_watermark must be >= than current WAL backfill amount: frame_watermark={:?}, nBackfill={}", frame_watermark, nbackfills + ); + }); // if we are holding read_lock 0 and didn't write anything to the WAL, skip and read right from db file. // @@ -1050,30 +1054,31 @@ impl Wal for WalFile { ); return Ok(None); } - let shared = self.get_shared(); - let frames = shared.frame_cache.lock(); - let range = frame_watermark.map(|x| 0..=x).unwrap_or( - self.min_frame.load(Ordering::Acquire)..=self.max_frame.load(Ordering::Acquire), - ); - tracing::debug!( - "find_frame(page_id={}, frame_watermark={:?}): min_frame={}, max_frame={}", - page_id, - frame_watermark, - self.min_frame.load(Ordering::Acquire), - self.max_frame.load(Ordering::Acquire) - ); - if let Some(list) = frames.get(&page_id) { - if let Some(f) = list.iter().rfind(|&&f| range.contains(&f)) { - tracing::debug!( - "find_frame(page_id={}, frame_watermark={:?}): found frame={}", - page_id, - frame_watermark, - *f - ); - return Ok(Some(*f)); + self.with_shared(|shared| { + let frames = shared.frame_cache.lock(); + let range = frame_watermark.map(|x| 0..=x).unwrap_or( + self.min_frame.load(Ordering::Acquire)..=self.max_frame.load(Ordering::Acquire), + ); + tracing::debug!( + "find_frame(page_id={}, frame_watermark={:?}): min_frame={}, max_frame={}", + page_id, + frame_watermark, + self.min_frame.load(Ordering::Acquire), + self.max_frame.load(Ordering::Acquire) + ); + if let Some(list) = frames.get(&page_id) { + if let Some(f) = list.iter().rfind(|&&f| range.contains(&f)) { + tracing::debug!( + "find_frame(page_id={}, frame_watermark={:?}): found frame={}", + page_id, + frame_watermark, + *f + ); + return Ok(Some(*f)); + } } - } - Ok(None) + Ok(None) + }) } /// Read a frame from the WAL. @@ -1110,8 +1115,7 @@ impl Wal for WalFile { let epoch = shared_file.read().epoch.load(Ordering::Acquire); frame.set_wal_tag(frame_id, epoch); }); - let file = { - let shared = self.get_shared(); + let file = self.with_shared(|shared| { assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); // important not to hold shared lock beyond this point to avoid deadlock scenario where: // thread 1: takes readlock here, passes reference to shared.file to begin_read_wal_frame @@ -1125,7 +1129,7 @@ impl Wal for WalFile { // when there are writers waiting to acquire the lock. // Because of this, attempts to recursively acquire a read lock within a single thread may result in a deadlock." shared.file.as_ref().unwrap().clone() - }; + }); begin_read_wal_frame( file.as_ref(), offset + WAL_FRAME_HEADER_SIZE as u64, @@ -1184,9 +1188,10 @@ impl Wal for WalFile { } } }); - let shared = self.get_shared(); - assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); - let file = shared.file.as_ref().unwrap(); + let file = self.with_shared(|shared| { + assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); + shared.file.as_ref().unwrap().clone() + }); let c = begin_read_wal_frame_raw(&self.buffer_pool, file.as_ref(), offset, complete)?; Ok(c) } @@ -1243,9 +1248,10 @@ impl Wal for WalFile { } } }); - let shared = self.get_shared(); - assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); - let file = shared.file.as_ref().unwrap(); + let file = self.with_shared(|shared| { + assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); + shared.file.as_ref().unwrap().clone() + }); let c = begin_read_wal_frame( file.as_ref(), offset + WAL_FRAME_HEADER_SIZE as u64, @@ -1266,13 +1272,12 @@ impl Wal for WalFile { // perform actual write let offset = self.frame_offset(frame_id); - let (header, file) = { - let shared = self.get_shared(); + let (header, file) = self.with_shared(|shared| { let header = shared.wal_header.clone(); assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); let file = shared.file.as_ref().unwrap().clone(); (header, file) - }; + }); let header = header.lock(); let checksums = self.last_checksum; let (checksums, frame_bytes) = prepare_wal_frame( @@ -1296,10 +1301,11 @@ impl Wal for WalFile { #[instrument(skip_all, level = Level::DEBUG)] fn should_checkpoint(&self) -> bool { - let shared = self.get_shared(); - let frame_id = shared.max_frame.load(Ordering::Acquire) as usize; - let nbackfills = shared.nbackfills.load(Ordering::Acquire) as usize; - frame_id > self.checkpoint_threshold + nbackfills + self.with_shared(|shared| { + let frame_id = shared.max_frame.load(Ordering::Acquire) as usize; + let nbackfills = shared.nbackfills.load(Ordering::Acquire) as usize; + frame_id > self.checkpoint_threshold + nbackfills + }) } #[instrument(skip_all, level = Level::DEBUG)] @@ -1322,11 +1328,10 @@ impl Wal for WalFile { tracing::debug!("wal_sync finish"); syncing.store(false, Ordering::SeqCst); }); - let file = { - let shared = self.get_shared(); + let file = self.with_shared(|shared| { assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); shared.file.as_ref().unwrap().clone() - }; + }); self.syncing.store(true, Ordering::SeqCst); let c = file.sync(completion)?; Ok(c) @@ -1338,11 +1343,11 @@ impl Wal for WalFile { } fn get_max_frame_in_wal(&self) -> u64 { - self.get_shared().max_frame.load(Ordering::Acquire) + self.with_shared(|shared| shared.max_frame.load(Ordering::Acquire)) } fn get_checkpoint_seq(&self) -> u32 { - self.get_shared().wal_header.lock().checkpoint_seq + self.with_shared(|shared| shared.wal_header.lock().checkpoint_seq) } fn get_max_frame(&self) -> u64 { @@ -1355,8 +1360,7 @@ impl Wal for WalFile { #[instrument(skip_all, level = Level::DEBUG)] fn rollback(&mut self) { - let (max_frame, last_checksum) = { - let shared = self.get_shared(); + let (max_frame, last_checksum) = self.with_shared(|shared| { let max_frame = shared.max_frame.load(Ordering::Acquire); let mut frame_cache = shared.frame_cache.lock(); frame_cache.retain(|_page_id, frames| { @@ -1367,7 +1371,7 @@ impl Wal for WalFile { !frames.is_empty() }); (max_frame, shared.last_checksum) - }; + }); self.last_checksum = last_checksum; self.max_frame.store(max_frame, Ordering::Release); self.reset_internal_states(); @@ -1413,7 +1417,7 @@ impl Wal for WalFile { } fn prepare_wal_start(&mut self, page_size: PageSize) -> Result> { - if self.get_shared().is_initialized()? { + if self.with_shared(|shared| shared.is_initialized())? { return Ok(None); } tracing::debug!("ensure_header_if_needed"); @@ -1447,17 +1451,22 @@ impl Wal for WalFile { }; self.max_frame.store(0, Ordering::Release); - let shared = self.get_shared(); - assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); - let file = shared.file.as_ref().unwrap(); - let c = sqlite3_ondisk::begin_write_wal_header(file.as_ref(), &shared.wal_header.lock())?; + let (header, file) = self.with_shared(|shared| { + assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); + ( + *shared.wal_header.lock(), + shared.file.as_ref().unwrap().clone(), + ) + }); + let c = sqlite3_ondisk::begin_write_wal_header(file.as_ref(), &header)?; Ok(Some(c)) } fn prepare_wal_finish(&mut self) -> Result { - let shared = self.get_shared(); - assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); - let file = shared.file.as_ref().unwrap(); + let file = self.with_shared(|shared| { + assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); + shared.file.as_ref().unwrap().clone() + }); let shared = self.shared.clone(); let c = file.sync(Completion::new_sync(move |_| { shared.read().initialized.store(true, Ordering::Release); @@ -1477,18 +1486,17 @@ impl Wal for WalFile { "we limit number of iovecs to IOV_MAX" ); turso_assert!( - self.get_shared().is_initialized()?, + self.with_shared(|shared| shared.is_initialized())?, "WAL must be prepared with prepare_wal_start/prepare_wal_finish method" ); - let (header, shared_page_size, epoch) = { - let shared = self.get_shared(); + let (header, shared_page_size, epoch) = self.with_shared(|shared| { let hdr_guard = shared.wal_header.lock(); let header: WalHeader = *hdr_guard; let shared_page_size = header.page_size; let epoch = shared.epoch.load(Ordering::Acquire); (header, shared_page_size, epoch) - }; + }); turso_assert!( shared_page_size == page_sz.get(), "page size mismatch, tried to change page size after WAL header was already initialized: shared.page_size={shared_page_size}, page_size={}", @@ -1577,11 +1585,10 @@ impl Wal for WalFile { let c = Completion::new_write_linked(cmp); - let file = { - let shared = self.get_shared(); + let file = self.with_shared(|shared| { assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); shared.file.as_ref().unwrap().clone() - }; + }); let c = file.pwritev(start_off, iovecs, c)?; Ok(c) } @@ -1596,8 +1603,10 @@ impl Wal for WalFile { } fn update_max_frame(&mut self) { - let new_max_frame = self.get_shared().max_frame.load(Ordering::Acquire); - self.max_frame.store(new_max_frame, Ordering::Release); + self.with_shared(|shared| { + let new_max_frame = shared.max_frame.load(Ordering::Acquire); + self.max_frame.store(new_max_frame, Ordering::Release); + }) } } @@ -1649,7 +1658,7 @@ impl WalFile { } fn page_size(&self) -> u32 { - self.get_shared().wal_header.lock().page_size + self.with_shared(|shared| shared.wal_header.lock().page_size) } fn frame_offset(&self, frame_id: u64) -> u64 { @@ -1677,7 +1686,7 @@ impl WalFile { } } - fn get_shared(&self) -> parking_lot::RwLockReadGuard<'_, WalFileShared> { + fn _get_shared(&self) -> parking_lot::RwLockReadGuard<'_, WalFileShared> { // WASM in browser main thread doesn't have a way to "park" a thread // so, we spin way here instead of calling blocking lock #[cfg(target_family = "wasm")] @@ -1696,11 +1705,19 @@ impl WalFile { } } + #[inline] + fn with_shared(&self, func: F) -> R + where + F: FnOnce(&WalFileShared) -> R, + { + let shared = self._get_shared(); + func(&shared) + } + fn complete_append_frame(&mut self, page_id: u64, frame_id: u64, checksums: (u32, u32)) { self.last_checksum = checksums; self.max_frame.store(frame_id, Ordering::Release); - let shared = self.get_shared(); - { + self.with_shared(|shared| { let mut frame_cache = shared.frame_cache.lock(); match frame_cache.get_mut(&page_id) { Some(frames) => { @@ -1710,7 +1727,7 @@ impl WalFile { frame_cache.insert(page_id, vec![frame_id]); } } - } + }) } fn reset_internal_states(&mut self) { @@ -1745,12 +1762,11 @@ impl WalFile { // so no other checkpointer can run. fsync WAL if there are unapplied frames. // Decide the largest frame we are allowed to back‑fill. CheckpointState::Start => { - let (max_frame, nbackfills) = { - let shared = self.get_shared(); + let (max_frame, nbackfills) = self.with_shared(|shared| { let max_frame = shared.max_frame.load(Ordering::Acquire); let n_backfills = shared.nbackfills.load(Ordering::Acquire); (max_frame, n_backfills) - }; + }); let needs_backfill = max_frame > nbackfills; if !needs_backfill && !mode.should_restart_log() { // there are no frames to copy over and we don't need to reset @@ -1786,8 +1802,7 @@ impl WalFile { self.ongoing_checkpoint.max_frame = max_frame; self.ongoing_checkpoint.min_frame = nbackfills + 1; - let to_checkpoint = { - let shared = self.get_shared(); + let to_checkpoint = self.with_shared(|shared| { let frame_cache = shared.frame_cache.lock(); let mut list = Vec::with_capacity( self.ongoing_checkpoint @@ -1808,7 +1823,7 @@ impl WalFile { // sort by frame_id for read locality list.sort_unstable_by(|a, b| (a.1, a.0).cmp(&(b.1, b.0))); list - }; + }); self.ongoing_checkpoint.pages_to_checkpoint = to_checkpoint; self.ongoing_checkpoint.current_page = 0; self.ongoing_checkpoint.inflight_writes.clear(); @@ -1839,7 +1854,7 @@ impl WalFile { if self.ongoing_checkpoint.process_pending_reads() { tracing::trace!("Drained reads into batch"); } - let epoch = self.get_shared().epoch.load(Ordering::Acquire); + let epoch = self.with_shared(|shared| shared.epoch.load(Ordering::Acquire)); // Issue reads until we hit limits 'inner: while self.ongoing_checkpoint.should_issue_reads() { let (page_id, target_frame) = self.ongoing_checkpoint.pages_to_checkpoint @@ -1931,8 +1946,7 @@ impl WalFile { self.ongoing_checkpoint.complete(), "checkpoint pending flush must have finished" ); - let checkpoint_result = { - let shared = self.get_shared(); + let checkpoint_result = self.with_shared(|shared| { let current_mx = shared.max_frame.load(Ordering::Acquire); let nbackfills = shared.nbackfills.load(Ordering::Acquire); // Record two num pages fields to return as checkpoint result to caller. @@ -1964,14 +1978,16 @@ impl WalFile { checkpoint_max_frame, ) } - }; + }); // store the max frame we were able to successfully checkpoint. // NOTE: we don't have a .shm file yet, so it's safe to update nbackfills here // before we sync, because if we crash and then recover, we will checkpoint the entire db anyway. - self.get_shared() - .nbackfills - .store(self.ongoing_checkpoint.max_frame, Ordering::Release); + self.with_shared(|shared| { + shared + .nbackfills + .store(self.ongoing_checkpoint.max_frame, Ordering::Release) + }); if mode.require_all_backfilled() && !checkpoint_result.everything_backfilled() { return Err(LimboError::Busy); @@ -2002,7 +2018,7 @@ impl WalFile { checkpoint_result.take().unwrap() }; // increment wal epoch to ensure no stale pages are used for backfilling - self.get_shared().epoch.fetch_add(1, Ordering::Release); + self.with_shared(|shared| shared.epoch.fetch_add(1, Ordering::Release)); // store a copy of the checkpoint result to return in the future if pragma // wal_checkpoint is called and we haven't backfilled again since. @@ -2129,7 +2145,7 @@ impl WalFile { // reinitialize in‑memory state self.get_shared_mut().restart_wal_header(&self.io, mode); - let cksm = self.get_shared().last_checksum; + let cksm = self.with_shared(|shared| shared.last_checksum); self.last_checksum = cksm; self.max_frame.store(0, Ordering::Release); self.min_frame.store(0, Ordering::Release); @@ -2138,12 +2154,11 @@ impl WalFile { } fn truncate_log(&mut self) -> Result> { - let file = { - let shared = self.get_shared(); + let file = self.with_shared(|shared| { assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); shared.initialized.store(false, Ordering::Release); shared.file.as_ref().unwrap().clone() - }; + }); let CheckpointState::Truncate { sync_sent, @@ -2250,9 +2265,10 @@ impl WalFile { }) }; // schedule read of the page payload - let shared = self.get_shared(); - assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); - let file = shared.file.as_ref().unwrap(); + let file = self.with_shared(|shared| { + assert!(shared.enabled.load(Ordering::SeqCst), "WAL must be enabled"); + shared.file.as_ref().unwrap().clone() + }); let c = begin_read_wal_frame( file.as_ref(), offset + WAL_FRAME_HEADER_SIZE as u64, From ba9a1ebbef91bd45fb875e80193f20004fc4564d Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 19 Oct 2025 19:45:05 -0300 Subject: [PATCH 326/428] add mutable scoped locking for SharedWalFile --- core/storage/wal.rs | 162 +++++++++++++++++++++++--------------------- 1 file changed, 84 insertions(+), 78 deletions(-) diff --git a/core/storage/wal.rs b/core/storage/wal.rs index 87863b1a3..8a7ed81a6 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -876,23 +876,19 @@ impl Wal for WalFile { // If none found or lagging, try to claim/update a slot if best_idx == -1 || (best_mark as u64) < shared_max { - for (idx, lock) in self - .get_shared_mut() - .read_locks - .iter_mut() - .enumerate() - .skip(1) - { - if !lock.write() { - continue; // busy slot + self.with_shared_mut(|shared| { + for (idx, lock) in shared.read_locks.iter_mut().enumerate().skip(1) { + if !lock.write() { + continue; // busy slot + } + // claim or bump this slot + lock.set_value_exclusive(shared_max as u32); + best_idx = idx as i64; + best_mark = shared_max as u32; + lock.unlock(); + break; } - // claim or bump this slot - lock.set_value_exclusive(shared_max as u32); - best_idx = idx as i64; - best_mark = shared_max as u32; - lock.unlock(); - break; - } + }) } if best_idx == -1 || best_mark != shared_max as u32 { @@ -968,7 +964,7 @@ impl Wal for WalFile { fn end_read_tx(&self) { let slot = self.max_frame_read_lock_index.load(Ordering::Acquire); if slot != NO_LOCK_HELD { - self.get_shared_mut().read_locks[slot].unlock(); + self.with_shared_mut(|shared| shared.read_locks[slot].unlock()); self.max_frame_read_lock_index .store(NO_LOCK_HELD, Ordering::Release); tracing::debug!("end_read_tx(slot={slot})"); @@ -980,29 +976,29 @@ impl Wal for WalFile { /// Begin a write transaction #[instrument(skip_all, level = Level::DEBUG)] fn begin_write_tx(&mut self) -> Result<()> { - let shared = self.get_shared_mut(); - // sqlite/src/wal.c 3702 - // Cannot start a write transaction without first holding a read - // transaction. - // assert(pWal->readLock >= 0); - // assert(pWal->writeLock == 0 && pWal->iReCksum == 0); - turso_assert!( - self.max_frame_read_lock_index.load(Ordering::Acquire) != NO_LOCK_HELD, - "must have a read transaction to begin a write transaction" - ); - if !shared.write_lock.write() { - return Err(LimboError::Busy); - } - let db_changed = self.db_changed(&shared); - if !db_changed { - drop(shared); - return Ok(()); - } + self.with_shared_mut(|shared| { + // sqlite/src/wal.c 3702 + // Cannot start a write transaction without first holding a read + // transaction. + // assert(pWal->readLock >= 0); + // assert(pWal->writeLock == 0 && pWal->iReCksum == 0); + turso_assert!( + self.max_frame_read_lock_index.load(Ordering::Acquire) != NO_LOCK_HELD, + "must have a read transaction to begin a write transaction" + ); + if !shared.write_lock.write() { + return Err(LimboError::Busy); + } + let db_changed = self.db_changed(shared); + if !db_changed { + return Ok(()); + } - // Snapshot is stale, give up and let caller retry from scratch - tracing::debug!("unable to upgrade transaction from read to write: snapshot is stale, give up and let caller retry from scratch, self.max_frame={}, shared_max={}", self.max_frame.load(Ordering::Acquire), shared.max_frame.load(Ordering::Acquire)); - shared.write_lock.unlock(); - Err(LimboError::Busy) + // Snapshot is stale, give up and let caller retry from scratch + tracing::debug!("unable to upgrade transaction from read to write: snapshot is stale, give up and let caller retry from scratch, self.max_frame={}, shared_max={}", self.max_frame.load(Ordering::Acquire), shared.max_frame.load(Ordering::Acquire)); + shared.write_lock.unlock(); + Err(LimboError::Busy) + }) } /// End a write transaction @@ -1379,18 +1375,19 @@ impl Wal for WalFile { #[instrument(skip_all, level = Level::DEBUG)] fn finish_append_frames_commit(&mut self) -> Result<()> { - let mut shared = self.get_shared_mut(); - shared - .max_frame - .store(self.max_frame.load(Ordering::Acquire), Ordering::Release); - tracing::trace!(max_frame = self.max_frame.load(Ordering::Acquire), ?self.last_checksum); - shared.last_checksum = self.last_checksum; - self.transaction_count.fetch_add(1, Ordering::Release); - shared.transaction_count.store( - self.transaction_count.load(Ordering::Acquire), - Ordering::Release, - ); - Ok(()) + self.with_shared_mut(|shared| { + shared + .max_frame + .store(self.max_frame.load(Ordering::Acquire), Ordering::Release); + tracing::trace!(max_frame = self.max_frame.load(Ordering::Acquire), ?self.last_checksum); + shared.last_checksum = self.last_checksum; + self.transaction_count.fetch_add(1, Ordering::Release); + shared.transaction_count.store( + self.transaction_count.load(Ordering::Acquire), + Ordering::Release, + ); + Ok(()) + }) } fn changed_pages_after(&self, frame_watermark: u64) -> Result> { @@ -1421,8 +1418,7 @@ impl Wal for WalFile { return Ok(None); } tracing::debug!("ensure_header_if_needed"); - self.last_checksum = { - let mut shared = self.get_shared_mut(); + self.last_checksum = self.with_shared_mut(|shared| { let checksum = { let mut hdr = shared.wal_header.lock(); hdr.magic = if cfg!(target_endian = "big") { @@ -1448,7 +1444,7 @@ impl Wal for WalFile { }; shared.last_checksum = checksum; checksum - }; + }); self.max_frame.store(0, Ordering::Release); let (header, file) = self.with_shared(|shared| { @@ -1667,7 +1663,7 @@ impl WalFile { WAL_HEADER_SIZE as u64 + page_offset } - fn get_shared_mut(&self) -> parking_lot::RwLockWriteGuard<'_, WalFileShared> { + fn _get_shared_mut(&self) -> parking_lot::RwLockWriteGuard<'_, WalFileShared> { // WASM in browser main thread doesn't have a way to "park" a thread // so, we spin way here instead of calling blocking lock #[cfg(target_family = "wasm")] @@ -1705,6 +1701,15 @@ impl WalFile { } } + #[inline] + fn with_shared_mut(&self, func: F) -> R + where + F: FnOnce(&mut WalFileShared) -> R, + { + let mut shared = self._get_shared_mut(); + func(&mut shared) + } + #[inline] fn with_shared(&self, func: F) -> R where @@ -2087,29 +2092,30 @@ impl WalFile { /// We never modify slot values while a reader holds that slot's lock. /// TOOD: implement proper BUSY handling behavior fn determine_max_safe_checkpoint_frame(&self) -> u64 { - let mut shared = self.get_shared_mut(); - let shared_max = shared.max_frame.load(Ordering::Acquire); - let mut max_safe_frame = shared_max; + self.with_shared_mut(|shared| { + let shared_max = shared.max_frame.load(Ordering::Acquire); + let mut max_safe_frame = shared_max; - for (read_lock_idx, read_lock) in shared.read_locks.iter_mut().enumerate().skip(1) { - let this_mark = read_lock.get_value(); - if this_mark < max_safe_frame as u32 { - let busy = !read_lock.write(); - if !busy { - let val = if read_lock_idx == 1 { - // store the max_frame for the default read slot 1 - max_safe_frame as u32 + for (read_lock_idx, read_lock) in shared.read_locks.iter_mut().enumerate().skip(1) { + let this_mark = read_lock.get_value(); + if this_mark < max_safe_frame as u32 { + let busy = !read_lock.write(); + if !busy { + let val = if read_lock_idx == 1 { + // store the max_frame for the default read slot 1 + max_safe_frame as u32 + } else { + READMARK_NOT_USED + }; + read_lock.set_value_exclusive(val); + read_lock.unlock(); } else { - READMARK_NOT_USED - }; - read_lock.set_value_exclusive(val); - read_lock.unlock(); - } else { - max_safe_frame = this_mark as u64; + max_safe_frame = this_mark as u64; + } } } - } - max_safe_frame + max_safe_frame + }) } /// Called once the entire WAL has been back‑filled in RESTART or TRUNCATE mode. @@ -2125,9 +2131,8 @@ impl WalFile { self.checkpoint_guard ); tracing::debug!("restart_log(mode={mode:?})"); - { + self.with_shared_mut(|shared| { // Block all readers - let mut shared = self.get_shared_mut(); for idx in 1..shared.read_locks.len() { let lock = &mut shared.read_locks[idx]; if !lock.write() { @@ -2141,10 +2146,11 @@ impl WalFile { // after the log is reset, we must set all secondary marks to READMARK_NOT_USED so the next reader selects a fresh slot lock.set_value_exclusive(READMARK_NOT_USED); } - } + Ok(()) + })?; // reinitialize in‑memory state - self.get_shared_mut().restart_wal_header(&self.io, mode); + self.with_shared_mut(|shared| shared.restart_wal_header(&self.io, mode)); let cksm = self.with_shared(|shared| shared.last_checksum); self.last_checksum = cksm; self.max_frame.store(0, Ordering::Release); From baf649affb1807d647fa867f460affdf319cab2b Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 20 Oct 2025 13:04:35 -0300 Subject: [PATCH 327/428] add insert randomblob benchmark --- core/Cargo.toml | 2 +- core/benches/benchmark.rs | 114 +++++++++++++++++++++++++++++++++++++- 2 files changed, 114 insertions(+), 2 deletions(-) diff --git a/core/Cargo.toml b/core/Cargo.toml index a795f9d8c..473ecbd9a 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -100,7 +100,7 @@ criterion = { workspace = true, features = [ "async_futures", ] } rstest = "0.18.2" -rusqlite.workspace = true +rusqlite = { workspace = true, features = ["series"] } quickcheck = { version = "1.0", default-features = false } quickcheck_macros = { version = "1.0", default-features = false } rand = "0.8.5" # Required for quickcheck diff --git a/core/benches/benchmark.rs b/core/benches/benchmark.rs index 51b844aad..0ef1c6875 100644 --- a/core/benches/benchmark.rs +++ b/core/benches/benchmark.rs @@ -2,6 +2,7 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criteri use pprof::criterion::{Output, PProfProfiler}; use regex::Regex; use std::{sync::Arc, time::Instant}; +use tempfile::TempDir; use turso_core::{Database, LimboError, PlatformIO, StepResult}; #[cfg(not(target_family = "wasm"))] @@ -16,6 +17,36 @@ fn rusqlite_open() -> rusqlite::Connection { sqlite_conn } +fn setup_rusqlite(temp_dir: &TempDir, query: &str) -> rusqlite::Connection { + let db_path = temp_dir.path().join("bench.db"); + let sqlite_conn = rusqlite::Connection::open(db_path).unwrap(); + sqlite_conn + .pragma_update(None, "synchronous", "FULL") + .unwrap(); + sqlite_conn + .pragma_update(None, "journal_mode", "WAL") + .unwrap(); + sqlite_conn + .pragma_update(None, "locking_mode", "EXCLUSIVE") + .unwrap(); + let journal_mode = sqlite_conn + .pragma_query_value(None, "journal_mode", |row| row.get::<_, String>(0)) + .unwrap(); + assert_eq!(journal_mode.to_lowercase(), "wal"); + let synchronous = sqlite_conn + .pragma_query_value(None, "synchronous", |row| row.get::<_, usize>(0)) + .unwrap(); + const FULL: usize = 2; + assert_eq!(synchronous, FULL); + + // load the generate_series extension + rusqlite::vtab::series::load_module(&sqlite_conn).unwrap(); + + // Create test table + sqlite_conn.execute(query, []).unwrap(); + sqlite_conn +} + fn bench_open(criterion: &mut Criterion) { // https://github.com/tursodatabase/turso/issues/174 // The rusqlite benchmark crashes on Mac M1 when using the flamegraph features @@ -896,9 +927,90 @@ fn bench_concurrent_writes(criterion: &mut Criterion) { }); } +fn bench_insert_randomblob(criterion: &mut Criterion) { + // The rusqlite benchmark crashes on Mac M1 when using the flamegraph features + let enable_rusqlite = std::env::var("DISABLE_RUSQLITE_BENCHMARK").is_err(); + + let mut group = criterion.benchmark_group("Insert rows in batches"); + + // Test different batch sizes + for batch_size in [1, 10, 100] { + let temp_dir = tempfile::tempdir().unwrap(); + let db_path = temp_dir.path().join("bench.db"); + + #[allow(clippy::arc_with_non_send_sync)] + let io = Arc::new(PlatformIO::new().unwrap()); + let db = Database::open_file(io.clone(), db_path.to_str().unwrap(), false, false).unwrap(); + let limbo_conn = db.connect().unwrap(); + + let mut stmt = limbo_conn.query("CREATE TABLE test(x)").unwrap().unwrap(); + + loop { + match stmt.step().unwrap() { + turso_core::StepResult::IO => { + stmt.run_once().unwrap(); + } + turso_core::StepResult::Done => { + break; + } + turso_core::StepResult::Row => { + unreachable!(); + } + turso_core::StepResult::Interrupt | turso_core::StepResult::Busy => { + unreachable!(); + } + } + } + + let random_blob = format!( + "INSERT INTO test select randomblob(1024 * 100) from generate_series(1, {batch_size});" + ); + + group.bench_function(format!("limbo_insert_{batch_size}_randomblob"), |b| { + let mut stmt = limbo_conn.prepare(&random_blob).unwrap(); + b.iter(|| { + loop { + match stmt.step().unwrap() { + turso_core::StepResult::IO => { + stmt.run_once().unwrap(); + } + turso_core::StepResult::Done => { + break; + } + turso_core::StepResult::Row => { + unreachable!(); + } + turso_core::StepResult::Interrupt | turso_core::StepResult::Busy => { + unreachable!(); + } + } + } + stmt.reset(); + }); + }); + + if enable_rusqlite { + let temp_dir = tempfile::tempdir().unwrap(); + let sqlite_conn = setup_rusqlite(&temp_dir, "CREATE TABLE test(x)"); + + group.bench_function(format!("sqlite_insert_{batch_size}_randomblob"), |b| { + let mut stmt = sqlite_conn.prepare(&random_blob).unwrap(); + b.iter(|| { + let mut rows = stmt.raw_query(); + while let Some(row) = rows.next().unwrap() { + black_box(row); + } + }); + }); + } + } + + group.finish(); +} + criterion_group! { name = benches; config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); - targets = bench_open, bench_alter, bench_prepare_query, bench_execute_select_1, bench_execute_select_rows, bench_execute_select_count, bench_insert_rows, bench_concurrent_writes + targets = bench_open, bench_alter, bench_prepare_query, bench_execute_select_1, bench_execute_select_rows, bench_execute_select_count, bench_insert_rows, bench_concurrent_writes, bench_insert_randomblob } criterion_main!(benches); From 5d7b057b8ac6d1621723be3a3a9ee7dab363a224 Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Thu, 16 Oct 2025 23:08:49 -0500 Subject: [PATCH 328/428] Enable turso_stress to run in Miri antithesis_sdk needs to have default features disabled in the workspace so turso_stress is free to select the noop implementation for Miri --- Cargo.toml | 2 +- bindings/rust/src/lib.rs | 8 ++++---- core/Cargo.toml | 2 +- stress/Cargo.toml | 2 +- stress/main.rs | 8 +++++--- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 328c86c66..8ac02dab6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,7 +90,7 @@ fallible-iterator = "0.3.0" criterion = "0.5" chrono = { version = "0.4.42", default-features = false } hex = "0.4" -antithesis_sdk = "0.2" +antithesis_sdk = { version = "0.2", default-features = false } cfg-if = "1.0.0" tracing-appender = "0.2.3" env_logger = { version = "0.11.6", default-features = false } diff --git a/bindings/rust/src/lib.rs b/bindings/rust/src/lib.rs index d8d597cc6..8cd47d599 100644 --- a/bindings/rust/src/lib.rs +++ b/bindings/rust/src/lib.rs @@ -153,14 +153,14 @@ impl Builder { match vfs_choice { "memory" => Ok(Arc::new(turso_core::MemoryIO::new())), "syscall" => { - #[cfg(target_family = "unix")] + #[cfg(all(target_family = "unix", not(miri)))] { Ok(Arc::new( turso_core::UnixIO::new() .map_err(|e| Error::SqlExecutionFailure(e.to_string()))?, )) } - #[cfg(not(target_family = "unix"))] + #[cfg(any(not(target_family = "unix"), miri))] { Ok(Arc::new( turso_core::PlatformIO::new() @@ -168,12 +168,12 @@ impl Builder { )) } } - #[cfg(target_os = "linux")] + #[cfg(all(target_os = "linux", not(miri)))] "io_uring" => Ok(Arc::new( turso_core::UringIO::new() .map_err(|e| Error::SqlExecutionFailure(e.to_string()))?, )), - #[cfg(not(target_os = "linux"))] + #[cfg(any(not(target_os = "linux"), miri))] "io_uring" => Err(Error::SqlExecutionFailure( "io_uring is only available on Linux targets".to_string(), )), diff --git a/core/Cargo.toml b/core/Cargo.toml index a795f9d8c..d5651a2e4 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -15,7 +15,7 @@ path = "lib.rs" [features] default = ["fs", "uuid", "time", "json", "series", "encryption"] -antithesis = ["dep:antithesis_sdk"] +antithesis = ["dep:antithesis_sdk", "antithesis_sdk?/full"] tracing_release = ["tracing/release_max_level_info"] conn_raw_api = [] fs = ["turso_ext/vfs"] diff --git a/stress/Cargo.toml b/stress/Cargo.toml index 077f5f003..7221e80ca 100644 --- a/stress/Cargo.toml +++ b/stress/Cargo.toml @@ -16,7 +16,7 @@ path = "main.rs" [features] default = ["experimental_indexes"] -antithesis = ["turso/antithesis"] +antithesis = ["turso/antithesis", "antithesis_sdk/full"] experimental_indexes = [] [dependencies] diff --git a/stress/main.rs b/stress/main.rs index 7c899e0ab..879002547 100644 --- a/stress/main.rs +++ b/stress/main.rs @@ -642,9 +642,11 @@ async fn main() -> Result<(), Box> { println!("Done. SQL statements written to {}", opts.log_file); println!("Database file: {db_file}"); - println!("Running SQLite Integrity check"); - - integrity_check(std::path::Path::new(&db_file))?; + #[cfg(not(miri))] + { + println!("Running SQLite Integrity check"); + integrity_check(std::path::Path::new(&db_file))?; + } Ok(()) } From 2cb0a9b34b0deb0ddabdc255cdef0fa1f2ad935f Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Sun, 19 Oct 2025 23:38:34 -0500 Subject: [PATCH 329/428] Use read_unaligned with *u8 cast to *u32 Avoids undefined behavior due to unaligned read caught with Miri --- core/storage/btree.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 31e89b01f..65659c4b3 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -2857,7 +2857,8 @@ impl BTreeCursor { // load sibling pages // start loading right page first - let mut pgno: u32 = unsafe { right_pointer.cast::().read().swap_bytes() }; + let mut pgno: u32 = + unsafe { right_pointer.cast::().read_unaligned().swap_bytes() }; let current_sibling = sibling_pointer; let mut group = CompletionGroup::new(|_| {}); for i in (0..=current_sibling).rev() { From a614b51ebfeac3ba78e4b2092e1c6598c02e04c0 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Mon, 20 Oct 2025 13:04:35 -0300 Subject: [PATCH 330/428] change randomblob generation to use thread_rng --- core/vdbe/execute.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 962f1b3bc..f7f4b16cb 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -72,7 +72,7 @@ use super::{ CommitState, }; use parking_lot::RwLock; -use rand::{thread_rng, Rng}; +use rand::{thread_rng, Rng, RngCore}; use turso_parser::ast::{self, Name, SortOrder}; use turso_parser::parser::Parser; @@ -8664,7 +8664,7 @@ impl Value { .max(1) as usize; let mut blob: Vec = vec![0; length]; - getrandom::getrandom(&mut blob).expect("Failed to generate random blob"); + rand::thread_rng().fill_bytes(&mut blob); Value::Blob(blob) } From b92f4cb9c493d76d3b5740d574105bec0edfa27a Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 20 Oct 2025 01:00:48 -0500 Subject: [PATCH 331/428] Make Miri easier to run --- simulator/run-miri.sh | 31 +++++++++++++++++++++++++++++++ simulator/runner/cli.rs | 11 ++++++++--- stress/opts.rs | 10 +++++++++- stress/run-miri.sh | 4 ++++ 4 files changed, 52 insertions(+), 4 deletions(-) create mode 100755 simulator/run-miri.sh create mode 100755 stress/run-miri.sh diff --git a/simulator/run-miri.sh b/simulator/run-miri.sh new file mode 100755 index 000000000..e065f3c68 --- /dev/null +++ b/simulator/run-miri.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +ARGS=("$@") + +# Intercept the seed if it's passed +while [[ $# -gt 0 ]]; do + case $1 in + -s=*|--seed=*) + seed="${1#*=}" + shift + ;; + -s|--seed) + seed="$2" + shift 2 + ;; + *) + shift + ;; + esac +done +# Otherwise make one up +if [ -z "$seed" ]; then + # Dump 8 bytes of /dev/random as decimal u64 + seed=$(od -An -N8 -tu8 /dev/random | tr -d ' ') + ARGS+=("--seed" "${seed}") + echo "Generated seed for Miri and simulator: ${seed}" +else + echo "Intercepted simulator seed to pass to Miri: ${seed}" +fi + +MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-disable-stacked-borrows -Zmiri-seed=${seed}" cargo +nightly miri run --bin limbo_sim -- "${ARGS[@]}" diff --git a/simulator/runner/cli.rs b/simulator/runner/cli.rs index 8a941dde1..97062dd2d 100644 --- a/simulator/runner/cli.rs +++ b/simulator/runner/cli.rs @@ -30,7 +30,7 @@ pub struct SimulatorCLI { short = 'n', long, help = "change the maximum size of the randomly generated sequence of interactions", - default_value_t = 5000, + default_value_t = normal_or_miri(5000, 50), value_parser = clap::value_parser!(u32).range(1..) )] pub maximum_tests: u32, @@ -38,7 +38,7 @@ pub struct SimulatorCLI { short = 'k', long, help = "change the minimum size of the randomly generated sequence of interactions", - default_value_t = 1000, + default_value_t = normal_or_miri(1000, 10), value_parser = clap::value_parser!(u32).range(1..) )] pub minimum_tests: u32, @@ -149,7 +149,8 @@ pub struct SimulatorCLI { pub keep_files: bool, #[clap( long, - help = "Disable the SQLite integrity check at the end of a simulation" + help = "Disable the SQLite integrity check at the end of a simulation", + default_value_t = normal_or_miri(false, true) )] pub disable_integrity_check: bool, #[clap( @@ -279,3 +280,7 @@ impl ValueParserFactory for ProfileType { ProfileTypeParser } } + +const fn normal_or_miri(normal_val: T, miri_val: T) -> T { + if cfg!(miri) { miri_val } else { normal_val } +} diff --git a/stress/opts.rs b/stress/opts.rs index 796f85847..aac93f67d 100644 --- a/stress/opts.rs +++ b/stress/opts.rs @@ -21,7 +21,7 @@ pub struct Opts { short = 'i', long, help = "the number of iterations", - default_value_t = 100000 + default_value_t = normal_or_miri(100_000, 1000) )] pub nr_iterations: usize, @@ -75,3 +75,11 @@ pub struct Opts { )] pub busy_timeout: u64, } + +const fn normal_or_miri(normal_val: T, miri_val: T) -> T { + if cfg!(miri) { + miri_val + } else { + normal_val + } +} diff --git a/stress/run-miri.sh b/stress/run-miri.sh new file mode 100755 index 000000000..2de0069e5 --- /dev/null +++ b/stress/run-miri.sh @@ -0,0 +1,4 @@ +#!/bin/bash + + +MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-disable-stacked-borrows" cargo +nightly miri run -p turso_stress -- "$@" From 948bd557cd60d37a05d8f28815f1ba78b4f52d41 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 21 Oct 2025 14:58:39 +0400 Subject: [PATCH 332/428] use simsimd for dense operations --- Cargo.lock | 21 ++- .../javascript/packages/wasm/promise.test.ts | 13 ++ core/Cargo.toml | 1 + core/vector/operations/distance_cos.rs | 133 ++++++++++++------ core/vector/operations/distance_l2.rs | 95 ++++++++++--- 5 files changed, 196 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ccd556376..6a5e3d900 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -523,10 +523,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.17" +version = "1.2.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" +checksum = "ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -1504,6 +1505,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" + [[package]] name = "findshlibs" version = "0.10.2" @@ -4143,6 +4150,15 @@ dependencies = [ "similar", ] +[[package]] +name = "simsimd" +version = "6.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e3f209c5a8155b8458b1a0d3a6fc9fa09d201e6086fdaae18e9e283b9274f8f" +dependencies = [ + "cc", +] + [[package]] name = "slab" version = "0.4.9" @@ -4925,6 +4941,7 @@ dependencies = [ "rustix 1.0.7", "ryu", "serde", + "simsimd", "sorted-vec", "strum", "strum_macros", diff --git a/bindings/javascript/packages/wasm/promise.test.ts b/bindings/javascript/packages/wasm/promise.test.ts index d80dee8b7..7cdd8bc1b 100644 --- a/bindings/javascript/packages/wasm/promise.test.ts +++ b/bindings/javascript/packages/wasm/promise.test.ts @@ -1,6 +1,19 @@ import { expect, test } from 'vitest' import { connect, Database } from './promise-default.js' +test('vector-test', async () => { + const db = await connect(":memory:"); + const v1 = new Array(1024).fill(0).map((_, i) => i); + const v2 = new Array(1024).fill(0).map((_, i) => 1024 - i); + const result = await db.prepare(`SELECT + vector_distance_cos(vector32('${JSON.stringify(v1)}'), vector32('${JSON.stringify(v2)}')) as cosf32, + vector_distance_cos(vector64('${JSON.stringify(v1)}'), vector64('${JSON.stringify(v2)}')) as cosf64, + vector_distance_l2(vector32('${JSON.stringify(v1)}'), vector32('${JSON.stringify(v2)}')) as l2f32, + vector_distance_l2(vector64('${JSON.stringify(v1)}'), vector64('${JSON.stringify(v2)}')) as l2f64 + `).all(); + console.info(result); +}) + test('explain', async () => { const db = await connect(":memory:"); const stmt = db.prepare("EXPLAIN SELECT 1"); diff --git a/core/Cargo.toml b/core/Cargo.toml index d5651a2e4..cae081cb9 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -84,6 +84,7 @@ aegis = "0.9.0" twox-hash = "2.1.1" intrusive-collections = "0.9.7" roaring = "0.11.2" +simsimd = "6.5.3" [build-dependencies] chrono = { workspace = true, default-features = false } diff --git a/core/vector/operations/distance_cos.rs b/core/vector/operations/distance_cos.rs index aaa2c86f6..437d069c6 100644 --- a/core/vector/operations/distance_cos.rs +++ b/core/vector/operations/distance_cos.rs @@ -2,6 +2,7 @@ use crate::{ vector::vector_types::{Vector, VectorSparse, VectorType}, LimboError, Result, }; +use simsimd::SpatialSimilarity; pub fn vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { if v1.dims != v2.dims { @@ -15,11 +16,23 @@ pub fn vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { )); } match v1.vector_type { - VectorType::Float32Dense => Ok(vector_f32_distance_cos( + #[cfg(not(target_family = "wasm"))] + VectorType::Float32Dense => Ok(vector_f32_distance_cos_simsimd( v1.as_f32_slice(), v2.as_f32_slice(), )), - VectorType::Float64Dense => Ok(vector_f64_distance_cos( + #[cfg(target_family = "wasm")] + VectorType::Float32Dense => Ok(vector_f32_distance_cos_rust( + v1.as_f32_slice(), + v2.as_f32_slice(), + )), + #[cfg(not(target_family = "wasm"))] + VectorType::Float64Dense => Ok(vector_f64_distance_cos_simsimd( + v1.as_f64_slice(), + v2.as_f64_slice(), + )), + #[cfg(target_family = "wasm")] + VectorType::Float64Dense => Ok(vector_f64_distance_cos_rust( v1.as_f64_slice(), v2.as_f64_slice(), )), @@ -30,44 +43,44 @@ pub fn vector_distance_cos(v1: &Vector, v2: &Vector) -> Result { } } -fn vector_f32_distance_cos(v1: &[f32], v2: &[f32]) -> f64 { - let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); - - let dims = v1.len(); - for i in 0..dims { - let e1 = v1[i]; - let e2 = v2[i]; - dot += e1 * e2; - norm1 += e1 * e1; - norm2 += e2 * e2; - } - - // Check for zero norms to avoid division by zero - if norm1 == 0.0 || norm2 == 0.0 { - return f64::NAN; - } - - 1.0 - (dot / (norm1 * norm2).sqrt()) as f64 +#[allow(dead_code)] +fn vector_f32_distance_cos_simsimd(v1: &[f32], v2: &[f32]) -> f64 { + f32::cosine(v1, v2).unwrap_or(f64::NAN) } -fn vector_f64_distance_cos(v1: &[f64], v2: &[f64]) -> f64 { +// SimSIMD do not support WASM for now, so we have alternative implementation: https://github.com/ashvardanian/SimSIMD/issues/189 +#[allow(dead_code)] +fn vector_f32_distance_cos_rust(v1: &[f32], v2: &[f32]) -> f64 { let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); - - let dims = v1.len(); - for i in 0..dims { - let e1 = v1[i]; - let e2 = v2[i]; - dot += e1 * e2; - norm1 += e1 * e1; - norm2 += e2 * e2; + for (a, b) in v1.iter().zip(v2.iter()) { + dot += a * b; + norm1 += a * a; + norm2 += b * b; } - - // Check for zero norms if norm1 == 0.0 || norm2 == 0.0 { - return f64::NAN; + return 0.0; } + (1.0 - dot / (norm1 * norm2).sqrt()) as f64 +} - 1.0 - (dot / (norm1 * norm2).sqrt()) +#[allow(dead_code)] +fn vector_f64_distance_cos_simsimd(v1: &[f64], v2: &[f64]) -> f64 { + f64::cosine(v1, v2).unwrap_or(f64::NAN) +} + +// SimSIMD do not support WASM for now, so we have alternative implementation: https://github.com/ashvardanian/SimSIMD/issues/189 +#[allow(dead_code)] +fn vector_f64_distance_cos_rust(v1: &[f64], v2: &[f64]) -> f64 { + let (mut dot, mut norm1, mut norm2) = (0.0, 0.0, 0.0); + for (a, b) in v1.iter().zip(v2.iter()) { + dot += a * b; + norm1 += a * a; + norm2 += b * b; + } + if norm1 == 0.0 || norm2 == 0.0 { + return 0.0; + } + 1.0 - dot / (norm1 * norm2).sqrt() } fn vector_f32_sparse_distance_cos(v1: VectorSparse, v2: VectorSparse) -> f64 { @@ -120,20 +133,26 @@ mod tests { #[test] fn test_vector_distance_cos_f32() { - assert!(vector_f32_distance_cos(&[], &[]).is_nan()); - assert!(vector_f32_distance_cos(&[1.0, 2.0], &[0.0, 0.0]).is_nan()); - assert_eq!(vector_f32_distance_cos(&[1.0, 2.0], &[1.0, 2.0]), 0.0); - assert_eq!(vector_f32_distance_cos(&[1.0, 2.0], &[-1.0, -2.0]), 2.0); - assert_eq!(vector_f32_distance_cos(&[1.0, 2.0], &[-2.0, 1.0]), 1.0); + assert_eq!(vector_f32_distance_cos_simsimd(&[], &[]), 0.0); + assert_eq!( + vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[0.0, 0.0]), + 1.0 + ); + assert!(vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[1.0, 2.0]).abs() < 1e-9); + assert!((vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[-1.0, -2.0]) - 2.0).abs() < 1e-9); + assert!((vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[-2.0, 1.0]) - 1.0).abs() < 1e-9); } #[test] fn test_vector_distance_cos_f64() { - assert!(vector_f64_distance_cos(&[], &[]).is_nan()); - assert!(vector_f64_distance_cos(&[1.0, 2.0], &[0.0, 0.0]).is_nan()); - assert_eq!(vector_f64_distance_cos(&[1.0, 2.0], &[1.0, 2.0]), 0.0); - assert_eq!(vector_f64_distance_cos(&[1.0, 2.0], &[-1.0, -2.0]), 2.0); - assert_eq!(vector_f64_distance_cos(&[1.0, 2.0], &[-2.0, 1.0]), 1.0); + assert_eq!(vector_f64_distance_cos_simsimd(&[], &[]), 0.0); + assert_eq!( + vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[0.0, 0.0]), + 1.0 + ); + assert!(vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[1.0, 2.0]).abs() < 1e-9); + assert!((vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[-1.0, -2.0]) - 2.0).abs() < 1e-9); + assert!((vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[-2.0, 1.0]) - 1.0).abs() < 1e-9); } #[test] @@ -148,7 +167,7 @@ mod tests { idx: &[1, 2], values: &[1.0, 3.0] }, - ) - vector_f32_distance_cos(&[1.0, 2.0, 0.0], &[0.0, 1.0, 3.0])) + ) - vector_f32_distance_cos_simsimd(&[1.0, 2.0, 0.0], &[0.0, 1.0, 3.0])) .abs() < 1e-7 ); @@ -169,4 +188,30 @@ mod tests { (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-6 } + + #[quickcheck] + fn prop_vector_distance_cos_rust_vs_simsimd_f32( + v1: ArbitraryVector<100>, + v2: ArbitraryVector<100>, + ) -> bool { + let v1 = vector_convert(v1.into(), VectorType::Float32Dense).unwrap(); + let v2 = vector_convert(v2.into(), VectorType::Float32Dense).unwrap(); + let d1 = vector_f32_distance_cos_rust(v1.as_f32_slice(), v2.as_f32_slice()); + let d2 = vector_f32_distance_cos_simsimd(v1.as_f32_slice(), v2.as_f32_slice()); + println!("d1 vs d2: {} vs {}", d1, d2); + (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-4 + } + + #[quickcheck] + fn prop_vector_distance_cos_rust_vs_simsimd_f64( + v1: ArbitraryVector<100>, + v2: ArbitraryVector<100>, + ) -> bool { + let v1 = vector_convert(v1.into(), VectorType::Float64Dense).unwrap(); + let v2 = vector_convert(v2.into(), VectorType::Float64Dense).unwrap(); + let d1 = vector_f64_distance_cos_rust(v1.as_f64_slice(), v2.as_f64_slice()); + let d2 = vector_f64_distance_cos_simsimd(v1.as_f64_slice(), v2.as_f64_slice()); + println!("d1 vs d2: {} vs {}", d1, d2); + (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-6 + } } diff --git a/core/vector/operations/distance_l2.rs b/core/vector/operations/distance_l2.rs index 68d01857a..84f21db14 100644 --- a/core/vector/operations/distance_l2.rs +++ b/core/vector/operations/distance_l2.rs @@ -2,6 +2,7 @@ use crate::{ vector::vector_types::{Vector, VectorSparse, VectorType}, LimboError, Result, }; +use simsimd::SpatialSimilarity; pub fn vector_distance_l2(v1: &Vector, v2: &Vector) -> Result { if v1.dims != v2.dims { @@ -15,12 +16,26 @@ pub fn vector_distance_l2(v1: &Vector, v2: &Vector) -> Result { )); } match v1.vector_type { - VectorType::Float32Dense => { - Ok(vector_f32_distance_l2(v1.as_f32_slice(), v2.as_f32_slice())) - } - VectorType::Float64Dense => { - Ok(vector_f64_distance_l2(v1.as_f64_slice(), v2.as_f64_slice())) - } + #[cfg(not(target_family = "wasm"))] + VectorType::Float32Dense => Ok(vector_f32_distance_l2_simsimd( + v1.as_f32_slice(), + v2.as_f32_slice(), + )), + #[cfg(target_family = "wasm")] + VectorType::Float32Dense => Ok(vector_f32_distance_l2_rust( + v1.as_f32_slice(), + v2.as_f32_slice(), + )), + #[cfg(not(target_family = "wasm"))] + VectorType::Float64Dense => Ok(vector_f64_distance_l2_simsimd( + v1.as_f64_slice(), + v2.as_f64_slice(), + )), + #[cfg(target_family = "wasm")] + VectorType::Float64Dense => Ok(vector_f64_distance_l2_rust( + v1.as_f64_slice(), + v2.as_f64_slice(), + )), VectorType::Float32Sparse => Ok(vector_f32_sparse_distance_l2( v1.as_f32_sparse(), v2.as_f32_sparse(), @@ -28,7 +43,14 @@ pub fn vector_distance_l2(v1: &Vector, v2: &Vector) -> Result { } } -fn vector_f32_distance_l2(v1: &[f32], v2: &[f32]) -> f64 { +#[allow(dead_code)] +fn vector_f32_distance_l2_simsimd(v1: &[f32], v2: &[f32]) -> f64 { + f32::euclidean(v1, v2).unwrap_or(f64::NAN) +} + +// SimSIMD do not support WASM for now, so we have alternative implementation: https://github.com/ashvardanian/SimSIMD/issues/189 +#[allow(dead_code)] +fn vector_f32_distance_l2_rust(v1: &[f32], v2: &[f32]) -> f64 { let sum = v1 .iter() .zip(v2.iter()) @@ -37,7 +59,14 @@ fn vector_f32_distance_l2(v1: &[f32], v2: &[f32]) -> f64 { sum.sqrt() } -fn vector_f64_distance_l2(v1: &[f64], v2: &[f64]) -> f64 { +#[allow(dead_code)] +fn vector_f64_distance_l2_simsimd(v1: &[f64], v2: &[f64]) -> f64 { + f64::euclidean(v1, v2).unwrap_or(f64::NAN) +} + +// SimSIMD do not support WASM for now, so we have alternative implementation: https://github.com/ashvardanian/SimSIMD/issues/189 +#[allow(dead_code)] +fn vector_f64_distance_l2_rust(v1: &[f64], v2: &[f64]) -> f64 { let sum = v1 .iter() .zip(v2.iter()) @@ -102,7 +131,7 @@ mod tests { ]; let results = vectors .iter() - .map(|v| vector_f32_distance_l2(&query, v)) + .map(|v| vector_f32_distance_l2_rust(&query, v)) .collect::>(); assert_eq!(results, expected); } @@ -111,41 +140,41 @@ mod tests { fn test_vector_distance_l2_odd_len() { let v = (0..5).map(|x| x as f32).collect::>(); let query = (2..7).map(|x| x as f32).collect::>(); - assert_eq!(vector_f32_distance_l2(&v, &query), 20.0_f64.sqrt()); + assert_eq!(vector_f32_distance_l2_rust(&v, &query), 20.0_f64.sqrt()); } #[test] fn test_vector_distance_l2_f32() { - assert_eq!(vector_f32_distance_l2(&[], &[]), 0.0); + assert_eq!(vector_f32_distance_l2_rust(&[], &[]), 0.0); assert_eq!( - vector_f32_distance_l2(&[1.0, 2.0], &[0.0, 0.0]), + vector_f32_distance_l2_rust(&[1.0, 2.0], &[0.0, 0.0]), (1f64 + 2f64 * 2f64).sqrt() ); - assert_eq!(vector_f32_distance_l2(&[1.0, 2.0], &[1.0, 2.0]), 0.0); + assert_eq!(vector_f32_distance_l2_rust(&[1.0, 2.0], &[1.0, 2.0]), 0.0); assert_eq!( - vector_f32_distance_l2(&[1.0, 2.0], &[-1.0, -2.0]), + vector_f32_distance_l2_rust(&[1.0, 2.0], &[-1.0, -2.0]), (2f64 * 2f64 + 4f64 * 4f64).sqrt() ); assert_eq!( - vector_f32_distance_l2(&[1.0, 2.0], &[-2.0, 1.0]), + vector_f32_distance_l2_rust(&[1.0, 2.0], &[-2.0, 1.0]), (3f64 * 3f64 + 1f64 * 1f64).sqrt() ); } #[test] fn test_vector_distance_l2_f64() { - assert_eq!(vector_f64_distance_l2(&[], &[]), 0.0); + assert_eq!(vector_f64_distance_l2_rust(&[], &[]), 0.0); assert_eq!( - vector_f64_distance_l2(&[1.0, 2.0], &[0.0, 0.0]), + vector_f64_distance_l2_rust(&[1.0, 2.0], &[0.0, 0.0]), (1f64 + 2f64 * 2f64).sqrt() ); - assert_eq!(vector_f64_distance_l2(&[1.0, 2.0], &[1.0, 2.0]), 0.0); + assert_eq!(vector_f64_distance_l2_rust(&[1.0, 2.0], &[1.0, 2.0]), 0.0); assert_eq!( - vector_f64_distance_l2(&[1.0, 2.0], &[-1.0, -2.0]), + vector_f64_distance_l2_rust(&[1.0, 2.0], &[-1.0, -2.0]), (2f64 * 2f64 + 4f64 * 4f64).sqrt() ); assert_eq!( - vector_f64_distance_l2(&[1.0, 2.0], &[-2.0, 1.0]), + vector_f64_distance_l2_rust(&[1.0, 2.0], &[-2.0, 1.0]), (3f64 * 3f64 + 1f64 * 1f64).sqrt() ); } @@ -162,7 +191,7 @@ mod tests { idx: &[1, 2], values: &[1.0, 3.0] }, - ) - vector_f32_distance_l2(&[1.0, 2.0, 0.0], &[0.0, 1.0, 3.0])) + ) - vector_f32_distance_l2_rust(&[1.0, 2.0, 0.0], &[0.0, 1.0, 3.0])) .abs() < 1e-7 ); @@ -183,4 +212,28 @@ mod tests { (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-6 } + + #[quickcheck] + fn prop_vector_distance_l2_rust_vs_simsimd_f32( + v1: ArbitraryVector<100>, + v2: ArbitraryVector<100>, + ) -> bool { + let v1 = vector_convert(v1.into(), VectorType::Float32Dense).unwrap(); + let v2 = vector_convert(v2.into(), VectorType::Float32Dense).unwrap(); + let d1 = vector_f32_distance_l2_rust(v1.as_f32_slice(), v2.as_f32_slice()); + let d2 = vector_f32_distance_l2_simsimd(v1.as_f32_slice(), v2.as_f32_slice()); + (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-4 + } + + #[quickcheck] + fn prop_vector_distance_l2_rust_vs_simsimd_f64( + v1: ArbitraryVector<100>, + v2: ArbitraryVector<100>, + ) -> bool { + let v1 = vector_convert(v1.into(), VectorType::Float64Dense).unwrap(); + let v2 = vector_convert(v2.into(), VectorType::Float64Dense).unwrap(); + let d1 = vector_f64_distance_l2_rust(v1.as_f64_slice(), v2.as_f64_slice()); + let d2 = vector_f64_distance_l2_simsimd(v1.as_f64_slice(), v2.as_f64_slice()); + (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-6 + } } From b67fabdd62b5ecf0cf1351b9b20a4d9e194cb12a Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 14:34:27 +0300 Subject: [PATCH 333/428] Fix git directory resolution in simulator to support worktrees sim cannot be run in a git worktree on main --- simulator/runner/bugbase.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/simulator/runner/bugbase.rs b/simulator/runner/bugbase.rs index a34847655..89ad25e71 100644 --- a/simulator/runner/bugbase.rs +++ b/simulator/runner/bugbase.rs @@ -470,12 +470,18 @@ impl BugBase { } fn find_git_dir(start_path: impl AsRef) -> Option { - // HACK ignores stuff like bare repo, worktree, etc. let mut current = start_path.as_ref().to_path_buf(); loop { let git_path = current.join(".git"); if git_path.is_dir() { return Some(git_path); + } else if git_path.is_file() { + // Handle git worktrees - .git is a file containing "gitdir: " + if let Ok(contents) = read_to_string(&git_path) { + if let Some(gitdir) = contents.strip_prefix("gitdir: ") { + return Some(PathBuf::from(gitdir)); + } + } } if !current.pop() { return None; From 2483d08bca3f78a0b04456d052deb772064e6e5e Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 21 Oct 2025 16:28:00 +0400 Subject: [PATCH 334/428] do not allocate if possible --- core/vector/mod.rs | 4 +- core/vector/operations/concat.rs | 26 ++++--- core/vector/operations/convert.rs | 20 +++-- core/vector/operations/serialize.rs | 17 +++-- core/vector/operations/slice.rs | 14 ++-- core/vector/operations/text.rs | 18 +++-- core/vector/vector_types.rs | 111 +++++++++++++++++++--------- 7 files changed, 136 insertions(+), 74 deletions(-) diff --git a/core/vector/mod.rs b/core/vector/mod.rs index 514780a78..14cb2a462 100644 --- a/core/vector/mod.rs +++ b/core/vector/mod.rs @@ -20,7 +20,7 @@ pub fn parse_vector(value: &Register, type_hint: Option) -> Result Err(LimboError::ConversionError( "Invalid vector type".to_string(), @@ -81,7 +81,7 @@ pub fn vector_extract(args: &[Register]) -> Result { return Ok(Value::build_text("[]")); } - let vector = Vector::from_blob(blob.to_vec())?; + let vector = Vector::from_vec(blob.to_vec())?; Ok(Value::build_text(operations::text::vector_to_text(&vector))) } diff --git a/core/vector/operations/concat.rs b/core/vector/operations/concat.rs index 3e7f6a4f1..178258838 100644 --- a/core/vector/operations/concat.rs +++ b/core/vector/operations/concat.rs @@ -3,7 +3,7 @@ use crate::{ LimboError, Result, }; -pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { +pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result> { if v1.vector_type != v2.vector_type { return Err(LimboError::ConversionError( "Mismatched vector types".into(), @@ -12,17 +12,17 @@ pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { let data = match v1.vector_type { VectorType::Float32Dense | VectorType::Float64Dense => { - let mut data = Vec::with_capacity(v1.data.len() + v2.data.len()); - data.extend_from_slice(&v1.data); - data.extend_from_slice(&v2.data); + let mut data = Vec::with_capacity(v1.bin_len() + v2.bin_len()); + data.extend_from_slice(&v1.bin_data()); + data.extend_from_slice(&v2.bin_data()); data } VectorType::Float32Sparse => { - let mut data = Vec::with_capacity(v1.data.len() + v2.data.len()); - data.extend_from_slice(&v1.data[..v1.data.len() / 2]); - data.extend_from_slice(&v2.data[..v2.data.len() / 2]); - data.extend_from_slice(&v1.data[v1.data.len() / 2..]); - data.extend_from_slice(&v2.data[v2.data.len() / 2..]); + let mut data = Vec::with_capacity(v1.bin_len() + v2.bin_len()); + data.extend_from_slice(&v1.bin_data()[..v1.bin_len() / 2]); + data.extend_from_slice(&v2.bin_data()[..v2.bin_len() / 2]); + data.extend_from_slice(&v1.bin_data()[v1.bin_len() / 2..]); + data.extend_from_slice(&v2.bin_data()[v2.bin_len() / 2..]); data } }; @@ -30,7 +30,8 @@ pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result { Ok(Vector { vector_type: v1.vector_type, dims: v1.dims + v2.dims, - data, + owned: Some(data), + refer: None, }) } @@ -41,7 +42,7 @@ mod tests { vector_types::{Vector, VectorType}, }; - fn float32_vec_from(slice: &[f32]) -> Vector { + fn float32_vec_from(slice: &[f32]) -> Vector<'static> { let mut data = Vec::new(); for &v in slice { data.extend_from_slice(&v.to_le_bytes()); @@ -50,7 +51,8 @@ mod tests { Vector { vector_type: VectorType::Float32Dense, dims: slice.len(), - data, + owned: Some(data), + refer: None, } } diff --git a/core/vector/operations/convert.rs b/core/vector/operations/convert.rs index 619c7be26..1db0ab99e 100644 --- a/core/vector/operations/convert.rs +++ b/core/vector/operations/convert.rs @@ -69,7 +69,7 @@ mod tests { fn assert_vectors(v1: &Vector, v2: &Vector) { assert_eq!(v1.vector_type, v2.vector_type); assert_eq!(v1.dims, v2.dims); - assert_eq!(v1.data, v2.data); + assert_eq!(v1.bin_data(), v2.bin_data()); } #[test] @@ -77,30 +77,33 @@ mod tests { let vf32 = Vector { vector_type: VectorType::Float32Dense, dims: 3, - data: concat(&[ + owned: Some(concat(&[ 1.0f32.to_le_bytes(), 0.0f32.to_le_bytes(), 2.0f32.to_le_bytes(), - ]), + ])), + refer: None, }; let vf64 = Vector { vector_type: VectorType::Float64Dense, dims: 3, - data: concat(&[ + owned: Some(concat(&[ 1.0f64.to_le_bytes(), 0.0f64.to_le_bytes(), 2.0f64.to_le_bytes(), - ]), + ])), + refer: None, }; let vf32_sparse = Vector { vector_type: VectorType::Float32Sparse, dims: 3, - data: concat(&[ + owned: Some(concat(&[ 1.0f32.to_le_bytes(), 2.0f32.to_le_bytes(), 0u32.to_le_bytes(), 2u32.to_le_bytes(), - ]), + ])), + refer: None, }; let vectors = [vf32, vf64, vf32_sparse]; @@ -110,7 +113,8 @@ mod tests { let v_copy = Vector { vector_type: v1.vector_type, dims: v1.dims, - data: v1.data.clone(), + owned: v1.owned.clone(), + refer: None, }; assert_vectors(&vector_convert(v_copy, v2.vector_type).unwrap(), v2); } diff --git a/core/vector/operations/serialize.rs b/core/vector/operations/serialize.rs index 622819a59..8f8c3af9d 100644 --- a/core/vector/operations/serialize.rs +++ b/core/vector/operations/serialize.rs @@ -3,17 +3,20 @@ use crate::{ Value, }; -pub fn vector_serialize(mut x: Vector) -> Value { +pub fn vector_serialize(x: Vector) -> Value { match x.vector_type { - VectorType::Float32Dense => Value::from_blob(x.data), + VectorType::Float32Dense => Value::from_blob(x.bin_eject()), VectorType::Float64Dense => { - x.data.push(2); - Value::from_blob(x.data) + let mut data = x.bin_eject(); + data.push(2); + Value::from_blob(data) } VectorType::Float32Sparse => { - x.data.extend_from_slice(&(x.dims as u32).to_le_bytes()); - x.data.push(9); - Value::from_blob(x.data) + let dims = x.dims; + let mut data = x.bin_eject(); + data.extend_from_slice(&(dims as u32).to_le_bytes()); + data.push(9); + Value::from_blob(data) } } } diff --git a/core/vector/operations/slice.rs b/core/vector/operations/slice.rs index 003195070..a1f6b99d8 100644 --- a/core/vector/operations/slice.rs +++ b/core/vector/operations/slice.rs @@ -3,7 +3,7 @@ use crate::{ LimboError, Result, }; -pub fn vector_slice(vector: &Vector, start: usize, end: usize) -> Result { +pub fn vector_slice(vector: &Vector, start: usize, end: usize) -> Result> { if start > end { return Err(LimboError::InvalidArgument( "start index must not be greater than end index".into(), @@ -18,12 +18,14 @@ pub fn vector_slice(vector: &Vector, start: usize, end: usize) -> Result VectorType::Float32Dense => Ok(Vector { vector_type: vector.vector_type, dims: end - start, - data: vector.data[start * 4..end * 4].to_vec(), + owned: Some(vector.bin_data()[start * 4..end * 4].to_vec()), + refer: None, }), VectorType::Float64Dense => Ok(Vector { vector_type: vector.vector_type, dims: end - start, - data: vector.data[start * 8..end * 8].to_vec(), + owned: Some(vector.bin_data()[start * 8..end * 8].to_vec()), + refer: None, }), VectorType::Float32Sparse => { let mut values = Vec::new(); @@ -41,7 +43,8 @@ pub fn vector_slice(vector: &Vector, start: usize, end: usize) -> Result Ok(Vector { vector_type: vector.vector_type, dims: end - start, - data: values, + owned: Some(values), + refer: None, }) } } @@ -63,7 +66,8 @@ mod tests { Vector { vector_type: VectorType::Float32Dense, dims: slice.len(), - data, + owned: Some(data), + refer: None, } } diff --git a/core/vector/operations/text.rs b/core/vector/operations/text.rs index c6403d812..810bd8bc0 100644 --- a/core/vector/operations/text.rs +++ b/core/vector/operations/text.rs @@ -56,7 +56,8 @@ pub fn vector_from_text(vector_type: VectorType, text: &str) -> Result { Vector { vector_type, dims: 0, - data: Vec::new(), + owned: Some(Vec::new()), + refer: None, } } }); @@ -69,7 +70,7 @@ pub fn vector_from_text(vector_type: VectorType, text: &str) -> Result { } } -fn vector32_from_text<'a>(tokens: impl Iterator) -> Result { +fn vector32_from_text<'a>(tokens: impl Iterator) -> Result> { let mut data = Vec::new(); for token in tokens { let value = token @@ -85,11 +86,12 @@ fn vector32_from_text<'a>(tokens: impl Iterator) -> Result(tokens: impl Iterator) -> Result { +fn vector64_from_text<'a>(tokens: impl Iterator) -> Result> { let mut data = Vec::new(); for token in tokens { let value = token @@ -105,11 +107,12 @@ fn vector64_from_text<'a>(tokens: impl Iterator) -> Result(tokens: impl Iterator) -> Result { +fn vector32_sparse_from_text<'a>(tokens: impl Iterator) -> Result> { let mut idx = Vec::new(); let mut values = Vec::new(); let mut dims = 0u32; @@ -135,6 +138,7 @@ fn vector32_sparse_from_text<'a>(tokens: impl Iterator) -> Resul Ok(Vector { vector_type: VectorType::Float32Sparse, dims: dims as usize, - data: values, + owned: Some(values), + refer: None, }) } diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index dafa2ca23..c6c599090 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -8,10 +8,11 @@ pub enum VectorType { } #[derive(Debug)] -pub struct Vector { +pub struct Vector<'a> { pub vector_type: VectorType, pub dims: usize, - pub data: Vec, + pub owned: Option>, + pub refer: Option<&'a [u8]>, } #[derive(Debug)] @@ -20,14 +21,14 @@ pub struct VectorSparse<'a, T: std::fmt::Debug> { pub values: &'a [T], } -impl Vector { - pub fn vector_type(mut blob: Vec) -> Result<(VectorType, Vec)> { +impl<'a> Vector<'a> { + pub fn vector_type(blob: &[u8]) -> Result<(VectorType, usize)> { // Even-sized blobs are always float32. if blob.len() % 2 == 0 { - return Ok((VectorType::Float32Dense, blob)); + return Ok((VectorType::Float32Dense, blob.len())); } // Odd-sized blobs have type byte at the end - let vector_type = blob.pop().unwrap(); + let vector_type = blob[blob.len() - 1]; /* vector types used by LibSQL: (see https://github.com/tursodatabase/libsql/blob/a55bf61192bdb89e97568de593c4af5b70d24bde/libsql-sqlite3/src/vectorInt.h#L52) @@ -39,12 +40,12 @@ impl Vector { #define VECTOR_TYPE_FLOATB16 6 */ match vector_type { - 1 => Ok((VectorType::Float32Dense, blob)), - 2 => Ok((VectorType::Float64Dense, blob)), + 1 => Ok((VectorType::Float32Dense, blob.len() - 1)), + 2 => Ok((VectorType::Float64Dense, blob.len() - 1)), 3..=6 => Err(LimboError::ConversionError( "unsupported vector type from LibSQL".to_string(), )), - 9 => Ok((VectorType::Float32Sparse, blob)), + 9 => Ok((VectorType::Float32Sparse, blob.len() - 1)), _ => Err(LimboError::ConversionError(format!( "unknown vector type: {vector_type}" ))), @@ -63,7 +64,8 @@ impl Vector { Self { vector_type: VectorType::Float32Dense, dims, - data: values, + owned: Some(values), + refer: None, } } pub fn from_f64(mut values_f64: Vec) -> Self { @@ -79,7 +81,8 @@ impl Vector { Self { vector_type: VectorType::Float64Dense, dims, - data: values, + owned: Some(values), + refer: None, } } pub fn from_f32_sparse(dims: usize, mut values_f32: Vec, mut idx_u32: Vec) -> Self { @@ -105,14 +108,27 @@ impl Vector { Self { vector_type: VectorType::Float32Sparse, dims, - data: values, + owned: Some(values), + refer: None, } } - pub fn from_blob(blob: Vec) -> Result { - let (vector_type, data) = Self::vector_type(blob)?; - Self::from_data(vector_type, data) + pub fn from_vec(mut blob: Vec) -> Result { + let (vector_type, len) = Self::vector_type(&blob)?; + blob.truncate(len); + Self::from_data(vector_type, Some(blob), None) } - pub fn from_data(vector_type: VectorType, mut data: Vec) -> Result { + pub fn from_slice(blob: &'a [u8]) -> Result { + let (vector_type, len) = Self::vector_type(&blob)?; + Self::from_data(vector_type, None, Some(&blob[..len])) + } + pub fn from_data( + vector_type: VectorType, + owned: Option>, + refer: Option<&'a [u8]>, + ) -> Result { + let owned_slice = owned.as_ref().map(|x| x.as_slice()); + let refer_slice = refer.as_ref().map(|&x| x); + let data = owned_slice.unwrap_or_else(|| refer_slice.unwrap()); match vector_type { VectorType::Float32Dense => { if data.len() % 4 != 0 { @@ -124,7 +140,8 @@ impl Vector { Ok(Vector { vector_type, dims: data.len() / 4, - data, + owned, + refer, }) } VectorType::Float64Dense => { @@ -137,7 +154,8 @@ impl Vector { Ok(Vector { vector_type, dims: data.len() / 8, - data, + owned, + refer, }) } VectorType::Float32Sparse => { @@ -147,17 +165,41 @@ impl Vector { data.len(), ))); } - let dims_bytes = data.split_off(data.len() - 4); + let original_len = data.len(); + let dims_bytes = &data[original_len - 4..]; let dims = u32::from_le_bytes(dims_bytes.try_into().unwrap()) as usize; + let owned = owned.map(|mut x| { + x.truncate(original_len - 4); + x + }); + let refer = refer.map(|x| &x[0..original_len - 4]); let vector = Vector { vector_type, dims, - data, + owned, + refer, }; Ok(vector) } } } + + pub fn bin_len(&self) -> usize { + let owned = self.owned.as_ref().map(|x| x.len()); + let refer = self.refer.as_ref().map(|x| x.len()); + owned.unwrap_or_else(|| refer.unwrap()) + } + + pub fn bin_data(&'a self) -> &'a [u8] { + let owned = self.owned.as_ref().map(|x| x.as_slice()); + let refer = self.refer.as_ref().map(|&x| x); + owned.unwrap_or_else(|| refer.unwrap()) + } + + pub fn bin_eject(self) -> Vec { + self.owned.unwrap_or_else(|| self.refer.unwrap().to_vec()) + } + /// # Safety /// /// This method is used to reinterpret the underlying `Vec` data @@ -171,12 +213,12 @@ impl Vector { } assert_eq!( - self.data.len(), + self.bin_len(), self.dims * std::mem::size_of::(), "data length must equal dims * size_of::()" ); - let ptr = self.data.as_ptr(); + let ptr = self.bin_data().as_ptr(); let align = std::mem::align_of::(); assert_eq!( ptr.align_offset(align), @@ -200,12 +242,12 @@ impl Vector { } assert_eq!( - self.data.len(), + self.bin_len(), self.dims * std::mem::size_of::(), "data length must equal dims * size_of::()" ); - let ptr = self.data.as_ptr(); + let ptr = self.bin_data().as_ptr(); let align = std::mem::align_of::(); assert_eq!( ptr.align_offset(align), @@ -218,14 +260,14 @@ impl Vector { pub fn as_f32_sparse(&self) -> VectorSparse<'_, f32> { debug_assert!(self.vector_type == VectorType::Float32Sparse); - let ptr = self.data.as_ptr(); + let ptr = self.bin_data().as_ptr(); let align = std::mem::align_of::(); assert_eq!( ptr.align_offset(align), 0, "data pointer must be aligned to {align} bytes for f32 access" ); - let length = self.data.len() / 4 / 2; + let length = self.bin_data().len() / 4 / 2; let values = unsafe { std::slice::from_raw_parts(ptr as *const f32, length) }; let idx = unsafe { std::slice::from_raw_parts((ptr as *const u32).add(length), length) }; debug_assert!(idx.is_sorted()); @@ -292,12 +334,13 @@ pub(crate) mod tests { } /// Convert an ArbitraryVector to a Vector. - impl From> for Vector { + impl From> for Vector<'static> { fn from(v: ArbitraryVector) -> Self { Vector { vector_type: v.vector_type, dims: DIMS, - data: v.data, + owned: Some(v.data), + refer: None, } } } @@ -357,7 +400,7 @@ pub(crate) mod tests { let vtype = v.vector_type; let value = operations::serialize::vector_serialize(v); let blob = value.to_blob().unwrap().to_vec(); - match Vector::vector_type(blob) { + match Vector::vector_type(&blob) { Ok((detected_type, _)) => detected_type == vtype, Err(_) => false, } @@ -396,12 +439,12 @@ pub(crate) mod tests { VectorType::Float32Dense => { let slice = v.as_f32_slice(); // Check if the slice length matches the dimensions and the data length is correct (4 bytes per float) - slice.len() == DIMS && (slice.len() * 4 == v.data.len()) + slice.len() == DIMS && (slice.len() * 4 == v.bin_len()) } VectorType::Float64Dense => { let slice = v.as_f64_slice(); // Check if the slice length matches the dimensions and the data length is correct (8 bytes per float) - slice.len() == DIMS && (slice.len() * 8 == v.data.len()) + slice.len() == DIMS && (slice.len() * 8 == v.bin_len()) } _ => unreachable!(), } @@ -454,12 +497,14 @@ pub(crate) mod tests { let a = Vector { vector_type: VectorType::Float32Dense, dims: 2, - data: vec![0, 0, 0, 0, 52, 208, 106, 63], + owned: Some(vec![0, 0, 0, 0, 52, 208, 106, 63]), + refer: None, }; let b = Vector { vector_type: VectorType::Float32Dense, dims: 2, - data: vec![0, 0, 0, 0, 58, 100, 45, 192], + owned: Some(vec![0, 0, 0, 0, 58, 100, 45, 192]), + refer: None, }; assert!( (operations::distance_cos::vector_distance_cos(&a, &b).unwrap() - 2.0).abs() <= 1e-6 From 6139dde081a6111893e517f3a8cec31bbfc608f8 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 21 Oct 2025 16:00:04 +0300 Subject: [PATCH 335/428] Revert "Merge 'core/translate: fix ALTER COLUMN to propagate other constraint references' from Preston Thorpe" This reverts commit 1151f49ff40a191dfed68ab32beb4c80da6bd291, reversing changes made to f4da2194f4c207681430f70e698a55de9cebcbeb. --- core/schema.rs | 67 +--------- core/util.rs | 32 ----- core/vdbe/execute.rs | 269 +++++++------------------------------- testing/alter_column.test | 216 ------------------------------ 4 files changed, 52 insertions(+), 532 deletions(-) diff --git a/core/schema.rs b/core/schema.rs index a367e47a8..ce30d3950 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1358,13 +1358,12 @@ impl BTreeTable { /// `CREATE TABLE t (x)`, whereas sqlite stores it with the original extra whitespace. pub fn to_sql(&self) -> String { let mut sql = format!("CREATE TABLE {} (", self.name); - - // Add columns for (i, column) in self.columns.iter().enumerate() { if i > 0 { sql.push_str(", "); } - // Wrap column name in square brackets if it contains special characters + + // we need to wrap the column name in square brackets if it contains special characters let column_name = column.name.as_ref().expect("column name is None"); if identifier_contains_special_chars(column_name) { sql.push('['); @@ -1373,6 +1372,7 @@ impl BTreeTable { } else { sql.push_str(column_name); } + if !column.ty_str.is_empty() { sql.push(' '); sql.push_str(&column.ty_str); @@ -1380,75 +1380,20 @@ impl BTreeTable { if column.notnull { sql.push_str(" NOT NULL"); } + if column.unique { sql.push_str(" UNIQUE"); } + if column.primary_key { sql.push_str(" PRIMARY KEY"); } + if let Some(default) = &column.default { sql.push_str(" DEFAULT "); sql.push_str(&default.to_string()); } } - - let has_table_pk = !self.primary_key_columns.is_empty(); - // Add table-level PRIMARY KEY constraint if exists - if has_table_pk { - sql.push_str(", PRIMARY KEY ("); - for (i, col) in self.primary_key_columns.iter().enumerate() { - if i > 0 { - sql.push_str(", "); - } - sql.push_str(&col.0); - } - sql.push(')'); - } - - for fk in &self.foreign_keys { - sql.push_str(", FOREIGN KEY ("); - for (i, col) in fk.child_columns.iter().enumerate() { - if i > 0 { - sql.push_str(", "); - } - sql.push_str(col); - } - sql.push_str(") REFERENCES "); - sql.push_str(&fk.parent_table); - sql.push('('); - for (i, col) in fk.parent_columns.iter().enumerate() { - if i > 0 { - sql.push_str(", "); - } - sql.push_str(col); - } - sql.push(')'); - - // Add ON DELETE/UPDATE actions, NoAction is default so just make empty in that case - if fk.on_delete != RefAct::NoAction { - sql.push_str(" ON DELETE "); - sql.push_str(match fk.on_delete { - RefAct::SetNull => "SET NULL", - RefAct::SetDefault => "SET DEFAULT", - RefAct::Cascade => "CASCADE", - RefAct::Restrict => "RESTRICT", - _ => "", - }); - } - if fk.on_update != RefAct::NoAction { - sql.push_str(" ON UPDATE "); - sql.push_str(match fk.on_update { - RefAct::SetNull => "SET NULL", - RefAct::SetDefault => "SET DEFAULT", - RefAct::Cascade => "CASCADE", - RefAct::Restrict => "RESTRICT", - _ => "", - }); - } - if fk.deferred { - sql.push_str(" DEFERRABLE INITIALLY DEFERRED"); - } - } sql.push(')'); sql } diff --git a/core/util.rs b/core/util.rs index 6caad2e05..1093c61ba 100644 --- a/core/util.rs +++ b/core/util.rs @@ -1331,38 +1331,6 @@ pub fn extract_view_columns( Ok(ViewColumnSchema { tables, columns }) } -pub fn rewrite_fk_parent_cols_if_self_ref( - clause: &mut ast::ForeignKeyClause, - table: &str, - from: &str, - to: &str, -) { - if normalize_ident(clause.tbl_name.as_str()) == normalize_ident(table) { - for c in &mut clause.columns { - if normalize_ident(c.col_name.as_str()) == normalize_ident(from) { - c.col_name = ast::Name::exact(to.to_owned()); - } - } - } -} -/// Update a column-level REFERENCES (col,...) constraint too. -pub fn rewrite_column_references_if_needed( - col: &mut ast::ColumnDefinition, - table: &str, - from: &str, - to: &str, -) { - for cc in &mut col.constraints { - if let ast::NamedColumnConstraint { - constraint: ast::ColumnConstraint::ForeignKey { clause, .. }, - .. - } = cc - { - rewrite_fk_parent_cols_if_self_ref(clause, table, from, to); - } - } -} - #[cfg(test)] pub mod tests { use super::*; diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 9291ead56..974538de0 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -17,9 +17,7 @@ use crate::types::{ compare_immutable, compare_records_generic, Extendable, IOCompletions, ImmutableRecord, SeekResult, Text, }; -use crate::util::{ - normalize_ident, rewrite_column_references_if_needed, rewrite_fk_parent_cols_if_self_ref, -}; +use crate::util::normalize_ident; use crate::vdbe::insn::InsertFlags; use crate::vdbe::{registers_to_ref_values, TxnCleanup}; use crate::vector::{vector32_sparse, vector_concat, vector_distance_jaccard, vector_slice}; @@ -75,7 +73,7 @@ use super::{ }; use parking_lot::RwLock; use rand::{thread_rng, Rng}; -use turso_parser::ast::{self, ForeignKeyClause, Name, SortOrder}; +use turso_parser::ast::{self, Name, SortOrder}; use turso_parser::parser::Parser; use super::{ @@ -5465,9 +5463,11 @@ pub fn op_function( .parse_column_definition(true) .unwrap(); - let rename_to = normalize_ident(column_def.col_name.as_str()); - let new_sql = 'sql: { + if table != tbl_name { + break 'sql None; + } + let Value::Text(sql) = sql else { break 'sql None; }; @@ -5521,160 +5521,34 @@ pub fn op_function( temporary, if_not_exists, } => { + if table != normalize_ident(tbl_name.name.as_str()) { + break 'sql None; + } + let ast::CreateTableBody::ColumnsAndConstraints { mut columns, - mut constraints, + constraints, options, } = body else { todo!() }; - let normalized_tbl_name = normalize_ident(tbl_name.name.as_str()); + let column = columns + .iter_mut() + .find(|column| { + column.col_name.as_str() == original_rename_from.as_str() + }) + .expect("column being renamed should be present"); - if normalized_tbl_name == table { - // This is the table being altered - update its column - let column = columns - .iter_mut() - .find(|column| { - column.col_name.as_str() - == original_rename_from.as_str() - }) - .expect("column being renamed should be present"); - - match alter_func { - AlterTableFunc::AlterColumn => *column = column_def.clone(), - AlterTableFunc::RenameColumn => { - column.col_name = column_def.col_name.clone() - } - _ => unreachable!(), - } - - // Update table-level constraints (PRIMARY KEY, UNIQUE, FOREIGN KEY) - for constraint in &mut constraints { - match &mut constraint.constraint { - ast::TableConstraint::PrimaryKey { - columns: pk_cols, - .. - } => { - for col in pk_cols { - let (ast::Expr::Name(ref name) - | ast::Expr::Id(ref name)) = *col.expr - else { - return Err(LimboError::ParseError("Unexpected expression in PRIMARY KEY constraint".to_string())); - }; - if normalize_ident(name.as_str()) == rename_from - { - *col.expr = ast::Expr::Name(Name::exact( - column_def.col_name.as_str().to_owned(), - )); - } - } - } - ast::TableConstraint::Unique { - columns: uniq_cols, - .. - } => { - for col in uniq_cols { - let (ast::Expr::Name(ref name) - | ast::Expr::Id(ref name)) = *col.expr - else { - return Err(LimboError::ParseError("Unexpected expression in UNIQUE constraint".to_string())); - }; - if normalize_ident(name.as_str()) == rename_from - { - *col.expr = ast::Expr::Name(Name::exact( - column_def.col_name.as_str().to_owned(), - )); - } - } - } - ast::TableConstraint::ForeignKey { - columns: child_cols, - clause, - .. - } => { - // Update child columns in this table's FK definitions - for child_col in child_cols { - if normalize_ident(child_col.col_name.as_str()) - == rename_from - { - child_col.col_name = Name::exact( - column_def.col_name.as_str().to_owned(), - ); - } - } - rewrite_fk_parent_cols_if_self_ref( - clause, - &normalized_tbl_name, - &rename_from, - column_def.col_name.as_str(), - ); - } - _ => {} - } - - for col in &mut columns { - rewrite_column_references_if_needed( - col, - &normalized_tbl_name, - &rename_from, - column_def.col_name.as_str(), - ); - } - } - } else { - // This is a different table, check if it has FKs referencing the renamed column - let mut fk_updated = false; - - for constraint in &mut constraints { - if let ast::TableConstraint::ForeignKey { - columns: _, - clause: - ForeignKeyClause { - tbl_name, - columns: parent_cols, - .. - }, - .. - } = &mut constraint.constraint - { - // Check if this FK references the table being altered - if normalize_ident(tbl_name.as_str()) == table { - // Update parent column references if they match the renamed column - for parent_col in parent_cols { - if normalize_ident(parent_col.col_name.as_str()) - == rename_from - { - parent_col.col_name = Name::exact( - column_def.col_name.as_str().to_owned(), - ); - fk_updated = true; - } - } - } - } - } - for col in &mut columns { - let before = fk_updated; - let mut local_col = col.clone(); - rewrite_column_references_if_needed( - &mut local_col, - &table, - &rename_from, - column_def.col_name.as_str(), - ); - if local_col != *col { - *col = local_col; - fk_updated = true; - } - } - - // Only return updated SQL if we actually changed something - if !fk_updated { - break 'sql None; + match alter_func { + AlterTableFunc::AlterColumn => *column = column_def, + AlterTableFunc::RenameColumn => { + column.col_name = column_def.col_name } + _ => unreachable!(), } + Some( ast::Stmt::CreateTable { tbl_name, @@ -5689,7 +5563,7 @@ pub fn op_function( .to_string(), ) } - _ => None, + _ => todo!(), } }; @@ -8364,94 +8238,43 @@ pub fn op_alter_column( .clone() }; let new_column = crate::schema::Column::from(definition); - let new_name = definition.col_name.as_str().to_owned(); conn.with_schema_mut(|schema| { - let table_arc = schema + let table = schema .tables .get_mut(&normalized_table_name) - .expect("table being ALTERed should be in schema"); - let table = Arc::make_mut(table_arc); + .expect("table being renamed should be in schema"); - let Table::BTree(ref mut btree_arc) = table else { - panic!("only btree tables can be altered"); + let table = Arc::make_mut(table); + + let Table::BTree(btree) = table else { + panic!("only btree tables can be renamed"); }; - let btree = Arc::make_mut(btree_arc); - let col = btree + + let btree = Arc::make_mut(btree); + + let column = btree .columns .get_mut(*column_index) - .expect("column being ALTERed should be in schema"); + .expect("renamed column should be in schema"); - // Update indexes on THIS table that name the old column (you already had this) - if let Some(idxs) = schema.indexes.get_mut(&normalized_table_name) { - for idx in idxs { - let idx = Arc::make_mut(idx); - for ic in &mut idx.columns { - if ic.name.eq_ignore_ascii_case( - col.name.as_ref().expect("btree column should be named"), - ) { - ic.name = new_name.clone(); + if let Some(indexes) = schema.indexes.get_mut(&normalized_table_name) { + for index in indexes { + let index = Arc::make_mut(index); + for index_column in &mut index.columns { + if index_column.name + == *column.name.as_ref().expect("btree column should be named") + { + index_column.name = definition.col_name.as_str().to_owned(); } } } } + if *rename { - col.name = Some(new_name.clone()); + column.name = new_column.name; } else { - *col = new_column.clone(); - } - - // Keep primary_key_columns consistent (names may change on rename) - for (pk_name, _ord) in &mut btree.primary_key_columns { - if pk_name.eq_ignore_ascii_case(&old_column_name) { - *pk_name = new_name.clone(); - } - } - - // Maintain rowid-alias bit after change/rename (INTEGER PRIMARY KEY) - if !*rename { - // recompute alias from `new_column` - btree.columns[*column_index].is_rowid_alias = new_column.is_rowid_alias; - } - - // Update this table’s OWN foreign keys - for fk_arc in &mut btree.foreign_keys { - let fk = Arc::make_mut(fk_arc); - // child side: rename child column if it matches - for cc in &mut fk.child_columns { - if cc.eq_ignore_ascii_case(&old_column_name) { - *cc = new_name.clone(); - } - } - // parent side: if self-referencing, rename parent column too - if normalize_ident(&fk.parent_table) == normalized_table_name { - for pc in &mut fk.parent_columns { - if pc.eq_ignore_ascii_case(&old_column_name) { - *pc = new_name.clone(); - } - } - } - } - - // fix OTHER tables that reference this table as parent - for (tname, t_arc) in schema.tables.iter_mut() { - if normalize_ident(tname) == normalized_table_name { - continue; - } - if let Table::BTree(ref mut child_btree_arc) = Arc::make_mut(t_arc) { - let child_btree = Arc::make_mut(child_btree_arc); - for fk_arc in &mut child_btree.foreign_keys { - if normalize_ident(&fk_arc.parent_table) != normalized_table_name { - continue; - } - let fk = Arc::make_mut(fk_arc); - for pc in &mut fk.parent_columns { - if pc.eq_ignore_ascii_case(&old_column_name) { - *pc = new_name.clone(); - } - } - } - } + *column = new_column; } }); diff --git a/testing/alter_column.test b/testing/alter_column.test index 1b4da6dd0..3672497ab 100755 --- a/testing/alter_column.test +++ b/testing/alter_column.test @@ -22,219 +22,3 @@ do_execsql_test_in_memory_any_error fail-alter-column-unique { CREATE TABLE t (a); ALTER TABLE t ALTER COLUMN a TO a UNIQUE; } - -do_execsql_test_on_specific_db {:memory:} alter-table-rename-pk-column { - CREATE TABLE customers (cust_id INTEGER PRIMARY KEY, cust_name TEXT); - INSERT INTO customers VALUES (1, 'Alice'), (2, 'Bob'); - - ALTER TABLE customers RENAME COLUMN cust_id TO customer_id; - - SELECT sql FROM sqlite_schema WHERE name = 'customers'; - SELECT customer_id, cust_name FROM customers ORDER BY customer_id; -} { - "CREATE TABLE customers (customer_id INTEGER PRIMARY KEY, cust_name TEXT)" - "1|Alice" - "2|Bob" -} - -do_execsql_test_on_specific_db {:memory:} alter-table-rename-composite-pk { - CREATE TABLE products (category TEXT, prod_code TEXT, name TEXT, PRIMARY KEY (category, prod_code)); - INSERT INTO products VALUES ('Electronics', 'E001', 'Laptop'); - - ALTER TABLE products RENAME COLUMN prod_code TO product_code; - - SELECT sql FROM sqlite_schema WHERE name = 'products'; - SELECT category, product_code, name FROM products; -} { - "CREATE TABLE products (category TEXT, product_code TEXT, name TEXT, PRIMARY KEY (category, product_code))" - "Electronics|E001|Laptop" -} - -# Foreign key child column rename -do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-child { - CREATE TABLE parent (id INTEGER PRIMARY KEY); - CREATE TABLE child (cid INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent(id)); - INSERT INTO parent VALUES (1); - INSERT INTO child VALUES (1, 1); - - ALTER TABLE child RENAME COLUMN pid TO parent_id; - - SELECT sql FROM sqlite_schema WHERE name = 'child'; -} { - "CREATE TABLE child (cid INTEGER PRIMARY KEY, parent_id INTEGER, FOREIGN KEY (parent_id) REFERENCES parent (id))" -} - -# Foreign key parent column rename - critical test -do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-parent { - CREATE TABLE orders (order_id INTEGER PRIMARY KEY, date TEXT); - CREATE TABLE items (item_id INTEGER PRIMARY KEY, oid INTEGER, FOREIGN KEY (oid) REFERENCES orders(order_id)); - - ALTER TABLE orders RENAME COLUMN order_id TO ord_id; - - SELECT sql FROM sqlite_schema WHERE name = 'orders'; - SELECT sql FROM sqlite_schema WHERE name = 'items'; -} { - "CREATE TABLE orders (ord_id INTEGER PRIMARY KEY, date TEXT)" - "CREATE TABLE items (item_id INTEGER PRIMARY KEY, oid INTEGER, FOREIGN KEY (oid) REFERENCES orders (ord_id))" -} - -# Composite foreign key parent rename -do_execsql_test_on_specific_db {:memory:} alter-table-rename-composite-fk-parent { - CREATE TABLE products (cat TEXT, code TEXT, PRIMARY KEY (cat, code)); - CREATE TABLE inventory (id INTEGER PRIMARY KEY, cat TEXT, code TEXT, FOREIGN KEY (cat, code) REFERENCES products(cat, code)); - - ALTER TABLE products RENAME COLUMN code TO sku; - - SELECT sql FROM sqlite_schema WHERE name = 'products'; - SELECT sql FROM sqlite_schema WHERE name = 'inventory'; -} { - "CREATE TABLE products (cat TEXT, sku TEXT, PRIMARY KEY (cat, sku))" - "CREATE TABLE inventory (id INTEGER PRIMARY KEY, cat TEXT, code TEXT, FOREIGN KEY (cat, code) REFERENCES products (cat, sku))" -} - -# Multiple foreign keys to same parent -do_execsql_test_on_specific_db {:memory:} alter-table-rename-multiple-fks { - CREATE TABLE users (uid INTEGER PRIMARY KEY); - CREATE TABLE messages (mid INTEGER PRIMARY KEY, sender INTEGER, receiver INTEGER, - FOREIGN KEY (sender) REFERENCES users(uid), - FOREIGN KEY (receiver) REFERENCES users(uid)); - - ALTER TABLE users RENAME COLUMN uid TO user_id; - - SELECT sql FROM sqlite_schema WHERE name = 'messages'; -} { - "CREATE TABLE messages (mid INTEGER PRIMARY KEY, sender INTEGER, receiver INTEGER, FOREIGN KEY (sender) REFERENCES users (user_id), FOREIGN KEY (receiver) REFERENCES users (user_id))" -} - -# Self-referencing foreign key -do_execsql_test_on_specific_db {:memory:} alter-table-rename-self-ref-fk { - CREATE TABLE employees (emp_id INTEGER PRIMARY KEY, manager_id INTEGER, - FOREIGN KEY (manager_id) REFERENCES employees(emp_id)); - - ALTER TABLE employees RENAME COLUMN emp_id TO employee_id; - - SELECT sql FROM sqlite_schema WHERE name = 'employees'; -} { - "CREATE TABLE employees (employee_id INTEGER PRIMARY KEY, manager_id INTEGER, FOREIGN KEY (manager_id) REFERENCES employees (employee_id))" -} - -# Chain of FK renames - parent is both PK and referenced -do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-chain { - CREATE TABLE t1 (a INTEGER PRIMARY KEY); - CREATE TABLE t2 (b INTEGER PRIMARY KEY, a_ref INTEGER, FOREIGN KEY (a_ref) REFERENCES t1(a)); - CREATE TABLE t3 (c INTEGER PRIMARY KEY, b_ref INTEGER, FOREIGN KEY (b_ref) REFERENCES t2(b)); - - ALTER TABLE t1 RENAME COLUMN a TO a_new; - ALTER TABLE t2 RENAME COLUMN b TO b_new; - - SELECT sql FROM sqlite_schema WHERE name = 't2'; - SELECT sql FROM sqlite_schema WHERE name = 't3'; -} { - "CREATE TABLE t2 (b_new INTEGER PRIMARY KEY, a_ref INTEGER, FOREIGN KEY (a_ref) REFERENCES t1 (a_new))" - "CREATE TABLE t3 (c INTEGER PRIMARY KEY, b_ref INTEGER, FOREIGN KEY (b_ref) REFERENCES t2 (b_new))" -} - -# FK with ON DELETE/UPDATE actions -do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-actions { - CREATE TABLE parent (pid INTEGER PRIMARY KEY); - CREATE TABLE child (cid INTEGER PRIMARY KEY, pid INTEGER, - FOREIGN KEY (pid) REFERENCES parent(pid) ON DELETE CASCADE ON UPDATE RESTRICT); - - ALTER TABLE parent RENAME COLUMN pid TO parent_id; - - SELECT sql FROM sqlite_schema WHERE name = 'child'; -} { - "CREATE TABLE child (cid INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent (parent_id) ON DELETE CASCADE ON UPDATE RESTRICT)" -} - -# FK with DEFERRABLE -do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-deferrable { - CREATE TABLE parent (id INTEGER PRIMARY KEY); - CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, - FOREIGN KEY (pid) REFERENCES parent(id) DEFERRABLE INITIALLY DEFERRED); - - ALTER TABLE parent RENAME COLUMN id TO parent_id; - - SELECT sql FROM sqlite_schema WHERE name = 'child'; -} { - "CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent (parent_id) DEFERRABLE INITIALLY DEFERRED)" -} - -# Rename with quoted identifiers in FK -do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-quoted { - CREATE TABLE "parent table" ("parent id" INTEGER PRIMARY KEY); - CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, - FOREIGN KEY (pid) REFERENCES "parent table"("parent id")); - - ALTER TABLE "parent table" RENAME COLUMN "parent id" TO "new id"; - - SELECT sql FROM sqlite_schema WHERE name = 'child'; -} { - "CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES \"parent table\" (\"new id\"))" -} - -# Verify FK constraint still works after rename -do_execsql_test_on_specific_db {:memory:} alter-table-fk-constraint-after-rename { - PRAGMA foreign_keys = ON; - CREATE TABLE parent (id INTEGER PRIMARY KEY); - CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent(id)); - INSERT INTO parent VALUES (1); - INSERT INTO child VALUES (1, 1); - - ALTER TABLE parent RENAME COLUMN id TO parent_id; - - -- This should work - INSERT INTO child VALUES (2, 1); - SELECT COUNT(*) FROM child; -} { - "2" -} - -# FK constraint violation after rename should still fail -do_execsql_test_in_memory_any_error alter-table-fk-violation-after-rename { - PRAGMA foreign_keys = ON; - CREATE TABLE parent (id INTEGER PRIMARY KEY); - CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent(id)); - INSERT INTO parent VALUES (1); - - ALTER TABLE parent RENAME COLUMN id TO parent_id; - - -- This should fail with FK violation - INSERT INTO child VALUES (1, 999); -} - -# Complex scenario with multiple table constraints -do_execsql_test_on_specific_db {:memory:} alter-table-rename-complex-constraints { - CREATE TABLE t ( - a INTEGER, - b TEXT, - c REAL, - PRIMARY KEY (a, b), - UNIQUE (b, c), - FOREIGN KEY (a) REFERENCES t(a) - ); - - ALTER TABLE t RENAME COLUMN a TO x; - ALTER TABLE t RENAME COLUMN b TO y; - - SELECT sql FROM sqlite_schema WHERE name = 't'; -} { - "CREATE TABLE t (x INTEGER, y TEXT, c REAL, PRIMARY KEY (x, y), UNIQUE (y, c), FOREIGN KEY (x) REFERENCES t (x))" -} - -# Rename column that appears in both PK and FK -do_execsql_test_on_specific_db {:memory:} alter-table-rename-pk-and-fk { - CREATE TABLE parent (id INTEGER PRIMARY KEY); - CREATE TABLE child ( - id INTEGER PRIMARY KEY, - parent_ref INTEGER, - FOREIGN KEY (id) REFERENCES parent(id), - FOREIGN KEY (parent_ref) REFERENCES parent(id) - ); - - ALTER TABLE parent RENAME COLUMN id TO pid; - - SELECT sql FROM sqlite_schema WHERE name = 'child'; -} { - "CREATE TABLE child (id INTEGER PRIMARY KEY, parent_ref INTEGER, FOREIGN KEY (id) REFERENCES parent (pid), FOREIGN KEY (parent_ref) REFERENCES parent (pid))" -} From 7c746e476ce2e6d9e677a1ba1ebb07ae29db4e50 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 21 Oct 2025 09:43:23 -0400 Subject: [PATCH 336/428] Fix to_sql method on BTreeTable to not double write primary keys --- core/schema.rs | 63 ++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/core/schema.rs b/core/schema.rs index ce30d3950..9ee195d94 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -1358,6 +1358,8 @@ impl BTreeTable { /// `CREATE TABLE t (x)`, whereas sqlite stores it with the original extra whitespace. pub fn to_sql(&self) -> String { let mut sql = format!("CREATE TABLE {} (", self.name); + let needs_pk_inline = self.primary_key_columns.len() == 1; + // Add columns for (i, column) in self.columns.iter().enumerate() { if i > 0 { sql.push_str(", "); @@ -1384,8 +1386,7 @@ impl BTreeTable { if column.unique { sql.push_str(" UNIQUE"); } - - if column.primary_key { + if needs_pk_inline && column.primary_key { sql.push_str(" PRIMARY KEY"); } @@ -1394,6 +1395,64 @@ impl BTreeTable { sql.push_str(&default.to_string()); } } + + let has_table_pk = !self.primary_key_columns.is_empty(); + // Add table-level PRIMARY KEY constraint if exists + if !needs_pk_inline && has_table_pk { + sql.push_str(", PRIMARY KEY ("); + for (i, col) in self.primary_key_columns.iter().enumerate() { + if i > 0 { + sql.push_str(", "); + } + sql.push_str(&col.0); + } + sql.push(')'); + } + + for fk in &self.foreign_keys { + sql.push_str(", FOREIGN KEY ("); + for (i, col) in fk.child_columns.iter().enumerate() { + if i > 0 { + sql.push_str(", "); + } + sql.push_str(col); + } + sql.push_str(") REFERENCES "); + sql.push_str(&fk.parent_table); + sql.push('('); + for (i, col) in fk.parent_columns.iter().enumerate() { + if i > 0 { + sql.push_str(", "); + } + sql.push_str(col); + } + sql.push(')'); + + // Add ON DELETE/UPDATE actions, NoAction is default so just make empty in that case + if fk.on_delete != RefAct::NoAction { + sql.push_str(" ON DELETE "); + sql.push_str(match fk.on_delete { + RefAct::SetNull => "SET NULL", + RefAct::SetDefault => "SET DEFAULT", + RefAct::Cascade => "CASCADE", + RefAct::Restrict => "RESTRICT", + _ => "", + }); + } + if fk.on_update != RefAct::NoAction { + sql.push_str(" ON UPDATE "); + sql.push_str(match fk.on_update { + RefAct::SetNull => "SET NULL", + RefAct::SetDefault => "SET DEFAULT", + RefAct::Cascade => "CASCADE", + RefAct::Restrict => "RESTRICT", + _ => "", + }); + } + if fk.deferred { + sql.push_str(" DEFERRABLE INITIALLY DEFERRED"); + } + } sql.push(')'); sql } From 05bd75275fc50ae14b2dcc684eeeb5af88168e2f Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 21 Oct 2025 17:29:07 +0300 Subject: [PATCH 337/428] tests/integration: Reduce collation fuzz test iterations The collation fuzz test case takes up to 4 minutes to run, making it the slowest of all the test cases. Let's reduce iteration count a bit to make this more CI friendly. --- tests/integration/fuzz/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index caffe5582..d3630cf66 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -608,7 +608,7 @@ mod tests { let (mut rng, seed) = rng_from_time_or_env(); println!("collation_fuzz seed: {seed}"); - const ITERS: usize = 3000; + const ITERS: usize = 1000; for iter in 0..ITERS { if iter % (ITERS / 100).max(1) == 0 { println!("collation_fuzz: iteration {}/{}", iter + 1, ITERS); From 06e3b9611bc65e05b55c236ba6f5340d5380faf0 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 21 Oct 2025 10:40:31 -0400 Subject: [PATCH 338/428] Add helpers to rewrite REFERENCES from foriegn keys in ColumnDefinition --- core/util.rs | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/core/util.rs b/core/util.rs index 1093c61ba..26696eabe 100644 --- a/core/util.rs +++ b/core/util.rs @@ -1331,6 +1331,39 @@ pub fn extract_view_columns( Ok(ViewColumnSchema { tables, columns }) } +pub fn rewrite_fk_parent_cols_if_self_ref( + clause: &mut ast::ForeignKeyClause, + table: &str, + from: &str, + to: &str, +) { + if normalize_ident(clause.tbl_name.as_str()) == normalize_ident(table) { + for c in &mut clause.columns { + if normalize_ident(c.col_name.as_str()) == normalize_ident(from) { + c.col_name = ast::Name::exact(to.to_owned()); + } + } + } +} + +/// Update a column-level REFERENCES (col,...) constraint +pub fn rewrite_column_references_if_needed( + col: &mut ast::ColumnDefinition, + table: &str, + from: &str, + to: &str, +) { + for cc in &mut col.constraints { + if let ast::NamedColumnConstraint { + constraint: ast::ColumnConstraint::ForeignKey { clause, .. }, + .. + } = cc + { + rewrite_fk_parent_cols_if_self_ref(clause, table, from, to); + } + } +} + #[cfg(test)] pub mod tests { use super::*; From 2fbd4b7cec084b57e0c78802d1ccd8842cb9611b Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 21 Oct 2025 10:46:52 -0400 Subject: [PATCH 339/428] Ensure op_alter_column and Func::AlterColumn are fixing table references to columns with fk's --- core/vdbe/execute.rs | 271 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 224 insertions(+), 47 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 0d7daeacd..292657274 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -17,7 +17,9 @@ use crate::types::{ compare_immutable, compare_records_generic, Extendable, IOCompletions, ImmutableRecord, SeekResult, Text, }; -use crate::util::normalize_ident; +use crate::util::{ + normalize_ident, rewrite_column_references_if_needed, rewrite_fk_parent_cols_if_self_ref, +}; use crate::vdbe::insn::InsertFlags; use crate::vdbe::{registers_to_ref_values, TxnCleanup}; use crate::vector::{vector32_sparse, vector_concat, vector_distance_jaccard, vector_slice}; @@ -73,7 +75,7 @@ use super::{ }; use parking_lot::RwLock; use rand::{thread_rng, Rng, RngCore}; -use turso_parser::ast::{self, Name, SortOrder}; +use turso_parser::ast::{self, ForeignKeyClause, Name, SortOrder}; use turso_parser::parser::Parser; use super::{ @@ -5463,11 +5465,9 @@ pub fn op_function( .parse_column_definition(true) .unwrap(); - let new_sql = 'sql: { - if table != tbl_name { - break 'sql None; - } + let rename_to = normalize_ident(column_def.col_name.as_str()); + let new_sql = 'sql: { let Value::Text(sql) = sql else { break 'sql None; }; @@ -5521,34 +5521,160 @@ pub fn op_function( temporary, if_not_exists, } => { - if table != normalize_ident(tbl_name.name.as_str()) { - break 'sql None; - } - let ast::CreateTableBody::ColumnsAndConstraints { mut columns, - constraints, + mut constraints, options, } = body else { todo!() }; - let column = columns - .iter_mut() - .find(|column| { - column.col_name.as_str() == original_rename_from.as_str() - }) - .expect("column being renamed should be present"); + let normalized_tbl_name = normalize_ident(tbl_name.name.as_str()); - match alter_func { - AlterTableFunc::AlterColumn => *column = column_def, - AlterTableFunc::RenameColumn => { - column.col_name = column_def.col_name + if normalized_tbl_name == table { + // This is the table being altered - update its column + let column = columns + .iter_mut() + .find(|column| { + column.col_name.as_str() + == original_rename_from.as_str() + }) + .expect("column being renamed should be present"); + + match alter_func { + AlterTableFunc::AlterColumn => *column = column_def.clone(), + AlterTableFunc::RenameColumn => { + column.col_name = column_def.col_name.clone() + } + _ => unreachable!(), } - _ => unreachable!(), - } + // Update table-level constraints (PRIMARY KEY, UNIQUE, FOREIGN KEY) + for constraint in &mut constraints { + match &mut constraint.constraint { + ast::TableConstraint::PrimaryKey { + columns: pk_cols, + .. + } => { + for col in pk_cols { + let (ast::Expr::Name(ref name) + | ast::Expr::Id(ref name)) = *col.expr + else { + return Err(LimboError::ParseError("Unexpected expression in PRIMARY KEY constraint".to_string())); + }; + if normalize_ident(name.as_str()) == rename_from + { + *col.expr = ast::Expr::Name(Name::exact( + column_def.col_name.as_str().to_owned(), + )); + } + } + } + ast::TableConstraint::Unique { + columns: uniq_cols, + .. + } => { + for col in uniq_cols { + let (ast::Expr::Name(ref name) + | ast::Expr::Id(ref name)) = *col.expr + else { + return Err(LimboError::ParseError("Unexpected expression in UNIQUE constraint".to_string())); + }; + if normalize_ident(name.as_str()) == rename_from + { + *col.expr = ast::Expr::Name(Name::exact( + column_def.col_name.as_str().to_owned(), + )); + } + } + } + ast::TableConstraint::ForeignKey { + columns: child_cols, + clause, + .. + } => { + // Update child columns in this table's FK definitions + for child_col in child_cols { + if normalize_ident(child_col.col_name.as_str()) + == rename_from + { + child_col.col_name = Name::exact( + column_def.col_name.as_str().to_owned(), + ); + } + } + rewrite_fk_parent_cols_if_self_ref( + clause, + &normalized_tbl_name, + &rename_from, + column_def.col_name.as_str(), + ); + } + _ => {} + } + + for col in &mut columns { + rewrite_column_references_if_needed( + col, + &normalized_tbl_name, + &rename_from, + column_def.col_name.as_str(), + ); + } + } + } else { + // This is a different table, check if it has FKs referencing the renamed column + let mut fk_updated = false; + + for constraint in &mut constraints { + if let ast::TableConstraint::ForeignKey { + columns: _, + clause: + ForeignKeyClause { + tbl_name, + columns: parent_cols, + .. + }, + .. + } = &mut constraint.constraint + { + // Check if this FK references the table being altered + if normalize_ident(tbl_name.as_str()) == table { + // Update parent column references if they match the renamed column + for parent_col in parent_cols { + if normalize_ident(parent_col.col_name.as_str()) + == rename_from + { + parent_col.col_name = Name::exact( + column_def.col_name.as_str().to_owned(), + ); + fk_updated = true; + } + } + } + } + } + for col in &mut columns { + let before = fk_updated; + let mut local_col = col.clone(); + rewrite_column_references_if_needed( + &mut local_col, + &table, + &rename_from, + column_def.col_name.as_str(), + ); + if local_col != *col { + *col = local_col; + fk_updated = true; + } + } + + // Only return updated SQL if we actually changed something + if !fk_updated { + break 'sql None; + } + } Some( ast::Stmt::CreateTable { tbl_name, @@ -5563,7 +5689,7 @@ pub fn op_function( .to_string(), ) } - _ => todo!(), + _ => None, } }; @@ -8238,43 +8364,94 @@ pub fn op_alter_column( .clone() }; let new_column = crate::schema::Column::from(definition); + let new_name = definition.col_name.as_str().to_owned(); conn.with_schema_mut(|schema| { - let table = schema + let table_arc = schema .tables .get_mut(&normalized_table_name) - .expect("table being renamed should be in schema"); + .expect("table being ALTERed should be in schema"); + let table = Arc::make_mut(table_arc); - let table = Arc::make_mut(table); - - let Table::BTree(btree) = table else { - panic!("only btree tables can be renamed"); + let Table::BTree(ref mut btree_arc) = table else { + panic!("only btree tables can be altered"); }; - - let btree = Arc::make_mut(btree); - - let column = btree + let btree = Arc::make_mut(btree_arc); + let col = btree .columns .get_mut(*column_index) - .expect("renamed column should be in schema"); + .expect("column being ALTERed should be in schema"); - if let Some(indexes) = schema.indexes.get_mut(&normalized_table_name) { - for index in indexes { - let index = Arc::make_mut(index); - for index_column in &mut index.columns { - if index_column.name - == *column.name.as_ref().expect("btree column should be named") - { - index_column.name = definition.col_name.as_str().to_owned(); + // Update indexes on THIS table that name the old column (you already had this) + if let Some(idxs) = schema.indexes.get_mut(&normalized_table_name) { + for idx in idxs { + let idx = Arc::make_mut(idx); + for ic in &mut idx.columns { + if ic.name.eq_ignore_ascii_case( + col.name.as_ref().expect("btree column should be named"), + ) { + ic.name = new_name.clone(); + } + } + } + } + if *rename { + col.name = Some(new_name.clone()); + } else { + *col = new_column.clone(); + } + + // Keep primary_key_columns consistent (names may change on rename) + for (pk_name, _ord) in &mut btree.primary_key_columns { + if pk_name.eq_ignore_ascii_case(&old_column_name) { + *pk_name = new_name.clone(); + } + } + + // Maintain rowid-alias bit after change/rename (INTEGER PRIMARY KEY) + if !*rename { + // recompute alias from `new_column` + btree.columns[*column_index].is_rowid_alias = new_column.is_rowid_alias; + } + + // Update this table’s OWN foreign keys + for fk_arc in &mut btree.foreign_keys { + let fk = Arc::make_mut(fk_arc); + // child side: rename child column if it matches + for cc in &mut fk.child_columns { + if cc.eq_ignore_ascii_case(&old_column_name) { + *cc = new_name.clone(); + } + } + // parent side: if self-referencing, rename parent column too + if normalize_ident(&fk.parent_table) == normalized_table_name { + for pc in &mut fk.parent_columns { + if pc.eq_ignore_ascii_case(&old_column_name) { + *pc = new_name.clone(); } } } } - if *rename { - column.name = new_column.name; - } else { - *column = new_column; + // fix OTHER tables that reference this table as parent + for (tname, t_arc) in schema.tables.iter_mut() { + if normalize_ident(tname) == normalized_table_name { + continue; + } + if let Table::BTree(ref mut child_btree_arc) = Arc::make_mut(t_arc) { + let child_btree = Arc::make_mut(child_btree_arc); + for fk_arc in &mut child_btree.foreign_keys { + if normalize_ident(&fk_arc.parent_table) != normalized_table_name { + continue; + } + let fk = Arc::make_mut(fk_arc); + for pc in &mut fk.parent_columns { + if pc.eq_ignore_ascii_case(&old_column_name) { + *pc = new_name.clone(); + } + } + } + } } }); From c48d7a09632537f8d073a3a21e7bd16bb56ad32d Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 21 Oct 2025 10:47:08 -0400 Subject: [PATCH 340/428] Add tcl tests for alter column fixes --- testing/alter_column.test | 216 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 216 insertions(+) diff --git a/testing/alter_column.test b/testing/alter_column.test index 3672497ab..1b4da6dd0 100755 --- a/testing/alter_column.test +++ b/testing/alter_column.test @@ -22,3 +22,219 @@ do_execsql_test_in_memory_any_error fail-alter-column-unique { CREATE TABLE t (a); ALTER TABLE t ALTER COLUMN a TO a UNIQUE; } + +do_execsql_test_on_specific_db {:memory:} alter-table-rename-pk-column { + CREATE TABLE customers (cust_id INTEGER PRIMARY KEY, cust_name TEXT); + INSERT INTO customers VALUES (1, 'Alice'), (2, 'Bob'); + + ALTER TABLE customers RENAME COLUMN cust_id TO customer_id; + + SELECT sql FROM sqlite_schema WHERE name = 'customers'; + SELECT customer_id, cust_name FROM customers ORDER BY customer_id; +} { + "CREATE TABLE customers (customer_id INTEGER PRIMARY KEY, cust_name TEXT)" + "1|Alice" + "2|Bob" +} + +do_execsql_test_on_specific_db {:memory:} alter-table-rename-composite-pk { + CREATE TABLE products (category TEXT, prod_code TEXT, name TEXT, PRIMARY KEY (category, prod_code)); + INSERT INTO products VALUES ('Electronics', 'E001', 'Laptop'); + + ALTER TABLE products RENAME COLUMN prod_code TO product_code; + + SELECT sql FROM sqlite_schema WHERE name = 'products'; + SELECT category, product_code, name FROM products; +} { + "CREATE TABLE products (category TEXT, product_code TEXT, name TEXT, PRIMARY KEY (category, product_code))" + "Electronics|E001|Laptop" +} + +# Foreign key child column rename +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-child { + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child (cid INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent(id)); + INSERT INTO parent VALUES (1); + INSERT INTO child VALUES (1, 1); + + ALTER TABLE child RENAME COLUMN pid TO parent_id; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (cid INTEGER PRIMARY KEY, parent_id INTEGER, FOREIGN KEY (parent_id) REFERENCES parent (id))" +} + +# Foreign key parent column rename - critical test +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-parent { + CREATE TABLE orders (order_id INTEGER PRIMARY KEY, date TEXT); + CREATE TABLE items (item_id INTEGER PRIMARY KEY, oid INTEGER, FOREIGN KEY (oid) REFERENCES orders(order_id)); + + ALTER TABLE orders RENAME COLUMN order_id TO ord_id; + + SELECT sql FROM sqlite_schema WHERE name = 'orders'; + SELECT sql FROM sqlite_schema WHERE name = 'items'; +} { + "CREATE TABLE orders (ord_id INTEGER PRIMARY KEY, date TEXT)" + "CREATE TABLE items (item_id INTEGER PRIMARY KEY, oid INTEGER, FOREIGN KEY (oid) REFERENCES orders (ord_id))" +} + +# Composite foreign key parent rename +do_execsql_test_on_specific_db {:memory:} alter-table-rename-composite-fk-parent { + CREATE TABLE products (cat TEXT, code TEXT, PRIMARY KEY (cat, code)); + CREATE TABLE inventory (id INTEGER PRIMARY KEY, cat TEXT, code TEXT, FOREIGN KEY (cat, code) REFERENCES products(cat, code)); + + ALTER TABLE products RENAME COLUMN code TO sku; + + SELECT sql FROM sqlite_schema WHERE name = 'products'; + SELECT sql FROM sqlite_schema WHERE name = 'inventory'; +} { + "CREATE TABLE products (cat TEXT, sku TEXT, PRIMARY KEY (cat, sku))" + "CREATE TABLE inventory (id INTEGER PRIMARY KEY, cat TEXT, code TEXT, FOREIGN KEY (cat, code) REFERENCES products (cat, sku))" +} + +# Multiple foreign keys to same parent +do_execsql_test_on_specific_db {:memory:} alter-table-rename-multiple-fks { + CREATE TABLE users (uid INTEGER PRIMARY KEY); + CREATE TABLE messages (mid INTEGER PRIMARY KEY, sender INTEGER, receiver INTEGER, + FOREIGN KEY (sender) REFERENCES users(uid), + FOREIGN KEY (receiver) REFERENCES users(uid)); + + ALTER TABLE users RENAME COLUMN uid TO user_id; + + SELECT sql FROM sqlite_schema WHERE name = 'messages'; +} { + "CREATE TABLE messages (mid INTEGER PRIMARY KEY, sender INTEGER, receiver INTEGER, FOREIGN KEY (sender) REFERENCES users (user_id), FOREIGN KEY (receiver) REFERENCES users (user_id))" +} + +# Self-referencing foreign key +do_execsql_test_on_specific_db {:memory:} alter-table-rename-self-ref-fk { + CREATE TABLE employees (emp_id INTEGER PRIMARY KEY, manager_id INTEGER, + FOREIGN KEY (manager_id) REFERENCES employees(emp_id)); + + ALTER TABLE employees RENAME COLUMN emp_id TO employee_id; + + SELECT sql FROM sqlite_schema WHERE name = 'employees'; +} { + "CREATE TABLE employees (employee_id INTEGER PRIMARY KEY, manager_id INTEGER, FOREIGN KEY (manager_id) REFERENCES employees (employee_id))" +} + +# Chain of FK renames - parent is both PK and referenced +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-chain { + CREATE TABLE t1 (a INTEGER PRIMARY KEY); + CREATE TABLE t2 (b INTEGER PRIMARY KEY, a_ref INTEGER, FOREIGN KEY (a_ref) REFERENCES t1(a)); + CREATE TABLE t3 (c INTEGER PRIMARY KEY, b_ref INTEGER, FOREIGN KEY (b_ref) REFERENCES t2(b)); + + ALTER TABLE t1 RENAME COLUMN a TO a_new; + ALTER TABLE t2 RENAME COLUMN b TO b_new; + + SELECT sql FROM sqlite_schema WHERE name = 't2'; + SELECT sql FROM sqlite_schema WHERE name = 't3'; +} { + "CREATE TABLE t2 (b_new INTEGER PRIMARY KEY, a_ref INTEGER, FOREIGN KEY (a_ref) REFERENCES t1 (a_new))" + "CREATE TABLE t3 (c INTEGER PRIMARY KEY, b_ref INTEGER, FOREIGN KEY (b_ref) REFERENCES t2 (b_new))" +} + +# FK with ON DELETE/UPDATE actions +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-actions { + CREATE TABLE parent (pid INTEGER PRIMARY KEY); + CREATE TABLE child (cid INTEGER PRIMARY KEY, pid INTEGER, + FOREIGN KEY (pid) REFERENCES parent(pid) ON DELETE CASCADE ON UPDATE RESTRICT); + + ALTER TABLE parent RENAME COLUMN pid TO parent_id; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (cid INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent (parent_id) ON DELETE CASCADE ON UPDATE RESTRICT)" +} + +# FK with DEFERRABLE +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-deferrable { + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, + FOREIGN KEY (pid) REFERENCES parent(id) DEFERRABLE INITIALLY DEFERRED); + + ALTER TABLE parent RENAME COLUMN id TO parent_id; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent (parent_id) DEFERRABLE INITIALLY DEFERRED)" +} + +# Rename with quoted identifiers in FK +do_execsql_test_on_specific_db {:memory:} alter-table-rename-fk-quoted { + CREATE TABLE "parent table" ("parent id" INTEGER PRIMARY KEY); + CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, + FOREIGN KEY (pid) REFERENCES "parent table"("parent id")); + + ALTER TABLE "parent table" RENAME COLUMN "parent id" TO "new id"; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES \"parent table\" (\"new id\"))" +} + +# Verify FK constraint still works after rename +do_execsql_test_on_specific_db {:memory:} alter-table-fk-constraint-after-rename { + PRAGMA foreign_keys = ON; + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent(id)); + INSERT INTO parent VALUES (1); + INSERT INTO child VALUES (1, 1); + + ALTER TABLE parent RENAME COLUMN id TO parent_id; + + -- This should work + INSERT INTO child VALUES (2, 1); + SELECT COUNT(*) FROM child; +} { + "2" +} + +# FK constraint violation after rename should still fail +do_execsql_test_in_memory_any_error alter-table-fk-violation-after-rename { + PRAGMA foreign_keys = ON; + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, FOREIGN KEY (pid) REFERENCES parent(id)); + INSERT INTO parent VALUES (1); + + ALTER TABLE parent RENAME COLUMN id TO parent_id; + + -- This should fail with FK violation + INSERT INTO child VALUES (1, 999); +} + +# Complex scenario with multiple table constraints +do_execsql_test_on_specific_db {:memory:} alter-table-rename-complex-constraints { + CREATE TABLE t ( + a INTEGER, + b TEXT, + c REAL, + PRIMARY KEY (a, b), + UNIQUE (b, c), + FOREIGN KEY (a) REFERENCES t(a) + ); + + ALTER TABLE t RENAME COLUMN a TO x; + ALTER TABLE t RENAME COLUMN b TO y; + + SELECT sql FROM sqlite_schema WHERE name = 't'; +} { + "CREATE TABLE t (x INTEGER, y TEXT, c REAL, PRIMARY KEY (x, y), UNIQUE (y, c), FOREIGN KEY (x) REFERENCES t (x))" +} + +# Rename column that appears in both PK and FK +do_execsql_test_on_specific_db {:memory:} alter-table-rename-pk-and-fk { + CREATE TABLE parent (id INTEGER PRIMARY KEY); + CREATE TABLE child ( + id INTEGER PRIMARY KEY, + parent_ref INTEGER, + FOREIGN KEY (id) REFERENCES parent(id), + FOREIGN KEY (parent_ref) REFERENCES parent(id) + ); + + ALTER TABLE parent RENAME COLUMN id TO pid; + + SELECT sql FROM sqlite_schema WHERE name = 'child'; +} { + "CREATE TABLE child (id INTEGER PRIMARY KEY, parent_ref INTEGER, FOREIGN KEY (id) REFERENCES parent (pid), FOREIGN KEY (parent_ref) REFERENCES parent (pid))" +} From 08197e345ab53180fcf765423c5d43d9a1c67dad Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 21 Oct 2025 11:22:29 -0400 Subject: [PATCH 341/428] Fix cdc test to assert for correct schema output --- tests/integration/functions/test_cdc.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/integration/functions/test_cdc.rs b/tests/integration/functions/test_cdc.rs index d631c1f93..28606eca3 100644 --- a/tests/integration/functions/test_cdc.rs +++ b/tests/integration/functions/test_cdc.rs @@ -1107,9 +1107,7 @@ fn test_cdc_schema_changes_alter_table() { Value::Text("t".to_string()), Value::Text("t".to_string()), Value::Integer(4), - Value::Text( - "CREATE TABLE t (x PRIMARY KEY, y PRIMARY KEY, z UNIQUE)".to_string() - ) + Value::Text("CREATE TABLE t (x, y, z UNIQUE, PRIMARY KEY (x, y))".to_string()) ])), Value::Blob(record([ Value::Integer(0), @@ -1135,9 +1133,7 @@ fn test_cdc_schema_changes_alter_table() { Value::Text("t".to_string()), Value::Text("t".to_string()), Value::Integer(4), - Value::Text( - "CREATE TABLE t (x PRIMARY KEY, y PRIMARY KEY, z UNIQUE)".to_string() - ) + Value::Text("CREATE TABLE t (x, y, z UNIQUE, PRIMARY KEY (x, y))".to_string()) ])), Value::Blob(record([ Value::Text("table".to_string()), @@ -1145,7 +1141,7 @@ fn test_cdc_schema_changes_alter_table() { Value::Text("t".to_string()), Value::Integer(4), Value::Text( - "CREATE TABLE t (x PRIMARY KEY, y PRIMARY KEY, z UNIQUE, t)".to_string() + "CREATE TABLE t (x, y, z UNIQUE, t, PRIMARY KEY (x, y))".to_string() ) ])), Value::Blob(record([ From a327747531cd9da6cd8fcf2cf980dbdbe1b80920 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 21 Oct 2025 11:10:41 -0300 Subject: [PATCH 342/428] organize completion code in a separate file --- core/io/completions.rs | 984 +++++++++++++++++++++++++++++++++++++ core/io/mod.rs | 1039 ++-------------------------------------- 2 files changed, 1018 insertions(+), 1005 deletions(-) create mode 100644 core/io/completions.rs diff --git a/core/io/completions.rs b/core/io/completions.rs new file mode 100644 index 000000000..a381324c6 --- /dev/null +++ b/core/io/completions.rs @@ -0,0 +1,984 @@ +use core::fmt::{self, Debug}; +use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, OnceLock, + }, + task::Waker, +}; + +use parking_lot::Mutex; + +use crate::{Buffer, CompletionError}; + +pub type ReadComplete = dyn Fn(Result<(Arc, i32), CompletionError>); +pub type WriteComplete = dyn Fn(Result); +pub type SyncComplete = dyn Fn(Result); +pub type TruncateComplete = dyn Fn(Result); + +#[must_use] +#[derive(Debug, Clone)] +pub struct Completion { + /// Optional completion state. If None, it means we are Yield in order to not allocate anything + pub(super) inner: Option>, +} + +#[derive(Debug, Default)] +struct ContextInner { + waker: Option, + // TODO: add abort signal +} + +#[derive(Debug, Clone)] +pub struct Context { + inner: Arc>, +} + +impl ContextInner { + pub fn new() -> Self { + Self { waker: None } + } + + pub fn wake(&mut self) { + if let Some(waker) = self.waker.take() { + waker.wake(); + } + } + + pub fn set_waker(&mut self, waker: &Waker) { + if let Some(curr_waker) = self.waker.as_mut() { + // only call and change waker if it would awake a different task + if !curr_waker.will_wake(waker) { + let prev_waker = std::mem::replace(curr_waker, waker.clone()); + prev_waker.wake(); + } + } else { + self.waker = Some(waker.clone()); + } + } +} + +impl Context { + pub fn new() -> Self { + Self { + inner: Arc::new(Mutex::new(ContextInner::new())), + } + } + + pub fn wake(&self) { + self.inner.lock().wake(); + } + + pub fn set_waker(&self, waker: &Waker) { + self.inner.lock().set_waker(waker); + } +} + +pub(super) struct CompletionInner { + completion_type: CompletionType, + /// None means we completed successfully + // Thread safe with OnceLock + pub(super) result: std::sync::OnceLock>, + needs_link: bool, + context: Context, + /// Optional parent group this completion belongs to + parent: OnceLock>, +} + +impl fmt::Debug for CompletionInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("CompletionInner") + .field("completion_type", &self.completion_type) + .field("needs_link", &self.needs_link) + .field("parent", &self.parent.get().is_some()) + .finish() + } +} + +pub struct CompletionGroup { + completions: Vec, + callback: Box) + Send + Sync>, +} + +impl CompletionGroup { + pub fn new(callback: F) -> Self + where + F: Fn(Result) + Send + Sync + 'static, + { + Self { + completions: Vec::new(), + callback: Box::new(callback), + } + } + + pub fn add(&mut self, completion: &Completion) { + self.completions.push(completion.clone()); + } + + pub fn cancel(&self) { + for c in &self.completions { + c.abort(); + } + } + + pub fn build(self) -> Completion { + let total = self.completions.len(); + if total == 0 { + (self.callback)(Ok(0)); + return Completion::new_yield(); + } + let group_completion = GroupCompletion::new(self.callback, total); + let group = Completion::new(CompletionType::Group(group_completion)); + + // Store the group completion reference for later callback + if let CompletionType::Group(ref g) = group.get_inner().completion_type { + let _ = g.inner.self_completion.set(group.clone()); + } + + for mut c in self.completions { + // If the completion has not completed, link it to the group. + if !c.finished() { + c.link_internal(&group); + continue; + } + let group_inner = match &group.get_inner().completion_type { + CompletionType::Group(g) => &g.inner, + _ => unreachable!(), + }; + // Return early if there was an error. + if let Some(err) = c.get_error() { + let _ = group_inner.result.set(Some(err)); + group_inner.outstanding.store(0, Ordering::SeqCst); + (group_inner.complete)(Err(err)); + return group; + } + // Mark the successful completion as done. + group_inner.outstanding.fetch_sub(1, Ordering::SeqCst); + } + + let group_inner = match &group.get_inner().completion_type { + CompletionType::Group(g) => &g.inner, + _ => unreachable!(), + }; + if group_inner.outstanding.load(Ordering::SeqCst) == 0 { + (group_inner.complete)(Ok(0)); + } + group + } +} + +pub struct GroupCompletion { + inner: Arc, +} + +impl fmt::Debug for GroupCompletion { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GroupCompletion") + .field( + "outstanding", + &self.inner.outstanding.load(Ordering::SeqCst), + ) + .finish() + } +} + +struct GroupCompletionInner { + /// Number of completions that need to finish + outstanding: AtomicUsize, + /// Callback to invoke when all completions finish + complete: Box) + Send + Sync>, + /// Cached result after all completions finish + result: OnceLock>, + /// Reference to the group's own Completion for notifying parents + self_completion: OnceLock, +} + +impl GroupCompletion { + pub fn new(complete: F, outstanding: usize) -> Self + where + F: Fn(Result) + Send + Sync + 'static, + { + Self { + inner: Arc::new(GroupCompletionInner { + outstanding: AtomicUsize::new(outstanding), + complete: Box::new(complete), + result: OnceLock::new(), + self_completion: OnceLock::new(), + }), + } + } + + pub fn callback(&self, result: Result) { + assert_eq!( + self.inner.outstanding.load(Ordering::SeqCst), + 0, + "callback called before all completions finished" + ); + (self.inner.complete)(result); + } +} + +impl Debug for CompletionType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Read(..) => f.debug_tuple("Read").finish(), + Self::Write(..) => f.debug_tuple("Write").finish(), + Self::Sync(..) => f.debug_tuple("Sync").finish(), + Self::Truncate(..) => f.debug_tuple("Truncate").finish(), + Self::Group(..) => f.debug_tuple("Group").finish(), + Self::Yield => f.debug_tuple("Yield").finish(), + } + } +} + +pub enum CompletionType { + Read(ReadCompletion), + Write(WriteCompletion), + Sync(SyncCompletion), + Truncate(TruncateCompletion), + Group(GroupCompletion), + Yield, +} + +impl CompletionInner { + fn new(completion_type: CompletionType, needs_link: bool) -> Self { + Self { + completion_type, + result: OnceLock::new(), + needs_link, + context: Context::new(), + parent: OnceLock::new(), + } + } +} + +impl Completion { + pub fn new(completion_type: CompletionType) -> Self { + Self { + inner: Some(Arc::new(CompletionInner::new(completion_type, false))), + } + } + + pub fn new_linked(completion_type: CompletionType) -> Self { + Self { + inner: Some(Arc::new(CompletionInner::new(completion_type, true))), + } + } + + pub(super) fn get_inner(&self) -> &Arc { + self.inner.as_ref().unwrap() + } + + pub fn needs_link(&self) -> bool { + self.get_inner().needs_link + } + + pub fn new_write_linked(complete: F) -> Self + where + F: Fn(Result) + 'static, + { + Self::new_linked(CompletionType::Write(WriteCompletion::new(Box::new( + complete, + )))) + } + + pub fn new_write(complete: F) -> Self + where + F: Fn(Result) + 'static, + { + Self::new(CompletionType::Write(WriteCompletion::new(Box::new( + complete, + )))) + } + + pub fn new_read(buf: Arc, complete: F) -> Self + where + F: Fn(Result<(Arc, i32), CompletionError>) + 'static, + { + Self::new(CompletionType::Read(ReadCompletion::new( + buf, + Box::new(complete), + ))) + } + pub fn new_sync(complete: F) -> Self + where + F: Fn(Result) + 'static, + { + Self::new(CompletionType::Sync(SyncCompletion::new(Box::new( + complete, + )))) + } + + pub fn new_trunc(complete: F) -> Self + where + F: Fn(Result) + 'static, + { + Self::new(CompletionType::Truncate(TruncateCompletion::new(Box::new( + complete, + )))) + } + + /// Create a yield completion. These are completed by default allowing to yield control without + /// allocating memory. + pub fn new_yield() -> Self { + Self { inner: None } + } + + pub fn wake(&self) { + self.get_inner().context.wake(); + } + + pub fn set_waker(&self, waker: &Waker) { + if self.finished() || self.inner.is_none() { + waker.wake_by_ref(); + } else { + self.get_inner().context.set_waker(waker); + } + } + + pub fn succeeded(&self) -> bool { + match &self.inner { + Some(inner) => match &inner.completion_type { + CompletionType::Group(g) => { + g.inner.outstanding.load(Ordering::SeqCst) == 0 + && g.inner.result.get().is_none_or(|e| e.is_none()) + } + _ => inner.result.get().is_some(), + }, + None => true, + } + } + + pub fn failed(&self) -> bool { + match &self.inner { + Some(inner) => inner.result.get().is_some_and(|val| val.is_some()), + None => false, + } + } + + pub fn get_error(&self) -> Option { + match &self.inner { + Some(inner) => { + match &inner.completion_type { + CompletionType::Group(g) => { + // For groups, check the group's cached result field + // (set when the last completion finishes) + g.inner.result.get().and_then(|res| *res) + } + _ => inner.result.get().and_then(|res| *res), + } + } + None => None, + } + } + + /// Checks if the Completion completed or errored + pub fn finished(&self) -> bool { + match &self.inner { + Some(inner) => match &inner.completion_type { + CompletionType::Group(g) => g.inner.outstanding.load(Ordering::SeqCst) == 0, + _ => inner.result.get().is_some(), + }, + None => true, + } + } + + pub fn complete(&self, result: i32) { + let result = Ok(result); + self.callback(result); + } + + pub fn error(&self, err: CompletionError) { + let result = Err(err); + self.callback(result); + } + + pub fn abort(&self) { + self.error(CompletionError::Aborted); + } + + fn callback(&self, result: Result) { + let inner = self.get_inner(); + inner.result.get_or_init(|| { + match &inner.completion_type { + CompletionType::Read(r) => r.callback(result), + CompletionType::Write(w) => w.callback(result), + CompletionType::Sync(s) => s.callback(result), // fix + CompletionType::Truncate(t) => t.callback(result), + CompletionType::Group(g) => g.callback(result), + CompletionType::Yield => {} + }; + + if let Some(group) = inner.parent.get() { + // Capture first error in group + if let Err(err) = result { + let _ = group.result.set(Some(err)); + } + let prev = group.outstanding.fetch_sub(1, Ordering::SeqCst); + + // If this was the last completion in the group, trigger the group's callback + // which will recursively call this same callback() method to notify parents + if prev == 1 { + if let Some(group_completion) = group.self_completion.get() { + let group_result = group.result.get().and_then(|e| *e); + group_completion.callback(group_result.map_or(Ok(0), Err)); + } + } + } + + result.err() + }); + // call the waker regardless + inner.context.wake(); + } + + /// only call this method if you are sure that the completion is + /// a ReadCompletion, panics otherwise + pub fn as_read(&self) -> &ReadCompletion { + let inner = self.get_inner(); + match inner.completion_type { + CompletionType::Read(ref r) => r, + _ => unreachable!(), + } + } + + /// Link this completion to a group completion (internal use only) + fn link_internal(&mut self, group: &Completion) { + let group_inner = match &group.get_inner().completion_type { + CompletionType::Group(g) => &g.inner, + _ => panic!("link_internal() requires a group completion"), + }; + + // Set the parent (can only be set once) + if self.get_inner().parent.set(group_inner.clone()).is_err() { + panic!("completion can only be linked once"); + } + } +} + +pub struct ReadCompletion { + pub buf: Arc, + pub complete: Box, +} + +impl ReadCompletion { + pub fn new(buf: Arc, complete: Box) -> Self { + Self { buf, complete } + } + + pub fn buf(&self) -> &Buffer { + &self.buf + } + + pub fn callback(&self, bytes_read: Result) { + (self.complete)(bytes_read.map(|b| (self.buf.clone(), b))); + } + + pub fn buf_arc(&self) -> Arc { + self.buf.clone() + } +} + +pub struct WriteCompletion { + pub complete: Box, +} + +impl WriteCompletion { + pub fn new(complete: Box) -> Self { + Self { complete } + } + + pub fn callback(&self, bytes_written: Result) { + (self.complete)(bytes_written); + } +} + +pub struct SyncCompletion { + pub complete: Box, +} + +impl SyncCompletion { + pub fn new(complete: Box) -> Self { + Self { complete } + } + + pub fn callback(&self, res: Result) { + (self.complete)(res); + } +} + +pub struct TruncateCompletion { + pub complete: Box, +} + +impl TruncateCompletion { + pub fn new(complete: Box) -> Self { + Self { complete } + } + + pub fn callback(&self, res: Result) { + (self.complete)(res); + } +} + +#[cfg(test)] +mod tests { + use crate::CompletionError; + + use super::*; + + #[test] + fn test_completion_group_empty() { + use std::sync::atomic::{AtomicBool, Ordering}; + + let callback_called = Arc::new(AtomicBool::new(false)); + let callback_called_clone = callback_called.clone(); + + let group = CompletionGroup::new(move |_| { + callback_called_clone.store(true, Ordering::SeqCst); + }); + let group = group.build(); + assert!(group.finished()); + assert!(group.succeeded()); + assert!(group.get_error().is_none()); + + // Verify the callback was actually called + assert!( + callback_called.load(Ordering::SeqCst), + "callback should be called for empty group" + ); + } + + #[test] + fn test_completion_group_single_completion() { + let mut group = CompletionGroup::new(|_| {}); + let c = Completion::new_write(|_| {}); + group.add(&c); + let group = group.build(); + + assert!(!group.finished()); + assert!(!group.succeeded()); + + c.complete(0); + + assert!(group.finished()); + assert!(group.succeeded()); + assert!(group.get_error().is_none()); + } + + #[test] + fn test_completion_group_multiple_completions() { + let mut group = CompletionGroup::new(|_| {}); + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + let c3 = Completion::new_write(|_| {}); + group.add(&c1); + group.add(&c2); + group.add(&c3); + let group = group.build(); + + assert!(!group.succeeded()); + assert!(!group.finished()); + + c1.complete(0); + assert!(!group.succeeded()); + assert!(!group.finished()); + + c2.complete(0); + assert!(!group.succeeded()); + assert!(!group.finished()); + + c3.complete(0); + assert!(group.succeeded()); + assert!(group.finished()); + } + + #[test] + fn test_completion_group_with_error() { + let mut group = CompletionGroup::new(|_| {}); + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + group.add(&c1); + group.add(&c2); + let group = group.build(); + + c1.complete(0); + c2.error(CompletionError::Aborted); + + assert!(group.finished()); + assert!(!group.succeeded()); + assert_eq!(group.get_error(), Some(CompletionError::Aborted)); + } + + #[test] + fn test_completion_group_callback() { + use std::sync::atomic::{AtomicBool, Ordering}; + let called = Arc::new(AtomicBool::new(false)); + let called_clone = called.clone(); + + let mut group = CompletionGroup::new(move |_| { + called_clone.store(true, Ordering::SeqCst); + }); + + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + group.add(&c1); + group.add(&c2); + let group = group.build(); + + assert!(!called.load(Ordering::SeqCst)); + + c1.complete(0); + assert!(!called.load(Ordering::SeqCst)); + + c2.complete(0); + assert!(called.load(Ordering::SeqCst)); + assert!(group.finished()); + assert!(group.succeeded()); + } + + #[test] + fn test_completion_group_some_already_completed() { + // Test some completions added to group, then finish before build() + let mut group = CompletionGroup::new(|_| {}); + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + let c3 = Completion::new_write(|_| {}); + + // Add all to group while pending + group.add(&c1); + group.add(&c2); + group.add(&c3); + + // Complete c1 and c2 AFTER adding but BEFORE build() + c1.complete(0); + c2.complete(0); + + let group = group.build(); + + // c1 and c2 finished before build(), so outstanding should account for them + // Only c3 should be pending + assert!(!group.finished()); + assert!(!group.succeeded()); + + // Complete c3 + c3.complete(0); + + // Now the group should be finished + assert!(group.finished()); + assert!(group.succeeded()); + assert!(group.get_error().is_none()); + } + + #[test] + fn test_completion_group_all_already_completed() { + // Test when all completions are already finished before build() + let mut group = CompletionGroup::new(|_| {}); + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + + // Complete both before adding to group + c1.complete(0); + c2.complete(0); + + group.add(&c1); + group.add(&c2); + + let group = group.build(); + + // All completions were already complete, so group should be finished immediately + assert!(group.finished()); + assert!(group.succeeded()); + assert!(group.get_error().is_none()); + } + + #[test] + fn test_completion_group_mixed_finished_and_pending() { + use std::sync::atomic::{AtomicBool, Ordering}; + let called = Arc::new(AtomicBool::new(false)); + let called_clone = called.clone(); + + let mut group = CompletionGroup::new(move |_| { + called_clone.store(true, Ordering::SeqCst); + }); + + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + let c3 = Completion::new_write(|_| {}); + let c4 = Completion::new_write(|_| {}); + + // Complete c1 and c3 before adding to group + c1.complete(0); + c3.complete(0); + + group.add(&c1); + group.add(&c2); + group.add(&c3); + group.add(&c4); + + let group = group.build(); + + // Only c2 and c4 should be pending + assert!(!group.finished()); + assert!(!called.load(Ordering::SeqCst)); + + c2.complete(0); + assert!(!group.finished()); + assert!(!called.load(Ordering::SeqCst)); + + c4.complete(0); + assert!(group.finished()); + assert!(group.succeeded()); + assert!(called.load(Ordering::SeqCst)); + } + + #[test] + fn test_completion_group_already_completed_with_error() { + // Test when a completion finishes with error before build() + let mut group = CompletionGroup::new(|_| {}); + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + + // Complete c1 with error before adding to group + c1.error(CompletionError::Aborted); + + group.add(&c1); + group.add(&c2); + + let group = group.build(); + + // Group should immediately fail with the error + assert!(group.finished()); + assert!(!group.succeeded()); + assert_eq!(group.get_error(), Some(CompletionError::Aborted)); + } + + #[test] + fn test_completion_group_tracks_all_completions() { + // This test verifies the fix for the bug where CompletionGroup::add() + // would skip successfully-finished completions. This caused problems + // when code used drain() to move completions into a group, because + // finished completions would be removed from the source but not tracked + // by the group, effectively losing them. + use std::sync::atomic::{AtomicUsize, Ordering}; + + let callback_count = Arc::new(AtomicUsize::new(0)); + let callback_count_clone = callback_count.clone(); + + // Simulate the pattern: create multiple completions, complete some, + // then add ALL of them to a group (like drain() would do) + let mut completions = Vec::new(); + + // Create 4 completions + for _ in 0..4 { + completions.push(Completion::new_write(|_| {})); + } + + // Complete 2 of them before adding to group (simulate async completion) + completions[0].complete(0); + completions[2].complete(0); + + // Now create a group and add ALL completions (like drain() would do) + let mut group = CompletionGroup::new(move |_| { + callback_count_clone.fetch_add(1, Ordering::SeqCst); + }); + + // Add all completions to the group + for c in &completions { + group.add(c); + } + + let group = group.build(); + + // The group should track all 4 completions: + // - c[0] and c[2] are already finished + // - c[1] and c[3] are still pending + // So the group should not be finished yet + assert!(!group.finished()); + assert_eq!(callback_count.load(Ordering::SeqCst), 0); + + // Complete the first pending completion + completions[1].complete(0); + assert!(!group.finished()); + assert_eq!(callback_count.load(Ordering::SeqCst), 0); + + // Complete the last pending completion - now group should finish + completions[3].complete(0); + assert!(group.finished()); + assert!(group.succeeded()); + assert_eq!(callback_count.load(Ordering::SeqCst), 1); + + // Verify no errors + assert!(group.get_error().is_none()); + } + + #[test] + fn test_completion_group_with_all_finished_successfully() { + // Edge case: all completions are already successfully finished + // when added to the group. The group should complete immediately. + use std::sync::atomic::{AtomicBool, Ordering}; + + let callback_called = Arc::new(AtomicBool::new(false)); + let callback_called_clone = callback_called.clone(); + + let mut completions = Vec::new(); + + // Create and immediately complete 3 completions + for _ in 0..3 { + let c = Completion::new_write(|_| {}); + c.complete(0); + completions.push(c); + } + + // Add all already-completed completions to group + let mut group = CompletionGroup::new(move |_| { + callback_called_clone.store(true, Ordering::SeqCst); + }); + + for c in &completions { + group.add(c); + } + + let group = group.build(); + + // Group should be immediately finished since all completions were done + assert!(group.finished()); + assert!(group.succeeded()); + assert!(callback_called.load(Ordering::SeqCst)); + assert!(group.get_error().is_none()); + } + + #[test] + fn test_completion_group_nested() { + use std::sync::atomic::{AtomicUsize, Ordering}; + + // Track callbacks at different levels + let parent_called = Arc::new(AtomicUsize::new(0)); + let child1_called = Arc::new(AtomicUsize::new(0)); + let child2_called = Arc::new(AtomicUsize::new(0)); + + // Create child group 1 with 2 completions + let child1_called_clone = child1_called.clone(); + let mut child_group1 = CompletionGroup::new(move |_| { + child1_called_clone.fetch_add(1, Ordering::SeqCst); + }); + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + child_group1.add(&c1); + child_group1.add(&c2); + let child_group1 = child_group1.build(); + + // Create child group 2 with 2 completions + let child2_called_clone = child2_called.clone(); + let mut child_group2 = CompletionGroup::new(move |_| { + child2_called_clone.fetch_add(1, Ordering::SeqCst); + }); + let c3 = Completion::new_write(|_| {}); + let c4 = Completion::new_write(|_| {}); + child_group2.add(&c3); + child_group2.add(&c4); + let child_group2 = child_group2.build(); + + // Create parent group containing both child groups + let parent_called_clone = parent_called.clone(); + let mut parent_group = CompletionGroup::new(move |_| { + parent_called_clone.fetch_add(1, Ordering::SeqCst); + }); + parent_group.add(&child_group1); + parent_group.add(&child_group2); + let parent_group = parent_group.build(); + + // Initially nothing should be finished + assert!(!parent_group.finished()); + assert!(!child_group1.finished()); + assert!(!child_group2.finished()); + assert_eq!(parent_called.load(Ordering::SeqCst), 0); + assert_eq!(child1_called.load(Ordering::SeqCst), 0); + assert_eq!(child2_called.load(Ordering::SeqCst), 0); + + // Complete first completion in child group 1 + c1.complete(0); + assert!(!child_group1.finished()); + assert!(!parent_group.finished()); + assert_eq!(child1_called.load(Ordering::SeqCst), 0); + assert_eq!(parent_called.load(Ordering::SeqCst), 0); + + // Complete second completion in child group 1 - should finish child group 1 + c2.complete(0); + assert!(child_group1.finished()); + assert!(child_group1.succeeded()); + assert_eq!(child1_called.load(Ordering::SeqCst), 1); + + // Parent should not be finished yet because child group 2 is still pending + assert!(!parent_group.finished()); + assert_eq!(parent_called.load(Ordering::SeqCst), 0); + + // Complete first completion in child group 2 + c3.complete(0); + assert!(!child_group2.finished()); + assert!(!parent_group.finished()); + assert_eq!(child2_called.load(Ordering::SeqCst), 0); + assert_eq!(parent_called.load(Ordering::SeqCst), 0); + + // Complete second completion in child group 2 - should finish everything + c4.complete(0); + assert!(child_group2.finished()); + assert!(child_group2.succeeded()); + assert_eq!(child2_called.load(Ordering::SeqCst), 1); + + // Parent should now be finished + assert!(parent_group.finished()); + assert!(parent_group.succeeded()); + assert_eq!(parent_called.load(Ordering::SeqCst), 1); + assert!(parent_group.get_error().is_none()); + } + + #[test] + fn test_completion_group_nested_with_error() { + use std::sync::atomic::{AtomicBool, Ordering}; + + let parent_called = Arc::new(AtomicBool::new(false)); + let child_called = Arc::new(AtomicBool::new(false)); + + // Create child group with 2 completions + let child_called_clone = child_called.clone(); + let mut child_group = CompletionGroup::new(move |_| { + child_called_clone.store(true, Ordering::SeqCst); + }); + let c1 = Completion::new_write(|_| {}); + let c2 = Completion::new_write(|_| {}); + child_group.add(&c1); + child_group.add(&c2); + let child_group = child_group.build(); + + // Create parent group containing child group and another completion + let parent_called_clone = parent_called.clone(); + let mut parent_group = CompletionGroup::new(move |_| { + parent_called_clone.store(true, Ordering::SeqCst); + }); + let c3 = Completion::new_write(|_| {}); + parent_group.add(&child_group); + parent_group.add(&c3); + let parent_group = parent_group.build(); + + // Complete child group with success + c1.complete(0); + c2.complete(0); + assert!(child_group.finished()); + assert!(child_group.succeeded()); + assert!(child_called.load(Ordering::SeqCst)); + + // Parent still pending + assert!(!parent_group.finished()); + assert!(!parent_called.load(Ordering::SeqCst)); + + // Complete c3 with error + c3.error(CompletionError::Aborted); + + // Parent should finish with error + assert!(parent_group.finished()); + assert!(!parent_group.succeeded()); + assert_eq!(parent_group.get_error(), Some(CompletionError::Aborted)); + assert!(parent_called.load(Ordering::SeqCst)); + } +} diff --git a/core/io/mod.rs b/core/io/mod.rs index f107ad66b..ea733f820 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -1,17 +1,46 @@ use crate::storage::buffer_pool::ArenaBuffer; use crate::storage::sqlite3_ondisk::WAL_FRAME_HEADER_SIZE; -use crate::{BufferPool, CompletionError, Result}; +use crate::{BufferPool, Result}; use bitflags::bitflags; use cfg_block::cfg_block; -use parking_lot::Mutex; use std::cell::RefCell; use std::fmt; use std::ptr::NonNull; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::{Arc, OnceLock}; -use std::task::Waker; +use std::sync::Arc; use std::{fmt::Debug, pin::Pin}; +cfg_block! { + #[cfg(all(target_os = "linux", feature = "io_uring", not(miri)))] { + mod io_uring; + #[cfg(feature = "fs")] + pub use io_uring::UringIO; + } + + #[cfg(all(target_family = "unix", not(miri)))] { + mod unix; + #[cfg(feature = "fs")] + pub use unix::UnixIO; + pub use unix::UnixIO as PlatformIO; + pub use PlatformIO as SyscallIO; + } + + #[cfg(any(not(any(target_family = "unix", target_os = "android", target_os = "ios")), miri))] { + mod generic; + pub use generic::GenericIO as PlatformIO; + pub use PlatformIO as SyscallIO; + } +} + +mod memory; +#[cfg(feature = "fs")] +mod vfs; +pub use memory::MemoryIO; +pub mod clock; +mod common; +mod completions; +pub use clock::Clock; +pub use completions::*; + pub trait File: Send + Sync { fn lock_file(&self, exclusive: bool) -> Result<()>; fn unlock_file(&self) -> Result<()>; @@ -134,516 +163,6 @@ pub trait IO: Clock + Send + Sync { } } -pub type ReadComplete = dyn Fn(Result<(Arc, i32), CompletionError>); -pub type WriteComplete = dyn Fn(Result); -pub type SyncComplete = dyn Fn(Result); -pub type TruncateComplete = dyn Fn(Result); - -#[must_use] -#[derive(Debug, Clone)] -pub struct Completion { - /// Optional completion state. If None, it means we are Yield in order to not allocate anything - inner: Option>, -} - -#[derive(Debug, Default)] -struct ContextInner { - waker: Option, - // TODO: add abort signal -} - -#[derive(Debug, Clone)] -pub struct Context { - inner: Arc>, -} - -impl ContextInner { - pub fn new() -> Self { - Self { waker: None } - } - - pub fn wake(&mut self) { - if let Some(waker) = self.waker.take() { - waker.wake(); - } - } - - pub fn set_waker(&mut self, waker: &Waker) { - if let Some(curr_waker) = self.waker.as_mut() { - // only call and change waker if it would awake a different task - if !curr_waker.will_wake(waker) { - let prev_waker = std::mem::replace(curr_waker, waker.clone()); - prev_waker.wake(); - } - } else { - self.waker = Some(waker.clone()); - } - } -} - -impl Context { - pub fn new() -> Self { - Self { - inner: Arc::new(Mutex::new(ContextInner::new())), - } - } - - pub fn wake(&self) { - self.inner.lock().wake(); - } - - pub fn set_waker(&self, waker: &Waker) { - self.inner.lock().set_waker(waker); - } -} - -struct CompletionInner { - completion_type: CompletionType, - /// None means we completed successfully - // Thread safe with OnceLock - result: std::sync::OnceLock>, - needs_link: bool, - context: Context, - /// Optional parent group this completion belongs to - parent: OnceLock>, -} - -impl fmt::Debug for CompletionInner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("CompletionInner") - .field("completion_type", &self.completion_type) - .field("needs_link", &self.needs_link) - .field("parent", &self.parent.get().is_some()) - .finish() - } -} - -pub struct CompletionGroup { - completions: Vec, - callback: Box) + Send + Sync>, -} - -impl CompletionGroup { - pub fn new(callback: F) -> Self - where - F: Fn(Result) + Send + Sync + 'static, - { - Self { - completions: Vec::new(), - callback: Box::new(callback), - } - } - - pub fn add(&mut self, completion: &Completion) { - self.completions.push(completion.clone()); - } - - pub fn cancel(&self) { - for c in &self.completions { - c.abort(); - } - } - - pub fn build(self) -> Completion { - let total = self.completions.len(); - if total == 0 { - (self.callback)(Ok(0)); - return Completion::new_yield(); - } - let group_completion = GroupCompletion::new(self.callback, total); - let group = Completion::new(CompletionType::Group(group_completion)); - - // Store the group completion reference for later callback - if let CompletionType::Group(ref g) = group.get_inner().completion_type { - let _ = g.inner.self_completion.set(group.clone()); - } - - for mut c in self.completions { - // If the completion has not completed, link it to the group. - if !c.finished() { - c.link_internal(&group); - continue; - } - let group_inner = match &group.get_inner().completion_type { - CompletionType::Group(g) => &g.inner, - _ => unreachable!(), - }; - // Return early if there was an error. - if let Some(err) = c.get_error() { - let _ = group_inner.result.set(Some(err)); - group_inner.outstanding.store(0, Ordering::SeqCst); - (group_inner.complete)(Err(err)); - return group; - } - // Mark the successful completion as done. - group_inner.outstanding.fetch_sub(1, Ordering::SeqCst); - } - - let group_inner = match &group.get_inner().completion_type { - CompletionType::Group(g) => &g.inner, - _ => unreachable!(), - }; - if group_inner.outstanding.load(Ordering::SeqCst) == 0 { - (group_inner.complete)(Ok(0)); - } - group - } -} - -pub struct GroupCompletion { - inner: Arc, -} - -impl fmt::Debug for GroupCompletion { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("GroupCompletion") - .field( - "outstanding", - &self.inner.outstanding.load(Ordering::SeqCst), - ) - .finish() - } -} - -struct GroupCompletionInner { - /// Number of completions that need to finish - outstanding: AtomicUsize, - /// Callback to invoke when all completions finish - complete: Box) + Send + Sync>, - /// Cached result after all completions finish - result: OnceLock>, - /// Reference to the group's own Completion for notifying parents - self_completion: OnceLock, -} - -impl GroupCompletion { - pub fn new(complete: F, outstanding: usize) -> Self - where - F: Fn(Result) + Send + Sync + 'static, - { - Self { - inner: Arc::new(GroupCompletionInner { - outstanding: AtomicUsize::new(outstanding), - complete: Box::new(complete), - result: OnceLock::new(), - self_completion: OnceLock::new(), - }), - } - } - - pub fn callback(&self, result: Result) { - assert_eq!( - self.inner.outstanding.load(Ordering::SeqCst), - 0, - "callback called before all completions finished" - ); - (self.inner.complete)(result); - } -} - -impl Debug for CompletionType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Read(..) => f.debug_tuple("Read").finish(), - Self::Write(..) => f.debug_tuple("Write").finish(), - Self::Sync(..) => f.debug_tuple("Sync").finish(), - Self::Truncate(..) => f.debug_tuple("Truncate").finish(), - Self::Group(..) => f.debug_tuple("Group").finish(), - Self::Yield => f.debug_tuple("Yield").finish(), - } - } -} - -pub enum CompletionType { - Read(ReadCompletion), - Write(WriteCompletion), - Sync(SyncCompletion), - Truncate(TruncateCompletion), - Group(GroupCompletion), - Yield, -} - -impl CompletionInner { - fn new(completion_type: CompletionType, needs_link: bool) -> Self { - Self { - completion_type, - result: OnceLock::new(), - needs_link, - context: Context::new(), - parent: OnceLock::new(), - } - } -} - -impl Completion { - pub fn new(completion_type: CompletionType) -> Self { - Self { - inner: Some(Arc::new(CompletionInner::new(completion_type, false))), - } - } - - pub fn new_linked(completion_type: CompletionType) -> Self { - Self { - inner: Some(Arc::new(CompletionInner::new(completion_type, true))), - } - } - - pub(self) fn get_inner(&self) -> &Arc { - self.inner.as_ref().unwrap() - } - - pub fn needs_link(&self) -> bool { - self.get_inner().needs_link - } - - pub fn new_write_linked(complete: F) -> Self - where - F: Fn(Result) + 'static, - { - Self::new_linked(CompletionType::Write(WriteCompletion::new(Box::new( - complete, - )))) - } - - pub fn new_write(complete: F) -> Self - where - F: Fn(Result) + 'static, - { - Self::new(CompletionType::Write(WriteCompletion::new(Box::new( - complete, - )))) - } - - pub fn new_read(buf: Arc, complete: F) -> Self - where - F: Fn(Result<(Arc, i32), CompletionError>) + 'static, - { - Self::new(CompletionType::Read(ReadCompletion::new( - buf, - Box::new(complete), - ))) - } - pub fn new_sync(complete: F) -> Self - where - F: Fn(Result) + 'static, - { - Self::new(CompletionType::Sync(SyncCompletion::new(Box::new( - complete, - )))) - } - - pub fn new_trunc(complete: F) -> Self - where - F: Fn(Result) + 'static, - { - Self::new(CompletionType::Truncate(TruncateCompletion::new(Box::new( - complete, - )))) - } - - /// Create a yield completion. These are completed by default allowing to yield control without - /// allocating memory. - pub fn new_yield() -> Self { - Self { inner: None } - } - - pub fn wake(&self) { - self.get_inner().context.wake(); - } - - pub fn set_waker(&self, waker: &Waker) { - if self.finished() || self.inner.is_none() { - waker.wake_by_ref(); - } else { - self.get_inner().context.set_waker(waker); - } - } - - pub fn succeeded(&self) -> bool { - match &self.inner { - Some(inner) => match &inner.completion_type { - CompletionType::Group(g) => { - g.inner.outstanding.load(Ordering::SeqCst) == 0 - && g.inner.result.get().is_none_or(|e| e.is_none()) - } - _ => inner.result.get().is_some(), - }, - None => true, - } - } - - pub fn failed(&self) -> bool { - match &self.inner { - Some(inner) => inner.result.get().is_some_and(|val| val.is_some()), - None => false, - } - } - - pub fn get_error(&self) -> Option { - match &self.inner { - Some(inner) => { - match &inner.completion_type { - CompletionType::Group(g) => { - // For groups, check the group's cached result field - // (set when the last completion finishes) - g.inner.result.get().and_then(|res| *res) - } - _ => inner.result.get().and_then(|res| *res), - } - } - None => None, - } - } - - /// Checks if the Completion completed or errored - pub fn finished(&self) -> bool { - match &self.inner { - Some(inner) => match &inner.completion_type { - CompletionType::Group(g) => g.inner.outstanding.load(Ordering::SeqCst) == 0, - _ => inner.result.get().is_some(), - }, - None => true, - } - } - - pub fn complete(&self, result: i32) { - let result = Ok(result); - self.callback(result); - } - - pub fn error(&self, err: CompletionError) { - let result = Err(err); - self.callback(result); - } - - pub fn abort(&self) { - self.error(CompletionError::Aborted); - } - - fn callback(&self, result: Result) { - let inner = self.get_inner(); - inner.result.get_or_init(|| { - match &inner.completion_type { - CompletionType::Read(r) => r.callback(result), - CompletionType::Write(w) => w.callback(result), - CompletionType::Sync(s) => s.callback(result), // fix - CompletionType::Truncate(t) => t.callback(result), - CompletionType::Group(g) => g.callback(result), - CompletionType::Yield => {} - }; - - if let Some(group) = inner.parent.get() { - // Capture first error in group - if let Err(err) = result { - let _ = group.result.set(Some(err)); - } - let prev = group.outstanding.fetch_sub(1, Ordering::SeqCst); - - // If this was the last completion in the group, trigger the group's callback - // which will recursively call this same callback() method to notify parents - if prev == 1 { - if let Some(group_completion) = group.self_completion.get() { - let group_result = group.result.get().and_then(|e| *e); - group_completion.callback(group_result.map_or(Ok(0), Err)); - } - } - } - - result.err() - }); - // call the waker regardless - inner.context.wake(); - } - - /// only call this method if you are sure that the completion is - /// a ReadCompletion, panics otherwise - pub fn as_read(&self) -> &ReadCompletion { - let inner = self.get_inner(); - match inner.completion_type { - CompletionType::Read(ref r) => r, - _ => unreachable!(), - } - } - - /// Link this completion to a group completion (internal use only) - fn link_internal(&mut self, group: &Completion) { - let group_inner = match &group.get_inner().completion_type { - CompletionType::Group(g) => &g.inner, - _ => panic!("link_internal() requires a group completion"), - }; - - // Set the parent (can only be set once) - if self.get_inner().parent.set(group_inner.clone()).is_err() { - panic!("completion can only be linked once"); - } - } -} - -pub struct ReadCompletion { - pub buf: Arc, - pub complete: Box, -} - -impl ReadCompletion { - pub fn new(buf: Arc, complete: Box) -> Self { - Self { buf, complete } - } - - pub fn buf(&self) -> &Buffer { - &self.buf - } - - pub fn callback(&self, bytes_read: Result) { - (self.complete)(bytes_read.map(|b| (self.buf.clone(), b))); - } - - pub fn buf_arc(&self) -> Arc { - self.buf.clone() - } -} - -pub struct WriteCompletion { - pub complete: Box, -} - -impl WriteCompletion { - pub fn new(complete: Box) -> Self { - Self { complete } - } - - pub fn callback(&self, bytes_written: Result) { - (self.complete)(bytes_written); - } -} - -pub struct SyncCompletion { - pub complete: Box, -} - -impl SyncCompletion { - pub fn new(complete: Box) -> Self { - Self { complete } - } - - pub fn callback(&self, res: Result) { - (self.complete)(res); - } -} - -pub struct TruncateCompletion { - pub complete: Box, -} - -impl TruncateCompletion { - pub fn new(complete: Box) -> Self { - Self { complete } - } - - pub fn callback(&self, res: Result) { - (self.complete)(res); - } -} - pub type BufferData = Pin>; pub enum Buffer { @@ -803,493 +322,3 @@ impl TempBufferCache { } } } - -cfg_block! { - #[cfg(all(target_os = "linux", feature = "io_uring", not(miri)))] { - mod io_uring; - #[cfg(feature = "fs")] - pub use io_uring::UringIO; - } - - #[cfg(all(target_family = "unix", not(miri)))] { - mod unix; - #[cfg(feature = "fs")] - pub use unix::UnixIO; - pub use unix::UnixIO as PlatformIO; - pub use PlatformIO as SyscallIO; - } - - #[cfg(any(not(any(target_family = "unix", target_os = "android", target_os = "ios")), miri))] { - mod generic; - pub use generic::GenericIO as PlatformIO; - pub use PlatformIO as SyscallIO; - } -} - -mod memory; -#[cfg(feature = "fs")] -mod vfs; -pub use memory::MemoryIO; -pub mod clock; -mod common; -pub use clock::Clock; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_completion_group_empty() { - use std::sync::atomic::{AtomicBool, Ordering}; - - let callback_called = Arc::new(AtomicBool::new(false)); - let callback_called_clone = callback_called.clone(); - - let group = CompletionGroup::new(move |_| { - callback_called_clone.store(true, Ordering::SeqCst); - }); - let group = group.build(); - assert!(group.finished()); - assert!(group.succeeded()); - assert!(group.get_error().is_none()); - - // Verify the callback was actually called - assert!( - callback_called.load(Ordering::SeqCst), - "callback should be called for empty group" - ); - } - - #[test] - fn test_completion_group_single_completion() { - let mut group = CompletionGroup::new(|_| {}); - let c = Completion::new_write(|_| {}); - group.add(&c); - let group = group.build(); - - assert!(!group.finished()); - assert!(!group.succeeded()); - - c.complete(0); - - assert!(group.finished()); - assert!(group.succeeded()); - assert!(group.get_error().is_none()); - } - - #[test] - fn test_completion_group_multiple_completions() { - let mut group = CompletionGroup::new(|_| {}); - let c1 = Completion::new_write(|_| {}); - let c2 = Completion::new_write(|_| {}); - let c3 = Completion::new_write(|_| {}); - group.add(&c1); - group.add(&c2); - group.add(&c3); - let group = group.build(); - - assert!(!group.succeeded()); - assert!(!group.finished()); - - c1.complete(0); - assert!(!group.succeeded()); - assert!(!group.finished()); - - c2.complete(0); - assert!(!group.succeeded()); - assert!(!group.finished()); - - c3.complete(0); - assert!(group.succeeded()); - assert!(group.finished()); - } - - #[test] - fn test_completion_group_with_error() { - let mut group = CompletionGroup::new(|_| {}); - let c1 = Completion::new_write(|_| {}); - let c2 = Completion::new_write(|_| {}); - group.add(&c1); - group.add(&c2); - let group = group.build(); - - c1.complete(0); - c2.error(CompletionError::Aborted); - - assert!(group.finished()); - assert!(!group.succeeded()); - assert_eq!(group.get_error(), Some(CompletionError::Aborted)); - } - - #[test] - fn test_completion_group_callback() { - use std::sync::atomic::{AtomicBool, Ordering}; - let called = Arc::new(AtomicBool::new(false)); - let called_clone = called.clone(); - - let mut group = CompletionGroup::new(move |_| { - called_clone.store(true, Ordering::SeqCst); - }); - - let c1 = Completion::new_write(|_| {}); - let c2 = Completion::new_write(|_| {}); - group.add(&c1); - group.add(&c2); - let group = group.build(); - - assert!(!called.load(Ordering::SeqCst)); - - c1.complete(0); - assert!(!called.load(Ordering::SeqCst)); - - c2.complete(0); - assert!(called.load(Ordering::SeqCst)); - assert!(group.finished()); - assert!(group.succeeded()); - } - - #[test] - fn test_completion_group_some_already_completed() { - // Test some completions added to group, then finish before build() - let mut group = CompletionGroup::new(|_| {}); - let c1 = Completion::new_write(|_| {}); - let c2 = Completion::new_write(|_| {}); - let c3 = Completion::new_write(|_| {}); - - // Add all to group while pending - group.add(&c1); - group.add(&c2); - group.add(&c3); - - // Complete c1 and c2 AFTER adding but BEFORE build() - c1.complete(0); - c2.complete(0); - - let group = group.build(); - - // c1 and c2 finished before build(), so outstanding should account for them - // Only c3 should be pending - assert!(!group.finished()); - assert!(!group.succeeded()); - - // Complete c3 - c3.complete(0); - - // Now the group should be finished - assert!(group.finished()); - assert!(group.succeeded()); - assert!(group.get_error().is_none()); - } - - #[test] - fn test_completion_group_all_already_completed() { - // Test when all completions are already finished before build() - let mut group = CompletionGroup::new(|_| {}); - let c1 = Completion::new_write(|_| {}); - let c2 = Completion::new_write(|_| {}); - - // Complete both before adding to group - c1.complete(0); - c2.complete(0); - - group.add(&c1); - group.add(&c2); - - let group = group.build(); - - // All completions were already complete, so group should be finished immediately - assert!(group.finished()); - assert!(group.succeeded()); - assert!(group.get_error().is_none()); - } - - #[test] - fn test_completion_group_mixed_finished_and_pending() { - use std::sync::atomic::{AtomicBool, Ordering}; - let called = Arc::new(AtomicBool::new(false)); - let called_clone = called.clone(); - - let mut group = CompletionGroup::new(move |_| { - called_clone.store(true, Ordering::SeqCst); - }); - - let c1 = Completion::new_write(|_| {}); - let c2 = Completion::new_write(|_| {}); - let c3 = Completion::new_write(|_| {}); - let c4 = Completion::new_write(|_| {}); - - // Complete c1 and c3 before adding to group - c1.complete(0); - c3.complete(0); - - group.add(&c1); - group.add(&c2); - group.add(&c3); - group.add(&c4); - - let group = group.build(); - - // Only c2 and c4 should be pending - assert!(!group.finished()); - assert!(!called.load(Ordering::SeqCst)); - - c2.complete(0); - assert!(!group.finished()); - assert!(!called.load(Ordering::SeqCst)); - - c4.complete(0); - assert!(group.finished()); - assert!(group.succeeded()); - assert!(called.load(Ordering::SeqCst)); - } - - #[test] - fn test_completion_group_already_completed_with_error() { - // Test when a completion finishes with error before build() - let mut group = CompletionGroup::new(|_| {}); - let c1 = Completion::new_write(|_| {}); - let c2 = Completion::new_write(|_| {}); - - // Complete c1 with error before adding to group - c1.error(CompletionError::Aborted); - - group.add(&c1); - group.add(&c2); - - let group = group.build(); - - // Group should immediately fail with the error - assert!(group.finished()); - assert!(!group.succeeded()); - assert_eq!(group.get_error(), Some(CompletionError::Aborted)); - } - - #[test] - fn test_completion_group_tracks_all_completions() { - // This test verifies the fix for the bug where CompletionGroup::add() - // would skip successfully-finished completions. This caused problems - // when code used drain() to move completions into a group, because - // finished completions would be removed from the source but not tracked - // by the group, effectively losing them. - use std::sync::atomic::{AtomicUsize, Ordering}; - - let callback_count = Arc::new(AtomicUsize::new(0)); - let callback_count_clone = callback_count.clone(); - - // Simulate the pattern: create multiple completions, complete some, - // then add ALL of them to a group (like drain() would do) - let mut completions = Vec::new(); - - // Create 4 completions - for _ in 0..4 { - completions.push(Completion::new_write(|_| {})); - } - - // Complete 2 of them before adding to group (simulate async completion) - completions[0].complete(0); - completions[2].complete(0); - - // Now create a group and add ALL completions (like drain() would do) - let mut group = CompletionGroup::new(move |_| { - callback_count_clone.fetch_add(1, Ordering::SeqCst); - }); - - // Add all completions to the group - for c in &completions { - group.add(c); - } - - let group = group.build(); - - // The group should track all 4 completions: - // - c[0] and c[2] are already finished - // - c[1] and c[3] are still pending - // So the group should not be finished yet - assert!(!group.finished()); - assert_eq!(callback_count.load(Ordering::SeqCst), 0); - - // Complete the first pending completion - completions[1].complete(0); - assert!(!group.finished()); - assert_eq!(callback_count.load(Ordering::SeqCst), 0); - - // Complete the last pending completion - now group should finish - completions[3].complete(0); - assert!(group.finished()); - assert!(group.succeeded()); - assert_eq!(callback_count.load(Ordering::SeqCst), 1); - - // Verify no errors - assert!(group.get_error().is_none()); - } - - #[test] - fn test_completion_group_with_all_finished_successfully() { - // Edge case: all completions are already successfully finished - // when added to the group. The group should complete immediately. - use std::sync::atomic::{AtomicBool, Ordering}; - - let callback_called = Arc::new(AtomicBool::new(false)); - let callback_called_clone = callback_called.clone(); - - let mut completions = Vec::new(); - - // Create and immediately complete 3 completions - for _ in 0..3 { - let c = Completion::new_write(|_| {}); - c.complete(0); - completions.push(c); - } - - // Add all already-completed completions to group - let mut group = CompletionGroup::new(move |_| { - callback_called_clone.store(true, Ordering::SeqCst); - }); - - for c in &completions { - group.add(c); - } - - let group = group.build(); - - // Group should be immediately finished since all completions were done - assert!(group.finished()); - assert!(group.succeeded()); - assert!(callback_called.load(Ordering::SeqCst)); - assert!(group.get_error().is_none()); - } - - #[test] - fn test_completion_group_nested() { - use std::sync::atomic::{AtomicUsize, Ordering}; - - // Track callbacks at different levels - let parent_called = Arc::new(AtomicUsize::new(0)); - let child1_called = Arc::new(AtomicUsize::new(0)); - let child2_called = Arc::new(AtomicUsize::new(0)); - - // Create child group 1 with 2 completions - let child1_called_clone = child1_called.clone(); - let mut child_group1 = CompletionGroup::new(move |_| { - child1_called_clone.fetch_add(1, Ordering::SeqCst); - }); - let c1 = Completion::new_write(|_| {}); - let c2 = Completion::new_write(|_| {}); - child_group1.add(&c1); - child_group1.add(&c2); - let child_group1 = child_group1.build(); - - // Create child group 2 with 2 completions - let child2_called_clone = child2_called.clone(); - let mut child_group2 = CompletionGroup::new(move |_| { - child2_called_clone.fetch_add(1, Ordering::SeqCst); - }); - let c3 = Completion::new_write(|_| {}); - let c4 = Completion::new_write(|_| {}); - child_group2.add(&c3); - child_group2.add(&c4); - let child_group2 = child_group2.build(); - - // Create parent group containing both child groups - let parent_called_clone = parent_called.clone(); - let mut parent_group = CompletionGroup::new(move |_| { - parent_called_clone.fetch_add(1, Ordering::SeqCst); - }); - parent_group.add(&child_group1); - parent_group.add(&child_group2); - let parent_group = parent_group.build(); - - // Initially nothing should be finished - assert!(!parent_group.finished()); - assert!(!child_group1.finished()); - assert!(!child_group2.finished()); - assert_eq!(parent_called.load(Ordering::SeqCst), 0); - assert_eq!(child1_called.load(Ordering::SeqCst), 0); - assert_eq!(child2_called.load(Ordering::SeqCst), 0); - - // Complete first completion in child group 1 - c1.complete(0); - assert!(!child_group1.finished()); - assert!(!parent_group.finished()); - assert_eq!(child1_called.load(Ordering::SeqCst), 0); - assert_eq!(parent_called.load(Ordering::SeqCst), 0); - - // Complete second completion in child group 1 - should finish child group 1 - c2.complete(0); - assert!(child_group1.finished()); - assert!(child_group1.succeeded()); - assert_eq!(child1_called.load(Ordering::SeqCst), 1); - - // Parent should not be finished yet because child group 2 is still pending - assert!(!parent_group.finished()); - assert_eq!(parent_called.load(Ordering::SeqCst), 0); - - // Complete first completion in child group 2 - c3.complete(0); - assert!(!child_group2.finished()); - assert!(!parent_group.finished()); - assert_eq!(child2_called.load(Ordering::SeqCst), 0); - assert_eq!(parent_called.load(Ordering::SeqCst), 0); - - // Complete second completion in child group 2 - should finish everything - c4.complete(0); - assert!(child_group2.finished()); - assert!(child_group2.succeeded()); - assert_eq!(child2_called.load(Ordering::SeqCst), 1); - - // Parent should now be finished - assert!(parent_group.finished()); - assert!(parent_group.succeeded()); - assert_eq!(parent_called.load(Ordering::SeqCst), 1); - assert!(parent_group.get_error().is_none()); - } - - #[test] - fn test_completion_group_nested_with_error() { - use std::sync::atomic::{AtomicBool, Ordering}; - - let parent_called = Arc::new(AtomicBool::new(false)); - let child_called = Arc::new(AtomicBool::new(false)); - - // Create child group with 2 completions - let child_called_clone = child_called.clone(); - let mut child_group = CompletionGroup::new(move |_| { - child_called_clone.store(true, Ordering::SeqCst); - }); - let c1 = Completion::new_write(|_| {}); - let c2 = Completion::new_write(|_| {}); - child_group.add(&c1); - child_group.add(&c2); - let child_group = child_group.build(); - - // Create parent group containing child group and another completion - let parent_called_clone = parent_called.clone(); - let mut parent_group = CompletionGroup::new(move |_| { - parent_called_clone.store(true, Ordering::SeqCst); - }); - let c3 = Completion::new_write(|_| {}); - parent_group.add(&child_group); - parent_group.add(&c3); - let parent_group = parent_group.build(); - - // Complete child group with success - c1.complete(0); - c2.complete(0); - assert!(child_group.finished()); - assert!(child_group.succeeded()); - assert!(child_called.load(Ordering::SeqCst)); - - // Parent still pending - assert!(!parent_group.finished()); - assert!(!parent_called.load(Ordering::SeqCst)); - - // Complete c3 with error - c3.error(CompletionError::Aborted); - - // Parent should finish with error - assert!(parent_group.finished()); - assert!(!parent_group.succeeded()); - assert_eq!(parent_group.get_error(), Some(CompletionError::Aborted)); - assert!(parent_called.load(Ordering::SeqCst)); - } -} From 00e382a7c75c4cbfde8d1089bdd7eb34da27c398 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 21 Oct 2025 18:30:08 +0400 Subject: [PATCH 343/428] avoid unnecessary allocations --- core/storage/buffer_pool.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/storage/buffer_pool.rs b/core/storage/buffer_pool.rs index dbd855d59..6bac73f71 100644 --- a/core/storage/buffer_pool.rs +++ b/core/storage/buffer_pool.rs @@ -239,13 +239,13 @@ impl PoolInner { .wal_frame_arena .as_ref() .and_then(|wal_arena| Arena::try_alloc(wal_arena, len)) - .unwrap_or(Buffer::new_temporary(len)); + .unwrap_or_else(|| Buffer::new_temporary(len)); } // For all other sizes, use regular arena self.page_arena .as_ref() .and_then(|arena| Arena::try_alloc(arena, len)) - .unwrap_or(Buffer::new_temporary(len)) + .unwrap_or_else(|| Buffer::new_temporary(len)) } fn get_db_page_buffer(&mut self) -> Buffer { @@ -253,7 +253,7 @@ impl PoolInner { self.page_arena .as_ref() .and_then(|arena| Arena::try_alloc(arena, db_page_size)) - .unwrap_or(Buffer::new_temporary(db_page_size)) + .unwrap_or_else(|| Buffer::new_temporary(db_page_size)) } fn get_wal_frame_buffer(&mut self) -> Buffer { @@ -261,7 +261,7 @@ impl PoolInner { self.wal_frame_arena .as_ref() .and_then(|wal_arena| Arena::try_alloc(wal_arena, len)) - .unwrap_or(Buffer::new_temporary(len)) + .unwrap_or_else(|| Buffer::new_temporary(len)) } /// Allocate a new arena for the pool to use From ea04e9033a2c91b0c749ffaccfc85c153d52541e Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 21 Oct 2025 13:38:00 +0200 Subject: [PATCH 344/428] core/mvcc: add btree_cursor under MVCC cursor --- core/incremental/compiler.rs | 19 +- core/incremental/cursor.rs | 14 +- core/incremental/operator.rs | 195 +++++++---------- core/mvcc/cursor.rs | 23 +- .../mvcc/database/checkpoint_state_machine.rs | 9 +- core/mvcc/database/mod.rs | 1 - core/mvcc/database/tests.rs | 5 + core/schema.rs | 2 +- core/storage/btree.rs | 202 ++++-------------- core/vdbe/execute.rs | 154 +++++++------ 10 files changed, 246 insertions(+), 378 deletions(-) diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index 98cc29896..a319bb9ad 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -490,15 +490,10 @@ impl DbspCircuit { ) -> Result> { if let Some(root_id) = self.root { // Create temporary cursors for execute (non-commit) operations - let table_cursor = BTreeCursor::new_table( - None, - pager.clone(), - self.internal_state_root, - OPERATOR_COLUMNS, - ); + let table_cursor = + BTreeCursor::new_table(pager.clone(), self.internal_state_root, OPERATOR_COLUMNS); let index_def = create_dbsp_state_index(self.internal_state_index_root); let index_cursor = BTreeCursor::new_index( - None, pager.clone(), self.internal_state_index_root, &index_def, @@ -547,14 +542,12 @@ impl DbspCircuit { CommitState::Init => { // Create state cursors when entering CommitOperators state let state_table_cursor = BTreeCursor::new_table( - None, pager.clone(), self.internal_state_root, OPERATOR_COLUMNS, ); let index_def = create_dbsp_state_index(self.internal_state_index_root); let state_index_cursor = BTreeCursor::new_index( - None, pager.clone(), self.internal_state_index_root, &index_def, @@ -585,7 +578,6 @@ impl DbspCircuit { // Create view cursor when entering UpdateView state let view_cursor = Box::new(BTreeCursor::new_table( - None, pager.clone(), main_data_root, num_columns, @@ -615,7 +607,6 @@ impl DbspCircuit { // due to btree cursor state machine limitations if matches!(write_row_state, WriteRowView::GetRecord) { *view_cursor = Box::new(BTreeCursor::new_table( - None, pager.clone(), main_data_root, num_columns, @@ -643,7 +634,6 @@ impl DbspCircuit { let view_cursor = std::mem::replace( view_cursor, Box::new(BTreeCursor::new_table( - None, pager.clone(), main_data_root, num_columns, @@ -739,14 +729,12 @@ impl DbspCircuit { // Create temporary cursors for the recursive call let temp_table_cursor = BTreeCursor::new_table( - None, pager.clone(), self.internal_state_root, OPERATOR_COLUMNS, ); let index_def = create_dbsp_state_index(self.internal_state_index_root); let temp_index_cursor = BTreeCursor::new_index( - None, pager.clone(), self.internal_state_index_root, &index_def, @@ -2774,8 +2762,7 @@ mod tests { let num_columns = circuit.output_schema.columns.len() + 1; // Create a cursor to read the btree - let mut btree_cursor = - BTreeCursor::new_table(None, pager.clone(), main_data_root, num_columns); + let mut btree_cursor = BTreeCursor::new_table(pager.clone(), main_data_root, num_columns); // Rewind to the beginning pager.io.block(|| btree_cursor.rewind())?; diff --git a/core/incremental/cursor.rs b/core/incremental/cursor.rs index a33734c72..12e3c49c6 100644 --- a/core/incremental/cursor.rs +++ b/core/incremental/cursor.rs @@ -5,7 +5,7 @@ use crate::{ view::{IncrementalView, ViewTransactionState}, }, return_if_io, - storage::btree::{BTreeCursor, CursorTrait}, + storage::btree::CursorTrait, types::{IOResult, SeekKey, SeekOp, SeekResult, Value}, LimboError, Pager, Result, }; @@ -35,7 +35,7 @@ enum SeekState { /// and overlays transaction changes as needed. pub struct MaterializedViewCursor { // Core components - btree_cursor: Box, + btree_cursor: Box, view: Arc>, pager: Arc, @@ -62,7 +62,7 @@ pub struct MaterializedViewCursor { impl MaterializedViewCursor { pub fn new( - btree_cursor: Box, + btree_cursor: Box, view: Arc>, pager: Arc, tx_state: Arc, @@ -296,6 +296,7 @@ impl MaterializedViewCursor { #[cfg(test)] mod tests { use super::*; + use crate::storage::btree::BTreeCursor; use crate::util::IOExt; use crate::{Connection, Database, OpenFlags}; use std::sync::Arc; @@ -359,12 +360,7 @@ mod tests { // Create a btree cursor let pager = conn.get_pager(); - let btree_cursor = Box::new(BTreeCursor::new( - None, // No MvCursor - pager.clone(), - root_page, - num_columns, - )); + let btree_cursor = Box::new(BTreeCursor::new(pager.clone(), root_page, num_columns)); // Get or create transaction state for this view let tx_state = conn.view_transaction_states.get_or_create("test_view"); diff --git a/core/incremental/operator.rs b/core/incremental/operator.rs index 70ab72d74..613497e8d 100644 --- a/core/incremental/operator.rs +++ b/core/incremental/operator.rs @@ -393,12 +393,11 @@ mod tests { // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); // Create an aggregate operator for SUM(age) with no GROUP BY @@ -513,12 +512,11 @@ mod tests { // Create an aggregate operator for SUM(score) GROUP BY team // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -664,12 +662,11 @@ mod tests { // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); // Create COUNT(*) GROUP BY category @@ -745,12 +742,11 @@ mod tests { // Create SUM(amount) GROUP BY product // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -842,12 +838,11 @@ mod tests { // Test the example from DBSP_ROADMAP: COUNT(*) and SUM(amount) GROUP BY user_id // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -934,12 +929,11 @@ mod tests { // Test AVG aggregation // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -1034,12 +1028,11 @@ mod tests { // Test that deletes (negative weights) properly update aggregates // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -1121,12 +1114,11 @@ mod tests { // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -1211,12 +1203,11 @@ mod tests { // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -1295,12 +1286,11 @@ mod tests { // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -1364,12 +1354,11 @@ mod tests { // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -1450,12 +1439,11 @@ mod tests { // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut filter = FilterOperator::new(FilterPredicate::GreaterThan { @@ -1509,12 +1497,11 @@ mod tests { fn test_filter_eval_with_uncommitted() { // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut filter = FilterOperator::new(FilterPredicate::GreaterThan { @@ -1600,12 +1587,11 @@ mod tests { // This is the critical test - aggregations must not modify internal state during eval // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -1770,12 +1756,11 @@ mod tests { // doesn't pollute the internal state // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -1852,12 +1837,11 @@ mod tests { // Test eval with both committed delta and uncommitted changes // Create a persistent pager for the test let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); // Create index cursor with proper index definition for DBSP state table let index_def = create_dbsp_state_index(index_root_page_id); // Index has 4 columns: operator_id, zset_id, element_id, rowid - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -1968,10 +1952,9 @@ mod tests { fn test_min_max_basic() { // Test basic MIN/MAX functionality let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2036,10 +2019,9 @@ mod tests { fn test_min_max_deletion_updates_min() { // Test that deleting the MIN value updates to the next lowest let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2126,10 +2108,9 @@ mod tests { fn test_min_max_deletion_updates_max() { // Test that deleting the MAX value updates to the next highest let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2216,10 +2197,9 @@ mod tests { fn test_min_max_insertion_updates_min() { // Test that inserting a new MIN value updates the aggregate let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2298,10 +2278,9 @@ mod tests { fn test_min_max_insertion_updates_max() { // Test that inserting a new MAX value updates the aggregate let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2380,10 +2359,9 @@ mod tests { fn test_min_max_update_changes_min() { // Test that updating a row to become the new MIN updates the aggregate let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2470,10 +2448,9 @@ mod tests { fn test_min_max_with_group_by() { // Test MIN/MAX with GROUP BY let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2572,10 +2549,9 @@ mod tests { fn test_min_max_with_nulls() { // Test that NULL values are ignored in MIN/MAX let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2648,10 +2624,9 @@ mod tests { fn test_min_max_integer_values() { // Test MIN/MAX with integer values instead of floats let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2716,10 +2691,9 @@ mod tests { fn test_min_max_text_values() { // Test MIN/MAX with text values (alphabetical ordering) let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2755,10 +2729,9 @@ mod tests { #[test] fn test_min_max_with_other_aggregates() { let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2847,10 +2820,9 @@ mod tests { #[test] fn test_min_max_multiple_columns() { let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut agg = AggregateOperator::new( @@ -2926,10 +2898,9 @@ mod tests { fn test_join_operator_inner() { // Test INNER JOIN with incremental updates let (pager, table_page_id, index_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_page_id, 10); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_page_id, 10); let index_def = create_dbsp_state_index(index_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_page_id, &index_def, 10); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_page_id, &index_def, 10); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut join = JoinOperator::new( 1, // operator_id @@ -3023,10 +2994,9 @@ mod tests { fn test_join_operator_with_deletions() { // Test INNER JOIN with deletions (negative weights) let (pager, table_page_id, index_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_page_id, 10); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_page_id, 10); let index_def = create_dbsp_state_index(index_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_page_id, &index_def, 10); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_page_id, &index_def, 10); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut join = JoinOperator::new( @@ -3114,10 +3084,9 @@ mod tests { fn test_join_operator_one_to_many() { // Test one-to-many relationship: one customer with multiple orders let (pager, table_page_id, index_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_page_id, 10); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_page_id, 10); let index_def = create_dbsp_state_index(index_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_page_id, &index_def, 10); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_page_id, &index_def, 10); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut join = JoinOperator::new( @@ -3251,10 +3220,9 @@ mod tests { fn test_join_operator_many_to_many() { // Test many-to-many: multiple rows with same key on both sides let (pager, table_page_id, index_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_page_id, 10); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_page_id, 10); let index_def = create_dbsp_state_index(index_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_page_id, &index_def, 10); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_page_id, &index_def, 10); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut join = JoinOperator::new( @@ -3368,10 +3336,9 @@ mod tests { fn test_join_operator_update_in_one_to_many() { // Test updates in one-to-many scenarios let (pager, table_page_id, index_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_page_id, 10); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_page_id, 10); let index_def = create_dbsp_state_index(index_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_page_id, &index_def, 10); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_page_id, &index_def, 10); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut join = JoinOperator::new( @@ -3491,10 +3458,9 @@ mod tests { fn test_join_operator_weight_accumulation_complex() { // Test complex weight accumulation with multiple identical rows let (pager, table_page_id, index_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_page_id, 10); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_page_id, 10); let index_def = create_dbsp_state_index(index_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_page_id, &index_def, 10); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_page_id, &index_def, 10); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut join = JoinOperator::new( @@ -3627,9 +3593,9 @@ mod tests { let mut state = EvalState::Init { deltas: delta_pair }; let (pager, table_root, index_root) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root, 5); let index_def = create_dbsp_state_index(index_root); - let index_cursor = BTreeCursor::new_index(None, pager.clone(), index_root, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let result = pager @@ -3687,10 +3653,10 @@ mod tests { #[test] fn test_merge_operator_basic() { let (_pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, _pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(_pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); let index_cursor = - BTreeCursor::new_index(None, _pager.clone(), index_root_page_id, &index_def, 4); + BTreeCursor::new_index(_pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); let mut merge_op = MergeOperator::new( @@ -3748,10 +3714,10 @@ mod tests { #[test] fn test_merge_operator_stateful_distinct() { let (_pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, _pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(_pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); let index_cursor = - BTreeCursor::new_index(None, _pager.clone(), index_root_page_id, &index_def, 4); + BTreeCursor::new_index(_pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); // Test that UNION (distinct) properly deduplicates across multiple operations @@ -3822,10 +3788,10 @@ mod tests { #[test] fn test_merge_operator_single_sided_inputs_union_all() { let (_pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, _pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(_pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); let index_cursor = - BTreeCursor::new_index(None, _pager.clone(), index_root_page_id, &index_def, 4); + BTreeCursor::new_index(_pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); // Test UNION ALL with inputs coming from only one side at a time @@ -3942,10 +3908,10 @@ mod tests { #[test] fn test_merge_operator_both_sides_empty() { let (_pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, _pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(_pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); let index_cursor = - BTreeCursor::new_index(None, _pager.clone(), index_root_page_id, &index_def, 4); + BTreeCursor::new_index(_pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); // Test that both sides being empty works correctly @@ -4022,10 +3988,9 @@ mod tests { // Test that aggregate state serialization correctly preserves column indices // when multiple aggregates operate on different columns let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); - let table_cursor = BTreeCursor::new_table(None, pager.clone(), table_root_page_id, 5); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); let index_def = create_dbsp_state_index(index_root_page_id); - let index_cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page_id, &index_def, 4); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); // Create first operator with SUM(col1), MIN(col3) GROUP BY col0 diff --git a/core/mvcc/cursor.rs b/core/mvcc/cursor.rs index f177181df..20a012cc3 100644 --- a/core/mvcc/cursor.rs +++ b/core/mvcc/cursor.rs @@ -26,6 +26,7 @@ pub struct MvccLazyCursor { tx_id: u64, /// Reusable immutable record, used to allow better allocation strategy. reusable_immutable_record: RefCell>, + _btree_cursor: Box, } impl MvccLazyCursor { @@ -34,17 +35,21 @@ impl MvccLazyCursor { tx_id: u64, root_page_or_table_id: i64, pager: Arc, + btree_cursor: Box, ) -> Result> { + assert!( + (&*btree_cursor as &dyn Any).is::(), + "BTreeCursor expected for mvcc cursor" + ); let table_id = db.get_table_id_from_root_page(root_page_or_table_id); db.maybe_initialize_table(table_id, pager)?; - let cursor = Self { + Ok(Self { db, tx_id, current_pos: RefCell::new(CursorPosition::BeforeFirst), table_id, reusable_immutable_record: RefCell::new(None), - }; - Ok(cursor) + _btree_cursor: btree_cursor, } pub fn current_row(&self) -> Result> { @@ -375,8 +380,16 @@ impl CursorTrait for MvccLazyCursor { fn get_skip_advance(&self) -> bool { todo!() } +} - fn get_mvcc_cursor(&self) -> Arc> { - todo!() +impl Debug for MvccLazyCursor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("MvccLazyCursor") + .field("current_pos", &self.current_pos) + .field("table_id", &self.table_id) + .field("tx_id", &self.tx_id) + .field("reusable_immutable_record", &self.reusable_immutable_record) + .field("btree_cursor", &()) + .finish() } } diff --git a/core/mvcc/database/checkpoint_state_machine.rs b/core/mvcc/database/checkpoint_state_machine.rs index 778431adc..8c3e109e3 100644 --- a/core/mvcc/database/checkpoint_state_machine.rs +++ b/core/mvcc/database/checkpoint_state_machine.rs @@ -390,7 +390,6 @@ impl CheckpointStateMachine { cursor.clone() } else { let cursor = BTreeCursor::new_table( - None, self.pager.clone(), known_root_page as i64, num_columns, @@ -465,12 +464,8 @@ impl CheckpointStateMachine { let cursor = if let Some(cursor) = self.cursors.get(&root_page) { cursor.clone() } else { - let cursor = BTreeCursor::new_table( - None, // Write directly to B-tree - self.pager.clone(), - root_page as i64, - num_columns, - ); + let cursor = + BTreeCursor::new_table(self.pager.clone(), root_page as i64, num_columns); let cursor = Arc::new(RwLock::new(cursor)); self.cursors.insert(root_page, cursor.clone()); cursor diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index f1852ebfd..9675ead3c 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -1884,7 +1884,6 @@ impl MvStore { .value() .unwrap_or_else(|| panic!("Table ID does not have a root page: {table_id}")); let mut cursor = BTreeCursor::new_table( - None, // No MVCC cursor for scanning pager.clone(), root_page as i64, 1, // We'll adjust this as needed diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs index 9198c5825..206636f02 100644 --- a/core/mvcc/database/tests.rs +++ b/core/mvcc/database/tests.rs @@ -830,6 +830,7 @@ fn test_lazy_scan_cursor_basic() { tx_id, table_id, db.conn.pager.read().clone(), + Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), ) .unwrap(); @@ -872,6 +873,7 @@ fn test_lazy_scan_cursor_with_gaps() { tx_id, table_id, db.conn.pager.read().clone(), + Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), ) .unwrap(); @@ -923,6 +925,7 @@ fn test_cursor_basic() { tx_id, table_id, db.conn.pager.read().clone(), + Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), ) .unwrap(); @@ -977,6 +980,7 @@ fn test_cursor_with_empty_table() { tx_id, table_id, db.conn.pager.read().clone(), + Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), ) .unwrap(); assert!(cursor.is_empty()); @@ -994,6 +998,7 @@ fn test_cursor_modification_during_scan() { tx_id, table_id, db.conn.pager.read().clone(), + Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), ) .unwrap(); diff --git a/core/schema.rs b/core/schema.rs index ce30d3950..67e6d2061 100644 --- a/core/schema.rs +++ b/core/schema.rs @@ -414,7 +414,7 @@ impl Schema { mv_cursor.is_none(), "mvcc not yet supported for make_from_btree" ); - let mut cursor = BTreeCursor::new_table(mv_cursor, Arc::clone(&pager), 1, 10); + let mut cursor = BTreeCursor::new_table(Arc::clone(&pager), 1, 10); let mut from_sql_indexes = Vec::with_capacity(10); let mut automatic_indices: HashMap> = HashMap::with_capacity(10); diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 65659c4b3..6d615f611 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -24,7 +24,7 @@ use crate::{ RecordCursor, SeekResult, }, util::IOExt, - Completion, MvCursor, Page, + Completion, Page, }; use crate::{ @@ -39,7 +39,6 @@ use super::{ write_varint_to_vec, IndexInteriorCell, IndexLeafCell, OverflowCell, MINIMUM_CELL_SIZE, }, }; -use parking_lot::RwLock; use std::{ any::Any, cell::{Cell, Ref, RefCell}, @@ -556,15 +555,10 @@ pub trait CursorTrait: Any { fn record_cursor_mut(&self) -> std::cell::RefMut<'_, RecordCursor>; fn get_pager(&self) -> Arc; fn get_skip_advance(&self) -> bool; - - // FIXME: remove once we implement trait for mvcc - fn get_mvcc_cursor(&self) -> Arc>; // --- end: BTreeCursor specific functions ---- } pub struct BTreeCursor { - /// The multi-version cursor that is used to read and write to the database file. - mv_cursor: Option>>, /// The pager that is used to read and write to the database file. pub pager: Arc, /// Cached value of the usable space of a BTree page, since it is very expensive to call in a hot loop via pager.usable_space(). @@ -667,12 +661,7 @@ impl BTreeNodeState { } impl BTreeCursor { - pub fn new( - mv_cursor: Option>>, - pager: Arc, - root_page: i64, - num_columns: usize, - ) -> Self { + pub fn new(pager: Arc, root_page: i64, num_columns: usize) -> Self { let valid_state = if root_page == 1 && !pager.db_state.is_initialized() { CursorValidState::Invalid } else { @@ -680,7 +669,6 @@ impl BTreeCursor { }; let usable_space = pager.usable_space(); Self { - mv_cursor, pager, root_page, usable_space_cached: usable_space, @@ -718,23 +706,12 @@ impl BTreeCursor { } } - pub fn new_table( - mv_cursor: Option>>, - pager: Arc, - root_page: i64, - num_columns: usize, - ) -> Self { - Self::new(mv_cursor, pager, root_page, num_columns) + pub fn new_table(pager: Arc, root_page: i64, num_columns: usize) -> Self { + Self::new(pager, root_page, num_columns) } - pub fn new_index( - mv_cursor: Option>>, - pager: Arc, - root_page: i64, - index: &Index, - num_columns: usize, - ) -> Self { - let mut cursor = Self::new(mv_cursor, pager, root_page, num_columns); + pub fn new_index(pager: Arc, root_page: i64, index: &Index, num_columns: usize) -> Self { + let mut cursor = Self::new(pager, root_page, num_columns); cursor.index_info = Some(IndexInfo::new_from_index(index)); cursor } @@ -767,10 +744,6 @@ impl BTreeCursor { let state = self.is_empty_table_state.borrow().clone(); match state { EmptyTableState::Start => { - if let Some(mv_cursor) = &self.mv_cursor { - let mv_cursor = mv_cursor.read(); - return Ok(IOResult::Done(mv_cursor.is_empty())); - } let (page, c) = self.pager.read_page(self.root_page)?; *self.is_empty_table_state.borrow_mut() = EmptyTableState::ReadPage { page }; if let Some(c) = c { @@ -1296,19 +1269,7 @@ impl BTreeCursor { /// Used in forwards iteration, which is the default. #[instrument(skip(self), level = Level::DEBUG, name = "next")] pub fn get_next_record(&mut self) -> Result> { - if let Some(mv_cursor) = &self.mv_cursor { - let mut mv_cursor = mv_cursor.write(); - assert!(matches!(mv_cursor.next()?, IOResult::Done(_))); - let IOResult::Done(rowid) = mv_cursor.rowid()? else { - todo!() - }; - match rowid { - Some(_rowid) => { - return Ok(IOResult::Done(true)); - } - None => return Ok(IOResult::Done(false)), - } - } else if self.stack.current_page == -1 { + if self.stack.current_page == -1 { // This can happen in nested left joins. See: // https://github.com/tursodatabase/turso/issues/2924 return Ok(IOResult::Done(false)); @@ -1883,10 +1844,6 @@ impl BTreeCursor { /// of iterating cells in order. #[instrument(skip_all, level = Level::DEBUG)] fn tablebtree_seek(&mut self, rowid: i64, seek_op: SeekOp) -> Result> { - turso_assert!( - self.mv_cursor.is_none(), - "attempting to seek with MV cursor" - ); let iter_dir = seek_op.iteration_direction(); if matches!( @@ -2239,10 +2196,6 @@ impl BTreeCursor { #[instrument(skip_all, level = Level::DEBUG)] pub fn move_to(&mut self, key: SeekKey<'_>, cmp: SeekOp) -> Result> { - turso_assert!( - self.mv_cursor.is_none(), - "attempting to move with MV cursor" - ); tracing::trace!(?key, ?cmp); // For a table with N rows, we can find any row by row id in O(log(N)) time by starting at the root page and following the B-tree pointers. // B-trees consist of interior pages and leaf pages. Interior pages contain pointers to other pages, while leaf pages contain the actual row data. @@ -4957,7 +4910,6 @@ impl CursorTrait for BTreeCursor { #[instrument(skip_all, level = Level::DEBUG)] fn last(&mut self) -> Result> { - assert!(self.mv_cursor.is_none()); let cursor_has_record = return_if_io!(self.move_to_rightmost()); self.has_record.replace(cursor_has_record); self.invalidate_record(); @@ -4966,7 +4918,6 @@ impl CursorTrait for BTreeCursor { #[instrument(skip_all, level = Level::DEBUG)] fn prev(&mut self) -> Result> { - assert!(self.mv_cursor.is_none()); loop { match self.advance_state { AdvanceState::Start => { @@ -4985,16 +4936,6 @@ impl CursorTrait for BTreeCursor { #[instrument(skip(self), level = Level::DEBUG)] fn rowid(&self) -> Result>> { - if let Some(mv_cursor) = &self.mv_cursor { - let mv_cursor = mv_cursor.write(); - let IOResult::Done(rowid) = mv_cursor.rowid()? else { - todo!(); - }; - let Some(rowid) = rowid else { - return Ok(IOResult::Done(None)); - }; - return Ok(IOResult::Done(Some(rowid))); - } if self.get_null_flag() { return Ok(IOResult::Done(None)); } @@ -5017,10 +4958,6 @@ impl CursorTrait for BTreeCursor { #[instrument(skip(self, key), level = Level::DEBUG)] fn seek(&mut self, key: SeekKey<'_>, op: SeekOp) -> Result> { - if let Some(mv_cursor) = &self.mv_cursor { - let mut mv_cursor = mv_cursor.write(); - return mv_cursor.seek(key, op); - } self.skip_advance.set(false); // Empty trace to capture the span information tracing::trace!(""); @@ -5038,7 +4975,7 @@ impl CursorTrait for BTreeCursor { #[instrument(skip(self), level = Level::DEBUG)] fn record(&self) -> Result>>> { - if !self.has_record.get() && self.mv_cursor.is_none() { + if !self.has_record.get() { return Ok(IOResult::Done(None)); } let invalidated = self @@ -5052,25 +4989,6 @@ impl CursorTrait for BTreeCursor { .unwrap(); return Ok(IOResult::Done(Some(record_ref))); } - if let Some(mv_cursor) = &self.mv_cursor { - let mv_cursor = mv_cursor.write(); - let Some(row) = mv_cursor.current_row()? else { - return Ok(IOResult::Done(None)); - }; - self.get_immutable_record_or_create() - .as_mut() - .unwrap() - .invalidate(); - self.get_immutable_record_or_create() - .as_mut() - .unwrap() - .start_serialization(&row.data); - self.record_cursor.borrow_mut().invalidate(); - let record_ref = - Ref::filter_map(self.reusable_immutable_record.borrow(), |opt| opt.as_ref()) - .unwrap(); - return Ok(IOResult::Done(Some(record_ref))); - } let page = self.stack.top_ref(); let contents = page.get_contents(); @@ -5118,17 +5036,10 @@ impl CursorTrait for BTreeCursor { #[instrument(skip_all, level = Level::DEBUG)] fn insert(&mut self, key: &BTreeKey) -> Result> { tracing::debug!(valid_state = ?self.valid_state, cursor_state = ?self.state, is_write_in_progress = self.is_write_in_progress()); - match &self.mv_cursor { - Some(mv_cursor) => { - return_if_io!(mv_cursor.write().insert(key)); - } - None => { - return_if_io!(self.insert_into_page(key)); - if key.maybe_rowid().is_some() { - self.has_record.replace(true); - } - } - }; + return_if_io!(self.insert_into_page(key)); + if key.maybe_rowid().is_some() { + self.has_record.replace(true); + } Ok(IOResult::Done(())) } @@ -5146,11 +5057,6 @@ impl CursorTrait for BTreeCursor { /// 9. SeekAfterBalancing -> adjust the cursor to a node that is closer to the deleted value. go to Finish /// 10. Finish -> Delete operation is done. Return CursorResult(Ok()) fn delete(&mut self) -> Result> { - if let Some(mv_cursor) = &self.mv_cursor { - return_if_io!(mv_cursor.write().delete()); - return Ok(IOResult::Done(())); - } - if let CursorState::None = &self.state { self.state = CursorState::Delete(DeleteState::Start); } @@ -5524,7 +5430,6 @@ impl CursorTrait for BTreeCursor { #[instrument(skip_all, level = Level::DEBUG)] fn exists(&mut self, key: &Value) -> Result> { - assert!(self.mv_cursor.is_none()); let int_key = match key { Value::Integer(i) => i, _ => unreachable!("btree tables are indexed by integers!"), @@ -5561,10 +5466,6 @@ impl CursorTrait for BTreeCursor { /// /// Only supposed to be used in the context of a simple Count Select Statement fn count(&mut self) -> Result> { - if let Some(_mv_cursor) = &self.mv_cursor { - todo!("Implement count for mvcc"); - } - let mut mem_page; let mut contents; @@ -5684,14 +5585,9 @@ impl CursorTrait for BTreeCursor { match self.rewind_state { RewindState::Start => { self.rewind_state = RewindState::NextRecord; - if let Some(mv_cursor) = &self.mv_cursor { - let mut mv_cursor = mv_cursor.write(); - return_if_io!(mv_cursor.rewind()); - } else { - let c = self.move_to_root()?; - if let Some(c) = c { - io_yield_one!(c); - } + let c = self.move_to_root()?; + if let Some(c) = c { + io_yield_one!(c); } } RewindState::NextRecord => { @@ -5744,7 +5640,6 @@ impl CursorTrait for BTreeCursor { } fn seek_end(&mut self) -> Result> { - assert!(self.mv_cursor.is_none()); // unsure about this -_- loop { match self.seek_end_state { SeekEndState::Start => { @@ -5780,16 +5675,11 @@ impl CursorTrait for BTreeCursor { } } - fn get_mvcc_cursor(&self) -> Arc> { - self.mv_cursor.as_ref().unwrap().clone() - } - #[instrument(skip_all, level = Level::DEBUG)] fn seek_to_last(&mut self) -> Result> { loop { match self.seek_to_last_state { SeekToLastState::Start => { - assert!(self.mv_cursor.is_none()); let has_record = return_if_io!(self.move_to_rightmost()); self.invalidate_record(); self.has_record.replace(has_record); @@ -8076,7 +7966,7 @@ mod tests { fn validate_btree(pager: Arc, page_idx: i64) -> (usize, bool) { let num_columns = 5; - let cursor = BTreeCursor::new_table(None, pager.clone(), page_idx, num_columns); + let cursor = BTreeCursor::new_table(pager.clone(), page_idx, num_columns); let (page, _c) = cursor.read_page(page_idx).unwrap(); while page.is_locked() { pager.io.step().unwrap(); @@ -8187,7 +8077,7 @@ mod tests { fn format_btree(pager: Arc, page_idx: i64, depth: usize) -> String { let num_columns = 5; - let cursor = BTreeCursor::new_table(None, pager.clone(), page_idx, num_columns); + let cursor = BTreeCursor::new_table(pager.clone(), page_idx, num_columns); let (page, _c) = cursor.read_page(page_idx).unwrap(); while page.is_locked() { pager.io.step().unwrap(); @@ -8269,7 +8159,7 @@ mod tests { let conn = db.connect().unwrap(); let pager = conn.pager.read().clone(); - let mut cursor = BTreeCursor::new(None, pager, 1, 5); + let mut cursor = BTreeCursor::new(pager, 1, 5); let result = cursor.rewind()?; assert!(matches!(result, IOResult::Done(_))); let result = cursor.next()?; @@ -8299,7 +8189,7 @@ mod tests { let large_record = ImmutableRecord::from_registers(regs, regs.len()); // Create cursor for the table - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); let initial_pagecount = pager .io @@ -8451,7 +8341,7 @@ mod tests { let (pager, root_page, _, _) = empty_btree(); let num_columns = 5; - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); for (key, size) in sequence.iter() { run_until_done( || { @@ -8517,7 +8407,7 @@ mod tests { for _ in 0..attempts { let (pager, root_page, _db, conn) = empty_btree(); - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); let mut keys = SortedVec::new(); tracing::info!("seed: {seed}"); for insert_id in 0..inserts { @@ -8659,13 +8549,8 @@ mod tests { has_rowid: false, }; let num_columns = index_def.columns.len(); - let mut cursor = BTreeCursor::new_index( - None, - pager.clone(), - index_root_page, - &index_def, - num_columns, - ); + let mut cursor = + BTreeCursor::new_index(pager.clone(), index_root_page, &index_def, num_columns); let mut keys = SortedVec::new(); tracing::info!("seed: {seed}"); for i in 0..inserts { @@ -8822,8 +8707,7 @@ mod tests { ephemeral: false, has_rowid: false, }; - let mut cursor = - BTreeCursor::new_index(None, pager.clone(), index_root_page, &index_def, 1); + let mut cursor = BTreeCursor::new_index(pager.clone(), index_root_page, &index_def, 1); // Track expected keys that should be present in the tree let mut expected_keys = Vec::new(); @@ -9214,7 +9098,7 @@ mod tests { let pager = setup_test_env(5); let num_columns = 5; - let mut cursor = BTreeCursor::new_table(None, pager.clone(), 1, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), 1, num_columns); let max_local = payload_overflow_threshold_max(PageType::TableLeaf, 4096); let usable_size = cursor.usable_space(); @@ -9325,7 +9209,7 @@ mod tests { let pager = setup_test_env(5); let num_columns = 5; - let mut cursor = BTreeCursor::new_table(None, pager.clone(), 1, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), 1, num_columns); let small_payload = vec![b'A'; 10]; @@ -9374,7 +9258,7 @@ mod tests { let pager = setup_test_env(initial_size); let num_columns = 5; - let mut cursor = BTreeCursor::new_table(None, pager.clone(), 2, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), 2, num_columns); // Initialize page 2 as a root page (interior) let root_page = run_until_done( @@ -9467,7 +9351,7 @@ mod tests { let num_columns = 5; let record_count = 10; - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); for rowid in 1..=record_count { insert_record(&mut cursor, &pager, rowid, Value::Integer(rowid))?; @@ -9492,7 +9376,7 @@ mod tests { let num_columns = 5; let record_count = 1000; - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); for rowid in 1..=record_count { insert_record(&mut cursor, &pager, rowid, Value::Integer(rowid))?; @@ -9518,7 +9402,7 @@ mod tests { let num_columns = 5; let record_count = 1000; - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); for rowid in 1..=record_count { insert_record(&mut cursor, &pager, rowid, Value::Integer(rowid))?; @@ -9558,8 +9442,8 @@ mod tests { let num_columns = 5; let record_count = 1000; - let mut cursor1 = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); - let mut cursor2 = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor1 = BTreeCursor::new_table(pager.clone(), root_page, num_columns); + let mut cursor2 = BTreeCursor::new_table(pager.clone(), root_page, num_columns); // Use cursor1 to insert records for rowid in 1..=record_count { @@ -9592,7 +9476,7 @@ mod tests { let num_columns = 5; let record_count = 100; - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); let initial_page_count = pager .io @@ -10228,7 +10112,7 @@ mod tests { let num_columns = 5; for i in 0..10000 { - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); tracing::info!("INSERT INTO t VALUES ({});", i,); let regs = &[Register::Value(Value::Integer(i))]; let value = ImmutableRecord::from_registers(regs, regs.len()); @@ -10256,7 +10140,7 @@ mod tests { format_btree(pager.clone(), root_page, 0) ); for key in keys.iter() { - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); let key = Value::Integer(*key); let exists = run_until_done(|| cursor.exists(&key), pager.deref()).unwrap(); assert!(exists, "key not found {key}"); @@ -10315,7 +10199,7 @@ mod tests { // Insert 10,000 records in to the BTree. for i in 1..=10000 { - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); let regs = &[Register::Value(Value::Text(Text::new("hello world")))]; let value = ImmutableRecord::from_registers(regs, regs.len()); @@ -10342,7 +10226,7 @@ mod tests { // Delete records with 500 <= key <= 3500 for i in 500..=3500 { - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); let seek_key = SeekKey::TableRowId(i); let seek_result = run_until_done( @@ -10362,7 +10246,7 @@ mod tests { continue; } - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); let key = Value::Integer(i); let exists = run_until_done(|| cursor.exists(&key), pager.deref()).unwrap(); assert!(exists, "Key {i} should exist but doesn't"); @@ -10370,7 +10254,7 @@ mod tests { // Verify the deleted records don't exist. for i in 500..=3500 { - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); let key = Value::Integer(i); let exists = run_until_done(|| cursor.exists(&key), pager.deref()).unwrap(); assert!(!exists, "Deleted key {i} still exists"); @@ -10393,7 +10277,7 @@ mod tests { let num_columns = 5; for (i, huge_text) in huge_texts.iter().enumerate().take(iterations) { - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); tracing::info!("INSERT INTO t VALUES ({});", i,); let regs = &[Register::Value(Value::Text(Text { value: huge_text.as_bytes().to_vec(), @@ -10423,7 +10307,7 @@ mod tests { format_btree(pager.clone(), root_page, 0) ); } - let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new_table(pager.clone(), root_page, num_columns); let _c = cursor.move_to_root().unwrap(); for i in 0..iterations { let has_next = run_until_done(|| cursor.next(), pager.deref()).unwrap(); @@ -10441,7 +10325,7 @@ mod tests { pub fn test_read_write_payload_with_offset() { let (pager, root_page, _, _) = empty_btree(); let num_columns = 5; - let mut cursor = BTreeCursor::new(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new(pager.clone(), root_page, num_columns); let offset = 2; // blobs data starts at offset 2 let initial_text = "hello world"; let initial_blob = initial_text.as_bytes().to_vec(); @@ -10518,7 +10402,7 @@ mod tests { pub fn test_read_write_payload_with_overflow_page() { let (pager, root_page, _, _) = empty_btree(); let num_columns = 5; - let mut cursor = BTreeCursor::new(None, pager.clone(), root_page, num_columns); + let mut cursor = BTreeCursor::new(pager.clone(), root_page, num_columns); let mut large_blob = vec![b'A'; 40960 - 11]; // insert large blob. 40960 = 10 page long. let hello_world = b"hello world"; large_blob.extend_from_slice(hello_world); diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 0d7daeacd..00ce85fec 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -36,6 +36,7 @@ use crate::{ translate::emitter::TransactionMode, }; use crate::{get_cursor, CheckpointMode, Connection, MvCursor}; +use std::any::Any; use std::env::temp_dir; use std::ops::DerefMut; use std::{ @@ -1044,16 +1045,9 @@ pub fn op_open_read( let pager = program.get_pager_from_database_index(db); let (_, cursor_type) = program.cursor_ref.get(*cursor_id).unwrap(); - let mv_cursor = if let Some(tx_id) = program.connection.get_mv_tx_id() { - let mv_store = mv_store.unwrap().clone(); - let mv_cursor = Arc::new(RwLock::new( - MvCursor::new(mv_store, tx_id, *root_page, pager.clone()).unwrap(), - )); - Some(mv_cursor) - } else { + if program.connection.get_mv_tx_id().is_none() { assert!(*root_page >= 0, ""); - None - }; + } let cursors = &mut state.cursors; let num_columns = match cursor_type { CursorType::BTreeTable(table_rc) => table_rc.columns.len(), @@ -1062,16 +1056,33 @@ pub fn op_open_read( _ => unreachable!("This should not have happened"), }; + let maybe_promote_to_mvcc_cursor = + |btree_cursor: Box| -> Result> { + if let Some(tx_id) = program.connection.get_mv_tx_id() { + let mv_store = mv_store.unwrap().clone(); + Ok(Box::new(MvCursor::new( + mv_store, + tx_id, + *root_page, + pager.clone(), + btree_cursor, + )?)) + } else { + Ok(btree_cursor) + } + }; + match cursor_type { CursorType::MaterializedView(_, view_mutex) => { // This is a materialized view with storage // Create btree cursor for reading the persistent data + let btree_cursor = Box::new(BTreeCursor::new_table( - mv_cursor, pager.clone(), *root_page, num_columns, )); + let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?; // Get the view name and look up or create its transaction state let view_name = view_mutex.lock().unwrap().name().to_string(); @@ -1082,7 +1093,7 @@ pub fn op_open_read( // Create materialized view cursor with this view's transaction state let mv_cursor = crate::incremental::cursor::MaterializedViewCursor::new( - btree_cursor, + cursor, view_mutex.clone(), pager.clone(), tx_state, @@ -1095,24 +1106,29 @@ pub fn op_open_read( } CursorType::BTreeTable(_) => { // Regular table - let cursor = BTreeCursor::new_table(mv_cursor, pager.clone(), *root_page, num_columns); + let btree_cursor = Box::new(BTreeCursor::new_table( + pager.clone(), + *root_page, + num_columns, + )); + let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?; cursors .get_mut(*cursor_id) .unwrap() - .replace(Cursor::new_btree(Box::new(cursor))); + .replace(Cursor::new_btree(cursor)); } CursorType::BTreeIndex(index) => { - let cursor = BTreeCursor::new_index( - mv_cursor, + let btree_cursor = Box::new(BTreeCursor::new_index( pager.clone(), *root_page, index.as_ref(), num_columns, - ); + )); + let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?; cursors .get_mut(*cursor_id) .unwrap() - .replace(Cursor::new_btree(Box::new(cursor))); + .replace(Cursor::new_btree(cursor)); } CursorType::Pseudo(_) => { panic!("OpenRead on pseudo cursor"); @@ -6423,13 +6439,13 @@ pub fn op_new_rowid( }, insn ); - if let Some(mv_store) = mv_store { + // With MVCC we can't simply find last rowid and get rowid + 1 as a result. To not have two conflicting rowids concurrently we need to call `get_next_rowid` + // which will make sure we don't collide. let rowid = { let cursor = state.get_cursor(*cursor); - let cursor = cursor.as_btree_mut(); - let mvcc_cursor = cursor.get_mvcc_cursor(); - let mut mvcc_cursor = mvcc_cursor.write(); + let cursor = cursor.as_btree_mut() as &mut dyn Any; + let mvcc_cursor = cursor.downcast_mut::().unwrap(); mvcc_cursor.get_next_rowid() }; state.registers[*rowid_reg] = Register::Value(Value::Integer(rowid)); @@ -6706,16 +6722,10 @@ pub fn op_not_exists( }, insn ); - let exists = if let Some(mv_store) = mv_store { - let cursor = must_be_btree_cursor!(*cursor, program.cursor_ref, state, "NotExists"); - let cursor = cursor.as_btree_mut(); - let mvcc_cursor = cursor.get_mvcc_cursor(); - false - } else { - let cursor = must_be_btree_cursor!(*cursor, program.cursor_ref, state, "NotExists"); - let cursor = cursor.as_btree_mut(); - return_if_io!(cursor.exists(state.registers[*rowid_reg].get_value())) - }; + let cursor = must_be_btree_cursor!(*cursor, program.cursor_ref, state, "NotExists"); + let cursor = cursor.as_btree_mut(); + let exists = return_if_io!(cursor.exists(state.registers[*rowid_reg].get_value())); + if exists { state.pc += 1; } else { @@ -6823,15 +6833,21 @@ pub fn op_open_write( CursorType::BTreeIndex(index) => Some(index), _ => None, }; - let mv_cursor = if let Some(tx_id) = program.connection.get_mv_tx_id() { - let mv_store = mv_store.unwrap().clone(); - let mv_cursor = Arc::new(RwLock::new( - MvCursor::new(mv_store.clone(), tx_id, root_page, pager.clone()).unwrap(), - )); - Some(mv_cursor) - } else { - None - }; + let maybe_promote_to_mvcc_cursor = + |btree_cursor: Box| -> Result> { + if let Some(tx_id) = program.connection.get_mv_tx_id() { + let mv_store = mv_store.unwrap().clone(); + Ok(Box::new(MvCursor::new( + mv_store, + tx_id, + root_page, + pager.clone(), + btree_cursor, + )?)) + } else { + Ok(btree_cursor) + } + }; if let Some(index) = maybe_index { let conn = program.connection.clone(); let schema = conn.schema.read(); @@ -6840,17 +6856,17 @@ pub fn op_open_write( .and_then(|table| table.btree()); let num_columns = index.columns.len(); - let cursor = BTreeCursor::new_index( - mv_cursor, + let btree_cursor = Box::new(BTreeCursor::new_index( pager.clone(), root_page, index.as_ref(), num_columns, - ); + )); + let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?; cursors .get_mut(*cursor_id) .unwrap() - .replace(Cursor::new_btree(Box::new(cursor))); + .replace(Cursor::new_btree(cursor)); } else { let num_columns = match cursor_type { CursorType::BTreeTable(table_rc) => table_rc.columns.len(), @@ -6860,11 +6876,16 @@ pub fn op_open_write( ), }; - let cursor = BTreeCursor::new_table(mv_cursor, pager.clone(), root_page, num_columns); + let btree_cursor = Box::new(BTreeCursor::new_table( + pager.clone(), + root_page, + num_columns, + )); + let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?; cursors .get_mut(*cursor_id) .unwrap() - .replace(Cursor::new_btree(Box::new(cursor))); + .replace(Cursor::new_btree(cursor)); } state.pc += 1; Ok(InsnFunctionStepResult::Step) @@ -6958,7 +6979,7 @@ pub fn op_destroy( OpDestroyState::CreateCursor => { // Destroy doesn't do anything meaningful with the table/index distinction so we can just use a // table btree cursor for both. - let cursor = BTreeCursor::new(None, pager.clone(), *root, 0); + let cursor = BTreeCursor::new(pager.clone(), *root, 0); state.op_destroy_state = OpDestroyState::DestroyBtree(Arc::new(RwLock::new(cursor))); } @@ -7623,9 +7644,9 @@ pub fn op_open_ephemeral( }; let cursor = if let CursorType::BTreeIndex(index) = cursor_type { - BTreeCursor::new_index(None, pager.clone(), root_page, index, num_columns) + BTreeCursor::new_index(pager.clone(), root_page, index, num_columns) } else { - BTreeCursor::new_table(None, pager.clone(), root_page, num_columns) + BTreeCursor::new_table(pager.clone(), root_page, num_columns) }; state.op_open_ephemeral_state = OpOpenEphemeralState::Rewind { cursor: Box::new(cursor), @@ -7704,29 +7725,32 @@ pub fn op_open_dup( // a separate database file). let pager = original_cursor.get_pager(); - let mv_cursor = if let Some(tx_id) = program.connection.get_mv_tx_id() { - let mv_store = mv_store.unwrap().clone(); - let mv_cursor = Arc::new(RwLock::new(MvCursor::new( - mv_store, - tx_id, - root_page, - pager.clone(), - )?)); - Some(mv_cursor) - } else { - None - }; - let (_, cursor_type) = program.cursor_ref.get(*original_cursor_id).unwrap(); match cursor_type { CursorType::BTreeTable(table) => { - let cursor = - BTreeCursor::new_table(mv_cursor, pager.clone(), root_page, table.columns.len()); + let cursor = Box::new(BTreeCursor::new_table( + pager.clone(), + root_page, + table.columns.len(), + )); + let cursor: Box = + if let Some(tx_id) = program.connection.get_mv_tx_id() { + let mv_store = mv_store.unwrap().clone(); + Box::new(MvCursor::new( + mv_store, + tx_id, + root_page, + pager.clone(), + cursor, + )?) + } else { + cursor + }; let cursors = &mut state.cursors; cursors .get_mut(*new_cursor_id) .unwrap() - .replace(Cursor::new_btree(Box::new(cursor))); + .replace(Cursor::new_btree(cursor)); } CursorType::BTreeIndex(table) => { // In principle, we could implement OpenDup for BTreeIndex, From edac1ff256f9922c6ce58e5ffd9fe0624b1c96b2 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 21 Oct 2025 13:38:31 +0200 Subject: [PATCH 345/428] core/mvcc/cursor: set null flag --- core/mvcc/cursor.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/core/mvcc/cursor.rs b/core/mvcc/cursor.rs index 20a012cc3..1834dc986 100644 --- a/core/mvcc/cursor.rs +++ b/core/mvcc/cursor.rs @@ -18,7 +18,7 @@ enum CursorPosition { /// We have reached the end of the table. End, } -#[derive(Debug)] + pub struct MvccLazyCursor { pub db: Arc>, current_pos: RefCell, @@ -27,6 +27,7 @@ pub struct MvccLazyCursor { /// Reusable immutable record, used to allow better allocation strategy. reusable_immutable_record: RefCell>, _btree_cursor: Box, + null_flag: bool, } impl MvccLazyCursor { @@ -50,6 +51,7 @@ impl MvccLazyCursor { table_id, reusable_immutable_record: RefCell::new(None), _btree_cursor: btree_cursor, + null_flag: false, } pub fn current_row(&self) -> Result> { @@ -273,12 +275,12 @@ impl CursorTrait for MvccLazyCursor { Ok(IOResult::Done(())) } - fn set_null_flag(&mut self, _flag: bool) { - todo!() + fn set_null_flag(&mut self, flag: bool) { + self.null_flag = flag; } fn get_null_flag(&self) -> bool { - todo!() + self.null_flag } fn exists(&mut self, key: &Value) -> Result> { From 790859c62f00224bdc1ffa2f9f43303765ed509b Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 21 Oct 2025 13:38:56 +0200 Subject: [PATCH 346/428] core/mvcc/cursor: fix exists --- core/mvcc/cursor.rs | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/core/mvcc/cursor.rs b/core/mvcc/cursor.rs index 1834dc986..b100f0051 100644 --- a/core/mvcc/cursor.rs +++ b/core/mvcc/cursor.rs @@ -284,21 +284,25 @@ impl CursorTrait for MvccLazyCursor { } fn exists(&mut self, key: &Value) -> Result> { + self.invalidate_record(); let int_key = match key { Value::Integer(i) => i, _ => unreachable!("btree tables are indexed by integers!"), }; - let exists = self - .db - .seek_rowid( - Bound::Included(&RowID { - table_id: self.table_id, - row_id: *int_key, - }), - true, - self.tx_id, - ) - .is_some(); + let rowid = self.db.seek_rowid( + Bound::Included(&RowID { + table_id: self.table_id, + row_id: *int_key, + }), + true, + self.tx_id, + ); + tracing::trace!("found {rowid:?}"); + let exists = if let Some(rowid) = rowid { + rowid.row_id == *int_key + } else { + false + }; if exists { self.current_pos.replace(CursorPosition::Loaded(RowID { table_id: self.table_id, From 0fee588bcabdc19b1b1609f3546ae07788f1fb46 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 21 Oct 2025 13:40:29 +0200 Subject: [PATCH 347/428] core/mvcc/cursor: add record cursor --- core/mvcc/cursor.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/core/mvcc/cursor.rs b/core/mvcc/cursor.rs index b100f0051..f766b2808 100644 --- a/core/mvcc/cursor.rs +++ b/core/mvcc/cursor.rs @@ -28,6 +28,7 @@ pub struct MvccLazyCursor { reusable_immutable_record: RefCell>, _btree_cursor: Box, null_flag: bool, + record_cursor: RefCell, } impl MvccLazyCursor { @@ -52,6 +53,7 @@ impl MvccLazyCursor { reusable_immutable_record: RefCell::new(None), _btree_cursor: btree_cursor, null_flag: false, + record_cursor: RefCell::new(RecordCursor::new()), } pub fn current_row(&self) -> Result> { @@ -111,6 +113,7 @@ impl CursorTrait for MvccLazyCursor { } else { self.current_pos.replace(CursorPosition::BeforeFirst); } + self.invalidate_record(); Ok(IOResult::Done(())) } @@ -216,6 +219,7 @@ impl CursorTrait for MvccLazyCursor { SeekOp::LT => (Bound::Excluded(&rowid), false), SeekOp::LE { eq_only: _ } => (Bound::Included(&rowid), false), }; + self.invalidate_record(); let rowid = self.db.seek_rowid(bound, lower_bound, self.tx_id); if let Some(rowid) = rowid { self.current_pos.replace(CursorPosition::Loaded(rowid)); @@ -263,6 +267,7 @@ impl CursorTrait for MvccLazyCursor { self.current_pos.replace(CursorPosition::BeforeFirst); })?; } + self.invalidate_record(); Ok(IOResult::Done(())) } @@ -272,6 +277,7 @@ impl CursorTrait for MvccLazyCursor { }; let rowid = RowID::new(self.table_id, rowid); self.db.delete(self.tx_id, rowid)?; + self.invalidate_record(); Ok(IOResult::Done(())) } @@ -369,6 +375,7 @@ impl CursorTrait for MvccLazyCursor { .as_mut() .unwrap() .invalidate(); + self.record_cursor.borrow_mut().invalidate(); } fn has_rowid(&self) -> bool { @@ -376,7 +383,7 @@ impl CursorTrait for MvccLazyCursor { } fn record_cursor_mut(&self) -> std::cell::RefMut<'_, crate::types::RecordCursor> { - todo!() + self.record_cursor.borrow_mut() } fn get_pager(&self) -> Arc { From 3f41a092f2bd53a4dbf6ef928de80730b27931d3 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 21 Oct 2025 13:41:10 +0200 Subject: [PATCH 348/428] core/mvcc/cursor: add next rowid lock --- core/mvcc/cursor.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/core/mvcc/cursor.rs b/core/mvcc/cursor.rs index f766b2808..5ff69e72c 100644 --- a/core/mvcc/cursor.rs +++ b/core/mvcc/cursor.rs @@ -29,6 +29,7 @@ pub struct MvccLazyCursor { _btree_cursor: Box, null_flag: bool, record_cursor: RefCell, + next_rowid_lock: Arc>, } impl MvccLazyCursor { @@ -54,6 +55,8 @@ impl MvccLazyCursor { _btree_cursor: btree_cursor, null_flag: false, record_cursor: RefCell::new(RecordCursor::new()), + next_rowid_lock: Arc::new(RwLock::new(())), + }) } pub fn current_row(&self) -> Result> { @@ -80,6 +83,9 @@ impl MvccLazyCursor { } pub fn get_next_rowid(&mut self) -> i64 { + // lock so we don't get same two rowids + let lock = self.next_rowid_lock.clone(); + let _lock = lock.write(); let _ = self.last(); match *self.current_pos.borrow() { CursorPosition::Loaded(id) => id.row_id + 1, @@ -147,6 +153,7 @@ impl CursorTrait for MvccLazyCursor { } }; self.current_pos.replace(new_position); + self.invalidate_record(); Ok(IOResult::Done(matches!( self.get_current_pos(), From 88576041618c5822438c7ed34499331a82e074bb Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 21 Oct 2025 13:41:21 +0200 Subject: [PATCH 349/428] core/mvcc/cursor: fix rewind --- core/mvcc/cursor.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/mvcc/cursor.rs b/core/mvcc/cursor.rs index 5ff69e72c..dc819f6c2 100644 --- a/core/mvcc/cursor.rs +++ b/core/mvcc/cursor.rs @@ -353,7 +353,12 @@ impl CursorTrait for MvccLazyCursor { } fn rewind(&mut self) -> Result> { - self.current_pos.replace(CursorPosition::BeforeFirst); + self.invalidate_record(); + if !matches!(self.get_current_pos(), CursorPosition::BeforeFirst) { + self.current_pos.replace(CursorPosition::BeforeFirst); + } + // Next will set cursor position to a valid position if it exists, otherwise it will set it to one that doesn't exist. + let _ = return_if_io!(self.next()); Ok(IOResult::Done(())) } From 92c0e744585cdf0a4c578ece0331528399cd51f6 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 21 Oct 2025 13:41:34 +0200 Subject: [PATCH 350/428] core/mvcc/cursor: implement seek_to_last --- core/mvcc/cursor.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/core/mvcc/cursor.rs b/core/mvcc/cursor.rs index dc819f6c2..696835427 100644 --- a/core/mvcc/cursor.rs +++ b/core/mvcc/cursor.rs @@ -379,7 +379,21 @@ impl CursorTrait for MvccLazyCursor { } fn seek_to_last(&mut self) -> Result> { - todo!() + self.invalidate_record(); + let max_rowid = RowID { + table_id: self.table_id, + row_id: i64::MAX, + }; + let bound = Bound::Included(&max_rowid); + let lower_bound = false; + + let rowid = self.db.seek_rowid(bound, lower_bound, self.tx_id); + if let Some(rowid) = rowid { + self.current_pos.replace(CursorPosition::Loaded(rowid)); + } else { + self.current_pos.replace(CursorPosition::End); + } + Ok(IOResult::Done(())) } fn invalidate_record(&mut self) { From 128b4266814cfc083f0f1f041447175c818c88ba Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Tue, 21 Oct 2025 13:41:57 +0200 Subject: [PATCH 351/428] core/mvcc/cursor: imports --- core/mvcc/cursor.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/core/mvcc/cursor.rs b/core/mvcc/cursor.rs index 696835427..b9e0ce136 100644 --- a/core/mvcc/cursor.rs +++ b/core/mvcc/cursor.rs @@ -1,9 +1,12 @@ +use parking_lot::RwLock; + use crate::mvcc::clock::LogicalClock; use crate::mvcc::database::{MVTableId, MvStore, Row, RowID}; -use crate::storage::btree::{BTreeKey, CursorTrait}; -use crate::types::{IOResult, ImmutableRecord, SeekKey, SeekOp, SeekResult}; -use crate::Result; +use crate::storage::btree::{BTreeCursor, BTreeKey, CursorTrait}; +use crate::types::{IOResult, ImmutableRecord, RecordCursor, SeekKey, SeekOp, SeekResult}; +use crate::{return_if_io, Result}; use crate::{Pager, Value}; +use std::any::Any; use std::cell::{Ref, RefCell}; use std::fmt::Debug; use std::ops::Bound; From 792e0033ae3704e787ffe58aa34360f2f7c317d0 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 21 Oct 2025 21:03:45 +0400 Subject: [PATCH 352/428] fix tests and clippy --- core/vector/operations/distance_cos.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/core/vector/operations/distance_cos.rs b/core/vector/operations/distance_cos.rs index 437d069c6..e7a56a923 100644 --- a/core/vector/operations/distance_cos.rs +++ b/core/vector/operations/distance_cos.rs @@ -138,9 +138,9 @@ mod tests { vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[0.0, 0.0]), 1.0 ); - assert!(vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[1.0, 2.0]).abs() < 1e-9); - assert!((vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[-1.0, -2.0]) - 2.0).abs() < 1e-9); - assert!((vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[-2.0, 1.0]) - 1.0).abs() < 1e-9); + assert!(vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[1.0, 2.0]).abs() < 1e-6); + assert!((vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[-1.0, -2.0]) - 2.0).abs() < 1e-6); + assert!((vector_f32_distance_cos_simsimd(&[1.0, 2.0], &[-2.0, 1.0]) - 1.0).abs() < 1e-6); } #[test] @@ -150,9 +150,9 @@ mod tests { vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[0.0, 0.0]), 1.0 ); - assert!(vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[1.0, 2.0]).abs() < 1e-9); - assert!((vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[-1.0, -2.0]) - 2.0).abs() < 1e-9); - assert!((vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[-2.0, 1.0]) - 1.0).abs() < 1e-9); + assert!(vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[1.0, 2.0]).abs() < 1e-6); + assert!((vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[-1.0, -2.0]) - 2.0).abs() < 1e-6); + assert!((vector_f64_distance_cos_simsimd(&[1.0, 2.0], &[-2.0, 1.0]) - 1.0).abs() < 1e-6); } #[test] @@ -198,7 +198,7 @@ mod tests { let v2 = vector_convert(v2.into(), VectorType::Float32Dense).unwrap(); let d1 = vector_f32_distance_cos_rust(v1.as_f32_slice(), v2.as_f32_slice()); let d2 = vector_f32_distance_cos_simsimd(v1.as_f32_slice(), v2.as_f32_slice()); - println!("d1 vs d2: {} vs {}", d1, d2); + println!("d1 vs d2: {d1} vs {d2}"); (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-4 } @@ -211,7 +211,7 @@ mod tests { let v2 = vector_convert(v2.into(), VectorType::Float64Dense).unwrap(); let d1 = vector_f64_distance_cos_rust(v1.as_f64_slice(), v2.as_f64_slice()); let d2 = vector_f64_distance_cos_simsimd(v1.as_f64_slice(), v2.as_f64_slice()); - println!("d1 vs d2: {} vs {}", d1, d2); + println!("d1 vs d2: {d1} vs {d2}"); (d1.is_nan() && d2.is_nan()) || (d1 - d2).abs() < 1e-6 } } From 4805c6b06b66912c5dc104b478d22b2c1be07542 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 21 Oct 2025 19:18:43 +0300 Subject: [PATCH 353/428] antithesis-tests: Don't fail tests on ProgrammingError The ProgrammingError exception is thrown when tables, indexes, or columns are dropped in parallel. Let's not fail the Antithesis test drivers when that happens. --- .../parallel_driver_alter_table.py | 4 ++ .../parallel_driver_create_index.py | 6 +++ .../stress-composer/parallel_driver_delete.py | 4 ++ .../parallel_driver_drop_index.py | 4 +- .../parallel_driver_drop_table.py | 11 +++-- .../stress-composer/parallel_driver_insert.py | 4 ++ .../parallel_driver_rollback.py | 4 ++ .../parallel_driver_schema_rollback.py | 49 ++++++++++++------- .../stress-composer/parallel_driver_update.py | 4 ++ 9 files changed, 67 insertions(+), 23 deletions(-) diff --git a/antithesis-tests/stress-composer/parallel_driver_alter_table.py b/antithesis-tests/stress-composer/parallel_driver_alter_table.py index be0551e41..0d151dbbb 100755 --- a/antithesis-tests/stress-composer/parallel_driver_alter_table.py +++ b/antithesis-tests/stress-composer/parallel_driver_alter_table.py @@ -126,6 +126,10 @@ try: con.commit() con_init.commit() +except turso.ProgrammingError as e: + print(f"Table/column might have been dropped in parallel: {e}") + con.rollback() + con_init.rollback() except turso.OperationalError as e: print(f"Failed to alter table: {e}") con.rollback() diff --git a/antithesis-tests/stress-composer/parallel_driver_create_index.py b/antithesis-tests/stress-composer/parallel_driver_create_index.py index e384dcb2d..2391dac4f 100755 --- a/antithesis-tests/stress-composer/parallel_driver_create_index.py +++ b/antithesis-tests/stress-composer/parallel_driver_create_index.py @@ -90,6 +90,9 @@ if create_composite: """) con_init.commit() print(f"Successfully created composite index: {index_name}") + except turso.ProgrammingError as e: + print(f"Table/column might have been dropped in parallel: {e}") + con.rollback() except turso.OperationalError as e: print(f"Failed to create composite index: {e}") con.rollback() @@ -137,6 +140,9 @@ else: """) con_init.commit() print(f"Successfully created {idx_type} index: {index_name}") + except turso.ProgrammingError as e: + print(f"Table/column might have been dropped in parallel: {e}") + con.rollback() except turso.OperationalError as e: print(f"Failed to create index: {e}") con.rollback() diff --git a/antithesis-tests/stress-composer/parallel_driver_delete.py b/antithesis-tests/stress-composer/parallel_driver_delete.py index 951cffd62..7f32e816f 100755 --- a/antithesis-tests/stress-composer/parallel_driver_delete.py +++ b/antithesis-tests/stress-composer/parallel_driver_delete.py @@ -48,6 +48,10 @@ for i in range(deletions): cur.execute(f""" DELETE FROM tbl_{selected_tbl} WHERE {where_clause} """) + except turso.ProgrammingError: + # Table/column might have been dropped in parallel - this is expected + con.rollback() + break except turso.OperationalError: con.rollback() # Re-raise other operational errors diff --git a/antithesis-tests/stress-composer/parallel_driver_drop_index.py b/antithesis-tests/stress-composer/parallel_driver_drop_index.py index 2033b4e5e..031aa0509 100755 --- a/antithesis-tests/stress-composer/parallel_driver_drop_index.py +++ b/antithesis-tests/stress-composer/parallel_driver_drop_index.py @@ -55,11 +55,13 @@ try: con_init.commit() print(f"Successfully dropped index: {index_name}") +except turso.ProgrammingError as e: + print(f"Index {index_name} already dropped in parallel: {e}") + con.rollback() except turso.OperationalError as e: print(f"Failed to drop index: {e}") con.rollback() except Exception as e: - # Handle case where index might not exist in indexes table print(f"Warning: Could not remove index from metadata: {e}") con.commit() diff --git a/antithesis-tests/stress-composer/parallel_driver_drop_table.py b/antithesis-tests/stress-composer/parallel_driver_drop_table.py index d065d7442..dea33ee42 100755 --- a/antithesis-tests/stress-composer/parallel_driver_drop_table.py +++ b/antithesis-tests/stress-composer/parallel_driver_drop_table.py @@ -31,9 +31,14 @@ except Exception as e: cur = con.cursor() -cur.execute(f"DROP TABLE tbl_{selected_tbl}") - -con.commit() +try: + cur.execute(f"DROP TABLE tbl_{selected_tbl}") + con.commit() + print(f"Successfully dropped table tbl_{selected_tbl}") +except turso.ProgrammingError as e: + # Table might have been dropped in parallel - this is expected + print(f"Table tbl_{selected_tbl} already dropped in parallel: {e}") + con.rollback() con.close() diff --git a/antithesis-tests/stress-composer/parallel_driver_insert.py b/antithesis-tests/stress-composer/parallel_driver_insert.py index 707ec58a6..50079094b 100755 --- a/antithesis-tests/stress-composer/parallel_driver_insert.py +++ b/antithesis-tests/stress-composer/parallel_driver_insert.py @@ -46,6 +46,10 @@ for i in range(insertions): INSERT INTO tbl_{selected_tbl} ({cols}) VALUES ({", ".join(values)}) """) + except turso.ProgrammingError: + # Table/column might have been dropped in parallel - this is expected + con.rollback() + break except turso.OperationalError as e: if "UNIQUE constraint failed" in str(e): # Ignore UNIQUE constraint violations diff --git a/antithesis-tests/stress-composer/parallel_driver_rollback.py b/antithesis-tests/stress-composer/parallel_driver_rollback.py index 862da43e1..30435b55b 100755 --- a/antithesis-tests/stress-composer/parallel_driver_rollback.py +++ b/antithesis-tests/stress-composer/parallel_driver_rollback.py @@ -46,6 +46,10 @@ for i in range(insertions): INSERT INTO tbl_{selected_tbl} ({cols}) VALUES ({", ".join(values)}) """) + except turso.ProgrammingError: + # Table/column might have been dropped in parallel - this is expected + con.rollback() + break except turso.OperationalError as e: if "UNIQUE constraint failed" in str(e): # Ignore UNIQUE constraint violations diff --git a/antithesis-tests/stress-composer/parallel_driver_schema_rollback.py b/antithesis-tests/stress-composer/parallel_driver_schema_rollback.py index 62ad68e96..5243ce5b8 100755 --- a/antithesis-tests/stress-composer/parallel_driver_schema_rollback.py +++ b/antithesis-tests/stress-composer/parallel_driver_schema_rollback.py @@ -35,25 +35,36 @@ except Exception as e: exit(0) cur = con.cursor() -cur.execute("SELECT sql FROM sqlite_schema WHERE type = 'table' AND name = '" + tbl_name + "'") -result = cur.fetchone() +try: + cur.execute("SELECT sql FROM sqlite_schema WHERE type = 'table' AND name = '" + tbl_name + "'") -if result is None: - print(f"Table {tbl_name} not found") + result = cur.fetchone() + + if result is None: + print(f"Table {tbl_name} not found") + exit(0) + else: + schema_before = result[0] + + cur.execute("BEGIN TRANSACTION") + + cur.execute("ALTER TABLE " + tbl_name + " RENAME TO " + tbl_name + "_old") + + con.rollback() + + cur = con.cursor() + cur.execute("SELECT sql FROM sqlite_schema WHERE type = 'table' AND name = '" + tbl_name + "'") + + result_after = cur.fetchone() + if result_after is None: + print(f"Table {tbl_name} dropped in parallel after rollback") + exit(0) + + schema_after = result_after[0] + + always(schema_before == schema_after, "schema should be the same after rollback", {}) +except turso.ProgrammingError as e: + print(f"Table {tbl_name} dropped in parallel: {e}") + con.rollback() exit(0) -else: - schema_before = result[0] - -cur.execute("BEGIN TRANSACTION") - -cur.execute("ALTER TABLE " + tbl_name + " RENAME TO " + tbl_name + "_old") - -con.rollback() - -cur = con.cursor() -cur.execute("SELECT sql FROM sqlite_schema WHERE type = 'table' AND name = '" + tbl_name + "'") - -schema_after = cur.fetchone()[0] - -always(schema_before == schema_after, "schema should be the same after rollback", {}) diff --git a/antithesis-tests/stress-composer/parallel_driver_update.py b/antithesis-tests/stress-composer/parallel_driver_update.py index 9288b7287..d4c916bdb 100755 --- a/antithesis-tests/stress-composer/parallel_driver_update.py +++ b/antithesis-tests/stress-composer/parallel_driver_update.py @@ -60,6 +60,10 @@ for i in range(updates): cur.execute(f""" UPDATE tbl_{selected_tbl} SET {set_clause} WHERE {where_clause} """) + except turso.ProgrammingError: + # Table/column might have been dropped in parallel - this is expected + con.rollback() + break except turso.OperationalError as e: if "UNIQUE constraint failed" in str(e): # Ignore UNIQUE constraint violations From 8501bc930affb1325063360475afbf7bc93a462f Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sun, 5 Oct 2025 17:28:00 -0300 Subject: [PATCH 354/428] use workspace rand version --- Cargo.lock | 2 +- core/Cargo.toml | 3 +- core/mvcc/persistent_storage/logical_log.rs | 4 +-- core/storage/btree.rs | 4 +-- core/storage/encryption.rs | 38 ++++++++++----------- core/storage/slot_bitmap.rs | 16 +++++---- core/vdbe/execute.rs | 10 +++--- 7 files changed, 40 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ccd556376..9fbc73fc8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4915,7 +4915,7 @@ dependencies = [ "pprof", "quickcheck", "quickcheck_macros", - "rand 0.8.5", + "rand 0.9.2", "rand_chacha 0.9.0", "regex", "regex-syntax", diff --git a/core/Cargo.toml b/core/Cargo.toml index c9c9c54b8..d485df277 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -59,7 +59,7 @@ regex-syntax = { workspace = true, default-features = false, features = [ ] } chrono = { workspace = true, default-features = false, features = ["clock"] } julian_day_converter = "0.4.5" -rand = "0.8.5" +rand = { workspace = true } libm = "0.2" turso_macros = { workspace = true } miette = { workspace = true } @@ -103,7 +103,6 @@ rstest = "0.18.2" rusqlite = { workspace = true, features = ["series"] } quickcheck = { version = "1.0", default-features = false } quickcheck_macros = { version = "1.0", default-features = false } -rand = "0.8.5" # Required for quickcheck rand_chacha = { workspace = true } env_logger = { workspace = true } test-log = { version = "0.2.17", features = ["trace"] } diff --git a/core/mvcc/persistent_storage/logical_log.rs b/core/mvcc/persistent_storage/logical_log.rs index f2a09230e..48b97b2e3 100644 --- a/core/mvcc/persistent_storage/logical_log.rs +++ b/core/mvcc/persistent_storage/logical_log.rs @@ -485,7 +485,7 @@ impl StreamingLogicalLogReader { mod tests { use std::{collections::HashSet, sync::Arc}; - use rand::{thread_rng, Rng}; + use rand::{rng, Rng}; use rand_chacha::{ rand_core::{RngCore, SeedableRng}, ChaCha8Rng, @@ -646,7 +646,7 @@ mod tests { #[test] fn test_logical_log_read_fuzz() { - let seed = thread_rng().gen(); + let seed = rng().random(); let mut rng = ChaCha8Rng::seed_from_u64(seed); let num_transactions = rng.next_u64() % 128; let mut txns = vec![]; diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 65659c4b3..d403c401a 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -7875,7 +7875,7 @@ fn shift_pointers_left(page: &mut PageContent, cell_idx: usize) { #[cfg(test)] mod tests { - use rand::{thread_rng, Rng}; + use rand::{rng, Rng}; use rand_chacha::{ rand_core::{RngCore, SeedableRng}, ChaCha8Rng, @@ -9719,7 +9719,7 @@ mod tests { let mut cells = Vec::new(); let usable_space = 4096; let mut i = 100000; - let seed = thread_rng().gen(); + let seed = rng().random(); tracing::info!("seed {}", seed); let mut rng = ChaCha8Rng::seed_from_u64(seed); while i > 0 { diff --git a/core/storage/encryption.rs b/core/storage/encryption.rs index 3a1bb97b9..156d17c0b 100644 --- a/core/storage/encryption.rs +++ b/core/storage/encryption.rs @@ -979,14 +979,14 @@ mod tests { } fn generate_random_hex_key() -> String { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let mut bytes = [0u8; 32]; rng.fill(&mut bytes); hex::encode(bytes) } fn generate_random_hex_key_128() -> String { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let mut bytes = [0u8; 16]; rng.fill(&mut bytes); hex::encode(bytes) @@ -995,7 +995,7 @@ mod tests { fn create_test_page_1() -> Vec { let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; page[..SQLITE_HEADER.len()].copy_from_slice(SQLITE_HEADER); - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); // 48 is the max reserved bytes we might need for metadata with any cipher rng.fill(&mut page[SQLITE_HEADER.len()..DEFAULT_ENCRYPTED_PAGE_SIZE - 48]); page @@ -1135,7 +1135,7 @@ mod tests { #[test] fn test_aes128gcm_encrypt_decrypt_round_trip() { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let cipher_mode = CipherMode::Aes128Gcm; let metadata_size = cipher_mode.metadata_size(); let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; @@ -1144,7 +1144,7 @@ mod tests { let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; page.iter_mut() .take(data_size) - .for_each(|byte| *byte = rng.gen()); + .for_each(|byte| *byte = rng.random()); page }; @@ -1165,7 +1165,7 @@ mod tests { #[test] fn test_aes_encrypt_decrypt_round_trip() { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let cipher_mode = CipherMode::Aes256Gcm; let metadata_size = cipher_mode.metadata_size(); let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; @@ -1174,7 +1174,7 @@ mod tests { let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; page.iter_mut() .take(data_size) - .for_each(|byte| *byte = rng.gen()); + .for_each(|byte| *byte = rng.random()); page }; @@ -1211,7 +1211,7 @@ mod tests { #[test] fn test_aegis256_encrypt_decrypt_round_trip() { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let cipher_mode = CipherMode::Aegis256; let metadata_size = cipher_mode.metadata_size(); let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; @@ -1220,7 +1220,7 @@ mod tests { let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; page.iter_mut() .take(data_size) - .for_each(|byte| *byte = rng.gen()); + .for_each(|byte| *byte = rng.random()); page }; @@ -1256,7 +1256,7 @@ mod tests { #[test] fn test_aegis128x2_encrypt_decrypt_round_trip() { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let cipher_mode = CipherMode::Aegis128X2; let metadata_size = cipher_mode.metadata_size(); let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; @@ -1265,7 +1265,7 @@ mod tests { let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; page.iter_mut() .take(data_size) - .for_each(|byte| *byte = rng.gen()); + .for_each(|byte| *byte = rng.random()); page }; @@ -1301,7 +1301,7 @@ mod tests { #[test] fn test_aegis128l_encrypt_decrypt_round_trip() { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let cipher_mode = CipherMode::Aegis128L; let metadata_size = cipher_mode.metadata_size(); let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; @@ -1310,7 +1310,7 @@ mod tests { let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; page.iter_mut() .take(data_size) - .for_each(|byte| *byte = rng.gen()); + .for_each(|byte| *byte = rng.random()); page }; @@ -1346,7 +1346,7 @@ mod tests { #[test] fn test_aegis128x4_encrypt_decrypt_round_trip() { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let cipher_mode = CipherMode::Aegis128X4; let metadata_size = cipher_mode.metadata_size(); let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; @@ -1355,7 +1355,7 @@ mod tests { let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; page.iter_mut() .take(data_size) - .for_each(|byte| *byte = rng.gen()); + .for_each(|byte| *byte = rng.random()); page }; @@ -1391,7 +1391,7 @@ mod tests { #[test] fn test_aegis256x2_encrypt_decrypt_round_trip() { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let cipher_mode = CipherMode::Aegis256X2; let metadata_size = cipher_mode.metadata_size(); let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; @@ -1400,7 +1400,7 @@ mod tests { let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; page.iter_mut() .take(data_size) - .for_each(|byte| *byte = rng.gen()); + .for_each(|byte| *byte = rng.random()); page }; @@ -1436,7 +1436,7 @@ mod tests { #[test] fn test_aegis256x4_encrypt_decrypt_round_trip() { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let cipher_mode = CipherMode::Aegis256X4; let metadata_size = cipher_mode.metadata_size(); let data_size = DEFAULT_ENCRYPTED_PAGE_SIZE - metadata_size; @@ -1445,7 +1445,7 @@ mod tests { let mut page = vec![0u8; DEFAULT_ENCRYPTED_PAGE_SIZE]; page.iter_mut() .take(data_size) - .for_each(|byte| *byte = rng.gen()); + .for_each(|byte| *byte = rng.random()); page }; diff --git a/core/storage/slot_bitmap.rs b/core/storage/slot_bitmap.rs index 140eb708d..86050d3e9 100644 --- a/core/storage/slot_bitmap.rs +++ b/core/storage/slot_bitmap.rs @@ -516,14 +516,14 @@ pub mod tests { ]; for &seed in seeds { let mut rng = StdRng::seed_from_u64(seed); - let n_slots = rng.gen_range(1..10) * 64; + let n_slots = rng.random_range(1..10) * 64; let mut pb = SlotBitmap::new(n_slots); let mut model = vec![true; n_slots as usize]; let iters = 2000usize; for _ in 0..iters { - let op = rng.gen_range(0..100); + let op = rng.random_range(0..100); match op { 0..=49 => { // alloc_one @@ -540,8 +540,9 @@ pub mod tests { } 50..=79 => { // alloc_run with random length - let need = - rng.gen_range(1..=std::cmp::max(1, (n_slots as usize).min(128))) as u32; + let need = rng + .random_range(1..=std::cmp::max(1, (n_slots as usize).min(128))) + as u32; let got = pb.alloc_run(need); if let Some(start) = got { assert!(start + need <= n_slots, "within bounds"); @@ -560,13 +561,14 @@ pub mod tests { } _ => { // free_run on a random valid range - let len = - rng.gen_range(1..=std::cmp::max(1, (n_slots as usize).min(128))) as u32; + let len = rng + .random_range(1..=std::cmp::max(1, (n_slots as usize).min(128))) + as u32; let max_start = n_slots.saturating_sub(len); let start = if max_start == 0 { 0 } else { - rng.gen_range(0..=max_start) + rng.random_range(0..=max_start) }; pb.free_run(start, len); ref_mark_run(&mut model, start, len, true); diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 292657274..6dce1a6e9 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -38,6 +38,7 @@ use crate::{ translate::emitter::TransactionMode, }; use crate::{get_cursor, CheckpointMode, Connection, MvCursor}; +use rand::Rng; use std::env::temp_dir; use std::ops::DerefMut; use std::{ @@ -74,7 +75,7 @@ use super::{ CommitState, }; use parking_lot::RwLock; -use rand::{thread_rng, Rng, RngCore}; +use rand::RngCore; use turso_parser::ast::{self, ForeignKeyClause, Name, SortOrder}; use turso_parser::parser::Parser; @@ -6622,8 +6623,9 @@ pub fn op_new_rowid( // Generate a random i64 and constrain it to the lower half of the rowid range. // We use the lower half (1 to MAX_ROWID/2) because we're in random mode only // when sequential allocation reached MAX_ROWID, meaning the upper range is full. - let mut rng = thread_rng(); - let mut random_rowid: i64 = rng.gen(); + + let mut rng = rand::rng(); + let mut random_rowid: i64 = rng.random(); random_rowid &= MAX_ROWID >> 1; // Mask to keep value in range [0, MAX_ROWID/2] random_rowid += 1; // Ensure positive @@ -8841,7 +8843,7 @@ impl Value { .max(1) as usize; let mut blob: Vec = vec![0; length]; - rand::thread_rng().fill_bytes(&mut blob); + rand::rng().fill_bytes(&mut blob); Value::Blob(blob) } From 8c0b9c6979fd4d7e810357782071d542a4ef955c Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 21 Oct 2025 11:20:10 -0300 Subject: [PATCH 355/428] add additional `fill_bytes` method to `IO` to deterministically generate random bytes and modify random functions to use them --- Cargo.lock | 1 - core/Cargo.toml | 1 - core/io/mod.rs | 10 +++++++--- core/vdbe/execute.rs | 38 ++++++++++++++++++++++---------------- 4 files changed, 29 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9fbc73fc8..a71426222 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4897,7 +4897,6 @@ dependencies = [ "crossbeam-skiplist", "env_logger 0.11.7", "fallible-iterator", - "getrandom 0.2.15", "hex", "intrusive-collections", "io-uring", diff --git a/core/Cargo.toml b/core/Cargo.toml index d485df277..5616ddf97 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -52,7 +52,6 @@ cfg_block = "0.1.1" fallible-iterator = { workspace = true } hex = { workspace = true } thiserror = { workspace = true } -getrandom = { version = "0.2.15" } regex = { workspace = true } regex-syntax = { workspace = true, default-features = false, features = [ "unicode", diff --git a/core/io/mod.rs b/core/io/mod.rs index ea733f820..1c9d107a4 100644 --- a/core/io/mod.rs +++ b/core/io/mod.rs @@ -3,6 +3,7 @@ use crate::storage::sqlite3_ondisk::WAL_FRAME_HEADER_SIZE; use crate::{BufferPool, Result}; use bitflags::bitflags; use cfg_block::cfg_block; +use rand::{Rng, RngCore}; use std::cell::RefCell; use std::fmt; use std::ptr::NonNull; @@ -147,9 +148,12 @@ pub trait IO: Clock + Send + Sync { } fn generate_random_number(&self) -> i64 { - let mut buf = [0u8; 8]; - getrandom::getrandom(&mut buf).unwrap(); - i64::from_ne_bytes(buf) + rand::rng().random() + } + + /// Fill `dest` with random data. + fn fill_bytes(&self, dest: &mut [u8]) { + rand::rng().fill_bytes(dest); } fn get_memory_io(&self) -> Arc { diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 6dce1a6e9..cd88b78c1 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -38,7 +38,6 @@ use crate::{ translate::emitter::TransactionMode, }; use crate::{get_cursor, CheckpointMode, Connection, MvCursor}; -use rand::Rng; use std::env::temp_dir; use std::ops::DerefMut; use std::{ @@ -75,7 +74,6 @@ use super::{ CommitState, }; use parking_lot::RwLock; -use rand::RngCore; use turso_parser::ast::{self, ForeignKeyClause, Name, SortOrder}; use turso_parser::parser::Parser; @@ -4806,7 +4804,9 @@ pub fn op_function( ScalarFunc::Typeof => Some(reg_value.exec_typeof()), ScalarFunc::Unicode => Some(reg_value.exec_unicode()), ScalarFunc::Quote => Some(reg_value.exec_quote()), - ScalarFunc::RandomBlob => Some(reg_value.exec_randomblob()), + ScalarFunc::RandomBlob => { + Some(reg_value.exec_randomblob(|dest| pager.io.fill_bytes(dest))) + } ScalarFunc::ZeroBlob => Some(reg_value.exec_zeroblob()), ScalarFunc::Soundex => Some(reg_value.exec_soundex()), _ => unreachable!(), @@ -4831,7 +4831,8 @@ pub fn op_function( state.registers[*dest] = Register::Value(result); } ScalarFunc::Random => { - state.registers[*dest] = Register::Value(Value::exec_random()); + state.registers[*dest] = + Register::Value(Value::exec_random(|| pager.io.generate_random_number())); } ScalarFunc::Trim => { let reg_value = &state.registers[*start_reg]; @@ -6623,9 +6624,7 @@ pub fn op_new_rowid( // Generate a random i64 and constrain it to the lower half of the rowid range. // We use the lower half (1 to MAX_ROWID/2) because we're in random mode only // when sequential allocation reached MAX_ROWID, meaning the upper range is full. - - let mut rng = rand::rng(); - let mut random_rowid: i64 = rng.random(); + let mut random_rowid: i64 = pager.io.generate_random_number(); random_rowid &= MAX_ROWID >> 1; // Mask to keep value in range [0, MAX_ROWID/2] random_rowid += 1; // Ensure positive @@ -8826,14 +8825,17 @@ impl Value { }) } - pub fn exec_random() -> Self { - let mut buf = [0u8; 8]; - getrandom::getrandom(&mut buf).unwrap(); - let random_number = i64::from_ne_bytes(buf); - Value::Integer(random_number) + pub fn exec_random(generate_random_number: F) -> Self + where + F: Fn() -> i64, + { + Value::Integer(generate_random_number()) } - pub fn exec_randomblob(&self) -> Value { + pub fn exec_randomblob(&self, fill_bytes: F) -> Value + where + F: Fn(&mut [u8]), + { let length = match self { Value::Integer(i) => *i, Value::Float(f) => *f as i64, @@ -8843,7 +8845,7 @@ impl Value { .max(1) as usize; let mut blob: Vec = vec![0; length]; - rand::rng().fill_bytes(&mut blob); + fill_bytes(&mut blob); Value::Blob(blob) } @@ -10200,6 +10202,8 @@ where #[cfg(test)] mod tests { + use rand::{Rng, RngCore}; + use super::*; use crate::types::Value; @@ -10960,7 +10964,7 @@ mod tests { #[test] fn test_random() { - match Value::exec_random() { + match Value::exec_random(|| rand::rng().random()) { Value::Integer(value) => { // Check that the value is within the range of i64 assert!( @@ -11023,7 +11027,9 @@ mod tests { ]; for test_case in &test_cases { - let result = test_case.input.exec_randomblob(); + let result = test_case.input.exec_randomblob(|dest| { + rand::rng().fill_bytes(dest); + }); match result { Value::Blob(blob) => { assert_eq!(blob.len(), test_case.expected_len); From 72baf48863cffec16e41c281c7c4835fed79cced Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Tue, 21 Oct 2025 11:44:40 -0300 Subject: [PATCH 356/428] add random generation in simulator IO --- simulator/runner/io.rs | 8 ++++++-- simulator/runner/memory/io.rs | 8 ++++++-- whopper/io.rs | 5 +++++ 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/simulator/runner/io.rs b/simulator/runner/io.rs index c5c38f928..baf4d9e98 100644 --- a/simulator/runner/io.rs +++ b/simulator/runner/io.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use rand::{RngCore, SeedableRng}; +use rand::{Rng, RngCore, SeedableRng}; use rand_chacha::ChaCha8Rng; use turso_core::{Clock, IO, Instant, OpenFlags, PlatformIO, Result}; @@ -136,6 +136,10 @@ impl IO for SimulatorIO { } fn generate_random_number(&self) -> i64 { - self.rng.borrow_mut().next_u64() as i64 + self.rng.borrow_mut().random() + } + + fn fill_bytes(&self, dest: &mut [u8]) { + self.rng.borrow_mut().fill_bytes(dest); } } diff --git a/simulator/runner/memory/io.rs b/simulator/runner/memory/io.rs index 975f0d7ce..fc406e7c1 100644 --- a/simulator/runner/memory/io.rs +++ b/simulator/runner/memory/io.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use indexmap::IndexMap; use parking_lot::Mutex; -use rand::{RngCore, SeedableRng}; +use rand::{Rng, RngCore, SeedableRng}; use rand_chacha::ChaCha8Rng; use turso_core::{Clock, Completion, IO, Instant, OpenFlags, Result}; @@ -269,7 +269,11 @@ impl IO for MemorySimIO { } fn generate_random_number(&self) -> i64 { - self.rng.borrow_mut().next_u64() as i64 + self.rng.borrow_mut().random() + } + + fn fill_bytes(&self, dest: &mut [u8]) { + self.rng.borrow_mut().fill_bytes(dest); } fn remove_file(&self, path: &str) -> Result<()> { diff --git a/whopper/io.rs b/whopper/io.rs index 5b9da7b3e..9f8c8a872 100644 --- a/whopper/io.rs +++ b/whopper/io.rs @@ -142,6 +142,11 @@ impl IO for SimulatorIO { let mut rng = self.rng.lock().unwrap(); rng.next_u64() as i64 } + + fn fill_bytes(&self, dest: &mut [u8]) { + let mut rng = self.rng.lock().unwrap(); + rng.fill_bytes(dest); + } } const MAX_FILE_SIZE: usize = 1 << 33; // 8 GiB From 10ead9f3b6301ec1b4e5dd6f2ea3a993210c4d96 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Tue, 21 Oct 2025 21:10:58 +0400 Subject: [PATCH 357/428] one more clippy fix --- core/vector/operations/concat.rs | 4 ++-- core/vector/vector_types.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/vector/operations/concat.rs b/core/vector/operations/concat.rs index 178258838..8823568ff 100644 --- a/core/vector/operations/concat.rs +++ b/core/vector/operations/concat.rs @@ -13,8 +13,8 @@ pub fn vector_concat(v1: &Vector, v2: &Vector) -> Result> { let data = match v1.vector_type { VectorType::Float32Dense | VectorType::Float64Dense => { let mut data = Vec::with_capacity(v1.bin_len() + v2.bin_len()); - data.extend_from_slice(&v1.bin_data()); - data.extend_from_slice(&v2.bin_data()); + data.extend_from_slice(v1.bin_data()); + data.extend_from_slice(v2.bin_data()); data } VectorType::Float32Sparse => { diff --git a/core/vector/vector_types.rs b/core/vector/vector_types.rs index c6c599090..8b70fdbda 100644 --- a/core/vector/vector_types.rs +++ b/core/vector/vector_types.rs @@ -118,7 +118,7 @@ impl<'a> Vector<'a> { Self::from_data(vector_type, Some(blob), None) } pub fn from_slice(blob: &'a [u8]) -> Result { - let (vector_type, len) = Self::vector_type(&blob)?; + let (vector_type, len) = Self::vector_type(blob)?; Self::from_data(vector_type, None, Some(&blob[..len])) } pub fn from_data( @@ -126,7 +126,7 @@ impl<'a> Vector<'a> { owned: Option>, refer: Option<&'a [u8]>, ) -> Result { - let owned_slice = owned.as_ref().map(|x| x.as_slice()); + let owned_slice = owned.as_deref(); let refer_slice = refer.as_ref().map(|&x| x); let data = owned_slice.unwrap_or_else(|| refer_slice.unwrap()); match vector_type { @@ -191,7 +191,7 @@ impl<'a> Vector<'a> { } pub fn bin_data(&'a self) -> &'a [u8] { - let owned = self.owned.as_ref().map(|x| x.as_slice()); + let owned = self.owned.as_deref(); let refer = self.refer.as_ref().map(|&x| x); owned.unwrap_or_else(|| refer.unwrap()) } From 892fcc881df1afe91b04f9bd18472047af1447c6 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 21 Oct 2025 21:15:35 -0400 Subject: [PATCH 358/428] Handle TRUE|FALSE literal case for default column constraint in the parser --- parser/src/parser.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/parser/src/parser.rs b/parser/src/parser.rs index bbab464ff..1563f3e8b 100644 --- a/parser/src/parser.rs +++ b/parser/src/parser.rs @@ -3120,9 +3120,17 @@ impl<'a> Parser<'a> { TK_NULL | TK_BLOB | TK_STRING | TK_FLOAT | TK_INTEGER | TK_CTIME_KW => { Ok(ColumnConstraint::Default(self.parse_term()?)) } - _ => Ok(ColumnConstraint::Default(Box::new(Expr::Id( - self.parse_nm()?, - )))), + _ => { + let name = self.parse_nm()?; + let expr = if name.as_str().eq_ignore_ascii_case("true") { + Expr::Literal(Literal::Numeric("1".into())) + } else if name.as_str().eq_ignore_ascii_case("false") { + Expr::Literal(Literal::Numeric("0".into())) + } else { + Expr::Id(name) + }; + Ok(ColumnConstraint::Default(Box::new(expr))) + } } } From 2f401c0bcc5f89820276b52ef7cb743008d0846a Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 21 Oct 2025 21:22:09 -0400 Subject: [PATCH 359/428] Add regression tcl test for #3796 default bool col constraints --- testing/create_table.test | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/testing/create_table.test b/testing/create_table.test index e17196c9f..e0182828c 100755 --- a/testing/create_table.test +++ b/testing/create_table.test @@ -101,3 +101,17 @@ do_execsql_test_in_memory_any_error create_view_index_collision-1 { CREATE INDEX ix_same ON t4(w); CREATE VIEW ix_same AS SELECT 1; } + +# https://github.com/tursodatabase/turso/issues/3796 +do_execsql_test_on_specific_db {:memory:} col-default-true { + create table t(id integer primary key, a default true); + insert into t (id) values (1); + SELECT a from t; +} {1} + +# https://github.com/tursodatabase/turso/issues/3796 +do_execsql_test_on_specific_db {:memory:} col-default-false { + create table t(id integer primary key, a default false); + insert into t (id) values (1); + SELECT a from t; +} {0} From 7d423d358f5018f7db9737d19ce0e548f5d8300f Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 11:08:46 +0400 Subject: [PATCH 360/428] avoid unnecessary time measures --- cli/app.rs | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/cli/app.rs b/cli/app.rs index 4b6e1f5a9..923d280a3 100644 --- a/cli/app.rs +++ b/cli/app.rs @@ -106,44 +106,65 @@ macro_rules! row_step_result_query { return Ok(()); } - let start = Instant::now(); + let start = if $stats.is_some() { + Some(Instant::now()) + } else { + None + }; match $rows.step() { Ok(StepResult::Row) => { if let Some(ref mut stats) = $stats { - stats.execute_time_elapsed_samples.push(start.elapsed()); + stats + .execute_time_elapsed_samples + .push(start.unwrap().elapsed()); } $row_handle } Ok(StepResult::IO) => { - let start = Instant::now(); + if let Some(ref mut stats) = $stats { + stats.io_time_elapsed_samples.push(start.unwrap().elapsed()); + } + let start = if $stats.is_some() { + Some(Instant::now()) + } else { + None + }; $rows.run_once()?; if let Some(ref mut stats) = $stats { - stats.io_time_elapsed_samples.push(start.elapsed()); + stats.io_time_elapsed_samples.push(start.unwrap().elapsed()); } } Ok(StepResult::Interrupt) => { if let Some(ref mut stats) = $stats { - stats.execute_time_elapsed_samples.push(start.elapsed()); + stats + .execute_time_elapsed_samples + .push(start.unwrap().elapsed()); } break; } Ok(StepResult::Done) => { if let Some(ref mut stats) = $stats { - stats.execute_time_elapsed_samples.push(start.elapsed()); + stats + .execute_time_elapsed_samples + .push(start.unwrap().elapsed()); } break; } Ok(StepResult::Busy) => { if let Some(ref mut stats) = $stats { - stats.execute_time_elapsed_samples.push(start.elapsed()); + stats + .execute_time_elapsed_samples + .push(start.unwrap().elapsed()); } let _ = $app.writeln("database is busy"); break; } Err(err) => { if let Some(ref mut stats) = $stats { - stats.execute_time_elapsed_samples.push(start.elapsed()); + stats + .execute_time_elapsed_samples + .push(start.unwrap().elapsed()); } let report = miette::Error::from(err).with_source_code($sql.to_owned()); let _ = $app.writeln_fmt(format_args!("{report:?}")); From 671d266dd662690ebda0d47763cbd04b48ea89ba Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 11:47:46 +0400 Subject: [PATCH 361/428] Revert "wip" This reverts commit dd34f7fd504fbbd57e7802da4e76447c4911ab09. --- core/lib.rs | 1 - core/storage/btree.rs | 12 +++++------- core/vdbe/execute.rs | 2 +- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 8a057e326..637e91c92 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -15,7 +15,6 @@ mod json; pub mod mvcc; mod parameters; mod pragma; -pub mod primitives; mod pseudo; mod schema; #[cfg(feature = "series")] diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 38a4511b7..3124050cd 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -41,7 +41,7 @@ use super::{ }; use std::{ any::Any, - cell::Cell, + cell::{Cell, Ref, RefCell}, cmp::{Ordering, Reverse}, collections::{BinaryHeap, HashMap}, fmt::Debug, @@ -50,8 +50,6 @@ use std::{ sync::Arc, }; -use crate::primitives::{Ref, RefCell, RefMut}; - /// The B-Tree page header is 12 bytes for interior pages and 8 bytes for leaf pages. /// /// +--------+-----------------+-----------------+-----------------+--------+----- ..... ----+ @@ -554,7 +552,7 @@ pub trait CursorTrait: Any { // --- start: BTreeCursor specific functions ---- fn invalidate_record(&mut self); fn has_rowid(&self) -> bool; - fn record_cursor_mut(&self) -> RefMut<'_, RecordCursor>; + fn record_cursor_mut(&self) -> std::cell::RefMut<'_, RecordCursor>; fn get_pager(&self) -> Arc; fn get_skip_advance(&self) -> bool; // --- end: BTreeCursor specific functions ---- @@ -4795,7 +4793,7 @@ impl BTreeCursor { Ok(IOResult::Done(())) } - fn get_immutable_record_or_create(&self) -> RefMut<'_, Option> { + fn get_immutable_record_or_create(&self) -> std::cell::RefMut<'_, Option> { let mut reusable_immutable_record = self.reusable_immutable_record.borrow_mut(); if reusable_immutable_record.is_none() { let page_size = self.pager.get_page_size_unchecked().get(); @@ -4805,7 +4803,7 @@ impl BTreeCursor { reusable_immutable_record } - fn get_immutable_record(&self) -> RefMut<'_, Option> { + fn get_immutable_record(&self) -> std::cell::RefMut<'_, Option> { self.reusable_immutable_record.borrow_mut() } @@ -5613,7 +5611,7 @@ impl CursorTrait for BTreeCursor { .invalidate(); self.record_cursor.borrow_mut().invalidate(); } - fn record_cursor_mut(&self) -> RefMut<'_, RecordCursor> { + fn record_cursor_mut(&self) -> std::cell::RefMut<'_, RecordCursor> { self.record_cursor.borrow_mut() } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index fa42f8a4e..dadc7a063 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -112,7 +112,7 @@ macro_rules! load_insn { }; #[cfg(not(debug_assertions))] let Insn::$variant { $($field $(: $binding)?),*} = $insn else { - // this will optimize away the branch + // this will optimize away the branch unsafe { std::hint::unreachable_unchecked() }; }; }; From 29b400c6ac6b599784e2891e3b5215caaa94fdc5 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 22 Oct 2025 12:46:45 +0300 Subject: [PATCH 362/428] tests: Separate integration and fuzz tests This separates fuzz tests from integration tests so that you can run the fast test cases with: ``` cargo test --test integration_tests ``` and the longer fuzz cases with: ``` cargo test --test fuzz_tests ``` --- tests/Cargo.toml | 8 ++++++-- .../fuzz/grammar_generator.rs | 6 ++++++ tests/{integration => }/fuzz/mod.rs | 17 ++++++++--------- tests/{integration => }/fuzz/rowid_alias.rs | 2 +- tests/integration/common.rs | 18 ++++++++++-------- tests/integration/mod.rs | 1 - tests/lib.rs | 4 +++- 7 files changed, 34 insertions(+), 22 deletions(-) rename tests/{integration => }/fuzz/grammar_generator.rs (99%) rename tests/{integration => }/fuzz/mod.rs (99%) rename tests/{integration => }/fuzz/rowid_alias.rs (99%) diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 416d26768..9b7ee7f5d 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -14,6 +14,10 @@ path = "lib.rs" name = "integration_tests" path = "integration/mod.rs" +[[test]] +name = "fuzz_tests" +path = "fuzz/mod.rs" + [dependencies] anyhow.workspace = true env_logger = { workspace = true } @@ -31,11 +35,11 @@ ctor = "0.5.0" twox-hash = "2.1.1" sql_generation = { path = "../sql_generation" } turso_parser = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing = { workspace = true } [dev-dependencies] test-log = { version = "0.2.17", features = ["trace"] } -tracing-subscriber = { workspace = true, features = ["env-filter"] } -tracing = { workspace = true } [features] default = ["test_helper"] diff --git a/tests/integration/fuzz/grammar_generator.rs b/tests/fuzz/grammar_generator.rs similarity index 99% rename from tests/integration/fuzz/grammar_generator.rs rename to tests/fuzz/grammar_generator.rs index aa046f065..5c8f701d8 100644 --- a/tests/integration/fuzz/grammar_generator.rs +++ b/tests/fuzz/grammar_generator.rs @@ -89,6 +89,12 @@ struct GrammarGeneratorInner { symbols: HashMap, } +impl Default for GrammarGenerator { + fn default() -> Self { + Self::new() + } +} + impl GrammarGenerator { pub fn new() -> Self { GrammarGenerator(Rc::new(RefCell::new(GrammarGeneratorInner { diff --git a/tests/integration/fuzz/mod.rs b/tests/fuzz/mod.rs similarity index 99% rename from tests/integration/fuzz/mod.rs rename to tests/fuzz/mod.rs index d3630cf66..08a275495 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/fuzz/mod.rs @@ -2,7 +2,7 @@ pub mod grammar_generator; pub mod rowid_alias; #[cfg(test)] -mod tests { +mod fuzz_tests { use rand::seq::{IndexedRandom, IteratorRandom, SliceRandom}; use rand::Rng; use rand_chacha::ChaCha8Rng; @@ -10,15 +10,14 @@ mod tests { use std::{collections::HashSet, io::Write}; use turso_core::DatabaseOpts; - use crate::{ - common::{ - do_flush, limbo_exec_rows, limbo_exec_rows_fallible, limbo_stmt_get_column_names, - maybe_setup_tracing, rng_from_time_or_env, rusqlite_integrity_check, sqlite_exec_rows, - TempDatabase, - }, - fuzz::grammar_generator::{const_str, rand_int, rand_str, GrammarGenerator}, + use core_tester::common::{ + do_flush, limbo_exec_rows, limbo_exec_rows_fallible, limbo_stmt_get_column_names, + maybe_setup_tracing, rng_from_time_or_env, rusqlite_integrity_check, sqlite_exec_rows, + TempDatabase, }; + use super::grammar_generator::{const_str, rand_int, rand_str, GrammarGenerator}; + use super::grammar_generator::SymbolHandle; /// [See this issue for more info](https://github.com/tursodatabase/turso/issues/1763) @@ -4112,7 +4111,7 @@ mod tests { #[test] #[cfg(feature = "test_helper")] pub fn fuzz_pending_byte_database() -> anyhow::Result<()> { - use crate::common::rusqlite_integrity_check; + use core_tester::common::rusqlite_integrity_check; maybe_setup_tracing(); let (mut rng, seed) = rng_from_time_or_env(); diff --git a/tests/integration/fuzz/rowid_alias.rs b/tests/fuzz/rowid_alias.rs similarity index 99% rename from tests/integration/fuzz/rowid_alias.rs rename to tests/fuzz/rowid_alias.rs index 52570e9ec..28c08a074 100644 --- a/tests/integration/fuzz/rowid_alias.rs +++ b/tests/fuzz/rowid_alias.rs @@ -1,4 +1,4 @@ -use crate::common::{limbo_exec_rows, TempDatabase}; +use core_tester::common::{limbo_exec_rows, TempDatabase}; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; use sql_generation::{ diff --git a/tests/integration/common.rs b/tests/integration/common.rs index 175a98a53..65e171d1a 100644 --- a/tests/integration/common.rs +++ b/tests/integration/common.rs @@ -163,7 +163,7 @@ impl TempDatabase { } } -pub(crate) fn do_flush(conn: &Arc, tmp_db: &TempDatabase) -> anyhow::Result<()> { +pub fn do_flush(conn: &Arc, tmp_db: &TempDatabase) -> anyhow::Result<()> { let completions = conn.cacheflush()?; for c in completions { tmp_db.io.wait_for_completion(c)?; @@ -171,7 +171,7 @@ pub(crate) fn do_flush(conn: &Arc, tmp_db: &TempDatabase) -> anyhow: Ok(()) } -pub(crate) fn compare_string(a: impl AsRef, b: impl AsRef) { +pub fn compare_string(a: impl AsRef, b: impl AsRef) { let a = a.as_ref(); let b = b.as_ref(); @@ -204,7 +204,7 @@ pub fn maybe_setup_tracing() { .try_init(); } -pub(crate) fn sqlite_exec_rows( +pub fn sqlite_exec_rows( conn: &rusqlite::Connection, query: &str, ) -> Vec> { @@ -227,7 +227,7 @@ pub(crate) fn sqlite_exec_rows( results } -pub(crate) fn limbo_exec_rows( +pub fn limbo_exec_rows( _db: &TempDatabase, conn: &Arc, query: &str, @@ -266,7 +266,8 @@ pub(crate) fn limbo_exec_rows( rows } -pub(crate) fn limbo_stmt_get_column_names( +#[allow(dead_code)] +pub fn limbo_stmt_get_column_names( _db: &TempDatabase, conn: &Arc, query: &str, @@ -280,7 +281,7 @@ pub(crate) fn limbo_stmt_get_column_names( names } -pub(crate) fn limbo_exec_rows_fallible( +pub fn limbo_exec_rows_fallible( _db: &TempDatabase, conn: &Arc, query: &str, @@ -319,7 +320,7 @@ pub(crate) fn limbo_exec_rows_fallible( Ok(rows) } -pub(crate) fn limbo_exec_rows_error( +pub fn limbo_exec_rows_error( _db: &TempDatabase, conn: &Arc, query: &str, @@ -338,7 +339,7 @@ pub(crate) fn limbo_exec_rows_error( } } -pub(crate) fn rng_from_time() -> (ChaCha8Rng, u64) { +pub fn rng_from_time() -> (ChaCha8Rng, u64) { let seed = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap() @@ -401,6 +402,7 @@ pub fn run_query_core( Ok(()) } +#[allow(dead_code)] pub fn rusqlite_integrity_check(db_path: &Path) -> anyhow::Result<()> { let conn = rusqlite::Connection::open(db_path)?; let mut stmt = conn.prepare("SELECT * FROM pragma_integrity_check;")?; diff --git a/tests/integration/mod.rs b/tests/integration/mod.rs index 1149f224a..e369e68b7 100644 --- a/tests/integration/mod.rs +++ b/tests/integration/mod.rs @@ -1,6 +1,5 @@ mod common; mod functions; -mod fuzz; mod fuzz_transaction; mod pragma; mod query_processing; diff --git a/tests/lib.rs b/tests/lib.rs index 8b1378917..26482d663 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -1 +1,3 @@ - +// Shared test utilities +#[path = "integration/common.rs"] +pub mod common; From da828681c35b279d80ec835dd87687e5e59ac3f1 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 22 Oct 2025 13:12:50 +0300 Subject: [PATCH 363/428] github: Run fuzz tests in a separate workflow --- .../{long_fuzz_tests_btree.yml => fuzz.yml} | 22 ++++++++++++++++++- .github/workflows/rust.yml | 2 +- 2 files changed, 22 insertions(+), 2 deletions(-) rename .github/workflows/{long_fuzz_tests_btree.yml => fuzz.yml} (66%) diff --git a/.github/workflows/long_fuzz_tests_btree.yml b/.github/workflows/fuzz.yml similarity index 66% rename from .github/workflows/long_fuzz_tests_btree.yml rename to .github/workflows/fuzz.yml index 982ac8604..4573d658f 100644 --- a/.github/workflows/long_fuzz_tests_btree.yml +++ b/.github/workflows/fuzz.yml @@ -12,7 +12,27 @@ on: - main jobs: - run-long-tests: + run-fuzz-tests: + runs-on: blacksmith-4vcpu-ubuntu-2404 + timeout-minutes: 30 + + steps: + - uses: actions/checkout@v3 + - uses: useblacksmith/rust-cache@v3 + with: + prefix-key: "v1-rust" # can be updated if we need to reset caches due to non-trivial change in the dependencies (for example, custom env var were set for single workspace project) + - name: Set up Python 3.10 + uses: useblacksmith/setup-python@v6 + with: + python-version: "3.10" + - name: Build + run: cargo build --verbose + - name: Run ignored long tests + run: cargo test --test fuzz_tests + env: + RUST_BACKTRACE: 1 + + run-long-fuzz-tests: runs-on: blacksmith-4vcpu-ubuntu-2404 timeout-minutes: 30 diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 49140d2b8..e059236c1 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -48,7 +48,7 @@ jobs: - name: Test env: RUST_LOG: ${{ runner.debug && 'turso_core::storage=trace' || '' }} - run: cargo test --verbose --features checksum + run: cargo test --verbose --features checksum --test integration_tests timeout-minutes: 20 clippy: From b984ddf98f8309f893c1581f1cc5bdb7d86dfb90 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 22 Oct 2025 13:42:52 +0300 Subject: [PATCH 364/428] Turso 0.3.0-pre.4 --- Cargo.lock | 54 +++++++++---------- Cargo.toml | 34 ++++++------ bindings/javascript/package-lock.json | 36 ++++++------- bindings/javascript/package.json | 2 +- .../javascript/packages/common/package.json | 2 +- .../javascript/packages/native/package.json | 4 +- .../packages/wasm-common/package.json | 2 +- .../javascript/packages/wasm/package.json | 6 +-- .../sync/packages/common/package.json | 4 +- .../sync/packages/native/package.json | 6 +-- .../sync/packages/wasm/package.json | 8 +-- bindings/javascript/yarn.lock | 24 ++++----- 12 files changed, 91 insertions(+), 91 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f079d528f..508d44cf8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -822,7 +822,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_tester" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "anyhow", "assert_cmd", @@ -2548,7 +2548,7 @@ dependencies = [ [[package]] name = "limbo_completion" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "mimalloc", "turso_ext", @@ -2556,7 +2556,7 @@ dependencies = [ [[package]] name = "limbo_crypto" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "blake3", "data-encoding", @@ -2569,7 +2569,7 @@ dependencies = [ [[package]] name = "limbo_csv" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "csv", "mimalloc", @@ -2579,7 +2579,7 @@ dependencies = [ [[package]] name = "limbo_fuzzy" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "mimalloc", "turso_ext", @@ -2587,7 +2587,7 @@ dependencies = [ [[package]] name = "limbo_ipaddr" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "ipnetwork", "mimalloc", @@ -2596,7 +2596,7 @@ dependencies = [ [[package]] name = "limbo_percentile" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "mimalloc", "turso_ext", @@ -2604,7 +2604,7 @@ dependencies = [ [[package]] name = "limbo_regexp" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "mimalloc", "regex", @@ -2613,7 +2613,7 @@ dependencies = [ [[package]] name = "limbo_sim" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "anyhow", "bitflags 2.9.4", @@ -2649,7 +2649,7 @@ dependencies = [ [[package]] name = "limbo_sqlite_test_ext" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "cc", ] @@ -3465,7 +3465,7 @@ dependencies = [ [[package]] name = "py-turso" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "anyhow", "pyo3", @@ -4211,7 +4211,7 @@ checksum = "d372029cb5195f9ab4e4b9aef550787dce78b124fcaee8d82519925defcd6f0d" [[package]] name = "sql_generation" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "anarchist-readable-name-generator-lib 0.2.0", "anyhow", @@ -4839,7 +4839,7 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "turso" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "rand 0.9.2", "rand_chacha 0.9.0", @@ -4853,7 +4853,7 @@ dependencies = [ [[package]] name = "turso-java" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "jni", "thiserror 2.0.16", @@ -4862,7 +4862,7 @@ dependencies = [ [[package]] name = "turso_cli" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "anyhow", "cfg-if", @@ -4898,7 +4898,7 @@ dependencies = [ [[package]] name = "turso_core" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "aegis", "aes", @@ -4958,7 +4958,7 @@ dependencies = [ [[package]] name = "turso_dart" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "flutter_rust_bridge", "turso_core", @@ -4966,7 +4966,7 @@ dependencies = [ [[package]] name = "turso_ext" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "chrono", "getrandom 0.3.2", @@ -4975,7 +4975,7 @@ dependencies = [ [[package]] name = "turso_ext_tests" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "env_logger 0.11.7", "lazy_static", @@ -4986,7 +4986,7 @@ dependencies = [ [[package]] name = "turso_macros" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "proc-macro2", "quote", @@ -4995,7 +4995,7 @@ dependencies = [ [[package]] name = "turso_node" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "chrono", "napi", @@ -5008,7 +5008,7 @@ dependencies = [ [[package]] name = "turso_parser" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "bitflags 2.9.4", "criterion", @@ -5024,7 +5024,7 @@ dependencies = [ [[package]] name = "turso_sqlite3" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "env_logger 0.11.7", "libc", @@ -5037,7 +5037,7 @@ dependencies = [ [[package]] name = "turso_stress" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "anarchist-readable-name-generator-lib 0.1.2", "antithesis_sdk", @@ -5054,7 +5054,7 @@ dependencies = [ [[package]] name = "turso_sync_engine" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "base64 0.22.1", "bytes", @@ -5081,7 +5081,7 @@ dependencies = [ [[package]] name = "turso_sync_js" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "genawaiter", "napi", @@ -5096,7 +5096,7 @@ dependencies = [ [[package]] name = "turso_whopper" -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" dependencies = [ "anyhow", "clap", diff --git a/Cargo.toml b/Cargo.toml index 8ac02dab6..4460ca602 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,29 +39,29 @@ exclude = [ ] [workspace.package] -version = "0.3.0-pre.3" +version = "0.3.0-pre.4" authors = ["the Limbo authors"] edition = "2021" license = "MIT" repository = "https://github.com/tursodatabase/turso" [workspace.dependencies] -turso = { path = "bindings/rust", version = "0.3.0-pre.3" } -turso_node = { path = "bindings/javascript", version = "0.3.0-pre.3" } -limbo_completion = { path = "extensions/completion", version = "0.3.0-pre.3" } -turso_core = { path = "core", version = "0.3.0-pre.3" } -turso_sync_engine = { path = "sync/engine", version = "0.3.0-pre.3" } -limbo_crypto = { path = "extensions/crypto", version = "0.3.0-pre.3" } -limbo_csv = { path = "extensions/csv", version = "0.3.0-pre.3" } -turso_ext = { path = "extensions/core", version = "0.3.0-pre.3" } -turso_ext_tests = { path = "extensions/tests", version = "0.3.0-pre.3" } -limbo_ipaddr = { path = "extensions/ipaddr", version = "0.3.0-pre.3" } -turso_macros = { path = "macros", version = "0.3.0-pre.3" } -limbo_percentile = { path = "extensions/percentile", version = "0.3.0-pre.3" } -limbo_regexp = { path = "extensions/regexp", version = "0.3.0-pre.3" } -limbo_uuid = { path = "extensions/uuid", version = "0.3.0-pre.3" } -turso_parser = { path = "parser", version = "0.3.0-pre.3" } -limbo_fuzzy = { path = "extensions/fuzzy", version = "0.3.0-pre.3" } +turso = { path = "bindings/rust", version = "0.3.0-pre.4" } +turso_node = { path = "bindings/javascript", version = "0.3.0-pre.4" } +limbo_completion = { path = "extensions/completion", version = "0.3.0-pre.4" } +turso_core = { path = "core", version = "0.3.0-pre.4" } +turso_sync_engine = { path = "sync/engine", version = "0.3.0-pre.4" } +limbo_crypto = { path = "extensions/crypto", version = "0.3.0-pre.4" } +limbo_csv = { path = "extensions/csv", version = "0.3.0-pre.4" } +turso_ext = { path = "extensions/core", version = "0.3.0-pre.4" } +turso_ext_tests = { path = "extensions/tests", version = "0.3.0-pre.4" } +limbo_ipaddr = { path = "extensions/ipaddr", version = "0.3.0-pre.4" } +turso_macros = { path = "macros", version = "0.3.0-pre.4" } +limbo_percentile = { path = "extensions/percentile", version = "0.3.0-pre.4" } +limbo_regexp = { path = "extensions/regexp", version = "0.3.0-pre.4" } +limbo_uuid = { path = "extensions/uuid", version = "0.3.0-pre.4" } +turso_parser = { path = "parser", version = "0.3.0-pre.4" } +limbo_fuzzy = { path = "extensions/fuzzy", version = "0.3.0-pre.4" } sql_generation = { path = "sql_generation" } strum = { version = "0.26", features = ["derive"] } strum_macros = "0.26" diff --git a/bindings/javascript/package-lock.json b/bindings/javascript/package-lock.json index 68feab117..56e6574bd 100644 --- a/bindings/javascript/package-lock.json +++ b/bindings/javascript/package-lock.json @@ -1,11 +1,11 @@ { "name": "javascript", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "lockfileVersion": 3, "requires": true, "packages": { "": { - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "workspaces": [ "packages/common", "packages/wasm-common", @@ -3542,7 +3542,7 @@ }, "packages/common": { "name": "@tursodatabase/database-common", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "license": "MIT", "devDependencies": { "typescript": "^5.9.2", @@ -3551,10 +3551,10 @@ }, "packages/native": { "name": "@tursodatabase/database", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3568,11 +3568,11 @@ }, "packages/wasm": { "name": "@tursodatabase/database-wasm", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.4" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3585,7 +3585,7 @@ }, "packages/wasm-common": { "name": "@tursodatabase/database-wasm-common", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "license": "MIT", "dependencies": { "@napi-rs/wasm-runtime": "^1.0.5" @@ -3596,10 +3596,10 @@ }, "sync/packages/common": { "name": "@tursodatabase/sync-common", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4" }, "devDependencies": { "typescript": "^5.9.2" @@ -3607,11 +3607,11 @@ }, "sync/packages/native": { "name": "@tursodatabase/sync", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3", - "@tursodatabase/sync-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4", + "@tursodatabase/sync-common": "^0.3.0-pre.4" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", @@ -3622,12 +3622,12 @@ }, "sync/packages/wasm": { "name": "@tursodatabase/sync-wasm", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "license": "MIT", "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.3", - "@tursodatabase/sync-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.4", + "@tursodatabase/sync-common": "^0.3.0-pre.4" }, "devDependencies": { "@napi-rs/cli": "^3.1.5", diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json index 0c19f4697..12f5d43f4 100644 --- a/bindings/javascript/package.json +++ b/bindings/javascript/package.json @@ -14,5 +14,5 @@ "sync/packages/native", "sync/packages/wasm" ], - "version": "0.3.0-pre.3" + "version": "0.3.0-pre.4" } diff --git a/bindings/javascript/packages/common/package.json b/bindings/javascript/packages/common/package.json index 188f5bbfa..72acbc0dd 100644 --- a/bindings/javascript/packages/common/package.json +++ b/bindings/javascript/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-common", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/native/package.json b/bindings/javascript/packages/native/package.json index 7150ff058..052d1c1ac 100644 --- a/bindings/javascript/packages/native/package.json +++ b/bindings/javascript/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -47,7 +47,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/packages/wasm-common/package.json b/bindings/javascript/packages/wasm-common/package.json index 0b328856a..23b0801ed 100644 --- a/bindings/javascript/packages/wasm-common/package.json +++ b/bindings/javascript/packages/wasm-common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-wasm-common", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" diff --git a/bindings/javascript/packages/wasm/package.json b/bindings/javascript/packages/wasm/package.json index e6214d967..a474952e4 100644 --- a/bindings/javascript/packages/wasm/package.json +++ b/bindings/javascript/packages/wasm/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/database-wasm", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -51,7 +51,7 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.4" } } diff --git a/bindings/javascript/sync/packages/common/package.json b/bindings/javascript/sync/packages/common/package.json index 3f308e4d9..0f83707d6 100644 --- a/bindings/javascript/sync/packages/common/package.json +++ b/bindings/javascript/sync/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-common", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -23,6 +23,6 @@ "test": "echo 'no tests'" }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4" } } diff --git a/bindings/javascript/sync/packages/native/package.json b/bindings/javascript/sync/packages/native/package.json index 822f8adf7..d8b20c163 100644 --- a/bindings/javascript/sync/packages/native/package.json +++ b/bindings/javascript/sync/packages/native/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -44,8 +44,8 @@ ] }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3", - "@tursodatabase/sync-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4", + "@tursodatabase/sync-common": "^0.3.0-pre.4" }, "imports": { "#index": "./index.js" diff --git a/bindings/javascript/sync/packages/wasm/package.json b/bindings/javascript/sync/packages/wasm/package.json index 31543a6ca..c4bea4c94 100644 --- a/bindings/javascript/sync/packages/wasm/package.json +++ b/bindings/javascript/sync/packages/wasm/package.json @@ -1,6 +1,6 @@ { "name": "@tursodatabase/sync-wasm", - "version": "0.3.0-pre.3", + "version": "0.3.0-pre.4", "repository": { "type": "git", "url": "https://github.com/tursodatabase/turso" @@ -54,8 +54,8 @@ "#index": "./index.js" }, "dependencies": { - "@tursodatabase/database-common": "^0.3.0-pre.3", - "@tursodatabase/database-wasm-common": "^0.3.0-pre.3", - "@tursodatabase/sync-common": "^0.3.0-pre.3" + "@tursodatabase/database-common": "^0.3.0-pre.4", + "@tursodatabase/database-wasm-common": "^0.3.0-pre.4", + "@tursodatabase/sync-common": "^0.3.0-pre.4" } } diff --git a/bindings/javascript/yarn.lock b/bindings/javascript/yarn.lock index de25e12a3..7b33e6081 100644 --- a/bindings/javascript/yarn.lock +++ b/bindings/javascript/yarn.lock @@ -1586,7 +1586,7 @@ __metadata: languageName: node linkType: hard -"@tursodatabase/database-common@npm:^0.3.0-pre.3, @tursodatabase/database-common@workspace:packages/common": +"@tursodatabase/database-common@npm:^0.3.0-pre.4, @tursodatabase/database-common@workspace:packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/database-common@workspace:packages/common" dependencies: @@ -1595,7 +1595,7 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/database-wasm-common@npm:^0.3.0-pre.3, @tursodatabase/database-wasm-common@workspace:packages/wasm-common": +"@tursodatabase/database-wasm-common@npm:^0.3.0-pre.4, @tursodatabase/database-wasm-common@workspace:packages/wasm-common": version: 0.0.0-use.local resolution: "@tursodatabase/database-wasm-common@workspace:packages/wasm-common" dependencies: @@ -1609,8 +1609,8 @@ __metadata: resolution: "@tursodatabase/database-wasm@workspace:packages/wasm" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.3" - "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.3" + "@tursodatabase/database-common": "npm:^0.3.0-pre.4" + "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.4" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1624,7 +1624,7 @@ __metadata: resolution: "@tursodatabase/database@workspace:packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.3" + "@tursodatabase/database-common": "npm:^0.3.0-pre.4" "@types/node": "npm:^24.3.1" better-sqlite3: "npm:^12.2.0" drizzle-kit: "npm:^0.31.4" @@ -1634,11 +1634,11 @@ __metadata: languageName: unknown linkType: soft -"@tursodatabase/sync-common@npm:^0.3.0-pre.3, @tursodatabase/sync-common@workspace:sync/packages/common": +"@tursodatabase/sync-common@npm:^0.3.0-pre.4, @tursodatabase/sync-common@workspace:sync/packages/common": version: 0.0.0-use.local resolution: "@tursodatabase/sync-common@workspace:sync/packages/common" dependencies: - "@tursodatabase/database-common": "npm:^0.3.0-pre.3" + "@tursodatabase/database-common": "npm:^0.3.0-pre.4" typescript: "npm:^5.9.2" languageName: unknown linkType: soft @@ -1648,9 +1648,9 @@ __metadata: resolution: "@tursodatabase/sync-wasm@workspace:sync/packages/wasm" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.3" - "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.3" - "@tursodatabase/sync-common": "npm:^0.3.0-pre.3" + "@tursodatabase/database-common": "npm:^0.3.0-pre.4" + "@tursodatabase/database-wasm-common": "npm:^0.3.0-pre.4" + "@tursodatabase/sync-common": "npm:^0.3.0-pre.4" "@vitest/browser": "npm:^3.2.4" playwright: "npm:^1.55.0" typescript: "npm:^5.9.2" @@ -1664,8 +1664,8 @@ __metadata: resolution: "@tursodatabase/sync@workspace:sync/packages/native" dependencies: "@napi-rs/cli": "npm:^3.1.5" - "@tursodatabase/database-common": "npm:^0.3.0-pre.3" - "@tursodatabase/sync-common": "npm:^0.3.0-pre.3" + "@tursodatabase/database-common": "npm:^0.3.0-pre.4" + "@tursodatabase/sync-common": "npm:^0.3.0-pre.4" "@types/node": "npm:^24.3.1" typescript: "npm:^5.9.2" vitest: "npm:^3.2.4" From 123a26b7d98a8099ed26d147491287831ebc26ea Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 17 Oct 2025 13:39:32 -0400 Subject: [PATCH 365/428] Add AtomicEnum macro macros crate --- macros/src/atomic_enum.rs | 290 ++++++++++++++++++++++++++++++++++++++ macros/src/lib.rs | 28 ++++ 2 files changed, 318 insertions(+) create mode 100644 macros/src/atomic_enum.rs diff --git a/macros/src/atomic_enum.rs b/macros/src/atomic_enum.rs new file mode 100644 index 000000000..d1248f9d8 --- /dev/null +++ b/macros/src/atomic_enum.rs @@ -0,0 +1,290 @@ +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, Data, DeriveInput, Fields, Type}; + +pub(crate) fn derive_atomic_enum_inner(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let name = &input.ident; + let atomic_name = syn::Ident::new(&format!("Atomic{name}"), name.span()); + + let variants = match &input.data { + Data::Enum(data) => &data.variants, + _ => { + return syn::Error::new_spanned(input, "AtomicEnum can only be derived for enums") + .to_compile_error() + .into(); + } + }; + + // get info about variants to determine how we have to encode them + let mut has_bool_field = false; + let mut has_u8_field = false; + let mut max_discriminant = 0u8; + + for (idx, variant) in variants.iter().enumerate() { + max_discriminant = idx as u8; + match &variant.fields { + Fields::Unit => {} + Fields::Named(fields) if fields.named.len() == 1 => { + let field = &fields.named[0]; + if is_bool_type(&field.ty) { + has_bool_field = true; + } else if is_u8_or_i8_type(&field.ty) { + has_u8_field = true; + } else { + return syn::Error::new_spanned( + field, + "AtomicEnum only supports bool, u8, or i8 fields", + ) + .to_compile_error() + .into(); + } + } + Fields::Unnamed(fields) if fields.unnamed.len() == 1 => { + let field = &fields.unnamed[0]; + if is_bool_type(&field.ty) { + has_bool_field = true; + } else if is_u8_or_i8_type(&field.ty) { + has_u8_field = true; + } else { + return syn::Error::new_spanned( + field, + "AtomicEnum only supports bool, u8, or i8 fields", + ) + .to_compile_error() + .into(); + } + } + _ => { + return syn::Error::new_spanned( + variant, + "AtomicEnum only supports unit variants or variants with a single field", + ) + .to_compile_error() + .into(); + } + } + } + + let (storage_type, atomic_type) = if has_u8_field || (has_bool_field && max_discriminant > 127) + { + // Need u16: 8 bits for discriminant, 8 bits for data + (quote! { u16 }, quote! { ::std::sync::atomic::AtomicU16 }) + } else { + // Can use u8: 7 bits for discriminant, 1 bit for bool (if any) + (quote! { u8 }, quote! { ::std::sync::atomic::AtomicU8 }) + }; + + let use_u16 = has_u8_field || (has_bool_field && max_discriminant > 127); + + let to_storage = variants.iter().enumerate().map(|(idx, variant)| { + let var_name = &variant.ident; + let disc = idx as u8; // The discriminant here is just the variant's index + + match &variant.fields { + // Simple unit variant, just store the discriminant + Fields::Unit => { + if use_u16 { + quote! { #name::#var_name => #disc as u16 } + } else { + quote! { #name::#var_name => #disc } + } + } + Fields::Named(fields) => { + // Named field variant like `Write { schema_did_change: bool }` + let field = &fields.named[0]; + let field_name = &field.ident; + + if is_bool_type(&field.ty) { + if use_u16 { + // Pack as: [discriminant_byte | bool_as_byte] + // Example: Write {true} with disc=3 becomes: b100000011 + quote! { + #name::#var_name { ref #field_name } => { + (#disc as u16) | ((*#field_name as u16) << 8) + } + } + } else { + // Same as above but with u8, so only 1 bit for bool + // Example: Write{true} with disc=3 becomes: b10000011 + quote! { + #name::#var_name { ref #field_name } => { + #disc | ((*#field_name as u8) << 7) + } + } + } + } else { + // u8/i8 field always uses u16 to have enough bits + // Pack as: [discriminant_byte | value_byte] + quote! { + #name::#var_name { ref #field_name } => { + (#disc as u16) | ((*#field_name as u16) << 8) + } + } + } + } + Fields::Unnamed(_) => { + // same strategy as above, but for tuple variants like `Write(bool)` + if is_bool_type(&variant.fields.iter().next().unwrap().ty) { + if use_u16 { + quote! { + #name::#var_name(ref val) => { + (#disc as u16) | ((*val as u16) << 8) + } + } + } else { + quote! { + #name::#var_name(ref val) => { + #disc | ((*val as u8) << 7) + } + } + } + } else { + quote! { + #name::#var_name(ref val) => { + (#disc as u16) | ((*val as u16) << 8) + } + } + } + } + } + }); + + // Generate the match arms for decoding the storage representation back to enum + let from_storage = variants.iter().enumerate().map(|(idx, variant)| { + let var_name = &variant.ident; + let disc = idx as u8; + + match &variant.fields { + Fields::Unit => quote! { #disc => #name::#var_name }, + Fields::Named(fields) => { + let field = &fields.named[0]; + let field_name = &field.ident; + + if is_bool_type(&field.ty) { + if use_u16 { + // Extract bool from high byte: check if non-zero + quote! { + #disc => #name::#var_name { + #field_name: (val >> 8) != 0 + } + } + } else { + // check single bool value at bit 7 + quote! { + #disc => #name::#var_name { + #field_name: (val & 0x80) != 0 + } + } + } + } else { + quote! { + #disc => #name::#var_name { + // Extract u8/i8 from high byte and cast to appropriate type + #field_name: (val >> 8) as _ + } + } + } + } + Fields::Unnamed(_) => { + if is_bool_type(&variant.fields.iter().next().unwrap().ty) { + if use_u16 { + quote! { #disc => #name::#var_name((val >> 8) != 0) } + } else { + quote! { #disc => #name::#var_name((val & 0x80) != 0) } + } + } else { + quote! { #disc => #name::#var_name((val >> 8) as _) } + } + } + } + }); + + let discriminant_mask = if use_u16 { + quote! { 0xFF } + } else { + quote! { 0x7F } + }; + let to_storage_arms_copy = to_storage.clone(); + + let expanded = quote! { + #[derive(Debug)] + /// Atomic wrapper for #name + pub struct #atomic_name(#atomic_type); + + impl #atomic_name { + /// Encode enum into storage representation + /// Discriminant in lower bits, field data in upper bits + #[inline] + fn to_storage(val: &#name) -> #storage_type { + match val { + #(#to_storage_arms_copy),* + } + } + + /// Decode storage representation into enum + /// Panics on invalid discriminant + #[inline] + fn from_storage(val: #storage_type) -> #name { + let discriminant = (val & #discriminant_mask) as u8; + match discriminant { + #(#from_storage,)* + _ => panic!(concat!("Invalid ", stringify!(#name), " discriminant: {}"), discriminant), + } + } + + /// Create new atomic enum with initial value + #[inline] + pub const fn new(val: #name) -> Self { + // Can't call to_storage in const context, so inline it + let storage = match val { + #(#to_storage),* + }; + Self(#atomic_type::new(storage)) + } + + #[inline] + /// Load and convert the current value to expected enum + pub fn get(&self) -> #name { + Self::from_storage(self.0.load(::std::sync::atomic::Ordering::Acquire)) + } + + #[inline] + /// Convert and store new value + pub fn set(&self, val: #name) { + self.0.store(Self::to_storage(&val), ::std::sync::atomic::Ordering::Release) + } + + #[inline] + /// Store new value and return previous value + pub fn swap(&self, val: #name) -> #name { + let prev = self.0.swap(Self::to_storage(&val), ::std::sync::atomic::Ordering::AcqRel); + Self::from_storage(prev) + } + } + + impl From<#name> for #atomic_name { + fn from(val: #name) -> Self { + Self::new(val) + } + } + }; + + TokenStream::from(expanded) +} + +fn is_bool_type(ty: &Type) -> bool { + if let Type::Path(path) = ty { + path.path.is_ident("bool") + } else { + false + } +} + +fn is_u8_or_i8_type(ty: &Type) -> bool { + if let Type::Path(path) = ty { + path.path.is_ident("u8") || path.path.is_ident("i8") + } else { + false + } +} diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 8df01da22..9c95cbd1a 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -1,5 +1,6 @@ mod ext; extern crate proc_macro; +mod atomic_enum; use proc_macro::{token_stream::IntoIter, Group, TokenStream, TokenTree}; use std::collections::HashMap; @@ -464,3 +465,30 @@ pub fn derive_vfs_module(input: TokenStream) -> TokenStream { pub fn match_ignore_ascii_case(input: TokenStream) -> TokenStream { ext::match_ignore_ascci_case(input) } + +/// Derive macro for creating atomic wrappers for enums +/// +/// Supports: +/// - Unit variants +/// - Variants with single bool/u8/i8 fields +/// - Named or unnamed fields +/// +/// Algorithm: +/// - Uses u8 representation, splitting bits for variant discriminant and field data +/// - For bool fields: high bit for bool, lower 7 bits for discriminant +/// - For u8/i8 fields: uses u16 internally (8 bits discriminant, 8 bits data) +/// +/// Example: +/// ```rust +/// #[derive(AtomicEnum)] +/// enum TransactionState { +/// Write { schema_did_change: bool }, +/// Read, +/// PendingUpgrade, +/// None, +/// } +/// ``` +#[proc_macro_derive(AtomicEnum)] +pub fn derive_atomic_enum(input: TokenStream) -> TokenStream { + atomic_enum::derive_atomic_enum_inner(input) +} From a8b257c6643fad1fcb10f65171b1076732298bd0 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 17 Oct 2025 13:40:18 -0400 Subject: [PATCH 366/428] Replace several RwLock values with new AtomicEnums --- core/lib.rs | 65 ++++++++++--------- .../mvcc/database/checkpoint_state_machine.rs | 10 +-- core/storage/btree.rs | 2 +- core/storage/encryption.rs | 15 ++++- core/storage/pager.rs | 55 +++------------- core/util.rs | 1 + macros/src/atomic_enum.rs | 6 +- macros/src/lib.rs | 2 +- 8 files changed, 67 insertions(+), 89 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 637e91c92..c3149b64d 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -40,6 +40,7 @@ pub mod numeric; mod numeric; use crate::storage::checksum::CHECKSUM_REQUIRED_RESERVED_BYTES; +use crate::storage::encryption::AtomicCipherMode; use crate::translate::display::PlanContext; use crate::translate::pragma::TURSO_CDC_DEFAULT_TABLE_NAME; #[cfg(all(feature = "fs", feature = "conn_raw_api"))] @@ -93,7 +94,7 @@ pub use storage::{ wal::{CheckpointMode, CheckpointResult, Wal, WalFile, WalFileShared}, }; use tracing::{instrument, Level}; -use turso_macros::match_ignore_ascii_case; +use turso_macros::{match_ignore_ascii_case, AtomicEnum}; use turso_parser::ast::fmt::ToTokens; use turso_parser::{ast, ast::Cmd, parser::Parser}; use types::IOResult; @@ -178,7 +179,7 @@ impl EncryptionOpts { pub type Result = std::result::Result; -#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[derive(Clone, AtomicEnum, Copy, PartialEq, Eq, Debug)] enum TransactionState { Write { schema_did_change: bool }, Read, @@ -186,7 +187,7 @@ enum TransactionState { None, } -#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[derive(Debug, AtomicEnum, Clone, Copy, PartialEq, Eq)] pub enum SyncMode { Off = 0, Full = 2, @@ -562,7 +563,7 @@ impl Database { schema: RwLock::new(self.schema.lock().unwrap().clone()), database_schemas: RwLock::new(std::collections::HashMap::new()), auto_commit: AtomicBool::new(true), - transaction_state: RwLock::new(TransactionState::None), + transaction_state: AtomicTransactionState::new(TransactionState::None), last_insert_rowid: AtomicI64::new(0), last_change: AtomicI64::new(0), total_changes: AtomicI64::new(0), @@ -580,8 +581,8 @@ impl Database { metrics: RwLock::new(ConnectionMetrics::new()), is_nested_stmt: AtomicBool::new(false), encryption_key: RwLock::new(None), - encryption_cipher_mode: RwLock::new(None), - sync_mode: RwLock::new(SyncMode::Full), + encryption_cipher_mode: AtomicCipherMode::new(CipherMode::None), + sync_mode: AtomicSyncMode::new(SyncMode::Full), data_sync_retry: AtomicBool::new(false), busy_timeout: RwLock::new(Duration::new(0, 0)), is_mvcc_bootstrap_connection: AtomicBool::new(is_mvcc_bootstrap_connection), @@ -604,7 +605,7 @@ impl Database { /// we need to read the page_size from the database header. fn read_page_size_from_db_header(&self) -> Result { turso_assert!( - self.db_state.is_initialized(), + self.db_state.get().is_initialized(), "read_page_size_from_db_header called on uninitialized database" ); turso_assert!( @@ -622,7 +623,7 @@ impl Database { fn read_reserved_space_bytes_from_db_header(&self) -> Result { turso_assert!( - self.db_state.is_initialized(), + self.db_state.get().is_initialized(), "read_reserved_space_bytes_from_db_header called on uninitialized database" ); turso_assert!( @@ -658,7 +659,7 @@ impl Database { return Ok(page_size); } } - if self.db_state.is_initialized() { + if self.db_state.get().is_initialized() { Ok(self.read_page_size_from_db_header()?) } else { let Some(size) = requested_page_size else { @@ -674,7 +675,7 @@ impl Database { /// if the database is initialized i.e. it exists on disk, return the reserved space bytes from /// the header or None fn maybe_get_reserved_space_bytes(&self) -> Result> { - if self.db_state.is_initialized() { + if self.db_state.get().is_initialized() { Ok(Some(self.read_reserved_space_bytes_from_db_header()?)) } else { Ok(None) @@ -696,7 +697,7 @@ impl Database { drop(shared_wal); let buffer_pool = self.buffer_pool.clone(); - if self.db_state.is_initialized() { + if self.db_state.get().is_initialized() { buffer_pool.finalize_with_page_size(page_size.get() as usize)?; } @@ -729,7 +730,7 @@ impl Database { let buffer_pool = self.buffer_pool.clone(); - if self.db_state.is_initialized() { + if self.db_state.get().is_initialized() { buffer_pool.finalize_with_page_size(page_size.get() as usize)?; } @@ -1067,7 +1068,7 @@ pub struct Connection { database_schemas: RwLock>>, /// Whether to automatically commit transaction auto_commit: AtomicBool, - transaction_state: RwLock, + transaction_state: AtomicTransactionState, last_insert_rowid: AtomicI64, last_change: AtomicI64, total_changes: AtomicI64, @@ -1096,8 +1097,8 @@ pub struct Connection { /// Generally this is only true for ParseSchema. is_nested_stmt: AtomicBool, encryption_key: RwLock>, - encryption_cipher_mode: RwLock>, - sync_mode: RwLock, + encryption_cipher_mode: AtomicCipherMode, + sync_mode: AtomicSyncMode, data_sync_retry: AtomicBool, /// User defined max accumulated Busy timeout duration /// Default is 0 (no timeout) @@ -1238,8 +1239,7 @@ impl Connection { let reparse_result = self.reparse_schema(); - let previous = - std::mem::replace(&mut *self.transaction_state.write(), TransactionState::None); + let previous = self.transaction_state.swap(TransactionState::None); turso_assert!( matches!(previous, TransactionState::None | TransactionState::Read), "unexpected end transaction state" @@ -1519,7 +1519,7 @@ impl Connection { let _ = conn.pragma_update("cipher", encryption_opts.cipher.to_string()); let _ = conn.pragma_update("hexkey", encryption_opts.hexkey.to_string()); let pager = conn.pager.read(); - if db.db_state.is_initialized() { + if db.db_state.get().is_initialized() { // Clear page cache so the header page can be reread from disk and decrypted using the encryption context. pager.clear_page_cache(false); } @@ -1597,9 +1597,9 @@ impl Connection { header.schema_cookie.get() < version, "cookie can't go back in time" ); - *self.transaction_state.write() = TransactionState::Write { + self.set_tx_state(TransactionState::Write { schema_did_change: true, - }; + }); self.with_schema_mut(|schema| schema.schema_version = version); header.schema_cookie = version.into(); }) @@ -1682,9 +1682,9 @@ impl Connection { })?; // start write transaction and disable auto-commit mode as SQL can be executed within WAL session (at caller own risk) - *self.transaction_state.write() = TransactionState::Write { + self.set_tx_state(TransactionState::Write { schema_did_change: false, - }; + }); self.auto_commit.store(false, Ordering::SeqCst); Ok(()) @@ -2029,7 +2029,7 @@ impl Connection { } pub fn is_db_initialized(&self) -> bool { - self.db.db_state.is_initialized() + self.db.db_state.get().is_initialized() } fn get_pager_from_database_index(&self, index: &usize) -> Arc { @@ -2259,11 +2259,11 @@ impl Connection { } pub fn get_sync_mode(&self) -> SyncMode { - *self.sync_mode.read() + self.sync_mode.get() } pub fn set_sync_mode(&self, mode: SyncMode) { - *self.sync_mode.write() = mode; + self.sync_mode.set(mode); } pub fn get_data_sync_retry(&self) -> bool { @@ -2289,7 +2289,7 @@ impl Connection { pub fn set_encryption_cipher(&self, cipher_mode: CipherMode) -> Result<()> { tracing::trace!("setting encryption cipher for connection"); - *self.encryption_cipher_mode.write() = Some(cipher_mode); + self.encryption_cipher_mode.set(cipher_mode); self.set_encryption_context() } @@ -2300,7 +2300,10 @@ impl Connection { } pub fn get_encryption_cipher_mode(&self) -> Option { - *self.encryption_cipher_mode.read() + match self.encryption_cipher_mode.get() { + CipherMode::None => None, + mode => Some(mode), + } } // if both key and cipher are set, set encryption context on pager @@ -2309,8 +2312,8 @@ impl Connection { let Some(key) = key_guard.as_ref() else { return Ok(()); }; - let cipher_guard = self.encryption_cipher_mode.read(); - let Some(cipher_mode) = *cipher_guard else { + let cipher_mode = self.get_encryption_cipher_mode(); + let Some(cipher_mode) = cipher_mode else { return Ok(()); }; tracing::trace!("setting encryption ctx for connection"); @@ -2348,11 +2351,11 @@ impl Connection { } fn set_tx_state(&self, state: TransactionState) { - *self.transaction_state.write() = state; + self.transaction_state.set(state); } fn get_tx_state(&self) -> TransactionState { - *self.transaction_state.read() + self.transaction_state.get() } pub(crate) fn get_mv_tx_id(&self) -> Option { diff --git a/core/mvcc/database/checkpoint_state_machine.rs b/core/mvcc/database/checkpoint_state_machine.rs index 8c3e109e3..712cc8048 100644 --- a/core/mvcc/database/checkpoint_state_machine.rs +++ b/core/mvcc/database/checkpoint_state_machine.rs @@ -325,9 +325,9 @@ impl CheckpointStateMachine { } result?; if self.update_transaction_state { - *self.connection.transaction_state.write() = TransactionState::Write { + self.connection.set_tx_state(TransactionState::Write { schema_did_change: false, - }; // TODO: schema_did_change?? + }); // TODO: schema_did_change?? } self.lock_states.pager_write_tx = true; self.state = CheckpointState::WriteRow { @@ -534,7 +534,7 @@ impl CheckpointStateMachine { self.lock_states.pager_read_tx = false; self.lock_states.pager_write_tx = false; if self.update_transaction_state { - *self.connection.transaction_state.write() = TransactionState::None; + self.connection.set_tx_state(TransactionState::None); } let header = self .pager @@ -623,12 +623,12 @@ impl StateTransition for CheckpointStateMachine { if self.lock_states.pager_write_tx { self.pager.rollback_tx(self.connection.as_ref()); if self.update_transaction_state { - *self.connection.transaction_state.write() = TransactionState::None; + self.connection.set_tx_state(TransactionState::None); } } else if self.lock_states.pager_read_tx { self.pager.end_read_tx(); if self.update_transaction_state { - *self.connection.transaction_state.write() = TransactionState::None; + self.connection.set_tx_state(TransactionState::None); } } if self.lock_states.blocking_checkpoint_lock_held { diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 440c0de79..0c5279608 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -662,7 +662,7 @@ impl BTreeNodeState { impl BTreeCursor { pub fn new(pager: Arc, root_page: i64, num_columns: usize) -> Self { - let valid_state = if root_page == 1 && !pager.db_state.is_initialized() { + let valid_state = if root_page == 1 && !pager.db_state.get().is_initialized() { CursorValidState::Invalid } else { CursorValidState::Valid diff --git a/core/storage/encryption.rs b/core/storage/encryption.rs index 156d17c0b..f0184dbc0 100644 --- a/core/storage/encryption.rs +++ b/core/storage/encryption.rs @@ -10,7 +10,7 @@ use aes_gcm::{ aead::{Aead, AeadCore, KeyInit, OsRng}, Aes128Gcm, Aes256Gcm, Key, Nonce, }; -use turso_macros::match_ignore_ascii_case; +use turso_macros::{match_ignore_ascii_case, AtomicEnum}; /// Encryption Scheme /// We support two major algorithms: AEGIS, AES GCM. These algorithms picked so that they also do @@ -319,8 +319,9 @@ define_aegis_cipher!( "AEGIS-128X4" ); -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, AtomicEnum, Clone, Copy, PartialEq)] pub enum CipherMode { + None, Aes128Gcm, Aes256Gcm, Aegis256, @@ -363,6 +364,7 @@ impl std::fmt::Display for CipherMode { CipherMode::Aegis128X4 => write!(f, "aegis128x4"), CipherMode::Aegis256X2 => write!(f, "aegis256x2"), CipherMode::Aegis256X4 => write!(f, "aegis256x4"), + CipherMode::None => write!(f, "None"), } } } @@ -380,6 +382,7 @@ impl CipherMode { CipherMode::Aegis128L => 16, CipherMode::Aegis128X2 => 16, CipherMode::Aegis128X4 => 16, + CipherMode::None => 0, } } @@ -394,6 +397,7 @@ impl CipherMode { CipherMode::Aegis128L => 16, CipherMode::Aegis128X2 => 16, CipherMode::Aegis128X4 => 16, + CipherMode::None => 0, } } @@ -408,6 +412,7 @@ impl CipherMode { CipherMode::Aegis128L => 16, CipherMode::Aegis128X2 => 16, CipherMode::Aegis128X4 => 16, + CipherMode::None => 0, } } @@ -427,6 +432,7 @@ impl CipherMode { CipherMode::Aegis128L => 6, CipherMode::Aegis128X2 => 7, CipherMode::Aegis128X4 => 8, + CipherMode::None => 0, } } @@ -503,6 +509,11 @@ impl EncryptionContext { CipherMode::Aegis128L => Cipher::Aegis128L(Box::new(Aegis128LCipher::new(key))), CipherMode::Aegis128X2 => Cipher::Aegis128X2(Box::new(Aegis128X2Cipher::new(key))), CipherMode::Aegis128X4 => Cipher::Aegis128X4(Box::new(Aegis128X4Cipher::new(key))), + CipherMode::None => { + return Err(LimboError::InvalidArgument( + "must select valid CipherMode".into(), + )) + } }; Ok(Self { cipher_mode, diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 2e6b50920..ddb791a0f 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -25,6 +25,7 @@ use std::sync::atomic::{ }; use std::sync::{Arc, Mutex}; use tracing::{instrument, trace, Level}; +use turso_macros::AtomicEnum; use super::btree::btree_init_page; use super::page_cache::{CacheError, CacheResizeResult, PageCache, PageCacheKey}; @@ -57,7 +58,7 @@ impl HeaderRef { tracing::trace!("HeaderRef::from_pager - {:?}", state); match state { HeaderRefState::Start => { - if !pager.db_state.is_initialized() { + if !pager.db_state.get().is_initialized() { return Err(LimboError::Page1NotAlloc); } @@ -97,7 +98,7 @@ impl HeaderRefMut { tracing::trace!(?state); match state { HeaderRefState::Start => { - if !pager.db_state.is_initialized() { + if !pager.db_state.get().is_initialized() { return Err(LimboError::Page1NotAlloc); } @@ -416,57 +417,19 @@ impl From for AutoVacuumMode { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(usize)] +#[derive(Debug, AtomicEnum, Clone, Copy, PartialEq, Eq)] pub enum DbState { - Uninitialized = Self::UNINITIALIZED, - Initializing = Self::INITIALIZING, - Initialized = Self::INITIALIZED, + Uninitialized, + Initializing, + Initialized, } impl DbState { - pub(self) const UNINITIALIZED: usize = 0; - pub(self) const INITIALIZING: usize = 1; - pub(self) const INITIALIZED: usize = 2; - - #[inline] pub fn is_initialized(&self) -> bool { matches!(self, DbState::Initialized) } } -#[derive(Debug)] -#[repr(transparent)] -pub struct AtomicDbState(AtomicUsize); - -impl AtomicDbState { - #[inline] - pub const fn new(state: DbState) -> Self { - Self(AtomicUsize::new(state as usize)) - } - - #[inline] - pub fn set(&self, state: DbState) { - self.0.store(state as usize, Ordering::Release); - } - - #[inline] - pub fn get(&self) -> DbState { - let v = self.0.load(Ordering::Acquire); - match v { - DbState::UNINITIALIZED => DbState::Uninitialized, - DbState::INITIALIZING => DbState::Initializing, - DbState::INITIALIZED => DbState::Initialized, - _ => unreachable!(), - } - } - - #[inline] - pub fn is_initialized(&self) -> bool { - self.get().is_initialized() - } -} - #[derive(Debug, Clone)] #[cfg(not(feature = "omit_autovacuum"))] enum PtrMapGetState { @@ -621,7 +584,7 @@ impl Pager { db_state: Arc, init_lock: Arc>, ) -> Result { - let allocate_page1_state = if !db_state.is_initialized() { + let allocate_page1_state = if !db_state.get().is_initialized() { RwLock::new(AllocatePage1State::Start) } else { RwLock::new(AllocatePage1State::Done) @@ -1131,7 +1094,7 @@ impl Pager { #[instrument(skip_all, level = Level::DEBUG)] pub fn maybe_allocate_page1(&self) -> Result> { - if !self.db_state.is_initialized() { + if !self.db_state.get().is_initialized() { if let Ok(_lock) = self.init_lock.try_lock() { match (self.db_state.get(), self.allocating_page1()) { // In case of being empty or (allocating and this connection is performing allocation) then allocate the first page diff --git a/core/util.rs b/core/util.rs index 26696eabe..77062fd7d 100644 --- a/core/util.rs +++ b/core/util.rs @@ -11,6 +11,7 @@ use crate::{ LimboError, OpenFlags, Result, Statement, StepResult, SymbolTable, }; use crate::{Connection, MvStore, IO}; +use std::sync::atomic::AtomicU8; use std::{ collections::HashMap, rc::Rc, diff --git a/macros/src/atomic_enum.rs b/macros/src/atomic_enum.rs index d1248f9d8..14dd97ac0 100644 --- a/macros/src/atomic_enum.rs +++ b/macros/src/atomic_enum.rs @@ -246,19 +246,19 @@ pub(crate) fn derive_atomic_enum_inner(input: TokenStream) -> TokenStream { #[inline] /// Load and convert the current value to expected enum pub fn get(&self) -> #name { - Self::from_storage(self.0.load(::std::sync::atomic::Ordering::Acquire)) + Self::from_storage(self.0.load(::std::sync::atomic::Ordering::SeqCst)) } #[inline] /// Convert and store new value pub fn set(&self, val: #name) { - self.0.store(Self::to_storage(&val), ::std::sync::atomic::Ordering::Release) + self.0.store(Self::to_storage(&val), ::std::sync::atomic::Ordering::SeqCst) } #[inline] /// Store new value and return previous value pub fn swap(&self, val: #name) -> #name { - let prev = self.0.swap(Self::to_storage(&val), ::std::sync::atomic::Ordering::AcqRel); + let prev = self.0.swap(Self::to_storage(&val), ::std::sync::atomic::Ordering::SeqCst); Self::from_storage(prev) } } diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 9c95cbd1a..9db89bad2 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -479,7 +479,7 @@ pub fn match_ignore_ascii_case(input: TokenStream) -> TokenStream { /// - For u8/i8 fields: uses u16 internally (8 bits discriminant, 8 bits data) /// /// Example: -/// ```rust +/// ```ignore /// #[derive(AtomicEnum)] /// enum TransactionState { /// Write { schema_did_change: bool }, From 0fb149c4c9924b2666b1c90db883a6d9eda86a4c Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 17:44:02 +0400 Subject: [PATCH 367/428] fix bug --- core/translate/order_by.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/core/translate/order_by.rs b/core/translate/order_by.rs index c80a504f7..2dd7a3c8a 100644 --- a/core/translate/order_by.rs +++ b/core/translate/order_by.rs @@ -332,7 +332,7 @@ pub fn order_by_sorter_insert( .. } = sort_metadata; - let (insert_label, skip_label) = if *use_heap_sort { + let skip_label = if *use_heap_sort { // skip records which greater than current top-k maintained in a separate BTreeIndex let insert_label = program.allocate_label(); let skip_label = program.allocate_label(); @@ -353,9 +353,15 @@ pub fn order_by_sorter_insert( num_regs: orderby_sorter_column_count, target_pc: skip_label, }); - (Some(insert_label), Some(skip_label)) + program.emit_insn(Insn::Delete { + cursor_id: *sort_cursor, + table_name: "".to_string(), + is_part_of_update: false, + }); + program.preassign_label_to_next_insn(insert_label); + Some(skip_label) } else { - (None, None) + None }; let mut cur_reg = start_reg + order_by_len; @@ -452,13 +458,6 @@ pub fn order_by_sorter_insert( } if *use_heap_sort { - program.emit_insn(Insn::Delete { - cursor_id: *sort_cursor, - table_name: "".to_string(), - is_part_of_update: false, - }); - - program.preassign_label_to_next_insn(insert_label.unwrap()); program.emit_insn(Insn::MakeRecord { start_reg, count: orderby_sorter_column_count, From 689c11a21a5d1b19d881925a972cd848861746e9 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 17:45:49 +0400 Subject: [PATCH 368/428] cargo fmt --- core/vdbe/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index bc9a159b1..847f53004 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -1028,7 +1028,6 @@ fn make_record(registers: &[Register], start_reg: &usize, count: &usize) -> Immu ImmutableRecord::from_registers(regs, regs.len()) } - #[instrument(skip(program), level = Level::DEBUG)] fn trace_insn(program: &Program, addr: InsnReference, insn: &Insn) { tracing::trace!( From 5dd503b7b928894c3db5071962cd721378238ec0 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 14 Oct 2025 15:16:03 +0300 Subject: [PATCH 369/428] core/storage: Cache schema cookie in Pager Every transaction was reading page 1 from the WAL to check the schema cookie in op_transaction, causing unnecessary WAL lookups. This commit caches the schema_cookie in Pager as AtomicU64, similar to how page_size and reserved_space are already cached. The cache is updated when the header is read/modified and invalidated in begin_read_tx() when WAL changes are detected from other connections. This matches SQLite's approach of caching frequently accessed header fields to avoid repeated page 1 reads. Improves write throughput by 5% in our benchmarks. --- core/storage/pager.rs | 52 ++++++++++++++++++++++++++++++++++++++++++- core/vdbe/execute.rs | 19 +++++++++++++--- 2 files changed, 67 insertions(+), 4 deletions(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 2e6b50920..ab5ffc15a 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -537,6 +537,11 @@ pub struct Pager { /// to change it. pub(crate) page_size: AtomicU32, reserved_space: AtomicU16, + /// Schema cookie cache. + /// + /// Note that schema cookie is 32-bits, but we use 64-bit field so we can + /// represent case where value is not set. + schema_cookie: AtomicU64, free_page_state: RwLock, /// Maximum number of pages allowed in the database. Default is 1073741823 (SQLite default). max_page_count: AtomicU32, @@ -650,6 +655,7 @@ impl Pager { allocate_page1_state, page_size: AtomicU32::new(0), // 0 means not set reserved_space: AtomicU16::new(RESERVED_SPACE_NOT_SET), + schema_cookie: AtomicU64::new(Self::SCHEMA_COOKIE_NOT_SET), free_page_state: RwLock::new(FreePageState::Start), allocate_page_state: RwLock::new(AllocatePageState::Start), max_page_count: AtomicU32::new(DEFAULT_MAX_PAGE_COUNT), @@ -1115,6 +1121,41 @@ impl Pager { self.reserved_space.store(space as u16, Ordering::SeqCst); } + /// Schema cookie sentinel value that represents value not set. + const SCHEMA_COOKIE_NOT_SET: u64 = u64::MAX; + + /// Get the cached schema cookie. Returns None if not set yet. + pub fn get_schema_cookie_cached(&self) -> Option { + let value = self.schema_cookie.load(Ordering::SeqCst); + if value == Self::SCHEMA_COOKIE_NOT_SET { + None + } else { + Some(value as u32) + } + } + + /// Set the schema cookie cache. + pub fn set_schema_cookie(&self, cookie: Option) { + match cookie { + Some(value) => { + self.schema_cookie.store(value as u64, Ordering::SeqCst); + } + None => self + .schema_cookie + .store(Self::SCHEMA_COOKIE_NOT_SET, Ordering::SeqCst), + } + } + + /// Get the schema cookie, using the cached value if available to avoid reading page 1. + pub fn get_schema_cookie(&self) -> Result> { + // Try to use cached value first + if let Some(cookie) = self.get_schema_cookie_cached() { + return Ok(IOResult::Done(cookie)); + } + // If not cached, read from header and cache it + self.with_header(|header| header.schema_cookie.get()) + } + #[inline(always)] #[instrument(skip_all, level = Level::DEBUG)] pub fn begin_read_tx(&self) -> Result<()> { @@ -1125,6 +1166,8 @@ impl Pager { if changed { // Someone else changed the database -> assume our page cache is invalid (this is default SQLite behavior, we can probably do better with more granular invalidation) self.clear_page_cache(false); + // Invalidate cached schema cookie to force re-read on next access + self.set_schema_cookie(None); } Ok(()) } @@ -2425,6 +2468,8 @@ impl Pager { ); } self.reset_internal_states(); + // Invalidate cached schema cookie since rollback may have restored the database schema cookie + self.set_schema_cookie(None); if schema_did_change { *connection.schema.write() = connection.db.clone_schema(); } @@ -2456,13 +2501,18 @@ impl Pager { pub fn with_header(&self, f: impl Fn(&DatabaseHeader) -> T) -> Result> { let header_ref = return_if_io!(HeaderRef::from_pager(self)); let header = header_ref.borrow(); + // Update cached schema cookie when reading header + self.set_schema_cookie(Some(header.schema_cookie.get())); Ok(IOResult::Done(f(header))) } pub fn with_header_mut(&self, f: impl Fn(&mut DatabaseHeader) -> T) -> Result> { let header_ref = return_if_io!(HeaderRefMut::from_pager(self)); let header = header_ref.borrow_mut(); - Ok(IOResult::Done(f(header))) + let result = f(header); + // Update cached schema cookie after modification + self.set_schema_cookie(Some(header.schema_cookie.get())); + Ok(IOResult::Done(result)) } pub fn is_encryption_ctx_set(&self) -> bool { diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 81a19ba76..47b166dbe 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2429,9 +2429,7 @@ pub fn op_transaction_inner( // Can only read header if page 1 has been allocated already // begin_write_tx that happens, but not begin_read_tx OpTransactionState::CheckSchemaCookie => { - let res = with_header(&pager, mv_store, program, |header| { - header.schema_cookie.get() - }); + let res = get_schema_cookie(&pager, mv_store, program); match res { Ok(IOResult::Done(header_schema_cookie)) => { if header_schema_cookie != *schema_cookie { @@ -10224,6 +10222,21 @@ where } } +fn get_schema_cookie( + pager: &Arc, + mv_store: Option<&Arc>, + program: &Program, +) -> Result> { + if let Some(mv_store) = mv_store { + let tx_id = program.connection.get_mv_tx_id(); + mv_store + .with_header(|header| header.schema_cookie.get(), tx_id.as_ref()) + .map(IOResult::Done) + } else { + pager.get_schema_cookie() + } +} + #[cfg(test)] mod tests { use rand::{Rng, RngCore}; From 8e1cec5104e4f1c6c3226b4858a0421c200a9609 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 19:30:43 +0400 Subject: [PATCH 370/428] Revert "alternative read_variant implementation" This reverts commit 68650cf594799d4c5a97de3670bfe157b5229698. --- core/storage/sqlite3_ondisk.rs | 85 +++++++++++++++++++++++----------- core/vdbe/execute.rs | 7 +-- 2 files changed, 62 insertions(+), 30 deletions(-) diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs index d62d7aa9f..190b94479 100644 --- a/core/storage/sqlite3_ondisk.rs +++ b/core/storage/sqlite3_ondisk.rs @@ -1486,40 +1486,71 @@ pub fn read_integer(buf: &[u8], serial_type: u8) -> Result { } } +/// Fast varint reader optimized for the common cases of 1-byte and 2-byte varints. +/// +/// This function is a performance-optimized version of `read_varint()` that handles +/// the most common varint cases inline before falling back to the full implementation. +/// It follows the same varint encoding as SQLite. +/// +/// # Optimized Cases +/// +/// - **Single-byte case**: Values 0-127 (0x00-0x7F) are returned immediately +/// - **Two-byte case**: Values 128-16383 (0x80-0x3FFF) are handled inline +/// - **Multi-byte case**: Larger values fall back to the full `read_varint()` implementation +/// +/// This function is similar to `sqlite3GetVarint32` +#[inline(always)] +pub fn read_varint_fast(buf: &[u8]) -> Result<(u64, usize)> { + // Fast path: Single-byte varint + if let Some(&first_byte) = buf.first() { + if first_byte & 0x80 == 0 { + return Ok((first_byte as u64, 1)); + } + } else { + crate::bail_corrupt_error!("Invalid varint"); + } + + // Fast path: Two-byte varint + if let Some(&second_byte) = buf.get(1) { + if second_byte & 0x80 == 0 { + let v = (((buf[0] & 0x7f) as u64) << 7) + (second_byte as u64); + return Ok((v, 2)); + } + } else { + crate::bail_corrupt_error!("Invalid varint"); + } + + //Fallback: Multi-byte varint + read_varint(buf) +} + #[inline(always)] pub fn read_varint(buf: &[u8]) -> Result<(u64, usize)> { let mut v: u64 = 0; - let mut i = 0; - let chunks = buf.chunks_exact(2); - for chunk in chunks { - let c1 = chunk[0]; - v = (v << 7) + (c1 & 0x7f) as u64; - i += 1; - if (c1 & 0x80) == 0 { - return Ok((v, i)); - } - let c2 = chunk[1]; - v = (v << 7) + (c2 & 0x7f) as u64; - i += 1; - if (c2 & 0x80) == 0 { - return Ok((v, i)); - } - if i == 8 { - break; + for i in 0..8 { + match buf.get(i) { + Some(c) => { + v = (v << 7) + (c & 0x7f) as u64; + if (c & 0x80) == 0 { + return Ok((v, i + 1)); + } + } + None => { + crate::bail_corrupt_error!("Invalid varint"); + } } } - match buf.get(i) { + match buf.get(8) { Some(&c) => { - if i < 8 && (c & 0x80) == 0 { - return Ok(((v << 7) + c as u64, i + 1)); - } else if i == 8 && (v >> 48) > 0 { - // Values requiring 9 bytes must have non-zero in the top 8 bits (value >= 1<<56). - // Since the final value is `(v<<8) + c`, the top 8 bits (v >> 48) must not be 0. - // If those are zero, this should be treated as corrupt. - // Perf? the comparison + branching happens only in parsing 9-byte varint which is rare. - return Ok(((v << 8) + c as u64, i + 1)); + // Values requiring 9 bytes must have non-zero in the top 8 bits (value >= 1<<56). + // Since the final value is `(v<<8) + c`, the top 8 bits (v >> 48) must not be 0. + // If those are zero, this should be treated as corrupt. + // Perf? the comparison + branching happens only in parsing 9-byte varint which is rare. + if (v >> 48) == 0 { + bail_corrupt_error!("Invalid varint"); } - bail_corrupt_error!("Invalid varint"); + v = (v << 8) + c as u64; + Ok((v, 9)) } None => { bail_corrupt_error!("Invalid varint"); diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index dadc7a063..f989d23f0 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -11,7 +11,7 @@ use crate::storage::btree::{ use crate::storage::database::DatabaseFile; use crate::storage::page_cache::PageCache; use crate::storage::pager::{AtomicDbState, CreateBTreeFlags, DbState}; -use crate::storage::sqlite3_ondisk::{read_varint, DatabaseHeader, PageSize}; +use crate::storage::sqlite3_ondisk::{read_varint_fast, DatabaseHeader, PageSize}; use crate::translate::collate::CollationSeq; use crate::types::{ compare_immutable, compare_records_generic, Extendable, IOCompletions, ImmutableRecord, @@ -1612,7 +1612,7 @@ pub fn op_column( let mut record_cursor = cursor.record_cursor_mut(); if record_cursor.offsets.is_empty() { - let (header_size, header_len_bytes) = read_varint(payload)?; + let (header_size, header_len_bytes) = read_varint_fast(payload)?; let header_size = header_size as usize; debug_assert!(header_size <= payload.len() && header_size <= 98307, "header_size: {header_size}, header_len_bytes: {header_len_bytes}, payload.len(): {}", payload.len()); @@ -1634,7 +1634,8 @@ pub fn op_column( while record_cursor.serial_types.len() <= target_column && parse_pos < record_cursor.header_size { - let (serial_type, varint_len) = read_varint(&payload[parse_pos..])?; + let (serial_type, varint_len) = + read_varint_fast(&payload[parse_pos..])?; record_cursor.serial_types.push(serial_type); parse_pos += varint_len; let data_size = match serial_type { From b32d22a2fdc35bcafa3b0babd58fb86ae1610244 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 20:20:54 +0400 Subject: [PATCH 371/428] Revert "move more possible option higher" This reverts commit c0fdaeb4755fa49b8fab928a566cd536b476c55f. --- core/vdbe/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 847f53004..3aec5900c 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -134,8 +134,8 @@ impl BranchOffset { /// Returns the offset value. Panics if the branch offset is a label or placeholder. pub fn as_offset_int(&self) -> InsnReference { match self { - BranchOffset::Offset(v) => *v, BranchOffset::Label(v) => unreachable!("Unresolved label: {}", v), + BranchOffset::Offset(v) => *v, BranchOffset::Placeholder => unreachable!("Unresolved placeholder"), } } From 53957b6d2214154f30acc7d63b398e23e1cf4a9f Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 20:21:00 +0400 Subject: [PATCH 372/428] Revert "simplify serial_type size calculation" This reverts commit f19c73822ee9afbb9c859fe6b7f3f9e0ee3bc576. --- core/vdbe/execute.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index f989d23f0..c671a81a7 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -1659,14 +1659,17 @@ pub fn op_column( 8 => 0, // CONST_INT1 9 => 0, + // BLOB + n if n >= 12 && n & 1 == 0 => (n - 12) >> 1, + // TEXT + n if n >= 13 && n & 1 == 1 => (n - 13) >> 1, // Reserved 10 | 11 => { return Err(LimboError::Corrupt(format!( "Reserved serial type: {serial_type}" ))) } - // BLOB or TEXT - n => (n - 12) / 2, + _ => unreachable!("Invalid serial type: {serial_type}"), } as usize; data_offset += data_size; record_cursor.offsets.push(data_offset); From 91ffb4e249e726593f1cfc726071fd0a2340f8cc Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 20:21:39 +0400 Subject: [PATCH 373/428] Revert "avoid allocations" This reverts commit dba195bdfa331ab3e3533031f96c1632517d24eb. --- core/types.rs | 34 +++++++++++----------------------- core/vdbe/execute.rs | 23 ++++++++++++++++------- core/vdbe/mod.rs | 13 +++++++------ 3 files changed, 34 insertions(+), 36 deletions(-) diff --git a/core/types.rs b/core/types.rs index 8183450fa..cb9c99082 100644 --- a/core/types.rs +++ b/core/types.rs @@ -219,12 +219,6 @@ pub enum ValueRef<'a> { Blob(&'a [u8]), } -impl<'a, 'b> From<&'b ValueRef<'a>> for ValueRef<'a> { - fn from(value: &'b ValueRef<'a>) -> Self { - *value - } -} - impl Debug for ValueRef<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -1820,15 +1814,12 @@ fn compare_records_int( /// 4. **Length comparison**: If strings are equal, compares lengths /// 5. **Remaining fields**: If first field is equal and more fields exist, /// delegates to `compare_records_generic()` with `skip=1` -fn compare_records_string<'a, T>( +fn compare_records_string( serialized: &ImmutableRecord, - unpacked: &'a [T], + unpacked: &[ValueRef], index_info: &IndexInfo, tie_breaker: std::cmp::Ordering, -) -> Result -where - ValueRef<'a>: From<&'a T>, -{ +) -> Result { turso_assert!( index_info.key_info.len() >= unpacked.len(), "index_info.key_info.len() < unpacked.len()" @@ -1856,7 +1847,7 @@ where return compare_records_generic(serialized, unpacked, index_info, 0, tie_breaker); } - let ValueRef::Text(rhs_text, _) = (&unpacked[0]).into() else { + let ValueRef::Text(rhs_text, _) = &unpacked[0] else { return compare_records_generic(serialized, unpacked, index_info, 0, tie_breaker); }; @@ -1935,16 +1926,13 @@ where /// The serialized and unpacked records do not have to contain the same number /// of fields. If all fields that appear in both records are equal, then /// `tie_breaker` is returned. -pub fn compare_records_generic<'a, T>( +pub fn compare_records_generic( serialized: &ImmutableRecord, - unpacked: &'a [T], + unpacked: &[ValueRef], index_info: &IndexInfo, skip: usize, tie_breaker: std::cmp::Ordering, -) -> Result -where - ValueRef<'a>: From<&'a T>, -{ +) -> Result { turso_assert!( index_info.key_info.len() >= unpacked.len(), "index_info.key_info.len() < unpacked.len()" @@ -1984,7 +1972,7 @@ where header_pos += bytes_read; let serial_type = SerialType::try_from(serial_type_raw)?; - let rhs_value = (&unpacked[field_idx]).into(); + let rhs_value = &unpacked[field_idx]; let lhs_value = match serial_type.kind() { SerialTypeKind::ConstInt0 => ValueRef::Integer(0), @@ -2006,14 +1994,14 @@ where } (ValueRef::Integer(lhs_int), ValueRef::Float(rhs_float)) => { - sqlite_int_float_compare(*lhs_int, rhs_float) + sqlite_int_float_compare(*lhs_int, *rhs_float) } (ValueRef::Float(lhs_float), ValueRef::Integer(rhs_int)) => { - sqlite_int_float_compare(rhs_int, *lhs_float).reverse() + sqlite_int_float_compare(*rhs_int, *lhs_float).reverse() } - _ => lhs_value.partial_cmp(&rhs_value).unwrap(), + _ => lhs_value.partial_cmp(rhs_value).unwrap(), }; let final_comparison = match index_info.key_info[field_idx].sort_order { diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index c671a81a7..247960d16 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -21,7 +21,7 @@ use crate::util::{ normalize_ident, rewrite_column_references_if_needed, rewrite_fk_parent_cols_if_self_ref, }; use crate::vdbe::insn::InsertFlags; -use crate::vdbe::TxnCleanup; +use crate::vdbe::{registers_to_ref_values, TxnCleanup}; use crate::vector::{vector32_sparse, vector_concat, vector_distance_jaccard, vector_slice}; use crate::{ error::{ @@ -3384,11 +3384,13 @@ pub fn op_idx_ge( let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { // Create the comparison record from registers + let values = + registers_to_ref_values(&state.registers[*start_reg..*start_reg + *num_regs]); let tie_breaker = get_tie_breaker_from_idx_comp_op(insn); let ord = compare_records_generic( - &idx_record, // The serialized record from the index - &state.registers[*start_reg..*start_reg + *num_regs], // The record built from registers - cursor.get_index_info(), // Sort order flags + &idx_record, // The serialized record from the index + &values, // The record built from registers + cursor.get_index_info(), // Sort order flags 0, tie_breaker, )?; @@ -3450,10 +3452,12 @@ pub fn op_idx_le( let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { + let values = + registers_to_ref_values(&state.registers[*start_reg..*start_reg + *num_regs]); let tie_breaker = get_tie_breaker_from_idx_comp_op(insn); let ord = compare_records_generic( &idx_record, - &state.registers[*start_reg..*start_reg + *num_regs], + &values, cursor.get_index_info(), 0, tie_breaker, @@ -3499,10 +3503,12 @@ pub fn op_idx_gt( let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { + let values = + registers_to_ref_values(&state.registers[*start_reg..*start_reg + *num_regs]); let tie_breaker = get_tie_breaker_from_idx_comp_op(insn); let ord = compare_records_generic( &idx_record, - &state.registers[*start_reg..*start_reg + *num_regs], + &values, cursor.get_index_info(), 0, tie_breaker, @@ -3548,10 +3554,13 @@ pub fn op_idx_lt( let cursor = cursor.as_btree_mut(); let pc = if let Some(idx_record) = return_if_io!(cursor.record()) { + let values = + registers_to_ref_values(&state.registers[*start_reg..*start_reg + *num_regs]); + let tie_breaker = get_tie_breaker_from_idx_comp_op(insn); let ord = compare_records_generic( &idx_record, - &state.registers[*start_reg..*start_reg + *num_regs], + &values, cursor.get_index_info(), 0, tie_breaker, diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 3aec5900c..b462b8f82 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -251,12 +251,6 @@ pub enum Register { Record(ImmutableRecord), } -impl<'a> From<&'a Register> for ValueRef<'a> { - fn from(value: &'a Register) -> Self { - value.get_value().as_ref() - } -} - impl Register { #[inline] pub fn is_null(&self) -> bool { @@ -1028,6 +1022,13 @@ fn make_record(registers: &[Register], start_reg: &usize, count: &usize) -> Immu ImmutableRecord::from_registers(regs, regs.len()) } +pub fn registers_to_ref_values<'a>(registers: &'a [Register]) -> Vec> { + registers + .iter() + .map(|reg| reg.get_value().as_ref()) + .collect() +} + #[instrument(skip(program), level = Level::DEBUG)] fn trace_insn(program: &Program, addr: InsnReference, insn: &Insn) { tracing::trace!( From a071d40d5f757e693bbac4e2fc036f99be3731c0 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 20:21:47 +0400 Subject: [PATCH 374/428] Revert "faster extend_from_slice" This reverts commit ae8adc044958145dfc3f1224c55f1b1b3be45422. --- core/types.rs | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/core/types.rs b/core/types.rs index cb9c99082..a7259f8b1 100644 --- a/core/types.rs +++ b/core/types.rs @@ -1061,15 +1061,7 @@ impl ImmutableRecord { } pub fn start_serialization(&mut self, payload: &[u8]) { - let blob = self.as_blob_mut(); - blob.reserve(payload.len()); - - let len = blob.len(); - unsafe { - let dst = blob.as_mut_ptr().add(len); - std::ptr::copy_nonoverlapping(payload.as_ptr(), dst, payload.len()); - blob.set_len(len + payload.len()); - } + self.as_blob_mut().extend_from_slice(payload); } pub fn invalidate(&mut self) { From 6aa67c6ea0d24f414df3950d4fd40778187c6860 Mon Sep 17 00:00:00 2001 From: Nikita Sivukhin Date: Wed, 22 Oct 2025 20:21:52 +0400 Subject: [PATCH 375/428] Revert "slight reorder of operations" This reverts commit 8e107ab18e5197e2d4d4ffcc7e0774d81e59c6e9. --- core/storage/btree.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 3124050cd..440c0de79 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -5001,26 +5001,30 @@ impl CursorTrait for BTreeCursor { first_overflow_page, .. }) => (payload, payload_size, first_overflow_page), - BTreeCell::IndexLeafCell(IndexLeafCell { - payload, - payload_size, - first_overflow_page, - }) => (payload, payload_size, first_overflow_page), BTreeCell::IndexInteriorCell(IndexInteriorCell { payload, payload_size, first_overflow_page, .. }) => (payload, payload_size, first_overflow_page), + BTreeCell::IndexLeafCell(IndexLeafCell { + payload, + first_overflow_page, + payload_size, + }) => (payload, payload_size, first_overflow_page), _ => unreachable!("unexpected page_type"), }; if let Some(next_page) = first_overflow_page { return_if_io!(self.process_overflow_read(payload, next_page, payload_size)) } else { - let mut record = self.get_immutable_record_or_create(); - let record = record.as_mut().unwrap(); - record.invalidate(); - record.start_serialization(payload); + self.get_immutable_record_or_create() + .as_mut() + .unwrap() + .invalidate(); + self.get_immutable_record_or_create() + .as_mut() + .unwrap() + .start_serialization(payload); self.record_cursor.borrow_mut().invalidate(); }; From 6557a41503ffb5a1ccc880dd0ed5ef1113282654 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:01:43 +0300 Subject: [PATCH 376/428] Refactor emit_fk_violation() to always issue a FkCounter instruction --- core/translate/fkeys.rs | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/core/translate/fkeys.rs b/core/translate/fkeys.rs index b8544a078..a545a94aa 100644 --- a/core/translate/fkeys.rs +++ b/core/translate/fkeys.rs @@ -212,19 +212,13 @@ pub fn build_index_affinity_string(idx: &Index, table: &BTreeTable) -> String { .collect() } -/// For deferred FKs: increment the global counter; for immediate FKs: halt with FK error. +/// Increment a foreign key violation counter; for deferred FKs, this is a global counter +/// on the connection; for immediate FKs, this is a per-statement counter in the program state. pub fn emit_fk_violation(program: &mut ProgramBuilder, fk: &ForeignKey) -> Result<()> { - if fk.deferred { - program.emit_insn(Insn::FkCounter { - increment_value: 1, - is_scope: false, - }); - } else { - program.emit_insn(Insn::Halt { - err_code: crate::error::SQLITE_CONSTRAINT_FOREIGNKEY, - description: "FOREIGN KEY constraint failed".to_string(), - }); - } + program.emit_insn(Insn::FkCounter { + increment_value: 1, + is_scope: !fk.deferred, + }); Ok(()) } @@ -549,16 +543,8 @@ fn emit_fk_parent_key_probe( let on_match = |p: &mut ProgramBuilder| -> Result<()> { match (is_deferred, pass) { // OLD key referenced by a child - (false, ParentProbePass::Old) => { - // Immediate FK: fail now. - emit_fk_violation(p, &fk_ref.fk)?; // HALT for immediate - } - (true, ParentProbePass::Old) => { - // Deferred FK: increment counter. - p.emit_insn(Insn::FkCounter { - increment_value: 1, - is_scope: false, - }); + (_, ParentProbePass::Old) => { + emit_fk_violation(p, &fk_ref.fk)?; } // NEW key referenced by a child (cancel one deferred violation) From d4a9797f79caa1fb6211990e10f8adbf62b9aa5f Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:04:36 +0300 Subject: [PATCH 377/428] Store two foreign key counters in ProgramState 1. The number of deferred FK violations when the statement started. When a statement subtransaction rolls back, the connection's deferred violation counter will be reset to this value. 2. The number of immediate FK violations that occurred during the statement. In practice we just need to know whether this number is nonzero, and if it is, the statement subtransaction will roll back. Statement subtransactions will be implemented in future commits. --- core/vdbe/execute.rs | 8 ++++++-- core/vdbe/mod.rs | 17 ++++++++++++++--- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 81a19ba76..b75cfad03 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -8592,7 +8592,9 @@ pub fn op_fk_counter( insn ); if *is_scope { - state.fk_scope_counter = state.fk_scope_counter.saturating_add(*increment_value); + state + .fk_immediate_violations_during_stmt + .fetch_add(*increment_value, Ordering::AcqRel); } else { // Transaction-level counter: add/subtract for deferred FKs. program @@ -8632,7 +8634,9 @@ pub fn op_fk_if_zero( let v = if !*is_scope { program.connection.get_deferred_foreign_key_violations() } else { - state.fk_scope_counter + state + .fk_immediate_violations_during_stmt + .load(Ordering::Acquire) }; state.pc = if v == 0 { diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index be44dd0f7..b94de5785 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -66,7 +66,7 @@ use std::{ collections::HashMap, num::NonZero, sync::{ - atomic::{AtomicI64, Ordering}, + atomic::{AtomicI64, AtomicIsize, Ordering}, Arc, }, task::Waker, @@ -319,7 +319,13 @@ pub struct ProgramState { /// This is used when statement in auto-commit mode reseted after previous uncomplete execution - in which case we may need to rollback transaction started on previous attempt /// Note, that MVCC transactions are always explicit - so they do not update auto_txn_cleanup marker pub(crate) auto_txn_cleanup: TxnCleanup, - fk_scope_counter: isize, + /// Number of deferred foreign key violations when the statement started. + /// When a statement subtransaction rolls back, the connection's deferred foreign key violations counter + /// is reset to this value. + fk_deferred_violations_when_stmt_started: AtomicIsize, + /// Number of immediate foreign key violations that occurred during the active statement. If nonzero, + /// the statement subtransactionwill roll back. + fk_immediate_violations_during_stmt: AtomicIsize, } // SAFETY: This needs to be audited for thread safety. @@ -371,7 +377,8 @@ impl ProgramState { op_checkpoint_state: OpCheckpointState::StartCheckpoint, view_delta_state: ViewDeltaCommitState::NotStarted, auto_txn_cleanup: TxnCleanup::None, - fk_scope_counter: 0, + fk_deferred_violations_when_stmt_started: AtomicIsize::new(0), + fk_immediate_violations_during_stmt: AtomicIsize::new(0), } } @@ -455,6 +462,10 @@ impl ProgramState { self.op_row_id_state = OpRowIdState::Start; self.view_delta_state = ViewDeltaCommitState::NotStarted; self.auto_txn_cleanup = TxnCleanup::None; + self.fk_immediate_violations_during_stmt + .store(0, Ordering::SeqCst); + self.fk_deferred_violations_when_stmt_started + .store(0, Ordering::SeqCst); } pub fn get_cursor(&mut self, cursor_id: CursorID) -> &mut Cursor { From ad802854378e1d347748a07244cfc3bf125fb3f9 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:11:37 +0300 Subject: [PATCH 378/428] Rename is_scope to deferred and invert respective boolean logic Much clearer name for what it is/does --- core/translate/fkeys.rs | 6 +++--- core/vdbe/execute.rs | 8 ++++---- core/vdbe/explain.rs | 8 ++++---- core/vdbe/insn.rs | 4 ++-- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/core/translate/fkeys.rs b/core/translate/fkeys.rs index a545a94aa..14f9b7102 100644 --- a/core/translate/fkeys.rs +++ b/core/translate/fkeys.rs @@ -16,12 +16,12 @@ use std::{collections::HashSet, num::NonZeroUsize, sync::Arc}; #[inline] pub fn emit_guarded_fk_decrement(program: &mut ProgramBuilder, label: BranchOffset) { program.emit_insn(Insn::FkIfZero { - is_scope: false, + deferred: true, target_pc: label, }); program.emit_insn(Insn::FkCounter { increment_value: -1, - is_scope: false, + deferred: true, }); } @@ -217,7 +217,7 @@ pub fn build_index_affinity_string(idx: &Index, table: &BTreeTable) -> String { pub fn emit_fk_violation(program: &mut ProgramBuilder, fk: &ForeignKey) -> Result<()> { program.emit_insn(Insn::FkCounter { increment_value: 1, - is_scope: !fk.deferred, + deferred: fk.deferred, }); Ok(()) } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index b75cfad03..1ba9e14ca 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -8587,11 +8587,11 @@ pub fn op_fk_counter( load_insn!( FkCounter { increment_value, - is_scope, + deferred, }, insn ); - if *is_scope { + if !*deferred { state .fk_immediate_violations_during_stmt .fetch_add(*increment_value, Ordering::AcqRel); @@ -8616,7 +8616,7 @@ pub fn op_fk_if_zero( ) -> Result { load_insn!( FkIfZero { - is_scope, + deferred, target_pc, }, insn @@ -8631,7 +8631,7 @@ pub fn op_fk_if_zero( state.pc = target_pc.as_offset_int(); return Ok(InsnFunctionStepResult::Step); } - let v = if !*is_scope { + let v = if *deferred { program.connection.get_deferred_foreign_key_violations() } else { state diff --git a/core/vdbe/explain.rs b/core/vdbe/explain.rs index 69434de17..4f09e2ea0 100644 --- a/core/vdbe/explain.rs +++ b/core/vdbe/explain.rs @@ -1804,19 +1804,19 @@ pub fn insn_to_row( 0, String::new(), ), - Insn::FkCounter{increment_value, is_scope } => ( + Insn::FkCounter{increment_value, deferred } => ( "FkCounter", *increment_value as i32, - *is_scope as i32, + *deferred as i32, 0, Value::build_text(""), 0, String::new(), ), - Insn::FkIfZero{target_pc, is_scope } => ( + Insn::FkIfZero{target_pc, deferred } => ( "FkIfZero", target_pc.as_debug_int(), - *is_scope as i32, + *deferred as i32, 0, Value::build_text(""), 0, diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index bf7bf89b3..c949d804f 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -1184,13 +1184,13 @@ pub enum Insn { // Otherwise, if P1 is zero, the statement counter is incremented (immediate foreign key constraints). FkCounter { increment_value: isize, - is_scope: bool, + deferred: bool, }, // This opcode tests if a foreign key constraint-counter is currently zero. If so, jump to instruction P2. Otherwise, fall through to the next instruction. // If P1 is non-zero, then the jump is taken if the database constraint-counter is zero (the one that counts deferred constraint violations). // If P1 is zero, the jump is taken if the statement constraint-counter is zero (immediate foreign key constraint violations). FkIfZero { - is_scope: bool, + deferred: bool, target_pc: BranchOffset, }, } From 459c01f93c7b655c4c7d2524d9c1919379b3c97e Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:14:26 +0300 Subject: [PATCH 379/428] Add subjournal module The subjournal is a temporary file where stmt subtransactions write an 'undo log' of pages before modifying them. If a stmt subtransaction rolls back, the pages are restored from the subjournal. --- core/storage/mod.rs | 1 + core/storage/subjournal.rs | 88 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 core/storage/subjournal.rs diff --git a/core/storage/mod.rs b/core/storage/mod.rs index ee5029ba0..f64d26385 100644 --- a/core/storage/mod.rs +++ b/core/storage/mod.rs @@ -22,6 +22,7 @@ pub(crate) mod pager; pub(super) mod slot_bitmap; pub(crate) mod sqlite3_ondisk; mod state_machines; +pub(crate) mod subjournal; #[allow(clippy::arc_with_non_send_sync)] pub(crate) mod wal; diff --git a/core/storage/subjournal.rs b/core/storage/subjournal.rs new file mode 100644 index 000000000..d174230a5 --- /dev/null +++ b/core/storage/subjournal.rs @@ -0,0 +1,88 @@ +use std::sync::Arc; + +use crate::{ + storage::sqlite3_ondisk::finish_read_page, Buffer, Completion, CompletionError, PageRef, Result, +}; + +#[derive(Clone)] +pub struct Subjournal { + file: Arc, +} + +impl Subjournal { + pub fn new(file: Arc) -> Self { + Self { file } + } + + pub fn size(&self) -> Result { + self.file.size() + } + + pub fn write_page( + &self, + offset: u64, + page_size: usize, + buffer: Arc, + c: Completion, + ) -> Result { + assert!( + buffer.len() == page_size + 4, + "buffer length should be page_size + 4 bytes for page id" + ); + self.file.pwrite(offset, buffer, c) + } + + pub fn read_page_number(&self, offset: u64, page_id_buffer: Arc) -> Result { + assert!( + page_id_buffer.len() == 4, + "page_id_buffer length should be 4 bytes" + ); + let c = Completion::new_read( + page_id_buffer, + move |res: Result<(Arc, i32), CompletionError>| { + let Ok((_buffer, _bytes_read)) = res else { + return; + }; + }, + ); + let c = self.file.pread(offset, c)?; + Ok(c) + } + + pub fn read_page( + &self, + offset: u64, + buffer: Arc, + page: PageRef, + page_size: usize, + ) -> Result { + assert!( + buffer.len() == page_size, + "buffer length should be page_size" + ); + let c = Completion::new_read( + buffer, + move |res: Result<(Arc, i32), CompletionError>| { + let Ok((buffer, bytes_read)) = res else { + return; + }; + assert!( + bytes_read == page_size as i32, + "bytes_read should be page_size" + ); + finish_read_page(page.get().id as usize, buffer, page.clone()); + }, + ); + let c = self.file.pread(offset, c)?; + Ok(c) + } + + pub fn truncate(&self, offset: u64) -> Result { + let c = Completion::new_trunc(move |res: Result| { + let Ok(_) = res else { + return; + }; + }); + self.file.truncate(offset, c) + } +} From 8b15a06a85a6db5b0c1b6e754648b6942942688e Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:16:24 +0300 Subject: [PATCH 380/428] Add Savepoint struct --- core/storage/pager.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index ddb791a0f..820781508 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -16,6 +16,7 @@ use crate::{ }; use crate::{io_yield_one, IOContext}; use parking_lot::RwLock; +use roaring::RoaringBitmap; use std::cell::{RefCell, UnsafeCell}; use std::collections::HashSet; use std::hash; @@ -464,6 +465,33 @@ enum BtreeCreateVacuumFullState { PtrMapPut { allocated_page_id: u32 }, } +pub struct Savepoint { + /// Start offset of this savepoint in the subjournal. + start_offset: AtomicU64, + /// Current write offset in the subjournal. + write_offset: AtomicU64, + /// Bitmap of page numbers that are dirty in the savepoint. + page_bitmap: RwLock, +} + +impl Savepoint { + pub fn new(subjournal_offset: u64) -> Self { + Self { + start_offset: AtomicU64::new(subjournal_offset), + write_offset: AtomicU64::new(subjournal_offset), + page_bitmap: RwLock::new(RoaringBitmap::new()), + } + } + + pub fn add_dirty_page(&self, page_num: u32) { + self.page_bitmap.write().insert(page_num); + } + + pub fn has_dirty_page(&self, page_num: u32) -> bool { + self.page_bitmap.read().contains(page_num) + } +} + /// The pager interface implements the persistence layer by providing access /// to pages of the database file, including caching, concurrency control, and /// transaction management. From 2a03c1a6173c364984f943d8a4f676e98c4dae13 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:18:26 +0300 Subject: [PATCH 381/428] Add subjournal and savepoints to Pager struct --- core/storage/pager.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 820781508..e5f5757dd 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1,4 +1,5 @@ use crate::storage::database::DatabaseFile; +use crate::storage::subjournal::Subjournal; use crate::storage::wal::IOV_MAX; use crate::storage::{ buffer_pool::BufferPool, @@ -508,7 +509,8 @@ pub struct Pager { /// I/O interface for input/output operations. pub io: Arc, dirty_pages: Arc>>>, - + subjournal: RwLock>, + savepoints: Arc>>, commit_info: RwLock, checkpoint_state: RwLock, syncing: Arc, @@ -626,6 +628,8 @@ impl Pager { dirty_pages: Arc::new(RwLock::new(HashSet::with_hasher( hash::BuildHasherDefault::new(), ))), + subjournal: RwLock::new(None), + savepoints: Arc::new(RwLock::new(Vec::new())), commit_info: RwLock::new(CommitInfo { result: None, completions: Vec::new(), From 77be1f08aee1cc5cf163a8baf51accfca5faba95 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:20:50 +0300 Subject: [PATCH 382/428] Pager: add open_subjournal method --- core/storage/pager.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index e5f5757dd..ceedd4d16 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -659,6 +659,23 @@ impl Pager { enable_encryption: AtomicBool::new(false), }) } + /// Open the subjournal if not yet open. + /// The subjournal is a file that is used to store the "before images" of pages for the + /// current savepoint. If the savepoint is rolled back, the pages can be restored from the subjournal. + /// + /// Currently uses MemoryIO, but should eventually be backed by temporary on-disk files. + pub fn open_subjournal(&self) -> Result<()> { + if self.subjournal.read().is_some() { + return Ok(()); + } + use crate::MemoryIO; + + let db_file_io = Arc::new(MemoryIO::new()); + let file = db_file_io.open_file("subjournal", OpenFlags::Create, false)?; + let db_file = Subjournal::new(file); + *self.subjournal.write() = Some(db_file); + Ok(()) + } #[cfg(feature = "test_helper")] pub fn get_pending_byte() -> u32 { From aa1eebbfcb3b60c4c2802bd2249886c9322ff6c4 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:24:01 +0300 Subject: [PATCH 383/428] Pager: add open_savepoint() and release_savepoint() methods --- core/storage/pager.rs | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index ceedd4d16..58255b61f 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -676,6 +676,38 @@ impl Pager { *self.subjournal.write() = Some(db_file); Ok(()) } + pub fn open_savepoint(&self) -> Result<()> { + self.open_subjournal()?; + let subjournal_offset = self.subjournal.read().as_ref().unwrap().size()?; + // Currently as we only have anonymous savepoints opened at the start of a statement, + // the subjournal offset should always be 0 as we should only have max 1 savepoint + // opened at any given time. + turso_assert!(subjournal_offset == 0, "subjournal offset should be 0"); + let savepoint = Savepoint::new(subjournal_offset); + let mut savepoints = self.savepoints.write(); + turso_assert!(savepoints.is_empty(), "savepoints should be empty"); + savepoints.push(savepoint); + Ok(()) + } + + /// Release i.e. commit the current savepoint. This basically just means removing it. + pub fn release_savepoint(&self) -> Result<()> { + let mut savepoints = self.savepoints.write(); + let Some(savepoint) = savepoints.pop() else { + return Ok(()); + }; + let subjournal = self.subjournal.read(); + let Some(subjournal) = subjournal.as_ref() else { + return Ok(()); + }; + let start_offset = savepoint.start_offset.load(Ordering::SeqCst); + // Same reason as in open_savepoint, the start offset should always be 0 as we should only have max 1 savepoint + // opened at any given time. + turso_assert!(start_offset == 0, "start offset should be 0"); + let c = subjournal.truncate(start_offset as u64)?; + assert!(c.succeeded(), "memory IO should complete immediately"); + Ok(()) + } #[cfg(feature = "test_helper")] pub fn get_pending_byte() -> u32 { From e8226c0e4b10992a4cdd447fc971ebea4b9a8ff9 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:24:59 +0300 Subject: [PATCH 384/428] Pager: add clear_savepoint() method --- core/storage/pager.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 58255b61f..697a32d9b 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -709,6 +709,18 @@ impl Pager { Ok(()) } + pub fn clear_savepoints(&self) -> Result<()> { + *self.savepoints.write() = Vec::new(); + let subjournal = self.subjournal.read(); + let Some(subjournal) = subjournal.as_ref() else { + return Ok(()); + }; + let c = subjournal.truncate(0)?; + assert!(c.succeeded(), "memory IO should complete immediately"); + Ok(()) + } + } + #[cfg(feature = "test_helper")] pub fn get_pending_byte() -> u32 { PENDING_BYTE.load(Ordering::Relaxed) From 5b01605fae8208bbdbcee13c148df87ae68060c0 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:27:01 +0300 Subject: [PATCH 385/428] Pager: add subjournal_page_if_required() method --- core/storage/pager.rs | 75 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 697a32d9b..88d125dfa 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -662,7 +662,7 @@ impl Pager { /// Open the subjournal if not yet open. /// The subjournal is a file that is used to store the "before images" of pages for the /// current savepoint. If the savepoint is rolled back, the pages can be restored from the subjournal. - /// + /// /// Currently uses MemoryIO, but should eventually be backed by temporary on-disk files. pub fn open_subjournal(&self) -> Result<()> { if self.subjournal.read().is_some() { @@ -676,6 +676,79 @@ impl Pager { *self.subjournal.write() = Some(db_file); Ok(()) } + + /// Write page to subjournal if the current savepoint does not currently + /// contain an an entry for it. In case of a statement-level rollback, + /// the page image can be restored from the subjournal. + /// + /// A buffer of length page_size + 4 bytes is allocated and the page id + /// is written to the beginning of the buffer. The rest of the buffer is filled with the page contents. + pub fn subjournal_page_if_required(&self, page: &Page) -> Result<()> { + if self.subjournal.read().is_none() { + return Ok(()); + } + let write_offset = { + let savepoints = self.savepoints.read(); + let Some(cur_savepoint) = savepoints.last() else { + return Ok(()); + }; + if cur_savepoint.has_dirty_page(page.get().id as u32) { + return Ok(()); + } + cur_savepoint.write_offset.load(Ordering::SeqCst) + }; + let page_id = page.get().id; + let page_size = self.page_size.load(Ordering::SeqCst) as usize; + let buffer = { + let page_id = page.get().id as u32; + let contents = page.get_contents(); + let buffer = self.buffer_pool.allocate(page_size + 4); + let contents_buffer = contents.buffer.as_slice(); + turso_assert!( + contents_buffer.len() == page_size, + "contents buffer length should be equal to page size" + ); + + buffer.as_mut_slice()[0..4].copy_from_slice(&page_id.to_be_bytes()); + buffer.as_mut_slice()[4..4 + page_size].copy_from_slice(&contents_buffer); + + Arc::new(buffer) + }; + + let savepoints = self.savepoints.clone(); + + let write_complete = { + let buf_copy = buffer.clone(); + Box::new(move |res: Result| { + let Ok(bytes_written) = res else { + return; + }; + let buf_copy = buf_copy.clone(); + let buf_len = buf_copy.len(); + + turso_assert!( + bytes_written == buf_len as i32, + "wrote({bytes_written}) != expected({buf_len})" + ); + + let savepoints = savepoints.read(); + let cur_savepoint = savepoints.last().unwrap(); + cur_savepoint.add_dirty_page(page_id as u32); + cur_savepoint + .write_offset + .fetch_add(page_size as u64 + 4, Ordering::SeqCst); + }) + }; + let c = Completion::new_write(write_complete); + + let subjournal = self.subjournal.read(); + let subjournal = subjournal.as_ref().unwrap(); + + let c = subjournal.write_page(write_offset, page_size, buffer.clone(), c)?; + assert!(c.succeeded(), "memory IO should complete immediately"); + Ok(()) + } + pub fn open_savepoint(&self) -> Result<()> { self.open_subjournal()?; let subjournal_offset = self.subjournal.read().as_ref().unwrap().size()?; From 86d5ad681579b5fc829ddcedc6f3867f8d2c24c1 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 11:29:52 +0300 Subject: [PATCH 386/428] pager: allow upserted cached page not to be dirty --- core/storage/btree.rs | 2 +- core/storage/pager.rs | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 0c5279608..063f41154 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -3482,7 +3482,7 @@ impl BTreeCursor { if *new_id != page.get().id { page.get().id = *new_id; self.pager - .update_dirty_loaded_page_in_cache(*new_id, page.clone())?; + .upsert_page_in_cache(*new_id, page.clone(), true)?; } } diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 88d125dfa..cbbcb9dcf 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -2521,16 +2521,19 @@ impl Pager { } } - pub fn update_dirty_loaded_page_in_cache( + pub fn upsert_page_in_cache( &self, id: usize, page: PageRef, + dirty_page_must_exist: bool, ) -> Result<(), LimboError> { let mut cache = self.page_cache.write(); let page_key = PageCacheKey::new(id); // FIXME: use specific page key for writer instead of max frame, this will make readers not conflict - assert!(page.is_dirty()); + if dirty_page_must_exist { + assert!(page.is_dirty()); + } cache.upsert_page(page_key, page.clone()).map_err(|e| { LimboError::InternalError(format!( "Failed to insert loaded page {id} into cache: {e:?}" From a19c5c22ac62f30e433d6bcb337e377ea06d6ba8 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:00:01 +0300 Subject: [PATCH 387/428] Pager: add rollback_to_newest_savepoint() method --- core/storage/pager.rs | 68 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index cbbcb9dcf..3d50d7943 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -792,6 +792,74 @@ impl Pager { assert!(c.succeeded(), "memory IO should complete immediately"); Ok(()) } + + /// Rollback to the newest savepoint. This basically just means reading the subjournal from the start offset + /// of the savepoint to the end of the subjournal and restoring the page images to the page cache. + pub fn rollback_to_newest_savepoint(&self) -> Result<()> { + let subjournal = self.subjournal.read(); + let Some(subjournal) = subjournal.as_ref() else { + return Ok(()); + }; + let mut savepoints = self.savepoints.write(); + let Some(savepoint) = savepoints.pop() else { + return Ok(()); + }; + let journal_start_offset = savepoint.start_offset.load(Ordering::SeqCst); + + let mut rollback_bitset = RoaringBitmap::new(); + + // Read the subjournal starting from start offset, first reading 4 bytes to get page id, then if rollback_bitset already has the page, skip reading the page + // and just advance the offset. otherwise read the page and add the page id to the rollback_bitset + put the page image into the page cache + let mut current_offset = journal_start_offset; + let page_size = self.page_size.load(Ordering::SeqCst) as u64; + let journal_end_offset = savepoint.write_offset.load(Ordering::SeqCst); + + while current_offset < journal_end_offset { + // Read 4 bytes for page id + let page_id_buffer = Arc::new(self.buffer_pool.allocate(4)); + let c = subjournal.read_page_number(current_offset, page_id_buffer.clone())?; + assert!(c.succeeded(), "memory IO should complete immediately"); + let page_id = u32::from_be_bytes(page_id_buffer.as_slice()[0..4].try_into().unwrap()); + current_offset += 4; + + // Check if we've already rolled back this page + if rollback_bitset.contains(page_id) { + // Skip reading the page, just advance offset + current_offset += page_size; + continue; + } + + // Read the page data + let page_buffer = Arc::new(self.buffer_pool.allocate(page_size as usize)); + let page = Arc::new(Page::new(page_id as i64)); + let c = subjournal.read_page( + current_offset, + page_buffer.clone(), + page.clone(), + page_size as usize, + )?; + assert!(c.succeeded(), "memory IO should complete immediately"); + current_offset += page_size; + + // Add page to rollback bitset + rollback_bitset.insert(page_id); + + // Put the page image into the page cache + self.upsert_page_in_cache(page_id as usize, page, false)?; + } + + let truncate_completion = self + .subjournal + .read() + .as_ref() + .unwrap() + .truncate(journal_start_offset)?; + assert!( + truncate_completion.succeeded(), + "memory IO should complete immediately" + ); + + Ok(()) } #[cfg(feature = "test_helper")] From f4af7c224204656671a3e69b41fd80c91b5ffc92 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:00:42 +0300 Subject: [PATCH 388/428] Pager: add begin_statement() method --- core/storage/pager.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 3d50d7943..79f8b7efc 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -659,6 +659,13 @@ impl Pager { enable_encryption: AtomicBool::new(false), }) } + + pub fn begin_statement(&self) -> Result<()> { + self.open_subjournal()?; + self.open_savepoint()?; + Ok(()) + } + /// Open the subjournal if not yet open. /// The subjournal is a file that is used to store the "before images" of pages for the /// current savepoint. If the savepoint is rolled back, the pages can be restored from the subjournal. From 97177dae02b88b6bb776bf7e365323290a0b25dd Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:00:58 +0300 Subject: [PATCH 389/428] add missing imports --- core/storage/pager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 79f8b7efc..2b4e679c7 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -15,7 +15,7 @@ use crate::{ io::CompletionGroup, return_if_io, turso_assert, types::WalFrameInfo, Completion, Connection, IOResult, LimboError, Result, TransactionState, }; -use crate::{io_yield_one, IOContext}; +use crate::{io_yield_one, CompletionError, IOContext, OpenFlags, IO}; use parking_lot::RwLock; use roaring::RoaringBitmap; use std::cell::{RefCell, UnsafeCell}; From a8cf8e45940f4d488d0d60b1b8ae9676b703085f Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:05:48 +0300 Subject: [PATCH 390/428] Pager: subjournal page if required when it's marked as dirty --- core/storage/btree.rs | 38 +++++++++++++++++++++++--------------- core/storage/pager.rs | 24 +++++++++++++----------- 2 files changed, 36 insertions(+), 26 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 063f41154..875746039 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -1049,7 +1049,13 @@ impl BTreeCursor { local_amount = local_size as u32 - offset; } if is_write { - self.write_payload_to_page(offset, local_amount, payload, buffer, page.clone()); + self.write_payload_to_page( + offset, + local_amount, + payload, + buffer, + page.clone(), + )?; } else { self.read_payload_from_page(offset, local_amount, payload, buffer); } @@ -1175,7 +1181,7 @@ impl BTreeCursor { page_payload, buffer, page.clone(), - ); + )?; } else { self.read_payload_from_page( payload_offset as u32, @@ -1247,13 +1253,14 @@ impl BTreeCursor { payload: &[u8], buffer: &mut [u8], page: PageRef, - ) { - self.pager.add_dirty(&page); + ) -> Result<()> { + self.pager.add_dirty(&page)?; // SAFETY: This is safe as long as the page is not evicted from the cache. let payload_mut = unsafe { std::slice::from_raw_parts_mut(payload.as_ptr() as *mut u8, payload.len()) }; payload_mut[payload_offset as usize..payload_offset as usize + num_bytes as usize] .copy_from_slice(&buffer[..num_bytes as usize]); + Ok(()) } /// Check if any ancestor pages still have cells to iterate. @@ -2275,7 +2282,7 @@ impl BTreeCursor { // get page and find cell let cell_idx = { - self.pager.add_dirty(&page); + self.pager.add_dirty(&page)?; self.stack.current_cell_index() }; if cell_idx == -1 { @@ -2643,8 +2650,8 @@ impl BTreeCursor { usable_space, )?; parent_contents.write_rightmost_ptr(new_rightmost_leaf.get().id as u32); - self.pager.add_dirty(parent); - self.pager.add_dirty(&new_rightmost_leaf); + self.pager.add_dirty(parent)?; + self.pager.add_dirty(&new_rightmost_leaf)?; // Continue balance from the parent page (inserting the new divider cell may have overflowed the parent) self.stack.pop(); @@ -2721,7 +2728,7 @@ impl BTreeCursor { overflow_cell.index ); } - self.pager.add_dirty(parent_page); + self.pager.add_dirty(parent_page)?; let parent_contents = parent_page.get_contents(); let page_to_balance_idx = self.stack.current_cell_index() as usize; @@ -2824,7 +2831,7 @@ impl BTreeCursor { } Ok((page, c)) => { // mark as dirty - self.pager.add_dirty(&page); + self.pager.add_dirty(&page)?; pages_to_balance[i].replace(page); if let Some(c) = c { group.add(&c); @@ -4678,7 +4685,7 @@ impl BTreeCursor { destroy_info.state = DestroyState::ProcessPage; } else { if keep_root { - self.clear_root(&page); + self.clear_root(&page)?; } else { return_if_io!(self.pager.free_page(Some(page), page_id)); } @@ -4693,7 +4700,7 @@ impl BTreeCursor { } } - fn clear_root(&mut self, root_page: &PageRef) { + fn clear_root(&mut self, root_page: &PageRef) -> Result<()> { let page_ref = root_page.get(); let contents = page_ref.contents.as_ref().unwrap(); @@ -4702,8 +4709,9 @@ impl BTreeCursor { PageType::IndexLeaf | PageType::IndexInterior => PageType::IndexLeaf, }; - self.pager.add_dirty(root_page); + self.pager.add_dirty(root_page)?; btree_init_page(root_page, page_type, 0, self.pager.usable_space()); + Ok(()) } pub fn overwrite_cell( @@ -5072,7 +5080,7 @@ impl CursorTrait for BTreeCursor { match delete_state { DeleteState::Start => { let page = self.stack.top_ref(); - self.pager.add_dirty(page); + self.pager.add_dirty(page)?; if matches!( page.get_contents().page_type(), PageType::TableLeaf | PageType::TableInterior @@ -5269,8 +5277,8 @@ impl CursorTrait for BTreeCursor { let leaf_page = self.stack.top_ref(); - self.pager.add_dirty(page); - self.pager.add_dirty(leaf_page); + self.pager.add_dirty(page)?; + self.pager.add_dirty(leaf_page)?; // Step 2: Replace the cell in the parent (interior) page. { diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 2b4e679c7..42d9a9acb 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -117,7 +117,7 @@ impl HeaderRefMut { "incorrect header page id" ); - pager.add_dirty(&page); + pager.add_dirty(&page)?; *pager.header_ref_state.write() = HeaderRefState::Start; break Ok(IOResult::Done(Self(page))); } @@ -1118,7 +1118,7 @@ impl Pager { ptrmap_page.get().id == ptrmap_pg_no, "ptrmap page has unexpected number" ); - self.add_dirty(&ptrmap_page); + self.add_dirty(&ptrmap_page)?; self.vacuum_state.write().ptrmap_put_state = PtrMapPutState::Start; break Ok(IOResult::Done(())); } @@ -1574,11 +1574,13 @@ impl Pager { Ok(page_cache.resize(capacity)) } - pub fn add_dirty(&self, page: &Page) { + pub fn add_dirty(&self, page: &Page) -> Result<()> { + self.subjournal_page_if_required(page)?; // TODO: check duplicates? let mut dirty_pages = self.dirty_pages.write(); dirty_pages.insert(page.get().id); page.set_dirty(); + Ok(()) } pub fn wal_state(&self) -> Result { @@ -2235,7 +2237,7 @@ impl Pager { trunk_page.get().id == trunk_page_id as usize, "trunk page has unexpected id" ); - self.add_dirty(&trunk_page); + self.add_dirty(&trunk_page)?; trunk_page_contents.write_u32_no_offset( TRUNK_PAGE_LEAF_COUNT_OFFSET, @@ -2255,7 +2257,7 @@ impl Pager { turso_assert!(page.is_loaded(), "page should be loaded"); // If we get here, need to make this page a new trunk turso_assert!(page.get().id == page_id, "page has unexpected id"); - self.add_dirty(page); + self.add_dirty(page)?; let trunk_page_id = header.freelist_trunk_page.get(); @@ -2395,7 +2397,7 @@ impl Pager { // we will allocate a ptrmap page, so increment size new_db_size += 1; let page = allocate_new_page(new_db_size as i64, &self.buffer_pool, 0); - self.add_dirty(&page); + self.add_dirty(&page)?; let page_key = PageCacheKey::new(page.get().id as usize); let mut cache = self.page_cache.write(); cache.insert(page_key, page.clone())?; @@ -2471,7 +2473,7 @@ impl Pager { // and update the database's first freelist trunk page to the next trunk page. header.freelist_trunk_page = next_trunk_page_id.into(); header.freelist_pages = (header.freelist_pages.get() - 1).into(); - self.add_dirty(trunk_page); + self.add_dirty(trunk_page)?; // zero out the page turso_assert!( trunk_page.get_contents().overflow_cells.is_empty(), @@ -2503,7 +2505,7 @@ impl Pager { leaf_page.get().id ); let page_contents = trunk_page.get_contents(); - self.add_dirty(leaf_page); + self.add_dirty(leaf_page)?; // zero out the page turso_assert!( leaf_page.get_contents().overflow_cells.is_empty(), @@ -2541,7 +2543,7 @@ impl Pager { FREELIST_TRUNK_OFFSET_LEAF_COUNT, remaining_leaves_count as u32, ); - self.add_dirty(trunk_page); + self.add_dirty(trunk_page)?; header.freelist_pages = (header.freelist_pages.get() - 1).into(); let leaf_page = leaf_page.clone(); @@ -2555,7 +2557,7 @@ impl Pager { if Some(new_db_size) == self.pending_byte_page_id() { let richard_hipp_special_page = allocate_new_page(new_db_size as i64, &self.buffer_pool, 0); - self.add_dirty(&richard_hipp_special_page); + self.add_dirty(&richard_hipp_special_page)?; let page_key = PageCacheKey::new(richard_hipp_special_page.get().id); { let mut cache = self.page_cache.write(); @@ -2579,7 +2581,7 @@ impl Pager { let page = allocate_new_page(new_db_size as i64, &self.buffer_pool, 0); { // setup page and add to cache - self.add_dirty(&page); + self.add_dirty(&page)?; let page_key = PageCacheKey::new(page.get().id as usize); { From 25f8ba002593e6185418a87d019594f324070e29 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:06:53 +0300 Subject: [PATCH 391/428] Pager: clear savepoints when tx rolls back --- core/storage/pager.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 42d9a9acb..285684a72 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1415,6 +1415,8 @@ impl Pager { }; tracing::trace!("rollback_tx(schema_did_change={})", schema_did_change); if is_write { + self.clear_savepoints() + .expect("in practice, clear_savepoints() should never fail as it uses memory IO"); wal.borrow().end_write_tx(); } wal.borrow().end_read_tx(); From 734eeb5bab47b6006c7d2739e5a81d5ed0ce0c52 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:07:39 +0300 Subject: [PATCH 392/428] VDBE: constraint errors do not cause a tx rollback by default --- core/vdbe/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index b94de5785..6393ac335 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -1004,6 +1004,10 @@ impl Program { Some(LimboError::TableLocked) => {} // Busy errors do not cause a rollback. Some(LimboError::Busy) => {} + // Constraint errors do not cause a rollback of the transaction by default; + // Instead individual statement subtransactions will roll back and these are handled in op_auto_commit + // and op_halt. + Some(LimboError::Constraint(_)) => {} _ => { if *cleanup != TxnCleanup::None || err.is_some() { if let Some(mv_store) = mv_store { From f0548c280f92375517b8549051d3220bb0ef0cbc Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:08:48 +0300 Subject: [PATCH 393/428] ProgramState: add begin_statement() and end_statement() --- core/vdbe/mod.rs | 56 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 6393ac335..09870ba8d 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -475,6 +475,62 @@ impl ProgramState { .as_mut() .unwrap_or_else(|| panic!("cursor id {cursor_id} is None")) } + + /// Begin a statement subtransaction. + pub fn begin_statement( + &mut self, + connection: &Connection, + pager: &Arc, + write: bool, + ) -> Result<()> { + // Store the deferred foreign key violations counter at the start of the statement. + // This is used to ensure that if an interactive transaction had deferred FK violations and a statement subtransaction rolls back, + // the deferred FK violations are not lost. + self.fk_deferred_violations_when_stmt_started.store( + connection.fk_deferred_violations.load(Ordering::Acquire), + Ordering::SeqCst, + ); + // Reset the immediate foreign key violations counter to 0. If this is nonzero when the statement completes, the statement subtransaction will roll back. + self.fk_immediate_violations_during_stmt + .store(0, Ordering::SeqCst); + if write { + pager.begin_statement()?; + } + Ok(()) + } + + /// End a statement subtransaction. + pub fn end_statement( + &mut self, + connection: &Connection, + pager: &Arc, + end_statement: EndStatement, + ) -> Result<()> { + match end_statement { + EndStatement::ReleaseSavepoint => pager.release_savepoint(), + EndStatement::RollbackSavepoint => { + pager.rollback_to_newest_savepoint()?; + // Reset the deferred foreign key violations counter to the value it had at the start of the statement. + // This is used to ensure that if an interactive transaction had deferred FK violations, they are not lost. + connection.fk_deferred_violations.store( + self.fk_deferred_violations_when_stmt_started + .load(Ordering::Acquire), + Ordering::SeqCst, + ); + Ok(()) + } + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +/// Action to take at the end of a statement subtransaction. +pub enum EndStatement { + /// Release (commit) the savepoint -- effectively removing the savepoint as it is no longer needed for undo purposes. + ReleaseSavepoint, + /// Rollback (abort) to the newest savepoint: read pages from the subjournal and restore them to the page cache. + /// This is used to undo the changes made by the statement. + RollbackSavepoint, } impl Register { From 904cbe535d366c69f4a5001ef35e1dc393d4fea0 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:12:02 +0300 Subject: [PATCH 394/428] VDBE: handle subtransaction commits/aborts in op_halt --- core/vdbe/execute.rs | 52 ++++++++++++++++++++++++++++++-------------- 1 file changed, 36 insertions(+), 16 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 1ba9e14ca..442e6857d 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -1,5 +1,5 @@ #![allow(unused_variables)] -use crate::error::{SQLITE_CONSTRAINT_FOREIGNKEY, SQLITE_CONSTRAINT_UNIQUE}; +use crate::error::SQLITE_CONSTRAINT_UNIQUE; use crate::function::AlterTableFunc; use crate::mvcc::database::CheckpointStateMachine; use crate::numeric::{NullableInteger, Numeric}; @@ -21,7 +21,7 @@ use crate::util::{ normalize_ident, rewrite_column_references_if_needed, rewrite_fk_parent_cols_if_self_ref, }; use crate::vdbe::insn::InsertFlags; -use crate::vdbe::{registers_to_ref_values, TxnCleanup}; +use crate::vdbe::{registers_to_ref_values, EndStatement, TxnCleanup}; use crate::vector::{vector32_sparse, vector_concat, vector_distance_jaccard, vector_slice}; use crate::{ error::{ @@ -2149,8 +2149,8 @@ pub fn halt( description: &str, ) -> Result { if err_code > 0 { - // invalidate page cache in case of error - pager.clear_page_cache(false); + // Any non-FK constraint violation causes the statement subtransaction to roll back. + state.end_statement(&program.connection, &pager, EndStatement::RollbackSavepoint)?; } match err_code { 0 => {} @@ -2169,9 +2169,6 @@ pub fn halt( "UNIQUE constraint failed: {description} (19)" ))); } - SQLITE_CONSTRAINT_FOREIGNKEY => { - return Err(LimboError::Constraint(format!("{description} (19)"))); - } _ => { return Err(LimboError::Constraint(format!( "undocumented halt error code {description}" @@ -2181,23 +2178,46 @@ pub fn halt( let auto_commit = program.connection.auto_commit.load(Ordering::SeqCst); tracing::trace!("halt(auto_commit={})", auto_commit); + + // Check for immediate foreign key violations. + // Any immediate violation causes the statement subtransaction to roll back. + if program.connection.foreign_keys_enabled() + && state + .fk_immediate_violations_during_stmt + .load(Ordering::Acquire) + > 0 + { + state.end_statement(&program.connection, &pager, EndStatement::RollbackSavepoint)?; + return Err(LimboError::Constraint( + "foreign key constraint failed".to_string(), + )); + } + if auto_commit { - // In autocommit mode, a statement that leaves deferred violations must fail here. - if program.connection.foreign_keys_enabled() - && program + // In autocommit mode, a statement that leaves deferred violations must fail here, + // and it also ends the transaction. + if program.connection.foreign_keys_enabled() { + let deferred_violations = program .connection .fk_deferred_violations - .swap(0, Ordering::AcqRel) - > 0 - { - return Err(LimboError::Constraint( - "foreign key constraint failed".to_string(), - )); + .swap(0, Ordering::AcqRel); + if deferred_violations > 0 { + pager.rollback_tx(&program.connection); + program.connection.set_tx_state(TransactionState::None); + program.connection.auto_commit.store(true, Ordering::SeqCst); + return Err(LimboError::Constraint( + "foreign key constraint failed".to_string(), + )); + } } + state.end_statement(&program.connection, &pager, EndStatement::ReleaseSavepoint)?; program .commit_txn(pager.clone(), state, mv_store, false) .map(Into::into) } else { + // Even if deferred violations are present, the statement subtransaction completes successfully when + // it is part of an interactive transaction. + state.end_statement(&program.connection, &pager, EndStatement::ReleaseSavepoint)?; Ok(InsnFunctionStepResult::Done) } } From 086ba8c9460d8839d4dcdcaa96e9e184d779d092 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:13:06 +0300 Subject: [PATCH 395/428] VDBE: begin statement subtransaction in op_transaction --- core/vdbe/execute.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 442e6857d..85eb03ef3 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2471,6 +2471,9 @@ pub fn op_transaction_inner( } } + let write = matches!(tx_mode, TransactionMode::Write); + state.begin_statement(&program.connection, &pager, write)?; + state.pc += 1; state.op_transaction_state = OpTransactionState::Start; return Ok(InsnFunctionStepResult::Step); From 97aad78b3fd2a879276cf2e91a722a7ff75db71c Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:14:02 +0300 Subject: [PATCH 396/428] Allow dead code - SQLITE_CONSTRAINT_FOREIGNKEY is currently unused --- core/error.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/core/error.rs b/core/error.rs index dd76fddfc..c5bb811db 100644 --- a/core/error.rs +++ b/core/error.rs @@ -163,6 +163,7 @@ impl From for LimboError { pub const SQLITE_CONSTRAINT: usize = 19; pub const SQLITE_CONSTRAINT_PRIMARYKEY: usize = SQLITE_CONSTRAINT | (6 << 8); +#[allow(dead_code)] pub const SQLITE_CONSTRAINT_FOREIGNKEY: usize = SQLITE_CONSTRAINT | (7 << 8); pub const SQLITE_CONSTRAINT_NOTNULL: usize = SQLITE_CONSTRAINT | (5 << 8); pub const SQLITE_FULL: usize = 13; // we want this in autoincrement - incase if user inserts max allowed int From 1fdc0258cd2f80536a8e4032787ebcfc847334e0 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:36:12 +0300 Subject: [PATCH 397/428] Unignore fk_deferred_constraints_fuzz because it doesnt fail anymore --- tests/fuzz/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/fuzz/mod.rs b/tests/fuzz/mod.rs index 08a275495..c096cd20b 100644 --- a/tests/fuzz/mod.rs +++ b/tests/fuzz/mod.rs @@ -653,7 +653,6 @@ mod fuzz_tests { #[test] #[allow(unused_assignments)] - #[ignore] // ignoring because every error I can find is due to sqlite sub-transaction behavior pub fn fk_deferred_constraints_fuzz() { let _ = env_logger::try_init(); let (mut rng, seed) = rng_from_time_or_env(); From 2d3ac79fe9b2d1e3844cb410c10fc7436d41fdc4 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:42:41 +0300 Subject: [PATCH 398/428] Modify fk_deferred_constraints_fuzz - Add more statements per iteration - Allow interactive transaction to contain multiple statements - add VERBOSE flag to print all statements executed in a successful iteration --- tests/fuzz/mod.rs | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/tests/fuzz/mod.rs b/tests/fuzz/mod.rs index c096cd20b..40eb360f4 100644 --- a/tests/fuzz/mod.rs +++ b/tests/fuzz/mod.rs @@ -659,7 +659,7 @@ mod fuzz_tests { println!("fk_deferred_constraints_fuzz seed: {seed}"); const OUTER_ITERS: usize = 10; - const INNER_ITERS: usize = 50; + const INNER_ITERS: usize = 100; for outer in 0..OUTER_ITERS { println!("fk_deferred_constraints_fuzz {}/{}", outer + 1, OUTER_ITERS); @@ -743,12 +743,12 @@ mod fuzz_tests { } // Transaction-based mutations with mix of deferred and immediate operations + let mut in_tx = false; for tx_num in 0..INNER_ITERS { // Decide if we're in a transaction - let mut in_tx = false; - let use_transaction = rng.random_bool(0.7); + let start_a_transaction = rng.random_bool(0.7); - if use_transaction && !in_tx { + if start_a_transaction && !in_tx { in_tx = true; let s = log_and_exec("BEGIN"); let sres = sqlite.execute(&s, params![]); @@ -874,7 +874,7 @@ mod fuzz_tests { format!("DELETE FROM child_deferred WHERE id={id}") } // Self-referential deferred insert (create temp violation then fix) - 10 if use_transaction => { + 10 if start_a_transaction => { let id = rng.random_range(400..=500); let pid = id + 1; // References non-existent yet format!("INSERT INTO child_deferred VALUES ({id}, {pid}, 0)") @@ -890,7 +890,7 @@ mod fuzz_tests { let sres = sqlite.execute(&stmt, params![]); let lres = limbo_exec_rows_fallible(&limbo_db, &limbo, &stmt); - if !use_transaction && !in_tx { + if !start_a_transaction && !in_tx { match (sres, lres) { (Ok(_), Ok(_)) | (Err(_), Err(_)) => {} (s, l) => { @@ -908,8 +908,8 @@ mod fuzz_tests { } } - if use_transaction && in_tx { - // Randomly COMMIT or ROLLBACK + // Randomly COMMIT or ROLLBACK some of the time + if in_tx && rng.random_bool(0.4) { let commit = rng.random_bool(0.7); let s = log_and_exec("COMMIT"); @@ -963,8 +963,14 @@ mod fuzz_tests { ); } } + in_tx = false; } } + // Print all statements + if std::env::var("VERBOSE").is_ok() { + println!("{}", stmts.join("\n")); + println!("--------- ITERATION COMPLETED ---------"); + } } } From d8cc57cf147c0853e9c8206ced0558ed7806950b Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 12:45:40 +0300 Subject: [PATCH 399/428] clippy: Remove unnecessary referencing --- core/storage/pager.rs | 4 ++-- core/storage/subjournal.rs | 2 +- core/vdbe/execute.rs | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 285684a72..1123a5079 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -717,7 +717,7 @@ impl Pager { ); buffer.as_mut_slice()[0..4].copy_from_slice(&page_id.to_be_bytes()); - buffer.as_mut_slice()[4..4 + page_size].copy_from_slice(&contents_buffer); + buffer.as_mut_slice()[4..4 + page_size].copy_from_slice(contents_buffer); Arc::new(buffer) }; @@ -784,7 +784,7 @@ impl Pager { // Same reason as in open_savepoint, the start offset should always be 0 as we should only have max 1 savepoint // opened at any given time. turso_assert!(start_offset == 0, "start offset should be 0"); - let c = subjournal.truncate(start_offset as u64)?; + let c = subjournal.truncate(start_offset)?; assert!(c.succeeded(), "memory IO should complete immediately"); Ok(()) } diff --git a/core/storage/subjournal.rs b/core/storage/subjournal.rs index d174230a5..62a30cf1d 100644 --- a/core/storage/subjournal.rs +++ b/core/storage/subjournal.rs @@ -70,7 +70,7 @@ impl Subjournal { bytes_read == page_size as i32, "bytes_read should be page_size" ); - finish_read_page(page.get().id as usize, buffer, page.clone()); + finish_read_page(page.get().id, buffer, page.clone()); }, ); let c = self.file.pread(offset, c)?; diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 85eb03ef3..3a56f30bc 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2150,7 +2150,7 @@ pub fn halt( ) -> Result { if err_code > 0 { // Any non-FK constraint violation causes the statement subtransaction to roll back. - state.end_statement(&program.connection, &pager, EndStatement::RollbackSavepoint)?; + state.end_statement(&program.connection, pager, EndStatement::RollbackSavepoint)?; } match err_code { 0 => {} @@ -2187,7 +2187,7 @@ pub fn halt( .load(Ordering::Acquire) > 0 { - state.end_statement(&program.connection, &pager, EndStatement::RollbackSavepoint)?; + state.end_statement(&program.connection, pager, EndStatement::RollbackSavepoint)?; return Err(LimboError::Constraint( "foreign key constraint failed".to_string(), )); @@ -2210,14 +2210,14 @@ pub fn halt( )); } } - state.end_statement(&program.connection, &pager, EndStatement::ReleaseSavepoint)?; + state.end_statement(&program.connection, pager, EndStatement::ReleaseSavepoint)?; program .commit_txn(pager.clone(), state, mv_store, false) .map(Into::into) } else { // Even if deferred violations are present, the statement subtransaction completes successfully when // it is part of an interactive transaction. - state.end_statement(&program.connection, &pager, EndStatement::ReleaseSavepoint)?; + state.end_statement(&program.connection, pager, EndStatement::ReleaseSavepoint)?; Ok(InsnFunctionStepResult::Done) } } From 1dcfd3d0687494c31d9decb7e0177e9de8dfc813 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 13:34:35 +0300 Subject: [PATCH 400/428] fix stale test: constraint errors do not roll back tx anymore --- .../query_processing/test_transactions.rs | 34 ++++++++++++------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/tests/integration/query_processing/test_transactions.rs b/tests/integration/query_processing/test_transactions.rs index 89f102ae5..81b104788 100644 --- a/tests/integration/query_processing/test_transactions.rs +++ b/tests/integration/query_processing/test_transactions.rs @@ -177,11 +177,12 @@ fn test_transaction_visibility() { } #[test] -/// Currently, our default conflict resolution strategy is ROLLBACK, which ends the transaction. -/// In SQLite, the default is ABORT, which rolls back the current statement but allows the transaction to continue. -/// We should migrate to default ABORT once we support subtransactions. -fn test_constraint_error_aborts_transaction() { - let tmp_db = TempDatabase::new("test_constraint_error_aborts_transaction.db", true); +/// A constraint error does not rollback the transaction, it rolls back the statement. +fn test_constraint_error_aborts_only_stmt_not_entire_transaction() { + let tmp_db = TempDatabase::new( + "test_constraint_error_aborts_only_stmt_not_entire_transaction.db", + true, + ); let conn = tmp_db.connect_limbo(); // Create table succeeds @@ -198,14 +199,23 @@ fn test_constraint_error_aborts_transaction() { let result = conn.execute("INSERT INTO t VALUES (2),(3)"); assert!(matches!(result, Err(LimboError::Constraint(_)))); - // Commit fails because the transaction was aborted by the constraint error - let result = conn.execute("COMMIT"); - assert!(matches!(result, Err(LimboError::TxError(_)))); + // Third insert is valid again + conn.execute("INSERT INTO t VALUES (4)").unwrap(); - // Make sure table is empty - let stmt = conn.query("SELECT COUNT(*) FROM t").unwrap().unwrap(); - let row = helper_read_single_row(stmt); - assert_eq!(row, vec![Value::Integer(0)]); + // Commit succeeds + conn.execute("COMMIT").unwrap(); + + // Make sure table has 3 rows (a=1, a=2, a=4) + let stmt = conn.query("SELECT a FROM t").unwrap().unwrap(); + let rows = helper_read_all_rows(stmt); + assert_eq!( + rows, + vec![ + vec![Value::Integer(1)], + vec![Value::Integer(2)], + vec![Value::Integer(4)] + ] + ); } #[test] From e9bfb570655880f19fdaf5444e29c677fb0ee48c Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 14:00:48 +0300 Subject: [PATCH 401/428] Fix incorrectly implemented test Test started executing another statement when previous statement returned IO the last time and didn't run to completion --- tests/integration/query_processing/test_multi_thread.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/integration/query_processing/test_multi_thread.rs b/tests/integration/query_processing/test_multi_thread.rs index 6054c7a32..4f09a0f94 100644 --- a/tests/integration/query_processing/test_multi_thread.rs +++ b/tests/integration/query_processing/test_multi_thread.rs @@ -242,8 +242,10 @@ fn test_schema_reprepare_write() { } fn advance(stmt: &mut Statement) -> anyhow::Result<()> { - stmt.step()?; - stmt.run_once()?; + tracing::info!("Advancing statement: {:?}", stmt.get_sql()); + while matches!(stmt.step()?, StepResult::IO) { + stmt.run_once()?; + } Ok(()) } @@ -268,9 +270,9 @@ fn test_interleaved_transactions() -> anyhow::Result<()> { tmp_db.connect_limbo(), ]; - let mut statement2 = conn[2].prepare("BEGIN")?; let mut statement0 = conn[0].prepare("BEGIN")?; let mut statement1 = conn[1].prepare("BEGIN")?; + let mut statement2 = conn[2].prepare("BEGIN")?; advance(&mut statement2)?; From 7376475cb3751696faa26df29f4bdd84b01bb608 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 14:08:30 +0300 Subject: [PATCH 402/428] Do not start statement subtransactions when MVCC is enabled MVCC does not support statement-level rollback. --- core/vdbe/execute.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 3a56f30bc..a4c3fb37a 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2471,8 +2471,10 @@ pub fn op_transaction_inner( } } - let write = matches!(tx_mode, TransactionMode::Write); - state.begin_statement(&program.connection, &pager, write)?; + if mv_store.is_none() { + let write = matches!(tx_mode, TransactionMode::Write); + state.begin_statement(&program.connection, &pager, write)?; + } state.pc += 1; state.op_transaction_state = OpTransactionState::Start; From a14bbdecf24a6e073b82fbbcb8461663dc30eea2 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 14:24:03 +0300 Subject: [PATCH 403/428] Add assertion that page is loaded when pager.add_dirty() is called --- core/storage/pager.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 1123a5079..919f4e8c5 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1577,6 +1577,11 @@ impl Pager { } pub fn add_dirty(&self, page: &Page) -> Result<()> { + turso_assert!( + page.is_loaded(), + "page {} must be loaded in add_dirty() so its contents can be subjournaled", + page.get().id + ); self.subjournal_page_if_required(page)?; // TODO: check duplicates? let mut dirty_pages = self.dirty_pages.write(); From e04c6c9b46c0878a59fe45a02ccc659bc2eb8d18 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 14:25:43 +0300 Subject: [PATCH 404/428] Mark pages_to_balance as dirty only after loading --- core/storage/btree.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 875746039..95797b9c7 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -2830,8 +2830,6 @@ impl BTreeCursor { return Err(e); } Ok((page, c)) => { - // mark as dirty - self.pager.add_dirty(&page)?; pages_to_balance[i].replace(page); if let Some(c) = c { group.add(&c); @@ -2916,7 +2914,7 @@ impl BTreeCursor { .take(balance_info.sibling_count) { let page = page.as_ref().unwrap(); - turso_assert!(page.is_loaded(), "page should be loaded"); + self.pager.add_dirty(page)?; #[cfg(debug_assertions)] let page_type_of_siblings = balance_info.pages_to_balance[0] From ea98d8086f9e88ee3dce705df3086694bd67f201 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 15:03:02 +0300 Subject: [PATCH 405/428] Change default ON CONFLICT mode back to ABORT now that we support it --- core/translate/insert.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 64bd29ed6..2facd8520 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -232,7 +232,7 @@ pub fn translate_insert( &table, &mut body, connection, - on_conflict.unwrap_or(ResolveType::Rollback), + on_conflict.unwrap_or(ResolveType::Abort), )?; if inserting_multiple_rows && btree_table.has_autoincrement { @@ -976,9 +976,8 @@ fn bind_insert( next: None, })); } - ResolveType::Rollback => { - // This is the current default behavior for INSERT in tursodb - the transaction will be rolled back if the insert fails. - // In SQLite, the default is ABORT and we should use that one once we support subtransactions. + ResolveType::Abort => { + // This is the default conflict resolution strategy for INSERT in SQLite. } _ => { crate::bail_parse_error!( From fe51804e6b253961342803bc9846245c422f8834 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Tue, 21 Oct 2025 16:47:32 +0300 Subject: [PATCH 406/428] Implement crude way of making opening subtransaction conditional We don't want something like `BEGIN IMMEDIATE` to start a subtransaction, so instead we will open it if: - Statement is write, AND a) Statement has >0 table_references, or b) The statement is an INSERT (INSERT doesn't track table_references in the same way as other program types) --- core/storage/pager.rs | 6 +++++- core/translate/insert.rs | 2 ++ core/vdbe/builder.rs | 15 +++++++++++++++ core/vdbe/execute.rs | 2 +- core/vdbe/mod.rs | 4 ++++ 5 files changed, 27 insertions(+), 2 deletions(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 919f4e8c5..d2a813525 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -765,7 +765,11 @@ impl Pager { turso_assert!(subjournal_offset == 0, "subjournal offset should be 0"); let savepoint = Savepoint::new(subjournal_offset); let mut savepoints = self.savepoints.write(); - turso_assert!(savepoints.is_empty(), "savepoints should be empty"); + turso_assert!( + savepoints.is_empty(), + "savepoints should be empty, but had {} savepoints open", + savepoints.len() + ); savepoints.push(savepoint); Ok(()) } diff --git a/core/translate/insert.rs b/core/translate/insert.rs index 2facd8520..9209bdd2a 100644 --- a/core/translate/insert.rs +++ b/core/translate/insert.rs @@ -485,6 +485,8 @@ pub fn translate_insert( )?; emit_epilogue(&mut program, &ctx, inserting_multiple_rows); + + program.set_needs_stmt_subtransactions(true); Ok(program) } diff --git a/core/vdbe/builder.rs b/core/vdbe/builder.rs index 667fa6b9d..3ac364226 100644 --- a/core/vdbe/builder.rs +++ b/core/vdbe/builder.rs @@ -124,6 +124,10 @@ pub struct ProgramBuilder { current_parent_explain_idx: Option, pub param_ctx: ParamState, pub(crate) reg_result_cols_start: Option, + /// Whether the program needs to use statement subtransactions, + /// i.e. the individual statement may need to be aborted due to a constraint conflict, etc. + /// instead of the entire transaction. + needs_stmt_subtransactions: bool, } #[derive(Debug, Clone)] @@ -211,9 +215,14 @@ impl ProgramBuilder { current_parent_explain_idx: None, param_ctx: ParamState::default(), reg_result_cols_start: None, + needs_stmt_subtransactions: false, } } + pub fn set_needs_stmt_subtransactions(&mut self, needs_stmt_subtransactions: bool) { + self.needs_stmt_subtransactions = needs_stmt_subtransactions; + } + pub fn capture_data_changes_mode(&self) -> &CaptureDataChangesMode { &self.capture_data_changes_mode } @@ -1029,6 +1038,11 @@ impl ProgramBuilder { self.resolve_labels(); self.parameters.list.dedup(); + + if !self.table_references.is_empty() && matches!(self.txn_mode, TransactionMode::Write) { + self.needs_stmt_subtransactions = true; + } + Program { max_registers: self.next_free_register, insns: self.insns, @@ -1042,6 +1056,7 @@ impl ProgramBuilder { table_references: self.table_references, sql: sql.to_string(), accesses_db: !matches!(self.txn_mode, TransactionMode::None), + needs_stmt_subtransactions: self.needs_stmt_subtransactions, } } } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index a4c3fb37a..d90765c62 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2471,7 +2471,7 @@ pub fn op_transaction_inner( } } - if mv_store.is_none() { + if program.needs_stmt_subtransactions && mv_store.is_none() { let write = matches!(tx_mode, TransactionMode::Write); state.begin_statement(&program.connection, &pager, write)?; } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 09870ba8d..39109fd1f 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -595,6 +595,10 @@ pub struct Program { /// Used to determine whether we need to check for schema changes when /// starting a transaction. pub accesses_db: bool, + /// In SQLite, whether statement subtransactions will be used for executing a program (`usesStmtJournal`) + /// is determined by the parser flags "mayAbort" and "isMultiWrite". Essentially this means that the individual + /// statement may need to be aborted due to a constraint conflict, etc. instead of the entire transaction. + pub needs_stmt_subtransactions: bool, } impl Program { From 2b73260dd97d7463ea702d58f67b6c45ef4fbff2 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Wed, 22 Oct 2025 09:45:52 +0300 Subject: [PATCH 407/428] Handle cases where DB grows or shrinks due to savepoint rollback --- core/storage/pager.rs | 41 ++++-- core/vdbe/execute.rs | 9 +- core/vdbe/mod.rs | 20 +-- .../query_processing/test_write_path.rs | 128 +++++++++++++++++- 4 files changed, 172 insertions(+), 26 deletions(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index d2a813525..a6c3891dc 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -473,14 +473,19 @@ pub struct Savepoint { write_offset: AtomicU64, /// Bitmap of page numbers that are dirty in the savepoint. page_bitmap: RwLock, + /// Database size at the start of the savepoint. + /// If the database grows during the savepoint and a rollback to the savepoint is performed, + /// the pages exceeding the database size at the start of the savepoint will be ignored. + db_size: AtomicU32, } impl Savepoint { - pub fn new(subjournal_offset: u64) -> Self { + pub fn new(subjournal_offset: u64, db_size: u32) -> Self { Self { start_offset: AtomicU64::new(subjournal_offset), write_offset: AtomicU64::new(subjournal_offset), page_bitmap: RwLock::new(RoaringBitmap::new()), + db_size: AtomicU32::new(db_size), } } @@ -660,9 +665,9 @@ impl Pager { }) } - pub fn begin_statement(&self) -> Result<()> { + pub fn begin_statement(&self, db_size: u32) -> Result<()> { self.open_subjournal()?; - self.open_savepoint()?; + self.open_savepoint(db_size)?; Ok(()) } @@ -756,14 +761,14 @@ impl Pager { Ok(()) } - pub fn open_savepoint(&self) -> Result<()> { + pub fn open_savepoint(&self, db_size: u32) -> Result<()> { self.open_subjournal()?; let subjournal_offset = self.subjournal.read().as_ref().unwrap().size()?; // Currently as we only have anonymous savepoints opened at the start of a statement, // the subjournal offset should always be 0 as we should only have max 1 savepoint // opened at any given time. turso_assert!(subjournal_offset == 0, "subjournal offset should be 0"); - let savepoint = Savepoint::new(subjournal_offset); + let savepoint = Savepoint::new(subjournal_offset, db_size); let mut savepoints = self.savepoints.write(); turso_assert!( savepoints.is_empty(), @@ -824,6 +829,9 @@ impl Pager { let mut current_offset = journal_start_offset; let page_size = self.page_size.load(Ordering::SeqCst) as u64; let journal_end_offset = savepoint.write_offset.load(Ordering::SeqCst); + let db_size = savepoint.db_size.load(Ordering::SeqCst); + + let mut dirty_pages = self.dirty_pages.write(); while current_offset < journal_end_offset { // Read 4 bytes for page id @@ -833,12 +841,27 @@ impl Pager { let page_id = u32::from_be_bytes(page_id_buffer.as_slice()[0..4].try_into().unwrap()); current_offset += 4; - // Check if we've already rolled back this page - if rollback_bitset.contains(page_id) { - // Skip reading the page, just advance offset + // Check if we've already rolled back this page or if the page is beyond the database size at the start of the savepoint + let already_rolled_back = rollback_bitset.contains(page_id); + if already_rolled_back { current_offset += page_size; continue; } + let page_wont_exist_after_rollback = page_id > db_size; + if page_wont_exist_after_rollback { + dirty_pages.remove(&(page_id as usize)); + if let Some(page) = self + .page_cache + .write() + .get(&PageCacheKey::new(page_id as usize))? + { + page.clear_dirty(); + page.try_unpin(); + } + current_offset += page_size; + rollback_bitset.insert(page_id); + continue; + } // Read the page data let page_buffer = Arc::new(self.buffer_pool.allocate(page_size as usize)); @@ -870,6 +893,8 @@ impl Pager { "memory IO should complete immediately" ); + self.page_cache.write().truncate(db_size as usize)?; + Ok(()) } diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index d90765c62..503c9c016 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -2266,6 +2266,7 @@ pub fn op_halt_if_null( pub enum OpTransactionState { Start, CheckSchemaCookie, + BeginStatement, } pub fn op_transaction( @@ -2471,9 +2472,15 @@ pub fn op_transaction_inner( } } + state.op_transaction_state = OpTransactionState::BeginStatement; + } + OpTransactionState::BeginStatement => { if program.needs_stmt_subtransactions && mv_store.is_none() { let write = matches!(tx_mode, TransactionMode::Write); - state.begin_statement(&program.connection, &pager, write)?; + let res = state.begin_statement(&program.connection, &pager, write)?; + if let IOResult::IO(io) = res { + return Ok(InsnFunctionStepResult::IO(io)); + } } state.pc += 1; diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 39109fd1f..68981dd70 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -29,6 +29,7 @@ use crate::{ error::LimboError, function::{AggFunc, FuncCtx}, mvcc::{database::CommitStateMachine, LocalClock}, + return_if_io, state_machine::StateMachine, storage::{pager::PagerCommitResult, sqlite3_ondisk::SmallVec}, translate::{collate::CollationSeq, plan::TableReferences}, @@ -179,18 +180,6 @@ pub enum StepResult { Busy, } -/// If there is I/O, the instruction is restarted. -/// Evaluate a Result>, if IO return Ok(StepResult::IO). -#[macro_export] -macro_rules! return_step_if_io { - ($expr:expr) => { - match $expr? { - IOResult::Ok(v) => v, - IOResult::IO => return Ok(StepResult::IO), - } - }; -} - struct RegexCache { like: HashMap, glob: HashMap, @@ -482,7 +471,7 @@ impl ProgramState { connection: &Connection, pager: &Arc, write: bool, - ) -> Result<()> { + ) -> Result> { // Store the deferred foreign key violations counter at the start of the statement. // This is used to ensure that if an interactive transaction had deferred FK violations and a statement subtransaction rolls back, // the deferred FK violations are not lost. @@ -494,9 +483,10 @@ impl ProgramState { self.fk_immediate_violations_during_stmt .store(0, Ordering::SeqCst); if write { - pager.begin_statement()?; + let db_size = return_if_io!(pager.with_header(|header| header.database_size.get())); + pager.begin_statement(db_size)?; } - Ok(()) + Ok(IOResult::Done(())) } /// End a statement subtransaction. diff --git a/tests/integration/query_processing/test_write_path.rs b/tests/integration/query_processing/test_write_path.rs index 85f666ee4..b9384cdc3 100644 --- a/tests/integration/query_processing/test_write_path.rs +++ b/tests/integration/query_processing/test_write_path.rs @@ -1,10 +1,11 @@ -use crate::common::{self, limbo_exec_rows, maybe_setup_tracing}; +use crate::common::{self, limbo_exec_rows, maybe_setup_tracing, rusqlite_integrity_check}; use crate::common::{compare_string, do_flush, TempDatabase}; use log::debug; use std::io::{Read, Seek, Write}; use std::sync::Arc; use turso_core::{ - CheckpointMode, Connection, Database, LimboError, Row, Statement, StepResult, Value, + CheckpointMode, Connection, Database, DatabaseOpts, LimboError, Row, Statement, StepResult, + Value, }; const WAL_HEADER_SIZE: usize = 32; @@ -508,6 +509,129 @@ fn test_update_regression() -> anyhow::Result<()> { Ok(()) } +#[test] +/// Test that a large insert statement containing a UNIQUE constraint violation +/// is properly rolled back so that the database size is also shrunk to the size +/// before that statement is executed. +fn test_rollback_on_unique_constraint_violation() -> anyhow::Result<()> { + let _ = env_logger::try_init(); + let tmp_db = TempDatabase::new_with_opts( + "big_statement_rollback.db", + DatabaseOpts::new().with_indexes(true), + ); + let conn = tmp_db.connect_limbo(); + + conn.execute("CREATE TABLE t(x UNIQUE)")?; + + conn.execute("BEGIN")?; + conn.execute("INSERT INTO t VALUES (10000)")?; + + // This should fail due to unique constraint violation + let result = conn.execute("INSERT INTO t SELECT value FROM generate_series(1,10000)"); + assert!(result.is_err(), "Expected unique constraint violation"); + + conn.execute("COMMIT")?; + + // Should have exactly 1 row (the first insert) + common::run_query_on_row(&tmp_db, &conn, "SELECT count(*) FROM t", |row| { + let count = row.get::(0).unwrap(); + assert_eq!(count, 1, "Expected 1 row after rollback"); + })?; + + // Check page count + common::run_query_on_row(&tmp_db, &conn, "PRAGMA page_count", |row| { + let page_count = row.get::(0).unwrap(); + assert_eq!(page_count, 3, "Expected 3 pages"); + })?; + + // Checkpoint the WAL + conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")?; + + // Integrity check with rusqlite + rusqlite_integrity_check(tmp_db.path.as_path())?; + + // Size on disk should be 3 * 4096 + let db_size = std::fs::metadata(&tmp_db.path).unwrap().len(); + assert_eq!(db_size, 3 * 4096); + + Ok(()) +} + +#[test] +/// Test that a large delete statement containing a foreign key constraint violation +/// is properly rolled back. +fn test_rollback_on_foreign_key_constraint_violation() -> anyhow::Result<()> { + let _ = env_logger::try_init(); + let tmp_db = TempDatabase::new_with_opts( + "big_delete_rollback.db", + DatabaseOpts::new().with_indexes(true), + ); + let conn = tmp_db.connect_limbo(); + + // Enable foreign keys + conn.execute("PRAGMA foreign_keys = ON")?; + + // Create parent and child tables + conn.execute("CREATE TABLE parent(id INTEGER PRIMARY KEY)")?; + conn.execute( + "CREATE TABLE child(id INTEGER PRIMARY KEY, parent_id INTEGER REFERENCES parent(id))", + )?; + + // Insert 10000 parent rows + conn.execute("INSERT INTO parent SELECT value FROM generate_series(1,10000)")?; + + // Insert a child row that references the 10000th parent row + conn.execute("INSERT INTO child VALUES (1, 10000)")?; + + conn.execute("BEGIN")?; + + // Delete first parent row (should succeed) + conn.execute("DELETE FROM parent WHERE id = 1")?; + + // This should fail due to foreign key constraint violation (trying to delete parent row 10000 which has a child) + let result = conn.execute("DELETE FROM parent WHERE id >= 2"); + assert!(result.is_err(), "Expected foreign key constraint violation"); + + conn.execute("COMMIT")?; + + // Should have 9999 parent rows (10000 - 1 that was successfully deleted) + common::run_query_on_row(&tmp_db, &conn, "SELECT count(*) FROM parent", |row| { + let count = row.get::(0).unwrap(); + assert_eq!(count, 9999, "Expected 9999 parent rows after rollback"); + })?; + + // Verify rows 2-10000 are intact + common::run_query_on_row( + &tmp_db, + &conn, + "SELECT min(id), max(id) FROM parent", + |row| { + let min_id = row.get::(0).unwrap(); + let max_id = row.get::(1).unwrap(); + assert_eq!(min_id, 2, "Expected min id to be 2"); + assert_eq!(max_id, 10000, "Expected max id to be 10000"); + }, + )?; + + // Child row should still exist + common::run_query_on_row(&tmp_db, &conn, "SELECT count(*) FROM child", |row| { + let count = row.get::(0).unwrap(); + assert_eq!(count, 1, "Expected 1 child row"); + })?; + + // Checkpoint the WAL + conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")?; + + // Integrity check with rusqlite + rusqlite_integrity_check(tmp_db.path.as_path())?; + + // Size on disk should be 21 * 4096 + let db_size = std::fs::metadata(&tmp_db.path).unwrap().len(); + assert_eq!(db_size, 21 * 4096); + + Ok(()) +} + #[test] fn test_multiple_statements() -> anyhow::Result<()> { let _ = env_logger::try_init(); From c2b84f74845d966f2b10fa1ee730cefe183e51f5 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Wed, 22 Oct 2025 10:51:09 +0300 Subject: [PATCH 408/428] Randomly inject txn control statements into index_mutation_upsert_fuzz --- tests/fuzz/mod.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/fuzz/mod.rs b/tests/fuzz/mod.rs index 40eb360f4..122e58dce 100644 --- a/tests/fuzz/mod.rs +++ b/tests/fuzz/mod.rs @@ -2210,6 +2210,22 @@ mod fuzz_tests { } for _ in 0..INNER_ITERS { + // Randomly inject transaction statements -- we don't care if they are legal, + // we just care that tursodb/sqlite behave the same way. + if rng.random_bool(0.15) { + let tx_stmt = match rng.random_range(0..4) { + 0 => "BEGIN", + 1 => "BEGIN IMMEDIATE", + 2 => "COMMIT", + 3 => "ROLLBACK", + _ => unreachable!(), + }; + println!("{tx_stmt};"); + let sqlite_res = sqlite.execute(tx_stmt, rusqlite::params![]); + let limbo_res = limbo_exec_rows_fallible(&limbo_db, &limbo_conn, tx_stmt); + // Both should succeed or both should fail + assert!(sqlite_res.is_ok() == limbo_res.is_ok()); + } let action = rng.random_range(0..4); // 0: INSERT, 1: UPDATE, 2: DELETE, 3: UPSERT (catch-all) let stmt = match action { // INSERT From 92751e621b0f44676683149272216bae8834d8e2 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 6 Oct 2025 20:54:38 -0500 Subject: [PATCH 409/428] Add DISTINCT support to aggregate operator Implements COUNT/SUM/AVG(DISTINCT) and SELECT DISTINCT for materialized views. To do this we have to keep a list of the actual distinct values (similarly to how we do for min/max). We then update the operator (and issue deltas) only when there is a state transition (for example, if we already count the value x = 1, and we see an insert for x = 1, we do nothing). SELECT DISTINCT (with no aggregator) is similar. We already have to keep a list of the values anyway to power the aggregates. So we just issue new deltas based on the transition, without updating the aggregator. --- core/incremental/aggregate_operator.rs | 1367 ++++++++++++++++++++++-- core/incremental/compiler.rs | 84 +- core/incremental/operator.rs | 514 +++++++++ core/incremental/persistence.rs | 19 +- testing/materialized_views.test | 774 ++++++++++++++ 5 files changed, 2676 insertions(+), 82 deletions(-) diff --git a/core/incremental/aggregate_operator.rs b/core/incremental/aggregate_operator.rs index e0ed53d9f..a24c4437c 100644 --- a/core/incremental/aggregate_operator.rs +++ b/core/incremental/aggregate_operator.rs @@ -14,9 +14,71 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fmt::{self, Display}; use std::sync::{Arc, Mutex}; +// Architecture of the Aggregate Operator +// ======================================== +// +// This operator implements SQL aggregations (GROUP BY, DISTINCT, COUNT, SUM, AVG, MIN, MAX) +// using DBSP-style incremental computation. The key insight is that all these operations +// can be expressed as operations on weighted sets (Z-sets) stored in persistent BTrees. +// +// ## Storage Strategy +// +// We use three different storage encodings (identified by 2-bit type codes in storage IDs): +// - **Regular aggregates** (COUNT/SUM/AVG): Store accumulated state as a blob +// - **MIN/MAX aggregates**: Store individual values; BTree ordering gives us min/max efficiently +// - **DISTINCT tracking**: Store distinct values with weights (positive = present, zero = deleted) +// +// ## MIN/MAX Handling +// +// MIN/MAX are special because they're not fully incrementalizable: +// - **Inserts**: Can be computed incrementally (new_min = min(old_min, new_value)) +// - **Deletes**: Must recompute from the BTree when the current min/max is deleted +// +// Our approach: +// 1. Store each value with its weight in a BTree (leveraging natural ordering) +// 2. On insert: Simply compare with current min/max (incremental) +// 3. On delete of current min/max: Scan the BTree to find the next min/max +// - For MIN: scan forward from the beginning to find first value with positive weight +// - For MAX: scan backward from the end to find last value with positive weight +// +// ## DISTINCT Handling +// +// DISTINCT operations (COUNT(DISTINCT), SUM(DISTINCT), etc.) are implemented using the +// weighted set pattern: +// - Each distinct value is stored with a weight (occurrence count) +// - Weight > 0 means the value exists in the current dataset +// - Weight = 0 means the value has been deleted (we may clean these up) +// - We track transitions: when a value's weight crosses zero (appears/disappears) +// +// ## Plain DISTINCT (SELECT DISTINCT) +// +// A clever reuse of infrastructure: SELECT DISTINCT x, y, z is compiled to: +// - GROUP BY x, y, z (making each unique row combination a group) +// - Empty aggregates vector (no actual aggregations to compute) +// - The groups themselves become the distinct rows +// +// This allows us to reuse all the incremental machinery for DISTINCT without special casing. +// The `is_distinct_only` flag indicates this pattern, where the groups ARE the output rows. +// +// ## State Machines +// +// The operator uses async-ready state machines to handle I/O operations: +// - **Eval state machine**: Fetches existing state, applies deltas, recomputes MIN/MAX +// - **Commit state machine**: Persists updated state back to storage +// - Each state represents a resumption point for when I/O operations yield + /// Constants for aggregate type encoding in storage IDs (2 bits) pub const AGG_TYPE_REGULAR: u8 = 0b00; // COUNT/SUM/AVG pub const AGG_TYPE_MINMAX: u8 = 0b01; // MIN/MAX (BTree ordering gives both) +pub const AGG_TYPE_DISTINCT: u8 = 0b10; // DISTINCT values tracking + +/// Hash a Value to generate an element_id for DISTINCT storage +/// Uses HashableRow with column_idx as rowid for consistent hashing +fn hash_value(value: &Value, column_idx: usize) -> Hash128 { + // Use column_idx as rowid to ensure different columns with same value get different hashes + let row = HashableRow::new(column_idx as i64, vec![value.clone()]); + row.cached_hash() +} // Serialization type codes for aggregate functions const AGG_FUNC_COUNT: i64 = 0; @@ -24,22 +86,31 @@ const AGG_FUNC_SUM: i64 = 1; const AGG_FUNC_AVG: i64 = 2; const AGG_FUNC_MIN: i64 = 3; const AGG_FUNC_MAX: i64 = 4; +const AGG_FUNC_COUNT_DISTINCT: i64 = 5; +const AGG_FUNC_SUM_DISTINCT: i64 = 6; +const AGG_FUNC_AVG_DISTINCT: i64 = 7; #[derive(Debug, Clone, PartialEq)] pub enum AggregateFunction { Count, - Sum(usize), // Column index - Avg(usize), // Column index - Min(usize), // Column index - Max(usize), // Column index + CountDistinct(usize), // COUNT(DISTINCT column_index) + Sum(usize), // Column index + SumDistinct(usize), // SUM(DISTINCT column_index) + Avg(usize), // Column index + AvgDistinct(usize), // AVG(DISTINCT column_index) + Min(usize), // Column index + Max(usize), // Column index } impl Display for AggregateFunction { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { AggregateFunction::Count => write!(f, "COUNT(*)"), + AggregateFunction::CountDistinct(idx) => write!(f, "COUNT(DISTINCT col{idx})"), AggregateFunction::Sum(idx) => write!(f, "SUM(col{idx})"), + AggregateFunction::SumDistinct(idx) => write!(f, "SUM(DISTINCT col{idx})"), AggregateFunction::Avg(idx) => write!(f, "AVG(col{idx})"), + AggregateFunction::AvgDistinct(idx) => write!(f, "AVG(DISTINCT col{idx})"), AggregateFunction::Min(idx) => write!(f, "MIN(col{idx})"), AggregateFunction::Max(idx) => write!(f, "MAX(col{idx})"), } @@ -58,12 +129,30 @@ impl AggregateFunction { pub fn to_values(&self) -> Vec { match self { AggregateFunction::Count => vec![Value::Integer(AGG_FUNC_COUNT)], + AggregateFunction::CountDistinct(idx) => { + vec![ + Value::Integer(AGG_FUNC_COUNT_DISTINCT), + Value::Integer(*idx as i64), + ] + } AggregateFunction::Sum(idx) => { vec![Value::Integer(AGG_FUNC_SUM), Value::Integer(*idx as i64)] } + AggregateFunction::SumDistinct(idx) => { + vec![ + Value::Integer(AGG_FUNC_SUM_DISTINCT), + Value::Integer(*idx as i64), + ] + } AggregateFunction::Avg(idx) => { vec![Value::Integer(AGG_FUNC_AVG), Value::Integer(*idx as i64)] } + AggregateFunction::AvgDistinct(idx) => { + vec![ + Value::Integer(AGG_FUNC_AVG_DISTINCT), + Value::Integer(*idx as i64), + ] + } AggregateFunction::Min(idx) => { vec![Value::Integer(AGG_FUNC_MIN), Value::Integer(*idx as i64)] } @@ -85,6 +174,20 @@ impl AggregateFunction { *cursor += 1; AggregateFunction::Count } + Value::Integer(AGG_FUNC_COUNT_DISTINCT) => { + *cursor += 1; + let idx = values.get(*cursor).ok_or_else(|| { + LimboError::InternalError("Missing COUNT(DISTINCT) column index".into()) + })?; + if let Value::Integer(idx) = idx { + *cursor += 1; + AggregateFunction::CountDistinct(*idx as usize) + } else { + return Err(LimboError::InternalError(format!( + "Expected Integer for COUNT(DISTINCT) column index, got {idx:?}" + ))); + } + } Value::Integer(AGG_FUNC_SUM) => { *cursor += 1; let idx = values @@ -99,6 +202,20 @@ impl AggregateFunction { ))); } } + Value::Integer(AGG_FUNC_SUM_DISTINCT) => { + *cursor += 1; + let idx = values.get(*cursor).ok_or_else(|| { + LimboError::InternalError("Missing SUM(DISTINCT) column index".into()) + })?; + if let Value::Integer(idx) = idx { + *cursor += 1; + AggregateFunction::SumDistinct(*idx as usize) + } else { + return Err(LimboError::InternalError(format!( + "Expected Integer for SUM(DISTINCT) column index, got {idx:?}" + ))); + } + } Value::Integer(AGG_FUNC_AVG) => { *cursor += 1; let idx = values @@ -113,6 +230,20 @@ impl AggregateFunction { ))); } } + Value::Integer(AGG_FUNC_AVG_DISTINCT) => { + *cursor += 1; + let idx = values.get(*cursor).ok_or_else(|| { + LimboError::InternalError("Missing AVG(DISTINCT) column index".into()) + })?; + if let Value::Integer(idx) = idx { + *cursor += 1; + AggregateFunction::AvgDistinct(*idx as usize) + } else { + return Err(LimboError::InternalError(format!( + "Expected Integer for AVG(DISTINCT) column index, got {idx:?}" + ))); + } + } Value::Integer(AGG_FUNC_MIN) => { *cursor += 1; let idx = values @@ -189,6 +320,27 @@ type ComputedStates = HashMap, AggregateState)>; // group_key_str -> (column_index, value_as_hashable_row) -> accumulated_weight pub type MinMaxDeltas = HashMap>; +/// Type for tracking distinct values within a batch +/// Maps: group_key_str -> (column_idx, HashableRow) -> accumulated_weight +/// HashableRow contains the value with column_idx as rowid for proper hashing +type DistinctDeltas = HashMap>; + +/// Return type for merge_delta_with_existing function +type MergeResult = (Delta, HashMap, AggregateState)>); + +/// Information about distinct value transitions for a single column +#[derive(Debug, Clone)] +pub struct DistinctTransition { + pub transition_type: TransitionType, + pub transitioned_value: Value, // The value that was added/removed +} + +#[derive(Debug, Clone, PartialEq)] +pub enum TransitionType { + Added, // Value added to distinct set + Removed, // Value removed from distinct set +} + #[derive(Debug)] enum AggregateCommitState { Idle, @@ -198,13 +350,21 @@ enum AggregateCommitState { PersistDelta { delta: Delta, computed_states: ComputedStates, + old_states: HashMap, // Track old counts for plain DISTINCT current_idx: usize, write_row: WriteRow, min_max_deltas: MinMaxDeltas, + distinct_deltas: DistinctDeltas, + input_delta: Delta, // Keep original input delta for distinct processing }, PersistMinMax { delta: Delta, min_max_persist_state: MinMaxPersistState, + distinct_deltas: DistinctDeltas, + }, + PersistDistinctValues { + delta: Delta, + distinct_persist_state: DistinctPersistState, }, Done { delta: Delta, @@ -221,8 +381,9 @@ pub enum AggregateEvalState { groups_to_read: Vec<(String, Vec)>, // Changed to Vec for index-based access existing_groups: HashMap, old_values: HashMap>, + pre_existing_groups: HashSet, // Track groups that existed before this delta }, - FetchData { + FetchAggregateState { delta: Delta, // Keep original delta for merge operation current_idx: usize, groups_to_read: Vec<(String, Vec)>, // Changed to Vec for index-based access @@ -230,12 +391,23 @@ pub enum AggregateEvalState { old_values: HashMap>, rowid: Option, // Rowid found by FetchKey (None if not found) read_record_state: Box, + pre_existing_groups: HashSet, // Track groups that existed before this delta + }, + FetchDistinctValues { + delta: Delta, // Keep original delta for merge operation + current_idx: usize, + groups_to_read: Vec<(String, Vec)>, // Changed to Vec for index-based access + existing_groups: HashMap, + old_values: HashMap>, + fetch_distinct_state: Box, + pre_existing_groups: HashSet, // Track groups that existed before this delta }, RecomputeMinMax { delta: Delta, existing_groups: HashMap, old_values: HashMap>, recompute_state: Box, + pre_existing_groups: HashSet, // Track groups that existed before this delta }, Done { output: (Delta, ComputedStates), @@ -257,10 +429,15 @@ pub struct AggregateOperator { pub input_column_names: Vec, // Map from column index to aggregate info for quick lookup pub column_min_max: HashMap, + // Set of column indices that have distinct aggregates + pub distinct_columns: HashSet, tracker: Option>>, // State machine for commit operation commit_state: AggregateCommitState, + + // SELECT DISTINCT x,y,z.... with no aggregations. + is_distinct_only: bool, } /// State for a single group's aggregates @@ -276,9 +453,34 @@ pub struct AggregateState { pub mins: HashMap, // For MAX: column_index -> maximum value pub maxs: HashMap, + // For DISTINCT aggregates: column_index -> computed value + // These are populated during eval when we scan the BTree (or in-memory map) + pub distinct_counts: HashMap, + pub distinct_sums: HashMap, + + // Weights of specific distinct values needed for current delta processing + // (column_index, value) -> weight + // Populated during FetchKey for values mentioned in the delta + pub(crate) distinct_value_weights: HashMap<(usize, HashableRow), i64>, } impl AggregateEvalState { + /// Process a delta through the aggregate state machine. + /// + /// Control flow is strictly linear for maintainability: + /// 1. FetchKey → FetchAggregateState (always) + /// 2. FetchAggregateState → FetchKey (always, loops until all groups processed) + /// 3. FetchKey (when done) → FetchDistinctValues (always) + /// 4. FetchDistinctValues → RecomputeMinMax (always) + /// 5. RecomputeMinMax → Done (always) + /// + /// Some states may be no-ops depending on the operator configuration: + /// - FetchAggregateState: For plain DISTINCT, skips reading aggregate blob (no aggregates to fetch) + /// - FetchDistinctValues: No-op if no distinct columns exist (distinct_columns is empty) + /// - RecomputeMinMax: No-op if no MIN/MAX aggregates exist (has_min_max() returns false) + /// + /// This deterministic flow ensures each state always transitions to the same next state, + /// making the state machine easier to understand and debug. fn process_delta( &mut self, operator: &mut AggregateOperator, @@ -292,40 +494,47 @@ impl AggregateEvalState { groups_to_read, existing_groups, old_values, + pre_existing_groups, } => { if *current_idx >= groups_to_read.len() { - // All groups have been fetched, move to RecomputeMinMax - // Extract MIN/MAX deltas from the input delta - let min_max_deltas = operator.extract_min_max_deltas(delta); - - let recompute_state = Box::new(RecomputeMinMax::new( - min_max_deltas, + // All groups have been fetched, move to FetchDistinctValues + // Create FetchDistinctState based on the delta and existing groups + let fetch_distinct_state = FetchDistinctState::new( + delta, + &operator.distinct_columns, + |values| operator.extract_group_key(values), + AggregateOperator::group_key_to_string, existing_groups, - operator, - )); + operator.is_distinct_only, + ); - *self = AggregateEvalState::RecomputeMinMax { + *self = AggregateEvalState::FetchDistinctValues { delta: std::mem::take(delta), + current_idx: 0, + groups_to_read: std::mem::take(groups_to_read), existing_groups: std::mem::take(existing_groups), old_values: std::mem::take(old_values), - recompute_state, + fetch_distinct_state: Box::new(fetch_distinct_state), + pre_existing_groups: std::mem::take(pre_existing_groups), }; } else { // Get the current group to read let (group_key_str, _group_key) = &groups_to_read[*current_idx]; - // Build the key for the index: (operator_id, zset_hash, element_id) - // For regular aggregates, use column_index=0 and AGG_TYPE_REGULAR + // For plain DISTINCT, we still need to transition to FetchAggregateState + // to add the group to existing_groups, but we won't read any aggregate blob + + // Build the key for regular aggregate state: (operator_id, zset_hash, element_id=0) let operator_storage_id = generate_storage_id(operator.operator_id, 0, AGG_TYPE_REGULAR); let zset_hash = operator.generate_group_hash(group_key_str); - let element_id = 0i64; // Always 0 for aggregators + let element_id = Hash128::new(0, 0); // Always zeros for aggregate state // Create index key values let index_key_values = vec![ Value::Integer(operator_storage_id), zset_hash.to_value(), - Value::Integer(element_id), + element_id.to_value(), ]; // Create an immutable record for the index key @@ -347,10 +556,10 @@ impl AggregateEvalState { None }; - // Always transition to FetchData + // Always transition to FetchAggregateState let taken_existing = std::mem::take(existing_groups); let taken_old_values = std::mem::take(old_values); - let next_state = AggregateEvalState::FetchData { + let next_state = AggregateEvalState::FetchAggregateState { delta: std::mem::take(delta), current_idx: *current_idx, groups_to_read: std::mem::take(groups_to_read), @@ -358,11 +567,12 @@ impl AggregateEvalState { old_values: taken_old_values, rowid, read_record_state: Box::new(ReadRecord::new()), + pre_existing_groups: std::mem::take(pre_existing_groups), // Pass through existing }; *self = next_state; } } - AggregateEvalState::FetchData { + AggregateEvalState::FetchAggregateState { delta, current_idx, groups_to_read, @@ -370,13 +580,20 @@ impl AggregateEvalState { old_values, rowid, read_record_state, + pre_existing_groups, } => { // Get the current group to read let (group_key_str, group_key) = &groups_to_read[*current_idx]; - // Only try to read if we have a rowid - if let Some(rowid) = rowid { + // For plain DISTINCT, skip aggregate state fetch entirely + // The distinct values are handled separately in FetchDistinctValues + if operator.is_distinct_only { + // Always insert the group key so FetchDistinctState will process it + // The count will be set properly when we fetch distinct values + existing_groups.insert(group_key_str.clone(), AggregateState::default()); + } else if let Some(rowid) = rowid { let key = SeekKey::TableRowId(*rowid); + // Regular aggregates - read the blob let state = return_if_io!( read_record_state.read_record(key, &mut cursors.table_cursor) ); @@ -385,23 +602,75 @@ impl AggregateEvalState { let mut old_row = group_key.clone(); old_row.extend(state.to_values(&operator.aggregates)); old_values.insert(group_key_str.clone(), old_row); - existing_groups.insert(group_key_str.clone(), state.clone()); + existing_groups.insert(group_key_str.clone(), state); + // Track that this group exists in storage + pre_existing_groups.insert(group_key_str.clone()); } - } else { - // No rowid for this group, skipping read } // If no rowid, there's no existing state for this group - // Move to next group + // Always move to next group via FetchKey let next_idx = *current_idx + 1; + let taken_existing = std::mem::take(existing_groups); let taken_old_values = std::mem::take(old_values); + let taken_pre_existing_groups = std::mem::take(pre_existing_groups); let next_state = AggregateEvalState::FetchKey { delta: std::mem::take(delta), current_idx: next_idx, groups_to_read: std::mem::take(groups_to_read), existing_groups: taken_existing, old_values: taken_old_values, + pre_existing_groups: taken_pre_existing_groups, + }; + *self = next_state; + } + AggregateEvalState::FetchDistinctValues { + delta, + current_idx: _, + groups_to_read: _, + existing_groups, + old_values, + fetch_distinct_state, + pre_existing_groups, + } => { + // Use FetchDistinctState to read distinct values from BTree storage + return_if_io!(fetch_distinct_state.fetch_distinct_values( + operator.operator_id, + existing_groups, + cursors, + |group_key| operator.generate_group_hash(group_key), + operator.is_distinct_only + )); + + // For plain DISTINCT, mark groups as "from storage" if they have distinct values + if operator.is_distinct_only { + for (group_key_str, state) in existing_groups.iter() { + // Check if this group has any distinct values with positive weight + let has_values = state.distinct_value_weights.values().any(|&w| w > 0); + if has_values { + pre_existing_groups.insert(group_key_str.clone()); + } + } + } + + // Extract MIN/MAX deltas for recomputation + let min_max_deltas = operator.extract_min_max_deltas(delta); + + // Create RecomputeMinMax before moving existing_groups + let recompute_state = Box::new(RecomputeMinMax::new( + min_max_deltas, + existing_groups, + operator, + )); + + // Transition to RecomputeMinMax + let next_state = AggregateEvalState::RecomputeMinMax { + delta: std::mem::take(delta), + existing_groups: std::mem::take(existing_groups), + old_values: std::mem::take(old_values), + recompute_state, + pre_existing_groups: std::mem::take(pre_existing_groups), }; *self = next_state; } @@ -410,6 +679,7 @@ impl AggregateEvalState { existing_groups, old_values, recompute_state, + pre_existing_groups, } => { if operator.has_min_max() { // Process MIN/MAX recomputation - this will update existing_groups with correct MIN/MAX @@ -417,15 +687,20 @@ impl AggregateEvalState { } // Now compute final output with updated MIN/MAX values - let (output_delta, computed_states) = - operator.merge_delta_with_existing(delta, existing_groups, old_values); + let (output_delta, computed_states) = operator.merge_delta_with_existing( + delta, + existing_groups, + old_values, + pre_existing_groups, + ); *self = AggregateEvalState::Done { output: (output_delta, computed_states), }; } AggregateEvalState::Done { output } => { - return Ok(IOResult::Done(output.clone())); + let (delta, computed_states) = output.clone(); + return Ok(IOResult::Done((delta, computed_states))); } } } @@ -459,15 +734,34 @@ impl AggregateState { AggregateFunction::Count => { // Count state is already stored at the beginning } + AggregateFunction::CountDistinct(col_idx) => { + // Store the distinct count for this column + let count = self.distinct_counts.get(col_idx).copied().unwrap_or(0); + values.push(Value::Integer(count)); + } AggregateFunction::Sum(col_idx) => { let sum = self.sums.get(col_idx).copied().unwrap_or(0.0); values.push(Value::Float(sum)); } + AggregateFunction::SumDistinct(col_idx) => { + // Store both the distinct count and sum for this column + let count = self.distinct_counts.get(col_idx).copied().unwrap_or(0); + let sum = self.distinct_sums.get(col_idx).copied().unwrap_or(0.0); + values.push(Value::Integer(count)); + values.push(Value::Float(sum)); + } AggregateFunction::Avg(col_idx) => { let (sum, count) = self.avgs.get(col_idx).copied().unwrap_or((0.0, 0)); values.push(Value::Float(sum)); values.push(Value::Integer(count)); } + AggregateFunction::AvgDistinct(col_idx) => { + // Store both the distinct count and sum for this column + let count = self.distinct_counts.get(col_idx).copied().unwrap_or(0); + let sum = self.distinct_sums.get(col_idx).copied().unwrap_or(0.0); + values.push(Value::Integer(count)); + values.push(Value::Float(sum)); + } AggregateFunction::Min(col_idx) => { if let Some(min_val) = self.mins.get(col_idx) { values.push(Value::Integer(1)); // Has value @@ -532,6 +826,69 @@ impl AggregateState { AggregateFunction::Count => { // Count state is already stored at the beginning } + AggregateFunction::CountDistinct(col_idx) => { + let count = values.get(cursor).ok_or_else(|| { + LimboError::InternalError("Missing COUNT(DISTINCT) value".into()) + })?; + if let Value::Integer(count) = count { + state.distinct_counts.insert(col_idx, *count); + cursor += 1; + } else { + return Err(LimboError::InternalError(format!( + "Expected Integer for COUNT(DISTINCT) value, got {count:?}" + ))); + } + } + AggregateFunction::SumDistinct(col_idx) => { + let count = values.get(cursor).ok_or_else(|| { + LimboError::InternalError("Missing SUM(DISTINCT) count".into()) + })?; + if let Value::Integer(count) = count { + state.distinct_counts.insert(col_idx, *count); + cursor += 1; + } else { + return Err(LimboError::InternalError(format!( + "Expected Integer for SUM(DISTINCT) count, got {count:?}" + ))); + } + + let sum = values.get(cursor).ok_or_else(|| { + LimboError::InternalError("Missing SUM(DISTINCT) sum".into()) + })?; + if let Value::Float(sum) = sum { + state.distinct_sums.insert(col_idx, *sum); + cursor += 1; + } else { + return Err(LimboError::InternalError(format!( + "Expected Float for SUM(DISTINCT) sum, got {sum:?}" + ))); + } + } + AggregateFunction::AvgDistinct(col_idx) => { + let count = values.get(cursor).ok_or_else(|| { + LimboError::InternalError("Missing AVG(DISTINCT) count".into()) + })?; + if let Value::Integer(count) = count { + state.distinct_counts.insert(col_idx, *count); + cursor += 1; + } else { + return Err(LimboError::InternalError(format!( + "Expected Integer for AVG(DISTINCT) count, got {count:?}" + ))); + } + + let sum = values.get(cursor).ok_or_else(|| { + LimboError::InternalError("Missing AVG(DISTINCT) sum".into()) + })?; + if let Value::Float(sum) = sum { + state.distinct_sums.insert(col_idx, *sum); + cursor += 1; + } else { + return Err(LimboError::InternalError(format!( + "Expected Float for AVG(DISTINCT) sum, got {sum:?}" + ))); + } + } AggregateFunction::Sum(col_idx) => { let sum = values .get(cursor) @@ -689,16 +1046,72 @@ impl AggregateState { weight: isize, aggregates: &[AggregateFunction], _column_names: &[String], // No longer needed + distinct_transitions: &HashMap, ) { // Update COUNT self.count += weight as i64; - // Update other aggregates + // Track which columns have had their distinct counts/sums updated + // This prevents double-counting when multiple distinct aggregates + // operate on the same column (e.g., COUNT(DISTINCT col), SUM(DISTINCT col), AVG(DISTINCT col)) + let mut processed_counts: HashSet = HashSet::new(); + let mut processed_sums: HashSet = HashSet::new(); + + // Update distinct aggregate state for agg in aggregates { match agg { AggregateFunction::Count => { // Already handled above } + AggregateFunction::CountDistinct(col_idx) => { + // Only update count if we haven't processed this column yet + if !processed_counts.contains(col_idx) { + if let Some(transition) = distinct_transitions.get(col_idx) { + let current_count = + self.distinct_counts.get(col_idx).copied().unwrap_or(0); + let new_count = match transition.transition_type { + TransitionType::Added => current_count + 1, + TransitionType::Removed => current_count - 1, + }; + self.distinct_counts.insert(*col_idx, new_count); + processed_counts.insert(*col_idx); + } + } + } + AggregateFunction::SumDistinct(col_idx) + | AggregateFunction::AvgDistinct(col_idx) => { + if let Some(transition) = distinct_transitions.get(col_idx) { + // Update count if not already processed (needed for AVG) + if !processed_counts.contains(col_idx) { + let current_count = + self.distinct_counts.get(col_idx).copied().unwrap_or(0); + let new_count = match transition.transition_type { + TransitionType::Added => current_count + 1, + TransitionType::Removed => current_count - 1, + }; + self.distinct_counts.insert(*col_idx, new_count); + processed_counts.insert(*col_idx); + } + + // Update sum if not already processed + if !processed_sums.contains(col_idx) { + let current_sum = + self.distinct_sums.get(col_idx).copied().unwrap_or(0.0); + let value_as_float = match &transition.transitioned_value { + Value::Integer(i) => *i as f64, + Value::Float(f) => *f, + _ => 0.0, + }; + + let new_sum = match transition.transition_type { + TransitionType::Added => current_sum + value_as_float, + TransitionType::Removed => current_sum - value_as_float, + }; + self.distinct_sums.insert(*col_idx, new_sum); + processed_sums.insert(*col_idx); + } + } + } AggregateFunction::Sum(col_idx) => { if let Some(val) = values.get(*col_idx) { let num_val = match val { @@ -760,6 +1173,11 @@ impl AggregateState { AggregateFunction::Count => { result.push(Value::Integer(self.count)); } + AggregateFunction::CountDistinct(col_idx) => { + // Return the computed DISTINCT count + let count = self.distinct_counts.get(col_idx).copied().unwrap_or(0); + result.push(Value::Integer(count)); + } AggregateFunction::Sum(col_idx) => { let sum = self.sums.get(col_idx).copied().unwrap_or(0.0); // Return as integer if it's a whole number, otherwise as float @@ -769,6 +1187,15 @@ impl AggregateState { result.push(Value::Float(sum)); } } + AggregateFunction::SumDistinct(col_idx) => { + // Return the computed SUM(DISTINCT) + let sum = self.distinct_sums.get(col_idx).copied().unwrap_or(0.0); + if sum.fract() == 0.0 { + result.push(Value::Integer(sum as i64)); + } else { + result.push(Value::Float(sum)); + } + } AggregateFunction::Avg(col_idx) => { if let Some((sum, count)) = self.avgs.get(col_idx) { if *count > 0 { @@ -780,6 +1207,18 @@ impl AggregateState { result.push(Value::Null); } } + AggregateFunction::AvgDistinct(col_idx) => { + // Compute AVG from SUM(DISTINCT) / COUNT(DISTINCT) + let count = self.distinct_counts.get(col_idx).copied().unwrap_or(0); + if count > 0 { + let sum = self.distinct_sums.get(col_idx).copied().unwrap_or(0.0); + let avg = sum / count as f64; + // AVG always returns a float value for consistency with SQLite + result.push(Value::Float(avg)); + } else { + result.push(Value::Null); + } + } AggregateFunction::Min(col_idx) => { // Return the MIN value from our state result.push(self.mins.get(col_idx).cloned().unwrap_or(Value::Null)); @@ -796,12 +1235,109 @@ impl AggregateState { } impl AggregateOperator { + /// Detect if a distinct value crosses the zero boundary (using pre-fetched weights and batch-accumulated weights) + fn detect_distinct_value_transition( + col_idx: usize, + val: &Value, + weight: isize, + existing_state: &AggregateState, + group_distinct_deltas: Option<&HashMap<(usize, HashableRow), isize>>, + ) -> Option { + let hashable_row = HashableRow::new(col_idx as i64, vec![val.clone()]); + + // Get the weight from storage (pre-fetched in AggregateState) + let storage_count = existing_state + .distinct_value_weights + .get(&(col_idx, hashable_row.clone())) + .copied() + .unwrap_or(0); + + // Get the accumulated weight from the current batch (before this row) + let batch_accumulated = if let Some(deltas) = group_distinct_deltas { + deltas + .get(&(col_idx, hashable_row.clone())) + .copied() + .unwrap_or(0) + } else { + 0 + }; + + // The old count is storage + batch accumulated so far (before this row) + let old_count = storage_count + batch_accumulated as i64; + // The new count includes the current weight + let new_count = old_count + weight as i64; + + // Detect transitions + if old_count <= 0 && new_count > 0 { + // Value added to distinct set + Some(DistinctTransition { + transition_type: TransitionType::Added, + transitioned_value: val.clone(), + }) + } else if old_count > 0 && new_count <= 0 { + // Value removed from distinct set + Some(DistinctTransition { + transition_type: TransitionType::Removed, + transitioned_value: val.clone(), + }) + } else { + // No transition + None + } + } + + /// Detect distinct value transitions for a single row + fn detect_distinct_transitions( + &self, + row_values: &[Value], + weight: isize, + existing_state: &AggregateState, + group_distinct_deltas: Option<&HashMap<(usize, HashableRow), isize>>, + ) -> HashMap { + let mut transitions = HashMap::new(); + + // Plain Distinct doesn't track individual values, so no transitions needed + if self.is_distinct_only { + // Distinct is handled by the count alone in apply_delta + return transitions; + } + + // Process each distinct column + for &col_idx in &self.distinct_columns { + let val = match row_values.get(col_idx) { + Some(v) => v, + None => continue, + }; + + // Skip null values + if val == &Value::Null { + continue; + } + + if let Some(transition) = Self::detect_distinct_value_transition( + col_idx, + val, + weight, + existing_state, + group_distinct_deltas, + ) { + transitions.insert(col_idx, transition); + } + } + + transitions + } + pub fn new( operator_id: i64, group_by: Vec, aggregates: Vec, input_column_names: Vec, ) -> Self { + // Precompute flags for runtime efficiency + // Plain DISTINCT is indicated by empty aggregates vector + let is_distinct_only = aggregates.is_empty(); + // Build map of column indices to their MIN/MAX info let mut column_min_max = HashMap::new(); let mut storage_indices = HashMap::new(); @@ -821,7 +1357,7 @@ impl AggregateOperator { } } - // Second pass: build the column info map + // Second pass: build the column info map for MIN/MAX for agg in &aggregates { match agg { AggregateFunction::Min(col_idx) => { @@ -846,14 +1382,29 @@ impl AggregateOperator { } } + // Build the distinct columns set + let mut distinct_columns = HashSet::new(); + for agg in &aggregates { + match agg { + AggregateFunction::CountDistinct(col_idx) + | AggregateFunction::SumDistinct(col_idx) + | AggregateFunction::AvgDistinct(col_idx) => { + distinct_columns.insert(*col_idx); + } + _ => {} + } + } + Self { operator_id, group_by, aggregates, input_column_names, column_min_max, + distinct_columns, tracker: None, commit_state: AggregateCommitState::Idle, + is_distinct_only, } } @@ -861,6 +1412,11 @@ impl AggregateOperator { !self.column_min_max.is_empty() } + /// Check if this operator has any DISTINCT aggregates or plain DISTINCT + pub fn has_distinct(&self) -> bool { + !self.distinct_columns.is_empty() || self.is_distinct_only + } + fn eval_internal( &mut self, state: &mut EvalState, @@ -896,6 +1452,7 @@ impl AggregateOperator { groups_to_read: groups_to_read.into_iter().collect(), existing_groups: HashMap::new(), old_values: HashMap::new(), + pre_existing_groups: HashSet::new(), // Initialize empty })); } EvalState::Aggregate(_agg_state) => { @@ -924,12 +1481,17 @@ impl AggregateOperator { delta: &Delta, existing_groups: &mut HashMap, old_values: &mut HashMap>, - ) -> (Delta, HashMap, AggregateState)>) { + pre_existing_groups: &HashSet, + ) -> MergeResult { let mut output_delta = Delta::new(); let mut temp_keys: HashMap> = HashMap::new(); + // Track distinct value weights as we process the batch + let mut batch_distinct_weights: HashMap> = + HashMap::new(); + // Process each change in the delta - for (row, weight) in &delta.changes { + for (row, weight) in delta.changes.iter() { if let Some(tracker) = &self.tracker { tracker.lock().unwrap().record_aggregation(); } @@ -938,50 +1500,159 @@ impl AggregateOperator { let group_key = self.extract_group_key(&row.values); let group_key_str = Self::group_key_to_string(&group_key); + // Get or create the state for this group let state = existing_groups.entry(group_key_str.clone()).or_default(); + // Get batch weights for this group + let group_batch_weights = batch_distinct_weights.get(&group_key_str); + + // Detect distinct transitions using the existing state and batch-accumulated weights + let distinct_transitions = if self.has_distinct() { + self.detect_distinct_transitions(&row.values, *weight, state, group_batch_weights) + } else { + HashMap::new() + }; + + // Update batch weights after detecting transitions + if self.has_distinct() { + for &col_idx in &self.distinct_columns { + if let Some(val) = row.values.get(col_idx) { + if val != &Value::Null { + let hashable_row = HashableRow::new(col_idx as i64, vec![val.clone()]); + let group_entry = batch_distinct_weights + .entry(group_key_str.clone()) + .or_default(); + let weight_entry = + group_entry.entry((col_idx, hashable_row)).or_insert(0); + *weight_entry += weight; + } + } + } + } + temp_keys.insert(group_key_str.clone(), group_key.clone()); - // Apply the delta to the temporary state + // Apply the delta to the state with pre-computed transitions state.apply_delta( &row.values, *weight, &self.aggregates, &self.input_column_names, + &distinct_transitions, ); } // Generate output delta from temporary states and collect final states let mut final_states = HashMap::new(); - for (group_key_str, state) in existing_groups { - let group_key = temp_keys.get(group_key_str).cloned().unwrap_or_default(); + for (group_key_str, state) in existing_groups.iter() { + let group_key = if let Some(key) = temp_keys.get(group_key_str) { + key.clone() + } else if let Some(old_row) = old_values.get(group_key_str) { + // Extract group key from old row (first N columns where N = group_by.len()) + old_row[0..self.group_by.len()].to_vec() + } else { + vec![] + }; // Generate synthetic rowid for this group let result_key = self.generate_group_rowid(group_key_str); - if let Some(old_row_values) = old_values.get(group_key_str) { - let old_row = HashableRow::new(result_key, old_row_values.clone()); - output_delta.changes.push((old_row, -1)); - } - // Always store the state for persistence (even if count=0, we need to delete it) final_states.insert(group_key_str.clone(), (group_key.clone(), state.clone())); - // Only include groups with count > 0 in the output delta - if state.count > 0 { - // Build output row: group_by columns + aggregate values - let mut output_values = group_key.clone(); - let aggregate_values = state.to_values(&self.aggregates); - output_values.extend(aggregate_values); + // Check if we only have DISTINCT (no other aggregates) + if self.is_distinct_only { + // For plain DISTINCT, we output each distinct VALUE (not group) + // state.count tells us how many distinct values have positive weight - let output_row = HashableRow::new(result_key, output_values.clone()); - output_delta.changes.push((output_row, 1)); + // Check if this group had any values before + let old_existed = pre_existing_groups.contains(group_key_str); + let new_exists = state.count > 0; + + if old_existed && !new_exists { + // All distinct values removed: output deletion + if let Some(old_row_values) = old_values.get(group_key_str) { + let old_row = HashableRow::new(result_key, old_row_values.clone()); + output_delta.changes.push((old_row, -1)); + } else { + // For plain DISTINCT, the old row is just the group key itself + let old_row = HashableRow::new(result_key, group_key.clone()); + output_delta.changes.push((old_row, -1)); + } + } else if !old_existed && new_exists { + // First distinct value added: output insertion + let output_values = group_key.clone(); + // DISTINCT doesn't add aggregate values - just the group key + let output_row = HashableRow::new(result_key, output_values.clone()); + output_delta.changes.push((output_row, 1)); + } + // No output if staying positive or staying at zero + } else { + // Normal aggregates: output deletions and insertions as before + if let Some(old_row_values) = old_values.get(group_key_str) { + let old_row = HashableRow::new(result_key, old_row_values.clone()); + output_delta.changes.push((old_row, -1)); + } + + // Only include groups with count > 0 in the output delta + if state.count > 0 { + // Build output row: group_by columns + aggregate values + let mut output_values = group_key.clone(); + let aggregate_values = state.to_values(&self.aggregates); + output_values.extend(aggregate_values); + + let output_row = HashableRow::new(result_key, output_values.clone()); + output_delta.changes.push((output_row, 1)); + } } } + (output_delta, final_states) } + /// Extract distinct values from delta changes for batch tracking + fn extract_distinct_deltas(&self, delta: &Delta) -> DistinctDeltas { + let mut distinct_deltas: DistinctDeltas = HashMap::new(); + + for (row, weight) in &delta.changes { + let group_key = self.extract_group_key(&row.values); + let group_key_str = Self::group_key_to_string(&group_key); + + // Get or create entry for this group + let group_entry = distinct_deltas.entry(group_key_str.clone()).or_default(); + + if self.is_distinct_only { + // For plain DISTINCT, the group itself is what we're tracking + // We store a single entry that represents "this group exists N times" + // Use column index 0 with the group_key_str as the value + // For group key, use 0 as column index + let key = ( + 0, + HashableRow::new(0, vec![Value::Text(group_key_str.clone().into())]), + ); + let value_entry = group_entry.entry(key).or_insert(0); + *value_entry += weight; + } else { + // For DISTINCT aggregates, track individual column values + for &col_idx in &self.distinct_columns { + if let Some(val) = row.values.get(col_idx) { + // Skip NULL values + if val == &Value::Null { + continue; + } + + let key = (col_idx, HashableRow::new(col_idx as i64, vec![val.clone()])); + let value_entry = group_entry.entry(key).or_insert(0); + *value_entry += weight; + } + } + } + } + + distinct_deltas + } + /// Extract MIN/MAX values from delta changes for persistence to index fn extract_min_max_deltas(&self, delta: &Delta) -> MinMaxDeltas { let mut min_max_deltas: MinMaxDeltas = HashMap::new(); @@ -1104,14 +1775,25 @@ impl IncrementalOperator for AggregateOperator { self.commit_state = AggregateCommitState::Eval { eval_state }; } AggregateCommitState::Eval { ref mut eval_state } => { - // Extract input delta before eval for MIN/MAX processing - let input_delta = eval_state.extract_delta(); + // Clone the delta for MIN/MAX processing before eval consumes it + // We need to get the delta from the eval_state if it's still in Init + let input_delta = match eval_state { + EvalState::Init { deltas } => deltas.left.clone(), + _ => Delta::new(), // Empty delta if already processed + }; - // Extract MIN/MAX deltas before any I/O operations + // Extract MIN/MAX and DISTINCT deltas before any I/O operations let min_max_deltas = self.extract_min_max_deltas(&input_delta); + // For plain DISTINCT, we need to extract deltas too + let distinct_deltas = if self.has_distinct() || self.is_distinct_only { + self.extract_distinct_deltas(&input_delta) + } else { + HashMap::new() + }; - // Create a new eval state with the same delta - *eval_state = EvalState::from_delta(input_delta.clone()); + // Get old counts before eval modifies the states + // We need to extract this from the eval_state before it's consumed + let old_states = HashMap::new(); // TODO: Extract from eval_state let (output_delta, computed_states) = return_and_restore_if_io!( &mut self.commit_state, @@ -1122,17 +1804,23 @@ impl IncrementalOperator for AggregateOperator { self.commit_state = AggregateCommitState::PersistDelta { delta: output_delta, computed_states, + old_states, current_idx: 0, write_row: WriteRow::new(), - min_max_deltas, // Store for later use + min_max_deltas, // Store for later use + distinct_deltas, // Store for distinct processing + input_delta, // Store original input }; } AggregateCommitState::PersistDelta { delta, computed_states, + old_states, current_idx, write_row, min_max_deltas, + distinct_deltas, + input_delta, } => { let states_vec: Vec<_> = computed_states.iter().collect(); @@ -1141,28 +1829,59 @@ impl IncrementalOperator for AggregateOperator { self.commit_state = AggregateCommitState::PersistMinMax { delta: delta.clone(), min_max_persist_state: MinMaxPersistState::new(min_max_deltas.clone()), + distinct_deltas: distinct_deltas.clone(), }; } else { let (group_key_str, (group_key, agg_state)) = states_vec[*current_idx]; - // Build the key components for the new table structure - // For regular aggregates, use column_index=0 and AGG_TYPE_REGULAR + // Skip aggregate state persistence for plain DISTINCT + // Plain DISTINCT only uses the distinct value weights, not aggregate state + if self.is_distinct_only { + // Skip to next - distinct values are handled in PersistDistinctValues + // We still need to transition states properly + let next_idx = *current_idx + 1; + if next_idx >= states_vec.len() { + // Done with all groups, move to PersistMinMax + self.commit_state = AggregateCommitState::PersistMinMax { + delta: std::mem::take(delta), + min_max_persist_state: MinMaxPersistState::new(std::mem::take( + min_max_deltas, + )), + distinct_deltas: std::mem::take(distinct_deltas), + }; + } else { + // Move to next group + self.commit_state = AggregateCommitState::PersistDelta { + delta: std::mem::take(delta), + computed_states: std::mem::take(computed_states), + old_states: std::mem::take(old_states), + current_idx: next_idx, + write_row: WriteRow::new(), + min_max_deltas: std::mem::take(min_max_deltas), + distinct_deltas: std::mem::take(distinct_deltas), + input_delta: std::mem::take(input_delta), + }; + } + continue; + } + + // Build the key components for regular aggregates let operator_storage_id = generate_storage_id(self.operator_id, 0, AGG_TYPE_REGULAR); let zset_hash = self.generate_group_hash(group_key_str); - let element_id = 0i64; + let element_id = Hash128::new(0, 0); // Always zeros for regular aggregates - // Determine weight: -1 to delete (cancels existing weight=1), 1 to insert/update + // Determine weight: 1 if exists, -1 if deleted let weight = if agg_state.count == 0 { -1 } else { 1 }; - // Serialize the aggregate state with group key (even for deletion, we need a row) + // Serialize the aggregate state (only for regular aggregates, not plain DISTINCT) let state_blob = agg_state.to_blob(&self.aggregates, group_key); let blob_value = Value::Blob(state_blob); // Build the aggregate storage format: [operator_id, zset_hash, element_id, value, weight] let operator_id_val = Value::Integer(operator_storage_id); let zset_hash_val = zset_hash.to_value(); - let element_id_val = Value::Integer(element_id); + let element_id_val = element_id.to_value(); let blob_val = blob_value.clone(); // Create index key - the first 3 columns of our primary key @@ -1185,24 +1904,27 @@ impl IncrementalOperator for AggregateOperator { let delta = std::mem::take(delta); let computed_states = std::mem::take(computed_states); let min_max_deltas = std::mem::take(min_max_deltas); + let distinct_deltas = std::mem::take(distinct_deltas); + let input_delta = std::mem::take(input_delta); self.commit_state = AggregateCommitState::PersistDelta { delta, computed_states, + old_states: std::mem::take(old_states), current_idx: *current_idx + 1, write_row: WriteRow::new(), // Reset for next write min_max_deltas, + distinct_deltas, + input_delta, }; } } AggregateCommitState::PersistMinMax { delta, min_max_persist_state, + distinct_deltas, } => { - if !self.has_min_max() { - let delta = std::mem::take(delta); - self.commit_state = AggregateCommitState::Done { delta }; - } else { + if self.has_min_max() { return_and_restore_if_io!( &mut self.commit_state, state, @@ -1213,10 +1935,37 @@ impl IncrementalOperator for AggregateOperator { |group_key_str| self.generate_group_hash(group_key_str) ) ); - - let delta = std::mem::take(delta); - self.commit_state = AggregateCommitState::Done { delta }; } + + // Transition to PersistDistinctValues + let delta = std::mem::take(delta); + let distinct_deltas = std::mem::take(distinct_deltas); + let distinct_persist_state = DistinctPersistState::new(distinct_deltas); + self.commit_state = AggregateCommitState::PersistDistinctValues { + delta, + distinct_persist_state, + }; + } + AggregateCommitState::PersistDistinctValues { + delta, + distinct_persist_state, + } => { + if self.has_distinct() { + // Use the state machine to persist distinct values to BTree + return_and_restore_if_io!( + &mut self.commit_state, + state, + distinct_persist_state.persist_distinct_values( + self.operator_id, + cursors, + |group_key_str| self.generate_group_hash(group_key_str) + ) + ); + } + + // Transition to Done + let delta = std::mem::take(delta); + self.commit_state = AggregateCommitState::Done { delta }; } AggregateCommitState::Done { delta } => { self.commit_state = AggregateCommitState::Idle; @@ -1755,6 +2504,484 @@ pub enum MinMaxPersistState { Done, } +/// State machine for fetching distinct values from BTree storage +#[derive(Debug)] +pub enum FetchDistinctState { + Init { + groups_to_fetch: Vec<(String, HashMap>)>, + }, + FetchGroup { + groups_to_fetch: Vec<(String, HashMap>)>, + group_idx: usize, + value_idx: usize, + values_to_fetch: Vec<(usize, Value)>, + }, + ReadValue { + groups_to_fetch: Vec<(String, HashMap>)>, + group_idx: usize, + value_idx: usize, + values_to_fetch: Vec<(usize, Value)>, + group_key: String, + column_idx: usize, + value: Value, + }, + Done, +} + +impl FetchDistinctState { + /// Add fetch entry for plain DISTINCT - the group itself is the distinct value + fn add_plain_distinct_fetch( + group_entry: &mut HashMap>, + group_key_str: &str, + ) { + let group_value = Value::Text(group_key_str.to_string().into()); + group_entry + .entry(0) + .or_default() + .insert(HashableRow::new(0, vec![group_value])); + } + + /// Add fetch entries for DISTINCT aggregates - individual column values + fn add_aggregate_distinct_fetch( + group_entry: &mut HashMap>, + row_values: &[Value], + distinct_columns: &HashSet, + ) { + for &col_idx in distinct_columns { + if let Some(val) = row_values.get(col_idx) { + if val != &Value::Null { + group_entry + .entry(col_idx) + .or_default() + .insert(HashableRow::new(col_idx as i64, vec![val.clone()])); + } + } + } + } + + pub fn new( + delta: &Delta, + distinct_columns: &HashSet, + extract_group_key: impl Fn(&[Value]) -> Vec, + group_key_to_string: impl Fn(&[Value]) -> String, + existing_groups: &HashMap, + is_plain_distinct: bool, + ) -> Self { + let mut groups_to_fetch: HashMap>> = + HashMap::new(); + + for (row, _weight) in &delta.changes { + let group_key = extract_group_key(&row.values); + let group_key_str = group_key_to_string(&group_key); + + // Skip groups we don't need to fetch + // For DISTINCT aggregates, only fetch for existing groups + if !is_plain_distinct && !existing_groups.contains_key(&group_key_str) { + continue; + } + + let group_entry = groups_to_fetch.entry(group_key_str.clone()).or_default(); + + if is_plain_distinct { + Self::add_plain_distinct_fetch(group_entry, &group_key_str); + } else { + Self::add_aggregate_distinct_fetch(group_entry, &row.values, distinct_columns); + } + } + + let groups_to_fetch: Vec<_> = groups_to_fetch.into_iter().collect(); + + if groups_to_fetch.is_empty() { + Self::Done + } else { + Self::Init { groups_to_fetch } + } + } + + pub fn fetch_distinct_values( + &mut self, + operator_id: i64, + existing_groups: &mut HashMap, + cursors: &mut DbspStateCursors, + generate_group_hash: impl Fn(&str) -> Hash128, + is_plain_distinct: bool, + ) -> Result> { + loop { + match self { + FetchDistinctState::Init { groups_to_fetch } => { + if groups_to_fetch.is_empty() { + *self = FetchDistinctState::Done; + continue; + } + + let groups = std::mem::take(groups_to_fetch); + *self = FetchDistinctState::FetchGroup { + groups_to_fetch: groups, + group_idx: 0, + value_idx: 0, + values_to_fetch: Vec::new(), + }; + } + FetchDistinctState::FetchGroup { + groups_to_fetch, + group_idx, + value_idx, + values_to_fetch, + } => { + if *group_idx >= groups_to_fetch.len() { + *self = FetchDistinctState::Done; + continue; + } + + // Build list of values to fetch for current group if not done + if values_to_fetch.is_empty() && *group_idx < groups_to_fetch.len() { + let (_group_key, cols_values) = &groups_to_fetch[*group_idx]; + for (col_idx, values) in cols_values { + for hashable_row in values { + // Extract the value from HashableRow + values_to_fetch + .push((*col_idx, hashable_row.values.first().unwrap().clone())); + } + } + } + + if *value_idx >= values_to_fetch.len() { + // Move to next group + *group_idx += 1; + *value_idx = 0; + values_to_fetch.clear(); + continue; + } + + // Fetch current value + let (group_key, _) = groups_to_fetch[*group_idx].clone(); + let (column_idx, value) = values_to_fetch[*value_idx].clone(); + + let groups = std::mem::take(groups_to_fetch); + let values = std::mem::take(values_to_fetch); + *self = FetchDistinctState::ReadValue { + groups_to_fetch: groups, + group_idx: *group_idx, + value_idx: *value_idx, + values_to_fetch: values, + group_key, + column_idx, + value, + }; + } + FetchDistinctState::ReadValue { + groups_to_fetch, + group_idx, + value_idx, + values_to_fetch, + group_key, + column_idx, + value, + } => { + // Read the record from BTree using the same pattern as WriteRow: + // 1. Seek in index to find the entry + // 2. Get rowid from index cursor + // 3. Use rowid to read from table cursor + let storage_id = + generate_storage_id(operator_id, *column_idx, AGG_TYPE_DISTINCT); + let zset_hash = generate_group_hash(group_key); + let element_id = hash_value(value, *column_idx); + + // First, seek in the index cursor + let index_key = vec![ + Value::Integer(storage_id), + zset_hash.to_value(), + element_id.to_value(), + ]; + let index_record = ImmutableRecord::from_values(&index_key, index_key.len()); + + let seek_result = return_if_io!(cursors.index_cursor.seek( + SeekKey::IndexKey(&index_record), + SeekOp::GE { eq_only: true } + )); + + // Early exit if not found in index + if !matches!(seek_result, SeekResult::Found) { + let groups = std::mem::take(groups_to_fetch); + let values = std::mem::take(values_to_fetch); + *self = FetchDistinctState::FetchGroup { + groups_to_fetch: groups, + group_idx: *group_idx, + value_idx: *value_idx + 1, + values_to_fetch: values, + }; + continue; + } + + // Get the rowid from the index cursor + let rowid = return_if_io!(cursors.index_cursor.rowid()); + + // Early exit if no rowid + let rowid = match rowid { + Some(id) => id, + None => { + let groups = std::mem::take(groups_to_fetch); + let values = std::mem::take(values_to_fetch); + *self = FetchDistinctState::FetchGroup { + groups_to_fetch: groups, + group_idx: *group_idx, + value_idx: *value_idx + 1, + values_to_fetch: values, + }; + continue; + } + }; + + // Now seek in the table cursor using the rowid + let table_result = return_if_io!(cursors + .table_cursor + .seek(SeekKey::TableRowId(rowid), SeekOp::GE { eq_only: true })); + + // Early exit if not found in table + if !matches!(table_result, SeekResult::Found) { + let groups = std::mem::take(groups_to_fetch); + let values = std::mem::take(values_to_fetch); + *self = FetchDistinctState::FetchGroup { + groups_to_fetch: groups, + group_idx: *group_idx, + value_idx: *value_idx + 1, + values_to_fetch: values, + }; + continue; + } + + // Read the actual record from the table cursor + let record = return_if_io!(cursors.table_cursor.record()); + + if let Some(r) = record { + let values = r.get_values(); + + // The table has 5 columns: storage_id, zset_hash, element_id, blob, weight + // The weight is at index 4 + if values.len() >= 5 { + // Get the weight directly from column 4 + let weight = match values[4].to_owned() { + Value::Integer(w) => w, + _ => 0, + }; + + // Store the weight in the existing group's state + let state = existing_groups.entry(group_key.clone()).or_default(); + state.distinct_value_weights.insert( + ( + *column_idx, + HashableRow::new(*column_idx as i64, vec![value.clone()]), + ), + weight, + ); + } + } + + // Move to next value + let groups = std::mem::take(groups_to_fetch); + let values = std::mem::take(values_to_fetch); + *self = FetchDistinctState::FetchGroup { + groups_to_fetch: groups, + group_idx: *group_idx, + value_idx: *value_idx + 1, + values_to_fetch: values, + }; + } + FetchDistinctState::Done => { + // For plain DISTINCT, construct AggregateState from the weights we fetched + if is_plain_distinct { + for (_group_key_str, state) in existing_groups.iter_mut() { + // For plain DISTINCT, sum all the weights to get total count + // Each weight represents how many times the distinct value appears + let total_weight: i64 = state.distinct_value_weights.values().sum(); + + // Set the count based on total weight + state.count = total_weight; + } + } + return Ok(IOResult::Done(())); + } + } + } + } +} + +/// State machine for persisting distinct values to BTree storage +#[derive(Debug)] +pub enum DistinctPersistState { + Init { + distinct_deltas: DistinctDeltas, + group_keys: Vec, + }, + ProcessGroup { + distinct_deltas: DistinctDeltas, + group_keys: Vec, + group_idx: usize, + value_keys: Vec<(usize, HashableRow)>, // (col_idx, value) pairs for current group + value_idx: usize, + }, + WriteValue { + distinct_deltas: DistinctDeltas, + group_keys: Vec, + group_idx: usize, + value_keys: Vec<(usize, HashableRow)>, + value_idx: usize, + group_key: String, + col_idx: usize, + value: Value, + weight: isize, + write_row: WriteRow, + }, + Done, +} + +impl DistinctPersistState { + pub fn new(distinct_deltas: DistinctDeltas) -> Self { + let group_keys: Vec = distinct_deltas.keys().cloned().collect(); + Self::Init { + distinct_deltas, + group_keys, + } + } + + pub fn persist_distinct_values( + &mut self, + operator_id: i64, + cursors: &mut DbspStateCursors, + generate_group_hash: impl Fn(&str) -> Hash128, + ) -> Result> { + loop { + match self { + DistinctPersistState::Init { + distinct_deltas, + group_keys, + } => { + let distinct_deltas = std::mem::take(distinct_deltas); + let group_keys = std::mem::take(group_keys); + *self = DistinctPersistState::ProcessGroup { + distinct_deltas, + group_keys, + group_idx: 0, + value_keys: Vec::new(), + value_idx: 0, + }; + } + DistinctPersistState::ProcessGroup { + distinct_deltas, + group_keys, + group_idx, + value_keys, + value_idx, + } => { + // Check if we're past all groups + if *group_idx >= group_keys.len() { + *self = DistinctPersistState::Done; + continue; + } + + // Check if we need to get value_keys for current group + if value_keys.is_empty() && *group_idx < group_keys.len() { + let group_key_str = &group_keys[*group_idx]; + if let Some(group_values) = distinct_deltas.get(group_key_str) { + *value_keys = group_values.keys().cloned().collect(); + } + } + + // Check if we have more values in current group + if *value_idx >= value_keys.len() { + *group_idx += 1; + *value_idx = 0; + value_keys.clear(); + continue; + } + + // Process current value + let group_key = group_keys[*group_idx].clone(); + let (col_idx, hashable_row) = value_keys[*value_idx].clone(); + let weight = distinct_deltas[&group_key][&(col_idx, hashable_row.clone())]; + // Extract the value from HashableRow (it's the first element in values vector) + let value = hashable_row.values.first().unwrap().clone(); + + let distinct_deltas = std::mem::take(distinct_deltas); + let group_keys = std::mem::take(group_keys); + let value_keys = std::mem::take(value_keys); + *self = DistinctPersistState::WriteValue { + distinct_deltas, + group_keys, + group_idx: *group_idx, + value_keys, + value_idx: *value_idx, + group_key, + col_idx, + value, + weight, + write_row: WriteRow::new(), + }; + } + DistinctPersistState::WriteValue { + distinct_deltas, + group_keys, + group_idx, + value_keys, + value_idx, + group_key, + col_idx, + value, + weight, + write_row, + } => { + // Build the key components for DISTINCT storage + let storage_id = generate_storage_id(operator_id, *col_idx, AGG_TYPE_DISTINCT); + let zset_hash = generate_group_hash(group_key); + + // For DISTINCT, element_id is a hash of the value + let element_id = hash_value(value, *col_idx); + + // Create index key + let index_key = vec![ + Value::Integer(storage_id), + zset_hash.to_value(), + element_id.to_value(), + ]; + + // Record values (operator_id, zset_hash, element_id, weight_blob) + // Store weight as a minimal AggregateState blob so ReadRecord can parse it + let weight_state = AggregateState { + count: *weight as i64, + ..Default::default() + }; + let weight_blob = weight_state.to_blob(&[], &[]); + + let record_values = vec![ + Value::Integer(storage_id), + zset_hash.to_value(), + element_id.to_value(), + Value::Blob(weight_blob), + ]; + + // Write to BTree + return_if_io!(write_row.write_row(cursors, index_key, record_values, *weight)); + + // Move to next value + let distinct_deltas = std::mem::take(distinct_deltas); + let group_keys = std::mem::take(group_keys); + let value_keys = std::mem::take(value_keys); + *self = DistinctPersistState::ProcessGroup { + distinct_deltas, + group_keys, + group_idx: *group_idx, + value_keys, + value_idx: *value_idx + 1, + }; + } + DistinctPersistState::Done => { + return Ok(IOResult::Done(())); + } + } + } + } +} + impl MinMaxPersistState { pub fn new(min_max_deltas: MinMaxDeltas) -> Self { let group_keys: Vec = min_max_deltas.keys().cloned().collect(); diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index a319bb9ad..316f31742 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -5,6 +5,7 @@ //! //! Based on the DBSP paper: "DBSP: Automatic Incremental View Maintenance for Rich Query Languages" +use crate::incremental::aggregate_operator::AggregateOperator; use crate::incremental::dbsp::{Delta, DeltaPair}; use crate::incremental::expr_compiler::CompiledExpression; use crate::incremental::operator::{ @@ -300,6 +301,8 @@ pub enum DbspOperator { Input { name: String, schema: SchemaRef }, /// Merge operator for combining streams (used in recursive CTEs and UNION) Merge { schema: SchemaRef }, + /// Distinct operator - removes duplicates + Distinct { schema: SchemaRef }, } /// Represents an expression in DBSP @@ -818,6 +821,13 @@ impl DbspCircuit { schema.columns.len() )?; } + DbspOperator::Distinct { schema } => { + writeln!( + f, + "{indent}Distinct[{node_id}]: (schema: {} columns)", + schema.columns.len() + )?; + } } for input_id in &node.inputs { @@ -1143,16 +1153,34 @@ impl DbspCompiler { } } - // Compile aggregate expressions + // Compile aggregate expressions (both DISTINCT and regular) let mut aggregate_functions = Vec::new(); for expr in &agg.aggr_expr { - if let LogicalExpr::AggregateFunction { fun, args, .. } = expr { + if let LogicalExpr::AggregateFunction { fun, args, distinct } = expr { use crate::function::AggFunc; use crate::incremental::aggregate_operator::AggregateFunction; match fun { AggFunc::Count | AggFunc::Count0 => { - aggregate_functions.push(AggregateFunction::Count); + if *distinct { + // COUNT(DISTINCT col) + if args.is_empty() { + return Err(LimboError::ParseError("COUNT(DISTINCT) requires an argument".to_string())); + } + if let LogicalExpr::Column(col) = &args[0] { + let (col_idx, _) = input_schema.find_column(&col.name, col.table.as_deref()) + .ok_or_else(|| LimboError::ParseError( + format!("COUNT(DISTINCT) column '{}' not found in input", col.name) + ))?; + aggregate_functions.push(AggregateFunction::CountDistinct(col_idx)); + } else { + return Err(LimboError::ParseError( + "Only column references are supported in aggregate functions for incremental views".to_string() + )); + } + } else { + aggregate_functions.push(AggregateFunction::Count); + } } AggFunc::Sum => { if args.is_empty() { @@ -1164,7 +1192,11 @@ impl DbspCompiler { .ok_or_else(|| LimboError::ParseError( format!("SUM column '{}' not found in input", col.name) ))?; - aggregate_functions.push(AggregateFunction::Sum(col_idx)); + if *distinct { + aggregate_functions.push(AggregateFunction::SumDistinct(col_idx)); + } else { + aggregate_functions.push(AggregateFunction::Sum(col_idx)); + } } else { return Err(LimboError::ParseError( "Only column references are supported in aggregate functions for incremental views".to_string() @@ -1180,7 +1212,11 @@ impl DbspCompiler { .ok_or_else(|| LimboError::ParseError( format!("AVG column '{}' not found in input", col.name) ))?; - aggregate_functions.push(AggregateFunction::Avg(col_idx)); + if *distinct { + aggregate_functions.push(AggregateFunction::AvgDistinct(col_idx)); + } else { + aggregate_functions.push(AggregateFunction::Avg(col_idx)); + } } else { return Err(LimboError::ParseError( "Only column references are supported in aggregate functions for incremental views".to_string() @@ -1364,14 +1400,48 @@ impl DbspCompiler { // Handle UNION and UNION ALL self.compile_union(union) } + LogicalPlan::Distinct(distinct) => { + // DISTINCT is implemented as GROUP BY all columns with a special aggregate + let input_id = self.compile_plan(&distinct.input)?; + let input_schema = distinct.input.schema(); + + // Create GROUP BY indices for all columns + let group_by: Vec = (0..input_schema.columns.len()).collect(); + + // Column names for the operator + let input_column_names: Vec = input_schema.columns.iter() + .map(|col| col.name.clone()) + .collect(); + + // Create the aggregate operator with DISTINCT mode + let operator_id = self.circuit.next_id; + let executable: Box = Box::new( + AggregateOperator::new( + operator_id, + group_by, + vec![], // Empty aggregates indicates plain DISTINCT + input_column_names, + ), + ); + + // Add the node to the circuit + let node_id = self.circuit.add_node( + DbspOperator::Distinct { + schema: input_schema.clone(), + }, + vec![input_id], + executable, + ); + + Ok(node_id) + } _ => Err(LimboError::ParseError( format!("Unsupported operator in DBSP compiler: only Filter, Projection, Join, Aggregate, and Union are supported, got: {:?}", match plan { LogicalPlan::Sort(_) => "Sort", LogicalPlan::Limit(_) => "Limit", LogicalPlan::Union(_) => "Union", - LogicalPlan::Distinct(_) => "Distinct", - LogicalPlan::EmptyRelation(_) => "EmptyRelation", + LogicalPlan::EmptyRelation(_) => "EmptyRelation", LogicalPlan::Values(_) => "Values", LogicalPlan::WithCTE(_) => "WithCTE", LogicalPlan::CTERef(_) => "CTERef", diff --git a/core/incremental/operator.rs b/core/incremental/operator.rs index 613497e8d..fee869152 100644 --- a/core/incremental/operator.rs +++ b/core/incremental/operator.rs @@ -4092,4 +4092,518 @@ mod tests { "MIN(col1) should be 20 (new data only)" ); } + + #[test] + fn test_distinct_removes_duplicates() { + let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); + let index_def = create_dbsp_state_index(index_root_page_id); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); + let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); + + // Create a DISTINCT operator that groups by all columns + let mut operator = AggregateOperator::new( + 0, // operator_id + vec![0], // group by column 0 (value) + vec![], // Empty aggregates for plain DISTINCT + vec!["value".to_string()], + ); + + // Create input with duplicates + let mut input = Delta::new(); + input.insert(1, vec![Value::Integer(100)]); // First 100 + input.insert(2, vec![Value::Integer(200)]); // First 200 + input.insert(3, vec![Value::Integer(100)]); // Duplicate 100 + input.insert(4, vec![Value::Integer(300)]); // First 300 + input.insert(5, vec![Value::Integer(200)]); // Duplicate 200 + input.insert(6, vec![Value::Integer(100)]); // Another duplicate 100 + + // Execute commit (for materialized views) instead of eval + let result = pager + .io + .block(|| operator.commit((&input).into(), &mut cursors)) + .unwrap(); + + // Should have exactly 3 distinct values (100, 200, 300) + let distinct_values: std::collections::HashSet = result + .changes + .iter() + .map(|(row, _weight)| match &row.values[0] { + Value::Integer(i) => *i, + _ => panic!("Expected integer value"), + }) + .collect(); + + assert_eq!( + distinct_values.len(), + 3, + "Should have exactly 3 distinct values" + ); + assert!(distinct_values.contains(&100)); + assert!(distinct_values.contains(&200)); + assert!(distinct_values.contains(&300)); + + // All weights should be 1 (distinct normalizes weights) + for (_row, weight) in &result.changes { + assert_eq!(*weight, 1, "DISTINCT should output weight 1 for all groups"); + } + } + + #[test] + fn test_distinct_incremental_updates() { + let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); + let index_def = create_dbsp_state_index(index_root_page_id); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); + let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); + + let mut operator = AggregateOperator::new( + 0, + vec![0, 1], // group by both columns + vec![], // Empty aggregates for plain DISTINCT + vec!["category".to_string(), "value".to_string()], + ); + + // First batch: insert some values + let mut delta1 = Delta::new(); + delta1.insert(1, vec![Value::Text("A".into()), Value::Integer(100)]); + delta1.insert(2, vec![Value::Text("B".into()), Value::Integer(200)]); + delta1.insert(3, vec![Value::Text("A".into()), Value::Integer(100)]); // Duplicate + + // Commit first batch + let result1 = pager + .io + .block(|| operator.commit((&delta1).into(), &mut cursors)) + .unwrap(); + + // Should have 2 distinct groups: (A,100) and (B,200) + assert_eq!( + result1.changes.len(), + 2, + "First commit should output 2 distinct groups" + ); + + // Verify each group appears with weight +1 + for (_row, weight) in &result1.changes { + assert_eq!(*weight, 1, "New groups should have weight +1"); + } + + // Second batch: delete one instance of (A,100) and add new group + let mut delta2 = Delta::new(); + delta2.delete(1, vec![Value::Text("A".into()), Value::Integer(100)]); + delta2.insert(4, vec![Value::Text("C".into()), Value::Integer(300)]); + + let result2 = pager + .io + .block(|| operator.commit((&delta2).into(), &mut cursors)) + .unwrap(); + + // Should only output the new group (C,300) with weight +1 + // (A,100) still exists (weight went from 2 to 1), so no output for it + assert_eq!( + result2.changes.len(), + 1, + "Second commit should only output new group" + ); + + let (row, weight) = &result2.changes[0]; + assert_eq!(*weight, 1); + assert_eq!(row.values[0], Value::Text("C".into())); + assert_eq!(row.values[1], Value::Integer(300)); + + // Third batch: delete last instance of (A,100) + let mut delta3 = Delta::new(); + delta3.delete(3, vec![Value::Text("A".into()), Value::Integer(100)]); + + let result3 = pager + .io + .block(|| operator.commit((&delta3).into(), &mut cursors)) + .unwrap(); + + // Should output (A,100) with weight -1 (group disappeared) + assert_eq!( + result3.changes.len(), + 1, + "Third commit should output disappeared group" + ); + + let (row, weight) = &result3.changes[0]; + assert_eq!(*weight, -1, "Disappeared group should have weight -1"); + assert_eq!(row.values[0], Value::Text("A".into())); + assert_eq!(row.values[1], Value::Integer(100)) + } + + #[test] + fn test_distinct_state_transitions() { + let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); + let index_def = create_dbsp_state_index(index_root_page_id); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); + let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); + + // Test that DISTINCT correctly tracks state transitions (0 ↔ positive) + let mut operator = AggregateOperator::new( + 0, + vec![0], + vec![], // Empty aggregates for plain DISTINCT + vec!["value".to_string()], + ); + + // Insert value with weight 3 + let mut delta1 = Delta::new(); + for i in 1..=3 { + delta1.insert(i, vec![Value::Integer(100)]); + } + + let result1 = pager + .io + .block(|| operator.commit((&delta1).into(), &mut cursors)) + .unwrap(); + + assert_eq!(result1.changes.len(), 1); + assert_eq!(result1.changes[0].1, 1, "First appearance should output +1"); + + // Remove 2 instances (weight goes from 3 to 1, still positive) + let mut delta2 = Delta::new(); + for i in 1..=2 { + delta2.delete(i, vec![Value::Integer(100)]); + } + + let result2 = pager + .io + .block(|| operator.commit((&delta2).into(), &mut cursors)) + .unwrap(); + + assert_eq!(result2.changes.len(), 0, "No transition, no output"); + + // Remove last instance (weight goes from 1 to 0) + let mut delta3 = Delta::new(); + delta3.delete(3, vec![Value::Integer(100)]); + + let result3 = pager + .io + .block(|| operator.commit((&delta3).into(), &mut cursors)) + .unwrap(); + + assert_eq!(result3.changes.len(), 1); + assert_eq!(result3.changes[0].1, -1, "Disappearance should output -1"); + + // Re-add the value (weight goes from 0 to 1) + let mut delta4 = Delta::new(); + delta4.insert(4, vec![Value::Integer(100)]); + + let result4 = pager + .io + .block(|| operator.commit((&delta4).into(), &mut cursors)) + .unwrap(); + + assert_eq!(result4.changes.len(), 1); + assert_eq!(result4.changes[0].1, 1, "Reappearance should output +1") + } + + #[test] + fn test_distinct_persistence() { + let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); + let index_def = create_dbsp_state_index(index_root_page_id); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); + let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); + + // First operator instance + let mut operator1 = AggregateOperator::new( + 0, + vec![0], + vec![], // Empty aggregates for plain DISTINCT + vec!["value".to_string()], + ); + + // Insert some values + let mut delta1 = Delta::new(); + delta1.insert(1, vec![Value::Integer(100)]); + delta1.insert(2, vec![Value::Integer(100)]); // Duplicate + delta1.insert(3, vec![Value::Integer(200)]); + + let result1 = pager + .io + .block(|| operator1.commit((&delta1).into(), &mut cursors)) + .unwrap(); + + // Should have 2 distinct values + assert_eq!(result1.changes.len(), 2, "Should output 2 distinct values"); + + // Create new operator instance with same ID (simulates restart) + // Create new cursors for the second operator + let table_cursor2 = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); + let index_cursor2 = + BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); + let mut cursors2 = DbspStateCursors::new(table_cursor2, index_cursor2); + + let mut operator2 = AggregateOperator::new( + 0, // Same operator_id + vec![0], + vec![], // Empty aggregates for plain DISTINCT + vec!["value".to_string()], + ); + + // Add new value and delete existing (100 has weight 2, so it stays) + let mut delta2 = Delta::new(); + delta2.insert(4, vec![Value::Integer(300)]); + delta2.delete(1, vec![Value::Integer(100)]); // Remove one of the 100s + + let result2 = pager + .io + .block(|| operator2.commit((&delta2).into(), &mut cursors2)) + .unwrap(); + + // Should only output the new value (300) + // 100 still exists (went from weight 2 to 1) + assert_eq!(result2.changes.len(), 1, "Should only output new value"); + assert_eq!(result2.changes[0].1, 1, "Should be insertion"); + assert_eq!(result2.changes[0].0.values[0], Value::Integer(300)); + + // Now delete the last instance of 100 + let mut delta3 = Delta::new(); + delta3.delete(2, vec![Value::Integer(100)]); + + let result3 = pager + .io + .block(|| operator2.commit((&delta3).into(), &mut cursors2)) + .unwrap(); + + // Should output deletion of 100 + assert_eq!(result3.changes.len(), 1, "Should output deletion"); + assert_eq!(result3.changes[0].1, -1, "Should be deletion"); + assert_eq!(result3.changes[0].0.values[0], Value::Integer(100)); + } + + #[test] + fn test_distinct_batch_with_multiple_groups() { + let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); + let index_def = create_dbsp_state_index(index_root_page_id); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); + let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); + + let mut operator = AggregateOperator::new( + 0, + vec![0, 1], // group by category and value + vec![], // Empty aggregates for plain DISTINCT + vec!["category".to_string(), "value".to_string()], + ); + + // Create a complex batch with multiple groups and duplicates within each group + let mut delta = Delta::new(); + + // Group (A, 100): 3 instances + delta.insert(1, vec![Value::Text("A".into()), Value::Integer(100)]); + delta.insert(2, vec![Value::Text("A".into()), Value::Integer(100)]); + delta.insert(3, vec![Value::Text("A".into()), Value::Integer(100)]); + + // Group (B, 200): 2 instances + delta.insert(4, vec![Value::Text("B".into()), Value::Integer(200)]); + delta.insert(5, vec![Value::Text("B".into()), Value::Integer(200)]); + + // Group (A, 200): 1 instance + delta.insert(6, vec![Value::Text("A".into()), Value::Integer(200)]); + + // Group (C, 100): 2 instances + delta.insert(7, vec![Value::Text("C".into()), Value::Integer(100)]); + delta.insert(8, vec![Value::Text("C".into()), Value::Integer(100)]); + + // More instances of Group (A, 100) + delta.insert(9, vec![Value::Text("A".into()), Value::Integer(100)]); + delta.insert(10, vec![Value::Text("A".into()), Value::Integer(100)]); + + // Group (B, 100): 1 instance + delta.insert(11, vec![Value::Text("B".into()), Value::Integer(100)]); + + let result = pager + .io + .block(|| operator.commit((&delta).into(), &mut cursors)) + .unwrap(); + + // Should have exactly 5 distinct groups: + // (A, 100), (A, 200), (B, 100), (B, 200), (C, 100) + assert_eq!( + result.changes.len(), + 5, + "Should have exactly 5 distinct groups" + ); + + // All should have weight +1 (new groups appearing) + for (_row, weight) in &result.changes { + assert_eq!(*weight, 1, "All groups should have weight +1"); + } + + // Verify the distinct groups + let groups: std::collections::HashSet<(String, i64)> = result + .changes + .iter() + .map(|(row, _)| { + let category = match &row.values[0] { + Value::Text(s) => String::from_utf8(s.value.clone()).unwrap(), + _ => panic!("Expected text for category"), + }; + let value = match &row.values[1] { + Value::Integer(i) => *i, + _ => panic!("Expected integer for value"), + }; + (category, value) + }) + .collect(); + + assert!(groups.contains(&("A".to_string(), 100))); + assert!(groups.contains(&("A".to_string(), 200))); + assert!(groups.contains(&("B".to_string(), 100))); + assert!(groups.contains(&("B".to_string(), 200))); + assert!(groups.contains(&("C".to_string(), 100))); + } + + #[test] + fn test_multiple_distinct_aggregates_same_column() { + // Test that multiple DISTINCT aggregates on the same column don't interfere + let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); + + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); + let index_def = create_dbsp_state_index(index_root_page_id); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); + + let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); + + // Create operator with COUNT(DISTINCT value), SUM(DISTINCT value), AVG(DISTINCT value) + // all on the same column + let mut operator = AggregateOperator::new( + 0, + vec![], // No group by - single group + vec![ + AggregateFunction::CountDistinct(0), // COUNT(DISTINCT value) + AggregateFunction::SumDistinct(0), // SUM(DISTINCT value) + AggregateFunction::AvgDistinct(0), // AVG(DISTINCT value) + ], + vec!["value".to_string()], + ); + + // Insert distinct values: 10, 20, 30 (each appearing multiple times) + let mut input = Delta::new(); + input.insert(1, vec![Value::Integer(10)]); + input.insert(2, vec![Value::Integer(10)]); // duplicate + input.insert(3, vec![Value::Integer(20)]); + input.insert(4, vec![Value::Integer(20)]); // duplicate + input.insert(5, vec![Value::Integer(30)]); + input.insert(6, vec![Value::Integer(10)]); // duplicate + + let output = pager + .io + .block(|| operator.commit((&input).into(), &mut cursors)) + .unwrap(); + + // Should have exactly one output row (no group by) + assert_eq!(output.changes.len(), 1); + let (row, weight) = &output.changes[0]; + assert_eq!(*weight, 1); + + // Extract the aggregate values + let values = &row.values; + assert_eq!(values.len(), 3); // 3 aggregate values + + // COUNT(DISTINCT value) should be 3 (distinct values: 10, 20, 30) + assert_eq!(values[0], Value::Integer(3)); + + // SUM(DISTINCT value) should be 60 (10 + 20 + 30) + assert_eq!(values[1], Value::Integer(60)); + + // AVG(DISTINCT value) should be 20.0 (60 / 3) + assert_eq!(values[2], Value::Float(20.0)); + } + + #[test] + fn test_count_distinct_with_deletions() { + let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); + + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); + let index_def = create_dbsp_state_index(index_root_page_id); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); + let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); + + let mut operator = AggregateOperator::new( + 1, + vec![], // No GROUP BY + vec![AggregateFunction::CountDistinct(1)], + vec!["id".to_string(), "value".to_string()], + ); + + // Insert 3 distinct values + let mut delta1 = Delta::new(); + delta1.insert(1, vec![Value::Integer(1), Value::Integer(100)]); + delta1.insert(2, vec![Value::Integer(2), Value::Integer(200)]); + delta1.insert(3, vec![Value::Integer(3), Value::Integer(300)]); + + let result1 = pager + .io + .block(|| operator.commit((&delta1).into(), &mut cursors)) + .unwrap(); + + assert_eq!(result1.changes.len(), 1); + assert_eq!(result1.changes[0].1, 1); + assert_eq!(result1.changes[0].0.values[0], Value::Integer(3)); + + // Delete one value + let mut delta2 = Delta::new(); + delta2.delete(2, vec![Value::Integer(2), Value::Integer(200)]); + + let result2 = pager + .io + .block(|| operator.commit((&delta2).into(), &mut cursors)) + .unwrap(); + + assert_eq!(result2.changes.len(), 2); + let new_row = result2.changes.iter().find(|(_, w)| *w == 1).unwrap(); + assert_eq!(new_row.0.values[0], Value::Integer(2)); + } + + #[test] + fn test_sum_distinct_with_deletions() { + let (pager, table_root_page_id, index_root_page_id) = create_test_pager(); + + let table_cursor = BTreeCursor::new_table(pager.clone(), table_root_page_id, 5); + let index_def = create_dbsp_state_index(index_root_page_id); + let index_cursor = BTreeCursor::new_index(pager.clone(), index_root_page_id, &index_def, 4); + let mut cursors = DbspStateCursors::new(table_cursor, index_cursor); + + let mut operator = AggregateOperator::new( + 1, + vec![], + vec![AggregateFunction::SumDistinct(1)], + vec!["id".to_string(), "value".to_string()], + ); + + // Insert values including a duplicate + let mut delta1 = Delta::new(); + delta1.insert(1, vec![Value::Integer(1), Value::Integer(100)]); + delta1.insert(2, vec![Value::Integer(2), Value::Integer(200)]); + delta1.insert(3, vec![Value::Integer(3), Value::Integer(100)]); // Duplicate + delta1.insert(4, vec![Value::Integer(4), Value::Integer(300)]); + + let result1 = pager + .io + .block(|| operator.commit((&delta1).into(), &mut cursors)) + .unwrap(); + + assert_eq!(result1.changes.len(), 1); + assert_eq!(result1.changes[0].1, 1); + assert_eq!(result1.changes[0].0.values[0], Value::Float(600.0)); // 100 + 200 + 300 + + // Delete value 200 + let mut delta2 = Delta::new(); + delta2.delete(2, vec![Value::Integer(2), Value::Integer(200)]); + + let result2 = pager + .io + .block(|| operator.commit((&delta2).into(), &mut cursors)) + .unwrap(); + + assert_eq!(result2.changes.len(), 2); + let new_row = result2.changes.iter().find(|(_, w)| *w == 1).unwrap(); + assert_eq!(new_row.0.values[0], Value::Float(400.0)); // 100 + 300 + } } diff --git a/core/incremental/persistence.rs b/core/incremental/persistence.rs index bba64a282..26b9b8b3f 100644 --- a/core/incremental/persistence.rs +++ b/core/incremental/persistence.rs @@ -8,7 +8,7 @@ pub enum ReadRecord { #[default] GetRecord, Done { - state: Option, + state: Box>, }, } @@ -27,7 +27,9 @@ impl ReadRecord { ReadRecord::GetRecord => { let res = return_if_io!(cursor.seek(key.clone(), SeekOp::GE { eq_only: true })); if !matches!(res, SeekResult::Found) { - *self = ReadRecord::Done { state: None }; + *self = ReadRecord::Done { + state: Box::new(None), + }; } else { let record = return_if_io!(cursor.record()); let r = record.ok_or_else(|| { @@ -41,14 +43,21 @@ impl ReadRecord { let (state, _group_key) = match blob { Value::Blob(blob) => AggregateState::from_blob(&blob), + Value::Null => { + // For plain DISTINCT, we store null value and just track weight + // Return a minimal state indicating existence + Ok((AggregateState::default(), vec![])) + } _ => Err(LimboError::ParseError( - "Value in aggregator not blob".to_string(), + "Value in aggregator not blob or null".to_string(), )), }?; - *self = ReadRecord::Done { state: Some(state) } + *self = ReadRecord::Done { + state: Box::new(Some(state)), + } } } - ReadRecord::Done { state } => return Ok(IOResult::Done(state.clone())), + ReadRecord::Done { state } => return Ok(IOResult::Done((**state).clone())), } } } diff --git a/testing/materialized_views.test b/testing/materialized_views.test index a2a8eb5c4..e2bfd11bd 100755 --- a/testing/materialized_views.test +++ b/testing/materialized_views.test @@ -1720,3 +1720,777 @@ do_execsql_test_on_specific_db {:memory:} matview-groupby-join-position { SELECT * FROM tujoingroup ORDER BY a; } {1|2 2|1} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-basic { + CREATE TABLE items(id INTEGER, category TEXT, value INTEGER); + INSERT INTO items VALUES + (1, 'A', 100), + (2, 'B', 200), + (3, 'A', 100), -- duplicate of row 1 + (4, 'C', 300), + (5, 'B', 200), -- duplicate of row 2 + (6, 'A', 100); -- another duplicate of row 1 + + CREATE MATERIALIZED VIEW distinct_items AS + SELECT DISTINCT category, value FROM items; + + SELECT category, value FROM distinct_items ORDER BY category, value; +} {A|100 +B|200 +C|300} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-single-column { + CREATE TABLE numbers(n INTEGER); + INSERT INTO numbers VALUES (1), (2), (1), (3), (2), (1), (4), (3); + + CREATE MATERIALIZED VIEW distinct_numbers AS + SELECT DISTINCT n FROM numbers; + + SELECT n FROM distinct_numbers ORDER BY n; +} {1 +2 +3 +4} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-incremental-insert { + CREATE TABLE data(x INTEGER, y TEXT); + CREATE MATERIALIZED VIEW distinct_data AS + SELECT DISTINCT x, y FROM data; + + -- Initial data + INSERT INTO data VALUES (1, 'alpha'), (2, 'beta'), (1, 'alpha'); + SELECT x, y FROM distinct_data ORDER BY x, y; +} {1|alpha +2|beta} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-incremental-insert-new { + CREATE TABLE data(x INTEGER, y TEXT); + CREATE MATERIALIZED VIEW distinct_data AS + SELECT DISTINCT x, y FROM data; + + -- Initial data + INSERT INTO data VALUES (1, 'alpha'), (2, 'beta'), (1, 'alpha'); + + -- Add new distinct values + INSERT INTO data VALUES (3, 'gamma'), (4, 'delta'); + SELECT x, y FROM distinct_data ORDER BY x, y; +} {1|alpha +2|beta +3|gamma +4|delta} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-incremental-insert-dups { + CREATE TABLE data(x INTEGER, y TEXT); + CREATE MATERIALIZED VIEW distinct_data AS + SELECT DISTINCT x, y FROM data; + + -- Initial data with some new values + INSERT INTO data VALUES + (1, 'alpha'), (2, 'beta'), (1, 'alpha'), + (3, 'gamma'), (4, 'delta'); + + -- Add duplicates of existing values + INSERT INTO data VALUES (1, 'alpha'), (2, 'beta'), (3, 'gamma'); + -- Should be same as before the duplicate insert + SELECT x, y FROM distinct_data ORDER BY x, y; +} {1|alpha +2|beta +3|gamma +4|delta} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-incremental-delete { + CREATE TABLE records(id INTEGER PRIMARY KEY, category TEXT, status INTEGER); + INSERT INTO records VALUES + (1, 'X', 1), + (2, 'Y', 2), + (3, 'X', 1), -- duplicate values + (4, 'Z', 3), + (5, 'Y', 2); -- duplicate values + + CREATE MATERIALIZED VIEW distinct_records AS + SELECT DISTINCT category, status FROM records; + + SELECT category, status FROM distinct_records ORDER BY category, status; +} {X|1 +Y|2 +Z|3} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-incremental-delete-partial { + CREATE TABLE records(id INTEGER PRIMARY KEY, category TEXT, status INTEGER); + INSERT INTO records VALUES + (1, 'X', 1), + (2, 'Y', 2), + (3, 'X', 1), -- duplicate values + (4, 'Z', 3), + (5, 'Y', 2); -- duplicate values + + CREATE MATERIALIZED VIEW distinct_records AS + SELECT DISTINCT category, status FROM records; + + -- Delete one instance of duplicate + DELETE FROM records WHERE id = 3; + -- X|1 should still exist (one instance remains) + SELECT category, status FROM distinct_records ORDER BY category, status; +} {X|1 +Y|2 +Z|3} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-incremental-delete-all { + CREATE TABLE records(id INTEGER PRIMARY KEY, category TEXT, status INTEGER); + INSERT INTO records VALUES + (1, 'X', 1), + (2, 'Y', 2), + (4, 'Z', 3), + (5, 'Y', 2); -- duplicate values + + CREATE MATERIALIZED VIEW distinct_records AS + SELECT DISTINCT category, status FROM records; + + -- Delete all instances of X|1 + DELETE FROM records WHERE category = 'X' AND status = 1; + -- Now X|1 should be gone + SELECT category, status FROM distinct_records ORDER BY category, status; +} {Y|2 +Z|3} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-incremental-reappear { + CREATE TABLE records(id INTEGER PRIMARY KEY, category TEXT, status INTEGER); + INSERT INTO records VALUES + (2, 'Y', 2), + (4, 'Z', 3), + (5, 'Y', 2); -- duplicate values + + CREATE MATERIALIZED VIEW distinct_records AS + SELECT DISTINCT category, status FROM records; + + -- Re-add a previously deleted value + INSERT INTO records VALUES (6, 'X', 1); + -- X|1 should appear + SELECT category, status FROM distinct_records ORDER BY category, status; +} {X|1 +Y|2 +Z|3} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-null-handling { + CREATE TABLE nullable(a INTEGER, b TEXT); + INSERT INTO nullable VALUES + (1, 'one'), + (2, NULL), + (NULL, 'three'), + (1, 'one'), -- duplicate + (2, NULL), -- duplicate with NULL + (NULL, 'three'), -- duplicate with NULL + (NULL, NULL); + + CREATE MATERIALIZED VIEW distinct_nullable AS + SELECT DISTINCT a, b FROM nullable; + + -- NULLs should be handled as distinct values + SELECT a, b FROM distinct_nullable ORDER BY a, b; +} {| +|three +1|one +2|} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-empty-table { + CREATE TABLE empty_source(x INTEGER, y TEXT); + CREATE MATERIALIZED VIEW distinct_empty AS + SELECT DISTINCT x, y FROM empty_source; + + -- Should be empty + SELECT COUNT(*) FROM distinct_empty; +} {0} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-empty-then-insert { + CREATE TABLE empty_source(x INTEGER, y TEXT); + CREATE MATERIALIZED VIEW distinct_empty AS + SELECT DISTINCT x, y FROM empty_source; + + -- Insert into previously empty table + INSERT INTO empty_source VALUES (1, 'first'), (1, 'first'), (2, 'second'); + SELECT x, y FROM distinct_empty ORDER BY x, y; +} {1|first +2|second} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-multi-column-types { + CREATE TABLE mixed_types(i INTEGER, t TEXT, r REAL, b BLOB); + INSERT INTO mixed_types VALUES + (1, 'text1', 1.5, x'0102'), + (2, 'text2', 2.5, x'0304'), + (1, 'text1', 1.5, x'0102'), -- exact duplicate + (3, 'text3', 3.5, x'0506'), + (2, 'text2', 2.5, x'0304'); -- another duplicate + + CREATE MATERIALIZED VIEW distinct_mixed AS + SELECT DISTINCT i, t FROM mixed_types; + + SELECT i, t FROM distinct_mixed ORDER BY i, t; +} {1|text1 +2|text2 +3|text3} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-update-simulation { + CREATE TABLE updatable(id INTEGER PRIMARY KEY, val TEXT); + INSERT INTO updatable VALUES (1, 'old'), (2, 'old'), (3, 'new'); + + CREATE MATERIALIZED VIEW distinct_vals AS + SELECT DISTINCT val FROM updatable; + + SELECT val FROM distinct_vals ORDER BY val; +} {new +old} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-update-simulation-change { + CREATE TABLE updatable(id INTEGER PRIMARY KEY, val TEXT); + INSERT INTO updatable VALUES (1, 'old'), (2, 'old'), (3, 'new'); + + CREATE MATERIALIZED VIEW distinct_vals AS + SELECT DISTINCT val FROM updatable; + + -- Simulate update by delete + insert + DELETE FROM updatable WHERE id = 1; + INSERT INTO updatable VALUES (1, 'new'); + + -- Now we have two 'new' and one 'old', but distinct shows each once + SELECT val FROM distinct_vals ORDER BY val; +} {new +old} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-update-simulation-all-same { + CREATE TABLE updatable(id INTEGER PRIMARY KEY, val TEXT); + INSERT INTO updatable VALUES (1, 'new'), (2, 'old'), (3, 'new'); + + CREATE MATERIALIZED VIEW distinct_vals AS + SELECT DISTINCT val FROM updatable; + + -- Change the 'old' to 'new' + DELETE FROM updatable WHERE id = 2; + INSERT INTO updatable VALUES (2, 'new'); + + -- Now all three rows have 'new', old should disappear + SELECT val FROM distinct_vals ORDER BY val; +} {new} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-large-duplicates { + CREATE TABLE many_dups(x INTEGER); + + -- Insert many duplicates + INSERT INTO many_dups VALUES (1), (1), (1), (1), (1); + INSERT INTO many_dups VALUES (2), (2), (2), (2), (2); + INSERT INTO many_dups VALUES (3), (3), (3), (3), (3); + + CREATE MATERIALIZED VIEW distinct_many AS + SELECT DISTINCT x FROM many_dups; + + SELECT x FROM distinct_many ORDER BY x; +} {1 +2 +3} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-large-duplicates-remove { + CREATE TABLE many_dups(x INTEGER); + + -- Insert many duplicates + INSERT INTO many_dups VALUES (1), (1), (1), (1), (1); + INSERT INTO many_dups VALUES (2), (2), (2), (2), (2); + INSERT INTO many_dups VALUES (3), (3), (3), (3), (3); + + CREATE MATERIALIZED VIEW distinct_many AS + SELECT DISTINCT x FROM many_dups; + + -- Remove some instances of value 2 (rowids 7,8,9,10 keeping rowid 6) + DELETE FROM many_dups WHERE rowid IN (7, 8, 9, 10); + + -- Should still have all three values + SELECT x FROM distinct_many ORDER BY x; +} {1 +2 +3} + +do_execsql_test_on_specific_db {:memory:} matview-distinct-large-duplicates-remove-all { + CREATE TABLE many_dups(x INTEGER); + + -- Insert many duplicates but only one instance of 2 + INSERT INTO many_dups VALUES (1), (1), (1), (1), (1); + INSERT INTO many_dups VALUES (2); + INSERT INTO many_dups VALUES (3), (3), (3), (3), (3); + + CREATE MATERIALIZED VIEW distinct_many AS + SELECT DISTINCT x FROM many_dups; + + -- Remove ALL instances of value 2 + DELETE FROM many_dups WHERE x = 2; + + -- Now 2 should be gone + SELECT x FROM distinct_many ORDER BY x; +} {1 +3} + +# COUNT(DISTINCT) tests for materialized views + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-basic { + CREATE TABLE sales(region TEXT, product TEXT, amount INTEGER); + INSERT INTO sales VALUES + ('North', 'A', 100), + ('North', 'A', 100), -- Duplicate + ('North', 'B', 200), + ('South', 'A', 150), + ('South', 'A', 150); -- Duplicate + + CREATE MATERIALIZED VIEW sales_summary AS + SELECT region, COUNT(DISTINCT product) as unique_products + FROM sales GROUP BY region; + + SELECT * FROM sales_summary ORDER BY region; +} {North|2 +South|1} + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-nulls { + -- COUNT(DISTINCT) should ignore NULL values per SQL standard + CREATE TABLE data(grp INTEGER, val INTEGER); + INSERT INTO data VALUES + (1, 10), + (1, 20), + (1, NULL), + (1, NULL), -- Multiple NULLs + (2, 30), + (2, NULL); + + CREATE MATERIALIZED VIEW v AS + SELECT grp, COUNT(DISTINCT val) as cnt FROM data GROUP BY grp; + + SELECT * FROM v ORDER BY grp; + + -- Add more NULLs (should not affect count) + INSERT INTO data VALUES (1, NULL), (2, NULL); + SELECT * FROM v ORDER BY grp; + + -- Add a non-NULL value + INSERT INTO data VALUES (2, 40); + SELECT * FROM v ORDER BY grp; +} {1|2 +2|1 +1|2 +2|1 +1|2 +2|2} + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-empty-groups { + CREATE TABLE items(category TEXT, item TEXT); + INSERT INTO items VALUES + ('A', 'x'), + ('A', 'y'), + ('B', 'z'); + + CREATE MATERIALIZED VIEW category_counts AS + SELECT category, COUNT(DISTINCT item) as unique_items + FROM items GROUP BY category; + + SELECT * FROM category_counts ORDER BY category; + + -- Delete all items from category B + DELETE FROM items WHERE category = 'B'; + SELECT * FROM category_counts ORDER BY category; + + -- Re-add items to B + INSERT INTO items VALUES ('B', 'w'), ('B', 'w'); -- Same value twice + SELECT * FROM category_counts ORDER BY category; +} {A|2 +B|1 +A|2 +A|2 +B|1} + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-updates { + CREATE TABLE records(id INTEGER PRIMARY KEY, grp TEXT, val INTEGER); + INSERT INTO records VALUES + (1, 'X', 100), + (2, 'X', 200), + (3, 'Y', 100), + (4, 'Y', 100); -- Duplicate + + CREATE MATERIALIZED VIEW grp_summary AS + SELECT grp, COUNT(DISTINCT val) as distinct_vals + FROM records GROUP BY grp; + + SELECT * FROM grp_summary ORDER BY grp; + + -- Update that changes group membership + UPDATE records SET grp = 'Y' WHERE id = 1; + SELECT * FROM grp_summary ORDER BY grp; + + -- Update that changes value within group + UPDATE records SET val = 300 WHERE id = 3; + SELECT * FROM grp_summary ORDER BY grp; +} {X|2 +Y|1 +X|1 +Y|1 +X|1 +Y|2} + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-large-scale { + CREATE TABLE events(user_id INTEGER, event_type INTEGER); + + -- Insert many events with varying duplication + INSERT INTO events VALUES + (1, 1), (1, 1), (1, 1), (1, 2), (1, 3), -- User 1: 3 distinct + (2, 1), (2, 2), (2, 2), (2, 2), -- User 2: 2 distinct + (3, 4), (3, 4), (3, 4), (3, 4), (3, 4); -- User 3: 1 distinct + + CREATE MATERIALIZED VIEW user_stats AS + SELECT user_id, COUNT(DISTINCT event_type) as unique_events + FROM events GROUP BY user_id; + + SELECT * FROM user_stats ORDER BY user_id; + + -- Mass deletion + DELETE FROM events WHERE event_type = 2; + SELECT * FROM user_stats ORDER BY user_id; + + -- Mass insertion with duplicates + INSERT INTO events VALUES + (1, 5), (1, 5), (1, 6), + (2, 5), (2, 6), (2, 7); + SELECT * FROM user_stats ORDER BY user_id; +} {1|3 +2|2 +3|1 +1|2 +2|1 +3|1 +1|4 +2|4 +3|1} + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-group-by-empty-start { + CREATE TABLE measurements(device TEXT, reading INTEGER); + + CREATE MATERIALIZED VIEW device_summary AS + SELECT device, COUNT(DISTINCT reading) as unique_readings + FROM measurements GROUP BY device; + + -- Start with empty table (no groups yet) + SELECT COUNT(*) FROM device_summary; + + -- Add first group + INSERT INTO measurements VALUES ('D1', 100), ('D1', 100); + SELECT * FROM device_summary; + + -- Add second group with distinct values + INSERT INTO measurements VALUES ('D2', 200), ('D2', 300); + SELECT * FROM device_summary ORDER BY device; + + -- Remove all data + DELETE FROM measurements; + SELECT COUNT(*) FROM device_summary; +} {0 +D1|1 +D1|1 +D2|2 +0} + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-single-row-groups { + CREATE TABLE singles(k TEXT PRIMARY KEY, v INTEGER); + INSERT INTO singles VALUES + ('a', 1), + ('b', 2), + ('c', 3); + + CREATE MATERIALIZED VIEW v AS + SELECT k, COUNT(DISTINCT v) as cnt FROM singles GROUP BY k; + + SELECT * FROM v ORDER BY k; + + -- Each group has exactly one row, so COUNT(DISTINCT v) = 1 + UPDATE singles SET v = 999 WHERE k = 'b'; + SELECT * FROM v ORDER BY k; + + DELETE FROM singles WHERE k = 'c'; + SELECT * FROM v ORDER BY k; +} {a|1 +b|1 +c|1 +a|1 +b|1 +c|1 +a|1 +b|1} + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-transactions { + CREATE TABLE txn_data(grp TEXT, val INTEGER); + INSERT INTO txn_data VALUES ('A', 1), ('A', 2); + + CREATE MATERIALIZED VIEW txn_view AS + SELECT grp, COUNT(DISTINCT val) as cnt FROM txn_data GROUP BY grp; + + SELECT * FROM txn_view; + + -- Transaction that adds duplicates (should not change count) + BEGIN; + INSERT INTO txn_data VALUES ('A', 1), ('A', 2); + SELECT * FROM txn_view; + COMMIT; + + SELECT * FROM txn_view; + + -- Transaction that adds new distinct value then rolls back + BEGIN; + INSERT INTO txn_data VALUES ('A', 3); + SELECT * FROM txn_view; + ROLLBACK; + + SELECT * FROM txn_view; +} {A|2 +A|2 +A|2 +A|3 +A|2} + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-text-values { + CREATE TABLE strings(category INTEGER, str TEXT); + INSERT INTO strings VALUES + (1, 'hello'), + (1, 'world'), + (1, 'hello'), -- Duplicate + (2, 'foo'), + (2, 'bar'), + (2, 'bar'); -- Duplicate + + CREATE MATERIALIZED VIEW str_counts AS + SELECT category, COUNT(DISTINCT str) as unique_strings + FROM strings GROUP BY category; + + SELECT * FROM str_counts ORDER BY category; + + -- Case sensitivity test + INSERT INTO strings VALUES (1, 'HELLO'), (2, 'FOO'); + SELECT * FROM str_counts ORDER BY category; + + -- Empty strings + INSERT INTO strings VALUES (1, ''), (1, ''), (2, ''); + SELECT * FROM str_counts ORDER BY category; +} {1|2 +2|2 +1|3 +2|3 +1|4 +2|4} + +do_execsql_test_on_specific_db {:memory:} matview-sum-distinct { + CREATE TABLE sales(region TEXT, amount INTEGER); + INSERT INTO sales VALUES + ('North', 100), + ('North', 200), + ('North', 100), -- Duplicate + ('North', NULL), + ('South', 300), + ('South', 300), -- Duplicate + ('South', 400); + + CREATE MATERIALIZED VIEW sales_summary AS + SELECT region, SUM(DISTINCT amount) as total_distinct + FROM sales GROUP BY region; + + SELECT * FROM sales_summary ORDER BY region; + + -- Add a duplicate value + INSERT INTO sales VALUES ('North', 200); + SELECT * FROM sales_summary ORDER BY region; + + -- Add a new distinct value + INSERT INTO sales VALUES ('South', 500); + SELECT * FROM sales_summary ORDER BY region; +} {North|300 +South|700 +North|300 +South|700 +North|300 +South|1200} + +do_execsql_test_on_specific_db {:memory:} matview-avg-distinct { + CREATE TABLE grades(student TEXT, score INTEGER); + INSERT INTO grades VALUES + ('Alice', 90), + ('Alice', 85), + ('Alice', 90), -- Duplicate + ('Alice', NULL), + ('Bob', 75), + ('Bob', 80), + ('Bob', 75); -- Duplicate + + CREATE MATERIALIZED VIEW avg_grades AS + SELECT student, AVG(DISTINCT score) as avg_score + FROM grades GROUP BY student; + + SELECT * FROM avg_grades ORDER BY student; + + -- Add duplicate scores + INSERT INTO grades VALUES ('Alice', 85), ('Bob', 80); + SELECT * FROM avg_grades ORDER BY student; + + -- Add new distinct score + INSERT INTO grades VALUES ('Alice', 95); + SELECT * FROM avg_grades ORDER BY student; +} {Alice|87.5 +Bob|77.5 +Alice|87.5 +Bob|77.5 +Alice|90.0 +Bob|77.5} + +do_execsql_test_on_specific_db {:memory:} matview-min-distinct { + CREATE TABLE metrics(category TEXT, value INTEGER); + INSERT INTO metrics VALUES + ('A', 10), + ('A', 20), + ('A', 10), -- Duplicate + ('A', 30), + ('A', NULL), + ('B', 5), + ('B', 15), + ('B', 5); -- Duplicate + + CREATE MATERIALIZED VIEW metric_min AS + SELECT category, + MIN(DISTINCT value) as min_val + FROM metrics GROUP BY category; + + SELECT * FROM metric_min ORDER BY category; + + -- Add values that don't change min + INSERT INTO metrics VALUES ('A', 15), ('B', 10); + SELECT * FROM metric_min ORDER BY category; + + -- Add values that change min + INSERT INTO metrics VALUES ('A', 5), ('B', 3); + SELECT * FROM metric_min ORDER BY category; +} {A|10 +B|5 +A|10 +B|5 +A|5 +B|3} + +do_execsql_test_on_specific_db {:memory:} matview-max-distinct { + CREATE TABLE metrics2(category TEXT, value INTEGER); + INSERT INTO metrics2 VALUES + ('A', 10), + ('A', 20), + ('A', 10), -- Duplicate + ('A', 30), + ('A', NULL), + ('B', 5), + ('B', 15), + ('B', 5); -- Duplicate + + CREATE MATERIALIZED VIEW metric_max AS + SELECT category, + MAX(DISTINCT value) as max_val + FROM metrics2 GROUP BY category; + + SELECT * FROM metric_max ORDER BY category; + + -- Add values that don't change max + INSERT INTO metrics2 VALUES ('A', 15), ('B', 10); + SELECT * FROM metric_max ORDER BY category; + + -- Add values that change max + INSERT INTO metrics2 VALUES ('A', 40), ('B', 20); + SELECT * FROM metric_max ORDER BY category; +} {A|30 +B|15 +A|30 +B|15 +A|40 +B|20} + +do_execsql_test_on_specific_db {:memory:} matview-multiple-distinct-aggregates-with-groupby { + CREATE TABLE data(grp TEXT, x INTEGER, y INTEGER, z INTEGER); + INSERT INTO data VALUES + ('A', 1, 10, 100), + ('A', 2, 20, 200), + ('A', 1, 10, 300), -- x,y duplicates + ('A', 3, 30, 100), -- z duplicate + ('A', NULL, 40, 400), + ('B', 4, 50, 500), + ('B', 5, 50, 600), -- y duplicate + ('B', 4, 60, 700), -- x duplicate + ('B', 6, NULL, 500), -- z duplicate + ('B', NULL, 70, NULL); + + CREATE MATERIALIZED VIEW multi_distinct AS + SELECT grp, + COUNT(DISTINCT x) as cnt_x, + SUM(DISTINCT y) as sum_y, + AVG(DISTINCT z) as avg_z + FROM data GROUP BY grp; + + SELECT * FROM multi_distinct ORDER BY grp; + + -- Add more data with duplicates + INSERT INTO data VALUES + ('A', 1, 20, 200), -- Existing values + ('B', 7, 80, 800); -- New values + + SELECT * FROM multi_distinct ORDER BY grp; +} {A|3|100|250.0 +B|3|180|600.0 +A|3|100|250.0 +B|4|260|650.0} + +do_execsql_test_on_specific_db {:memory:} matview-multiple-distinct-aggregates-no-groupby { + CREATE TABLE data2(x INTEGER, y INTEGER, z INTEGER); + INSERT INTO data2 VALUES + (1, 10, 100), + (2, 20, 200), + (1, 10, 300), -- x,y duplicates + (3, 30, 100), -- z duplicate + (NULL, 40, 400), + (4, 50, 500), + (5, 50, 600), -- y duplicate + (4, 60, 700), -- x duplicate + (6, NULL, 500), -- z duplicate + (NULL, 70, NULL); + + CREATE MATERIALIZED VIEW multi_distinct_global AS + SELECT COUNT(DISTINCT x) as cnt_x, + SUM(DISTINCT y) as sum_y, + AVG(DISTINCT z) as avg_z + FROM data2; + + SELECT * FROM multi_distinct_global; + + -- Add more data + INSERT INTO data2 VALUES + (1, 20, 200), -- Existing values + (7, 80, 800); -- New values + + SELECT * FROM multi_distinct_global; +} {6|280|400.0 +7|360|450.0} + +do_execsql_test_on_specific_db {:memory:} matview-count-distinct-global-aggregate { + CREATE TABLE all_data(val INTEGER); + INSERT INTO all_data VALUES (1), (2), (1), (3), (2); + + CREATE MATERIALIZED VIEW summary AS + SELECT COUNT(DISTINCT val) as total_distinct FROM all_data; + + SELECT * FROM summary; + + -- Add duplicates + INSERT INTO all_data VALUES (1), (2), (3); + SELECT * FROM summary; + + -- Add new distinct values + INSERT INTO all_data VALUES (4), (5); + SELECT * FROM summary; + + -- Delete all of one value + DELETE FROM all_data WHERE val = 3; + SELECT * FROM summary; +} {3 +3 +5 +4} From c463bab609c2194f1dd20c41cb515ff906aca231 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 23 Oct 2025 16:27:12 +0300 Subject: [PATCH 410/428] perf/throughput: Use connection per transaction in rusqlite benchmark Open a connection per transaction in the rusqlite benchmark so that we're comparing the same workload with Turso. --- perf/throughput/rusqlite/src/main.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/perf/throughput/rusqlite/src/main.rs b/perf/throughput/rusqlite/src/main.rs index 1e1d5c5f5..e709e8d87 100644 --- a/perf/throughput/rusqlite/src/main.rs +++ b/perf/throughput/rusqlite/src/main.rs @@ -115,18 +115,18 @@ fn worker_thread( start_barrier: Arc, compute_usec: u64, ) -> Result { - let conn = Connection::open(&db_path)?; - - conn.pragma_update(None, "synchronous", "FULL")?; - conn.pragma_update(None, "fullfsync", "true")?; - - conn.busy_timeout(std::time::Duration::from_secs(30))?; - start_barrier.wait(); let mut total_inserts = 0; for iteration in 0..iterations { + let conn = Connection::open(&db_path)?; + + conn.pragma_update(None, "synchronous", "FULL")?; + conn.pragma_update(None, "fullfsync", "true")?; + + conn.busy_timeout(std::time::Duration::from_secs(30))?; + let mut stmt = conn.prepare("INSERT INTO test_table (id, data) VALUES (?, ?)")?; conn.execute("BEGIN", [])?; From 413c582b410f94888ca0242de6b5e24d9d486e61 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 23 Oct 2025 15:50:26 +0300 Subject: [PATCH 411/428] core/vdbe: Reuse cursor in op_open_write() This optimization reuses an existing cursor when op_open_write() is called on the same table/index (same root_page). This is safe because the cursor position doesn't matter - op_rewind() is always called after op_open_write() to position the cursor at the beginning of the table/index before any operations are performed. This change speeds up op_open_write() by avoiding unnecessary cursor re-initialization. --- core/vdbe/execute.rs | 112 ++++++++++++++++++++++++------------------- 1 file changed, 62 insertions(+), 50 deletions(-) diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 47b166dbe..cc2e95f23 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -6958,59 +6958,71 @@ pub fn op_open_write( CursorType::BTreeIndex(index) => Some(index), _ => None, }; - let maybe_promote_to_mvcc_cursor = - |btree_cursor: Box| -> Result> { - if let Some(tx_id) = program.connection.get_mv_tx_id() { - let mv_store = mv_store.unwrap().clone(); - Ok(Box::new(MvCursor::new( - mv_store, - tx_id, - root_page, - pager.clone(), - btree_cursor, - )?)) - } else { - Ok(btree_cursor) - } - }; - if let Some(index) = maybe_index { - let conn = program.connection.clone(); - let schema = conn.schema.read(); - let table = schema - .get_table(&index.table_name) - .and_then(|table| table.btree()); - let num_columns = index.columns.len(); - let btree_cursor = Box::new(BTreeCursor::new_index( - pager.clone(), - root_page, - index.as_ref(), - num_columns, - )); - let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?; - cursors - .get_mut(*cursor_id) - .unwrap() - .replace(Cursor::new_btree(cursor)); + // Check if we can reuse the existing cursor + let can_reuse_cursor = if let Some(Some(Cursor::BTree(btree_cursor))) = cursors.get(*cursor_id) + { + // Reuse if the root_page matches (same table/index) + btree_cursor.root_page() == root_page } else { - let num_columns = match cursor_type { - CursorType::BTreeTable(table_rc) => table_rc.columns.len(), - CursorType::MaterializedView(table_rc, _) => table_rc.columns.len(), - _ => unreachable!( - "Expected BTreeTable or MaterializedView. This should not have happened." - ), - }; + false + }; - let btree_cursor = Box::new(BTreeCursor::new_table( - pager.clone(), - root_page, - num_columns, - )); - let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?; - cursors - .get_mut(*cursor_id) - .unwrap() - .replace(Cursor::new_btree(cursor)); + if !can_reuse_cursor { + let maybe_promote_to_mvcc_cursor = + |btree_cursor: Box| -> Result> { + if let Some(tx_id) = program.connection.get_mv_tx_id() { + let mv_store = mv_store.unwrap().clone(); + Ok(Box::new(MvCursor::new( + mv_store, + tx_id, + root_page, + pager.clone(), + btree_cursor, + )?)) + } else { + Ok(btree_cursor) + } + }; + if let Some(index) = maybe_index { + let conn = program.connection.clone(); + let schema = conn.schema.read(); + let table = schema + .get_table(&index.table_name) + .and_then(|table| table.btree()); + + let num_columns = index.columns.len(); + let btree_cursor = Box::new(BTreeCursor::new_index( + pager.clone(), + root_page, + index.as_ref(), + num_columns, + )); + let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?; + cursors + .get_mut(*cursor_id) + .unwrap() + .replace(Cursor::new_btree(cursor)); + } else { + let num_columns = match cursor_type { + CursorType::BTreeTable(table_rc) => table_rc.columns.len(), + CursorType::MaterializedView(table_rc, _) => table_rc.columns.len(), + _ => unreachable!( + "Expected BTreeTable or MaterializedView. This should not have happened." + ), + }; + + let btree_cursor = Box::new(BTreeCursor::new_table( + pager.clone(), + root_page, + num_columns, + )); + let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?; + cursors + .get_mut(*cursor_id) + .unwrap() + .replace(Cursor::new_btree(cursor)); + } } state.pc += 1; Ok(InsnFunctionStepResult::Step) From 23cddbcad9eef4e38b9e5985365ee5ac68c1cbb6 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 23 Oct 2025 10:11:55 -0400 Subject: [PATCH 412/428] Return null terminated strings from sqlite3_column_text --- sqlite3/src/lib.rs | 43 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/sqlite3/src/lib.rs b/sqlite3/src/lib.rs index 037e5015b..0083715ae 100644 --- a/sqlite3/src/lib.rs +++ b/sqlite3/src/lib.rs @@ -94,15 +94,25 @@ pub struct sqlite3_stmt { *mut ffi::c_void, )>, pub(crate) next: *mut sqlite3_stmt, + pub(crate) text_cache: Vec>, } impl sqlite3_stmt { pub fn new(db: *mut sqlite3, stmt: turso_core::Statement) -> Self { + let n_cols = stmt.num_columns(); Self { db, stmt, destructors: Vec::new(), next: std::ptr::null_mut(), + text_cache: vec![vec![]; n_cols], + } + } + #[inline] + fn clear_text_cache(&mut self) { + // Drop per-column buffers for the previous row + for r in &mut self.text_cache { + r.clear(); } } } @@ -323,7 +333,7 @@ pub unsafe extern "C" fn sqlite3_finalize(stmt: *mut sqlite3_stmt) -> ffi::c_int destructor_fn(ptr); } } - + stmt_ref.clear_text_cache(); let _ = Box::from_raw(stmt); SQLITE_OK } @@ -340,9 +350,15 @@ pub unsafe extern "C" fn sqlite3_step(stmt: *mut sqlite3_stmt) -> ffi::c_int { stmt.stmt.run_once().unwrap(); continue; } - turso_core::StepResult::Done => return SQLITE_DONE, + turso_core::StepResult::Done => { + stmt.clear_text_cache(); + return SQLITE_DONE; + } turso_core::StepResult::Interrupt => return SQLITE_INTERRUPT, - turso_core::StepResult::Row => return SQLITE_ROW, + turso_core::StepResult::Row => { + stmt.clear_text_cache(); + return SQLITE_ROW; + } turso_core::StepResult::Busy => return SQLITE_BUSY, } } else { @@ -389,6 +405,7 @@ pub unsafe extern "C" fn sqlite3_exec( pub unsafe extern "C" fn sqlite3_reset(stmt: *mut sqlite3_stmt) -> ffi::c_int { let stmt = &mut *stmt; stmt.stmt.reset(); + stmt.clear_text_cache(); SQLITE_OK } @@ -1048,14 +1065,30 @@ pub unsafe extern "C" fn sqlite3_column_text( stmt: *mut sqlite3_stmt, idx: ffi::c_int, ) -> *const ffi::c_uchar { + if stmt.is_null() || idx < 0 { + return std::ptr::null(); + } let stmt = &mut *stmt; let row = stmt.stmt.row(); let row = match row.as_ref() { Some(row) => row, None => return std::ptr::null(), }; - match row.get::<&Value>(idx as usize) { - Ok(turso_core::Value::Text(text)) => text.as_str().as_ptr(), + let i = idx as usize; + if i >= stmt.text_cache.len() { + return std::ptr::null(); + } + if !stmt.text_cache[i].is_empty() { + // we have already cached this value + return stmt.text_cache[i].as_ptr() as *const ffi::c_uchar; + } + match row.get::<&Value>(i) { + Ok(turso_core::Value::Text(text)) => { + let buf = &mut stmt.text_cache[i]; + buf.extend(text.as_str().as_bytes()); + buf.push(0); + buf.as_ptr() as *const ffi::c_uchar + } _ => std::ptr::null(), } } From 8ed4e7cac19e8884b866ca4011b464c32bfa2d45 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 23 Oct 2025 10:54:19 -0400 Subject: [PATCH 413/428] Add test for null terminated string from sqlite3_column_text --- sqlite3/tests/compat/mod.rs | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/sqlite3/tests/compat/mod.rs b/sqlite3/tests/compat/mod.rs index d04b933e8..0badf6051 100644 --- a/sqlite3/tests/compat/mod.rs +++ b/sqlite3/tests/compat/mod.rs @@ -412,6 +412,42 @@ mod tests { } } + #[test] + #[cfg(not(target_os = "windows"))] + fn column_text_is_nul_terminated_and_bytes_match() { + unsafe { + let mut db = std::ptr::null_mut(); + assert_eq!( + sqlite3_open(c"../testing/testing.db".as_ptr(), &mut db), + SQLITE_OK + ); + let mut stmt = std::ptr::null_mut(); + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT first_name FROM users ORDER BY rowid ASC LIMIT 1;".as_ptr(), + -1, + &mut stmt, + std::ptr::null_mut() + ), + SQLITE_OK + ); + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + let p = sqlite3_column_text(stmt, 0); + assert!(!p.is_null()); + let bytes = sqlite3_column_bytes(stmt, 0) as usize; + // NUL at [bytes], and no extra counted + let slice = std::slice::from_raw_parts(p, bytes + 1); + assert_eq!(slice[bytes], 0); + assert_eq!(libc::strlen(p), bytes); + + let s = std::ffi::CStr::from_ptr(p).to_str().unwrap(); + assert_eq!(s, "Jamie"); + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + #[test] fn test_sqlite3_bind_text() { unsafe { From 87069fde93c6132ca124394586dd26661add9cb3 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 23 Oct 2025 19:35:46 +0300 Subject: [PATCH 414/428] core/storage: Fix WAL already enabled issue If WAL is already enabled, let's just continue execution instead of erroring out. --- core/storage/wal.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/storage/wal.rs b/core/storage/wal.rs index 8a7ed81a6..09fcd2af3 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -2413,7 +2413,7 @@ impl WalFileShared { pub fn create(&mut self, file: Arc) -> Result<()> { if self.enabled.load(Ordering::SeqCst) { - return Err(LimboError::InternalError("WAL already enabled".to_string())); + return Ok(()); } let magic = if cfg!(target_endian = "big") { From 30d183c58f0172f9814b6ad526c36c7b7335df5e Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 23 Oct 2025 19:41:27 +0300 Subject: [PATCH 415/428] bindings/javascript: Improve open error messages Include database path name for debuggability. --- bindings/javascript/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bindings/javascript/src/lib.rs b/bindings/javascript/src/lib.rs index dcaaefc5b..d92661269 100644 --- a/bindings/javascript/src/lib.rs +++ b/bindings/javascript/src/lib.rs @@ -178,7 +178,7 @@ fn connect_sync(db: &DatabaseInner) -> napi::Result<()> { let io = &db.io; let file = io .open_file(&db.path, flags, false) - .map_err(|e| to_generic_error("failed to open file", e))?; + .map_err(|e| to_generic_error(&format!("failed to open file {}", db.path), e))?; let db_file = DatabaseFile::new(file); let db_core = turso_core::Database::open_with_flags( @@ -191,7 +191,7 @@ fn connect_sync(db: &DatabaseInner) -> napi::Result<()> { .with_indexes(true), None, ) - .map_err(|e| to_generic_error("failed to open database", e))?; + .map_err(|e| to_generic_error(&format!("failed to open database {}", db.path), e))?; let conn = db_core .connect() From 827b646c248209561c80e13d1c47453bcde0ecfc Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 23 Oct 2025 20:51:12 +0300 Subject: [PATCH 416/428] Switch to SQLite's Julian date logic The `julian_day_converter` crate is GPL, which is problematic for apps embedding Turso. Switch to SQLite's Julian date logic by porting the C code to Rust. --- Cargo.lock | 10 ---- core/Cargo.toml | 1 - core/functions/datetime.rs | 114 +++++++++++++++++++++++++++---------- 3 files changed, 84 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 508d44cf8..7fdda8afd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2410,15 +2410,6 @@ dependencies = [ "serde", ] -[[package]] -name = "julian_day_converter" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2987f71b89b85c812c8484cbf0c5d7912589e77bfdc66fd3e52f760e7859f16" -dependencies = [ - "chrono", -] - [[package]] name = "kqueue" version = "1.0.8" @@ -4916,7 +4907,6 @@ dependencies = [ "hex", "intrusive-collections", "io-uring", - "julian_day_converter", "libc", "libloading", "libm", diff --git a/core/Cargo.toml b/core/Cargo.toml index 80bc57f57..5b4aaa01d 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -57,7 +57,6 @@ regex-syntax = { workspace = true, default-features = false, features = [ "unicode", ] } chrono = { workspace = true, default-features = false, features = ["clock"] } -julian_day_converter = "0.4.5" rand = { workspace = true } libm = "0.2" turso_macros = { workspace = true } diff --git a/core/functions/datetime.rs b/core/functions/datetime.rs index b5969855f..3ccbef5a2 100644 --- a/core/functions/datetime.rs +++ b/core/functions/datetime.rs @@ -348,30 +348,35 @@ pub fn exec_julianday(values: &[Register]) -> Value { } fn to_julian_day_exact(dt: &NaiveDateTime) -> f64 { - let year = dt.year(); - let month = dt.month() as i32; - let day = dt.day() as i32; - let (adjusted_year, adjusted_month) = if month <= 2 { - (year - 1, month + 12) - } else { - (year, month) - }; + // SQLite's computeJD algorithm + let mut y = dt.year(); + let mut m = dt.month() as i32; + let d = dt.day() as i32; - let a = adjusted_year / 100; - let b = 2 - a + a / 4; - let jd_days = (365.25 * ((adjusted_year + 4716) as f64)).floor() - + (30.6001 * ((adjusted_month + 1) as f64)).floor() - + (day as f64) - + (b as f64) - - 1524.5; + if m <= 2 { + y -= 1; + m += 12; + } - let seconds = dt.hour() as f64 * 3600.0 - + dt.minute() as f64 * 60.0 - + dt.second() as f64 - + (dt.nanosecond() as f64) / 1_000_000_000.0; + let a = (y + 4800) / 100; + let b = 38 - a + (a / 4); + let x1 = 36525 * (y + 4716) / 100; + let x2 = 306001 * (m + 1) / 10000; - let jd_fraction = seconds / 86400.0; - jd_days + jd_fraction + // iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5) * 86400000) + let jd_days = (x1 + x2 + d + b) as f64 - 1524.5; + let mut i_jd = (jd_days * 86400000.0) as i64; + + // Add time component in milliseconds + // iJD += h*3600000 + m*60000 + (sqlite3_int64)(s*1000 + 0.5) + let h_ms = dt.hour() as i64 * 3600000; + let m_ms = dt.minute() as i64 * 60000; + let s_ms = (dt.second() as f64 * 1000.0 + dt.nanosecond() as f64 / 1_000_000.0 + 0.5) as i64; + + i_jd += h_ms + m_ms + s_ms; + + // Convert back to floating point JD + i_jd as f64 / 86400000.0 } pub fn exec_unixepoch(time_value: &Value) -> Result { @@ -490,7 +495,58 @@ fn get_date_time_from_time_value_float(value: f64) -> Option { if value.is_infinite() || value.is_nan() || !is_julian_day_value(value) { return None; } - julian_day_converter::julian_day_to_datetime(value).ok() + julian_day_to_datetime(value).ok() +} + +/// Convert a Julian Day number (as f64) to a NaiveDateTime +/// This uses SQLite's algorithm which converts to integer milliseconds first +/// to preserve precision, then converts back to date/time components. +fn julian_day_to_datetime(jd: f64) -> Result { + // SQLite approach: Convert JD to integer milliseconds + // iJD = (sqlite3_int64)(jd * 86400000.0 + 0.5) + let i_jd = (jd * 86400000.0 + 0.5) as i64; + + // Compute the date (Year, Month, Day) from iJD + // Z = (int)((iJD + 43200000)/86400000) + let z = ((i_jd + 43200000) / 86400000) as i32; + + // SQLite's algorithm from computeYMD + let alpha = ((z as f64 + 32044.75) / 36524.25) as i32 - 52; + let a = z + 1 + alpha - ((alpha + 100) / 4) + 25; + let b = a + 1524; + let c = ((b as f64 - 122.1) / 365.25) as i32; + let d = (36525 * (c & 32767)) / 100; + let e = ((b - d) as f64 / 30.6001) as i32; + let x1 = (30.6001 * e as f64) as i32; + + let day = (b - d - x1) as u32; + let month = if e < 14 { e - 1 } else { e - 13 } as u32; + let year = if month > 2 { c - 4716 } else { c - 4715 }; + + // Compute the time (Hour, Minute, Second) from iJD + // day_ms = (int)((iJD + 43200000) % 86400000) + let day_ms = ((i_jd + 43200000) % 86400000) as i32; + + // s = (day_ms % 60000) / 1000.0 + let s_millis = day_ms % 60000; + let seconds = (s_millis / 1000) as u32; + let millis = (s_millis % 1000) as u32; + + // day_min = day_ms / 60000 + let day_min = day_ms / 60000; + let minutes = (day_min % 60) as u32; + let hours = (day_min / 60) as u32; + + // Create the date + let date = NaiveDate::from_ymd_opt(year, month, day) + .ok_or_else(|| crate::LimboError::InternalError("Invalid date".to_string()))?; + + // Create time with millisecond precision converted to nanoseconds + let nanos = millis * 1_000_000; + let time = NaiveTime::from_hms_nano_opt(hours, minutes, seconds, nanos) + .ok_or_else(|| crate::LimboError::InternalError("Invalid time".to_string()))?; + + Ok(NaiveDateTime::new(date, time)) } fn is_leap_second(dt: &NaiveDateTime) -> bool { @@ -1584,17 +1640,15 @@ mod tests { assert_eq!(weekday_sunday_based(&dt), 5); } - #[allow(deprecated)] #[test] fn test_apply_modifier_julianday() { - use julian_day_converter::*; - let dt = create_datetime(2000, 1, 1, 12, 0, 0); - let julian_day = &dt.to_jd(); - let mut dt_result = NaiveDateTime::default(); - if let Some(ndt) = JulianDay::from_jd(*julian_day) { - dt_result = ndt; - } + + // Convert datetime to julian day using our implementation + let julian_day_value = to_julian_day_exact(&dt); + + // Convert back + let dt_result = julian_day_to_datetime(julian_day_value).unwrap(); assert_eq!(dt_result, dt); } From 18e6a23f2353eca6c0fb9f7411476c61418cd126 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Fri, 24 Oct 2025 10:52:54 +0300 Subject: [PATCH 417/428] Fix foreign key constraint enforcement on UNIQUE indexes Closes #3648 Co-authored-by: Pavan-Nambi --- core/translate/emitter.rs | 5 +- core/translate/fkeys.rs | 179 +++++++++++++++----------------------- core/translate/upsert.rs | 7 +- testing/foreign_keys.test | 103 ++++++++++++++++++++++ tests/fuzz/mod.rs | 33 +++++-- 5 files changed, 204 insertions(+), 123 deletions(-) diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 8d75dc63d..ff22d1035 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -33,7 +33,7 @@ use crate::translate::expr::{ use crate::translate::fkeys::{ build_index_affinity_string, emit_fk_child_update_counters, emit_fk_delete_parent_existence_checks, emit_guarded_fk_decrement, - emit_parent_pk_change_checks, open_read_index, open_read_table, stabilize_new_row_for_fk, + emit_parent_key_change_checks, open_read_index, open_read_table, stabilize_new_row_for_fk, }; use crate::translate::plan::{DeletePlan, JoinedTable, Plan, QueryDestination, Search}; use crate::translate::planner::ROWID_STRS; @@ -1346,10 +1346,11 @@ fn emit_update_insns( .schema .any_resolved_fks_referencing(table_name) { - emit_parent_pk_change_checks( + emit_parent_key_change_checks( program, &t_ctx.resolver, &table_btree, + plan.indexes_to_update.iter(), target_table_cursor_id, beg, start, diff --git a/core/translate/fkeys.rs b/core/translate/fkeys.rs index 14f9b7102..32a7099ad 100644 --- a/core/translate/fkeys.rs +++ b/core/translate/fkeys.rs @@ -264,14 +264,15 @@ pub fn stabilize_new_row_for_fk( Ok(()) } -/// Parent-side checks when the parent PK might change (UPDATE on parent): +/// Parent-side checks when the parent key might change (UPDATE on parent): /// Detect if any child references the OLD key (potential violation), and if any references the NEW key -/// (which cancels one potential violation). For composite PKs this builds OLD/NEW vectors first. +/// (which cancels one potential violation). For composite keys this builds OLD/NEW vectors first. #[allow(clippy::too_many_arguments)] -pub fn emit_parent_pk_change_checks( +pub fn emit_parent_key_change_checks( program: &mut ProgramBuilder, resolver: &Resolver, table_btree: &BTreeTable, + indexes_to_update: impl Iterator>, cursor_id: usize, old_rowid_reg: usize, start: usize, @@ -290,34 +291,32 @@ pub fn emit_parent_pk_change_checks( return Ok(()); } - match table_btree.primary_key_columns.len() { - 0 => emit_rowid_pk_change_check( + let primary_key_is_rowid_alias = table_btree.get_rowid_alias_column().is_some(); + + if primary_key_is_rowid_alias || table_btree.primary_key_columns.is_empty() { + emit_rowid_pk_change_check( program, &incoming, resolver, old_rowid_reg, rowid_set_clause_reg.unwrap_or(old_rowid_reg), - ), - 1 => emit_single_pk_change_check( - program, - &incoming, - resolver, - table_btree, - cursor_id, - start, - rowid_new_reg, - ), - _ => emit_composite_pk_change_check( - program, - &incoming, - resolver, - table_btree, - cursor_id, - old_rowid_reg, - start, - rowid_new_reg, - ), + )?; } + + for index in indexes_to_update { + emit_parent_index_key_change_checks( + program, + cursor_id, + start, + old_rowid_reg, + rowid_new_reg, + &incoming, + resolver, + table_btree, + index.as_ref(), + )?; + } + Ok(()) } /// Rowid-table parent PK change: compare rowid OLD vs NEW; if changed, run two-pass counters. @@ -355,124 +354,82 @@ pub fn emit_rowid_pk_change_check( Ok(()) } -/// Single-column PK parent change: load OLD and NEW; if changed, run two-pass counters. -pub fn emit_single_pk_change_check( - program: &mut ProgramBuilder, - incoming: &[ResolvedFkRef], - resolver: &Resolver, - table_btree: &BTreeTable, - cursor_id: usize, - start: usize, - rowid_new_reg: usize, -) -> Result<()> { - let (pk_name, _) = &table_btree.primary_key_columns[0]; - let (pos, col) = table_btree.get_column(pk_name).unwrap(); - - let old_reg = program.alloc_register(); - if col.is_rowid_alias { - program.emit_insn(Insn::RowId { - cursor_id, - dest: old_reg, - }); - } else { - program.emit_insn(Insn::Column { - cursor_id, - column: pos, - dest: old_reg, - default: None, - }); - } - let new_reg = if col.is_rowid_alias { - rowid_new_reg - } else { - start + pos - }; - - let skip = program.allocate_label(); - program.emit_insn(Insn::Eq { - lhs: old_reg, - rhs: new_reg, - target_pc: skip, - flags: CmpInsFlags::default(), - collation: None, - }); - - let old_pk = program.alloc_register(); - let new_pk = program.alloc_register(); - program.emit_insn(Insn::Copy { - src_reg: old_reg, - dst_reg: old_pk, - extra_amount: 0, - }); - program.emit_insn(Insn::Copy { - src_reg: new_reg, - dst_reg: new_pk, - extra_amount: 0, - }); - - emit_fk_parent_pk_change_counters(program, incoming, resolver, old_pk, new_pk, 1)?; - program.preassign_label_to_next_insn(skip); - Ok(()) -} - -/// Composite-PK parent change: build OLD/NEW vectors; if any component differs, run two-pass counters. +/// Foreign keys are only legal if the referenced parent key is: +/// 1. The rowid alias (no separate index) +/// 2. Part of a primary key / unique index (there is no practical difference between the two) +/// +/// If the foreign key references a composite key, all of the columns in the key must be referenced. +/// E.g. +/// CREATE TABLE parent (a, b, c, PRIMARY KEY (a, b, c)); +/// CREATE TABLE child (a, b, c, FOREIGN KEY (a, b, c) REFERENCES parent (a, b, c)); +/// +/// Whereas this is not allowed: +/// CREATE TABLE parent (a, b, c, PRIMARY KEY (a, b, c)); +/// CREATE TABLE child (a, b, c, FOREIGN KEY (a, b) REFERENCES parent (a, b, c)); +/// +/// This function checks if the parent key has changed by comparing the OLD and NEW values. +/// If the parent key has changed, it emits the counters for the foreign keys. +/// If the parent key has not changed, it does nothing. #[allow(clippy::too_many_arguments)] -pub fn emit_composite_pk_change_check( +pub fn emit_parent_index_key_change_checks( program: &mut ProgramBuilder, + cursor_id: usize, + new_values_start: usize, + old_rowid_reg: usize, + new_rowid_reg: usize, incoming: &[ResolvedFkRef], resolver: &Resolver, table_btree: &BTreeTable, - cursor_id: usize, - old_rowid_reg: usize, - start: usize, - rowid_new_reg: usize, + index: &Index, ) -> Result<()> { - let pk_len = table_btree.primary_key_columns.len(); + let idx_len = index.columns.len(); - let old_pk = program.alloc_registers(pk_len); - for (i, (pk_name, _)) in table_btree.primary_key_columns.iter().enumerate() { - let (pos, col) = table_btree.get_column(pk_name).unwrap(); - if col.is_rowid_alias { + let old_key = program.alloc_registers(idx_len); + for (i, index_col) in index.columns.iter().enumerate() { + let pos_in_table = index_col.pos_in_table; + let column = &table_btree.columns[pos_in_table]; + if column.is_rowid_alias { program.emit_insn(Insn::Copy { src_reg: old_rowid_reg, - dst_reg: old_pk + i, + dst_reg: old_key + i, extra_amount: 0, }); } else { program.emit_insn(Insn::Column { cursor_id, - column: pos, - dest: old_pk + i, + column: pos_in_table, + dest: old_key + i, default: None, }); } } - let new_pk = program.alloc_registers(pk_len); - for (i, (pk_name, _)) in table_btree.primary_key_columns.iter().enumerate() { - let (pos, col) = table_btree.get_column(pk_name).unwrap(); - let src = if col.is_rowid_alias { - rowid_new_reg + let new_key = program.alloc_registers(idx_len); + for (i, index_col) in index.columns.iter().enumerate() { + let pos_in_table = index_col.pos_in_table; + let column = &table_btree.columns[pos_in_table]; + let src = if column.is_rowid_alias { + new_rowid_reg } else { - start + pos + new_values_start + pos_in_table }; program.emit_insn(Insn::Copy { src_reg: src, - dst_reg: new_pk + i, + dst_reg: new_key + i, extra_amount: 0, }); } let skip = program.allocate_label(); let changed = program.allocate_label(); - for i in 0..pk_len { - let next = if i + 1 == pk_len { + for i in 0..idx_len { + let next = if i + 1 == idx_len { None } else { Some(program.allocate_label()) }; program.emit_insn(Insn::Eq { - lhs: old_pk + i, - rhs: new_pk + i, + lhs: old_key + i, + rhs: new_key + i, target_pc: next.unwrap_or(skip), flags: CmpInsFlags::default(), collation: None, @@ -484,7 +441,7 @@ pub fn emit_composite_pk_change_check( } program.preassign_label_to_next_insn(changed); - emit_fk_parent_pk_change_counters(program, incoming, resolver, old_pk, new_pk, pk_len)?; + emit_fk_parent_pk_change_counters(program, incoming, resolver, old_key, new_key, idx_len)?; program.preassign_label_to_next_insn(skip); Ok(()) } diff --git a/core/translate/upsert.rs b/core/translate/upsert.rs index 287203a85..85f304283 100644 --- a/core/translate/upsert.rs +++ b/core/translate/upsert.rs @@ -8,7 +8,7 @@ use crate::error::SQLITE_CONSTRAINT_PRIMARYKEY; use crate::schema::ROWID_SENTINEL; use crate::translate::emitter::UpdateRowSource; use crate::translate::expr::{walk_expr, WalkControl}; -use crate::translate::fkeys::{emit_fk_child_update_counters, emit_parent_pk_change_checks}; +use crate::translate::fkeys::{emit_fk_child_update_counters, emit_parent_key_change_checks}; use crate::translate::insert::{format_unique_violation_desc, InsertEmitCtx}; use crate::translate::planner::ROWID_STRS; use crate::vdbe::insn::CmpInsFlags; @@ -497,10 +497,13 @@ pub fn emit_upsert( &changed_cols, )?; } - emit_parent_pk_change_checks( + emit_parent_key_change_checks( program, resolver, &bt, + resolver.schema.get_indices(table.get_name()).filter(|idx| { + upsert_index_is_affected(table, idx, &changed_cols, rowid_changed) + }), ctx.cursor_id, ctx.conflict_rowid_reg, new_start, diff --git a/testing/foreign_keys.test b/testing/foreign_keys.test index 8934292fd..360ccbc8f 100644 --- a/testing/foreign_keys.test +++ b/testing/foreign_keys.test @@ -1104,3 +1104,106 @@ do_execsql_test_on_specific_db {:memory:} fk-delete-composite-bounds { -- This should be a no-op (no row (5,3)), and MUST NOT error. DELETE FROM p WHERE a=5 AND b=3; } {} + +# Single column unique index on parent, FK referenced by child +do_execsql_test_in_memory_any_error fk-update-parent-unique-single-col { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(id UNIQUE); + CREATE TABLE child(pid REFERENCES parent(id)); + INSERT INTO parent VALUES(1); + INSERT INTO child VALUES(1); + UPDATE parent SET id = 2 WHERE id = 1; +} + +# Single column with explicit CREATE UNIQUE INDEX +do_execsql_test_in_memory_any_error fk-update-parent-explicit-unique-single-col { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(id); + CREATE UNIQUE INDEX parent_id_idx ON parent(id); + CREATE TABLE child(pid REFERENCES parent(id)); + INSERT INTO parent VALUES(1); + INSERT INTO child VALUES(1); + UPDATE parent SET id = 2 WHERE id = 1; +} + +# Multi-column unique index on parent, FK referenced by multi-column FK in child +do_execsql_test_in_memory_any_error fk-update-parent-unique-multi-col { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(a, b, UNIQUE(a, b)); + CREATE TABLE child(ca, cb, FOREIGN KEY(ca, cb) REFERENCES parent(a, b)); + INSERT INTO parent VALUES(1, 2); + INSERT INTO child VALUES(1, 2); + UPDATE parent SET a = 3 WHERE a = 1 AND b = 2; +} + +# Multi-column unique index on parent, FK referenced by multi-column FK in child +do_execsql_test_in_memory_any_error fk-update-parent-unique-multi-col-2 { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(a, b, UNIQUE(a, b)); + CREATE TABLE child(ca, cb, FOREIGN KEY(ca, cb) REFERENCES parent(a, b)); + INSERT INTO parent VALUES(1, 2); + INSERT INTO child VALUES(1, 2); + UPDATE parent SET b = 3 WHERE a = 1 AND b = 2; +} + +# Multi-column index defined explicitly as CREATE UNIQUE INDEX +do_execsql_test_in_memory_any_error fk-update-parent-explicit-unique-multi-col { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(a, b); + CREATE UNIQUE INDEX parent_ab_idx ON parent(a, b); + CREATE TABLE child(ca, cb, FOREIGN KEY(ca, cb) REFERENCES parent(a, b)); + INSERT INTO parent VALUES(1, 2); + INSERT INTO child VALUES(1, 2); + UPDATE parent SET a = 3 WHERE a = 1 AND b = 2; +} + +# Multi-column index defined explicitly as CREATE UNIQUE INDEX +do_execsql_test_in_memory_any_error fk-update-parent-explicit-unique-multi-col-2 { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(a, b); + CREATE UNIQUE INDEX parent_ab_idx ON parent(a, b); + CREATE TABLE child(ca, cb, FOREIGN KEY(ca, cb) REFERENCES parent(a, b)); + INSERT INTO parent VALUES(1, 2); + INSERT INTO child VALUES(1, 2); + UPDATE parent SET b = 3 WHERE a = 1 AND b = 2; +} + +# Single column INTEGER PRIMARY KEY +do_execsql_test_in_memory_any_error fk-update-parent-int-pk { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(id INTEGER PRIMARY KEY); + CREATE TABLE child(pid REFERENCES parent(id)); + INSERT INTO parent VALUES(1); + INSERT INTO child VALUES(1); + UPDATE parent SET id = 2 WHERE id = 1; +} + +# Single column TEXT PRIMARY KEY +do_execsql_test_in_memory_any_error fk-update-parent-text-pk { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(id PRIMARY KEY); + CREATE TABLE child(pid REFERENCES parent(id)); + INSERT INTO parent VALUES('key1'); + INSERT INTO child VALUES('key1'); + UPDATE parent SET id = 'key2' WHERE id = 'key1'; +} + +# Multi-column PRIMARY KEY +do_execsql_test_in_memory_any_error fk-update-parent-multi-col-pk { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(a, b, PRIMARY KEY(a, b)); + CREATE TABLE child(ca, cb, FOREIGN KEY(ca, cb) REFERENCES parent(a, b)); + INSERT INTO parent VALUES(1, 2); + INSERT INTO child VALUES(1, 2); + UPDATE parent SET a = 3 WHERE a = 1 AND b = 2; +} + +# Multi-column PRIMARY KEY +do_execsql_test_in_memory_any_error fk-update-parent-multi-col-pk-2 { + PRAGMA foreign_keys=ON; + CREATE TABLE parent(a, b, PRIMARY KEY(a, b)); + CREATE TABLE child(ca, cb, FOREIGN KEY(ca, cb) REFERENCES parent(a, b)); + INSERT INTO parent VALUES(1, 2); + INSERT INTO child VALUES(1, 2); + UPDATE parent SET b = 3 WHERE a = 1 AND b = 2; +} diff --git a/tests/fuzz/mod.rs b/tests/fuzz/mod.rs index 122e58dce..9f06f20fc 100644 --- a/tests/fuzz/mod.rs +++ b/tests/fuzz/mod.rs @@ -679,30 +679,47 @@ mod fuzz_tests { limbo_exec_rows(&limbo_db, &limbo, &s); sqlite.execute(&s, params![]).unwrap(); + let get_constraint_type = |rng: &mut ChaCha8Rng| match rng.random_range(0..3) { + 0 => "INTEGER PRIMARY KEY", + 1 => "UNIQUE", + 2 => "PRIMARY KEY", + _ => unreachable!(), + }; + // Mix of immediate and deferred FK constraints - let s = log_and_exec("CREATE TABLE parent(id INTEGER PRIMARY KEY, a INT, b INT)"); + let s = log_and_exec(&format!( + "CREATE TABLE parent(id {}, a INT, b INT)", + get_constraint_type(&mut rng) + )); limbo_exec_rows(&limbo_db, &limbo, &s); sqlite.execute(&s, params![]).unwrap(); // Child with DEFERRABLE INITIALLY DEFERRED FK - let s = log_and_exec( - "CREATE TABLE child_deferred(id INTEGER PRIMARY KEY, pid INT, x INT, \ + let s = log_and_exec(&format!( + "CREATE TABLE child_deferred(id {}, pid INT, x INT, \ FOREIGN KEY(pid) REFERENCES parent(id) DEFERRABLE INITIALLY DEFERRED)", - ); + get_constraint_type(&mut rng) + )); limbo_exec_rows(&limbo_db, &limbo, &s); sqlite.execute(&s, params![]).unwrap(); // Child with immediate FK (default) - let s = log_and_exec( - "CREATE TABLE child_immediate(id INTEGER PRIMARY KEY, pid INT, y INT, \ + let s = log_and_exec(&format!( + "CREATE TABLE child_immediate(id {}, pid INT, y INT, \ FOREIGN KEY(pid) REFERENCES parent(id))", - ); + get_constraint_type(&mut rng) + )); limbo_exec_rows(&limbo_db, &limbo, &s); sqlite.execute(&s, params![]).unwrap(); + let composite_constraint = match rng.random_range(0..2) { + 0 => "PRIMARY KEY", + 1 => "UNIQUE", + _ => unreachable!(), + }; // Composite key parent for deferred testing let s = log_and_exec( - "CREATE TABLE parent_comp(a INT NOT NULL, b INT NOT NULL, c INT, PRIMARY KEY(a,b))", + &format!("CREATE TABLE parent_comp(a INT NOT NULL, b INT NOT NULL, c INT, {composite_constraint}(a,b))"), ); limbo_exec_rows(&limbo_db, &limbo, &s); sqlite.execute(&s, params![]).unwrap(); From ff83b8218a31260566217932bd9e00e8e18184ab Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 24 Oct 2025 09:37:39 +0300 Subject: [PATCH 418/428] docs: Add vector search section to database manual --- docs/manual.md | 162 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 162 insertions(+) diff --git a/docs/manual.md b/docs/manual.md index 8472c8696..638ebad69 100644 --- a/docs/manual.md +++ b/docs/manual.md @@ -40,6 +40,7 @@ Welcome to Turso database manual! - [WAL manipulation](#wal-manipulation) - [`libsql_wal_frame_count`](#libsql_wal_frame_count) - [Encryption](#encryption) + - [Vector search](#vector-search) - [CDC](#cdc-early-preview) - [Appendix A: Turso Internals](#appendix-a-turso-internals) - [Frontend](#frontend) @@ -609,6 +610,167 @@ $ cargo run -- --experimental-encryption \ ``` +## Vector search + +Turso supports vector search for building workloads such as semantic search, recommendation systems, and similarity matching. Vector embeddings can be stored and queried using specialized functions for distance calculations. + +### Vector types + +Turso supports both **dense** and **sparse** vector representations: + +#### Dense vectors + +Dense vectors store a value for every dimension. Turso provides two precision levels: + +* **Float32 dense vectors** (`vector32`): 32-bit floating-point values, suitable for most machine learning embeddings (e.g., OpenAI embeddings, sentence transformers). Uses 4 bytes per dimension. +* **Float64 dense vectors** (`vector64`): 64-bit floating-point values for applications requiring higher precision. Uses 8 bytes per dimension. + +Dense vectors are ideal for embeddings from neural networks where most dimensions contain non-zero values. + +#### Sparse vectors + +Sparse vectors only store non-zero values and their indices, making them memory-efficient for high-dimensional data with many zero values: + +* **Float32 sparse vectors** (`vector32_sparse`): Stores only non-zero 32-bit float values along with their dimension indices. + +Sparse vectors are ideal for TF-IDF representations, bag-of-words models, and other scenarios where most dimensions are zero. + +### Vector functions + +#### Creating and converting vectors + +**`vector32(value)`** + +Converts a text or blob value into a 32-bit dense vector. + +```sql +SELECT vector32('[1.0, 2.0, 3.0]'); +``` + +**`vector32_sparse(value)`** + +Converts a text or blob value into a 32-bit sparse vector. + +```sql +SELECT vector32_sparse('[0.0, 1.5, 0.0, 2.3, 0.0]'); +``` + +**`vector64(value)`** + +Converts a text or blob value into a 64-bit dense vector. + +```sql +SELECT vector64('[1.0, 2.0, 3.0]'); +``` + +**`vector_extract(blob)`** + +Extracts and displays a vector blob as human-readable text. + +```sql +SELECT vector_extract(embedding) FROM documents; +``` + +#### Distance functions + +Turso provides three distance metrics for measuring vector similarity: + +**`vector_distance_cos(v1, v2)`** + +Computes the cosine distance between two vectors. Returns a value between 0 (identical direction) and 2 (opposite direction). Cosine distance is computed as `1 - cosine_similarity`. + +Cosine distance is ideal for: +- Text embeddings where magnitude is less important than direction +- Comparing document similarity + +```sql +SELECT name, vector_distance_cos(embedding, vector32('[0.1, 0.5, 0.3]')) AS distance +FROM documents +ORDER BY distance +LIMIT 10; +``` + +**`vector_distance_l2(v1, v2)`** + +Computes the Euclidean (L2) distance between two vectors. Returns the straight-line distance in n-dimensional space. + +L2 distance is ideal for: +- Image embeddings where absolute differences matter +- Spatial data and geometric problems +- When embeddings are not normalized + +```sql +SELECT name, vector_distance_l2(embedding, vector32('[0.1, 0.5, 0.3]')) AS distance +FROM documents +ORDER BY distance +LIMIT 10; +``` + +**`vector_distance_jaccard(v1, v2)`** + +Computes the weighted Jaccard distance between two vectors, measuring dissimilarity based on the ratio of minimum to maximum values across dimensions. Note that this is different from the ordinary Jaccard distance, which is defined only for binary vectors. + +Weighted Jaccard distance is ideal for: +- Sparse vectors with many zero values +- Set-like comparisons +- TF-IDF and bag-of-words representations + +```sql +SELECT name, vector_distance_jaccard(sparse_embedding, vector32_sparse('[0.0, 1.0, 0.0, 2.0]')) AS distance +FROM documents +ORDER BY distance +LIMIT 10; +``` + +#### Utility functions + +**`vector_concat(v1, v2)`** + +Concatenates two vectors into a single vector. The resulting vector has dimensions equal to the sum of both input vectors. + +```sql +SELECT vector_concat(vector32('[1.0, 2.0]'), vector32('[3.0, 4.0]')); +-- Results in a 4-dimensional vector: [1.0, 2.0, 3.0, 4.0] +``` + +**`vector_slice(vector, start_index, end_index)`** + +Extracts a slice of a vector from `start_index` to `end_index` (exclusive). + +```sql +SELECT vector_slice(vector32('[1.0, 2.0, 3.0, 4.0, 5.0]'), 1, 4); +-- Results in: [2.0, 3.0, 4.0] +``` + +### Example: Semantic search + +Here's a complete example of building a semantic search system: + +```sql +-- Create a table for documents with embeddings +CREATE TABLE documents ( + id INTEGER PRIMARY KEY, + name TEXT, + content TEXT, + embedding BLOB +); + +-- Insert documents with precomputed embeddings +INSERT INTO documents (name, content, embedding) VALUES + ('Doc 1', 'Machine learning basics', vector32('[0.2, 0.5, 0.1, 0.8]')), + ('Doc 2', 'Database fundamentals', vector32('[0.1, 0.3, 0.9, 0.2]')), + ('Doc 3', 'Neural networks guide', vector32('[0.3, 0.6, 0.2, 0.7]')); + +-- Find documents similar to a query embedding +SELECT + name, + content, + vector_distance_cos(embedding, vector32('[0.25, 0.55, 0.15, 0.75]')) AS similarity +FROM documents +ORDER BY similarity +LIMIT 5; +``` + ## CDC (Early Preview) Turso supports [Change Data Capture](https://en.wikipedia.org/wiki/Change_data_capture), a powerful pattern for tracking and recording changes to your database in real-time. Instead of periodically scanning tables to find what changed, CDC automatically logs every insert, update, and delete as it happens per connection. From c3fb867173fba8d73e3d2df3bdf01a85de456231 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 23 Oct 2025 13:18:07 +0300 Subject: [PATCH 419/428] core: Switch RwLock> to ArcSwap We don't actually need the RwLock locking capabilities, just the ability to swap the instance. --- Cargo.lock | 7 ++ core/Cargo.toml | 1 + core/incremental/compiler.rs | 2 +- core/incremental/operator.rs | 2 +- core/incremental/project_operator.rs | 2 +- core/lib.rs | 63 ++++++------ core/mvcc/database/mod.rs | 4 +- core/mvcc/database/tests.rs | 100 +++++++++--------- core/mvcc/mod.rs | 10 +- core/mvcc/persistent_storage/logical_log.rs | 6 +- core/storage/btree.rs | 24 ++--- core/storage/wal.rs | 106 ++++++++++---------- core/vdbe/execute.rs | 6 +- 13 files changed, 171 insertions(+), 162 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 508d44cf8..e542d40f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -227,6 +227,12 @@ version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + [[package]] name = "arrayref" version = "0.3.9" @@ -4904,6 +4910,7 @@ dependencies = [ "aes", "aes-gcm", "antithesis_sdk", + "arc-swap", "bitflags 2.9.4", "built", "bytemuck", diff --git a/core/Cargo.toml b/core/Cargo.toml index 80bc57f57..5abead4c2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -84,6 +84,7 @@ twox-hash = "2.1.1" intrusive-collections = "0.9.7" roaring = "0.11.2" simsimd = "6.5.3" +arc-swap = "1.7" [build-dependencies] chrono = { workspace = true, default-features = false } diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index a319bb9ad..b82147f0f 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -2580,7 +2580,7 @@ mod tests { let io: Arc = Arc::new(MemoryIO::new()); let db = Database::open_file(io.clone(), ":memory:", false, false).unwrap(); let conn = db.connect().unwrap(); - let pager = conn.pager.read().clone(); + let pager = conn.pager.load().clone(); let _ = pager.io.block(|| pager.allocate_page1()).unwrap(); diff --git a/core/incremental/operator.rs b/core/incremental/operator.rs index 613497e8d..fa72d678c 100644 --- a/core/incremental/operator.rs +++ b/core/incremental/operator.rs @@ -270,7 +270,7 @@ mod tests { let db = Database::open_file(io.clone(), ":memory:", false, false).unwrap(); let conn = db.connect().unwrap(); - let pager = conn.pager.read().clone(); + let pager = conn.pager.load().clone(); // Allocate page 1 first (database header) let _ = pager.io.block(|| pager.allocate_page1()); diff --git a/core/incremental/project_operator.rs b/core/incremental/project_operator.rs index b82a1a138..8103ac7ff 100644 --- a/core/incremental/project_operator.rs +++ b/core/incremental/project_operator.rs @@ -86,7 +86,7 @@ impl ProjectOperator { for col in &self.columns { // Use the internal connection's pager for expression evaluation - let internal_pager = self.internal_conn.pager.read().clone(); + let internal_pager = self.internal_conn.pager.load().clone(); // Execute the compiled expression (handles both columns and complex expressions) let result = col diff --git a/core/lib.rs b/core/lib.rs index c3149b64d..4e5eeea8d 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -51,6 +51,7 @@ use crate::vdbe::explain::{EXPLAIN_COLUMNS_TYPE, EXPLAIN_QUERY_PLAN_COLUMNS_TYPE use crate::vdbe::metrics::ConnectionMetrics; use crate::vtab::VirtualTable; use crate::{incremental::view::AllViewsTxState, translate::emitter::TransactionMode}; +use arc_swap::ArcSwap; use core::str; pub use error::{CompletionError, LimboError}; pub use io::clock::{Clock, Instant}; @@ -487,7 +488,7 @@ impl Database { let conn = db.connect()?; let syms = conn.syms.read(); - let pager = conn.pager.read().clone(); + let pager = conn.pager.load().clone(); if let Some(encryption_opts) = encryption_opts { conn.pragma_update("cipher", format!("'{}'", encryption_opts.cipher))?; @@ -559,7 +560,7 @@ impl Database { .get(); let conn = Arc::new(Connection { db: self.clone(), - pager: RwLock::new(pager), + pager: ArcSwap::new(pager), schema: RwLock::new(self.schema.lock().unwrap().clone()), database_schemas: RwLock::new(std::collections::HashMap::new()), auto_commit: AtomicBool::new(true), @@ -1061,7 +1062,7 @@ impl DatabaseCatalog { pub struct Connection { db: Arc, - pager: RwLock>, + pager: ArcSwap, schema: RwLock>, /// Per-database schema cache (database_index -> schema) /// Loaded lazily to avoid copying all schemas on connection open @@ -1161,7 +1162,7 @@ impl Connection { .unwrap() .trim(); self.maybe_update_schema(); - let pager = self.pager.read().clone(); + let pager = self.pager.load().clone(); let mode = QueryMode::new(&cmd); let (Cmd::Stmt(stmt) | Cmd::Explain(stmt) | Cmd::ExplainQueryPlan(stmt)) = cmd; let program = translate::translate( @@ -1192,7 +1193,7 @@ impl Connection { /// This function must be called outside of any transaction because internally it will start transaction session by itself #[allow(dead_code)] fn maybe_reparse_schema(self: &Arc) -> Result<()> { - let pager = self.pager.read().clone(); + let pager = self.pager.load().clone(); // first, quickly read schema_version from the root page in order to check if schema changed pager.begin_read_tx()?; @@ -1258,7 +1259,7 @@ impl Connection { } fn reparse_schema(self: &Arc) -> Result<()> { - let pager = self.pager.read().clone(); + let pager = self.pager.load().clone(); // read cookie before consuming statement program - otherwise we can end up reading cookie with closed transaction state let cookie = pager @@ -1315,7 +1316,7 @@ impl Connection { let mut parser = Parser::new(sql.as_bytes()); while let Some(cmd) = parser.next_cmd()? { let syms = self.syms.read(); - let pager = self.pager.read().clone(); + let pager = self.pager.load().clone(); let byte_offset_end = parser.offset(); let input = str::from_utf8(&sql.as_bytes()[..byte_offset_end]) .unwrap() @@ -1367,7 +1368,7 @@ impl Connection { return Err(LimboError::InternalError("Connection closed".to_string())); } let syms = self.syms.read(); - let pager = self.pager.read().clone(); + let pager = self.pager.load().clone(); let mode = QueryMode::new(&cmd); let (Cmd::Stmt(stmt) | Cmd::Explain(stmt) | Cmd::ExplainQueryPlan(stmt)) = cmd; let program = translate::translate( @@ -1399,7 +1400,7 @@ impl Connection { let mut parser = Parser::new(sql.as_bytes()); while let Some(cmd) = parser.next_cmd()? { let syms = self.syms.read(); - let pager = self.pager.read().clone(); + let pager = self.pager.load().clone(); let byte_offset_end = parser.offset(); let input = str::from_utf8(&sql.as_bytes()[..byte_offset_end]) .unwrap() @@ -1428,7 +1429,7 @@ impl Connection { return Ok(None); }; let syms = self.syms.read(); - let pager = self.pager.read().clone(); + let pager = self.pager.load().clone(); let byte_offset_end = parser.offset(); let input = str::from_utf8(&sql.as_bytes()[..byte_offset_end]) .unwrap() @@ -1518,7 +1519,7 @@ impl Connection { if let Some(encryption_opts) = encryption_opts { let _ = conn.pragma_update("cipher", encryption_opts.cipher.to_string()); let _ = conn.pragma_update("hexkey", encryption_opts.hexkey.to_string()); - let pager = conn.pager.read(); + let pager = conn.pager.load(); if db.db_state.get().is_initialized() { // Clear page cache so the header page can be reread from disk and decrypted using the encryption context. pager.clear_page_cache(false); @@ -1572,7 +1573,7 @@ impl Connection { /// Read schema version at current transaction #[cfg(all(feature = "fs", feature = "conn_raw_api"))] pub fn read_schema_version(&self) -> Result { - let pager = self.pager.read(); + let pager = self.pager.load(); pager .io .block(|| pager.with_header(|header| header.schema_cookie)) @@ -1590,7 +1591,7 @@ impl Connection { "write_schema_version must be called from within Write transaction".to_string(), )); }; - let pager = self.pager.read(); + let pager = self.pager.load(); pager.io.block(|| { pager.with_header_mut(|header| { turso_assert!( @@ -1617,7 +1618,7 @@ impl Connection { page: &mut [u8], frame_watermark: Option, ) -> Result { - let pager = self.pager.read(); + let pager = self.pager.load(); let (page_ref, c) = match pager.read_page_no_cache(page_idx as i64, frame_watermark, true) { Ok(result) => result, // on windows, zero read will trigger UnexpectedEof @@ -1643,19 +1644,19 @@ impl Connection { /// (so, if concurrent connection wrote something to the WAL - this method will not see this change) #[cfg(all(feature = "fs", feature = "conn_raw_api"))] pub fn wal_changed_pages_after(&self, frame_watermark: u64) -> Result> { - self.pager.read().wal_changed_pages_after(frame_watermark) + self.pager.load().wal_changed_pages_after(frame_watermark) } #[cfg(all(feature = "fs", feature = "conn_raw_api"))] pub fn wal_state(&self) -> Result { - self.pager.read().wal_state() + self.pager.load().wal_state() } #[cfg(all(feature = "fs", feature = "conn_raw_api"))] pub fn wal_get_frame(&self, frame_no: u64, frame: &mut [u8]) -> Result { use crate::storage::sqlite3_ondisk::parse_wal_frame_header; - let c = self.pager.read().wal_get_frame(frame_no, frame)?; + let c = self.pager.load().wal_get_frame(frame_no, frame)?; self.db.io.wait_for_completion(c)?; let (header, _) = parse_wal_frame_header(frame); Ok(WalFrameInfo { @@ -1669,13 +1670,13 @@ impl Connection { /// If attempt to write frame at the position `frame_no` will create gap in the WAL - method will return error #[cfg(all(feature = "fs", feature = "conn_raw_api"))] pub fn wal_insert_frame(&self, frame_no: u64, frame: &[u8]) -> Result { - self.pager.read().wal_insert_frame(frame_no, frame) + self.pager.load().wal_insert_frame(frame_no, frame) } /// Start WAL session by initiating read+write transaction for this connection #[cfg(all(feature = "fs", feature = "conn_raw_api"))] pub fn wal_insert_begin(&self) -> Result<()> { - let pager = self.pager.read(); + let pager = self.pager.load(); pager.begin_read_tx()?; pager.io.block(|| pager.begin_write_tx()).inspect_err(|_| { pager.end_read_tx(); @@ -1695,7 +1696,7 @@ impl Connection { #[cfg(all(feature = "fs", feature = "conn_raw_api"))] pub fn wal_insert_end(self: &Arc, force_commit: bool) -> Result<()> { { - let pager = self.pager.read(); + let pager = self.pager.load(); let Some(wal) = pager.wal.as_ref() else { return Err(LimboError::InternalError( @@ -1745,14 +1746,14 @@ impl Connection { if self.is_closed() { return Err(LimboError::InternalError("Connection closed".to_string())); } - self.pager.read().cacheflush() + self.pager.load().cacheflush() } pub fn checkpoint(&self, mode: CheckpointMode) -> Result { if self.is_closed() { return Err(LimboError::InternalError("Connection closed".to_string())); } - self.pager.read().wal_checkpoint(mode) + self.pager.load().wal_checkpoint(mode) } /// Close a connection and checkpoint. @@ -1768,7 +1769,7 @@ impl Connection { } _ => { if !self.mvcc_enabled() { - let pager = self.pager.read(); + let pager = self.pager.load(); pager.rollback_tx(self); } self.set_tx_state(TransactionState::None); @@ -1782,7 +1783,7 @@ impl Connection { .eq(&1) { self.pager - .read() + .load() .checkpoint_shutdown(self.is_wal_auto_checkpoint_disabled())?; }; Ok(()) @@ -1891,11 +1892,11 @@ impl Connection { shared_wal.enabled.store(false, Ordering::SeqCst); shared_wal.file = None; } - self.pager.write().clear_page_cache(false); + self.pager.load().clear_page_cache(false); let pager = self.db.init_pager(Some(size.get() as usize))?; pager.enable_encryption(self.db.opts.enable_encryption); - *self.pager.write() = Arc::new(pager); - self.pager.read().set_initial_page_size(size); + self.pager.store(Arc::new(pager)); + self.pager.load().set_initial_page_size(size); Ok(()) } @@ -2034,7 +2035,7 @@ impl Connection { fn get_pager_from_database_index(&self, index: &usize) -> Arc { if *index < 2 { - self.pager.read().clone() + self.pager.load().clone() } else { self.attached_databases.read().get_pager_by_index(index) } @@ -2247,7 +2248,7 @@ impl Connection { } pub fn get_pager(&self) -> Arc { - self.pager.read().clone() + self.pager.load().clone() } pub fn get_query_only(&self) -> bool { @@ -2294,7 +2295,7 @@ impl Connection { } pub fn set_reserved_bytes(&self, reserved_bytes: u8) -> Result<()> { - let pager = self.pager.read(); + let pager = self.pager.load(); pager.set_reserved_space_bytes(reserved_bytes); Ok(()) } @@ -2317,7 +2318,7 @@ impl Connection { return Ok(()); }; tracing::trace!("setting encryption ctx for connection"); - let pager = self.pager.read(); + let pager = self.pager.load(); if pager.is_encryption_ctx_set() { return Err(LimboError::InvalidArgument( "cannot reset encryption attributes if already set in the session".to_string(), diff --git a/core/mvcc/database/mod.rs b/core/mvcc/database/mod.rs index 9675ead3c..0057e56d8 100644 --- a/core/mvcc/database/mod.rs +++ b/core/mvcc/database/mod.rs @@ -404,7 +404,7 @@ impl CommitStateMachine { commit_coordinator: Arc, header: Arc>>, ) -> Self { - let pager = connection.pager.read().clone(); + let pager = connection.pager.load().clone(); Self { state, is_finalized: false, @@ -1047,7 +1047,7 @@ impl MvStore { self.insert_table_id_to_rootpage(root_page_as_table_id, Some(*root_page)); } - if !self.maybe_recover_logical_log(bootstrap_conn.pager.read().clone())? { + if !self.maybe_recover_logical_log(bootstrap_conn.pager.load().clone())? { // There was no logical log to recover, so we're done. return Ok(()); } diff --git a/core/mvcc/database/tests.rs b/core/mvcc/database/tests.rs index 206636f02..0af15e370 100644 --- a/core/mvcc/database/tests.rs +++ b/core/mvcc/database/tests.rs @@ -125,7 +125,7 @@ fn test_insert_read() { let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let tx1_row = generate_simple_string_row((-2).into(), 1, "Hello"); db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap(); @@ -145,7 +145,7 @@ fn test_insert_read() { let tx2 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let row = db .mvcc_store @@ -166,7 +166,7 @@ fn test_read_nonexistent() { let db = MvccTestDb::new(); let tx = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let row = db.mvcc_store.read( tx, @@ -184,7 +184,7 @@ fn test_delete() { let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let tx1_row = generate_simple_string_row((-2).into(), 1, "Hello"); db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap(); @@ -224,7 +224,7 @@ fn test_delete() { let tx2 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let row = db .mvcc_store @@ -244,7 +244,7 @@ fn test_delete_nonexistent() { let db = MvccTestDb::new(); let tx = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); assert!(!db .mvcc_store @@ -263,7 +263,7 @@ fn test_commit() { let db = MvccTestDb::new(); let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let tx1_row = generate_simple_string_row((-2).into(), 1, "Hello"); db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap(); @@ -297,7 +297,7 @@ fn test_commit() { let tx2 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let row = db .mvcc_store @@ -320,7 +320,7 @@ fn test_rollback() { let db = MvccTestDb::new(); let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let row1 = generate_simple_string_row((-2).into(), 1, "Hello"); db.mvcc_store.insert(tx1, row1.clone()).unwrap(); @@ -351,10 +351,10 @@ fn test_rollback() { .unwrap(); assert_eq!(row3, row4); db.mvcc_store - .rollback_tx(tx1, db.conn.pager.read().clone(), &db.conn); + .rollback_tx(tx1, db.conn.pager.load().clone(), &db.conn); let tx2 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let row5 = db .mvcc_store @@ -376,7 +376,7 @@ fn test_dirty_write() { // T1 inserts a row with ID 1, but does not commit. let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let tx1_row = generate_simple_string_row((-2).into(), 1, "Hello"); db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap(); @@ -395,7 +395,7 @@ fn test_dirty_write() { let conn2 = db.db.connect().unwrap(); // T2 attempts to delete row with ID 1, but fails because T1 has not committed. - let tx2 = db.mvcc_store.begin_tx(conn2.pager.read().clone()).unwrap(); + let tx2 = db.mvcc_store.begin_tx(conn2.pager.load().clone()).unwrap(); let tx2_row = generate_simple_string_row((-2).into(), 1, "World"); assert!(!db.mvcc_store.update(tx2, tx2_row).unwrap()); @@ -420,14 +420,14 @@ fn test_dirty_read() { // T1 inserts a row with ID 1, but does not commit. let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let row1 = generate_simple_string_row((-2).into(), 1, "Hello"); db.mvcc_store.insert(tx1, row1).unwrap(); // T2 attempts to read row with ID 1, but doesn't see one because T1 has not committed. let conn2 = db.db.connect().unwrap(); - let tx2 = db.mvcc_store.begin_tx(conn2.pager.read().clone()).unwrap(); + let tx2 = db.mvcc_store.begin_tx(conn2.pager.load().clone()).unwrap(); let row2 = db .mvcc_store .read( @@ -448,7 +448,7 @@ fn test_dirty_read_deleted() { // T1 inserts a row with ID 1 and commits. let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let tx1_row = generate_simple_string_row((-2).into(), 1, "Hello"); db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap(); @@ -456,7 +456,7 @@ fn test_dirty_read_deleted() { // T2 deletes row with ID 1, but does not commit. let conn2 = db.db.connect().unwrap(); - let tx2 = db.mvcc_store.begin_tx(conn2.pager.read().clone()).unwrap(); + let tx2 = db.mvcc_store.begin_tx(conn2.pager.load().clone()).unwrap(); assert!(db .mvcc_store .delete( @@ -470,7 +470,7 @@ fn test_dirty_read_deleted() { // T3 reads row with ID 1, but doesn't see the delete because T2 hasn't committed. let conn3 = db.db.connect().unwrap(); - let tx3 = db.mvcc_store.begin_tx(conn3.pager.read().clone()).unwrap(); + let tx3 = db.mvcc_store.begin_tx(conn3.pager.load().clone()).unwrap(); let row = db .mvcc_store .read( @@ -492,7 +492,7 @@ fn test_fuzzy_read() { // T1 inserts a row with ID 1 and commits. let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let tx1_row = generate_simple_string_row((-2).into(), 1, "First"); db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap(); @@ -512,7 +512,7 @@ fn test_fuzzy_read() { // T2 reads the row with ID 1 within an active transaction. let conn2 = db.db.connect().unwrap(); - let tx2 = db.mvcc_store.begin_tx(conn2.pager.read().clone()).unwrap(); + let tx2 = db.mvcc_store.begin_tx(conn2.pager.load().clone()).unwrap(); let row = db .mvcc_store .read( @@ -528,7 +528,7 @@ fn test_fuzzy_read() { // T3 updates the row and commits. let conn3 = db.db.connect().unwrap(); - let tx3 = db.mvcc_store.begin_tx(conn3.pager.read().clone()).unwrap(); + let tx3 = db.mvcc_store.begin_tx(conn3.pager.load().clone()).unwrap(); let tx3_row = generate_simple_string_row((-2).into(), 1, "Second"); db.mvcc_store.update(tx3, tx3_row).unwrap(); commit_tx(db.mvcc_store.clone(), &conn3, tx3).unwrap(); @@ -561,7 +561,7 @@ fn test_lost_update() { // T1 inserts a row with ID 1 and commits. let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let tx1_row = generate_simple_string_row((-2).into(), 1, "Hello"); db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap(); @@ -581,13 +581,13 @@ fn test_lost_update() { // T2 attempts to update row ID 1 within an active transaction. let conn2 = db.db.connect().unwrap(); - let tx2 = db.mvcc_store.begin_tx(conn2.pager.read().clone()).unwrap(); + let tx2 = db.mvcc_store.begin_tx(conn2.pager.load().clone()).unwrap(); let tx2_row = generate_simple_string_row((-2).into(), 1, "World"); assert!(db.mvcc_store.update(tx2, tx2_row.clone()).unwrap()); // T3 also attempts to update row ID 1 within an active transaction. let conn3 = db.db.connect().unwrap(); - let tx3 = db.mvcc_store.begin_tx(conn3.pager.read().clone()).unwrap(); + let tx3 = db.mvcc_store.begin_tx(conn3.pager.load().clone()).unwrap(); let tx3_row = generate_simple_string_row((-2).into(), 1, "Hello, world!"); assert!(matches!( db.mvcc_store.update(tx3, tx3_row), @@ -595,7 +595,7 @@ fn test_lost_update() { )); // hack: in the actual tursodb database we rollback the mvcc tx ourselves, so manually roll it back here db.mvcc_store - .rollback_tx(tx3, conn3.pager.read().clone(), &conn3); + .rollback_tx(tx3, conn3.pager.load().clone(), &conn3); commit_tx(db.mvcc_store.clone(), &conn2, tx2).unwrap(); assert!(matches!( @@ -604,7 +604,7 @@ fn test_lost_update() { )); let conn4 = db.db.connect().unwrap(); - let tx4 = db.mvcc_store.begin_tx(conn4.pager.read().clone()).unwrap(); + let tx4 = db.mvcc_store.begin_tx(conn4.pager.load().clone()).unwrap(); let row = db .mvcc_store .read( @@ -628,7 +628,7 @@ fn test_committed_visibility() { // let's add $10 to my account since I like money let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let tx1_row = generate_simple_string_row((-2).into(), 1, "10"); db.mvcc_store.insert(tx1, tx1_row.clone()).unwrap(); @@ -636,7 +636,7 @@ fn test_committed_visibility() { // but I like more money, so let me try adding $10 more let conn2 = db.db.connect().unwrap(); - let tx2 = db.mvcc_store.begin_tx(conn2.pager.read().clone()).unwrap(); + let tx2 = db.mvcc_store.begin_tx(conn2.pager.load().clone()).unwrap(); let tx2_row = generate_simple_string_row((-2).into(), 1, "20"); assert!(db.mvcc_store.update(tx2, tx2_row.clone()).unwrap()); let row = db @@ -654,7 +654,7 @@ fn test_committed_visibility() { // can I check how much money I have? let conn3 = db.db.connect().unwrap(); - let tx3 = db.mvcc_store.begin_tx(conn3.pager.read().clone()).unwrap(); + let tx3 = db.mvcc_store.begin_tx(conn3.pager.load().clone()).unwrap(); let row = db .mvcc_store .read( @@ -676,11 +676,11 @@ fn test_future_row() { let tx1 = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let conn2 = db.db.connect().unwrap(); - let tx2 = db.mvcc_store.begin_tx(conn2.pager.read().clone()).unwrap(); + let tx2 = db.mvcc_store.begin_tx(conn2.pager.load().clone()).unwrap(); let tx2_row = generate_simple_string_row((-2).into(), 1, "Hello"); db.mvcc_store.insert(tx2, tx2_row).unwrap(); @@ -726,7 +726,7 @@ fn setup_test_db() -> (MvccTestDb, u64) { let db = MvccTestDb::new(); let tx_id = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let table_id = MVTableId::new(-1); @@ -749,7 +749,7 @@ fn setup_test_db() -> (MvccTestDb, u64) { let tx_id = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); (db, tx_id) } @@ -758,7 +758,7 @@ fn setup_lazy_db(initial_keys: &[i64]) -> (MvccTestDb, u64) { let db = MvccTestDb::new(); let tx_id = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let table_id = -1; @@ -774,7 +774,7 @@ fn setup_lazy_db(initial_keys: &[i64]) -> (MvccTestDb, u64) { let tx_id = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); (db, tx_id) } @@ -829,8 +829,8 @@ fn test_lazy_scan_cursor_basic() { db.mvcc_store.clone(), tx_id, table_id, - db.conn.pager.read().clone(), - Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), + db.conn.pager.load().clone(), + Box::new(BTreeCursor::new(db.conn.pager.load().clone(), table_id, 1)), ) .unwrap(); @@ -872,8 +872,8 @@ fn test_lazy_scan_cursor_with_gaps() { db.mvcc_store.clone(), tx_id, table_id, - db.conn.pager.read().clone(), - Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), + db.conn.pager.load().clone(), + Box::new(BTreeCursor::new(db.conn.pager.load().clone(), table_id, 1)), ) .unwrap(); @@ -924,8 +924,8 @@ fn test_cursor_basic() { db.mvcc_store.clone(), tx_id, table_id, - db.conn.pager.read().clone(), - Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), + db.conn.pager.load().clone(), + Box::new(BTreeCursor::new(db.conn.pager.load().clone(), table_id, 1)), ) .unwrap(); @@ -964,13 +964,13 @@ fn test_cursor_with_empty_table() { let db = MvccTestDb::new(); { // FIXME: force page 1 initialization - let pager = db.conn.pager.read().clone(); + let pager = db.conn.pager.load().clone(); let tx_id = db.mvcc_store.begin_tx(pager.clone()).unwrap(); commit_tx(db.mvcc_store.clone(), &db.conn, tx_id).unwrap(); } let tx_id = db .mvcc_store - .begin_tx(db.conn.pager.read().clone()) + .begin_tx(db.conn.pager.load().clone()) .unwrap(); let table_id = -1; // Empty table @@ -979,8 +979,8 @@ fn test_cursor_with_empty_table() { db.mvcc_store.clone(), tx_id, table_id, - db.conn.pager.read().clone(), - Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), + db.conn.pager.load().clone(), + Box::new(BTreeCursor::new(db.conn.pager.load().clone(), table_id, 1)), ) .unwrap(); assert!(cursor.is_empty()); @@ -997,8 +997,8 @@ fn test_cursor_modification_during_scan() { db.mvcc_store.clone(), tx_id, table_id, - db.conn.pager.read().clone(), - Box::new(BTreeCursor::new(db.conn.pager.read().clone(), table_id, 1)), + db.conn.pager.load().clone(), + Box::new(BTreeCursor::new(db.conn.pager.load().clone(), table_id, 1)), ) .unwrap(); @@ -1202,7 +1202,7 @@ fn test_restart() { { let conn = db.connect(); let mvcc_store = db.get_mvcc_store(); - let tx_id = mvcc_store.begin_tx(conn.pager.read().clone()).unwrap(); + let tx_id = mvcc_store.begin_tx(conn.pager.load().clone()).unwrap(); // insert table id -2 into sqlite_schema table (table_id -1) let data = ImmutableRecord::from_values( &[ @@ -1240,13 +1240,13 @@ fn test_restart() { { let conn = db.connect(); let mvcc_store = db.get_mvcc_store(); - let tx_id = mvcc_store.begin_tx(conn.pager.read().clone()).unwrap(); + let tx_id = mvcc_store.begin_tx(conn.pager.load().clone()).unwrap(); let row = generate_simple_string_row((-2).into(), 2, "bar"); mvcc_store.insert(tx_id, row).unwrap(); commit_tx(mvcc_store.clone(), &conn, tx_id).unwrap(); - let tx_id = mvcc_store.begin_tx(conn.pager.read().clone()).unwrap(); + let tx_id = mvcc_store.begin_tx(conn.pager.load().clone()).unwrap(); let row = mvcc_store .read(tx_id, RowID::new((-2).into(), 2)) .unwrap() diff --git a/core/mvcc/mod.rs b/core/mvcc/mod.rs index ba96b4c3e..319215448 100644 --- a/core/mvcc/mod.rs +++ b/core/mvcc/mod.rs @@ -65,7 +65,7 @@ mod tests { let conn = db.get_db().connect().unwrap(); let mvcc_store = db.get_db().mv_store.as_ref().unwrap().clone(); for _ in 0..iterations { - let tx = mvcc_store.begin_tx(conn.pager.read().clone()).unwrap(); + let tx = mvcc_store.begin_tx(conn.pager.load().clone()).unwrap(); let id = IDS.fetch_add(1, Ordering::SeqCst); let id = RowID { table_id: (-2).into(), @@ -74,7 +74,7 @@ mod tests { let row = generate_simple_string_row((-2).into(), id.row_id, "Hello"); mvcc_store.insert(tx, row.clone()).unwrap(); commit_tx_no_conn(&db, tx, &conn).unwrap(); - let tx = mvcc_store.begin_tx(conn.pager.read().clone()).unwrap(); + let tx = mvcc_store.begin_tx(conn.pager.load().clone()).unwrap(); let committed_row = mvcc_store.read(tx, id).unwrap(); commit_tx_no_conn(&db, tx, &conn).unwrap(); assert_eq!(committed_row, Some(row)); @@ -86,7 +86,7 @@ mod tests { let conn = db.get_db().connect().unwrap(); let mvcc_store = db.get_db().mv_store.as_ref().unwrap().clone(); for _ in 0..iterations { - let tx = mvcc_store.begin_tx(conn.pager.read().clone()).unwrap(); + let tx = mvcc_store.begin_tx(conn.pager.load().clone()).unwrap(); let id = IDS.fetch_add(1, Ordering::SeqCst); let id = RowID { table_id: (-2).into(), @@ -95,7 +95,7 @@ mod tests { let row = generate_simple_string_row((-2).into(), id.row_id, "World"); mvcc_store.insert(tx, row.clone()).unwrap(); commit_tx_no_conn(&db, tx, &conn).unwrap(); - let tx = mvcc_store.begin_tx(conn.pager.read().clone()).unwrap(); + let tx = mvcc_store.begin_tx(conn.pager.load().clone()).unwrap(); let committed_row = mvcc_store.read(tx, id).unwrap(); commit_tx_no_conn(&db, tx, &conn).unwrap(); assert_eq!(committed_row, Some(row)); @@ -127,7 +127,7 @@ mod tests { let dropped = mvcc_store.drop_unused_row_versions(); tracing::debug!("garbage collected {dropped} versions"); } - let tx = mvcc_store.begin_tx(conn.pager.read().clone()).unwrap(); + let tx = mvcc_store.begin_tx(conn.pager.load().clone()).unwrap(); let id = i % 16; let id = RowID { table_id: (-2).into(), diff --git a/core/mvcc/persistent_storage/logical_log.rs b/core/mvcc/persistent_storage/logical_log.rs index 48b97b2e3..db8a33317 100644 --- a/core/mvcc/persistent_storage/logical_log.rs +++ b/core/mvcc/persistent_storage/logical_log.rs @@ -513,7 +513,7 @@ mod tests { let db = MvccTestDbNoConn::new_with_random_db(); let (io, pager) = { let conn = db.connect(); - let pager = conn.pager.read().clone(); + let pager = conn.pager.load().clone(); let mvcc_store = db.get_mvcc_store(); let tx_id = mvcc_store.begin_tx(pager.clone()).unwrap(); // insert table id -2 into sqlite_schema table (table_id -1) @@ -580,7 +580,7 @@ mod tests { let db = MvccTestDbNoConn::new_with_random_db(); let (io, pager) = { let conn = db.connect(); - let pager = conn.pager.read().clone(); + let pager = conn.pager.load().clone(); let mvcc_store = db.get_mvcc_store(); let tx_id = mvcc_store.begin_tx(pager.clone()).unwrap(); @@ -691,7 +691,7 @@ mod tests { let mut db = MvccTestDbNoConn::new_with_random_db(); let pager = { let conn = db.connect(); - let pager = conn.pager.read().clone(); + let pager = conn.pager.load().clone(); let mvcc_store = db.get_mvcc_store(); // insert table id -2 into sqlite_schema table (table_id -1) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 0c5279608..7163ab519 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -7864,11 +7864,11 @@ mod tests { pos, &record, 4096, - conn.pager.read().clone(), + conn.pager.load().clone(), &mut fill_cell_payload_state, ) }, - &conn.pager.read().clone(), + &conn.pager.load().clone(), ) .unwrap(); insert_into_cell(page.get_contents(), &payload, pos, 4096).unwrap(); @@ -8137,7 +8137,7 @@ mod tests { let io: Arc = Arc::new(MemoryIO::new()); let db = Database::open_file(io.clone(), ":memory:", false, false).unwrap(); let conn = db.connect().unwrap(); - let pager = conn.pager.read().clone(); + let pager = conn.pager.load().clone(); // FIXME: handle page cache is full @@ -8157,7 +8157,7 @@ mod tests { let io: Arc = Arc::new(MemoryIO::new()); let db = Database::open_file(io.clone(), ":memory:", false, false).unwrap(); let conn = db.connect().unwrap(); - let pager = conn.pager.read().clone(); + let pager = conn.pager.load().clone(); let mut cursor = BTreeCursor::new(pager, 1, 5); let result = cursor.rewind()?; @@ -9626,11 +9626,11 @@ mod tests { cell_idx, &record, 4096, - conn.pager.read().clone(), + conn.pager.load().clone(), &mut fill_cell_payload_state, ) }, - &conn.pager.read().clone(), + &conn.pager.load().clone(), ) .unwrap(); if (free as usize) < payload.len() + 2 { @@ -9708,11 +9708,11 @@ mod tests { cell_idx, &record, 4096, - conn.pager.read().clone(), + conn.pager.load().clone(), &mut fill_cell_payload_state, ) }, - &conn.pager.read().clone(), + &conn.pager.load().clone(), ) .unwrap(); if (free as usize) < payload.len() - 2 { @@ -10081,11 +10081,11 @@ mod tests { 0, &record, 4096, - conn.pager.read().clone(), + conn.pager.load().clone(), &mut fill_cell_payload_state, ) }, - &conn.pager.read().clone(), + &conn.pager.load().clone(), ) .unwrap(); @@ -10167,11 +10167,11 @@ mod tests { 0, &record, 4096, - conn.pager.read().clone(), + conn.pager.load().clone(), &mut fill_cell_payload_state, ) }, - &conn.pager.read().clone(), + &conn.pager.load().clone(), ) .unwrap(); insert_into_cell(page.get_contents(), &payload, 0, 4096).unwrap(); diff --git a/core/storage/wal.rs b/core/storage/wal.rs index 8a7ed81a6..da1de6f66 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -2560,7 +2560,7 @@ pub mod test { for _i in 0..25 { let _ = conn.execute("insert into test (value) values (randomblob(1024)), (randomblob(1024)), (randomblob(1024))"); } - let pager = conn.pager.write(); + let pager = conn.pager.load(); let _ = pager.cacheflush(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); @@ -2651,7 +2651,7 @@ pub mod test { conn.execute("create table test(id integer primary key, value text)") .unwrap(); bulk_inserts(&conn, 20, 3); - let completions = conn.pager.write().cacheflush().unwrap(); + let completions = conn.pager.load().cacheflush().unwrap(); for c in completions { db.io.wait_for_completion(c).unwrap(); } @@ -2677,7 +2677,7 @@ pub mod test { // Run a RESTART checkpoint, should backfill everything and reset WAL counters, // but NOT truncate the file. { - let pager = conn.pager.read(); + let pager = conn.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); let res = run_checkpoint_until_done(&mut *wal, &pager, CheckpointMode::Restart); assert_eq!(res.num_attempted, mx_before); @@ -2723,7 +2723,7 @@ pub mod test { conn.execute("insert into test(value) values ('post_restart')") .unwrap(); conn.pager - .write() + .load() .wal .as_ref() .unwrap() @@ -2746,14 +2746,14 @@ pub mod test { .execute("create table test(id integer primary key, value text)") .unwrap(); bulk_inserts(&conn1.clone(), 15, 2); - let completions = conn1.pager.write().cacheflush().unwrap(); + let completions = conn1.pager.load().cacheflush().unwrap(); for c in completions { db.io.wait_for_completion(c).unwrap(); } // Force a read transaction that will freeze a lower read mark let readmark = { - let pager = conn2.pager.write(); + let pager = conn2.pager.load(); let mut wal2 = pager.wal.as_ref().unwrap().borrow_mut(); wal2.begin_read_tx().unwrap(); wal2.get_max_frame() @@ -2761,14 +2761,14 @@ pub mod test { // generate more frames that the reader will not see. bulk_inserts(&conn1.clone(), 15, 2); - let completions = conn1.pager.write().cacheflush().unwrap(); + let completions = conn1.pager.load().cacheflush().unwrap(); for c in completions { db.io.wait_for_completion(c).unwrap(); } // Run passive checkpoint, expect partial let (res1, max_before) = { - let pager = conn1.pager.read(); + let pager = conn1.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); let res = run_checkpoint_until_done( &mut *wal, @@ -2793,13 +2793,13 @@ pub mod test { ); // Release reader { - let pager = conn2.pager.write(); + let pager = conn2.pager.load(); let wal2 = pager.wal.as_ref().unwrap().borrow_mut(); wal2.end_read_tx(); } // Second passive checkpoint should finish - let pager = conn1.pager.read(); + let pager = conn1.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); let res2 = run_checkpoint_until_done( &mut *wal, @@ -2823,7 +2823,7 @@ pub mod test { // Start a read transaction conn2 .pager - .write() + .load() .wal .as_ref() .unwrap() @@ -2833,7 +2833,7 @@ pub mod test { // checkpoint should succeed here because the wal is fully checkpointed (empty) // so the reader is using readmark0 to read directly from the db file. - let p = conn1.pager.read(); + let p = conn1.pager.load(); let mut w = p.wal.as_ref().unwrap().borrow_mut(); loop { match w.checkpoint(&p, CheckpointMode::Restart) { @@ -2850,7 +2850,7 @@ pub mod test { } } drop(w); - conn2.pager.write().end_read_tx(); + conn2.pager.load().end_read_tx(); conn1 .execute("create table test(id integer primary key, value text)") @@ -2861,8 +2861,8 @@ pub mod test { .unwrap(); } // now that we have some frames to checkpoint, try again - conn2.pager.write().begin_read_tx().unwrap(); - let p = conn1.pager.read(); + conn2.pager.load().begin_read_tx().unwrap(); + let p = conn1.pager.load(); let mut w = p.wal.as_ref().unwrap().borrow_mut(); loop { match w.checkpoint(&p, CheckpointMode::Restart) { @@ -2894,7 +2894,7 @@ pub mod test { bulk_inserts(&conn, 10, 5); // Checkpoint with restart { - let pager = conn.pager.read(); + let pager = conn.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); let result = run_checkpoint_until_done(&mut *wal, &pager, CheckpointMode::Restart); assert!(result.everything_backfilled()); @@ -2935,7 +2935,7 @@ pub mod test { // R1 starts reading let r1_max_frame = { - let pager = conn_r1.pager.write(); + let pager = conn_r1.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); wal.begin_read_tx().unwrap(); wal.get_max_frame() @@ -2944,7 +2944,7 @@ pub mod test { // R2 starts reading, sees more frames than R1 let r2_max_frame = { - let pager = conn_r2.pager.write(); + let pager = conn_r2.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); wal.begin_read_tx().unwrap(); wal.get_max_frame() @@ -2952,7 +2952,7 @@ pub mod test { // try passive checkpoint, should only checkpoint up to R1's position let checkpoint_result = { - let pager = conn_writer.pager.read(); + let pager = conn_writer.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done( &mut *wal, @@ -2976,7 +2976,7 @@ pub mod test { assert_eq!( conn_r2 .pager - .read() + .load() .wal .as_ref() .unwrap() @@ -3001,7 +3001,7 @@ pub mod test { let max_frame_before = wal_shared.read().max_frame.load(Ordering::SeqCst); { - let pager = conn.pager.read(); + let pager = conn.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); let _result = run_checkpoint_until_done( &mut *wal, @@ -3034,7 +3034,7 @@ pub mod test { // start a write transaction { - let pager = conn2.pager.write(); + let pager = conn2.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); let _ = wal.begin_read_tx().unwrap(); wal.begin_write_tx().unwrap(); @@ -3042,7 +3042,7 @@ pub mod test { // should fail because writer lock is held let result = { - let pager = conn1.pager.read(); + let pager = conn1.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); wal.checkpoint(&pager, CheckpointMode::Restart) }; @@ -3054,7 +3054,7 @@ pub mod test { conn2 .pager - .read() + .load() .wal .as_ref() .unwrap() @@ -3063,7 +3063,7 @@ pub mod test { // release write lock conn2 .pager - .read() + .load() .wal .as_ref() .unwrap() @@ -3072,7 +3072,7 @@ pub mod test { // now restart should succeed let result = { - let pager = conn1.pager.read(); + let pager = conn1.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done(&mut *wal, &pager, CheckpointMode::Restart) }; @@ -3090,13 +3090,13 @@ pub mod test { .unwrap(); // Attempt to start a write transaction without a read transaction - let pager = conn.pager.read(); + let pager = conn.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); let _ = wal.begin_write_tx(); } fn check_read_lock_slot(conn: &Arc, expected_slot: usize) -> bool { - let pager = conn.pager.read(); + let pager = conn.pager.load(); let wal = pager.wal.as_ref().unwrap().borrow(); #[cfg(debug_assertions)] { @@ -3124,7 +3124,7 @@ pub mod test { stmt.step().unwrap(); let frame = conn .pager - .read() + .load() .wal .as_ref() .unwrap() @@ -3152,7 +3152,7 @@ pub mod test { // passive checkpoint #1 let result1 = { - let pager = conn_writer.pager.read(); + let pager = conn_writer.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done( &mut *wal, @@ -3169,7 +3169,7 @@ pub mod test { // passive checkpoint #2 let result2 = { - let pager = conn_writer.pager.read(); + let pager = conn_writer.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done( &mut *wal, @@ -3218,7 +3218,7 @@ pub mod test { // Do a TRUNCATE checkpoint { - let pager = conn.pager.read(); + let pager = conn.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done( &mut *wal, @@ -3279,7 +3279,7 @@ pub mod test { // Do a TRUNCATE checkpoint { - let pager = conn.pager.read(); + let pager = conn.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done( &mut *wal, @@ -3317,7 +3317,7 @@ pub mod test { assert_eq!(hdr.page_size, 4096, "invalid page size"); assert_eq!(hdr.checkpoint_seq, 1, "invalid checkpoint_seq"); { - let pager = conn.pager.read(); + let pager = conn.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done( &mut *wal, @@ -3367,7 +3367,7 @@ pub mod test { .unwrap(); // Start a read transaction on conn2 { - let pager = conn2.pager.write(); + let pager = conn2.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); wal.begin_read_tx().unwrap(); } @@ -3375,7 +3375,7 @@ pub mod test { bulk_inserts(&conn1, 5, 5); // Try to start a write transaction on conn2 with a stale snapshot let result = { - let pager = conn2.pager.read(); + let pager = conn2.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); wal.begin_write_tx() }; @@ -3384,14 +3384,14 @@ pub mod test { // End read transaction and start a fresh one { - let pager = conn2.pager.read(); + let pager = conn2.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); wal.end_read_tx(); wal.begin_read_tx().unwrap(); } // Now write transaction should work let result = { - let pager = conn2.pager.read(); + let pager = conn2.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); wal.begin_write_tx() }; @@ -3410,7 +3410,7 @@ pub mod test { bulk_inserts(&conn1, 5, 5); // Do a full checkpoint to move all data to DB file { - let pager = conn1.pager.read(); + let pager = conn1.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done( &mut *wal, @@ -3423,14 +3423,14 @@ pub mod test { // Start a read transaction on conn2 { - let pager = conn2.pager.write(); + let pager = conn2.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); wal.begin_read_tx().unwrap(); } // should use slot 0, as everything is backfilled assert!(check_read_lock_slot(&conn2, 0)); { - let pager = conn1.pager.read(); + let pager = conn1.pager.load(); let wal = pager.wal.as_ref().unwrap().borrow(); let frame = wal.find_frame(5, None); // since we hold readlock0, we should ignore the db file and find_frame should return none @@ -3438,7 +3438,7 @@ pub mod test { } // Try checkpoint, should fail because reader has slot 0 { - let pager = conn1.pager.read(); + let pager = conn1.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); let result = wal.checkpoint(&pager, CheckpointMode::Restart); @@ -3449,12 +3449,12 @@ pub mod test { } // End the read transaction { - let pager = conn2.pager.read(); + let pager = conn2.pager.load(); let wal = pager.wal.as_ref().unwrap().borrow(); wal.end_read_tx(); } { - let pager = conn1.pager.read(); + let pager = conn1.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); let result = run_checkpoint_until_done(&mut *wal, &pager, CheckpointMode::Restart); assert!( @@ -3475,7 +3475,7 @@ pub mod test { bulk_inserts(&conn, 8, 4); // Ensure frames are flushed to the WAL - let completions = conn.pager.write().cacheflush().unwrap(); + let completions = conn.pager.load().cacheflush().unwrap(); for c in completions { db.io.wait_for_completion(c).unwrap(); } @@ -3487,7 +3487,7 @@ pub mod test { // Run FULL checkpoint - must backfill *all* frames up to mx_before let result = { - let pager = conn.pager.read(); + let pager = conn.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done(&mut *wal, &pager, CheckpointMode::Full) }; @@ -3508,26 +3508,26 @@ pub mod test { // First commit some data and flush (reader will snapshot here) bulk_inserts(&writer, 2, 3); - let completions = writer.pager.write().cacheflush().unwrap(); + let completions = writer.pager.load().cacheflush().unwrap(); for c in completions { db.io.wait_for_completion(c).unwrap(); } // Start a read transaction pinned at the current snapshot { - let pager = reader.pager.write(); + let pager = reader.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); wal.begin_read_tx().unwrap(); } let r_snapshot = { - let pager = reader.pager.read(); + let pager = reader.pager.load(); let wal = pager.wal.as_ref().unwrap().borrow(); wal.get_max_frame() }; // Advance WAL beyond the reader's snapshot bulk_inserts(&writer, 3, 4); - let completions = writer.pager.write().cacheflush().unwrap(); + let completions = writer.pager.load().cacheflush().unwrap(); for c in completions { db.io.wait_for_completion(c).unwrap(); } @@ -3536,7 +3536,7 @@ pub mod test { // FULL must return Busy while a reader is stuck behind { - let pager = writer.pager.read(); + let pager = writer.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); loop { match wal.checkpoint(&pager, CheckpointMode::Full) { @@ -3554,13 +3554,13 @@ pub mod test { // Release the reader, now full mode should succeed and backfill everything { - let pager = reader.pager.read(); + let pager = reader.pager.load(); let wal = pager.wal.as_ref().unwrap().borrow(); wal.end_read_tx(); } let result = { - let pager = writer.pager.read(); + let pager = writer.pager.load(); let mut wal = pager.wal.as_ref().unwrap().borrow_mut(); run_checkpoint_until_done(&mut *wal, &pager, CheckpointMode::Full) }; diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs index 47b166dbe..cbad7b9ff 100644 --- a/core/vdbe/execute.rs +++ b/core/vdbe/execute.rs @@ -408,7 +408,7 @@ pub fn op_checkpoint_inner( let step_result = program .connection .pager - .write() + .load() .wal_checkpoint_start(*checkpoint_mode); match step_result { Ok(IOResult::Done(result)) => { @@ -429,7 +429,7 @@ pub fn op_checkpoint_inner( let step_result = program .connection .pager - .write() + .load() .wal_checkpoint_finish(result.as_mut().unwrap()); match step_result { Ok(IOResult::Done(())) => { @@ -7692,7 +7692,7 @@ pub fn op_open_ephemeral( let page_size = return_if_io!(with_header(pager, mv_store, program, |header| header.page_size)); let conn = program.connection.clone(); - let io = conn.pager.read().io.clone(); + let io = conn.pager.load().io.clone(); let rand_num = io.generate_random_number(); let db_file; let db_file_io: Arc; From d0fd258ab58db23154cf33a8086083f1c2c6f16a Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 22 Oct 2025 15:02:24 -0400 Subject: [PATCH 420/428] Handle multiple statements via sqlite3_exec API --- sqlite3/src/lib.rs | 209 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 198 insertions(+), 11 deletions(-) diff --git a/sqlite3/src/lib.rs b/sqlite3/src/lib.rs index 0083715ae..32e20b0ca 100644 --- a/sqlite3/src/lib.rs +++ b/sqlite3/src/lib.rs @@ -380,25 +380,212 @@ type exec_callback = Option< pub unsafe extern "C" fn sqlite3_exec( db: *mut sqlite3, sql: *const ffi::c_char, - _callback: exec_callback, - _context: *mut ffi::c_void, - _err: *mut *mut ffi::c_char, + callback: exec_callback, + context: *mut ffi::c_void, + err: *mut *mut ffi::c_char, ) -> ffi::c_int { if db.is_null() || sql.is_null() { return SQLITE_MISUSE; } - let db: &mut sqlite3 = &mut *db; - let db = db.inner.lock().unwrap(); - let sql = CStr::from_ptr(sql); - let sql = match sql.to_str() { + + let db_ref: &mut sqlite3 = &mut *db; + let sql_cstr = CStr::from_ptr(sql); + let sql_str = match sql_cstr.to_str() { Ok(s) => s, Err(_) => return SQLITE_MISUSE, }; - trace!("sqlite3_exec(sql={})", sql); - match db.conn.execute(sql) { - Ok(_) => SQLITE_OK, - Err(_) => SQLITE_ERROR, + trace!("sqlite3_exec(sql={})", sql_str); + let statements = split_sql_statements(sql_str); + + for stmt_sql in statements { + let trimmed = stmt_sql.trim(); + if trimmed.is_empty() { + continue; + } + + // check if this is a DQL statement, because we will only allow if there is a callback + let is_dql = is_query_statement(trimmed); + if is_dql && callback.is_none() { + if !err.is_null() { + let err_msg = + CString::new("queries return results, use callback or sqlite3_prepare") + .unwrap(); + *err = err_msg.into_raw(); + } + return SQLITE_MISUSE; + } + + // For DML/DDL, use normal execute path + if !is_dql { + let db_inner = db_ref.inner.lock().unwrap(); + match db_inner.conn.execute(trimmed) { + Ok(_) => continue, + Err(e) => { + if !err.is_null() { + let err_msg = format!("SQL error: {e:?}"); + *err = CString::new(err_msg).unwrap().into_raw(); + } + return SQLITE_ERROR; + } + } + } else { + // Handle DQL with callback + let rc = execute_query_with_callback(db, trimmed, callback, context, err); + if rc != SQLITE_OK { + return rc; + } + } } + + SQLITE_OK +} + +/// Detect if a SQL statement is DQL +fn is_query_statement(sql: &str) -> bool { + let sql_upper = sql.to_uppercase(); + let first_token = sql_upper.split_whitespace().next().unwrap_or(""); + + matches!( + first_token, + "SELECT" | "VALUES" | "WITH" | "PRAGMA" | "EXPLAIN" + ) || sql_upper.contains("RETURNING") +} + +/// Execute a query statement with callback for each row +/// Only called when we know callback is Some +unsafe fn execute_query_with_callback( + db: *mut sqlite3, + sql: &str, + callback: exec_callback, + context: *mut ffi::c_void, + err: *mut *mut ffi::c_char, +) -> ffi::c_int { + let sql_cstring = match CString::new(sql) { + Ok(s) => s, + Err(_) => return SQLITE_MISUSE, + }; + + let mut stmt_ptr: *mut sqlite3_stmt = std::ptr::null_mut(); + let rc = sqlite3_prepare_v2( + db, + sql_cstring.as_ptr(), + -1, + &mut stmt_ptr, + std::ptr::null_mut(), + ); + + if rc != SQLITE_OK { + if !err.is_null() { + let err_msg = format!("Prepare failed: {rc}"); + *err = CString::new(err_msg).unwrap().into_raw(); + } + return rc; + } + + let stmt_ref = &*stmt_ptr; + let n_cols = stmt_ref.stmt.num_columns() as ffi::c_int; + let mut column_names: Vec = Vec::with_capacity(n_cols as usize); + + for i in 0..n_cols { + let name = stmt_ref.stmt.get_column_name(i as usize); + column_names.push(CString::new(name.as_bytes()).unwrap()); + } + + loop { + let step_rc = sqlite3_step(stmt_ptr); + + match step_rc { + SQLITE_ROW => { + // Safety: checked earlier + let callback = callback.unwrap(); + + let mut values: Vec = Vec::with_capacity(n_cols as usize); + let mut value_ptrs: Vec<*mut ffi::c_char> = Vec::with_capacity(n_cols as usize); + let mut col_ptrs: Vec<*mut ffi::c_char> = Vec::with_capacity(n_cols as usize); + + for i in 0..n_cols { + let val = stmt_ref.stmt.row().unwrap().get_value(i as usize); + values.push(CString::new(val.to_string().as_bytes()).unwrap()); + } + + for value in &values { + value_ptrs.push(value.as_ptr() as *mut ffi::c_char); + } + for name in &column_names { + col_ptrs.push(name.as_ptr() as *mut ffi::c_char); + } + + let cb_rc = callback( + context, + n_cols, + value_ptrs.as_mut_ptr(), + col_ptrs.as_mut_ptr(), + ); + + if cb_rc != 0 { + sqlite3_finalize(stmt_ptr); + return SQLITE_ABORT; + } + } + SQLITE_DONE => { + break; + } + _ => { + sqlite3_finalize(stmt_ptr); + if !err.is_null() { + let err_msg = format!("Step failed: {step_rc}"); + *err = CString::new(err_msg).unwrap().into_raw(); + } + return step_rc; + } + } + } + + sqlite3_finalize(stmt_ptr) +} + +/// Split SQL string into individual statements +/// Handles quoted strings properly and skips comments +fn split_sql_statements(sql: &str) -> Vec<&str> { + let mut statements = Vec::new(); + let mut current_start = 0; + let mut in_single_quote = false; + let mut in_double_quote = false; + let bytes = sql.as_bytes(); + let mut i = 0; + + while i < bytes.len() { + match bytes[i] { + // Check for escaped quotes first + b'\'' if !in_double_quote => { + if i + 1 < bytes.len() && bytes[i + 1] == b'\'' { + i += 2; + continue; + } + in_single_quote = !in_single_quote; + } + b'"' if !in_single_quote => { + if i + 1 < bytes.len() && bytes[i + 1] == b'"' { + i += 2; + continue; + } + in_double_quote = !in_double_quote; + } + b';' if !in_single_quote && !in_double_quote => { + // we found the statement boundary + statements.push(&sql[current_start..i]); + current_start = i + 1; + } + _ => {} + } + i += 1; + } + + if current_start < sql.len() { + statements.push(&sql[current_start..]); + } + + statements } #[no_mangle] From 921f2e72bd1a5b66442800292b448faf76a3d8df Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 22 Oct 2025 15:02:49 -0400 Subject: [PATCH 421/428] Add integration tests for sqlite3_exec multi-statements --- sqlite3/tests/compat/mod.rs | 759 ++++++++++++++++++++++++++++++++++++ 1 file changed, 759 insertions(+) diff --git a/sqlite3/tests/compat/mod.rs b/sqlite3/tests/compat/mod.rs index 0badf6051..b3c911e66 100644 --- a/sqlite3/tests/compat/mod.rs +++ b/sqlite3/tests/compat/mod.rs @@ -20,6 +20,21 @@ extern "C" { fn sqlite3_close(db: *mut sqlite3) -> i32; fn sqlite3_open(filename: *const libc::c_char, db: *mut *mut sqlite3) -> i32; fn sqlite3_db_filename(db: *mut sqlite3, db_name: *const libc::c_char) -> *const libc::c_char; + fn sqlite3_exec( + db: *mut sqlite3, + sql: *const libc::c_char, + callback: Option< + unsafe extern "C" fn( + arg1: *mut libc::c_void, + arg2: libc::c_int, + arg3: *mut *mut libc::c_char, + arg4: *mut *mut libc::c_char, + ) -> libc::c_int, + >, + arg: *mut libc::c_void, + errmsg: *mut *mut libc::c_char, + ) -> i32; + fn sqlite3_free(ptr: *mut libc::c_void); fn sqlite3_prepare_v2( db: *mut sqlite3, sql: *const libc::c_char, @@ -106,6 +121,7 @@ const SQLITE_CHECKPOINT_RESTART: i32 = 2; const SQLITE_CHECKPOINT_TRUNCATE: i32 = 3; const SQLITE_INTEGER: i32 = 1; const SQLITE_FLOAT: i32 = 2; +const SQLITE_ABORT: i32 = 4; const SQLITE_TEXT: i32 = 3; const SQLITE3_TEXT: i32 = 3; const SQLITE_BLOB: i32 = 4; @@ -762,6 +778,749 @@ mod tests { } } + #[test] + fn test_exec_multi_statement_dml() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // Multiple DML statements in one exec call + let rc = sqlite3_exec( + db, + c"CREATE TABLE bind_text(x TEXT);\ + INSERT INTO bind_text(x) VALUES('TEXT1');\ + INSERT INTO bind_text(x) VALUES('TEXT2');" + .as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + // Verify the data was inserted + let mut stmt = ptr::null_mut(); + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT COUNT(*) FROM bind_text".as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ), + SQLITE_OK + ); + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + assert_eq!(sqlite3_column_int(stmt, 0), 2); + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_multi_statement_with_semicolons_in_strings() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // Semicolons inside strings should not split statements + let rc = sqlite3_exec( + db, + c"CREATE TABLE test_semicolon(x TEXT);\ + INSERT INTO test_semicolon(x) VALUES('value;with;semicolons');\ + INSERT INTO test_semicolon(x) VALUES(\"another;value\");" + .as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + // Verify the values contain semicolons + let mut stmt = ptr::null_mut(); + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT x FROM test_semicolon ORDER BY rowid".as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ), + SQLITE_OK + ); + + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + let val1 = std::ffi::CStr::from_ptr(sqlite3_column_text(stmt, 0)) + .to_str() + .unwrap(); + assert_eq!(val1, "value;with;semicolons"); + + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + let val2 = std::ffi::CStr::from_ptr(sqlite3_column_text(stmt, 0)) + .to_str() + .unwrap(); + assert_eq!(val2, "another;value"); + + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_multi_statement_with_escaped_quotes() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // Test escaped quotes + let rc = sqlite3_exec( + db, + c"CREATE TABLE test_quotes(x TEXT);\ + INSERT INTO test_quotes(x) VALUES('it''s working');\ + INSERT INTO test_quotes(x) VALUES(\"quote\"\"test\"\"\");" + .as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + let mut stmt = ptr::null_mut(); + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT x FROM test_quotes ORDER BY rowid".as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ), + SQLITE_OK + ); + + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + let val1 = std::ffi::CStr::from_ptr(sqlite3_column_text(stmt, 0)) + .to_str() + .unwrap(); + assert_eq!(val1, "it's working"); + + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + let val2 = std::ffi::CStr::from_ptr(sqlite3_column_text(stmt, 0)) + .to_str() + .unwrap(); + assert_eq!(val2, "quote\"test\""); + + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_with_select_callback() { + unsafe { + // Callback that collects results + unsafe extern "C" fn exec_callback( + context: *mut std::ffi::c_void, + n_cols: std::ffi::c_int, + values: *mut *mut std::ffi::c_char, + _cols: *mut *mut std::ffi::c_char, + ) -> std::ffi::c_int { + let results = &mut *(context as *mut Vec>); + let mut row = Vec::new(); + + for i in 0..n_cols as isize { + let value_ptr = *values.offset(i); + let value = if value_ptr.is_null() { + String::from("NULL") + } else { + std::ffi::CStr::from_ptr(value_ptr) + .to_str() + .unwrap() + .to_owned() + }; + row.push(value); + } + results.push(row); + 0 // Continue + } + + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // Setup data + let rc = sqlite3_exec( + db, + c"CREATE TABLE test_select(id INTEGER, name TEXT);\ + INSERT INTO test_select VALUES(1, 'Alice');\ + INSERT INTO test_select VALUES(2, 'Bob');" + .as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + // Execute SELECT with callback + let mut results: Vec> = Vec::new(); + let rc = sqlite3_exec( + db, + c"SELECT id, name FROM test_select ORDER BY id".as_ptr(), + Some(exec_callback), + &mut results as *mut _ as *mut std::ffi::c_void, + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + assert_eq!(results.len(), 2); + assert_eq!(results[0], vec!["1", "Alice"]); + assert_eq!(results[1], vec!["2", "Bob"]); + + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_multi_statement_mixed_dml_select() { + unsafe { + // Callback that counts invocations + unsafe extern "C" fn count_callback( + context: *mut std::ffi::c_void, + _n_cols: std::ffi::c_int, + _values: *mut *mut std::ffi::c_char, + _cols: *mut *mut std::ffi::c_char, + ) -> std::ffi::c_int { + let count = &mut *(context as *mut i32); + *count += 1; + 0 + } + + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + let mut callback_count = 0; + + // Mix of DDL/DML/DQL + let rc = sqlite3_exec( + db, + c"CREATE TABLE mixed(x INTEGER);\ + INSERT INTO mixed VALUES(1);\ + INSERT INTO mixed VALUES(2);\ + SELECT x FROM mixed;\ + INSERT INTO mixed VALUES(3);\ + SELECT COUNT(*) FROM mixed;" + .as_ptr(), + Some(count_callback), + &mut callback_count as *mut _ as *mut std::ffi::c_void, + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + // Callback should be called 3 times total: + // 2 times for first SELECT (2 rows) + // 1 time for second SELECT (1 row with COUNT) + assert_eq!(callback_count, 3); + + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_select_without_callback_fails() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + sqlite3_exec( + db, + c"CREATE TABLE test(x INTEGER)".as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + + // SELECT without callback should fail + let mut err_msg = ptr::null_mut(); + let rc = sqlite3_exec( + db, + c"SELECT * FROM test".as_ptr(), + None, + ptr::null_mut(), + &mut err_msg, + ); + assert_eq!(rc, SQLITE_MISUSE); + + if !err_msg.is_null() { + let msg = std::ffi::CStr::from_ptr(err_msg).to_str().unwrap(); + println!("Error message: {msg:?}"); + assert!(msg.contains("callback") || msg.contains("prepare")); + // Free the error message + sqlite3_free(err_msg as *mut std::ffi::c_void); + } + + let rc = sqlite3_close(db); + println!("RESULT: {rc}"); + assert_eq!(rc, SQLITE_OK); + } + } + + #[test] + fn test_exec_callback_abort() { + unsafe { + // Callback that aborts after first row + unsafe extern "C" fn abort_callback( + context: *mut std::ffi::c_void, + _n_cols: std::ffi::c_int, + _values: *mut *mut std::ffi::c_char, + _cols: *mut *mut std::ffi::c_char, + ) -> std::ffi::c_int { + let count = &mut *(context as *mut i32); + *count += 1; + if *count >= 1 { + return 1; // Abort + } + 0 + } + + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + sqlite3_exec( + db, + c"CREATE TABLE test(x INTEGER);\ + INSERT INTO test VALUES(1),(2),(3);" + .as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + + let mut count = 0; + let rc = sqlite3_exec( + db, + c"SELECT x FROM test".as_ptr(), + Some(abort_callback), + &mut count as *mut _ as *mut std::ffi::c_void, + ptr::null_mut(), + ); + + assert_eq!(rc, SQLITE_ABORT); + assert_eq!(count, 1); // Only processed one row before aborting + + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_error_stops_execution() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + let mut err_msg = ptr::null_mut(); + + // Second statement has error, third should not execute + let rc = sqlite3_exec( + db, + c"CREATE TABLE test(x INTEGER);\ + INSERT INTO nonexistent VALUES(1);\ + CREATE TABLE should_not_exist(y INTEGER);" + .as_ptr(), + None, + ptr::null_mut(), + &mut err_msg, + ); + + assert_eq!(rc, SQLITE_ERROR); + + // Verify third statement didn't execute + let mut stmt = ptr::null_mut(); + let check_rc = sqlite3_prepare_v2( + db, + c"SELECT name FROM sqlite_master WHERE type='table' AND name='should_not_exist'" + .as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ); + assert_eq!(check_rc, SQLITE_OK); + assert_eq!(sqlite3_step(stmt), SQLITE_DONE); // No rows = table doesn't exist + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + + if !err_msg.is_null() { + sqlite3_free(err_msg as *mut std::ffi::c_void); + } + + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_empty_statements() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // Multiple semicolons and whitespace should be handled gracefully + let rc = sqlite3_exec( + db, + c"CREATE TABLE test(x INTEGER);;;\n\n;\t;INSERT INTO test VALUES(1);;;".as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + // Verify both statements executed + let mut stmt = ptr::null_mut(); + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT x FROM test".as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ), + SQLITE_OK + ); + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + assert_eq!(sqlite3_column_int(stmt, 0), 1); + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + #[test] + fn test_exec_with_comments() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // SQL comments shouldn't affect statement splitting + let rc = sqlite3_exec( + db, + c"-- This is a comment\n\ + CREATE TABLE test(x INTEGER); -- inline comment\n\ + INSERT INTO test VALUES(1); -- semicolon in comment ;\n\ + INSERT INTO test VALUES(2) -- end with comment" + .as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + // Verify both inserts worked + let mut stmt = ptr::null_mut(); + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT COUNT(*) FROM test".as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ), + SQLITE_OK + ); + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + assert_eq!(sqlite3_column_int(stmt, 0), 2); + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_nested_quotes() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // Mix of quote types and nesting + let rc = sqlite3_exec( + db, + c"CREATE TABLE test(x TEXT);\ + INSERT INTO test VALUES('single \"double\" inside');\ + INSERT INTO test VALUES(\"double 'single' inside\");\ + INSERT INTO test VALUES('mix;\"quote\";types');" + .as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + // Verify values + let mut stmt = ptr::null_mut(); + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT x FROM test ORDER BY rowid".as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ), + SQLITE_OK + ); + + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + let val1 = std::ffi::CStr::from_ptr(sqlite3_column_text(stmt, 0)) + .to_str() + .unwrap(); + assert_eq!(val1, "single \"double\" inside"); + + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + let val2 = std::ffi::CStr::from_ptr(sqlite3_column_text(stmt, 0)) + .to_str() + .unwrap(); + assert_eq!(val2, "double 'single' inside"); + + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + let val3 = std::ffi::CStr::from_ptr(sqlite3_column_text(stmt, 0)) + .to_str() + .unwrap(); + assert_eq!(val3, "mix;\"quote\";types"); + + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_transaction_rollback() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // Test transaction rollback in multi-statement + let rc = sqlite3_exec( + db, + c"CREATE TABLE test(x INTEGER);\ + BEGIN TRANSACTION;\ + INSERT INTO test VALUES(1);\ + INSERT INTO test VALUES(2);\ + ROLLBACK;" + .as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + // Table should exist but be empty due to rollback + let mut stmt = ptr::null_mut(); + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT COUNT(*) FROM test".as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ), + SQLITE_OK + ); + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + assert_eq!(sqlite3_column_int(stmt, 0), 0); // No rows due to rollback + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_with_pragma() { + unsafe { + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // Callback to capture pragma results + unsafe extern "C" fn pragma_callback( + context: *mut std::ffi::c_void, + _n_cols: std::ffi::c_int, + _values: *mut *mut std::ffi::c_char, + _cols: *mut *mut std::ffi::c_char, + ) -> std::ffi::c_int { + let count = &mut *(context as *mut i32); + *count += 1; + 0 + } + + let mut callback_count = 0; + + // PRAGMA should be treated as DQL when it returns results + let rc = sqlite3_exec( + db, + c"CREATE TABLE test(x INTEGER);\ + PRAGMA table_info(test);" + .as_ptr(), + Some(pragma_callback), + &mut callback_count as *mut _ as *mut std::ffi::c_void, + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + assert!(callback_count > 0); // PRAGMA should return at least one row + + // PRAGMA without callback should fail + let mut err_msg = ptr::null_mut(); + let rc = sqlite3_exec( + db, + c"PRAGMA table_info(test)".as_ptr(), + None, + ptr::null_mut(), + &mut err_msg, + ); + assert_eq!(rc, SQLITE_MISUSE); + if !err_msg.is_null() { + sqlite3_free(err_msg as *mut std::ffi::c_void); + } + + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_with_cte() { + unsafe { + // Callback that collects results + unsafe extern "C" fn exec_callback( + context: *mut std::ffi::c_void, + n_cols: std::ffi::c_int, + values: *mut *mut std::ffi::c_char, + _cols: *mut *mut std::ffi::c_char, + ) -> std::ffi::c_int { + let results = &mut *(context as *mut Vec>); + let mut row = Vec::new(); + for i in 0..n_cols as isize { + let value_ptr = *values.offset(i); + let value = if value_ptr.is_null() { + String::from("NULL") + } else { + std::ffi::CStr::from_ptr(value_ptr) + .to_str() + .unwrap() + .to_owned() + }; + row.push(value); + } + results.push(row); + 0 + } + + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + // CTE should be recognized as DQL + let mut results: Vec> = Vec::new(); + let rc = sqlite3_exec( + db, + c"CREATE TABLE test(x INTEGER);\ + INSERT INTO test VALUES(1),(2),(3);\ + WITH cte AS (SELECT x FROM test WHERE x > 1) SELECT * FROM cte;" + .as_ptr(), + Some(exec_callback), + &mut results as *mut _ as *mut std::ffi::c_void, + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + assert_eq!(results.len(), 2); // Should get 2 and 3 + assert_eq!(results[0], vec!["2"]); + assert_eq!(results[1], vec!["3"]); + + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + + #[test] + fn test_exec_with_returning_clause() { + unsafe { + // Callback for RETURNING results + unsafe extern "C" fn exec_callback( + context: *mut std::ffi::c_void, + n_cols: std::ffi::c_int, + values: *mut *mut std::ffi::c_char, + _cols: *mut *mut std::ffi::c_char, + ) -> std::ffi::c_int { + let results = &mut *(context as *mut Vec>); + let mut row = Vec::new(); + for i in 0..n_cols as isize { + let value_ptr = *values.offset(i); + let value = if value_ptr.is_null() { + String::from("NULL") + } else { + std::ffi::CStr::from_ptr(value_ptr) + .to_str() + .unwrap() + .to_owned() + }; + row.push(value); + } + results.push(row); + 0 + } + + let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); + let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); + let mut db = ptr::null_mut(); + assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); + + let mut results: Vec> = Vec::new(); + + // INSERT...RETURNING should be treated as DQL + let rc = sqlite3_exec( + db, + c"CREATE TABLE test(id INTEGER PRIMARY KEY, x INTEGER);\ + INSERT INTO test(x) VALUES(42) RETURNING id, x;" + .as_ptr(), + Some(exec_callback), + &mut results as *mut _ as *mut std::ffi::c_void, + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + assert_eq!(results.len(), 1); + assert_eq!(results[0][1], "42"); // x value + + // RETURNING without callback should fail + let mut err_msg = ptr::null_mut(); + let rc = sqlite3_exec( + db, + c"DELETE FROM test WHERE x=42 RETURNING id".as_ptr(), + None, + ptr::null_mut(), + &mut err_msg, + ); + assert_eq!(rc, SQLITE_MISUSE); + if !err_msg.is_null() { + sqlite3_free(err_msg as *mut std::ffi::c_void); + } + + assert_eq!(sqlite3_close(db), SQLITE_OK); + } + } + #[cfg(not(feature = "sqlite3"))] mod libsql_ext { From fb26b72b1a1c4939d1f5483bbb57774ef39189af Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 22 Oct 2025 15:29:54 -0400 Subject: [PATCH 422/428] Add comment from sqlite3.h describing behavior of sqlite3_exec --- sqlite3/src/lib.rs | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/sqlite3/src/lib.rs b/sqlite3/src/lib.rs index 32e20b0ca..15ca34bd7 100644 --- a/sqlite3/src/lib.rs +++ b/sqlite3/src/lib.rs @@ -376,6 +376,35 @@ type exec_callback = Option< ) -> ffi::c_int, >; +/* sqlite.h 365 +** The sqlite3_exec() interface is a convenience wrapper around +** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()], +** that allows an application to run multiple statements of SQL +** without having to use a lot of C code. +** +** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, +** semicolon-separate SQL statements passed into its 2nd argument, +** in the context of the [database connection] passed in as its 1st +** argument. ^If the callback function of the 3rd argument to +** sqlite3_exec() is not NULL, then it is invoked for each result row +** coming out of the evaluated SQL statements. ^The 4th argument to +** sqlite3_exec() is relayed through to the 1st argument of each +** callback invocation. ^If the callback pointer to sqlite3_exec() +** is NULL, then no callback is ever invoked and result rows are +** ignored. +** +** ^If an error occurs while evaluating the SQL statements passed into +** sqlite3_exec(), then execution of the current statement stops and +** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec() +** is not NULL then any error message is written into memory obtained +** from [sqlite3_malloc()] and passed back through the 5th parameter. +** To avoid memory leaks, the application should invoke [sqlite3_free()] +** on error message strings returned through the 5th parameter of +** sqlite3_exec() after the error message string is no longer needed. +** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors +** occur, then sqlite3_exec() sets the pointer in its 5th parameter to +** NULL before returning. +*/ #[no_mangle] pub unsafe extern "C" fn sqlite3_exec( db: *mut sqlite3, @@ -437,6 +466,14 @@ pub unsafe extern "C" fn sqlite3_exec( } } + /* ^If the 5th parameter to sqlite3_exec() is not NULL and no errors + ** occur, then sqlite3_exec() sets the pointer in its 5th parameter to + ** NULL before returning. + */ + if !err.is_null() { + *err = std::ptr::null_mut(); + } + SQLITE_OK } From ec30aad0155c23dc0d87b31629b2fa212781896d Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 22 Oct 2025 15:52:47 -0400 Subject: [PATCH 423/428] Replace inefficient is_query_statement fn in sqlite3 api --- sqlite3/src/lib.rs | 49 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 6 deletions(-) diff --git a/sqlite3/src/lib.rs b/sqlite3/src/lib.rs index 15ca34bd7..638b2683d 100644 --- a/sqlite3/src/lib.rs +++ b/sqlite3/src/lib.rs @@ -479,13 +479,50 @@ pub unsafe extern "C" fn sqlite3_exec( /// Detect if a SQL statement is DQL fn is_query_statement(sql: &str) -> bool { - let sql_upper = sql.to_uppercase(); - let first_token = sql_upper.split_whitespace().next().unwrap_or(""); + let trimmed = sql.trim_start(); + if trimmed.is_empty() { + return false; + } + let bytes = trimmed.as_bytes(); - matches!( - first_token, - "SELECT" | "VALUES" | "WITH" | "PRAGMA" | "EXPLAIN" - ) || sql_upper.contains("RETURNING") + let starts_with_ignore_case = |keyword: &[u8]| -> bool { + if bytes.len() < keyword.len() { + return false; + } + // Check keyword matches + if !bytes[..keyword.len()].eq_ignore_ascii_case(keyword) { + return false; + } + // Ensure keyword is followed by whitespace or EOF + bytes.len() == keyword.len() || bytes[keyword.len()].is_ascii_whitespace() + }; + + // Check DQL keywords + if starts_with_ignore_case(b"SELECT") + || starts_with_ignore_case(b"VALUES") + || starts_with_ignore_case(b"WITH") + || starts_with_ignore_case(b"PRAGMA") + || starts_with_ignore_case(b"EXPLAIN") + { + return true; + } + + // Look for RETURNING as a whole word, that's not part of another identifier + let mut i = 0; + while i < bytes.len() { + if i + 9 <= bytes.len() && bytes[i..i + 9].eq_ignore_ascii_case(b"RETURNING") { + // Check it's a word boundary before and after + let is_word_start = + i == 0 || !bytes[i - 1].is_ascii_alphanumeric() && bytes[i - 1] != b'_'; + let is_word_end = i + 9 == bytes.len() + || !bytes[i + 9].is_ascii_alphanumeric() && bytes[i + 9] != b'_'; + if is_word_start && is_word_end { + return true; + } + } + i += 1; + } + false } /// Execute a query statement with callback for each row From 1204494068809e8e6747a57292f70eb9a2b714cf Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 22 Oct 2025 16:41:56 -0400 Subject: [PATCH 424/428] Fix sqlite_exec callback handling to discard rows when not provided --- sqlite3/src/lib.rs | 61 +++++++++++++++++++++++++++++----------------- 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/sqlite3/src/lib.rs b/sqlite3/src/lib.rs index 638b2683d..2a8a223e5 100644 --- a/sqlite3/src/lib.rs +++ b/sqlite3/src/lib.rs @@ -424,28 +424,19 @@ pub unsafe extern "C" fn sqlite3_exec( Err(_) => return SQLITE_MISUSE, }; trace!("sqlite3_exec(sql={})", sql_str); + if !err.is_null() { + *err = std::ptr::null_mut(); + } let statements = split_sql_statements(sql_str); - for stmt_sql in statements { let trimmed = stmt_sql.trim(); if trimmed.is_empty() { continue; } - // check if this is a DQL statement, because we will only allow if there is a callback let is_dql = is_query_statement(trimmed); - if is_dql && callback.is_none() { - if !err.is_null() { - let err_msg = - CString::new("queries return results, use callback or sqlite3_prepare") - .unwrap(); - *err = err_msg.into_raw(); - } - return SQLITE_MISUSE; - } - - // For DML/DDL, use normal execute path if !is_dql { + // For DML/DDL, use normal execute path let db_inner = db_ref.inner.lock().unwrap(); match db_inner.conn.execute(trimmed) { Ok(_) => continue, @@ -457,23 +448,47 @@ pub unsafe extern "C" fn sqlite3_exec( return SQLITE_ERROR; } } + } else if callback.is_none() { + // DQL without callback provided, still execute but discard any result rows + let mut stmt_ptr: *mut sqlite3_stmt = std::ptr::null_mut(); + let rc = sqlite3_prepare_v2( + db, + CString::new(trimmed).unwrap().as_ptr(), + -1, + &mut stmt_ptr, + std::ptr::null_mut(), + ); + if rc != SQLITE_OK { + if !err.is_null() { + let err_msg = format!("Prepare failed: {rc}"); + *err = CString::new(err_msg).unwrap().into_raw(); + } + return rc; + } + loop { + let step_rc = sqlite3_step(stmt_ptr); + match step_rc { + SQLITE_ROW => continue, + SQLITE_DONE => break, + _ => { + sqlite3_finalize(stmt_ptr); + if !err.is_null() { + let err_msg = format!("Step failed: {step_rc}"); + *err = CString::new(err_msg).unwrap().into_raw(); + } + return step_rc; + } + } + } + sqlite3_finalize(stmt_ptr); } else { - // Handle DQL with callback + // DQL with callback let rc = execute_query_with_callback(db, trimmed, callback, context, err); if rc != SQLITE_OK { return rc; } } } - - /* ^If the 5th parameter to sqlite3_exec() is not NULL and no errors - ** occur, then sqlite3_exec() sets the pointer in its 5th parameter to - ** NULL before returning. - */ - if !err.is_null() { - *err = std::ptr::null_mut(); - } - SQLITE_OK } From 5318af16b5313eea3fb4317f4d95bf5b5040e30b Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 22 Oct 2025 16:42:20 -0400 Subject: [PATCH 425/428] Update tests in sqlite3 package to adapt to sqlite behavior --- sqlite3/tests/compat/mod.rs | 101 +++++++++++++++++------------------- 1 file changed, 48 insertions(+), 53 deletions(-) diff --git a/sqlite3/tests/compat/mod.rs b/sqlite3/tests/compat/mod.rs index b3c911e66..a1f016ddc 100644 --- a/sqlite3/tests/compat/mod.rs +++ b/sqlite3/tests/compat/mod.rs @@ -1033,47 +1033,6 @@ mod tests { } } - #[test] - fn test_exec_select_without_callback_fails() { - unsafe { - let temp_file = tempfile::NamedTempFile::with_suffix(".db").unwrap(); - let path = std::ffi::CString::new(temp_file.path().to_str().unwrap()).unwrap(); - let mut db = ptr::null_mut(); - assert_eq!(sqlite3_open(path.as_ptr(), &mut db), SQLITE_OK); - - sqlite3_exec( - db, - c"CREATE TABLE test(x INTEGER)".as_ptr(), - None, - ptr::null_mut(), - ptr::null_mut(), - ); - - // SELECT without callback should fail - let mut err_msg = ptr::null_mut(); - let rc = sqlite3_exec( - db, - c"SELECT * FROM test".as_ptr(), - None, - ptr::null_mut(), - &mut err_msg, - ); - assert_eq!(rc, SQLITE_MISUSE); - - if !err_msg.is_null() { - let msg = std::ffi::CStr::from_ptr(err_msg).to_str().unwrap(); - println!("Error message: {msg:?}"); - assert!(msg.contains("callback") || msg.contains("prepare")); - // Free the error message - sqlite3_free(err_msg as *mut std::ffi::c_void); - } - - let rc = sqlite3_close(db); - println!("RESULT: {rc}"); - assert_eq!(rc, SQLITE_OK); - } - } - #[test] fn test_exec_callback_abort() { unsafe { @@ -1382,7 +1341,7 @@ mod tests { assert_eq!(rc, SQLITE_OK); assert!(callback_count > 0); // PRAGMA should return at least one row - // PRAGMA without callback should fail + // PRAGMA without callback should discard row let mut err_msg = ptr::null_mut(); let rc = sqlite3_exec( db, @@ -1391,7 +1350,7 @@ mod tests { ptr::null_mut(), &mut err_msg, ); - assert_eq!(rc, SQLITE_MISUSE); + assert_eq!(rc, SQLITE_OK); if !err_msg.is_null() { sqlite3_free(err_msg as *mut std::ffi::c_void); } @@ -1489,7 +1448,7 @@ mod tests { let mut results: Vec> = Vec::new(); - // INSERT...RETURNING should be treated as DQL + // INSERT...RETURNING with callback should capture the returned values let rc = sqlite3_exec( db, c"CREATE TABLE test(id INTEGER PRIMARY KEY, x INTEGER);\ @@ -1503,19 +1462,55 @@ mod tests { assert_eq!(results.len(), 1); assert_eq!(results[0][1], "42"); // x value - // RETURNING without callback should fail - let mut err_msg = ptr::null_mut(); - let rc = sqlite3_exec( + // Add another row for testing + sqlite3_exec( db, - c"DELETE FROM test WHERE x=42 RETURNING id".as_ptr(), + c"INSERT INTO test(x) VALUES(99)".as_ptr(), None, ptr::null_mut(), - &mut err_msg, + ptr::null_mut(), ); - assert_eq!(rc, SQLITE_MISUSE); - if !err_msg.is_null() { - sqlite3_free(err_msg as *mut std::ffi::c_void); - } + + // should still delete the row but discard the RETURNING results + let rc = sqlite3_exec( + db, + c"UPDATE test SET id = 3, x = 41 WHERE x=42 RETURNING id".as_ptr(), + None, + ptr::null_mut(), + ptr::null_mut(), + ); + assert_eq!(rc, SQLITE_OK); + + // Verify the row was actually updated + let mut stmt = ptr::null_mut(); + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT COUNT(*) FROM test WHERE x=42".as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ), + SQLITE_OK + ); + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + assert_eq!(sqlite3_column_int(stmt, 0), 0); // Should be 0 rows with x=42 + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); + + // Verify + assert_eq!( + sqlite3_prepare_v2( + db, + c"SELECT COUNT(*) FROM test".as_ptr(), + -1, + &mut stmt, + ptr::null_mut(), + ), + SQLITE_OK + ); + assert_eq!(sqlite3_step(stmt), SQLITE_ROW); + assert_eq!(sqlite3_column_int(stmt, 0), 2); + assert_eq!(sqlite3_finalize(stmt), SQLITE_OK); assert_eq!(sqlite3_close(db), SQLITE_OK); } From e9f1a451a28dbef58bc5ba7c43d7ae97464c9ce9 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 24 Oct 2025 09:35:54 -0400 Subject: [PATCH 426/428] Remove sqlite comment from sqlite3_exec api --- sqlite3/src/lib.rs | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/sqlite3/src/lib.rs b/sqlite3/src/lib.rs index 2a8a223e5..2cfa52bbd 100644 --- a/sqlite3/src/lib.rs +++ b/sqlite3/src/lib.rs @@ -376,35 +376,6 @@ type exec_callback = Option< ) -> ffi::c_int, >; -/* sqlite.h 365 -** The sqlite3_exec() interface is a convenience wrapper around -** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()], -** that allows an application to run multiple statements of SQL -** without having to use a lot of C code. -** -** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, -** semicolon-separate SQL statements passed into its 2nd argument, -** in the context of the [database connection] passed in as its 1st -** argument. ^If the callback function of the 3rd argument to -** sqlite3_exec() is not NULL, then it is invoked for each result row -** coming out of the evaluated SQL statements. ^The 4th argument to -** sqlite3_exec() is relayed through to the 1st argument of each -** callback invocation. ^If the callback pointer to sqlite3_exec() -** is NULL, then no callback is ever invoked and result rows are -** ignored. -** -** ^If an error occurs while evaluating the SQL statements passed into -** sqlite3_exec(), then execution of the current statement stops and -** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec() -** is not NULL then any error message is written into memory obtained -** from [sqlite3_malloc()] and passed back through the 5th parameter. -** To avoid memory leaks, the application should invoke [sqlite3_free()] -** on error message strings returned through the 5th parameter of -** sqlite3_exec() after the error message string is no longer needed. -** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors -** occur, then sqlite3_exec() sets the pointer in its 5th parameter to -** NULL before returning. -*/ #[no_mangle] pub unsafe extern "C" fn sqlite3_exec( db: *mut sqlite3, From 1ccd61088ecf00dca573033626dac098c7463e67 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Fri, 24 Oct 2025 14:13:53 -0500 Subject: [PATCH 427/428] Always returns Floats for sum and avg on DBSP aggregations Trying to return integer sometimes to match SQLite led to more problems that I anticipated. The reason being, we can't *really* match SQLite's behavior unless we know the type of *every* element in the sum. This is not impossible, but it is very hard, for very little gain. Fixes #3831 --- core/incremental/aggregate_operator.rs | 21 ++- core/incremental/compiler.rs | 61 +++---- testing/materialized_views.test | 222 ++++++++++++------------- 3 files changed, 153 insertions(+), 151 deletions(-) diff --git a/core/incremental/aggregate_operator.rs b/core/incremental/aggregate_operator.rs index a24c4437c..a6d0fb5b8 100644 --- a/core/incremental/aggregate_operator.rs +++ b/core/incremental/aggregate_operator.rs @@ -1165,6 +1165,14 @@ impl AggregateState { } /// Convert aggregate state to output values + /// + /// Note: SQLite returns INTEGER for SUM when all inputs are integers, and REAL when any input is REAL. + /// However, in an incremental system like DBSP, we cannot track whether all current values are integers + /// after deletions. For example: + /// - Initial: SUM(10, 20, 30.5) = 60.5 (REAL) + /// - After DELETE 30.5: SUM(10, 20) = 30 (SQLite returns INTEGER, but we only know the sum is 30.0) + /// + /// Therefore, we always return REAL for SUM operations. pub fn to_values(&self, aggregates: &[AggregateFunction]) -> Vec { let mut result = Vec::new(); @@ -1180,21 +1188,12 @@ impl AggregateState { } AggregateFunction::Sum(col_idx) => { let sum = self.sums.get(col_idx).copied().unwrap_or(0.0); - // Return as integer if it's a whole number, otherwise as float - if sum.fract() == 0.0 { - result.push(Value::Integer(sum as i64)); - } else { - result.push(Value::Float(sum)); - } + result.push(Value::Float(sum)); } AggregateFunction::SumDistinct(col_idx) => { // Return the computed SUM(DISTINCT) let sum = self.distinct_sums.get(col_idx).copied().unwrap_or(0.0); - if sum.fract() == 0.0 { - result.push(Value::Integer(sum as i64)); - } else { - result.push(Value::Float(sum)); - } + result.push(Value::Float(sum)); } AggregateFunction::Avg(col_idx) => { if let Some((sum, count)) = self.avgs.get(col_idx) { diff --git a/core/incremental/compiler.rs b/core/incremental/compiler.rs index 316f31742..d977d65de 100644 --- a/core/incremental/compiler.rs +++ b/core/incremental/compiler.rs @@ -3381,11 +3381,12 @@ mod tests { assert_eq!(row.values.len(), 1); // The hex function converts the number to string first, then to hex - // 96 as string is "96", which in hex is "3936" (hex of ASCII '9' and '6') + // SUM now returns Float, so 96.0 as string is "96.0", which in hex is "39362E30" + // (hex of ASCII '9', '6', '.', '0') assert_eq!( row.values[0], - Value::Text("3936".to_string().into()), - "HEX(SUM(age + 2)) should return '3936' for sum of 96" + Value::Text("39362E30".to_string().into()), + "HEX(SUM(age + 2)) should return '39362E30' for sum of 96.0" ); // Test incremental update: add a new user @@ -3404,22 +3405,22 @@ mod tests { let result = test_execute(&mut circuit, input_data, pager.clone()).unwrap(); - // Expected: new SUM(age + 2) = 96 + (40+2) = 138 - // HEX(138) = hex of "138" = "313338" + // Expected: new SUM(age + 2) = 96.0 + (40+2) = 138.0 + // HEX(138.0) = hex of "138.0" = "3133382E30" assert_eq!(result.changes.len(), 2); - // First change: remove old aggregate (96) + // First change: remove old aggregate (96.0) let (row, weight) = &result.changes[0]; assert_eq!(*weight, -1); - assert_eq!(row.values[0], Value::Text("3936".to_string().into())); + assert_eq!(row.values[0], Value::Text("39362E30".to_string().into())); - // Second change: add new aggregate (138) + // Second change: add new aggregate (138.0) let (row, weight) = &result.changes[1]; assert_eq!(*weight, 1); assert_eq!( row.values[0], - Value::Text("313338".to_string().into()), - "HEX(SUM(age + 2)) should return '313338' for sum of 138" + Value::Text("3133382E30".to_string().into()), + "HEX(SUM(age + 2)) should return '3133382E30' for sum of 138.0" ); } @@ -3467,8 +3468,8 @@ mod tests { .unwrap(); // Expected results: - // Alice: SUM(25*2 + 35*2) = 50 + 70 = 120, HEX("120") = "313230" - // Bob: SUM(30*2) = 60, HEX("60") = "3630" + // Alice: SUM(25*2 + 35*2) = 50 + 70 = 120.0, HEX("120.0") = "3132302E30" + // Bob: SUM(30*2) = 60.0, HEX("60.0") = "36302E30" assert_eq!(result.changes.len(), 2); let results: HashMap = result @@ -3489,13 +3490,13 @@ mod tests { assert_eq!( results.get("Alice").unwrap(), - "313230", - "Alice's HEX(SUM(age * 2)) should be '313230' (120)" + "3132302E30", + "Alice's HEX(SUM(age * 2)) should be '3132302E30' (120.0)" ); assert_eq!( results.get("Bob").unwrap(), - "3630", - "Bob's HEX(SUM(age * 2)) should be '3630' (60)" + "36302E30", + "Bob's HEX(SUM(age * 2)) should be '36302E30' (60.0)" ); } @@ -4812,12 +4813,12 @@ mod tests { ); // Check the results - let mut results_map: HashMap = HashMap::new(); + let mut results_map: HashMap = HashMap::new(); for (row, weight) in result.changes { assert_eq!(weight, 1); assert_eq!(row.values.len(), 2); // name and total_quantity - if let (Value::Text(name), Value::Integer(total)) = (&row.values[0], &row.values[1]) { + if let (Value::Text(name), Value::Float(total)) = (&row.values[0], &row.values[1]) { results_map.insert(name.to_string(), *total); } else { panic!("Unexpected value types in result"); @@ -4826,12 +4827,12 @@ mod tests { assert_eq!( results_map.get("Alice"), - Some(&10), + Some(&10.0), "Alice should have total quantity 10" ); assert_eq!( results_map.get("Bob"), - Some(&7), + Some(&7.0), "Bob should have total quantity 7" ); } @@ -4928,24 +4929,24 @@ mod tests { ); // Check the results - let mut results_map: HashMap = HashMap::new(); + let mut results_map: HashMap = HashMap::new(); for (row, weight) in result.changes { assert_eq!(weight, 1); assert_eq!(row.values.len(), 2); // name and total - if let (Value::Text(name), Value::Integer(total)) = (&row.values[0], &row.values[1]) { + if let (Value::Text(name), Value::Float(total)) = (&row.values[0], &row.values[1]) { results_map.insert(name.to_string(), *total); } } assert_eq!( results_map.get("Alice"), - Some(&8), + Some(&8.0), "Alice should have total 8" ); assert_eq!( results_map.get("Charlie"), - Some(&7), + Some(&7.0), "Charlie should have total 7" ); assert_eq!(results_map.get("Bob"), None, "Bob should be filtered out"); @@ -5084,7 +5085,7 @@ mod tests { // Row should have name, product_name, and sum columns assert_eq!(row.values.len(), 3); - if let (Value::Text(name), Value::Text(product), Value::Integer(total)) = + if let (Value::Text(name), Value::Text(product), Value::Float(total)) = (&row.values[0], &row.values[1], &row.values[2]) { let key = format!("{}-{}", name.as_ref(), product.as_ref()); @@ -5092,12 +5093,14 @@ mod tests { match key.as_str() { "Alice-Widget" => { - assert_eq!(*total, 9, "Alice should have ordered 9 Widgets total") + assert_eq!(*total, 9.0, "Alice should have ordered 9 Widgets total") } - "Alice-Gadget" => assert_eq!(*total, 3, "Alice should have ordered 3 Gadgets"), - "Bob-Widget" => assert_eq!(*total, 7, "Bob should have ordered 7 Widgets"), + "Alice-Gadget" => { + assert_eq!(*total, 3.0, "Alice should have ordered 3 Gadgets") + } + "Bob-Widget" => assert_eq!(*total, 7.0, "Bob should have ordered 7 Widgets"), "Bob-Doohickey" => { - assert_eq!(*total, 2, "Bob should have ordered 2 Doohickeys") + assert_eq!(*total, 2.0, "Bob should have ordered 2 Doohickeys") } _ => panic!("Unexpected result: {key}"), } diff --git a/testing/materialized_views.test b/testing/materialized_views.test index e2bfd11bd..2827f30d4 100755 --- a/testing/materialized_views.test +++ b/testing/materialized_views.test @@ -66,9 +66,9 @@ do_execsql_test_on_specific_db {:memory:} matview-aggregation-population { GROUP BY day; SELECT * FROM daily_totals ORDER BY day; -} {1|7|2 -2|2|2 -3|4|2} +} {1|7.0|2 +2|2.0|2 +3|4.0|2} do_execsql_test_on_specific_db {:memory:} matview-filter-with-groupby { CREATE TABLE t(a INTEGER, b INTEGER); @@ -81,9 +81,9 @@ do_execsql_test_on_specific_db {:memory:} matview-filter-with-groupby { GROUP BY b; SELECT * FROM v ORDER BY yourb; -} {3|3|1 -6|6|1 -7|7|1} +} {3|3.0|1 +6|6.0|1 +7|7.0|1} do_execsql_test_on_specific_db {:memory:} matview-insert-maintenance { CREATE TABLE t(a INTEGER, b INTEGER); @@ -101,12 +101,12 @@ do_execsql_test_on_specific_db {:memory:} matview-insert-maintenance { INSERT INTO t VALUES (1,1), (2,2); SELECT * FROM v ORDER BY b; -} {3|3|1 -6|6|1 -3|7|2 -6|11|2 -3|7|2 -6|11|2} +} {3|3.0|1 +6|6.0|1 +3|7.0|2 +6|11.0|2 +3|7.0|2 +6|11.0|2} do_execsql_test_on_specific_db {:memory:} matview-delete-maintenance { CREATE TABLE items(id INTEGER, category TEXT, amount INTEGER); @@ -129,11 +129,11 @@ do_execsql_test_on_specific_db {:memory:} matview-delete-maintenance { DELETE FROM items WHERE category = 'B'; SELECT * FROM category_sums ORDER BY category; -} {A|90|3 -B|60|2 -A|60|2 -B|60|2 -A|60|2} +} {A|90.0|3 +B|60.0|2 +A|60.0|2 +B|60.0|2 +A|60.0|2} do_execsql_test_on_specific_db {:memory:} matview-update-maintenance { CREATE TABLE records(id INTEGER, value INTEGER, status INTEGER); @@ -155,12 +155,12 @@ do_execsql_test_on_specific_db {:memory:} matview-update-maintenance { UPDATE records SET status = 2 WHERE id = 3; SELECT * FROM status_totals ORDER BY status; -} {1|400|2 -2|600|2 -1|450|2 -2|600|2 -1|150|1 -2|900|3} +} {1|400.0|2 +2|600.0|2 +1|450.0|2 +2|600.0|2 +1|150.0|1 +2|900.0|3} do_execsql_test_on_specific_db {:memory:} matview-integer-primary-key-basic { CREATE TABLE t(a INTEGER PRIMARY KEY, b INTEGER); @@ -243,12 +243,12 @@ do_execsql_test_on_specific_db {:memory:} matview-integer-primary-key-with-aggre DELETE FROM t WHERE a = 3; SELECT * FROM v ORDER BY b; -} {10|500|1 -20|700|2 -10|600|2 -20|700|2 -10|600|2 -20|400|1} +} {10|500.0|1 +20|700.0|2 +10|600.0|2 +20|700.0|2 +10|600.0|2 +20|400.0|1} do_execsql_test_on_specific_db {:memory:} matview-complex-filter-aggregation { CREATE TABLE transactions( @@ -282,17 +282,17 @@ do_execsql_test_on_specific_db {:memory:} matview-complex-filter-aggregation { DELETE FROM transactions WHERE id = 3; SELECT * FROM account_deposits ORDER BY account; -} {100|70|2 -200|100|1 -300|60|1 -100|95|3 -200|100|1 -300|60|1 -100|125|3 -200|100|1 -300|60|1 -100|125|3 -300|60|1} +} {100|70.0|2 +200|100.0|1 +300|60.0|1 +100|95.0|3 +200|100.0|1 +300|60.0|1 +100|125.0|3 +200|100.0|1 +300|60.0|1 +100|125.0|3 +300|60.0|1} do_execsql_test_on_specific_db {:memory:} matview-sum-count-only { CREATE TABLE data(id INTEGER, value INTEGER, category INTEGER); @@ -317,12 +317,12 @@ do_execsql_test_on_specific_db {:memory:} matview-sum-count-only { UPDATE data SET value = 35 WHERE id = 3; SELECT * FROM category_stats ORDER BY category; -} {1|80|3 -2|70|2 -1|85|4 -2|70|2 -1|85|4 -2|75|2} +} {1|80.0|3 +2|70.0|2 +1|85.0|4 +2|70.0|2 +1|85.0|4 +2|75.0|2} do_execsql_test_on_specific_db {:memory:} matview-empty-table-population { CREATE TABLE t(a INTEGER, b INTEGER); @@ -337,8 +337,8 @@ do_execsql_test_on_specific_db {:memory:} matview-empty-table-population { INSERT INTO t VALUES (1, 3), (2, 7), (3, 9); SELECT * FROM v ORDER BY b; } {0 -7|2|1 -9|3|1} +7|2.0|1 +9|3.0|1} do_execsql_test_on_specific_db {:memory:} matview-all-rows-filtered { CREATE TABLE t(a INTEGER, b INTEGER); @@ -386,17 +386,17 @@ do_execsql_test_on_specific_db {:memory:} matview-mixed-operations-sequence { INSERT INTO orders VALUES (4, 300, 150); SELECT * FROM customer_totals ORDER BY customer_id; -} {100|50|1 -200|75|1 -100|75|2 -200|75|1 -100|75|2 -200|100|1 -100|25|1 -200|100|1 -100|25|1 -200|100|1 -300|150|1} +} {100|50.0|1 +200|75.0|1 +100|75.0|2 +200|75.0|1 +100|75.0|2 +200|100.0|1 +100|25.0|1 +200|100.0|1 +100|25.0|1 +200|100.0|1 +300|150.0|1} do_execsql_test_on_specific_db {:memory:} matview-projections { CREATE TABLE t(a,b); @@ -502,13 +502,13 @@ do_execsql_test_on_specific_db {:memory:} matview-rollback-aggregation { ROLLBACK; SELECT * FROM product_totals ORDER BY product_id; -} {1|300|2 -2|400|2 -1|350|3 -2|400|2 -3|300|1 -1|300|2 -2|400|2} +} {1|300.0|2 +2|400.0|2 +1|350.0|3 +2|400.0|2 +3|300.0|1 +1|300.0|2 +2|400.0|2} do_execsql_test_on_specific_db {:memory:} matview-rollback-mixed-operations { CREATE TABLE orders(id INTEGER PRIMARY KEY, customer INTEGER, amount INTEGER); @@ -529,12 +529,12 @@ do_execsql_test_on_specific_db {:memory:} matview-rollback-mixed-operations { ROLLBACK; SELECT * FROM customer_totals ORDER BY customer; -} {100|75|2 -200|75|1 -100|150|2 -200|150|1 -100|75|2 -200|75|1} +} {100|75.0|2 +200|75.0|1 +100|150.0|2 +200|150.0|1 +100|75.0|2 +200|75.0|1} do_execsql_test_on_specific_db {:memory:} matview-rollback-filtered-aggregation { CREATE TABLE transactions(id INTEGER, account INTEGER, amount INTEGER, type TEXT); @@ -560,11 +560,11 @@ do_execsql_test_on_specific_db {:memory:} matview-rollback-filtered-aggregation ROLLBACK; SELECT * FROM deposits ORDER BY account; -} {100|50|1 -200|100|1 -100|135|2 -100|50|1 -200|100|1} +} {100|50.0|1 +200|100.0|1 +100|135.0|2 +100|50.0|1 +200|100.0|1} do_execsql_test_on_specific_db {:memory:} matview-rollback-empty-view { CREATE TABLE t(a INTEGER, b INTEGER); @@ -619,8 +619,8 @@ do_execsql_test_on_specific_db {:memory:} matview-join-with-aggregation { GROUP BY u.name; SELECT * FROM user_totals ORDER BY name; -} {Alice|250 -Bob|250} +} {Alice|250.0 +Bob|250.0} do_execsql_test_on_specific_db {:memory:} matview-three-way-join { CREATE TABLE customers(id INTEGER PRIMARY KEY, name TEXT, city TEXT); @@ -661,9 +661,9 @@ do_execsql_test_on_specific_db {:memory:} matview-three-way-join-with-aggregatio GROUP BY c.name, p.name; SELECT * FROM sales_totals ORDER BY customer_name, product_name; -} {Alice|Gadget|3|60 -Alice|Widget|9|90 -Bob|Widget|2|20} +} {Alice|Gadget|3.0|60.0 +Alice|Widget|9.0|90.0 +Bob|Widget|2.0|20.0} do_execsql_test_on_specific_db {:memory:} matview-join-incremental-insert { CREATE TABLE users(id INTEGER PRIMARY KEY, name TEXT); @@ -864,9 +864,9 @@ do_execsql_test_on_specific_db {:memory:} matview-aggregation-before-join { GROUP BY c.id, c.name, c.tier; SELECT * FROM customer_order_summary ORDER BY total_quantity DESC; -} {Bob|Silver|2|7 -Alice|Gold|3|6 -Charlie|Bronze|1|1} +} {Bob|Silver|2|7.0 +Alice|Gold|3|6.0 +Charlie|Bronze|1|1.0} # Test 4: Join with aggregation AFTER the join do_execsql_test_on_specific_db {:memory:} matview-aggregation-after-join { @@ -894,8 +894,8 @@ do_execsql_test_on_specific_db {:memory:} matview-aggregation-after-join { GROUP BY st.region; SELECT * FROM regional_sales ORDER BY total_revenue DESC; -} {North|38|3150 -South|18|1500} +} {North|38.0|3150.0 +South|18.0|1500.0} # Test 5: Modifying both tables in same transaction do_execsql_test_on_specific_db {:memory:} matview-join-both-tables-modified { @@ -1223,8 +1223,8 @@ do_execsql_test_on_specific_db {:memory:} matview-union-with-aggregation { FROM q2_sales; SELECT * FROM half_year_summary ORDER BY quarter; -} {Q1|68|16450 -Q2|105|21750} +} {Q1|68.0|16450.0 +Q2|105.0|21750.0} do_execsql_test_on_specific_db {:memory:} matview-union-with-join { CREATE TABLE customers(id INTEGER PRIMARY KEY, name TEXT, type TEXT); @@ -1654,8 +1654,8 @@ do_execsql_test_on_specific_db {:memory:} matview-groupby-scalar-function { GROUP BY substr(orderdate, 1, 4); SELECT * FROM yearly_totals ORDER BY 1; -} {2020|250 -2021|200} +} {2020|250.0 +2021|200.0} do_execsql_test_on_specific_db {:memory:} matview-groupby-alias { CREATE TABLE orders(id INTEGER, orderdate TEXT, amount INTEGER); @@ -1669,8 +1669,8 @@ do_execsql_test_on_specific_db {:memory:} matview-groupby-alias { GROUP BY year; SELECT * FROM yearly_totals ORDER BY year; -} {2020|250 -2021|200} +} {2020|250.0 +2021|200.0} do_execsql_test_on_specific_db {:memory:} matview-groupby-position { CREATE TABLE orders(id INTEGER, orderdate TEXT, amount INTEGER, nation TEXT); @@ -1684,8 +1684,8 @@ do_execsql_test_on_specific_db {:memory:} matview-groupby-position { GROUP BY 1, 2; SELECT * FROM national_yearly ORDER BY nation, year; -} {UK|2021|200 -USA|2020|250} +} {UK|2021|200.0 +USA|2020|250.0} do_execsql_test_on_specific_db {:memory:} matview-groupby-scalar-incremental { CREATE TABLE orders(id INTEGER, orderdate TEXT, amount INTEGER); @@ -1701,10 +1701,10 @@ do_execsql_test_on_specific_db {:memory:} matview-groupby-scalar-incremental { SELECT * FROM yearly_totals; INSERT INTO orders VALUES (3, '2021-03-20', 200); SELECT * FROM yearly_totals ORDER BY year; -} {2020|100 -2020|250 -2020|250 -2021|200} +} {2020|100.0 +2020|250.0 +2020|250.0 +2021|200.0} do_execsql_test_on_specific_db {:memory:} matview-groupby-join-position { CREATE TABLE t(a INTEGER); @@ -2301,12 +2301,12 @@ do_execsql_test_on_specific_db {:memory:} matview-sum-distinct { -- Add a new distinct value INSERT INTO sales VALUES ('South', 500); SELECT * FROM sales_summary ORDER BY region; -} {North|300 -South|700 -North|300 -South|700 -North|300 -South|1200} +} {North|300.0 +South|700.0 +North|300.0 +South|700.0 +North|300.0 +South|1200.0} do_execsql_test_on_specific_db {:memory:} matview-avg-distinct { CREATE TABLE grades(student TEXT, score INTEGER); @@ -2434,10 +2434,10 @@ do_execsql_test_on_specific_db {:memory:} matview-multiple-distinct-aggregates-w ('B', 7, 80, 800); -- New values SELECT * FROM multi_distinct ORDER BY grp; -} {A|3|100|250.0 -B|3|180|600.0 -A|3|100|250.0 -B|4|260|650.0} +} {A|3|100.0|250.0 +B|3|180.0|600.0 +A|3|100.0|250.0 +B|4|260.0|650.0} do_execsql_test_on_specific_db {:memory:} matview-multiple-distinct-aggregates-no-groupby { CREATE TABLE data2(x INTEGER, y INTEGER, z INTEGER); @@ -2467,8 +2467,8 @@ do_execsql_test_on_specific_db {:memory:} matview-multiple-distinct-aggregates-n (7, 80, 800); -- New values SELECT * FROM multi_distinct_global; -} {6|280|400.0 -7|360|450.0} +} {6|280.0|400.0 +7|360.0|450.0} do_execsql_test_on_specific_db {:memory:} matview-count-distinct-global-aggregate { CREATE TABLE all_data(val INTEGER); From 7f8f1bc07444941cc3d942a1f4cfad9c23ceed3e Mon Sep 17 00:00:00 2001 From: Sumit Patel Date: Sat, 25 Oct 2025 16:53:59 +0530 Subject: [PATCH 428/428] Update the write_varint method to use an encoded buffer of size 9 instead of 10. The SQLite varint specification states that the varint is guaranteed to be a maximum of 9 bytes, but our version of write_varint initializes a buffer of 10 bytes. Changing the size to match the specification. --- core/storage/sqlite3_ondisk.rs | 2 +- tests/integration/query_processing/test_btree.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs index 190b94479..93f897dc1 100644 --- a/core/storage/sqlite3_ondisk.rs +++ b/core/storage/sqlite3_ondisk.rs @@ -1601,7 +1601,7 @@ pub fn write_varint(buf: &mut [u8], value: u64) -> usize { return 9; } - let mut encoded: [u8; 10] = [0; 10]; + let mut encoded: [u8; 9] = [0; 9]; let mut bytes = value; let mut n = 0; while bytes != 0 { diff --git a/tests/integration/query_processing/test_btree.rs b/tests/integration/query_processing/test_btree.rs index e55c512f0..c24d025d6 100644 --- a/tests/integration/query_processing/test_btree.rs +++ b/tests/integration/query_processing/test_btree.rs @@ -130,7 +130,7 @@ pub fn write_varint(buf: &mut [u8], value: u64) -> usize { return 9; } - let mut encoded: [u8; 10] = [0; 10]; + let mut encoded: [u8; 9] = [0; 9]; let mut bytes = value; let mut n = 0; while bytes != 0 {