Merge 'index_method: implement basic trait and simple toy index' from Nikita Sivukhin

This PR adds the `index_method` trait and an implementation of a toy sparse
vector index.
To keep the PR lightweight, index methods are not yet deeply integrated into
the query planner; only the components needed to make integration tests that
use the `index_method` API directly work have been added.
The primary changes introduced in this PR are:
1. `SymbolTable` is extended with an `index_methods` field, and the builtin
extensions are populated with 2 native index methods: `backing_btree` and
`toy_vector_sparse_ivf`
2. The `Index` struct is extended with an `index_method` field which holds the
`IndexMethodAttachment` constructed for the table with the given parameters
by the `IndexMethod` "factory" trait
The toy index implementation stores inverted index pairs `(dimension, rowid)`
in an auxiliary BTree index. That index uses the special `backing_btree`
index_method, which is marked with `backing_btree: true` and treated in a
special way by the db core: it is a real BTree index that is not managed by
the tursodb core and must instead be managed by the index_method that created
it (so that index_method is responsible for populating, creating, and
destroying the btree).
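
For illustration, the toy index is meant to be exercised with SQL of roughly
this shape (lifted from the doc comments and query patterns added in this PR;
the table and column names are placeholders):

  CREATE INDEX t_idx ON t USING toy_vector_sparse_ivf (embedding);
  SELECT vector_distance_jaccard(embedding, ?) as d FROM t ORDER BY d LIMIT ?;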

Reviewed-by: Jussi Saurio <jussi.saurio@gmail.com>

Closes #3846
Committed by Jussi Saurio (via GitHub) on 2025-10-28 07:01:36 +02:00
37 changed files with 1777 additions and 477 deletions

@@ -1,6 +1,11 @@
#[cfg(feature = "fs")]
mod dynamic;
mod vtab_xconnect;
use crate::index_method::backing_btree::BackingBtreeIndexMethod;
use crate::index_method::toy_vector_sparse_ivf::VectorSparseInvertedIndexMethod;
use crate::index_method::{
BACKING_BTREE_INDEX_METHOD_NAME, TOY_VECTOR_SPARSE_IVF_INDEX_METHOD_NAME,
};
use crate::schema::{Schema, Table};
#[cfg(all(target_os = "linux", feature = "io_uring", not(miri)))]
use crate::UringIO;
@@ -162,6 +167,17 @@ impl Database {
/// Register any built-in extensions that can be stored on the Database so we do not have
/// to register these once-per-connection, and the connection can just extend its symbol table
pub fn register_global_builtin_extensions(&self) -> Result<(), String> {
{
let mut syms = self.builtin_syms.write();
syms.index_methods.insert(
TOY_VECTOR_SPARSE_IVF_INDEX_METHOD_NAME.to_string(),
Arc::new(VectorSparseInvertedIndexMethod),
);
syms.index_methods.insert(
BACKING_BTREE_INDEX_METHOD_NAME.to_string(),
Arc::new(BackingBtreeIndexMethod),
);
}
let syms = self.builtin_syms.data_ptr();
// Pass the mutex pointer and the appropriate handler
let schema_mutex_ptr = &self.schema as *const Mutex<Arc<Schema>> as *mut Mutex<Arc<Schema>>;

@@ -70,6 +70,7 @@ pub fn create_dbsp_state_index(root_page: i64) -> Index {
ephemeral: false,
has_rowid: true,
where_clause: None,
index_method: None,
}
}

@@ -0,0 +1,45 @@
use std::sync::Arc;
use crate::{
index_method::{
IndexMethod, IndexMethodAttachment, IndexMethodConfiguration, IndexMethodCursor,
IndexMethodDefinition, BACKING_BTREE_INDEX_METHOD_NAME,
},
Result,
};
/// Special 'backing_btree' index method which can be used by other custom index methods
///
/// Under the hood, its definition is marked with `backing_btree: true`, which the tursodb core recognizes as a special
/// index method that is translated to an ordinary btree but is not explicitly managed by the core
#[derive(Debug)]
pub struct BackingBtreeIndexMethod;
#[derive(Debug)]
pub struct BackingBTreeIndexMethodAttachment(String);
impl IndexMethod for BackingBtreeIndexMethod {
fn attach(
&self,
configuration: &IndexMethodConfiguration,
) -> Result<Arc<dyn IndexMethodAttachment>> {
Ok(Arc::new(BackingBTreeIndexMethodAttachment(
configuration.index_name.clone(),
)))
}
}
impl IndexMethodAttachment for BackingBTreeIndexMethodAttachment {
fn definition<'a>(&'a self) -> IndexMethodDefinition<'a> {
IndexMethodDefinition {
method_name: BACKING_BTREE_INDEX_METHOD_NAME,
index_name: &self.0,
patterns: &[],
backing_btree: true,
}
}
fn init(&self) -> Result<Box<dyn IndexMethodCursor>> {
panic!("init is not supported for backing_btree index method")
}
}
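
To illustrate how another index method consumes this, the toy IVF index below
creates and drops its auxiliary inverted-index btree with statements of roughly
this shape (mirroring the SQL built in its create()/destroy() state machines;
't' and 't_idx' are placeholder names):

  CREATE INDEX t_idx_scratch ON t USING backing_btree (embedding);
  DROP INDEX t_idx_scratch;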

core/index_method/mod.rs

@@ -0,0 +1,171 @@
use std::{collections::HashMap, sync::Arc};
use turso_parser::ast;
use crate::{
schema::IndexColumn,
storage::btree::BTreeCursor,
types::{IOResult, IndexInfo, KeyInfo},
vdbe::Register,
Connection, LimboError, Result, Value,
};
pub mod backing_btree;
pub mod toy_vector_sparse_ivf;
pub const BACKING_BTREE_INDEX_METHOD_NAME: &str = "backing_btree";
pub const TOY_VECTOR_SPARSE_IVF_INDEX_METHOD_NAME: &str = "toy_vector_sparse_ivf";
/// index method "entry point" which can create attachment of the method to the table with given configuration
/// (this trait acts like a "factory")
pub trait IndexMethod: std::fmt::Debug + Send + Sync {
/// create attachment of the index method to the specific table with specific method configuration
fn attach(
&self,
configuration: &IndexMethodConfiguration,
) -> Result<Arc<dyn IndexMethodAttachment>>;
}
#[derive(Debug, Clone)]
pub struct IndexMethodConfiguration {
/// table name for which index_method is defined
pub table_name: String,
/// index name
pub index_name: String,
/// columns c1, c2, c3, ... provided to the index method (e.g. create index t_idx on t using method (c1, c2, c3, ...))
pub columns: Vec<IndexColumn>,
/// optional parameters provided to the index method through WITH clause
pub parameters: HashMap<String, Value>,
}
/// An index method attached to a table with a specific configuration.
/// The attachment can generate SELECT patterns where the index can be used, and can also create a cursor for query execution.
pub trait IndexMethodAttachment: std::fmt::Debug + Send + Sync {
fn definition<'a>(&'a self) -> IndexMethodDefinition<'a>;
fn init(&self) -> Result<Box<dyn IndexMethodCursor>>;
}
#[derive(Debug)]
pub struct IndexMethodDefinition<'a> {
/// index method name
pub method_name: &'a str,
/// index name
pub index_name: &'a str,
/// SELECT patterns where the index method can be used
/// the patterns can contain positional placeholders which make the planner capture parameters from the original query and provide them to the index method
/// (for example, the pattern 'SELECT * FROM {table} LIMIT ?' will capture the LIMIT parameter and provide its value from the query to the index method query_start(...) call)
pub patterns: &'a [ast::Select],
/// special marker which forces the tursodb core to treat the index method as a backing btree - so it will allocate a real btree on disk for that index method
pub backing_btree: bool,
}
/// Cursor opened for an index method, capable of executing DML/DDL/DQL queries for the index method over a fixed table
pub trait IndexMethodCursor {
/// create the necessary components for the index method (usually, this is a bunch of btrees)
fn create(&mut self, connection: &Arc<Connection>) -> Result<IOResult<()>>;
/// destroy components created in the create(...) call for index method
fn destroy(&mut self, connection: &Arc<Connection>) -> Result<IOResult<()>>;
/// open necessary components for reading the index
fn open_read(&mut self, connection: &Arc<Connection>) -> Result<IOResult<()>>;
/// open necessary components for writing the index
fn open_write(&mut self, connection: &Arc<Connection>) -> Result<IOResult<()>>;
/// handle insert action
/// "values" argument contains registers with values for index columns followed by rowid Integer register
/// (e.g. for "CREATE INDEX i ON t USING method (x, z)" insert(...) call will have 3 registers in values: [x, z, rowid])
fn insert(&mut self, values: &[Register]) -> Result<IOResult<()>>;
/// handle delete action
/// "values" argument contains registers with values for index columns followed by rowid Integer register
/// (e.g. for "CREATE INDEX i ON t USING method (x, z)" the delete(...) call will have 3 registers in values: [x, z, rowid])
fn delete(&mut self, values: &[Register]) -> Result<IOResult<()>>;
/// initialize a query to the index method
/// the first element of the "values" slice is an Integer register which holds the index of the pattern from [IndexMethodDefinition::patterns] chosen by the query planner
/// the next elements of the "values" slice are values from the original query expression captured by the pattern
///
/// For example, for the 2 patterns ["SELECT * FROM {table} LIMIT ?", "SELECT * FROM {table} WHERE x = ?"], the query_start(...) call can have the following arguments:
/// - [Integer(0), Integer(10)] - pattern "SELECT * FROM {table} LIMIT ?" was chosen with the LIMIT parameter equal to 10
/// - [Integer(1), Text("turso")] - pattern "SELECT * FROM {table} WHERE x = ?" was chosen with the equality comparison value "turso"
fn query_start(&mut self, values: &[Register]) -> Result<IOResult<()>>;
/// Moves cursor to the next response row
/// Returns false if query exhausted all rows
fn query_next(&mut self) -> Result<IOResult<bool>>;
/// Return column with given idx (zero-based) from current row
fn query_column(&mut self, idx: usize) -> Result<IOResult<Value>>;
/// Return rowid of the original table row which corresponds to the current cursor row
///
/// This method is used by the tursodb core in order to "enrich" the response from the query pattern with additional fields from the original table
/// For example, consider pattern like this:
///
/// > SELECT vector_distance_jaccard(embedding, ?) as d FROM table ORDER BY d LIMIT 10
///
/// It can be used in more complex query:
///
/// > SELECT name, comment, rating, vector_distance_jaccard(embedding, ?) as d FROM table ORDER BY d LIMIT 10
///
/// In this case the query planner will execute the index method query first, and then
/// enrich its result with the name, comment, rating columns from the original table, accessing the original row by the rowid
/// returned from the query_rowid(...) method
fn query_rowid(&mut self) -> Result<IOResult<Option<i64>>>;
}
/// helper method to open table BTree cursor in the index method implementation
pub(crate) fn open_table_cursor(connection: &Connection, table: &str) -> Result<BTreeCursor> {
let pager = connection.pager.load().clone();
let schema = connection.schema.read();
let Some(table) = schema.get_table(table) else {
return Err(LimboError::InternalError(format!(
"table {table} not found",
)));
};
let cursor = BTreeCursor::new_table(pager, table.get_root_page(), table.columns().len());
Ok(cursor)
}
/// helper method to open index BTree cursor in the index method implementation
pub(crate) fn open_index_cursor(
connection: &Connection,
table: &str,
index: &str,
keys: Vec<KeyInfo>,
) -> Result<BTreeCursor> {
let pager = connection.pager.load().clone();
let schema = connection.schema.read();
let Some(scratch) = schema.get_index(table, index) else {
return Err(LimboError::InternalError(format!(
"index {index} for table {table} not found",
)));
};
let mut cursor = BTreeCursor::new(pager, scratch.root_page, keys.len());
cursor.index_info = Some(IndexInfo {
has_rowid: false,
num_cols: keys.len(),
key_info: keys,
});
Ok(cursor)
}
/// helper method to parse select patterns for [IndexMethodAttachment::definition] call
pub(crate) fn parse_patterns(patterns: &[&str]) -> Result<Vec<ast::Select>> {
let mut parsed = Vec::new();
for pattern in patterns {
let mut parser = turso_parser::parser::Parser::new(pattern.as_bytes());
let Some(ast) = parser.next() else {
return Err(LimboError::ParseError(format!(
"unable to parse pattern statement: {pattern}",
)));
};
let ast = ast?;
let ast::Cmd::Stmt(ast::Stmt::Select(select)) = ast else {
return Err(LimboError::ParseError(format!(
"only select patterns are allowed: {pattern}",
)));
};
parsed.push(select);
}
Ok(parsed)
}
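
As a concrete illustration of the pattern/placeholder mechanism documented on
[IndexMethodDefinition::patterns], using the first pattern the toy IVF method
registers (table/column names and the LIMIT value 10 are hypothetical):

  -- pattern registered by the index method (placeholders are positional):
  SELECT vector_distance_jaccard(embedding, ?) as distance FROM t ORDER BY distance LIMIT ?;
  -- a user query matching that pattern (the vector is still bound as a parameter):
  SELECT vector_distance_jaccard(embedding, ?) as distance FROM t ORDER BY distance LIMIT 10;
  -- query_start(...) then receives the chosen pattern index followed by the captured
  -- values, e.g. [Integer(0), <query vector blob>, Integer(10)].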

@@ -0,0 +1,725 @@
use std::{
collections::{BTreeSet, HashSet, VecDeque},
sync::Arc,
};
use turso_parser::ast::{self, SortOrder};
use crate::{
index_method::{
open_index_cursor, open_table_cursor, parse_patterns, IndexMethod, IndexMethodAttachment,
IndexMethodConfiguration, IndexMethodCursor, IndexMethodDefinition,
BACKING_BTREE_INDEX_METHOD_NAME, TOY_VECTOR_SPARSE_IVF_INDEX_METHOD_NAME,
},
return_if_io,
storage::btree::{BTreeCursor, BTreeKey, CursorTrait},
translate::collate::CollationSeq,
types::{IOResult, ImmutableRecord, KeyInfo, SeekKey, SeekOp, SeekResult},
vdbe::Register,
vector::{
operations,
vector_types::{Vector, VectorType},
},
Connection, LimboError, Result, Statement, Value, ValueRef,
};
/// Simple inverted index for sparse vectors
/// > CREATE INDEX t_idx ON t USING toy_vector_sparse_ivf (embedding)
///
/// It accepts a single column which must contain a vector encoded in sparse format (e.g. vector32_sparse(...))
/// It can handle jaccard similarity scoring queries like the following:
/// > SELECT vector_distance_jaccard(embedding, ?) as d FROM t ORDER BY d LIMIT ?
#[derive(Debug)]
pub struct VectorSparseInvertedIndexMethod;
#[derive(Debug)]
pub struct VectorSparseInvertedIndexMethodAttachment {
configuration: IndexMethodConfiguration,
patterns: Vec<ast::Select>,
}
pub enum VectorSparseInvertedIndexCreateState {
Init,
Run { stmt: Box<Statement> },
}
pub enum VectorSparseInvertedIndexInsertState {
Init,
Prepare {
positions: Option<Vec<u32>>,
rowid: i64,
idx: usize,
},
Seek {
positions: Option<Vec<u32>>,
key: Option<ImmutableRecord>,
rowid: i64,
idx: usize,
},
Insert {
positions: Option<Vec<u32>>,
key: Option<ImmutableRecord>,
rowid: i64,
idx: usize,
},
}
pub enum VectorSparseInvertedIndexDeleteState {
Init,
Prepare {
positions: Option<Vec<u32>>,
rowid: i64,
idx: usize,
},
Seek {
positions: Option<Vec<u32>>,
key: Option<ImmutableRecord>,
rowid: i64,
idx: usize,
},
Insert {
positions: Option<Vec<u32>>,
rowid: i64,
idx: usize,
},
}
#[derive(Debug, PartialEq)]
struct FloatOrd(f64);
impl Eq for FloatOrd {}
impl PartialOrd for FloatOrd {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for FloatOrd {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.0.total_cmp(&other.0)
}
}
#[derive(Debug)]
enum VectorSparseInvertedIndexSearchState {
Init,
Prepare {
collected: Option<HashSet<i64>>,
positions: Option<Vec<u32>>,
idx: usize,
limit: i64,
},
Seek {
collected: Option<HashSet<i64>>,
positions: Option<Vec<u32>>,
key: Option<ImmutableRecord>,
idx: usize,
limit: i64,
},
Read {
collected: Option<HashSet<i64>>,
positions: Option<Vec<u32>>,
key: Option<ImmutableRecord>,
idx: usize,
limit: i64,
},
Next {
collected: Option<HashSet<i64>>,
positions: Option<Vec<u32>>,
key: Option<ImmutableRecord>,
idx: usize,
limit: i64,
},
EvaluateSeek {
rowids: Option<Vec<i64>>,
distances: Option<BTreeSet<(FloatOrd, i64)>>,
limit: i64,
},
EvaluateRead {
rowids: Option<Vec<i64>>,
distances: Option<BTreeSet<(FloatOrd, i64)>>,
limit: i64,
},
}
pub struct VectorSparseInvertedIndexMethodCursor {
configuration: IndexMethodConfiguration,
scratch_btree: String,
scratch_cursor: Option<BTreeCursor>,
main_btree: Option<BTreeCursor>,
create_state: VectorSparseInvertedIndexCreateState,
insert_state: VectorSparseInvertedIndexInsertState,
delete_state: VectorSparseInvertedIndexDeleteState,
search_state: VectorSparseInvertedIndexSearchState,
search_result: VecDeque<(i64, f64)>,
search_row: Option<(i64, f64)>,
}
impl IndexMethod for VectorSparseInvertedIndexMethod {
fn attach(
&self,
configuration: &IndexMethodConfiguration,
) -> Result<Arc<dyn IndexMethodAttachment>> {
let query_pattern1 = format!(
"SELECT vector_distance_jaccard({}, ?) as distance FROM {} ORDER BY distance LIMIT ?",
configuration.columns[0].name, configuration.table_name
);
let query_pattern2 = format!(
"SELECT vector_distance_jaccard(?, {}) as distance FROM {} ORDER BY distance LIMIT ?",
configuration.columns[0].name, configuration.table_name
);
Ok(Arc::new(VectorSparseInvertedIndexMethodAttachment {
configuration: configuration.clone(),
patterns: parse_patterns(&[&query_pattern1, &query_pattern2])?,
}))
}
}
impl IndexMethodAttachment for VectorSparseInvertedIndexMethodAttachment {
fn definition<'a>(&'a self) -> IndexMethodDefinition<'a> {
IndexMethodDefinition {
method_name: TOY_VECTOR_SPARSE_IVF_INDEX_METHOD_NAME,
index_name: &self.configuration.index_name,
patterns: self.patterns.as_slice(),
backing_btree: false,
}
}
fn init(&self) -> Result<Box<dyn IndexMethodCursor>> {
Ok(Box::new(VectorSparseInvertedIndexMethodCursor::new(
self.configuration.clone(),
)))
}
}
impl VectorSparseInvertedIndexMethodCursor {
pub fn new(configuration: IndexMethodConfiguration) -> Self {
let scratch_btree = format!("{}_scratch", configuration.index_name);
Self {
configuration,
scratch_btree,
scratch_cursor: None,
main_btree: None,
search_result: VecDeque::new(),
search_row: None,
create_state: VectorSparseInvertedIndexCreateState::Init,
insert_state: VectorSparseInvertedIndexInsertState::Init,
delete_state: VectorSparseInvertedIndexDeleteState::Init,
search_state: VectorSparseInvertedIndexSearchState::Init,
}
}
}
impl IndexMethodCursor for VectorSparseInvertedIndexMethodCursor {
fn create(&mut self, connection: &Arc<Connection>) -> Result<IOResult<()>> {
loop {
match &mut self.create_state {
VectorSparseInvertedIndexCreateState::Init => {
let columns = &self.configuration.columns;
let columns = columns.iter().map(|x| x.name.as_str()).collect::<Vec<_>>();
let sql = format!(
"CREATE INDEX {} ON {} USING {} ({})",
self.scratch_btree,
self.configuration.table_name,
BACKING_BTREE_INDEX_METHOD_NAME,
columns.join(", ")
);
let stmt = connection.prepare(&sql)?;
connection.start_nested();
self.create_state = VectorSparseInvertedIndexCreateState::Run {
stmt: Box::new(stmt),
};
}
VectorSparseInvertedIndexCreateState::Run { stmt } => {
// we need to properly track subprograms and propagate result to the root program to make this execution async
let result = stmt.run_ignore_rows();
connection.end_nested();
result?;
return Ok(IOResult::Done(()));
}
}
}
}
fn destroy(&mut self, connection: &Arc<Connection>) -> Result<IOResult<()>> {
let sql = format!("DROP INDEX {}", self.scratch_btree);
let mut stmt = connection.prepare(&sql)?;
connection.start_nested();
let result = stmt.run_ignore_rows();
connection.end_nested();
result?;
Ok(IOResult::Done(()))
}
fn open_read(&mut self, connection: &Arc<Connection>) -> Result<IOResult<()>> {
let key_info = KeyInfo {
collation: CollationSeq::Binary,
sort_order: SortOrder::Asc,
};
self.scratch_cursor = Some(open_index_cursor(
connection,
&self.configuration.table_name,
&self.scratch_btree,
vec![key_info, key_info],
)?);
self.main_btree = Some(open_table_cursor(
connection,
&self.configuration.table_name,
)?);
Ok(IOResult::Done(()))
}
fn open_write(&mut self, connection: &Arc<Connection>) -> Result<IOResult<()>> {
let key_info = KeyInfo {
collation: CollationSeq::Binary,
sort_order: SortOrder::Asc,
};
self.scratch_cursor = Some(open_index_cursor(
connection,
&self.configuration.table_name,
&self.scratch_btree,
vec![key_info, key_info],
)?);
Ok(IOResult::Done(()))
}
fn insert(&mut self, values: &[Register]) -> Result<IOResult<()>> {
let Some(cursor) = &mut self.scratch_cursor else {
return Err(LimboError::InternalError(
"cursor must be opened".to_string(),
));
};
loop {
match &mut self.insert_state {
VectorSparseInvertedIndexInsertState::Init => {
let Some(vector) = values[0].get_value().to_blob() else {
return Err(LimboError::InternalError(
"first value must be sparse vector".to_string(),
));
};
let vector = Vector::from_slice(vector)?;
if !matches!(vector.vector_type, VectorType::Float32Sparse) {
return Err(LimboError::InternalError(
"first value must be sparse vector".to_string(),
));
}
let Some(rowid) = values[1].get_value().as_int() else {
return Err(LimboError::InternalError(
"second value must be i64 rowid".to_string(),
));
};
let sparse = vector.as_f32_sparse();
self.insert_state = VectorSparseInvertedIndexInsertState::Prepare {
positions: Some(sparse.idx.to_vec()),
rowid,
idx: 0,
}
}
VectorSparseInvertedIndexInsertState::Prepare {
positions,
rowid,
idx,
} => {
let p = positions.as_ref().unwrap();
if *idx == p.len() {
self.insert_state = VectorSparseInvertedIndexInsertState::Init;
return Ok(IOResult::Done(()));
}
let position = p[*idx];
let key = ImmutableRecord::from_values(
&[Value::Integer(position as i64), Value::Integer(*rowid)],
2,
);
self.insert_state = VectorSparseInvertedIndexInsertState::Seek {
idx: *idx,
rowid: *rowid,
positions: positions.take(),
key: Some(key),
};
}
VectorSparseInvertedIndexInsertState::Seek {
positions,
rowid,
idx,
key,
} => {
let k = key.as_ref().unwrap();
let _ = return_if_io!(
cursor.seek(SeekKey::IndexKey(k), SeekOp::GE { eq_only: false })
);
self.insert_state = VectorSparseInvertedIndexInsertState::Insert {
idx: *idx,
rowid: *rowid,
positions: positions.take(),
key: key.take(),
};
}
VectorSparseInvertedIndexInsertState::Insert {
positions,
rowid,
idx,
key,
} => {
let k = key.as_ref().unwrap();
return_if_io!(cursor.insert(&BTreeKey::IndexKey(k)));
self.insert_state = VectorSparseInvertedIndexInsertState::Prepare {
idx: *idx + 1,
rowid: *rowid,
positions: positions.take(),
};
}
}
}
}
fn delete(&mut self, values: &[Register]) -> Result<IOResult<()>> {
let Some(cursor) = &mut self.scratch_cursor else {
return Err(LimboError::InternalError(
"cursor must be opened".to_string(),
));
};
loop {
match &mut self.delete_state {
VectorSparseInvertedIndexDeleteState::Init => {
let Some(vector) = values[0].get_value().to_blob() else {
return Err(LimboError::InternalError(
"first value must be sparse vector".to_string(),
));
};
let vector = Vector::from_slice(vector)?;
if !matches!(vector.vector_type, VectorType::Float32Sparse) {
return Err(LimboError::InternalError(
"first value must be sparse vector".to_string(),
));
}
let Some(rowid) = values[1].get_value().as_int() else {
return Err(LimboError::InternalError(
"second value must be i64 rowid".to_string(),
));
};
let sparse = vector.as_f32_sparse();
self.delete_state = VectorSparseInvertedIndexDeleteState::Prepare {
positions: Some(sparse.idx.to_vec()),
rowid,
idx: 0,
}
}
VectorSparseInvertedIndexDeleteState::Prepare {
positions,
rowid,
idx,
} => {
let p = positions.as_ref().unwrap();
if *idx == p.len() {
self.delete_state = VectorSparseInvertedIndexDeleteState::Init;
return Ok(IOResult::Done(()));
}
let position = p[*idx];
let key = ImmutableRecord::from_values(
&[Value::Integer(position as i64), Value::Integer(*rowid)],
2,
);
self.delete_state = VectorSparseInvertedIndexDeleteState::Seek {
idx: *idx,
rowid: *rowid,
positions: positions.take(),
key: Some(key),
};
}
VectorSparseInvertedIndexDeleteState::Seek {
positions,
rowid,
idx,
key,
} => {
let k = key.as_ref().unwrap();
let result = return_if_io!(
cursor.seek(SeekKey::IndexKey(k), SeekOp::GE { eq_only: true })
);
if !matches!(result, SeekResult::Found) {
return Err(LimboError::Corrupt("inverted index corrupted".to_string()));
}
self.delete_state = VectorSparseInvertedIndexDeleteState::Insert {
idx: *idx,
rowid: *rowid,
positions: positions.take(),
};
}
VectorSparseInvertedIndexDeleteState::Insert {
positions,
rowid,
idx,
} => {
return_if_io!(cursor.delete());
self.delete_state = VectorSparseInvertedIndexDeleteState::Prepare {
idx: *idx + 1,
rowid: *rowid,
positions: positions.take(),
};
}
}
}
}
fn query_start(&mut self, values: &[Register]) -> Result<IOResult<()>> {
let Some(scratch) = &mut self.scratch_cursor else {
return Err(LimboError::InternalError(
"cursor must be opened".to_string(),
));
};
let Some(main) = &mut self.main_btree else {
return Err(LimboError::InternalError(
"cursor must be opened".to_string(),
));
};
loop {
tracing::debug!("state: {:?}", self.search_state);
match &mut self.search_state {
VectorSparseInvertedIndexSearchState::Init => {
let Some(vector) = values[1].get_value().to_blob() else {
return Err(LimboError::InternalError(
"first value must be sparse vector".to_string(),
));
};
let Some(limit) = values[2].get_value().as_int() else {
return Err(LimboError::InternalError(
"second value must be i64 limit parameter".to_string(),
));
};
let vector = Vector::from_slice(vector)?;
if !matches!(vector.vector_type, VectorType::Float32Sparse) {
return Err(LimboError::InternalError(
"first value must be sparse vector".to_string(),
));
}
let sparse = vector.as_f32_sparse();
self.search_state = VectorSparseInvertedIndexSearchState::Prepare {
collected: Some(HashSet::new()),
positions: Some(sparse.idx.to_vec()),
idx: 0,
limit,
};
}
VectorSparseInvertedIndexSearchState::Prepare {
collected,
positions,
idx,
limit,
} => {
let p = positions.as_ref().unwrap();
if *idx == p.len() {
let mut rowids = collected
.take()
.unwrap()
.iter()
.cloned()
.collect::<Vec<_>>();
rowids.sort();
self.search_state = VectorSparseInvertedIndexSearchState::EvaluateSeek {
rowids: Some(rowids),
distances: Some(BTreeSet::new()),
limit: *limit,
};
continue;
}
let position = p[*idx];
let key = ImmutableRecord::from_values(&[Value::Integer(position as i64)], 2);
self.search_state = VectorSparseInvertedIndexSearchState::Seek {
collected: collected.take(),
positions: positions.take(),
key: Some(key),
idx: *idx,
limit: *limit,
};
}
VectorSparseInvertedIndexSearchState::Seek {
collected,
positions,
key,
idx,
limit,
} => {
let k = key.as_ref().unwrap();
let result = return_if_io!(
scratch.seek(SeekKey::IndexKey(k), SeekOp::GE { eq_only: false })
);
match result {
SeekResult::Found => {
self.search_state = VectorSparseInvertedIndexSearchState::Read {
collected: collected.take(),
positions: positions.take(),
key: key.take(),
idx: *idx,
limit: *limit,
};
}
SeekResult::TryAdvance => {
self.search_state = VectorSparseInvertedIndexSearchState::Next {
collected: collected.take(),
positions: positions.take(),
key: key.take(),
idx: *idx,
limit: *limit,
};
}
SeekResult::NotFound => {
return Err(LimboError::Corrupt("inverted index corrupted".to_string()))
}
}
}
VectorSparseInvertedIndexSearchState::Read {
collected,
positions,
key,
idx,
limit,
} => {
let record = return_if_io!(scratch.record());
if let Some(record) = record {
let ValueRef::Integer(position) = record.get_value(0)? else {
return Err(LimboError::InternalError(
"first value of index record must be int".to_string(),
));
};
let ValueRef::Integer(rowid) = record.get_value(1)? else {
return Err(LimboError::InternalError(
"second value of index record must be int".to_string(),
));
};
tracing::debug!("position/rowid: {}/{}", position, rowid);
if position == positions.as_ref().unwrap()[*idx] as i64 {
collected.as_mut().unwrap().insert(rowid);
self.search_state = VectorSparseInvertedIndexSearchState::Next {
collected: collected.take(),
positions: positions.take(),
key: key.take(),
idx: *idx,
limit: *limit,
};
continue;
}
}
self.search_state = VectorSparseInvertedIndexSearchState::Prepare {
collected: collected.take(),
positions: positions.take(),
idx: *idx + 1,
limit: *limit,
};
}
VectorSparseInvertedIndexSearchState::Next {
collected,
positions,
key,
idx,
limit,
} => {
let result = return_if_io!(scratch.next());
if !result {
self.search_state = VectorSparseInvertedIndexSearchState::Prepare {
collected: collected.take(),
positions: positions.take(),
idx: *idx + 1,
limit: *limit,
};
} else {
self.search_state = VectorSparseInvertedIndexSearchState::Read {
collected: collected.take(),
positions: positions.take(),
key: key.take(),
idx: *idx,
limit: *limit,
};
}
}
VectorSparseInvertedIndexSearchState::EvaluateSeek {
rowids,
distances,
limit,
} => {
let Some(rowid) = rowids.as_ref().unwrap().last() else {
let distances = distances.take().unwrap();
self.search_result = distances.iter().map(|(d, i)| (*i, d.0)).collect();
return Ok(IOResult::Done(()));
};
let result = return_if_io!(
main.seek(SeekKey::TableRowId(*rowid), SeekOp::GE { eq_only: true })
);
if !matches!(result, SeekResult::Found) {
return Err(LimboError::Corrupt(
"vector_sparse_ivf corrupted: unable to find rowid in main table"
.to_string(),
));
};
self.search_state = VectorSparseInvertedIndexSearchState::EvaluateRead {
rowids: rowids.take(),
distances: distances.take(),
limit: *limit,
};
}
VectorSparseInvertedIndexSearchState::EvaluateRead {
rowids,
distances,
limit,
} => {
let record = return_if_io!(main.record());
let rowid = rowids.as_mut().unwrap().pop().unwrap();
if let Some(record) = record {
let column_idx = self.configuration.columns[0].pos_in_table;
let ValueRef::Blob(data) = record.get_value(column_idx)? else {
return Err(LimboError::InternalError(
"table column value must be sparse vector".to_string(),
));
};
let data = Vector::from_vec(data.to_vec())?;
if !matches!(data.vector_type, VectorType::Float32Sparse) {
return Err(LimboError::InternalError(
"table column value must be sparse vector".to_string(),
));
}
let Some(arg) = values[1].get_value().to_blob() else {
return Err(LimboError::InternalError(
"first value must be sparse vector".to_string(),
));
};
let arg = Vector::from_vec(arg.to_vec())?;
if !matches!(arg.vector_type, VectorType::Float32Sparse) {
return Err(LimboError::InternalError(
"first value must be sparse vector".to_string(),
));
}
tracing::debug!(
"vector: {:?}, query: {:?}",
data.as_f32_sparse(),
arg.as_f32_sparse()
);
let distance = operations::jaccard::vector_distance_jaccard(&data, &arg)?;
let distances = distances.as_mut().unwrap();
distances.insert((FloatOrd(distance), rowid));
if distances.len() > *limit as usize {
let _ = distances.pop_last();
}
}
self.search_state = VectorSparseInvertedIndexSearchState::EvaluateSeek {
rowids: rowids.take(),
distances: distances.take(),
limit: *limit,
};
}
}
}
}
fn query_rowid(&mut self) -> Result<IOResult<Option<i64>>> {
let result = self.search_row.as_ref().unwrap();
Ok(IOResult::Done(Some(result.0)))
}
fn query_column(&mut self, _: usize) -> Result<IOResult<Value>> {
let result = self.search_row.as_ref().unwrap();
Ok(IOResult::Done(Value::Float(result.1)))
}
fn query_next(&mut self) -> Result<IOResult<bool>> {
self.search_row = self.search_result.pop_front();
Ok(IOResult::Done(self.search_row.is_some()))
}
}

@@ -1,6 +1,4 @@
use crate::{
io::clock::DefaultClock, Clock, Completion, File, Instant, LimboError, OpenFlags, Result, IO,
};
use crate::{io::clock::DefaultClock, Clock, Completion, File, Instant, OpenFlags, Result, IO};
use parking_lot::RwLock;
use std::io::{Read, Seek, Write};
use std::sync::Arc;

@@ -8,6 +8,7 @@ mod fast_lock;
mod function;
mod functions;
mod incremental;
pub mod index_method;
mod info;
mod io;
#[cfg(feature = "json")]
@@ -16,7 +17,7 @@ pub mod mvcc;
mod parameters;
mod pragma;
mod pseudo;
mod schema;
pub mod schema;
#[cfg(feature = "series")]
mod series;
pub mod state_machine;
@@ -30,7 +31,7 @@ mod util;
#[cfg(feature = "uuid")]
mod uuid;
mod vdbe;
mod vector;
pub mod vector;
mod vtab;
#[cfg(feature = "fuzz")]
@@ -39,6 +40,7 @@ pub mod numeric;
#[cfg(not(feature = "fuzz"))]
mod numeric;
use crate::index_method::IndexMethod;
use crate::storage::checksum::CHECKSUM_REQUIRED_RESERVED_BYTES;
use crate::storage::encryption::AtomicCipherMode;
use crate::translate::display::PlanContext;
@@ -103,7 +105,9 @@ pub use types::Value;
pub use types::ValueRef;
use util::parse_schema_rows;
pub use util::IOExt;
pub use vdbe::{builder::QueryMode, explain::EXPLAIN_COLUMNS, explain::EXPLAIN_QUERY_PLAN_COLUMNS};
pub use vdbe::{
builder::QueryMode, explain::EXPLAIN_COLUMNS, explain::EXPLAIN_QUERY_PLAN_COLUMNS, Register,
};
/// Configuration for database features
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -489,6 +493,9 @@ impl Database {
n_connections: AtomicUsize::new(0),
});
db.register_global_builtin_extensions()
.expect("unable to register global extensions");
// Check: https://github.com/tursodatabase/turso/pull/1761#discussion_r2154013123
if db_state.is_initialized() {
// parse schema
@@ -511,10 +518,14 @@ impl Database {
let result = schema
.make_from_btree(None, pager.clone(), &syms)
.inspect_err(|_| pager.end_read_tx());
if let Err(LimboError::ExtensionError(e)) = result {
// this means that a vtab exists and we no longer have the module loaded. we print
// a warning to the user to load the module
eprintln!("Warning: {e}");
match result {
Err(LimboError::ExtensionError(e)) => {
// this means that a vtab exists and we no longer have the module loaded. we print
// a warning to the user to load the module
eprintln!("Warning: {e}");
}
Err(e) => return Err(e),
_ => {}
}
if db.mvcc_enabled() && !schema.indexes.is_empty() {
@@ -534,9 +545,6 @@ impl Database {
mv_store.bootstrap(mvcc_bootstrap_conn)?;
}
db.register_global_builtin_extensions()
.expect("unable to register global extensions");
Ok(db)
}
@@ -2885,6 +2893,7 @@ pub struct SymbolTable {
pub functions: HashMap<String, Arc<function::ExternalFunc>>,
pub vtabs: HashMap<String, Arc<VirtualTable>>,
pub vtab_modules: HashMap<String, Arc<crate::ext::VTabImpl>>,
pub index_methods: HashMap<String, Arc<dyn IndexMethod>>,
}
impl std::fmt::Debug for SymbolTable {
@@ -2926,6 +2935,7 @@ impl SymbolTable {
functions: HashMap::new(),
vtabs: HashMap::new(),
vtab_modules: HashMap::new(),
index_methods: HashMap::new(),
}
}
pub fn resolve_function(
@@ -2946,6 +2956,9 @@ impl SymbolTable {
for (name, module) in &other.vtab_modules {
self.vtab_modules.insert(name.clone(), module.clone());
}
for (name, module) in &other.index_methods {
self.index_methods.insert(name.clone(), module.clone());
}
}
}

@@ -1,9 +1,10 @@
use crate::function::Func;
use crate::incremental::view::IncrementalView;
use crate::index_method::{IndexMethodAttachment, IndexMethodConfiguration};
use crate::translate::expr::{
bind_and_rewrite_expr, walk_expr, BindingBehavior, ParamState, WalkControl,
};
use crate::translate::index::resolve_sorted_columns;
use crate::translate::index::{resolve_index_method_parameters, resolve_sorted_columns};
use crate::translate::planner::ROWID_STRS;
use parking_lot::RwLock;
@@ -368,6 +369,7 @@ impl Schema {
.get(&name)
.map(|v| v.iter())
.unwrap_or_default()
.filter(|i| !i.is_backing_btree_index())
}
pub fn get_index(&self, table_name: &str, index_name: &str) -> Option<&Arc<Index>> {
@@ -485,7 +487,7 @@ impl Schema {
pager.end_read_tx();
self.populate_indices(from_sql_indexes, automatic_indices)?;
self.populate_indices(syms, from_sql_indexes, automatic_indices)?;
self.populate_materialized_views(
materialized_view_info,
@@ -501,6 +503,7 @@ impl Schema {
/// automatic_indices: indices created automatically for primary key and unique constraints
pub fn populate_indices(
&mut self,
syms: &SymbolTable,
from_sql_indexes: Vec<UnparsedFromSqlIndex>,
automatic_indices: std::collections::HashMap<String, Vec<(String, i64)>>,
) -> Result<()> {
@@ -512,6 +515,7 @@ impl Schema {
.get_btree_table(&unparsed_sql_from_index.table_name)
.unwrap();
let index = Index::from_sql(
syms,
&unparsed_sql_from_index.sql,
unparsed_sql_from_index.root_page,
table.as_ref(),
@@ -2419,6 +2423,7 @@ pub struct Index {
/// and SELECT DISTINCT ephemeral indexes will not have a rowid.
pub has_rowid: bool,
pub where_clause: Option<Box<Expr>>,
pub index_method: Option<Arc<dyn IndexMethodAttachment>>,
}
#[allow(dead_code)]
@@ -2437,7 +2442,12 @@ pub struct IndexColumn {
}
impl Index {
pub fn from_sql(sql: &str, root_page: i64, table: &BTreeTable) -> Result<Index> {
pub fn from_sql(
syms: &SymbolTable,
sql: &str,
root_page: i64,
table: &BTreeTable,
) -> Result<Index> {
let mut parser = Parser::new(sql.as_bytes());
let cmd = parser.next_cmd()?;
match cmd {
@@ -2447,25 +2457,66 @@ impl Index {
columns,
unique,
where_clause,
using,
with_clause,
..
})) => {
let index_name = normalize_ident(idx_name.name.as_str());
let index_columns = resolve_sorted_columns(table, &columns)?;
Ok(Index {
name: index_name,
table_name: normalize_ident(tbl_name.as_str()),
root_page,
columns: index_columns,
unique,
ephemeral: false,
has_rowid: table.has_rowid,
where_clause,
})
if let Some(using) = using {
if where_clause.is_some() {
bail_parse_error!("custom index module do not support partial indices");
}
if unique {
bail_parse_error!("custom index module do not support UNIQUE indices");
}
let parameters = resolve_index_method_parameters(with_clause)?;
let Some(module) = syms.index_methods.get(using.as_str()) else {
bail_parse_error!("unknown module name: '{}'", using);
};
let configuration = IndexMethodConfiguration {
table_name: table.name.clone(),
index_name: index_name.clone(),
columns: index_columns.clone(),
parameters,
};
let descriptor = module.attach(&configuration)?;
Ok(Index {
name: index_name,
table_name: normalize_ident(tbl_name.as_str()),
root_page,
columns: index_columns,
unique: false,
ephemeral: false,
has_rowid: table.has_rowid,
where_clause: None,
index_method: Some(descriptor),
})
} else {
Ok(Index {
name: index_name,
table_name: normalize_ident(tbl_name.as_str()),
root_page,
columns: index_columns,
unique,
ephemeral: false,
has_rowid: table.has_rowid,
where_clause,
index_method: None,
})
}
}
_ => todo!("Expected create index statement"),
}
}
/// check if this is a special backing_btree index created and managed by a custom index_method
pub fn is_backing_btree_index(&self) -> bool {
self.index_method
.as_ref()
.is_some_and(|x| x.definition().backing_btree)
}
pub fn automatic_from_primary_key(
table: &BTreeTable,
auto_index: (String, i64), // name, root_page
@@ -2505,6 +2556,7 @@ impl Index {
ephemeral: false,
has_rowid: table.has_rowid,
where_clause: None,
index_method: None,
})
}
@@ -2542,6 +2594,7 @@ impl Index {
ephemeral: false,
has_rowid: table.has_rowid,
where_clause: None,
index_method: None,
})
}

@@ -8553,6 +8553,7 @@ mod tests {
unique: false,
ephemeral: false,
has_rowid: false,
index_method: None,
};
let num_columns = index_def.columns.len();
let mut cursor =
@@ -8712,6 +8713,7 @@ mod tests {
unique: false,
ephemeral: false,
has_rowid: false,
index_method: None,
};
let mut cursor = BTreeCursor::new_index(pager.clone(), index_root_page, &index_def, 1);

@@ -432,6 +432,7 @@ fn create_dedupe_index(
unique: false,
has_rowid: false,
where_clause: None,
index_method: None,
});
let cursor_id = program.alloc_cursor_id(CursorType::BTreeIndex(dedupe_index.clone()));
program.emit_insn(Insn::OpenEphemeral {

@@ -725,30 +725,24 @@ fn emit_delete_insns(
});
} else {
// Delete from all indexes before deleting from the main table.
let indexes = t_ctx.resolver.schema.indexes.get(table_name);
let indexes = t_ctx.resolver.schema.get_indices(table_name);
// Get the index that is being used to iterate the deletion loop, if there is one.
let iteration_index = unsafe { &*table_reference }.op.index();
// Get all indexes that are not the iteration index.
let other_indexes = indexes
.map(|indexes| {
indexes
.iter()
.filter(|index| {
iteration_index
.as_ref()
.is_none_or(|it_idx| !Arc::ptr_eq(it_idx, index))
})
.map(|index| {
(
index.clone(),
program
.resolve_cursor_id(&CursorKey::index(internal_id, index.clone())),
)
})
.collect::<Vec<_>>()
.filter(|index| {
iteration_index
.as_ref()
.is_none_or(|it_idx| !Arc::ptr_eq(it_idx, index))
})
.unwrap_or_default();
.map(|index| {
(
index.clone(),
program.resolve_cursor_id(&CursorKey::index(internal_id, index.clone())),
)
})
.collect::<Vec<_>>();
for (index, index_cursor_id) in other_indexes {
let skip_delete_label = if index.where_clause.is_some() {

@@ -1,7 +1,10 @@
use std::collections::HashMap;
use std::sync::Arc;
use crate::bail_parse_error;
use crate::error::SQLITE_CONSTRAINT_UNIQUE;
use crate::index_method::IndexMethodConfiguration;
use crate::numeric::Numeric;
use crate::schema::{Table, RESERVED_TABLE_PREFIXES};
use crate::translate::emitter::{
emit_cdc_full_record, emit_cdc_insns, prepare_cdc_if_necessary, OperationMode, Resolver,
@@ -110,12 +113,30 @@ pub fn translate_create_index(
crate::bail_parse_error!("Error: table '{tbl_name}' is not a b-tree table.");
};
let columns = resolve_sorted_columns(&tbl, &columns)?;
let custom_module = using.is_some();
if !with_clause.is_empty() && !custom_module {
if !with_clause.is_empty() && using.is_none() {
crate::bail_parse_error!(
"Error: additional parameters are allowed only for custom module indices: '{idx_name}' is not custom module index"
);
}
let mut index_method = None;
if let Some(using) = &using {
let index_modules = &resolver.symbol_table.index_methods;
let using = using.as_str();
let index_module = index_modules.get(using);
if index_module.is_none() {
crate::bail_parse_error!("Error: unknown module name '{}'", using);
}
if let Some(index_module) = index_module {
let parameters = resolve_index_method_parameters(with_clause)?;
index_method = Some(index_module.attach(&IndexMethodConfiguration {
table_name: tbl.name.clone(),
index_name: idx_name.clone(),
columns: columns.clone(),
parameters: parameters.clone(),
})?);
}
}
let idx = Arc::new(Index {
name: idx_name.clone(),
table_name: tbl.name.clone(),
@@ -127,6 +148,7 @@ pub fn translate_create_index(
// store the *original* where clause, because we need to rewrite it
// before translating, and it cannot reference a table alias
where_clause: where_clause.clone(),
index_method: index_method.clone(),
});
if !idx.validate_where_expr(table) {
@@ -142,7 +164,7 @@ pub fn translate_create_index(
// Allocate the necessary cursors:
//
// 1. sqlite_schema_cursor_id - sqlite_schema table
// 2. btree_cursor_id - new index btree
// 2. index_cursor_id - new index cursor
// 3. table_cursor_id - table we are creating the index on
// 4. sorter_cursor_id - sorter
// 5. pseudo_cursor_id - pseudo table to store the sorted index values
@@ -150,7 +172,7 @@ pub fn translate_create_index(
let sqlite_schema_cursor_id =
program.alloc_cursor_id(CursorType::BTreeTable(sqlite_table.clone()));
let table_ref = program.table_reference_counter.next();
let btree_cursor_id = program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone()));
let index_cursor_id = program.alloc_cursor_id(CursorType::BTreeIndex(idx.clone()));
let table_cursor_id = program.alloc_cursor_id_keyed(
CursorKey::table(table_ref),
CursorType::BTreeTable(tbl.clone()),
@@ -204,163 +226,165 @@ pub fn translate_create_index(
Some(sql),
)?;
// determine the order of the columns in the index for the sorter
let order = idx.columns.iter().map(|c| c.order).collect();
// open the sorter and the pseudo table
program.emit_insn(Insn::SorterOpen {
cursor_id: sorter_cursor_id,
columns: columns.len(),
order,
collations: idx.columns.iter().map(|c| c.collation).collect(),
});
let content_reg = program.alloc_register();
program.emit_insn(Insn::OpenPseudo {
cursor_id: pseudo_cursor_id,
content_reg,
num_fields: columns.len() + 1,
});
// open the table we are creating the index on for reading
program.emit_insn(Insn::OpenRead {
cursor_id: table_cursor_id,
root_page: tbl.root_page,
db: 0,
});
let loop_start_label = program.allocate_label();
let loop_end_label = program.allocate_label();
program.emit_insn(Insn::Rewind {
cursor_id: table_cursor_id,
pc_if_empty: loop_end_label,
});
program.preassign_label_to_next_insn(loop_start_label);
// Loop start:
// Collect index values into start_reg..rowid_reg
// emit MakeRecord (index key + rowid) into record_reg.
//
// Then insert the record into the sorter
let mut skip_row_label = None;
if let Some(where_clause) = where_clause {
let label = program.allocate_label();
translate_condition_expr(
&mut program,
&table_references,
&where_clause,
ConditionMetadata {
jump_if_condition_is_true: false,
jump_target_when_false: label,
jump_target_when_true: BranchOffset::Placeholder,
jump_target_when_null: label,
},
resolver,
)?;
skip_row_label = Some(label);
}
let start_reg = program.alloc_registers(columns.len() + 1);
for (i, col) in columns.iter().enumerate() {
program.emit_column_or_rowid(table_cursor_id, col.pos_in_table, start_reg + i);
}
let rowid_reg = start_reg + columns.len();
program.emit_insn(Insn::RowId {
cursor_id: table_cursor_id,
dest: rowid_reg,
});
let record_reg = program.alloc_register();
program.emit_insn(Insn::MakeRecord {
start_reg,
count: columns.len() + 1,
dest_reg: record_reg,
index_name: Some(idx_name.clone()),
affinity_str: None,
});
program.emit_insn(Insn::SorterInsert {
cursor_id: sorter_cursor_id,
record_reg,
});
if let Some(skip_row_label) = skip_row_label {
program.resolve_label(skip_row_label, program.offset());
}
program.emit_insn(Insn::Next {
cursor_id: table_cursor_id,
pc_if_next: loop_start_label,
});
program.preassign_label_to_next_insn(loop_end_label);
// Open the index btree we created for writing to insert the
// newly sorted index records.
program.emit_insn(Insn::OpenWrite {
cursor_id: btree_cursor_id,
root_page: RegisterOrLiteral::Register(root_page_reg),
db: 0,
});
let sorted_loop_start = program.allocate_label();
let sorted_loop_end = program.allocate_label();
// Sort the index records in the sorter
program.emit_insn(Insn::SorterSort {
cursor_id: sorter_cursor_id,
pc_if_empty: sorted_loop_end,
});
let sorted_record_reg = program.alloc_register();
if unique {
// Since the records to be inserted are sorted, we can compare prev with current and if they are equal,
// we fall through to Halt with a unique constraint violation error.
let goto_label = program.allocate_label();
let label_after_sorter_compare = program.allocate_label();
program.resolve_label(goto_label, program.offset());
program.emit_insn(Insn::Goto {
target_pc: label_after_sorter_compare,
});
program.preassign_label_to_next_insn(sorted_loop_start);
program.emit_insn(Insn::SorterCompare {
if index_method.is_none() {
// determine the order of the columns in the index for the sorter
let order = idx.columns.iter().map(|c| c.order).collect();
// open the sorter and the pseudo table
program.emit_insn(Insn::SorterOpen {
cursor_id: sorter_cursor_id,
sorted_record_reg,
num_regs: columns.len(),
pc_when_nonequal: goto_label,
columns: columns.len(),
order,
collations: idx.columns.iter().map(|c| c.collation).collect(),
});
program.emit_insn(Insn::Halt {
err_code: SQLITE_CONSTRAINT_UNIQUE,
description: format_unique_violation_desc(tbl_name.as_str(), &idx),
let content_reg = program.alloc_register();
program.emit_insn(Insn::OpenPseudo {
cursor_id: pseudo_cursor_id,
content_reg,
num_fields: columns.len() + 1,
});
program.preassign_label_to_next_insn(label_after_sorter_compare);
} else {
program.preassign_label_to_next_insn(sorted_loop_start);
// open the table we are creating the index on for reading
program.emit_insn(Insn::OpenRead {
cursor_id: table_cursor_id,
root_page: tbl.root_page,
db: 0,
});
let loop_start_label = program.allocate_label();
let loop_end_label = program.allocate_label();
program.emit_insn(Insn::Rewind {
cursor_id: table_cursor_id,
pc_if_empty: loop_end_label,
});
program.preassign_label_to_next_insn(loop_start_label);
// Loop start:
// Collect index values into start_reg..rowid_reg
// emit MakeRecord (index key + rowid) into record_reg.
//
// Then insert the record into the sorter
let mut skip_row_label = None;
if let Some(where_clause) = where_clause {
let label = program.allocate_label();
translate_condition_expr(
&mut program,
&table_references,
&where_clause,
ConditionMetadata {
jump_if_condition_is_true: false,
jump_target_when_false: label,
jump_target_when_true: BranchOffset::Placeholder,
jump_target_when_null: label,
},
resolver,
)?;
skip_row_label = Some(label);
}
let start_reg = program.alloc_registers(columns.len() + 1);
for (i, col) in columns.iter().enumerate() {
program.emit_column_or_rowid(table_cursor_id, col.pos_in_table, start_reg + i);
}
let rowid_reg = start_reg + columns.len();
program.emit_insn(Insn::RowId {
cursor_id: table_cursor_id,
dest: rowid_reg,
});
let record_reg = program.alloc_register();
program.emit_insn(Insn::MakeRecord {
start_reg,
count: columns.len() + 1,
dest_reg: record_reg,
index_name: Some(idx_name.clone()),
affinity_str: None,
});
program.emit_insn(Insn::SorterInsert {
cursor_id: sorter_cursor_id,
record_reg,
});
if let Some(skip_row_label) = skip_row_label {
program.resolve_label(skip_row_label, program.offset());
}
program.emit_insn(Insn::Next {
cursor_id: table_cursor_id,
pc_if_next: loop_start_label,
});
program.preassign_label_to_next_insn(loop_end_label);
// Open the index btree we created for writing to insert the
// newly sorted index records.
program.emit_insn(Insn::OpenWrite {
cursor_id: index_cursor_id,
root_page: RegisterOrLiteral::Register(root_page_reg),
db: 0,
});
let sorted_loop_start = program.allocate_label();
let sorted_loop_end = program.allocate_label();
// Sort the index records in the sorter
program.emit_insn(Insn::SorterSort {
cursor_id: sorter_cursor_id,
pc_if_empty: sorted_loop_end,
});
let sorted_record_reg = program.alloc_register();
if unique {
// Since the records to be inserted are sorted, we can compare prev with current and if they are equal,
// we fall through to Halt with a unique constraint violation error.
let goto_label = program.allocate_label();
let label_after_sorter_compare = program.allocate_label();
program.resolve_label(goto_label, program.offset());
program.emit_insn(Insn::Goto {
target_pc: label_after_sorter_compare,
});
program.preassign_label_to_next_insn(sorted_loop_start);
program.emit_insn(Insn::SorterCompare {
cursor_id: sorter_cursor_id,
sorted_record_reg,
num_regs: columns.len(),
pc_when_nonequal: goto_label,
});
program.emit_insn(Insn::Halt {
err_code: SQLITE_CONSTRAINT_UNIQUE,
description: format_unique_violation_desc(tbl_name.as_str(), &idx),
});
program.preassign_label_to_next_insn(label_after_sorter_compare);
} else {
program.preassign_label_to_next_insn(sorted_loop_start);
}
program.emit_insn(Insn::SorterData {
pseudo_cursor: pseudo_cursor_id,
cursor_id: sorter_cursor_id,
dest_reg: sorted_record_reg,
});
// seek to the end of the index btree to position the cursor for appending
program.emit_insn(Insn::SeekEnd {
cursor_id: index_cursor_id,
});
// insert new index record
program.emit_insn(Insn::IdxInsert {
cursor_id: index_cursor_id,
record_reg: sorted_record_reg,
unpacked_start: None, // TODO: optimize with these to avoid decoding record twice
unpacked_count: None,
flags: IdxInsertFlags::new().use_seek(false),
});
program.emit_insn(Insn::SorterNext {
cursor_id: sorter_cursor_id,
pc_if_next: sorted_loop_start,
});
program.preassign_label_to_next_insn(sorted_loop_end);
}
program.emit_insn(Insn::SorterData {
pseudo_cursor: pseudo_cursor_id,
cursor_id: sorter_cursor_id,
dest_reg: sorted_record_reg,
});
// seek to the end of the index btree to position the cursor for appending
program.emit_insn(Insn::SeekEnd {
cursor_id: btree_cursor_id,
});
// insert new index record
program.emit_insn(Insn::IdxInsert {
cursor_id: btree_cursor_id,
record_reg: sorted_record_reg,
unpacked_start: None, // TODO: optimize with these to avoid decoding record twice
unpacked_count: None,
flags: IdxInsertFlags::new().use_seek(false),
});
program.emit_insn(Insn::SorterNext {
cursor_id: sorter_cursor_id,
pc_if_next: sorted_loop_start,
});
program.preassign_label_to_next_insn(sorted_loop_end);
// End of the outer loop
//
// Keep schema table open to emit ParseSchema, close the other cursors.
program.close_cursors(&[sorter_cursor_id, table_cursor_id, btree_cursor_id]);
program.close_cursors(&[sorter_cursor_id, table_cursor_id, index_cursor_id]);
program.emit_insn(Insn::SetCookie {
db: 0,
@@ -411,6 +435,40 @@ pub fn resolve_sorted_columns(
Ok(resolved)
}
pub fn resolve_index_method_parameters(
parameters: Vec<(turso_parser::ast::Name, Box<Expr>)>,
) -> crate::Result<HashMap<String, crate::Value>> {
let mut resolved = HashMap::new();
for (key, value) in parameters {
let value = match *value {
Expr::Literal(literal) => match literal {
ast::Literal::Numeric(s) => match Numeric::from(s) {
Numeric::Null => crate::Value::Null,
Numeric::Integer(v) => crate::Value::Integer(v),
Numeric::Float(v) => crate::Value::Float(v.into()),
},
ast::Literal::Null => crate::Value::Null,
ast::Literal::String(s) => crate::Value::Text(s.into()),
ast::Literal::Blob(b) => crate::Value::Blob(
b.as_bytes()
.chunks_exact(2)
.map(|pair| {
// We assume that sqlite3-parser has already validated that
// the input is valid hex string, thus unwrap is safe.
let hex_byte = std::str::from_utf8(pair).unwrap();
u8::from_str_radix(hex_byte, 16).unwrap()
})
.collect(),
),
_ => bail_parse_error!("parameters must be constant literals"),
},
_ => bail_parse_error!("parameters must be constant literals"),
};
resolved.insert(key.as_str().to_string(), value);
}
Ok(resolved)
}
pub fn translate_drop_index(
idx_name: &str,
resolver: &Resolver,

@@ -103,6 +103,7 @@ pub fn init_distinct(program: &mut ProgramBuilder, plan: &SelectPlan) -> Result<
unique: false,
has_rowid: false,
where_clause: None,
index_method: None,
});
let cursor_id = program.alloc_cursor_id(CursorType::BTreeIndex(index.clone()));
let ctx = DistinctCtx {
@@ -180,6 +181,7 @@ pub fn init_loop(
has_rowid: false,
unique: false,
where_clause: None,
index_method: None,
});
let cursor_id = program.alloc_cursor_id(CursorType::BTreeIndex(index.clone()));
if group_by.is_none() {
@@ -247,25 +249,23 @@ pub fn init_loop(
});
}
// For delete, we need to open all the other indexes too for writing
if let Some(indexes) = t_ctx.resolver.schema.indexes.get(&btree.name) {
for index in indexes {
if table
.op
.index()
.is_some_and(|table_index| table_index.name == index.name)
{
continue;
}
let cursor_id = program.alloc_cursor_id_keyed(
CursorKey::index(table.internal_id, index.clone()),
CursorType::BTreeIndex(index.clone()),
);
program.emit_insn(Insn::OpenWrite {
cursor_id,
root_page: index.root_page.into(),
db: table.database_id,
});
for index in t_ctx.resolver.schema.get_indices(&btree.name) {
if table
.op
.index()
.is_some_and(|table_index| table_index.name == index.name)
{
continue;
}
let cursor_id = program.alloc_cursor_id_keyed(
CursorKey::index(table.internal_id, index.clone()),
CursorType::BTreeIndex(index.clone()),
);
program.emit_insn(Insn::OpenWrite {
cursor_id,
root_page: index.root_page.into(),
db: table.database_id,
});
}
}
(OperationMode::UPDATE(update_mode), Table::BTree(btree)) => {
@@ -342,27 +342,23 @@ pub fn init_loop(
// For DELETE, we need to open all the indexes for writing
// UPDATE opens these in emit_program_for_update() separately
if matches!(mode, OperationMode::DELETE) {
if let Some(indexes) =
t_ctx.resolver.schema.indexes.get(table.table.get_name())
{
for index in indexes {
if table
.op
.index()
.is_some_and(|table_index| table_index.name == index.name)
{
continue;
}
let cursor_id = program.alloc_cursor_id_keyed(
CursorKey::index(table.internal_id, index.clone()),
CursorType::BTreeIndex(index.clone()),
);
program.emit_insn(Insn::OpenWrite {
cursor_id,
root_page: index.root_page.into(),
db: table.database_id,
});
for index in t_ctx.resolver.schema.get_indices(table.table.get_name()) {
if table
.op
.index()
.is_some_and(|table_index| table_index.name == index.name)
{
continue;
}
let cursor_id = program.alloc_cursor_id_keyed(
CursorKey::index(table.internal_id, index.clone()),
CursorType::BTreeIndex(index.clone()),
);
program.emit_insn(Insn::OpenWrite {
cursor_id,
root_page: index.root_page.into(),
db: table.database_id,
});
}
}
}

@@ -687,6 +687,7 @@ mod tests {
ephemeral: false,
root_page: 1,
has_rowid: true,
index_method: None,
});
available_indexes.insert("test_table".to_string(), VecDeque::from([index]));
@@ -760,6 +761,7 @@ mod tests {
ephemeral: false,
root_page: 1,
has_rowid: true,
index_method: None,
});
available_indexes.insert("table1".to_string(), VecDeque::from([index1]));
@@ -881,6 +883,7 @@ mod tests {
ephemeral: false,
root_page: 1,
has_rowid: true,
index_method: None,
});
available_indexes.insert(table_name.to_string(), VecDeque::from([index]));
});
@@ -899,6 +902,7 @@ mod tests {
ephemeral: false,
root_page: 1,
has_rowid: true,
index_method: None,
});
let order_id_idx = Arc::new(Index {
name: "order_items_order_id_idx".to_string(),
@@ -915,6 +919,7 @@ mod tests {
ephemeral: false,
root_page: 1,
has_rowid: true,
index_method: None,
});
available_indexes
@@ -1354,6 +1359,7 @@ mod tests {
root_page: 2,
ephemeral: false,
has_rowid: true,
index_method: None,
});
let mut available_indexes = HashMap::new();
@@ -1452,6 +1458,7 @@ mod tests {
root_page: 2,
ephemeral: false,
has_rowid: true,
index_method: None,
});
available_indexes.insert("t1".to_string(), VecDeque::from([index]));
@@ -1568,6 +1575,7 @@ mod tests {
ephemeral: false,
has_rowid: true,
unique: false,
index_method: None,
});
available_indexes.insert("t1".to_string(), VecDeque::from([index]));

@@ -1089,6 +1089,7 @@ fn ephemeral_index_build(
.table
.btree()
.is_some_and(|btree| btree.has_rowid),
index_method: None,
};
ephemeral_index

@@ -109,6 +109,7 @@ pub fn init_order_by(
unique: false,
has_rowid: false,
where_clause: None,
index_method: None,
});
program.alloc_cursor_id(CursorType::BTreeIndex(index))
} else {

@@ -1051,6 +1051,9 @@ impl JoinedTable {
if self.col_used_mask.is_empty() {
return false;
}
if index.index_method.is_some() {
return false;
}
let mut index_cols_mask = ColumnUsedMask::default();
for col in index.columns.iter() {
index_cols_mask.set(col.pos_in_table);

@@ -182,7 +182,7 @@ pub fn parse_schema_rows(
}
}
schema.populate_indices(from_sql_indexes, automatic_indices)?;
schema.populate_indices(syms, from_sql_indexes, automatic_indices)?;
schema.populate_materialized_views(
materialized_view_info,
dbsp_state_roots,

@@ -23,7 +23,7 @@ mod fuzz_tests {
/// [See this issue for more info](https://github.com/tursodatabase/turso/issues/1763)
#[test]
pub fn fuzz_failure_issue_1763() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
let offending_query = "SELECT ((ceil(pow((((2.0))), (-2.0 - -1.0) / log(0.5)))) - -2.0)";
@@ -37,7 +37,7 @@ mod fuzz_tests {
#[test]
pub fn arithmetic_expression_fuzz_ex1() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -56,10 +56,8 @@ mod fuzz_tests {
#[test]
pub fn rowid_seek_fuzz() {
let db = TempDatabase::new_with_rusqlite(
"CREATE TABLE t (x INTEGER PRIMARY KEY autoincrement)",
false,
); // INTEGER PRIMARY KEY is a rowid alias, so an index is not created
let db =
TempDatabase::new_with_rusqlite("CREATE TABLE t (x INTEGER PRIMARY KEY autoincrement)"); // INTEGER PRIMARY KEY is a rowid alias, so an index is not created
let sqlite_conn = rusqlite::Connection::open(db.path.clone()).unwrap();
let (mut rng, _seed) = rng_from_time_or_env();
@@ -173,7 +171,7 @@ mod fuzz_tests {
#[test]
pub fn index_scan_fuzz() {
let db = TempDatabase::new_with_rusqlite("CREATE TABLE t (x PRIMARY KEY)", true);
let db = TempDatabase::new_with_rusqlite("CREATE TABLE t (x PRIMARY KEY)");
let sqlite_conn = rusqlite::Connection::open(db.path.clone()).unwrap();
let insert = format!(
@@ -234,14 +232,14 @@ mod fuzz_tests {
];
// Create all different 3-column primary key permutations
let dbs = [
TempDatabase::new_with_rusqlite(table_defs[0], true),
TempDatabase::new_with_rusqlite(table_defs[1], true),
TempDatabase::new_with_rusqlite(table_defs[2], true),
TempDatabase::new_with_rusqlite(table_defs[3], true),
TempDatabase::new_with_rusqlite(table_defs[4], true),
TempDatabase::new_with_rusqlite(table_defs[5], true),
TempDatabase::new_with_rusqlite(table_defs[6], true),
TempDatabase::new_with_rusqlite(table_defs[7], true),
TempDatabase::new_with_rusqlite(table_defs[0]),
TempDatabase::new_with_rusqlite(table_defs[1]),
TempDatabase::new_with_rusqlite(table_defs[2]),
TempDatabase::new_with_rusqlite(table_defs[3]),
TempDatabase::new_with_rusqlite(table_defs[4]),
TempDatabase::new_with_rusqlite(table_defs[5]),
TempDatabase::new_with_rusqlite(table_defs[6]),
TempDatabase::new_with_rusqlite(table_defs[7]),
];
let mut pk_tuples = HashSet::new();
while pk_tuples.len() < 100000 {
@@ -565,7 +563,7 @@ mod fuzz_tests {
// Create databases for each variant using rusqlite, then open limbo on the same file.
let dbs: Vec<TempDatabase> = table_defs
.iter()
.map(|ddl| TempDatabase::new_with_rusqlite(ddl, true))
.map(|ddl| TempDatabase::new_with_rusqlite(ddl))
.collect();
// Seed data focuses on case and trailing spaces to exercise NOCASE and RTRIM semantics.
@@ -664,8 +662,8 @@ mod fuzz_tests {
for outer in 0..OUTER_ITERS {
println!("fk_deferred_constraints_fuzz {}/{}", outer + 1, OUTER_ITERS);
let limbo_db = TempDatabase::new_empty(true);
let sqlite_db = TempDatabase::new_empty(true);
let limbo_db = TempDatabase::new_empty();
let sqlite_db = TempDatabase::new_empty();
let limbo = limbo_db.connect_limbo();
let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap();
@@ -1003,8 +1001,8 @@ mod fuzz_tests {
for outer in 0..OUTER_ITERS {
println!("fk_single_pk_mutation_fuzz {}/{}", outer + 1, OUTER_ITERS);
let limbo_db = TempDatabase::new_empty(true);
let sqlite_db = TempDatabase::new_empty(true);
let limbo_db = TempDatabase::new_empty();
let sqlite_db = TempDatabase::new_empty();
let limbo = limbo_db.connect_limbo();
let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap();
@@ -1314,8 +1312,8 @@ mod fuzz_tests {
// parent rowid, child textified integers -> MustBeInt coercion path
for outer in 0..OUTER_ITERS {
let limbo_db = TempDatabase::new_empty(true);
let sqlite_db = TempDatabase::new_empty(true);
let limbo_db = TempDatabase::new_empty();
let sqlite_db = TempDatabase::new_empty();
let limbo = limbo_db.connect_limbo();
let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap();
@@ -1380,8 +1378,8 @@ mod fuzz_tests {
// self-referential rowid FK
for outer in 0..OUTER_ITERS {
let limbo_db = TempDatabase::new_empty(true);
let sqlite_db = TempDatabase::new_empty(true);
let limbo_db = TempDatabase::new_empty();
let sqlite_db = TempDatabase::new_empty();
let limbo = limbo_db.connect_limbo();
let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap();
@@ -1443,8 +1441,8 @@ mod fuzz_tests {
// self-referential UNIQUE(u,v) parent (fast-path for composite)
for outer in 0..OUTER_ITERS {
let limbo_db = TempDatabase::new_empty(true);
let sqlite_db = TempDatabase::new_empty(true);
let limbo_db = TempDatabase::new_empty();
let sqlite_db = TempDatabase::new_empty();
let limbo = limbo_db.connect_limbo();
let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap();
@@ -1523,8 +1521,8 @@ mod fuzz_tests {
// parent TEXT UNIQUE(u,v), child types differ; rely on parent-index affinities
for outer in 0..OUTER_ITERS {
let limbo_db = TempDatabase::new_empty(true);
let sqlite_db = TempDatabase::new_empty(true);
let limbo_db = TempDatabase::new_empty();
let sqlite_db = TempDatabase::new_empty();
let limbo = limbo_db.connect_limbo();
let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap();
@@ -1650,8 +1648,8 @@ mod fuzz_tests {
OUTER_ITERS
);
let limbo_db = TempDatabase::new_empty(true);
let sqlite_db = TempDatabase::new_empty(true);
let limbo_db = TempDatabase::new_empty();
let sqlite_db = TempDatabase::new_empty();
let limbo = limbo_db.connect_limbo();
let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap();
@@ -1860,8 +1858,8 @@ mod fuzz_tests {
i + 1,
OUTER_ITERATIONS
);
let limbo_db = TempDatabase::new_empty(true);
let sqlite_db = TempDatabase::new_empty(true);
let limbo_db = TempDatabase::new_empty();
let sqlite_db = TempDatabase::new_empty();
let num_cols = rng.random_range(1..=10);
let mut table_cols = vec!["id INTEGER PRIMARY KEY AUTOINCREMENT".to_string()];
table_cols.extend(
@@ -2088,8 +2086,8 @@ mod fuzz_tests {
);
// Columns: id (rowid PK), plus a few data columns we can reference in predicates/keys.
let limbo_db = TempDatabase::new_empty(true);
let sqlite_db = TempDatabase::new_empty(true);
let limbo_db = TempDatabase::new_empty();
let sqlite_db = TempDatabase::new_empty();
let limbo_conn = limbo_db.connect_limbo();
let sqlite = rusqlite::Connection::open(sqlite_db.path.clone()).unwrap();
@@ -2431,7 +2429,7 @@ mod fuzz_tests {
const MAX_SELECTS_IN_UNION_EXTRA: usize = 2;
const MAX_LIMIT_VALUE: usize = 50;
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -2557,7 +2555,7 @@ mod fuzz_tests {
let (mut rng, seed) = rng_from_time_or_env();
const ITERATIONS: usize = 1000;
for i in 0..ITERATIONS {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
let num_cols = rng.random_range(1..=5);
let col_names: Vec<String> = (0..num_cols).map(|c| format!("c{c}")).collect();
@@ -2719,7 +2717,7 @@ mod fuzz_tests {
let sql = g.create().concat(" ").push_str("SELECT").push(expr).build();
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -2739,7 +2737,7 @@ mod fuzz_tests {
#[test]
pub fn fuzz_ex() {
let _ = env_logger::try_init();
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -2838,7 +2836,7 @@ mod fuzz_tests {
let sql = g.create().concat(" ").push_str("SELECT").push(expr).build();
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -2998,7 +2996,7 @@ mod fuzz_tests {
let sql = g.create().concat(" ").push_str("SELECT").push(expr).build();
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -3367,7 +3365,7 @@ mod fuzz_tests {
.push(expr)
.build();
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -3401,7 +3399,7 @@ mod fuzz_tests {
"SELECT * FROM t",
],
] {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
for query in queries.iter() {
@@ -3428,7 +3426,7 @@ mod fuzz_tests {
let datatype = datatypes[rng.random_range(0..datatypes.len())];
let create_table = format!("CREATE TABLE t (x {datatype})");
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -3479,7 +3477,7 @@ mod fuzz_tests {
log::info!("affinity_fuzz seed: {seed}");
for iteration in 0..500 {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -3580,7 +3578,7 @@ mod fuzz_tests {
log::info!("seed: {seed}");
for _ in 0..100 {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -3626,7 +3624,7 @@ mod fuzz_tests {
log::info!("seed: {seed}");
for _ in 0..100 {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -3670,7 +3668,7 @@ mod fuzz_tests {
log::info!("seed: {seed}");
for _ in 0..100 {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -3716,7 +3714,7 @@ mod fuzz_tests {
log::info!("seed: {seed}");
for _ in 0..100 {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -3764,7 +3762,7 @@ mod fuzz_tests {
let predicate = predicate_builders(&g, Some(&tables));
let expr = build_logical_expr(&g, &builders, Some(&predicate));
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
for table in tables.iter() {
@@ -3855,7 +3853,7 @@ mod fuzz_tests {
#[test]
pub fn fuzz_distinct() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -4174,7 +4172,7 @@ mod fuzz_tests {
let db_path = tempfile::NamedTempFile::new()?;
{
let db = TempDatabase::new_with_existent(db_path.path(), true);
let db = TempDatabase::new_with_existent(db_path.path());
let prev_pending_byte = TempDatabase::get_pending_byte();
tracing::debug!(prev_pending_byte);

View File

@@ -79,8 +79,8 @@ pub fn rowid_alias_differential_fuzz() {
};
// Create two Limbo databases with indexes enabled
let db_with_alias = TempDatabase::new_empty(true);
let db_without_alias = TempDatabase::new_empty(true);
let db_with_alias = TempDatabase::new_empty();
let db_without_alias = TempDatabase::new_empty();
// Connect to both databases
let conn_with_alias = db_with_alias.connect_limbo();

View File

@@ -17,15 +17,15 @@ unsafe impl Send for TempDatabase {}
#[allow(dead_code, clippy::arc_with_non_send_sync)]
impl TempDatabase {
pub fn new_empty(enable_indexes: bool) -> Self {
Self::new(&format!("test-{}.db", rng().next_u32()), enable_indexes)
pub fn new_empty() -> Self {
Self::new(&format!("test-{}.db", rng().next_u32()))
}
pub fn new(db_name: &str, enable_indexes: bool) -> Self {
pub fn new(db_name: &str) -> Self {
let mut path = TempDir::new().unwrap().keep();
path.push(db_name);
Self::new_with_existent(&path, enable_indexes)
Self::new_with_existent(&path)
}
pub fn new_with_opts(db_name: &str, opts: turso_core::DatabaseOpts) -> Self {
@@ -47,12 +47,8 @@ impl TempDatabase {
}
}
pub fn new_with_existent(db_path: &Path, enable_indexes: bool) -> Self {
Self::new_with_existent_with_flags(
db_path,
turso_core::OpenFlags::default(),
enable_indexes,
)
pub fn new_with_existent(db_path: &Path) -> Self {
Self::new_with_existent_with_flags(db_path, turso_core::OpenFlags::default())
}
pub fn new_with_existent_with_opts(db_path: &Path, opts: turso_core::DatabaseOpts) -> Self {
@@ -72,18 +68,14 @@ impl TempDatabase {
}
}
pub fn new_with_existent_with_flags(
db_path: &Path,
flags: turso_core::OpenFlags,
enable_indexes: bool,
) -> Self {
pub fn new_with_existent_with_flags(db_path: &Path, flags: turso_core::OpenFlags) -> Self {
let io: Arc<dyn IO + Send> = Arc::new(turso_core::PlatformIO::new().unwrap());
let db = Database::open_file_with_flags(
io.clone(),
db_path.to_str().unwrap(),
flags,
turso_core::DatabaseOpts::new()
.with_indexes(enable_indexes)
.with_indexes(true)
.with_encryption(true),
None,
)
@@ -95,7 +87,7 @@ impl TempDatabase {
}
}
pub fn new_with_rusqlite(table_sql: &str, enable_indexes: bool) -> Self {
pub fn new_with_rusqlite(table_sql: &str) -> Self {
let mut path = TempDir::new().unwrap().keep();
path.push("test.db");
{
@@ -110,7 +102,9 @@ impl TempDatabase {
io.clone(),
path.to_str().unwrap(),
turso_core::OpenFlags::default(),
turso_core::DatabaseOpts::new().with_indexes(enable_indexes),
turso_core::DatabaseOpts::new()
.with_indexes(true)
.with_index_method(true),
None,
)
.unwrap();
@@ -439,7 +433,6 @@ mod tests {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (foo integer, bar integer, baz integer);",
false,
);
let conn = tmp_db.connect_limbo();
@@ -477,11 +470,8 @@ mod tests {
fn test_limbo_open_read_only() -> anyhow::Result<()> {
let path = TempDir::new().unwrap().keep().join("temp_read_only");
{
let db = TempDatabase::new_with_existent_with_flags(
&path,
turso_core::OpenFlags::default(),
false,
);
let db =
TempDatabase::new_with_existent_with_flags(&path, turso_core::OpenFlags::default());
let conn = db.connect_limbo();
let ret = limbo_exec_rows(&db, &conn, "CREATE table t (a)");
assert!(ret.is_empty(), "{ret:?}");
@@ -493,7 +483,6 @@ mod tests {
let db = TempDatabase::new_with_existent_with_flags(
&path,
turso_core::OpenFlags::default() | turso_core::OpenFlags::ReadOnly,
false,
);
let conn = db.connect_limbo();
let ret = limbo_exec_rows(&db, &conn, "SELECT * from t");
@@ -509,7 +498,7 @@ mod tests {
fn test_unique_index_ordering() -> anyhow::Result<()> {
use rand::Rng;
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
let _ = limbo_exec_rows(&db, &conn, "CREATE TABLE t (x INTEGER UNIQUE)");
@@ -550,7 +539,7 @@ mod tests {
#[test]
fn test_large_unique_blobs() -> anyhow::Result<()> {
let path = TempDir::new().unwrap().keep().join("temp_read_only");
let db = TempDatabase::new_with_existent(&path, true);
let db = TempDatabase::new_with_existent(&path);
let conn = db.connect_limbo();
let _ = limbo_exec_rows(&db, &conn, "CREATE TABLE t (x BLOB UNIQUE)");
@@ -580,7 +569,7 @@ mod tests {
.unwrap()
.keep()
.join("temp_transaction_isolation");
let db = TempDatabase::new_with_existent(&path, true);
let db = TempDatabase::new_with_existent(&path);
// Create two separate connections
let conn1 = db.connect_limbo();
@@ -611,7 +600,7 @@ mod tests {
.unwrap()
.keep()
.join("temp_transaction_isolation");
let db = TempDatabase::new_with_existent(&path, true);
let db = TempDatabase::new_with_existent(&path);
// Create two separate connections
let conn1 = db.connect_limbo();
@@ -648,7 +637,7 @@ mod tests {
.unwrap()
.keep()
.join("temp_transaction_isolation");
let db = TempDatabase::new_with_existent(&path, true);
let db = TempDatabase::new_with_existent(&path);
let conn = db.connect_limbo();
@@ -667,7 +656,7 @@ mod tests {
let _ = limbo_exec_rows(&db, &conn, "INSERT INTO t VALUES (69)");
// Reopen the database
let db = TempDatabase::new_with_existent(&path, true);
let db = TempDatabase::new_with_existent(&path);
let conn = db.connect_limbo();
// Should only see the last committed value
@@ -688,7 +677,7 @@ mod tests {
.unwrap()
.keep()
.join("temp_transaction_isolation");
let db = TempDatabase::new_with_existent(&path, true);
let db = TempDatabase::new_with_existent(&path);
let conn = db.connect_limbo();
@@ -703,7 +692,7 @@ mod tests {
do_flush(&conn, &db)?;
// Reopen the database without committing
let db = TempDatabase::new_with_existent(&path, true);
let db = TempDatabase::new_with_existent(&path);
let conn = db.connect_limbo();
// Should see no rows since transaction was never committed
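Note: with the `enable_indexes` flag dropped from every constructor above, indexes are now unconditionally enabled through `DatabaseOpts` (and `new_with_rusqlite` additionally enables index methods), so test setup reduces to the constructors themselves. A minimal sketch, for illustration only, using the helpers shown in this file; the table names are arbitrary:
    let db = TempDatabase::new_empty();                    // was TempDatabase::new_empty(bool)
    let conn = db.connect_limbo();
    conn.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y)").unwrap();
    let seeded = TempDatabase::new_with_rusqlite("CREATE TABLE u (a, b)"); // was (sql, bool)
    let _conn2 = seeded.connect_limbo();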

View File

@@ -16,7 +16,7 @@ fn replace_column_with_null(rows: Vec<Vec<Value>>, column: usize) -> Vec<Vec<Val
#[test]
fn test_cdc_simple_id() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y)")
.unwrap();
@@ -78,7 +78,7 @@ fn record<const N: usize>(values: [Value; N]) -> Vec<u8> {
#[test]
fn test_cdc_simple_before() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y)")
.unwrap();
@@ -149,7 +149,7 @@ fn test_cdc_simple_before() {
#[test]
fn test_cdc_simple_after() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y)")
.unwrap();
@@ -220,7 +220,7 @@ fn test_cdc_simple_after() {
#[test]
fn test_cdc_simple_full() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y)")
.unwrap();
@@ -296,7 +296,7 @@ fn test_cdc_simple_full() {
#[test]
fn test_cdc_crud() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y)")
.unwrap();
@@ -417,7 +417,7 @@ fn test_cdc_crud() {
#[test]
fn test_cdc_failed_op() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y UNIQUE)")
.unwrap();
@@ -491,7 +491,7 @@ fn test_cdc_failed_op() {
#[test]
fn test_cdc_uncaptured_connection() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn1 = db.connect_limbo();
conn1
.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y UNIQUE)")
@@ -571,7 +571,7 @@ fn test_cdc_uncaptured_connection() {
#[test]
fn test_cdc_custom_table() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn1 = db.connect_limbo();
conn1
.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y UNIQUE)")
@@ -620,7 +620,7 @@ fn test_cdc_custom_table() {
#[test]
fn test_cdc_ignore_changes_in_cdc_table() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn1 = db.connect_limbo();
conn1
.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y UNIQUE)")
@@ -660,7 +660,7 @@ fn test_cdc_ignore_changes_in_cdc_table() {
#[test]
fn test_cdc_transaction() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn1 = db.connect_limbo();
conn1
.execute("CREATE TABLE t (x INTEGER PRIMARY KEY, y UNIQUE)")
@@ -743,7 +743,7 @@ fn test_cdc_transaction() {
#[test]
fn test_cdc_independent_connections() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn1 = db.connect_limbo();
let conn2 = db.connect_limbo();
conn1
@@ -799,7 +799,7 @@ fn test_cdc_independent_connections() {
#[test]
fn test_cdc_independent_connections_different_cdc_not_ignore() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn1 = db.connect_limbo();
let conn2 = db.connect_limbo();
conn1
@@ -889,7 +889,7 @@ fn test_cdc_independent_connections_different_cdc_not_ignore() {
#[test]
fn test_cdc_table_columns() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("CREATE TABLE t (a INTEGER PRIMARY KEY, b, c UNIQUE)")
.unwrap();
@@ -905,7 +905,7 @@ fn test_cdc_table_columns() {
#[test]
fn test_cdc_bin_record() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
let record = record([
Value::Null,
@@ -934,7 +934,7 @@ fn test_cdc_bin_record() {
#[test]
fn test_cdc_schema_changes() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("PRAGMA unstable_capture_data_changes_conn('full')")
.unwrap();
@@ -1056,7 +1056,7 @@ fn test_cdc_schema_changes() {
#[test]
fn test_cdc_schema_changes_alter_table() {
let db = TempDatabase::new_empty(true);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("PRAGMA unstable_capture_data_changes_conn('full')")
.unwrap();

View File

@@ -6,7 +6,6 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test_rowid (id INTEGER PRIMARY KEY, val TEXT);",
false,
);
let conn = tmp_db.connect_limbo();
@@ -91,7 +90,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
fn test_integer_primary_key() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test_rowid (id INTEGER PRIMARY KEY);", false);
TempDatabase::new_with_rusqlite("CREATE TABLE test_rowid (id INTEGER PRIMARY KEY);");
let conn = tmp_db.connect_limbo();
for query in &[

View File

@@ -5,7 +5,7 @@ use turso_core::LimboError;
fn sum_errors_on_integer_overflow() {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_empty(false);
let tmp_db = TempDatabase::new_empty();
let conn = tmp_db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();

View File

@@ -13,7 +13,7 @@ use crate::common::{limbo_exec_rows, rng_from_time, TempDatabase};
#[test]
fn test_wal_frame_count() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
assert_eq!(conn.wal_state().unwrap().max_frame, 0);
conn.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -29,9 +29,9 @@ fn test_wal_frame_count() {
#[test]
fn test_wal_frame_transfer_no_schema_changes() {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
let db2 = TempDatabase::new_empty(false);
let db2 = TempDatabase::new_empty();
let conn2 = db2.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -68,9 +68,9 @@ fn test_wal_frame_transfer_no_schema_changes() {
#[test]
fn test_wal_frame_transfer_various_schema_changes() {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
let db2 = TempDatabase::new_empty(false);
let db2 = TempDatabase::new_empty();
let conn2 = db2.connect_limbo();
let conn3 = db2.connect_limbo();
conn1
@@ -127,9 +127,9 @@ fn test_wal_frame_transfer_various_schema_changes() {
#[test]
fn test_wal_frame_transfer_schema_changes() {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
let db2 = TempDatabase::new_empty(false);
let db2 = TempDatabase::new_empty();
let conn2 = db2.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -166,9 +166,9 @@ fn test_wal_frame_transfer_schema_changes() {
#[test]
fn test_wal_frame_transfer_no_schema_changes_rollback() {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
let db2 = TempDatabase::new_empty(false);
let db2 = TempDatabase::new_empty();
let conn2 = db2.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -205,9 +205,9 @@ fn test_wal_frame_transfer_no_schema_changes_rollback() {
#[test]
fn test_wal_frame_transfer_schema_changes_rollback() {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
let db2 = TempDatabase::new_empty(false);
let db2 = TempDatabase::new_empty();
let conn2 = db2.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -240,9 +240,9 @@ fn test_wal_frame_transfer_schema_changes_rollback() {
#[test]
fn test_wal_frame_conflict() {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
let db2 = TempDatabase::new_empty(false);
let db2 = TempDatabase::new_empty();
let conn2 = db2.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -259,9 +259,9 @@ fn test_wal_frame_conflict() {
#[test]
fn test_wal_frame_far_away_write() {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
let db2 = TempDatabase::new_empty(false);
let db2 = TempDatabase::new_empty();
let conn2 = db2.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -287,9 +287,9 @@ fn test_wal_frame_far_away_write() {
fn test_wal_frame_api_no_schema_changes_fuzz() {
let (mut rng, _) = rng_from_time();
for _ in 0..4 {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
let db2 = TempDatabase::new_empty(false);
let db2 = TempDatabase::new_empty();
let conn2 = db2.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -342,7 +342,7 @@ fn test_wal_frame_api_no_schema_changes_fuzz() {
#[test]
fn test_wal_api_changed_pages() {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -421,7 +421,7 @@ fn revert_to(conn: &Arc<turso_core::Connection>, frame_watermark: u64) -> turso_
#[test]
fn test_wal_api_revert_pages() {
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")
@@ -465,7 +465,7 @@ fn test_wal_api_revert_pages() {
#[test]
fn test_wal_upper_bound_passive() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let writer = db.connect_limbo();
writer
@@ -499,7 +499,7 @@ fn test_wal_upper_bound_passive() {
let db_path_copy = format!("{}-{}-copy", db.path.to_str().unwrap(), watermark);
std::fs::copy(&db.path, db_path_copy.clone()).unwrap();
let db_copy = TempDatabase::new_with_existent(&PathBuf::from(db_path_copy), false);
let db_copy = TempDatabase::new_with_existent(&PathBuf::from(db_path_copy));
let conn = db_copy.connect_limbo();
let mut stmt = conn.prepare("select * from test").unwrap();
let mut rows: Vec<Vec<turso_core::types::Value>> = Vec::new();
@@ -520,7 +520,7 @@ fn test_wal_upper_bound_passive() {
#[test]
fn test_wal_upper_bound_truncate() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let writer = db.connect_limbo();
writer
@@ -548,7 +548,7 @@ fn test_wal_upper_bound_truncate() {
#[test]
fn test_wal_state_checkpoint_seq() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let writer = db.connect_limbo();
writer
@@ -586,7 +586,7 @@ fn test_wal_state_checkpoint_seq() {
#[test]
fn test_wal_checkpoint_no_work() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let writer = db.connect_limbo();
let reader = db.connect_limbo();
@@ -631,7 +631,7 @@ fn test_wal_checkpoint_no_work() {
#[test]
fn test_wal_revert_change_db_size() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let writer = db.connect_limbo();
writer.execute("create table t(x, y)").unwrap();
@@ -678,7 +678,7 @@ fn test_wal_revert_change_db_size() {
#[test]
fn test_wal_api_exec_commit() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let writer = db.connect_limbo();
writer
@@ -725,7 +725,7 @@ fn test_wal_api_exec_commit() {
#[test]
fn test_wal_api_exec_rollback() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let writer = db.connect_limbo();
writer
@@ -760,7 +760,7 @@ fn test_wal_api_exec_rollback() {
#[test]
fn test_wal_api_insert_exec_mix() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
conn.execute("create table a(x, y)").unwrap();
@@ -928,9 +928,9 @@ fn test_db_share_same_file() {
#[test]
fn test_wal_api_simulate_spilled_frames() {
let (mut rng, _) = rng_from_time();
let db1 = TempDatabase::new_empty(false);
let db1 = TempDatabase::new_empty();
let conn1 = db1.connect_limbo();
let db2 = TempDatabase::new_empty(false);
let db2 = TempDatabase::new_empty();
let conn2 = db2.connect_limbo();
conn1
.execute("CREATE TABLE t(x INTEGER PRIMARY KEY, y)")

View File

@@ -0,0 +1,255 @@
use std::collections::HashMap;
use turso_core::{
index_method::{
toy_vector_sparse_ivf::VectorSparseInvertedIndexMethod, IndexMethod,
IndexMethodConfiguration,
},
schema::IndexColumn,
types::IOResult,
vector::{self, vector_types::VectorType},
Register, Result, Value,
};
use turso_parser::ast::SortOrder;
use crate::common::{limbo_exec_rows, TempDatabase};
fn run<T>(db: &TempDatabase, mut f: impl FnMut() -> Result<IOResult<T>>) -> Result<T> {
loop {
match f()? {
IOResult::Done(value) => return Ok(value),
IOResult::IO(iocompletions) => {
while !iocompletions.finished() {
db.io.step().unwrap();
}
}
}
}
}
fn sparse_vector(v: &str) -> Value {
let vector = vector::operations::text::vector_from_text(VectorType::Float32Sparse, v).unwrap();
vector::operations::serialize::vector_serialize(vector)
}
#[test]
fn test_vector_sparse_ivf_create_destroy() {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t(name, embedding)");
let conn = tmp_db.connect_limbo();
let schema_rows = || {
limbo_exec_rows(&tmp_db, &conn, "SELECT * FROM sqlite_master")
.into_iter()
.map(|x| match &x[1] {
rusqlite::types::Value::Text(t) => t.clone(),
_ => unreachable!(),
})
.collect::<Vec<String>>()
};
assert_eq!(schema_rows(), vec!["t"]);
let index = VectorSparseInvertedIndexMethod;
let attached = index
.attach(&IndexMethodConfiguration {
table_name: "t".to_string(),
index_name: "t_idx".to_string(),
columns: vec![IndexColumn {
name: "embedding".to_string(),
order: SortOrder::Asc,
pos_in_table: 1,
collation: None,
default: None,
}],
parameters: HashMap::new(),
})
.unwrap();
conn.wal_insert_begin().unwrap();
{
let mut cursor = attached.init().unwrap();
run(&tmp_db, || cursor.create(&conn)).unwrap();
}
conn.wal_insert_end(true).unwrap();
assert_eq!(schema_rows(), vec!["t", "t_idx_scratch"]);
conn.wal_insert_begin().unwrap();
{
let mut cursor = attached.init().unwrap();
run(&tmp_db, || cursor.destroy(&conn)).unwrap();
}
conn.wal_insert_end(true).unwrap();
assert_eq!(schema_rows(), vec!["t"]);
}
#[test]
fn test_vector_sparse_ivf_insert_query() {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t(name, embedding)");
let conn = tmp_db.connect_limbo();
let index = VectorSparseInvertedIndexMethod;
let attached = index
.attach(&IndexMethodConfiguration {
table_name: "t".to_string(),
index_name: "t_idx".to_string(),
columns: vec![IndexColumn {
name: "embedding".to_string(),
order: SortOrder::Asc,
pos_in_table: 1,
collation: None,
default: None,
}],
parameters: HashMap::new(),
})
.unwrap();
conn.wal_insert_begin().unwrap();
{
let mut cursor = attached.init().unwrap();
run(&tmp_db, || cursor.create(&conn)).unwrap();
}
conn.wal_insert_end(true).unwrap();
for (i, vector_str) in [
"[0, 0, 0, 1]",
"[0, 0, 1, 0]",
"[0, 1, 0, 0]",
"[1, 0, 0, 0]",
]
.iter()
.enumerate()
{
let mut cursor = attached.init().unwrap();
run(&tmp_db, || cursor.open_write(&conn)).unwrap();
let values = [
Register::Value(sparse_vector(vector_str)),
Register::Value(Value::Integer((i + 1) as i64)),
];
run(&tmp_db, || cursor.insert(&values)).unwrap();
limbo_exec_rows(
&tmp_db,
&conn,
&format!("INSERT INTO t VALUES ('{i}', vector32_sparse('{vector_str}'))"),
);
}
for (vector, results) in [
("[0, 0, 0, 1]", &[(1, 0.0)][..]),
("[0, 0, 1, 0]", &[(2, 0.0)][..]),
("[0, 1, 0, 0]", &[(3, 0.0)][..]),
("[1, 0, 0, 0]", &[(4, 0.0)][..]),
("[1, 0, 0, 1]", &[(1, 0.5), (4, 0.5)][..]),
(
"[1, 1, 1, 1]",
&[(1, 0.75), (2, 0.75), (3, 0.75), (4, 0.75)][..],
),
] {
let mut cursor = attached.init().unwrap();
run(&tmp_db, || cursor.open_read(&conn)).unwrap();
let values = [
Register::Value(Value::Integer(0)),
Register::Value(sparse_vector(vector)),
Register::Value(Value::Integer(5)),
];
run(&tmp_db, || cursor.query_start(&values)).unwrap();
for (rowid, dist) in results {
assert!(run(&tmp_db, || cursor.query_next()).unwrap());
assert_eq!(
*rowid,
run(&tmp_db, || cursor.query_rowid()).unwrap().unwrap()
);
assert_eq!(
*dist,
run(&tmp_db, || cursor.query_column(0)).unwrap().as_float()
);
}
assert!(!run(&tmp_db, || cursor.query_next()).unwrap());
}
}
#[test]
fn test_vector_sparse_ivf_update() {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t(name, embedding)");
let conn = tmp_db.connect_limbo();
let index = VectorSparseInvertedIndexMethod;
let attached = index
.attach(&IndexMethodConfiguration {
table_name: "t".to_string(),
index_name: "t_idx".to_string(),
columns: vec![IndexColumn {
name: "embedding".to_string(),
order: SortOrder::Asc,
pos_in_table: 1,
collation: None,
default: None,
}],
parameters: HashMap::new(),
})
.unwrap();
conn.wal_insert_begin().unwrap();
{
let mut cursor = attached.init().unwrap();
run(&tmp_db, || cursor.create(&conn)).unwrap();
}
conn.wal_insert_end(true).unwrap();
let mut writer = attached.init().unwrap();
run(&tmp_db, || writer.open_write(&conn)).unwrap();
let v0_str = "[0, 1, 0, 0]";
let v1_str = "[1, 0, 0, 1]";
let q = sparse_vector("[1, 0, 0, 1]");
let v0 = sparse_vector(v0_str);
let v1 = sparse_vector(v1_str);
let insert0_values = [
Register::Value(v0.clone()),
Register::Value(Value::Integer(1)),
];
let insert1_values = [
Register::Value(v1.clone()),
Register::Value(Value::Integer(1)),
];
let query_values = [
Register::Value(Value::Integer(0)),
Register::Value(q.clone()),
Register::Value(Value::Integer(1)),
];
run(&tmp_db, || writer.insert(&insert0_values)).unwrap();
limbo_exec_rows(
&tmp_db,
&conn,
&format!("INSERT INTO t VALUES ('test', vector32_sparse('{v0_str}'))"),
);
let mut reader = attached.init().unwrap();
run(&tmp_db, || reader.open_read(&conn)).unwrap();
run(&tmp_db, || reader.query_start(&query_values)).unwrap();
assert!(!run(&tmp_db, || reader.query_next()).unwrap());
limbo_exec_rows(
&tmp_db,
&conn,
&format!("UPDATE t SET embedding = vector32_sparse('{v1_str}') WHERE rowid = 1"),
);
run(&tmp_db, || writer.delete(&insert0_values)).unwrap();
run(&tmp_db, || writer.insert(&insert1_values)).unwrap();
let mut reader = attached.init().unwrap();
run(&tmp_db, || reader.open_read(&conn)).unwrap();
run(&tmp_db, || reader.query_start(&query_values)).unwrap();
assert!(run(&tmp_db, || reader.query_next()).unwrap());
assert_eq!(1, run(&tmp_db, || reader.query_rowid()).unwrap().unwrap());
assert_eq!(
0.0,
run(&tmp_db, || reader.query_column(0)).unwrap().as_float()
);
assert!(!run(&tmp_db, || reader.query_next()).unwrap());
}
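For reference, the read path exercised in these tests boils down to the following loop. This is only a sketch, reusing `run`, `attached`, `tmp_db`, `conn`, and the `query_values` registers exactly as defined above; the `println!` is purely illustrative:
    let mut cursor = attached.init().unwrap();
    run(&tmp_db, || cursor.open_read(&conn)).unwrap();
    run(&tmp_db, || cursor.query_start(&query_values)).unwrap();
    // Drain matches: each hit exposes a rowid plus the distance in result column 0.
    while run(&tmp_db, || cursor.query_next()).unwrap() {
        let rowid = run(&tmp_db, || cursor.query_rowid()).unwrap().unwrap();
        let dist = run(&tmp_db, || cursor.query_column(0)).unwrap().as_float();
        println!("rowid={rowid} dist={dist}");
    }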

View File

@@ -1,6 +1,7 @@
mod common;
mod functions;
mod fuzz_transaction;
mod index_method;
mod pragma;
mod query_processing;
mod storage;

View File

@@ -3,7 +3,7 @@ use turso_core::{StepResult, Value};
#[test]
fn test_pragma_module_list_returns_list() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
let mut module_list = conn.query("PRAGMA module_list;").unwrap();
@@ -21,7 +21,7 @@ fn test_pragma_module_list_returns_list() {
#[test]
fn test_pragma_module_list_generate_series() {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
let conn = db.connect_limbo();
let mut rows = conn
@@ -61,7 +61,7 @@ fn test_pragma_module_list_generate_series() {
#[test]
fn test_pragma_page_sizes_without_writes_persists() {
for test_page_size in [512, 1024, 2048, 4096, 8192, 16384, 32768, 65536] {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
{
let conn = db.connect_limbo();
let pragma_query = format!("PRAGMA page_size={test_page_size}");
@@ -81,7 +81,7 @@ fn test_pragma_page_sizes_without_writes_persists() {
assert_eq!(*page_size, test_page_size);
// Reopen database and verify page size
let db = TempDatabase::new_with_existent(&db.path, false);
let db = TempDatabase::new_with_existent(&db.path);
let conn = db.connect_limbo();
let mut rows = conn.query("PRAGMA page_size").unwrap().unwrap();
let StepResult::Row = rows.step().unwrap() else {
@@ -98,7 +98,7 @@ fn test_pragma_page_sizes_without_writes_persists() {
#[test]
fn test_pragma_page_sizes_with_writes_persists() {
for test_page_size in [512, 1024, 2048, 4096, 8192, 16384, 32768, 65536] {
let db = TempDatabase::new_empty(false);
let db = TempDatabase::new_empty();
{
{
let conn = db.connect_limbo();
@@ -153,7 +153,7 @@ fn test_pragma_page_sizes_with_writes_persists() {
}
// Drop the db and reopen it, and verify the same
let db = TempDatabase::new_with_existent(&db.path, false);
let db = TempDatabase::new_with_existent(&db.path);
let conn = db.connect_limbo();
let mut page_size = conn.pragma_query("page_size").unwrap();
let mut page_size = page_size.pop().unwrap();

View File

@@ -9,7 +9,7 @@ const ENABLE_ENCRYPTION: bool = true;
fn test_per_page_encryption() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let db_name = format!("test-{}.db", rng().next_u32());
let tmp_db = TempDatabase::new(&db_name, false);
let tmp_db = TempDatabase::new(&db_name);
let db_path = tmp_db.path.clone();
{
@@ -199,7 +199,7 @@ fn test_per_page_encryption() -> anyhow::Result<()> {
fn test_non_4k_page_size_encryption() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let db_name = format!("test-8k-{}.db", rng().next_u32());
let tmp_db = TempDatabase::new(&db_name, false);
let tmp_db = TempDatabase::new(&db_name);
let db_path = tmp_db.path.clone();
{
@@ -261,7 +261,7 @@ fn test_non_4k_page_size_encryption() -> anyhow::Result<()> {
fn test_corruption_turso_magic_bytes() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let db_name = format!("test-corruption-magic-{}.db", rng().next_u32());
let tmp_db = TempDatabase::new(&db_name, false);
let tmp_db = TempDatabase::new(&db_name);
let db_path = tmp_db.path.clone();
{
@@ -331,7 +331,7 @@ fn test_corruption_turso_magic_bytes() -> anyhow::Result<()> {
fn test_corruption_associated_data_bytes() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let db_name = format!("test-corruption-ad-{}.db", rng().next_u32());
let tmp_db = TempDatabase::new(&db_name, false);
let tmp_db = TempDatabase::new(&db_name);
let db_path = tmp_db.path.clone();
{
@@ -365,7 +365,7 @@ fn test_corruption_associated_data_bytes() -> anyhow::Result<()> {
corrupt_pos,
rng().next_u32()
);
let test_tmp_db = TempDatabase::new(&test_db_name, false);
let test_tmp_db = TempDatabase::new(&test_db_name);
let test_db_path = test_tmp_db.path.clone();
std::fs::copy(&db_path, &test_db_path)?;
@@ -509,7 +509,7 @@ fn test_turso_header_structure() -> anyhow::Result<()> {
for (cipher_name, expected_id, description, hexkey) in test_cases {
let db_name = format!("test-header-{}-{}.db", cipher_name, rng().next_u32());
let tmp_db = TempDatabase::new(&db_name, false);
let tmp_db = TempDatabase::new(&db_name);
let db_path = tmp_db.path.clone();
{

View File

@@ -448,7 +448,6 @@ fn test_btree() {
for attempt in 0..16 {
let db = TempDatabase::new_with_rusqlite(
"create table test (k INTEGER PRIMARY KEY, b BLOB);",
false,
);
log::info!(
"depth: {}, attempt: {}, path: {:?}",

View File

@@ -3,7 +3,7 @@ use crate::common::TempDatabase;
#[test]
fn test_fail_drop_indexed_column() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b);", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b);");
let conn = tmp_db.connect_limbo();
conn.execute("CREATE INDEX i ON t (a)")?;
@@ -15,7 +15,7 @@ fn test_fail_drop_indexed_column() -> anyhow::Result<()> {
#[test]
fn test_fail_drop_unique_column() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a UNIQUE, b);", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a UNIQUE, b);");
let conn = tmp_db.connect_limbo();
let res = conn.execute("ALTER TABLE t DROP COLUMN a");
@@ -26,7 +26,7 @@ fn test_fail_drop_unique_column() -> anyhow::Result<()> {
#[test]
fn test_fail_drop_compound_unique_column() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b, UNIQUE(a, b));", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b, UNIQUE(a, b));");
let conn = tmp_db.connect_limbo();
let res = conn.execute("ALTER TABLE t DROP COLUMN a");
@@ -40,7 +40,7 @@ fn test_fail_drop_compound_unique_column() -> anyhow::Result<()> {
#[test]
fn test_fail_drop_primary_key_column() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a PRIMARY KEY, b);", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a PRIMARY KEY, b);");
let conn = tmp_db.connect_limbo();
let res = conn.execute("ALTER TABLE t DROP COLUMN a");
@@ -54,7 +54,7 @@ fn test_fail_drop_primary_key_column() -> anyhow::Result<()> {
#[test]
fn test_fail_drop_compound_primary_key_column() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b, PRIMARY KEY(a, b));", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b, PRIMARY KEY(a, b));");
let conn = tmp_db.connect_limbo();
let res = conn.execute("ALTER TABLE t DROP COLUMN a");
@@ -68,7 +68,7 @@ fn test_fail_drop_compound_primary_key_column() -> anyhow::Result<()> {
#[test]
fn test_fail_drop_partial_index_column() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b);", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b);");
let conn = tmp_db.connect_limbo();
conn.execute("CREATE INDEX i ON t (b) WHERE a > 0")?;
@@ -83,7 +83,7 @@ fn test_fail_drop_partial_index_column() -> anyhow::Result<()> {
#[test]
fn test_fail_drop_view_column() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b);", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b);");
let conn = tmp_db.connect_limbo();
conn.execute("CREATE VIEW v AS SELECT a, b FROM t")?;
@@ -99,7 +99,7 @@ fn test_fail_drop_view_column() -> anyhow::Result<()> {
#[test]
fn test_fail_rename_view_column() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b);", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (a, b);");
let conn = tmp_db.connect_limbo();
conn.execute("CREATE VIEW v AS SELECT a, b FROM t")?;
@@ -116,7 +116,6 @@ fn test_allow_drop_unreferenced_columns() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE t (pk INTEGER PRIMARY KEY, indexed INTEGER, viewed INTEGER, partial INTEGER, compound1 INTEGER, compound2 INTEGER, unused1 INTEGER, unused2 INTEGER, unused3 INTEGER);",
true
);
let conn = tmp_db.connect_limbo();

View File

@@ -6,7 +6,7 @@ use crate::common::{maybe_setup_tracing, TempDatabase};
#[test]
fn test_schema_reprepare() {
let tmp_db = TempDatabase::new_empty(false);
let tmp_db = TempDatabase::new_empty();
let conn1 = tmp_db.connect_limbo();
conn1.execute("CREATE TABLE t(x, y, z)").unwrap();
conn1
@@ -55,7 +55,7 @@ fn test_create_multiple_connections() -> anyhow::Result<()> {
maybe_setup_tracing();
let tries = 1;
for _ in 0..tries {
let tmp_db = Arc::new(TempDatabase::new_empty(false));
let tmp_db = Arc::new(TempDatabase::new_empty());
{
let conn = tmp_db.connect_limbo();
conn.execute("CREATE TABLE t (x)").unwrap();
@@ -134,7 +134,7 @@ fn test_create_multiple_connections() -> anyhow::Result<()> {
fn test_reader_writer() -> anyhow::Result<()> {
let tries = 10;
for _ in 0..tries {
let tmp_db = Arc::new(TempDatabase::new_empty(false));
let tmp_db = Arc::new(TempDatabase::new_empty());
{
let conn = tmp_db.connect_limbo();
conn.execute("CREATE TABLE t (x)").unwrap();
@@ -206,7 +206,7 @@ fn test_reader_writer() -> anyhow::Result<()> {
#[test]
fn test_schema_reprepare_write() {
maybe_setup_tracing();
let tmp_db = TempDatabase::new_empty(false);
let tmp_db = TempDatabase::new_empty();
let conn1 = tmp_db.connect_limbo();
conn1.execute("CREATE TABLE t(x, y, z)").unwrap();
let conn2 = tmp_db.connect_limbo();
@@ -253,7 +253,7 @@ fn advance(stmt: &mut Statement) -> anyhow::Result<()> {
#[test]
fn test_interleaved_transactions() -> anyhow::Result<()> {
maybe_setup_tracing();
let tmp_db = TempDatabase::new_empty(true);
let tmp_db = TempDatabase::new_empty();
{
let bootstrap_conn = tmp_db.connect_limbo();
bootstrap_conn.execute("CREATE TABLE table_0 (id INTEGER,col_1 REAL,col_2 INTEGER,col_3 REAL,col_4 TEXT,col_5 REAL,col_6 TEXT)")?;

View File

@@ -3,7 +3,7 @@ use turso_core::{LimboError, StepResult, Value};
#[test]
fn test_statement_reset_bind() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);", false);
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);");
let conn = tmp_db.connect_limbo();
let mut stmt = conn.prepare("select ?")?;
@@ -47,7 +47,7 @@ fn test_statement_reset_bind() -> anyhow::Result<()> {
#[test]
fn test_statement_bind() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);", false);
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);");
let conn = tmp_db.connect_limbo();
let mut stmt = conn.prepare("select ?, ?1, :named, ?3, ?4")?;
@@ -112,7 +112,6 @@ fn test_insert_parameter_remap() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
@@ -177,7 +176,6 @@ fn test_insert_parameter_remap_all_params() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (d, a, c, b) values (?, ?, ?, ?);")?;
@@ -245,7 +243,6 @@ fn test_insert_parameter_multiple_remap_backwards() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (d,c,b,a) values (?, ?, ?, ?);")?;
@@ -312,7 +309,6 @@ fn test_insert_parameter_multiple_no_remap() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (a,b,c,d) values (?, ?, ?, ?);")?;
@@ -379,7 +375,6 @@ fn test_insert_parameter_multiple_row() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (b,a,d,c) values (?, ?, ?, ?), (?, ?, ?, ?);")?;
@@ -445,7 +440,7 @@ fn test_insert_parameter_multiple_row() -> anyhow::Result<()> {
#[test]
fn test_bind_parameters_update_query() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("create table test (a integer, b text);", false);
let tmp_db = TempDatabase::new_with_rusqlite("create table test (a integer, b text);");
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (a, b) values (3, 'test1');")?;
loop {
@@ -489,7 +484,6 @@ fn test_bind_parameters_update_query() -> anyhow::Result<()> {
fn test_bind_parameters_update_query_multiple_where() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b text, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (a, b, c, d) values (3, 'test1', 4, 5);")?;
@@ -535,10 +529,8 @@ fn test_bind_parameters_update_query_multiple_where() -> anyhow::Result<()> {
#[test]
fn test_bind_parameters_update_rowid_alias() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT);",
false,
);
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT);");
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (id, name) values (1, 'test');")?;
loop {
@@ -596,7 +588,6 @@ fn test_bind_parameters_update_rowid_alias() -> anyhow::Result<()> {
fn test_bind_parameters_update_rowid_alias_seek_rowid() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT, age integer);",
false,
);
let conn = tmp_db.connect_limbo();
conn.execute("insert into test (id, name, age) values (1, 'test', 4);")?;
@@ -664,7 +655,6 @@ fn test_bind_parameters_update_rowid_alias_seek_rowid() -> anyhow::Result<()> {
fn test_bind_parameters_delete_rowid_alias_seek_out_of_order() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT, age integer);",
false,
);
let conn = tmp_db.connect_limbo();
conn.execute("insert into test (id, name, age) values (1, 'correct', 4);")?;
@@ -706,10 +696,8 @@ fn test_bind_parameters_delete_rowid_alias_seek_out_of_order() -> anyhow::Result
#[test]
fn test_cte_alias() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT);",
false,
);
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT);");
let conn = tmp_db.connect_limbo();
conn.execute("INSERT INTO test (id, name) VALUES (1, 'Limbo');")?;
conn.execute("INSERT INTO test (id, name) VALUES (2, 'Turso');")?;
@@ -753,7 +741,7 @@ fn test_cte_alias() -> anyhow::Result<()> {
#[test]
fn test_avg_agg() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("create table t (x, y);", false);
let tmp_db = TempDatabase::new_with_rusqlite("create table t (x, y);");
let conn = tmp_db.connect_limbo();
conn.execute("insert into t values (1, null), (2, null), (3, null), (null, null), (4, null)")?;
let mut rows = Vec::new();
@@ -787,7 +775,7 @@ fn test_avg_agg() -> anyhow::Result<()> {
#[test]
fn test_offset_limit_bind() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (i INTEGER);", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (i INTEGER);");
let conn = tmp_db.connect_limbo();
conn.execute("INSERT INTO test VALUES (5), (4), (3), (2), (1)")?;
@@ -830,10 +818,8 @@ fn test_offset_limit_bind() -> anyhow::Result<()> {
#[test]
fn test_upsert_parameters_order() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (k INTEGER PRIMARY KEY, v INTEGER);",
false,
);
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (k INTEGER PRIMARY KEY, v INTEGER);");
let conn = tmp_db.connect_limbo();
conn.execute("INSERT INTO test VALUES (1, 2), (3, 4)")?;
@@ -879,10 +865,8 @@ fn test_upsert_parameters_order() -> anyhow::Result<()> {
#[test]
fn test_multiple_connections_visibility() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (k INTEGER PRIMARY KEY, v INTEGER);",
false,
);
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (k INTEGER PRIMARY KEY, v INTEGER);");
let conn1 = tmp_db.connect_limbo();
let conn2 = tmp_db.connect_limbo();
conn1.execute("BEGIN")?;
@@ -901,7 +885,7 @@ fn test_multiple_connections_visibility() -> anyhow::Result<()> {
#[test]
/// Test that we can only join up to 63 tables, and trying to join more should fail with an error instead of panicking.
fn test_max_joined_tables_limit() {
let tmp_db = TempDatabase::new("test_max_joined_tables_limit", false);
let tmp_db = TempDatabase::new("test_max_joined_tables_limit");
let conn = tmp_db.connect_limbo();
// Create 64 tables
@@ -934,7 +918,7 @@ fn test_many_columns() {
}
create_sql.push(')');
let tmp_db = TempDatabase::new("test_many_columns", false);
let tmp_db = TempDatabase::new("test_many_columns");
let conn = tmp_db.connect_limbo();
conn.execute(&create_sql).unwrap();

View File

@@ -14,7 +14,7 @@ use crate::common::TempDatabase;
// was still fresh (no reads or writes happened).
#[test]
fn test_deferred_transaction_restart() {
let tmp_db = TempDatabase::new("test_deferred_tx.db", true);
let tmp_db = TempDatabase::new("test_deferred_tx.db");
let conn1 = tmp_db.connect_limbo();
let conn2 = tmp_db.connect_limbo();
@@ -57,7 +57,7 @@ fn test_deferred_transaction_restart() {
// because it has performed reads and has a committed snapshot.
#[test]
fn test_deferred_transaction_no_restart() {
let tmp_db = TempDatabase::new("test_deferred_tx_no_restart.db", true);
let tmp_db = TempDatabase::new("test_deferred_tx_no_restart.db");
let conn1 = tmp_db.connect_limbo();
let conn2 = tmp_db.connect_limbo();
@@ -106,7 +106,7 @@ fn test_deferred_transaction_no_restart() {
#[test]
fn test_txn_error_doesnt_rollback_txn() -> Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("create table t (x);", false);
let tmp_db = TempDatabase::new_with_rusqlite("create table t (x);");
let conn = tmp_db.connect_limbo();
conn.execute("begin")?;
@@ -131,7 +131,7 @@ fn test_txn_error_doesnt_rollback_txn() -> Result<()> {
/// Connection 2 should see the initial data (table 'test' in schema + 2 rows). Regression test for #2997
/// It should then see another created table 'test2' in schema, as well.
fn test_transaction_visibility() {
let tmp_db = TempDatabase::new("test_transaction_visibility.db", true);
let tmp_db = TempDatabase::new("test_transaction_visibility.db");
let conn1 = tmp_db.connect_limbo();
let conn2 = tmp_db.connect_limbo();
@@ -179,10 +179,8 @@ fn test_transaction_visibility() {
#[test]
/// A constraint error does not rollback the transaction, it rolls back the statement.
fn test_constraint_error_aborts_only_stmt_not_entire_transaction() {
let tmp_db = TempDatabase::new(
"test_constraint_error_aborts_only_stmt_not_entire_transaction.db",
true,
);
let tmp_db =
TempDatabase::new("test_constraint_error_aborts_only_stmt_not_entire_transaction.db");
let conn = tmp_db.connect_limbo();
// Create table succeeds
@@ -224,7 +222,7 @@ fn test_constraint_error_aborts_only_stmt_not_entire_transaction() {
/// violations being persisted to the database, even though the transaction was aborted.
/// This test ensures that dirty pages are not flushed to WAL until after deferred violations are checked.
fn test_deferred_fk_violation_rollback_in_autocommit() {
let tmp_db = TempDatabase::new("test_deferred_fk_violation_rollback.db", true);
let tmp_db = TempDatabase::new("test_deferred_fk_violation_rollback.db");
let conn = tmp_db.connect_limbo();
// Enable foreign keys
@@ -579,7 +577,7 @@ fn test_mvcc_checkpoint_works() {
conn.execute("PRAGMA wal_checkpoint(TRUNCATE)").unwrap();
// Verify all rows after reopening database
let tmp_db = TempDatabase::new_with_existent(&tmp_db.path, true);
let tmp_db = TempDatabase::new_with_existent(&tmp_db.path);
let conn = tmp_db.connect_limbo();
let stmt = conn
.query("SELECT * FROM test ORDER BY id, value")
@@ -723,7 +721,7 @@ fn test_mvcc_recovery_of_both_checkpointed_and_noncheckpointed_tables_works() {
#[test]
fn test_non_mvcc_to_mvcc() {
// Create non-mvcc database
let tmp_db = TempDatabase::new("test_non_mvcc_to_mvcc.db", false);
let tmp_db = TempDatabase::new("test_non_mvcc_to_mvcc.db");
let conn = tmp_db.connect_limbo();
// Create table and insert data

View File

@@ -27,10 +27,8 @@ macro_rules! change_state {
#[ignore]
fn test_simple_overflow_page() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (x INTEGER PRIMARY KEY, t TEXT);",
false,
);
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY, t TEXT);");
let conn = tmp_db.connect_limbo();
let mut huge_text = String::new();
@@ -91,10 +89,8 @@ fn test_simple_overflow_page() -> anyhow::Result<()> {
fn test_sequential_overflow_page() -> anyhow::Result<()> {
let _ = env_logger::try_init();
maybe_setup_tracing();
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (x INTEGER PRIMARY KEY, t TEXT);",
false,
);
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY, t TEXT);");
let conn = tmp_db.connect_limbo();
let iterations = 10_usize;
@@ -163,8 +159,7 @@ fn test_sequential_write() -> anyhow::Result<()> {
let _ = env_logger::try_init();
maybe_setup_tracing();
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);");
let conn = tmp_db.connect_limbo();
let list_query = "SELECT * FROM test";
@@ -199,7 +194,7 @@ fn test_sequential_write() -> anyhow::Result<()> {
/// https://github.com/tursodatabase/turso/pull/679
fn test_regression_multi_row_insert() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x REAL);", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x REAL);");
let conn = tmp_db.connect_limbo();
let insert_query = "INSERT INTO test VALUES (-2), (-3), (-1)";
@@ -232,7 +227,7 @@ fn test_regression_multi_row_insert() -> anyhow::Result<()> {
#[test]
fn test_statement_reset() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);", false);
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);");
let conn = tmp_db.connect_limbo();
conn.execute("insert into test values (1)")?;
@@ -278,8 +273,7 @@ fn test_statement_reset() -> anyhow::Result<()> {
#[test]
fn test_wal_checkpoint() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);");
// threshold is 1000 by default
let iterations = 1001_usize;
let conn = tmp_db.connect_limbo();
@@ -309,8 +303,7 @@ fn test_wal_checkpoint() -> anyhow::Result<()> {
#[test]
fn test_wal_restart() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);");
// threshold is 1000 by default
fn insert(i: usize, conn: &Arc<Connection>, tmp_db: &TempDatabase) -> anyhow::Result<()> {
@@ -355,7 +348,7 @@ fn test_wal_restart() -> anyhow::Result<()> {
#[test]
fn test_insert_after_big_blob() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE temp (t1 BLOB, t2 INTEGER)", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE temp (t1 BLOB, t2 INTEGER)");
let conn = tmp_db.connect_limbo();
conn.execute("insert into temp(t1) values (zeroblob (262144))")?;
@@ -371,7 +364,7 @@ fn test_write_delete_with_index() -> anyhow::Result<()> {
maybe_setup_tracing();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x PRIMARY KEY);", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x PRIMARY KEY);");
let conn = tmp_db.connect_limbo();
let list_query = "SELECT * FROM test";
@@ -425,8 +418,7 @@ fn test_update_with_index() -> anyhow::Result<()> {
maybe_setup_tracing();
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x REAL PRIMARY KEY, y TEXT);", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x REAL PRIMARY KEY, y TEXT);");
let conn = tmp_db.connect_limbo();
common::run_query(&tmp_db, &conn, "INSERT INTO test VALUES (1.0, 'foo')")?;
@@ -462,7 +454,7 @@ fn test_delete_with_index() -> anyhow::Result<()> {
maybe_setup_tracing();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (x UNIQUE)", true);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (x UNIQUE)");
let conn = tmp_db.connect_limbo();
common::run_query(&tmp_db, &conn, "INSERT INTO t VALUES (1), (2)")?;
@@ -478,7 +470,7 @@ fn test_delete_with_index() -> anyhow::Result<()> {
#[test]
fn test_update_regression() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE imaginative_baroja (blithesome_hall BLOB,remarkable_lester INTEGER,generous_balagun TEXT,ample_earth INTEGER,marvelous_khadzhiev BLOB,glowing_parissi TEXT,insightful_ryner BLOB)", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE imaginative_baroja (blithesome_hall BLOB,remarkable_lester INTEGER,generous_balagun TEXT,ample_earth INTEGER,marvelous_khadzhiev BLOB,glowing_parissi TEXT,insightful_ryner BLOB)");
let conn = tmp_db.connect_limbo();
conn.execute("INSERT INTO imaginative_baroja VALUES (X'617070726F61636861626C655F6F6D6164', 5581285929211692372, 'approachable_podur', -4145754929970306534, X'666F72747569746F75735F7368617270', 'sensible_amesly', X'636F6D70657469746976655F6669746368'), (X'6D6972746866756C5F686F6673746565', -8554670009677647372, 'shimmering_modkraftdk', 4993627046425025026, X'636F6E73696465726174655F63616765', 'breathtaking_boggs', X'616D617A696E675F73696D6F6E65'), (X'7669766163696F75735F7363687761727A', 5860599187854155616, 'sparkling_aurora', 3757552048117668067, X'756E697175655F6769617A', 'lovely_leroy', X'68617264776F726B696E675F6D696C6C6572'), (X'677265676172696F75735F7061657065', -488992130149088413, 'focused_brinker', 4503849242092922100, X'66756E6E795F6A616B736963', 'competitive_communications', X'657863656C6C656E745F7873696C656E74'), (X'7374756E6E696E675F74616E6E656E6261756D', -5634782647279946253, 'fabulous_crute', -3978009805517476564, X'72656C617865645F63617272796F7574', 'spellbinding_erkan', X'66756E6E795F646F626273'), (X'696D6167696E61746976655F746F6C6F6B6F6E6E696B6F7661', 4236471363502323025, 'excellent_wolke', 7606168469334609395, X'736C65656B5F6D6361666565', 'magnificent_riley', X'616D6961626C655F706173736164616B6973'), (X'77696C6C696E675F736872657665', 5048296470820985219, 'ambitious_jeppesen', 6961857167361512834, X'70617469656E745F6272696E6B6572', 'giving_kramm', X'726573706F6E7369626C655F7363686D696474'), (X'73656E7369626C655F6D757865726573', -5519194136843846790, 'frank_ruggero', 4354855935194921345, X'76697669645F63617365', 'focused_lovecruft', X'6D61676E69666963656E745F736B79')")?;
@@ -635,7 +627,7 @@ fn test_rollback_on_foreign_key_constraint_violation() -> anyhow::Result<()> {
#[test]
fn test_multiple_statements() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (x)", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (x)");
let conn = tmp_db.connect_limbo();
conn.execute("INSERT INTO t values(1); insert into t values(2);")?;
@@ -729,7 +721,7 @@ fn test_write_concurrent_connections() -> anyhow::Result<()> {
maybe_setup_tracing();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (x)", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t (x)");
let num_connections = 4;
let num_inserts_per_connection = 100;
let mut connections = vec![];
@@ -780,7 +772,7 @@ fn test_wal_bad_frame() -> anyhow::Result<()> {
maybe_setup_tracing();
let _ = env_logger::try_init();
let db_path = {
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t1 (x)", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t1 (x)");
let db_path = tmp_db.path.clone();
let conn = tmp_db.connect_limbo();
conn.execute("BEGIN")?;
@@ -872,20 +864,20 @@ fn test_read_wal_dumb_no_frames() -> anyhow::Result<()> {
maybe_setup_tracing();
let _ = env_logger::try_init();
let db_path = {
let tmp_db = TempDatabase::new_empty(false);
let tmp_db = TempDatabase::new_empty();
let conn = tmp_db.connect_limbo();
conn.close()?;
tmp_db.path.clone()
};
// Second connection must recover from the WAL file. Last checksum should be filled correctly.
{
let tmp_db = TempDatabase::new_with_existent(&db_path, false);
let tmp_db = TempDatabase::new_with_existent(&db_path);
let conn = tmp_db.connect_limbo();
conn.execute("CREATE TABLE t0 (x)")?;
conn.close()?;
}
{
let tmp_db = TempDatabase::new_with_existent(&db_path, false);
let tmp_db = TempDatabase::new_with_existent(&db_path);
let conn = tmp_db.connect_limbo();
conn.execute("INSERT INTO t0(x) VALUES (1)")?;
conn.close()?;
@@ -896,7 +888,7 @@ fn test_read_wal_dumb_no_frames() -> anyhow::Result<()> {
#[test]
fn test_insert_with_column_names() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE a(z)", false);
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE a(z)");
let conn = tmp_db.connect_limbo();
let result = conn.execute("INSERT INTO a VALUES (b.x)");
@@ -917,7 +909,7 @@ fn test_insert_with_column_names() -> anyhow::Result<()> {
#[test]
pub fn delete_search_op_ignore_nulls() {
let limbo = TempDatabase::new_empty(true);
let limbo = TempDatabase::new_empty();
let conn = limbo.db.connect().unwrap();
for sql in [
"CREATE TABLE t (id INTEGER PRIMARY KEY AUTOINCREMENT, c INT);",
@@ -938,7 +930,7 @@ pub fn delete_search_op_ignore_nulls() {
#[test]
pub fn delete_eq_correct() {
let limbo = TempDatabase::new_empty(true);
let limbo = TempDatabase::new_empty();
let conn = limbo.db.connect().unwrap();
for sql in [
"CREATE TABLE t (id INTEGER PRIMARY KEY AUTOINCREMENT, c INT);",
@@ -966,7 +958,7 @@ pub fn delete_eq_correct() {
#[test]
pub fn upsert_conflict() {
let limbo = TempDatabase::new_empty(true);
let limbo = TempDatabase::new_empty();
let conn = limbo.db.connect().unwrap();
for sql in [
"CREATE TABLE t (id INTEGER PRIMARY KEY AUTOINCREMENT, c INT UNIQUE, value INT);",

View File

@@ -7,7 +7,7 @@ use turso_core::Row;
fn test_per_page_checksum() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let db_name = format!("test-{}.db", rng().next_u32());
let tmp_db = TempDatabase::new(&db_name, false);
let tmp_db = TempDatabase::new(&db_name);
let db_path = tmp_db.path.clone();
{
@@ -65,7 +65,7 @@ fn test_per_page_checksum() -> anyhow::Result<()> {
fn test_checksum_detects_corruption() {
let _ = env_logger::try_init();
let db_name = format!("test-corruption-{}.db", rng().next_u32());
let tmp_db = TempDatabase::new(&db_name, false);
let tmp_db = TempDatabase::new(&db_name);
let db_path = tmp_db.path.clone();
// Create and populate the database
@@ -98,7 +98,7 @@ fn test_checksum_detects_corruption() {
}
{
let existing_db = TempDatabase::new_with_existent(&db_path, false);
let existing_db = TempDatabase::new_with_existent(&db_path);
// this query should fail and result in panic because db is now corrupted
let should_panic = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let conn = existing_db.connect_limbo();

View File

@@ -9,7 +9,7 @@ use turso_core::{Connection, LimboError, Result, StepResult};
#[test]
fn test_wal_checkpoint_result() -> Result<()> {
maybe_setup_tracing();
let tmp_db = TempDatabase::new("test_wal.db", false);
let tmp_db = TempDatabase::new("test_wal.db");
let conn = tmp_db.connect_limbo();
conn.execute("CREATE TABLE t1 (id text);")?;
@@ -36,7 +36,7 @@ fn test_wal_checkpoint_result() -> Result<()> {
#[ignore = "ignored for now because it's flaky"]
fn test_wal_1_writer_1_reader() -> Result<()> {
maybe_setup_tracing();
let tmp_db = Arc::new(Mutex::new(TempDatabase::new("test_wal.db", false)));
let tmp_db = Arc::new(Mutex::new(TempDatabase::new("test_wal.db")));
let db = tmp_db.lock().unwrap().limbo_database(false);
{