Merge 'core: Switch to parking_lot::Mutex' from Pekka Enberg
It's faster and we eliminate a bunch of unwrap() calls. Closes #3993
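For context on why the unwrap() calls disappear, here is a minimal sketch (not taken from the patch) contrasting the two APIs: `std::sync::Mutex::lock()` returns a `Result` because of lock poisoning, while `parking_lot::Mutex::lock()` returns the guard directly and `try_lock()` returns an `Option` instead of a `Result`, which is also why `is_ok()` becomes `is_some()` further down in the diff.

```rust
// Illustration only, not part of the patch: the API difference that removes unwrap().
use std::sync::Arc;

fn main() {
    // std::sync::Mutex tracks poisoning, so lock() yields Result<MutexGuard, PoisonError>.
    let std_counter = Arc::new(std::sync::Mutex::new(0u32));
    *std_counter.lock().unwrap() += 1; // unwrap() required

    // parking_lot::Mutex has no poisoning, so lock() yields the guard directly.
    let pl_counter = Arc::new(parking_lot::Mutex::new(0u32));
    *pl_counter.lock() += 1; // no unwrap()

    // try_lock(): Result in std, Option in parking_lot.
    assert!(std_counter.try_lock().is_ok());
    assert!(pl_counter.try_lock().is_some());
}
```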
@@ -15,9 +15,10 @@ use crate::{vtab::VirtualTable, SymbolTable};
 use crate::{LimboError, IO};
 #[cfg(feature = "fs")]
 pub use dynamic::{add_builtin_vfs_extensions, add_vfs_module, list_vfs_modules, VfsMod};
+use parking_lot::Mutex;
 use std::{
     ffi::{c_char, c_void, CStr, CString},
-    sync::{Arc, Mutex},
+    sync::Arc,
 };
 use turso_ext::{
     ExtensionApi, InitAggFunction, ResultCode, ScalarFunction, VTabKind, VTabModuleImpl,
@@ -65,9 +66,7 @@ pub(crate) unsafe extern "C" fn register_vtab_module(
         // Use the schema handler to insert the table
         let table = Arc::new(Table::Virtual(vtab));
         let mutex = &*(ext_ctx.schema as *mut Mutex<Arc<Schema>>);
-        let Ok(guard) = mutex.lock() else {
-            return ResultCode::Error;
-        };
+        let guard = mutex.lock();
         let schema_ptr = Arc::as_ptr(&*guard) as *mut Schema;
         (*schema_ptr).tables.insert(name_str, table);
     } else {
@@ -10,9 +10,10 @@ use crate::incremental::persistence::{ReadRecord, WriteRow};
 use crate::storage::btree::CursorTrait;
 use crate::types::{IOResult, ImmutableRecord, SeekKey, SeekOp, SeekResult, ValueRef};
 use crate::{return_and_restore_if_io, return_if_io, LimboError, Result, Value};
+use parking_lot::Mutex;
 use std::collections::{BTreeMap, HashMap, HashSet};
 use std::fmt::{self, Display};
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 
 // Architecture of the Aggregate Operator
 // ========================================
@@ -1492,7 +1493,7 @@ impl AggregateOperator {
         // Process each change in the delta
         for (row, weight) in delta.changes.iter() {
             if let Some(tracker) = &self.tracker {
-                tracker.lock().unwrap().record_aggregation();
+                tracker.lock().record_aggregation();
             }
 
             // Extract group key
@@ -9,7 +9,8 @@ use crate::{
     types::{IOResult, SeekKey, SeekOp, SeekResult, Value},
     LimboError, Pager, Result,
 };
-use std::sync::{Arc, Mutex};
+use parking_lot::Mutex;
+use std::sync::Arc;
 
 /// State machine for seek operations
 #[derive(Debug)]
@@ -89,7 +90,7 @@ impl MaterializedViewCursor {
         }
 
         // Get the view and the current transaction state
-        let mut view_guard = self.view.lock().unwrap();
+        let mut view_guard = self.view.lock();
         let table_deltas = self.tx_state.get_table_deltas();
 
         // Process the deltas through the circuit to get materialized changes
@@ -350,7 +351,7 @@ mod tests {
         ))?;
 
         // Get the view's root page
-        let view = view_mutex.lock().unwrap();
+        let view = view_mutex.lock();
         let root_page = view.get_root_page();
         if root_page == 0 {
             return Err(crate::LimboError::InternalError(
@@ -8,7 +8,8 @@ use crate::incremental::operator::{
 };
 use crate::types::IOResult;
 use crate::{Result, Value};
-use std::sync::{Arc, Mutex};
+use parking_lot::Mutex;
+use std::sync::Arc;
 
 /// Filter predicate for filtering rows
 #[derive(Debug, Clone)]
@@ -260,7 +261,7 @@ impl IncrementalOperator for FilterOperator {
         // Process the delta through the filter
         for (row, weight) in delta.changes {
             if let Some(tracker) = &self.tracker {
-                tracker.lock().unwrap().record_filter();
+                tracker.lock().record_filter();
             }
 
             // Only pass through rows that satisfy the filter predicate
@@ -292,7 +293,7 @@ impl IncrementalOperator for FilterOperator {
         // Only pass through and track rows that satisfy the filter predicate
         for (row, weight) in deltas.left.changes {
             if let Some(tracker) = &self.tracker {
-                tracker.lock().unwrap().record_filter();
+                tracker.lock().record_filter();
             }
 
             // Only track and output rows that pass the filter
@@ -7,7 +7,8 @@ use crate::incremental::operator::{
 };
 use crate::types::IOResult;
 use crate::Result;
-use std::sync::{Arc, Mutex};
+use parking_lot::Mutex;
+use std::sync::Arc;
 
 /// Input operator - source of data for the circuit
 /// Represents base relations/tables that receive external updates
@@ -9,7 +9,8 @@ use crate::incremental::persistence::WriteRow;
 use crate::storage::btree::CursorTrait;
 use crate::types::{IOResult, ImmutableRecord, SeekKey, SeekOp, SeekResult};
 use crate::{return_and_restore_if_io, return_if_io, Result, Value};
-use std::sync::{Arc, Mutex};
+use parking_lot::Mutex;
+use std::sync::Arc;
 
 #[derive(Debug, Clone, PartialEq)]
 pub enum JoinType {
@@ -499,7 +500,7 @@ impl JoinOperator {
 
             if Self::sql_keys_equal(&left_key, &right_key) {
                 if let Some(tracker) = &self.tracker {
-                    tracker.lock().unwrap().record_join_lookup();
+                    tracker.lock().record_join_lookup();
                 }
 
                 // Combine the rows
@@ -7,10 +7,11 @@ use crate::incremental::operator::{
 };
 use crate::types::IOResult;
 use crate::Result;
+use parking_lot::Mutex;
 use std::collections::{hash_map::DefaultHasher, HashMap};
 use std::fmt::{self, Display};
 use std::hash::{Hash, Hasher};
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 
 /// How the merge operator should handle rowids when combining deltas
 #[derive(Debug, Clone)]
@@ -15,8 +15,9 @@ use crate::schema::{Index, IndexColumn};
 use crate::storage::btree::BTreeCursor;
 use crate::types::IOResult;
 use crate::Result;
+use parking_lot::Mutex;
 use std::fmt::Debug;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 
 /// Struct to hold both table and index cursors for DBSP state operations
 pub struct DbspStateCursors {
@@ -263,7 +264,8 @@ mod tests {
     use crate::util::IOExt;
     use crate::Value;
    use crate::{Database, MemoryIO, IO};
-    use std::sync::{Arc, Mutex};
+    use parking_lot::Mutex;
+    use std::sync::Arc;
 
     /// Create a test pager for operator tests with both table and index
     fn create_test_pager() -> (std::sync::Arc<crate::Pager>, i64, i64) {
@@ -702,7 +704,7 @@ mod tests {
             .unwrap();
 
         // Reset tracker for delta processing
-        tracker.lock().unwrap().aggregation_updates = 0;
+        tracker.lock().aggregation_updates = 0;
 
         // Add one item to category 'cat_0'
         let mut delta = Delta::new();
@@ -720,7 +722,7 @@ mod tests {
             .block(|| agg.commit((&delta).into(), &mut cursors))
             .unwrap();
 
-        assert_eq!(tracker.lock().unwrap().aggregation_updates, 1);
+        assert_eq!(tracker.lock().aggregation_updates, 1);
 
         // Check the final state - cat_0 should now have count 11
         let final_state = get_current_state_from_btree(&agg, &pager, &mut cursors);
@@ -732,7 +734,7 @@ mod tests {
         assert_eq!(cat_0.0.values[1], Value::Integer(11));
 
         // Verify incremental behavior - we process the delta twice (eval + commit)
-        let t = tracker.lock().unwrap();
+        let t = tracker.lock();
         assert_incremental(&t, 2, 101);
     }
 
@@ -804,7 +806,7 @@ mod tests {
         assert_eq!(widget_sum.values[1], Value::Integer(250));
 
         // Reset tracker
-        tracker.lock().unwrap().aggregation_updates = 0;
+        tracker.lock().aggregation_updates = 0;
 
         // Add sale of 50 for Widget
         let mut delta = Delta::new();
@@ -822,7 +824,7 @@ mod tests {
             .block(|| agg.commit((&delta).into(), &mut cursors))
             .unwrap();
 
-        assert_eq!(tracker.lock().unwrap().aggregation_updates, 1);
+        assert_eq!(tracker.lock().aggregation_updates, 1);
 
         // Check final state - Widget should now be 300 (250 + 50)
         let final_state = get_current_state_from_btree(&agg, &pager, &mut cursors);
@@ -8,7 +8,8 @@ use crate::incremental::operator::{
 };
 use crate::types::IOResult;
 use crate::{Connection, Database, Result, Value};
-use std::sync::{atomic::Ordering, Arc, Mutex};
+use parking_lot::Mutex;
+use std::sync::{atomic::Ordering, Arc};
 
 #[derive(Debug, Clone)]
 pub struct ProjectColumn {
@@ -124,7 +125,7 @@ impl IncrementalOperator for ProjectOperator {
 
         for (row, weight) in delta.changes {
             if let Some(tracker) = &self.tracker {
-                tracker.lock().unwrap().record_project();
+                tracker.lock().record_project();
             }
 
             let projected = self.project_values(&row.values);
@@ -152,7 +153,7 @@ impl IncrementalOperator for ProjectOperator {
         // Commit the delta to our internal state and build output
         for (row, weight) in &deltas.left.changes {
             if let Some(tracker) = &self.tracker {
-                tracker.lock().unwrap().record_project();
+                tracker.lock().record_project();
             }
             let projected = self.project_values(&row.values);
             let projected_row = HashableRow::new(row.rowid, projected);
@@ -7,11 +7,12 @@ use crate::translate::logical::LogicalPlanBuilder;
 use crate::types::{IOResult, Value};
 use crate::util::{extract_view_columns, ViewColumnSchema};
 use crate::{return_if_io, LimboError, Pager, Result, Statement};
+use parking_lot::Mutex;
 use std::cell::RefCell;
 use std::collections::{HashMap, HashSet};
 use std::fmt;
 use std::rc::Rc;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use turso_parser::ast;
 use turso_parser::{
     ast::{Cmd, Stmt},
@@ -3,10 +3,11 @@ use crate::turso_assert;
 use crate::{io::clock::DefaultClock, Result};
 
 use crate::io::clock::Instant;
+use parking_lot::Mutex;
 use std::{
     cell::{Cell, UnsafeCell},
     collections::{BTreeMap, HashMap},
-    sync::{Arc, Mutex},
+    sync::Arc,
 };
 use tracing::debug;
 
@@ -42,7 +43,7 @@ impl Clock for MemoryIO {
 
 impl IO for MemoryIO {
     fn open_file(&self, path: &str, flags: OpenFlags, _direct: bool) -> Result<Arc<dyn File>> {
-        let mut files = self.files.lock().unwrap();
+        let mut files = self.files.lock();
         if !files.contains_key(path) && !flags.contains(OpenFlags::Create) {
             return Err(
                 crate::error::CompletionError::IOError(std::io::ErrorKind::NotFound).into(),
@@ -61,7 +62,7 @@ impl IO for MemoryIO {
         Ok(files.get(path).unwrap().clone())
     }
     fn remove_file(&self, path: &str) -> Result<()> {
-        let mut files = self.files.lock().unwrap();
+        let mut files = self.files.lock();
         files.remove(path);
         Ok(())
     }
core/lib.rs (28 changed lines)
@@ -67,7 +67,7 @@ pub use io::{
     Buffer, Completion, CompletionType, File, GroupCompletion, MemoryIO, OpenFlags, PlatformIO,
     SyscallIO, WriteCompletion, IO,
 };
-use parking_lot::RwLock;
+use parking_lot::{Mutex, RwLock};
 use rustc_hash::FxHashMap;
 use schema::Schema;
 use std::collections::HashSet;
@@ -82,7 +82,7 @@ use std::{
     rc::Rc,
     sync::{
         atomic::{AtomicBool, AtomicI32, AtomicI64, AtomicIsize, AtomicU16, AtomicUsize, Ordering},
-        Arc, LazyLock, Mutex, Weak,
+        Arc, LazyLock, Weak,
     },
     time::Duration,
 };
@@ -276,7 +276,7 @@ impl fmt::Debug for Database {
         };
         debug_struct.field("mv_store", &mv_store_status);
 
-        let init_lock_status = if self.init_lock.try_lock().is_ok() {
+        let init_lock_status = if self.init_lock.try_lock().is_some() {
             "unlocked"
         } else {
             "locked"
@@ -380,7 +380,7 @@ impl Database {
             );
         }
 
-        let mut registry = DATABASE_MANAGER.lock().unwrap();
+        let mut registry = DATABASE_MANAGER.lock();
 
        let canonical_path = std::fs::canonicalize(path)
            .ok()
@@ -623,7 +623,7 @@ impl Database {
        let conn = Arc::new(Connection {
            db: self.clone(),
            pager: ArcSwap::new(pager),
-           schema: RwLock::new(self.schema.lock().unwrap().clone()),
+           schema: RwLock::new(self.schema.lock().clone()),
            database_schemas: RwLock::new(FxHashMap::default()),
            auto_commit: AtomicBool::new(true),
            transaction_state: AtomicTransactionState::new(TransactionState::None),
@@ -900,17 +900,17 @@ impl Database {
 
     #[inline]
     pub(crate) fn with_schema_mut<T>(&self, f: impl FnOnce(&mut Schema) -> Result<T>) -> Result<T> {
-        let mut schema_ref = self.schema.lock().unwrap();
+        let mut schema_ref = self.schema.lock();
         let schema = Arc::make_mut(&mut *schema_ref);
         f(schema)
     }
     pub(crate) fn clone_schema(&self) -> Arc<Schema> {
-        let schema = self.schema.lock().unwrap();
+        let schema = self.schema.lock();
         schema.clone()
     }
 
     pub(crate) fn update_schema_if_newer(&self, another: Arc<Schema>) {
-        let mut schema = self.schema.lock().unwrap();
+        let mut schema = self.schema.lock();
         if schema.schema_version < another.schema_version {
             tracing::debug!(
                 "DB schema is outdated: {} < {}",
@@ -1352,7 +1352,7 @@ impl Connection {
        };
        pager.end_read_tx();
 
-       let db_schema_version = self.db.schema.lock().unwrap().schema_version;
+       let db_schema_version = self.db.schema.lock().schema_version;
        tracing::debug!(
            "path: {}, db_schema_version={} vs on_disk_schema_version={}",
            self.db.path,
@@ -1676,7 +1676,7 @@ impl Connection {
 
     pub fn maybe_update_schema(&self) {
         let current_schema_version = self.schema.read().schema_version;
-        let schema = self.db.schema.lock().unwrap();
+        let schema = self.db.schema.lock();
         if matches!(self.get_tx_state(), TransactionState::None)
             && current_schema_version != schema.schema_version
         {
@@ -2199,7 +2199,7 @@ impl Connection {
             )));
         }
 
-        let use_indexes = self.db.schema.lock().unwrap().indexes_enabled();
+        let use_indexes = self.db.schema.lock().indexes_enabled();
         let use_mvcc = self.db.mv_store.is_some();
         let use_views = self.db.experimental_views_enabled();
         let use_strict = self.db.experimental_strict_enabled();
@@ -2316,11 +2316,7 @@ impl Connection {
                 .get(&database_id)
                 .expect("Database ID should be valid after resolve_database_id");
 
-            let schema = db
-                .schema
-                .lock()
-                .expect("Schema lock should not fail")
-                .clone();
+            let schema = db.schema.lock().clone();
 
             // Cache the schema for future use
             schemas.insert(database_id, schema.clone());
@@ -541,14 +541,8 @@ impl<Clock: LogicalClock> CheckpointStateMachine<Clock> {
             .io
             .block(|| {
                 self.pager.with_header_mut(|header| {
-                    header.schema_cookie = self
-                        .connection
-                        .db
-                        .schema
-                        .lock()
-                        .unwrap()
-                        .schema_version
-                        .into();
+                    header.schema_cookie =
+                        self.connection.db.schema.lock().schema_version.into();
                     *header
                 })
             })
@@ -1057,7 +1057,7 @@ impl<Clock: LogicalClock> MvStore<Clock> {
         // Make sure we capture all the schema changes that were deserialized from the logical log.
         bootstrap_conn.promote_to_regular_connection();
         bootstrap_conn.reparse_schema()?;
-        *bootstrap_conn.db.schema.lock().unwrap() = bootstrap_conn.schema.read().clone();
+        *bootstrap_conn.db.schema.lock() = bootstrap_conn.schema.read().clone();
 
         Ok(())
     }
@@ -1644,9 +1644,7 @@ impl<Clock: LogicalClock> MvStore<Clock> {
             }
         }
 
-        if connection.schema.read().schema_version
-            > connection.db.schema.lock().unwrap().schema_version
-        {
+        if connection.schema.read().schema_version > connection.db.schema.lock().schema_version {
             // Connection made schema changes during tx and rolled back -> revert connection-local schema.
             *connection.schema.write() = connection.db.clone_schema();
         }
@@ -74,7 +74,7 @@ impl MvccTestDbNoConn {
         // First let's clear any entries in database manager in order to force restart.
         // If not, we will load the same database instance again.
         {
-            let mut manager = DATABASE_MANAGER.lock().unwrap();
+            let mut manager = DATABASE_MANAGER.lock();
             manager.clear();
         }
 
@@ -127,10 +127,10 @@ use crate::{
 };
 use crate::{util::normalize_ident, Result};
 use core::fmt;
+use parking_lot::Mutex;
 use std::collections::{HashMap, HashSet, VecDeque};
 use std::ops::Deref;
 use std::sync::Arc;
-use std::sync::Mutex;
 use tracing::trace;
 use turso_parser::ast::{
     self, ColumnDefinition, Expr, InitDeferredPred, Literal, RefAct, SortOrder, TableOptions,
@@ -7799,14 +7799,7 @@ mod tests {
         vdbe::Register,
         BufferPool, Completion, Connection, IOContext, StepResult, WalFile, WalFileShared,
     };
-    use std::{
-        cell::RefCell,
-        collections::HashSet,
-        mem::transmute,
-        ops::Deref,
-        rc::Rc,
-        sync::{Arc, Mutex},
-    };
+    use std::{cell::RefCell, collections::HashSet, mem::transmute, ops::Deref, rc::Rc, sync::Arc};
 
     use tempfile::TempDir;
 
@@ -9082,7 +9075,7 @@ mod tests {
                 Arc::new(parking_lot::RwLock::new(PageCache::new(10))),
                 buffer_pool,
                 Arc::new(AtomicDbState::new(DbState::Uninitialized)),
-                Arc::new(Mutex::new(())),
+                Arc::new(parking_lot::Mutex::new(())),
             )
             .unwrap(),
         );
@@ -15,7 +15,7 @@ use crate::{
     IOResult, LimboError, Result, TransactionState,
 };
 use crate::{io_yield_one, CompletionError, IOContext, OpenFlags, IO};
-use parking_lot::RwLock;
+use parking_lot::{Mutex, RwLock};
 use roaring::RoaringBitmap;
 use std::cell::{RefCell, UnsafeCell};
 use std::collections::BTreeSet;
@@ -23,7 +23,7 @@ use std::rc::Rc;
 use std::sync::atomic::{
     AtomicBool, AtomicU16, AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering,
 };
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use tracing::{instrument, trace, Level};
 use turso_macros::AtomicEnum;
 
@@ -1423,7 +1423,7 @@ impl Pager {
     #[instrument(skip_all, level = Level::DEBUG)]
     pub fn maybe_allocate_page1(&self) -> Result<IOResult<()>> {
         if !self.db_state.get().is_initialized() {
-            if let Ok(_lock) = self.init_lock.try_lock() {
+            if let Some(_lock) = self.init_lock.try_lock() {
                 match (self.db_state.get(), self.allocating_page1()) {
                     // In case of being empty or (allocating and this connection is performing allocation) then allocate the first page
                     (DbState::Uninitialized, false) | (DbState::Initializing, true) => {
@@ -1,10 +1,10 @@
 #![allow(clippy::not_unsafe_ptr_arg_deref)]
 
+use parking_lot::Mutex;
 use rustc_hash::{FxHashMap, FxHashSet};
 use std::array;
 use std::borrow::Cow;
 use std::collections::BTreeMap;
-use std::sync::Mutex;
 use strum::EnumString;
 use tracing::{instrument, Level};
 
@@ -1254,7 +1254,7 @@ impl Wal for WalFile {
             );
             let page = unsafe { std::slice::from_raw_parts(page_ptr as *mut u8, page_len) };
             if buf.as_slice() != page {
-                *conflict.lock().unwrap() = true;
+                *conflict.lock() = true;
             }
         }
         });
@@ -1271,7 +1271,7 @@ impl Wal for WalFile {
                 &self.io_ctx.read(),
             )?;
             self.io.wait_for_completion(c)?;
-            return if *conflict.lock().unwrap() {
+            return if *conflict.lock() {
                 Err(LimboError::Conflict(format!(
                     "frame content differs from the WAL: frame_id={frame_id}"
                 )))
@@ -2516,10 +2516,10 @@ pub mod test {
         CheckpointMode, CheckpointResult, Completion, Connection, Database, LimboError, PlatformIO,
         StepResult, Wal, WalFile, WalFileShared, IO,
     };
-    use parking_lot::RwLock;
+    use parking_lot::{Mutex, RwLock};
     #[cfg(unix)]
     use std::os::unix::fs::MetadataExt;
-    use std::sync::{atomic::Ordering, Arc, Mutex};
+    use std::sync::{atomic::Ordering, Arc};
     #[allow(clippy::arc_with_non_send_sync)]
     pub(crate) fn get_database() -> (Arc<Database>, std::path::PathBuf) {
         let mut path = tempfile::tempdir().unwrap().keep();
@@ -2550,11 +2550,11 @@ pub mod test {
         let _ = wal_file.truncate(
             WAL_HEADER_SIZE as u64,
             Completion::new_trunc(move |_| {
-                *_done.lock().unwrap() = true;
+                *_done.lock() = true;
             }),
         );
         assert!(wal_file.size().unwrap() == WAL_HEADER_SIZE as u64);
-        assert!(*done.lock().unwrap());
+        assert!(*done.lock());
     }
 
     #[test]
@@ -491,7 +491,7 @@ fn parse_table(
     }
 
     // Check if this materialized view has persistent storage
-    let view_guard = view.lock().unwrap();
+    let view_guard = view.lock();
     let root_page = view_guard.get_root_page();
 
     if root_page == 0 {
@@ -553,7 +553,7 @@ fn query_pragma(
             if let Some(table) = schema.get_table(&name) {
                 emit_columns_for_table_info(&mut program, table.columns(), base_reg);
             } else if let Some(view_mutex) = schema.get_materialized_view(&name) {
-                let view = view_mutex.lock().unwrap();
+                let view = view_mutex.lock();
                 let flat_columns = view.column_schema.flat_columns();
                 emit_columns_for_table_info(&mut program, &flat_columns, base_reg);
             } else if let Some(view) = schema.get_view(&name) {
@@ -14,12 +14,9 @@ use crate::{
     LimboError, OpenFlags, Result, Statement, StepResult, SymbolTable,
 };
 use crate::{Connection, MvStore, IO};
+use parking_lot::Mutex;
 use std::sync::atomic::AtomicU8;
-use std::{
-    collections::HashMap,
-    rc::Rc,
-    sync::{Arc, Mutex},
-};
+use std::{collections::HashMap, rc::Rc, sync::Arc};
 use tracing::{instrument, Level};
 use turso_macros::match_ignore_ascii_case;
 use turso_parser::ast::{
@@ -143,7 +143,7 @@ pub enum CursorType {
     VirtualTable(Arc<VirtualTable>),
     MaterializedView(
         Arc<BTreeTable>,
-        Arc<std::sync::Mutex<crate::incremental::view::IncrementalView>>,
+        Arc<parking_lot::Mutex<crate::incremental::view::IncrementalView>>,
     ),
 }
 
@@ -47,7 +47,7 @@ use std::ops::DerefMut;
 use std::{
     borrow::BorrowMut,
     num::NonZero,
-    sync::{atomic::Ordering, Arc, Mutex},
+    sync::{atomic::Ordering, Arc},
 };
 use turso_macros::match_ignore_ascii_case;
 
@@ -75,7 +75,7 @@ use super::{
     insn::{Cookie, RegisterOrLiteral},
     CommitState,
 };
-use parking_lot::RwLock;
+use parking_lot::{Mutex, RwLock};
 use turso_parser::ast::{self, ForeignKeyClause, Name, ResolveType};
 use turso_parser::parser::Parser;
 
@@ -966,7 +966,7 @@ pub fn op_open_read(
             let cursor = maybe_promote_to_mvcc_cursor(btree_cursor)?;
 
             // Get the view name and look up or create its transaction state
-            let view_name = view_mutex.lock().unwrap().name().to_string();
+            let view_name = view_mutex.lock().name().to_string();
             let tx_state = program
                 .connection
                 .view_transaction_states
@@ -7902,7 +7902,7 @@ pub fn op_populate_materialized_views(
     for (view_name, _root_page, cursor_id) in view_info {
         let schema = conn.schema.read();
         if let Some(view) = schema.get_materialized_view(&view_name) {
-            let mut view = view.lock().unwrap();
+            let mut view = view.lock();
             // Drop the schema borrow before calling populate_from_table
             drop(schema);
 
@@ -960,7 +960,7 @@ impl Program {
         let mut views = Vec::new();
         for view_name in self.connection.view_transaction_states.get_view_names() {
             if let Some(view_mutex) = schema.get_materialized_view(&view_name) {
-                let view = view_mutex.lock().unwrap();
+                let view = view_mutex.lock();
                 let root_page = view.get_root_page();
 
                 // Materialized views should always have storage (root_page != 0)
@@ -1002,7 +1002,7 @@ impl Program {
 
         let schema = self.connection.schema.read();
         if let Some(view_mutex) = schema.get_materialized_view(view_name) {
-            let mut view = view_mutex.lock().unwrap();
+            let mut view = view_mutex.lock();
 
             // Create a DeltaSet from the per-table deltas
             let mut delta_set = crate::incremental::compiler::DeltaSet::new();