diff --git a/bindings/javascript/src/lib.rs b/bindings/javascript/src/lib.rs
index d92661269..2c1f08c4a 100644
--- a/bindings/javascript/src/lib.rs
+++ b/bindings/javascript/src/lib.rs
@@ -24,7 +24,6 @@ use std::{
 };
 use tracing_subscriber::filter::LevelFilter;
 use tracing_subscriber::fmt::format::FmtSpan;
-use turso_core::storage::database::DatabaseFile;
 
 /// Step result constants
 const STEP_ROW: u32 = 1;
@@ -176,15 +175,9 @@ fn connect_sync(db: &DatabaseInner) -> napi::Result<()> {
         }
     }
     let io = &db.io;
-    let file = io
-        .open_file(&db.path, flags, false)
-        .map_err(|e| to_generic_error(&format!("failed to open file {}", db.path), e))?;
-
-    let db_file = DatabaseFile::new(file);
-    let db_core = turso_core::Database::open_with_flags(
+    let db_core = turso_core::Database::open_file_with_flags(
         io.clone(),
         &db.path,
-        db_file,
         flags,
         turso_core::DatabaseOpts::new()
             .with_mvcc(false)
diff --git a/core/lib.rs b/core/lib.rs
index 85088ab46..f0100bbd6 100644
--- a/core/lib.rs
+++ b/core/lib.rs
@@ -230,7 +230,7 @@ static DATABASE_MANAGER: LazyLock>>> =
 pub struct Database {
     mv_store: Option<Arc<MvStore>>,
     schema: Mutex<Arc<Schema>>,
-    db_file: DatabaseFile,
+    db_file: Arc<dyn DatabaseStorage>,
     path: String,
     wal_path: String,
     pub io: Arc<dyn IO>,
@@ -331,7 +331,7 @@ impl Database {
         encryption_opts: Option<EncryptionOpts>,
     ) -> Result<Arc<Database>> {
         let file = io.open_file(path, flags, true)?;
-        let db_file = DatabaseFile::new(file);
+        let db_file = Arc::new(DatabaseFile::new(file));
         Self::open_with_flags(io, path, db_file, flags, opts, encryption_opts)
     }
 
@@ -339,7 +339,7 @@ impl Database {
     pub fn open(
         io: Arc<dyn IO>,
         path: &str,
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         enable_mvcc: bool,
         enable_indexes: bool,
     ) -> Result<Arc<Database>> {
@@ -359,7 +359,7 @@ impl Database {
     pub fn open_with_flags(
         io: Arc<dyn IO>,
         path: &str,
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         flags: OpenFlags,
         opts: DatabaseOpts,
         encryption_opts: Option<EncryptionOpts>,
@@ -407,7 +407,7 @@ impl Database {
         io: Arc<dyn IO>,
         path: &str,
         wal_path: &str,
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         flags: OpenFlags,
         opts: DatabaseOpts,
         encryption_opts: Option<EncryptionOpts>,
@@ -428,7 +428,7 @@ impl Database {
         io: Arc<dyn IO>,
         path: &str,
         wal_path: &str,
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         flags: OpenFlags,
         opts: DatabaseOpts,
         encryption_opts: Option<EncryptionOpts>,
diff --git a/core/storage/pager.rs b/core/storage/pager.rs
index 0f8f3d783..e809673bb 100644
--- a/core/storage/pager.rs
+++ b/core/storage/pager.rs
@@ -1,4 +1,3 @@
-use crate::storage::database::DatabaseFile;
 use crate::storage::subjournal::Subjournal;
 use crate::storage::wal::IOV_MAX;
 use crate::storage::{
@@ -502,7 +501,7 @@ impl Savepoint {
 /// transaction management.
 pub struct Pager {
     /// Source of the database pages.
-    pub db_file: DatabaseFile,
+    pub db_file: Arc<dyn DatabaseStorage>,
     /// The write-ahead log (WAL) for the database.
     /// in-memory databases, ephemeral tables and ephemeral indexes do not have a WAL.
     pub(crate) wal: Option>>,
@@ -619,7 +618,7 @@ enum FreePageState {
 
 impl Pager {
     pub fn new(
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         wal: Option>>,
         io: Arc<dyn IO>,
         page_cache: Arc<RwLock<DumbLruPageCache>>,
@@ -1595,7 +1594,7 @@ impl Pager {
         io_ctx: &IOContext,
     ) -> Result<Completion> {
         sqlite3_ondisk::begin_read_page(
-            self.db_file.clone(),
+            self.db_file.as_ref(),
            self.buffer_pool.clone(),
             page,
             page_idx,
@@ -1963,7 +1962,7 @@ impl Pager {
             }
             CommitState::SyncDbFile => {
                 let sync_result =
-                    sqlite3_ondisk::begin_sync(self.db_file.clone(), self.syncing.clone());
+                    sqlite3_ondisk::begin_sync(self.db_file.as_ref(), self.syncing.clone());
                 self.commit_info
                     .write()
                     .completions
@@ -2100,7 +2099,8 @@ impl Pager {
                 *self.checkpoint_state.write() = CheckpointState::SyncDbFile { res };
             }
             CheckpointState::SyncDbFile { res } => {
-                let c = sqlite3_ondisk::begin_sync(self.db_file.clone(), self.syncing.clone())?;
+                let c =
+                    sqlite3_ondisk::begin_sync(self.db_file.as_ref(), self.syncing.clone())?;
                 *self.checkpoint_state.write() = CheckpointState::CheckpointDone { res };
                 io_yield_one!(c);
             }
@@ -3110,8 +3110,9 @@ mod ptrmap_tests {
     // Helper to create a Pager for testing
     fn test_pager_setup(page_size: u32, initial_db_pages: u32) -> Pager {
         let io: Arc<dyn IO> = Arc::new(MemoryIO::new());
-        let db_file: DatabaseFile =
-            DatabaseFile::new(io.open_file("test.db", OpenFlags::Create, true).unwrap());
+        let db_file: Arc<dyn DatabaseStorage> = Arc::new(DatabaseFile::new(
+            io.open_file("test.db", OpenFlags::Create, true).unwrap(),
+        ));
 
         // Construct interfaces for the pager
         let pages = initial_db_pages + 10;
diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs
index c83131983..4a1e4e407 100644
--- a/core/storage/sqlite3_ondisk.rs
+++ b/core/storage/sqlite3_ondisk.rs
@@ -58,7 +58,7 @@ use crate::storage::btree::offset::{
 };
 use crate::storage::btree::{payload_overflow_threshold_max, payload_overflow_threshold_min};
 use crate::storage::buffer_pool::BufferPool;
-use crate::storage::database::{DatabaseFile, DatabaseStorage, EncryptionOrChecksum};
+use crate::storage::database::{DatabaseStorage, EncryptionOrChecksum};
 use crate::storage::pager::Pager;
 use crate::storage::wal::READMARK_NOT_USED;
 use crate::types::{SerialType, SerialTypeKind, TextSubtype, ValueRef};
@@ -900,7 +900,7 @@ impl PageContent {
 /// if allow_empty_read is set, than empty read will be raise error for the page, but will not panic
 #[instrument(skip_all, level = Level::DEBUG)]
 pub fn begin_read_page(
-    db_file: DatabaseFile,
+    db_file: &dyn DatabaseStorage,
     buffer_pool: Arc<BufferPool>,
     page: PageRef,
     page_idx: usize,
@@ -1077,7 +1077,7 @@ pub fn write_pages_vectored(
 }
 
 #[instrument(skip_all, level = Level::DEBUG)]
-pub fn begin_sync(db_file: DatabaseFile, syncing: Arc<AtomicBool>) -> Result<Completion> {
+pub fn begin_sync(db_file: &dyn DatabaseStorage, syncing: Arc<AtomicBool>) -> Result<Completion> {
     assert!(!syncing.load(Ordering::SeqCst));
     syncing.store(true, Ordering::SeqCst);
     let completion = Completion::new_sync(move |_| {
diff --git a/core/vdbe/execute.rs b/core/vdbe/execute.rs
index 1ddb21553..a4f3c7c0f 100644
--- a/core/vdbe/execute.rs
+++ b/core/vdbe/execute.rs
@@ -37,7 +37,7 @@ use crate::{
     },
     translate::emitter::TransactionMode,
 };
-use crate::{get_cursor, CheckpointMode, Connection, MvCursor};
+use crate::{get_cursor, CheckpointMode, Connection, DatabaseStorage, MvCursor};
 use std::any::Any;
 use std::env::temp_dir;
 use std::ops::DerefMut;
@@ -7897,7 +7897,7 @@ pub fn op_open_ephemeral(
     let conn = program.connection.clone();
     let io = conn.pager.load().io.clone();
     let rand_num = io.generate_random_number();
-    let db_file;
+    let db_file: Arc<dyn DatabaseStorage>;
     let db_file_io: Arc<dyn IO>;
 
     // we support OPFS in WASM - but it require files to be pre-opened in the browser before use
@@ -7909,7 +7909,7 @@ pub fn op_open_ephemeral(
         db_file_io = Arc::new(MemoryIO::new());
         let file = db_file_io.open_file("temp-file", OpenFlags::Create, false)?;
-        db_file = DatabaseFile::new(file);
+        db_file = Arc::new(DatabaseFile::new(file));
     }
     #[cfg(not(target_family = "wasm"))]
     {
@@ -7922,7 +7922,7 @@ pub fn op_open_ephemeral(
             ));
         };
         let file = io.open_file(rand_path_str, OpenFlags::Create, false)?;
-        db_file = DatabaseFile::new(file);
+        db_file = Arc::new(DatabaseFile::new(file));
         db_file_io = io;
     }
diff --git a/sync/engine/src/database_sync_engine.rs b/sync/engine/src/database_sync_engine.rs
index 63145daf8..dcdc843cf 100644
--- a/sync/engine/src/database_sync_engine.rs
+++ b/sync/engine/src/database_sync_engine.rs
@@ -45,7 +45,7 @@ pub struct DatabaseSyncEngineOpts {
 pub struct DatabaseSyncEngine<P: ProtocolIO> {
     io: Arc<dyn turso_core::IO>,
     protocol: Arc<P>,
-    db_file: turso_core::storage::database::DatabaseFile,
+    db_file: Arc<dyn turso_core::storage::database::DatabaseStorage>,
     main_tape: DatabaseTape,
     main_db_wal_path: String,
     revert_db_wal_path: String,
@@ -156,7 +156,7 @@ impl<P: ProtocolIO> DatabaseSyncEngine<P> {
         }
         let db_file = io.open_file(main_db_path, turso_core::OpenFlags::Create, false)?;
-        let db_file = turso_core::storage::database::DatabaseFile::new(db_file);
+        let db_file = Arc::new(turso_core::storage::database::DatabaseFile::new(db_file));
         let main_db = turso_core::Database::open_with_flags(
             io.clone(),
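Note: the sketch below is not part of the patch. It only illustrates how a caller opens a database after this change, using the signatures visible in the hunks above (`Database::open_file_with_flags(io, path, flags, opts, encryption_opts)` and `Database::open_with_flags(io, path, db_file, flags, opts, encryption_opts)`). The `open_example` helper, the `None` encryption argument, the option values, and the `turso_core::IO` trait path are illustrative assumptions, not taken from this diff.

```rust
// Illustrative sketch only; names marked below as assumptions are not from the patch.
use std::sync::Arc;

use turso_core::storage::database::DatabaseFile;
use turso_core::{Database, DatabaseOpts, OpenFlags};

// Hypothetical helper; `turso_core::IO` as the I/O trait is an assumption here.
fn open_example(io: Arc<dyn turso_core::IO>, path: &str) -> Arc<Database> {
    // Path 1: let the core open the file itself, as the JS bindings now do,
    // instead of constructing a DatabaseFile by hand.
    let db = Database::open_file_with_flags(
        io.clone(),
        path,
        OpenFlags::Create,
        DatabaseOpts::new().with_mvcc(false),
        None, // encryption_opts (assumed default)
    )
    .expect("open database");

    // Path 2: open the file yourself and hand the storage over behind an Arc,
    // mirroring what the sync engine and op_open_ephemeral do after this change.
    let file = io
        .open_file(path, OpenFlags::Create, false)
        .expect("open file");
    let db_file = Arc::new(DatabaseFile::new(file));
    let _db2 = Database::open_with_flags(
        io,
        path,
        db_file,
        OpenFlags::Create,
        DatabaseOpts::new().with_mvcc(false),
        None, // encryption_opts (assumed default)
    )
    .expect("open database");

    db
}
```

The second path is what keeps working for callers that need a custom `DatabaseStorage` implementation; the first removes the need for the bindings to know about `DatabaseFile` at all.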