use dyn DatabaseStorage instead of DatabaseFile

Author: Nikita Sivukhin
Date:   2025-11-06 17:42:03 +04:00
parent c3d2ea8429
commit da61fa32b4
6 changed files with 25 additions and 31 deletions

View File

@@ -24,7 +24,6 @@ use std::{
 };
 use tracing_subscriber::filter::LevelFilter;
 use tracing_subscriber::fmt::format::FmtSpan;
-use turso_core::storage::database::DatabaseFile;
 
 /// Step result constants
 const STEP_ROW: u32 = 1;
@@ -176,15 +175,9 @@ fn connect_sync(db: &DatabaseInner) -> napi::Result<()> {
         }
     }
     let io = &db.io;
-    let file = io
-        .open_file(&db.path, flags, false)
-        .map_err(|e| to_generic_error(&format!("failed to open file {}", db.path), e))?;
-    let db_file = DatabaseFile::new(file);
-    let db_core = turso_core::Database::open_with_flags(
+    let db_core = turso_core::Database::open_file_with_flags(
         io.clone(),
         &db.path,
-        db_file,
         flags,
         turso_core::DatabaseOpts::new()
             .with_mvcc(false)
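
The net effect for the binding: instead of opening the file, wrapping it in DatabaseFile, and passing it to open_with_flags, it calls open_file_with_flags and lets turso_core build the storage itself. A minimal, self-contained sketch of that pattern, using hypothetical stand-in types rather than the real turso_core API:

use std::sync::Arc;

// Stand-in for the real trait in turso_core::storage::database; its methods are assumed here.
trait DatabaseStorage {
    fn describe(&self) -> String;
}

// Stand-in for the file-backed wrapper the binding used to construct by hand.
struct DatabaseFile {
    path: String,
}

impl DatabaseStorage for DatabaseFile {
    fn describe(&self) -> String {
        format!("file-backed storage at {}", self.path)
    }
}

struct Database {
    db_file: Arc<dyn DatabaseStorage>,
}

impl Database {
    // Mirrors open_with_flags: the caller injects the storage.
    fn open_with_storage(db_file: Arc<dyn DatabaseStorage>) -> Database {
        Database { db_file }
    }

    // Mirrors open_file_with_flags: the default file storage is built internally,
    // so callers no longer need to import DatabaseFile at all.
    fn open_file(path: &str) -> Database {
        let db_file = Arc::new(DatabaseFile { path: path.to_string() });
        Self::open_with_storage(db_file)
    }
}

fn main() {
    let db = Database::open_file("local.db");
    println!("{}", db.db_file.describe());
}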

View File

@@ -230,7 +230,7 @@ static DATABASE_MANAGER: LazyLock<Mutex<HashMap<String, Weak<Database>>>> =
 pub struct Database {
     mv_store: Option<Arc<MvStore>>,
     schema: Mutex<Arc<Schema>>,
-    db_file: DatabaseFile,
+    db_file: Arc<dyn DatabaseStorage>,
     path: String,
     wal_path: String,
     pub io: Arc<dyn IO>,
@@ -331,7 +331,7 @@ impl Database {
         encryption_opts: Option<EncryptionOpts>,
     ) -> Result<Arc<Database>> {
         let file = io.open_file(path, flags, true)?;
-        let db_file = DatabaseFile::new(file);
+        let db_file = Arc::new(DatabaseFile::new(file));
         Self::open_with_flags(io, path, db_file, flags, opts, encryption_opts)
     }
@@ -339,7 +339,7 @@ impl Database {
     pub fn open(
         io: Arc<dyn IO>,
         path: &str,
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         enable_mvcc: bool,
         enable_indexes: bool,
     ) -> Result<Arc<Database>> {
@@ -359,7 +359,7 @@ impl Database {
     pub fn open_with_flags(
         io: Arc<dyn IO>,
         path: &str,
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         flags: OpenFlags,
         opts: DatabaseOpts,
         encryption_opts: Option<EncryptionOpts>,
@@ -407,7 +407,7 @@ impl Database {
         io: Arc<dyn IO>,
         path: &str,
         wal_path: &str,
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         flags: OpenFlags,
         opts: DatabaseOpts,
         encryption_opts: Option<EncryptionOpts>,
@@ -428,7 +428,7 @@ impl Database {
         io: Arc<dyn IO>,
         path: &str,
         wal_path: &str,
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         flags: OpenFlags,
         opts: DatabaseOpts,
         encryption_opts: Option<EncryptionOpts>,
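
Because the open_* entry points now accept Arc<dyn DatabaseStorage>, any implementation of the trait can be injected, not just the file-backed DatabaseFile. A self-contained sketch of the idea, with hypothetical stand-in types (the real trait's methods are not shown in this diff):

use std::sync::Arc;

// Hypothetical stand-in for the real trait; only the injection point matters here.
trait DatabaseStorage {
    fn page_count(&self) -> usize;
}

struct DatabaseFile; // stand-in for the file-backed storage
impl DatabaseStorage for DatabaseFile {
    fn page_count(&self) -> usize {
        0
    }
}

struct InMemoryStorage {
    pages: Vec<[u8; 4096]>, // hypothetical alternative backend
}
impl DatabaseStorage for InMemoryStorage {
    fn page_count(&self) -> usize {
        self.pages.len()
    }
}

// Mirrors the new parameter type of Database::open / open_with_flags.
fn open_with_storage(db_file: Arc<dyn DatabaseStorage>) -> usize {
    db_file.page_count()
}

fn main() {
    // Arc<DatabaseFile> and Arc<InMemoryStorage> both coerce to Arc<dyn DatabaseStorage>.
    assert_eq!(open_with_storage(Arc::new(DatabaseFile)), 0);
    let mem = InMemoryStorage { pages: vec![[0u8; 4096]; 3] };
    assert_eq!(open_with_storage(Arc::new(mem)), 3);
}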

View File

@@ -1,4 +1,3 @@
-use crate::storage::database::DatabaseFile;
 use crate::storage::subjournal::Subjournal;
 use crate::storage::wal::IOV_MAX;
 use crate::storage::{
@@ -502,7 +501,7 @@ impl Savepoint {
 /// transaction management.
 pub struct Pager {
     /// Source of the database pages.
-    pub db_file: DatabaseFile,
+    pub db_file: Arc<dyn DatabaseStorage>,
     /// The write-ahead log (WAL) for the database.
     /// in-memory databases, ephemeral tables and ephemeral indexes do not have a WAL.
     pub(crate) wal: Option<Rc<RefCell<dyn Wal>>>,
@@ -619,7 +618,7 @@ enum FreePageState {
 impl Pager {
     pub fn new(
-        db_file: DatabaseFile,
+        db_file: Arc<dyn DatabaseStorage>,
         wal: Option<Rc<RefCell<dyn Wal>>>,
         io: Arc<dyn crate::io::IO>,
         page_cache: Arc<RwLock<PageCache>>,
@@ -1595,7 +1594,7 @@ impl Pager {
         io_ctx: &IOContext,
     ) -> Result<Completion> {
         sqlite3_ondisk::begin_read_page(
-            self.db_file.clone(),
+            self.db_file.as_ref(),
             self.buffer_pool.clone(),
             page,
             page_idx,
@@ -1963,7 +1962,7 @@ impl Pager {
             }
             CommitState::SyncDbFile => {
                 let sync_result =
-                    sqlite3_ondisk::begin_sync(self.db_file.clone(), self.syncing.clone());
+                    sqlite3_ondisk::begin_sync(self.db_file.as_ref(), self.syncing.clone());
                 self.commit_info
                     .write()
                     .completions
@@ -2100,7 +2099,8 @@ impl Pager {
                 *self.checkpoint_state.write() = CheckpointState::SyncDbFile { res };
             }
             CheckpointState::SyncDbFile { res } => {
-                let c = sqlite3_ondisk::begin_sync(self.db_file.clone(), self.syncing.clone())?;
+                let c =
+                    sqlite3_ondisk::begin_sync(self.db_file.as_ref(), self.syncing.clone())?;
                 *self.checkpoint_state.write() = CheckpointState::CheckpointDone { res };
                 io_yield_one!(c);
             }
@@ -3110,8 +3110,9 @@ mod ptrmap_tests {
     // Helper to create a Pager for testing
     fn test_pager_setup(page_size: u32, initial_db_pages: u32) -> Pager {
         let io: Arc<dyn IO> = Arc::new(MemoryIO::new());
-        let db_file: DatabaseFile =
-            DatabaseFile::new(io.open_file("test.db", OpenFlags::Create, true).unwrap());
+        let db_file: Arc<dyn DatabaseStorage> = Arc::new(DatabaseFile::new(
+            io.open_file("test.db", OpenFlags::Create, true).unwrap(),
+        ));
 
         // Construct interfaces for the pager
         let pages = initial_db_pages + 10;
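
Inside the pager, the field is now an Arc<dyn DatabaseStorage>, and call sites that used to clone the concrete DatabaseFile borrow the trait object for the duration of one call via as_ref(). A small sketch of that borrowing pattern, with hypothetical stand-in types:

use std::sync::Arc;

// Hypothetical stand-in types.
trait DatabaseStorage {
    fn sync(&self);
}

struct DatabaseFile;
impl DatabaseStorage for DatabaseFile {
    fn sync(&self) { /* would flush the underlying file */ }
}

struct Pager {
    db_file: Arc<dyn DatabaseStorage>,
}

// Mirrors the new signature of sqlite3_ondisk::begin_sync.
fn begin_sync(db_file: &dyn DatabaseStorage) {
    db_file.sync();
}

fn main() {
    let pager = Pager { db_file: Arc::new(DatabaseFile) };
    // Arc::as_ref hands out &dyn DatabaseStorage without touching the reference count,
    // which is what the self.db_file.as_ref() calls in this diff do.
    begin_sync(pager.db_file.as_ref());
}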

View File

@@ -58,7 +58,7 @@ use crate::storage::btree::offset::{
 };
 use crate::storage::btree::{payload_overflow_threshold_max, payload_overflow_threshold_min};
 use crate::storage::buffer_pool::BufferPool;
-use crate::storage::database::{DatabaseFile, DatabaseStorage, EncryptionOrChecksum};
+use crate::storage::database::{DatabaseStorage, EncryptionOrChecksum};
 use crate::storage::pager::Pager;
 use crate::storage::wal::READMARK_NOT_USED;
 use crate::types::{SerialType, SerialTypeKind, TextSubtype, ValueRef};
@@ -900,7 +900,7 @@ impl PageContent {
 /// if allow_empty_read is set, than empty read will be raise error for the page, but will not panic
 #[instrument(skip_all, level = Level::DEBUG)]
 pub fn begin_read_page(
-    db_file: DatabaseFile,
+    db_file: &dyn DatabaseStorage,
     buffer_pool: Arc<BufferPool>,
     page: PageRef,
     page_idx: usize,
@@ -1077,7 +1077,7 @@ pub fn write_pages_vectored(
 }
 
 #[instrument(skip_all, level = Level::DEBUG)]
-pub fn begin_sync(db_file: DatabaseFile, syncing: Arc<AtomicBool>) -> Result<Completion> {
+pub fn begin_sync(db_file: &dyn DatabaseStorage, syncing: Arc<AtomicBool>) -> Result<Completion> {
     assert!(!syncing.load(Ordering::SeqCst));
     syncing.store(true, Ordering::SeqCst);
     let completion = Completion::new_sync(move |_| {
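
With begin_read_page and begin_sync taking &dyn DatabaseStorage, callers can pass either a borrow of a concrete storage value or a borrow taken out of an Arc<dyn DatabaseStorage>. A sketch with hypothetical stand-in types (the real begin_read_page also takes a buffer pool, a page reference and an IO context, as the hunks above show):

use std::sync::Arc;

// Hypothetical stand-in types.
trait DatabaseStorage {
    fn size_bytes(&self) -> u64;
}

struct DatabaseFile {
    size: u64,
}
impl DatabaseStorage for DatabaseFile {
    fn size_bytes(&self) -> u64 {
        self.size
    }
}

fn begin_read_page(db_file: &dyn DatabaseStorage, page_idx: u64) -> bool {
    // A read only makes sense if the page starts inside the storage.
    page_idx * 4096 < db_file.size_bytes()
}

fn main() {
    let concrete = DatabaseFile { size: 8192 };
    let shared: Arc<dyn DatabaseStorage> = Arc::new(DatabaseFile { size: 8192 });
    assert!(begin_read_page(&concrete, 1)); // &DatabaseFile coerces to &dyn DatabaseStorage
    assert!(!begin_read_page(shared.as_ref(), 2)); // borrow taken out of the Arc
}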

View File

@@ -37,7 +37,7 @@ use crate::{
     },
     translate::emitter::TransactionMode,
 };
-use crate::{get_cursor, CheckpointMode, Connection, MvCursor};
+use crate::{get_cursor, CheckpointMode, Connection, DatabaseStorage, MvCursor};
 use std::any::Any;
 use std::env::temp_dir;
 use std::ops::DerefMut;
@@ -7897,7 +7897,7 @@ pub fn op_open_ephemeral(
     let conn = program.connection.clone();
     let io = conn.pager.load().io.clone();
     let rand_num = io.generate_random_number();
-    let db_file;
+    let db_file: Arc<dyn DatabaseStorage>;
     let db_file_io: Arc<dyn crate::IO>;
 
     // we support OPFS in WASM - but it require files to be pre-opened in the browser before use
@@ -7909,7 +7909,7 @@ pub fn op_open_ephemeral(
         db_file_io = Arc::new(MemoryIO::new());
         let file = db_file_io.open_file("temp-file", OpenFlags::Create, false)?;
-        db_file = DatabaseFile::new(file);
+        db_file = Arc::new(DatabaseFile::new(file));
     }
 
     #[cfg(not(target_family = "wasm"))]
     {
@@ -7922,7 +7922,7 @@ pub fn op_open_ephemeral(
         ));
         };
         let file = io.open_file(rand_path_str, OpenFlags::Create, false)?;
-        db_file = DatabaseFile::new(file);
+        db_file = Arc::new(DatabaseFile::new(file));
         db_file_io = io;
     }
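
In op_open_ephemeral, db_file is declared up front as Arc<dyn DatabaseStorage> so that each cfg branch can assign a storage built from a different IO backend while the rest of the function stays agnostic. A sketch of that declaration pattern with hypothetical stand-in types:

use std::sync::Arc;

// Hypothetical stand-in types.
trait DatabaseStorage {
    fn name(&self) -> &'static str;
}

struct MemoryStorage;
impl DatabaseStorage for MemoryStorage {
    fn name(&self) -> &'static str {
        "memory"
    }
}

struct FileStorage;
impl DatabaseStorage for FileStorage {
    fn name(&self) -> &'static str {
        "file"
    }
}

fn open_ephemeral(in_memory: bool) -> Arc<dyn DatabaseStorage> {
    // Declared once with the trait-object type, assigned in whichever branch runs,
    // just like db_file in op_open_ephemeral.
    let db_file: Arc<dyn DatabaseStorage>;
    if in_memory {
        db_file = Arc::new(MemoryStorage); // e.g. the in-memory / wasm branch
    } else {
        db_file = Arc::new(FileStorage); // e.g. the temp-file branch
    }
    db_file
}

fn main() {
    assert_eq!(open_ephemeral(true).name(), "memory");
    assert_eq!(open_ephemeral(false).name(), "file");
}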

View File

@@ -45,7 +45,7 @@ pub struct DatabaseSyncEngineOpts {
 pub struct DatabaseSyncEngine<P: ProtocolIO> {
     io: Arc<dyn turso_core::IO>,
     protocol: Arc<P>,
-    db_file: turso_core::storage::database::DatabaseFile,
+    db_file: Arc<dyn turso_core::storage::database::DatabaseStorage>,
     main_tape: DatabaseTape,
     main_db_wal_path: String,
     revert_db_wal_path: String,
@@ -156,7 +156,7 @@ impl<P: ProtocolIO> DatabaseSyncEngine<P> {
         }
 
         let db_file = io.open_file(main_db_path, turso_core::OpenFlags::Create, false)?;
-        let db_file = turso_core::storage::database::DatabaseFile::new(db_file);
+        let db_file = Arc::new(turso_core::storage::database::DatabaseFile::new(db_file));
         let main_db = turso_core::Database::open_with_flags(
             io.clone(),
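
On the sync-engine side, a convenience of holding Arc<dyn DatabaseStorage> is that the engine and the core Database can keep handles to the same storage object, with clones only bumping a reference count. A sketch with hypothetical stand-in types:

use std::sync::Arc;

// Hypothetical stand-in types.
trait DatabaseStorage {
    fn id(&self) -> usize;
}

struct DatabaseFile {
    id: usize,
}
impl DatabaseStorage for DatabaseFile {
    fn id(&self) -> usize {
        self.id
    }
}

struct Database {
    db_file: Arc<dyn DatabaseStorage>,
}

struct SyncEngine {
    db_file: Arc<dyn DatabaseStorage>,
    db: Database,
}

fn main() {
    let db_file: Arc<dyn DatabaseStorage> = Arc::new(DatabaseFile { id: 7 });
    let db = Database { db_file: db_file.clone() }; // clone only bumps the refcount
    let engine = SyncEngine { db_file, db };
    assert_eq!(engine.db_file.id(), engine.db.db_file.id());
}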