core: Get rid of maybe_init_database_file
Database file initialization now occurs only in the first write transaction.
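For context, a hedged sketch of the caller-visible change (the `open_file` constructor name and the `execute` call are assumptions based on the API surface in the diffs below, not confirmed by this commit): opening a fresh database no longer writes page 1 eagerly; the file stays empty until the first write transaction.

    // Sketch only; method names are assumed, not confirmed by this diff.
    let io: Arc<dyn limbo_core::IO> = Arc::new(MemoryIO::new());
    let db = limbo_core::Database::open_file(io, "fresh.db", false)?;
    // Before this commit: "fresh.db" already holds an initialized page 1 here.
    // After this commit: the file is still zero bytes.
    let conn = db.connect()?;
    conn.execute("CREATE TABLE t (x)")?; // first write txn initializes page 1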
@@ -6,7 +6,7 @@ use std::num::NonZeroUsize;
 use std::rc::Rc;
 use std::sync::Arc;
 
-use limbo_core::{maybe_init_database_file, LimboError, StepResult};
+use limbo_core::{LimboError, StepResult};
 use napi::iterator::Generator;
 use napi::{bindgen_prelude::ObjectFinalize, Env, JsUnknown};
 use napi_derive::napi;
@@ -65,7 +65,6 @@ impl Database {
         let file = io
             .open_file(&path, limbo_core::OpenFlags::Create, false)
             .map_err(into_napi_error)?;
-        maybe_init_database_file(&file, &io).map_err(into_napi_error)?;
         let db_file = Arc::new(DatabaseFile::new(file));
         let db = limbo_core::Database::open(io.clone(), &path, db_file, false)
             .map_err(into_napi_error)?;
@@ -2,7 +2,7 @@
 compile_error!("Features 'web' and 'nodejs' cannot be enabled at the same time");
 
 use js_sys::{Array, Object};
-use limbo_core::{maybe_init_database_file, Clock, Instant, OpenFlags, Result};
+use limbo_core::{Clock, Instant, OpenFlags, Result};
 use std::cell::RefCell;
 use std::sync::Arc;
 use wasm_bindgen::prelude::*;
@@ -21,7 +21,6 @@ impl Database {
     pub fn new(path: &str) -> Database {
         let io: Arc<dyn limbo_core::IO> = Arc::new(PlatformIO { vfs: VFS::new() });
         let file = io.open_file(path, OpenFlags::Create, false).unwrap();
-        maybe_init_database_file(&file, &io).unwrap();
         let db_file = Arc::new(DatabaseFile::new(file));
         let db = limbo_core::Database::open(io, path, db_file, false).unwrap();
         let conn = db.connect().unwrap();
core/lib.rs (65 lines changed)
@@ -62,9 +62,10 @@ use std::{
     rc::Rc,
     sync::Arc,
 };
-use storage::btree::{btree_init_page, BTreePageInner};
+use storage::btree::btree_init_page;
 #[cfg(feature = "fs")]
 use storage::database::DatabaseFile;
+use storage::page_cache::DumbLruPageCache;
 pub use storage::pager::PagerCacheflushStatus;
 pub use storage::{
     buffer_pool::BufferPool,
@@ -73,11 +74,6 @@ pub use storage::{
     pager::{Page, Pager},
     wal::{CheckpointMode, CheckpointResult, CheckpointStatus, Wal, WalFile, WalFileShared},
 };
-use storage::{
-    page_cache::DumbLruPageCache,
-    pager::allocate_page,
-    sqlite3_ondisk::{DatabaseHeader, DATABASE_HEADER_SIZE},
-};
 use tracing::{instrument, Level};
 use translate::select::prepare_select_plan;
 pub use types::RefValue;
@@ -129,7 +125,6 @@ impl Database {
         enable_mvcc: bool,
     ) -> Result<Arc<Database>> {
         let file = io.open_file(path, flags, true)?;
-        maybe_init_database_file(&file, &io)?;
         let db_file = Arc::new(DatabaseFile::new(file));
         Self::open_with_flags(io, path, db_file, flags, enable_mvcc)
     }
@@ -154,6 +149,7 @@ impl Database {
     ) -> Result<Arc<Database>> {
         let wal_path = format!("{}-wal", path);
         let maybe_shared_wal = WalFileShared::open_shared_if_exists(&io, wal_path.as_str())?;
+        let db_size = db_file.size()?;
 
         let mv_store = if enable_mvcc {
             Some(Rc::new(MvStore::new(
@@ -177,7 +173,7 @@ impl Database {
             open_flags: flags,
         };
         let db = Arc::new(db);
-        {
+        if db_size > 0 {
             // parse schema
             let conn = db.connect()?;
             let rows = conn.query("SELECT * FROM sqlite_schema")?;
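Read together, the two hunks above restructure the open path roughly as follows (a condensed paraphrase of the diff, not verbatim code):

    let db_size = db_file.size()?; // captured once, before the Database is built
    // ... construct the Database and wrap it in an Arc ...
    if db_size > 0 {
        // Existing database: load the schema by scanning sqlite_schema.
        let conn = db.connect()?;
        let rows = conn.query("SELECT * FROM sqlite_schema")?;
        // ...
    }
    // Empty database: nothing to parse; page 1 appears with the first write.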
@@ -299,59 +295,6 @@ impl Database {
     }
 }
 
-pub fn maybe_init_database_file(file: &Arc<dyn File>, io: &Arc<dyn IO>) -> Result<()> {
-    if file.size()? == 0 {
-        // init db
-        let db_header = DatabaseHeader::default();
-        let page1 = allocate_page(
-            1,
-            &Rc::new(BufferPool::new(Some(db_header.get_page_size() as usize))),
-            DATABASE_HEADER_SIZE,
-        );
-        let page1 = Arc::new(BTreePageInner {
-            page: RefCell::new(page1),
-        });
-        {
-            // Create the sqlite_schema table, for this we just need to create the btree page
-            // for the first page of the database which is basically like any other btree page
-            // but with a 100 byte offset, so we just init the page so that sqlite understands
-            // this is a correct page.
-            btree_init_page(
-                &page1,
-                storage::sqlite3_ondisk::PageType::TableLeaf,
-                DATABASE_HEADER_SIZE,
-                (db_header.get_page_size() - db_header.reserved_space as u32) as u16,
-            );
-
-            let page1 = page1.get();
-            let contents = page1.get().contents.as_mut().unwrap();
-            contents.write_database_header(&db_header);
-            // write the first page to disk synchronously
-            let flag_complete = Rc::new(RefCell::new(false));
-            {
-                let flag_complete = flag_complete.clone();
-                let completion = Completion::Write(WriteCompletion::new(Box::new(move |_| {
-                    *flag_complete.borrow_mut() = true;
-                })));
-                #[allow(clippy::arc_with_non_send_sync)]
-                file.pwrite(0, contents.buffer.clone(), Arc::new(completion))?;
-            }
-            let mut limit = 100;
-            loop {
-                io.run_once()?;
-                if *flag_complete.borrow() {
-                    break;
-                }
-                limit -= 1;
-                if limit == 0 {
-                    panic!("Database file couldn't be initialized, io loop run for {} iterations and write didn't finish", limit);
-                }
-            }
-        }
-    };
-    Ok(())
-}
-
 pub struct Connection {
     _db: Arc<Database>,
     pager: Rc<Pager>,
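The deleted helper above is what the commit title refers to: it allocated page 1, laid out an empty sqlite_schema table-leaf page behind the 100-byte header, and blocked on the write with a bounded run_once polling loop. Per the commit message this work now happens in the first write transaction; that code is outside this diff, but purely as an illustration of the intended shape (all names here are hypothetical):

    // Hypothetical sketch -- not part of this diff.
    fn begin_first_write_tx(pager: &Pager) -> Result<()> {
        if pager.db_file.size()? == 0 {
            // Same work the deleted helper did: write the database header and
            // init page 1 as an empty table-leaf page for sqlite_schema,
            // but inside the transaction instead of at open() time.
            initialize_page1(pager)?; // hypothetical helper
        }
        Ok(())
    }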
@@ -1,3 +1,4 @@
+use crate::fast_lock::SpinLock;
 use crate::result::LimboResult;
 use crate::storage::btree::BTreePageInner;
 use crate::storage::buffer_pool::BufferPool;
@@ -236,6 +237,12 @@ pub enum PagerCacheflushResult {
 }
 
 impl Pager {
+    /// Begins opening a database by reading the database header.
+    pub fn begin_open(db_file: Arc<dyn DatabaseStorage>) -> Result<Arc<SpinLock<DatabaseHeader>>> {
+        assert!(db_file.size()? > 0);
+        sqlite3_ondisk::begin_read_database_header(db_file)
+    }
+
     pub fn new(
         db_file: Arc<dyn DatabaseStorage>,
         wal: Rc<RefCell<dyn Wal>>,
@@ -243,6 +250,16 @@ impl Pager {
         page_cache: Arc<RwLock<DumbLruPageCache>>,
         buffer_pool: Rc<BufferPool>,
     ) -> Result<Self> {
+        let npages = if db_file.size().unwrap() > 0 {
+            let db_header = Pager::begin_open(db_file.clone()).unwrap();
+            // ensure db header is there
+            io.run_once().unwrap();
+            let size = db_header.lock().database_size;
+            size as usize
+        } else {
+            0
+        };
+
         Ok(Self {
             db_file,
             wal,
@@ -258,7 +275,7 @@ impl Pager {
             checkpoint_inflight: Rc::new(RefCell::new(0)),
             buffer_pool,
             auto_vacuum_mode: RefCell::new(AutoVacuumMode::None),
-            npages: AtomicUsize::new(0),
+            npages: AtomicUsize::new(npages),
         })
     }
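begin_open only schedules the header read; the result is populated once the IO loop completes it, which is why Pager::new above calls io.run_once() before taking the page count. A minimal usage sketch, assuming db_file and io as in the surrounding code:

    // Read the on-disk page count via the new helper.
    let header = Pager::begin_open(db_file.clone())?; // schedules the header read
    io.run_once()?;                                   // drive IO until it completes
    let npages = header.lock().database_size as usize;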
@@ -1381,8 +1398,9 @@ mod ptrmap_tests {
     // Helper to create a Pager for testing
     fn test_pager_setup(page_size: u32, initial_db_pages: u32) -> Pager {
         let io: Arc<dyn IO> = Arc::new(MemoryIO::new());
-        let db_file_raw = io.open_file("test.db", OpenFlags::Create, true).unwrap();
-        let db_storage: Arc<dyn DatabaseStorage> = Arc::new(DatabaseFile::new(db_file_raw));
+        let db_file: Arc<dyn DatabaseStorage> = Arc::new(DatabaseFile::new(
+            io.open_file("test.db", OpenFlags::Create, true).unwrap(),
+        ));
 
         // Initialize a minimal header in autovacuum mode
         let mut header_data = DatabaseHeader::default();
@@ -1408,7 +1426,7 @@ mod ptrmap_tests {
             buffer_pool.clone(),
         )));
 
-        let pager = Pager::new(db_storage, wal, io, page_cache, buffer_pool).unwrap();
+        let pager = Pager::new(db_file, wal, io, page_cache, buffer_pool).unwrap();
         pager.set_auto_vacuum_mode(AutoVacuumMode::Full);
 
         // Allocate all the pages as btree root pages
@@ -287,6 +287,60 @@ impl DatabaseHeader {
     }
 }
 
+pub fn begin_read_database_header(
+    db_file: Arc<dyn DatabaseStorage>,
+) -> Result<Arc<SpinLock<DatabaseHeader>>> {
+    let drop_fn = Rc::new(|_buf| {});
+    #[allow(clippy::arc_with_non_send_sync)]
+    let buf = Arc::new(RefCell::new(Buffer::allocate(512, drop_fn)));
+    let result = Arc::new(SpinLock::new(DatabaseHeader::default()));
+    let header = result.clone();
+    let complete = Box::new(move |buf: Arc<RefCell<Buffer>>| {
+        let header = header.clone();
+        finish_read_database_header(buf, header).unwrap();
+    });
+    let c = Completion::Read(ReadCompletion::new(buf, complete));
+    #[allow(clippy::arc_with_non_send_sync)]
+    db_file.read_page(DATABASE_HEADER_PAGE_ID, Arc::new(c))?;
+    Ok(result)
+}
+
+fn finish_read_database_header(
+    buf: Arc<RefCell<Buffer>>,
+    header: Arc<SpinLock<DatabaseHeader>>,
+) -> Result<()> {
+    let buf = buf.borrow();
+    let buf = buf.as_slice();
+    let mut header = header.lock();
+    header.magic.copy_from_slice(&buf[0..16]);
+    header.page_size = u16::from_be_bytes([buf[16], buf[17]]);
+    header.write_version = buf[18];
+    header.read_version = buf[19];
+    header.reserved_space = buf[20];
+    header.max_embed_frac = buf[21];
+    header.min_embed_frac = buf[22];
+    header.min_leaf_frac = buf[23];
+    header.change_counter = u32::from_be_bytes([buf[24], buf[25], buf[26], buf[27]]);
+    header.database_size = u32::from_be_bytes([buf[28], buf[29], buf[30], buf[31]]);
+    header.freelist_trunk_page = u32::from_be_bytes([buf[32], buf[33], buf[34], buf[35]]);
+    header.freelist_pages = u32::from_be_bytes([buf[36], buf[37], buf[38], buf[39]]);
+    header.schema_cookie = u32::from_be_bytes([buf[40], buf[41], buf[42], buf[43]]);
+    header.schema_format = u32::from_be_bytes([buf[44], buf[45], buf[46], buf[47]]);
+    header.default_page_cache_size = i32::from_be_bytes([buf[48], buf[49], buf[50], buf[51]]);
+    if header.default_page_cache_size == 0 {
+        header.default_page_cache_size = DEFAULT_CACHE_SIZE;
+    }
+    header.vacuum_mode_largest_root_page = u32::from_be_bytes([buf[52], buf[53], buf[54], buf[55]]);
+    header.text_encoding = u32::from_be_bytes([buf[56], buf[57], buf[58], buf[59]]);
+    header.user_version = i32::from_be_bytes([buf[60], buf[61], buf[62], buf[63]]);
+    header.incremental_vacuum_enabled = u32::from_be_bytes([buf[64], buf[65], buf[66], buf[67]]);
+    header.application_id = u32::from_be_bytes([buf[68], buf[69], buf[70], buf[71]]);
+    header.reserved_for_expansion.copy_from_slice(&buf[72..92]);
+    header.version_valid_for = u32::from_be_bytes([buf[92], buf[93], buf[94], buf[95]]);
+    header.version_number = u32::from_be_bytes([buf[96], buf[97], buf[98], buf[99]]);
+    Ok(())
+}
+
 pub fn write_header_to_buf(buf: &mut [u8], header: &DatabaseHeader) {
     buf[0..16].copy_from_slice(&header.magic);
     buf[16..18].copy_from_slice(&header.page_size.to_be_bytes());
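finish_read_database_header is a straight big-endian decode of the 100-byte SQLite header. A self-contained sketch of the pattern for two of the fields (std only, no crate types):

    // Illustration of the decoding used above; offsets follow the SQLite file format.
    fn parse_page_size(header: &[u8]) -> u16 {
        // Bytes 16..18 hold the page size, big-endian.
        u16::from_be_bytes([header[16], header[17]])
    }
    fn parse_database_size_in_pages(header: &[u8]) -> u32 {
        // Bytes 28..32 hold the database size in pages ("database_size" above).
        u32::from_be_bytes([header[28], header[29], header[30], header[31]])
    }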
@@ -51,8 +51,7 @@ use crate::{
 };
 
 use crate::{
-    info, maybe_init_database_file, BufferPool, MvCursor, OpenFlags, RefValue, Row, StepResult,
-    TransactionState, IO,
+    info, BufferPool, MvCursor, OpenFlags, RefValue, Row, StepResult, TransactionState, IO,
 };
 
 use super::{
@@ -5211,7 +5210,6 @@ pub fn op_open_ephemeral(
     let io = conn.pager.io.get_memory_io();
 
     let file = io.open_file("", OpenFlags::Create, true)?;
-    maybe_init_database_file(&file, &(io.clone() as Arc<dyn IO>))?;
    let db_file = Arc::new(FileMemoryStorage::new(file));
 
     let buffer_pool = Rc::new(BufferPool::new(None));