Implement a header_accessor module so that DatabaseHeader structs aren't initialized on every access

This commit is contained in:
Jussi Saurio
2025-06-18 12:17:48 +03:00
committed by Diego Reis
parent ad20e306f7
commit 133d498724
10 changed files with 265 additions and 167 deletions

7
Cargo.lock generated
View File

@@ -1833,6 +1833,7 @@ dependencies = [
"miette",
"mimalloc",
"parking_lot",
"paste",
"polling",
"pprof",
"quickcheck",
@@ -2478,6 +2479,12 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "percent-encoding"
version = "2.3.1"

View File

@@ -84,6 +84,7 @@ uncased = "0.9.10"
strum_macros = { workspace = true }
bitflags = "2.9.0"
serde = { workspace = true , optional = true, features = ["derive"] }
paste = "1.0.15"
[build-dependencies]
chrono = { version = "0.4.38", default-features = false }

View File

@@ -34,7 +34,7 @@ mod numeric;
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
use crate::storage::wal::DummyWAL;
use crate::storage::{header_accessor, wal::DummyWAL};
use crate::translate::optimizer::optimize_plan;
use crate::vtab::VirtualTable;
use core::str;
@@ -62,7 +62,6 @@ use std::{
rc::Rc,
sync::{atomic::Ordering, Arc},
};
use storage::btree::btree_init_page;
#[cfg(feature = "fs")]
use storage::database::DatabaseFile;
use storage::page_cache::DumbLruPageCache;
@@ -215,10 +214,9 @@ impl Database {
is_empty,
)?);
let header = pager.db_header()?;
pager
.buffer_pool
.set_page_size(header.get_page_size() as usize);
let page_size = header_accessor::get_page_size(&pager)?;
let default_cache_size = header_accessor::get_default_page_cache_size(&pager)?;
pager.buffer_pool.set_page_size(page_size as usize);
let conn = Arc::new(Connection {
_db: self.clone(),
pager: pager.clone(),
@@ -231,7 +229,7 @@ impl Database {
syms: RefCell::new(SymbolTable::new()),
total_changes: Cell::new(0),
_shared_cache: false,
cache_size: Cell::new(header.default_page_cache_size),
cache_size: Cell::new(default_cache_size),
});
if let Err(e) = conn.register_builtins() {
return Err(LimboError::ExtensionError(e));
@@ -251,10 +249,14 @@ impl Database {
buffer_pool.clone(),
is_empty,
)?;
let header = pager.db_header()?;
let page_size = header_accessor::get_page_size(&pager)
.unwrap_or(storage::sqlite3_ondisk::DEFAULT_PAGE_SIZE) as u32;
let default_cache_size = header_accessor::get_default_page_cache_size(&pager)
.unwrap_or(storage::sqlite3_ondisk::DEFAULT_CACHE_SIZE as i32);
let wal_path = format!("{}-wal", self.path);
let file = self.io.open_file(&wal_path, OpenFlags::Create, false)?;
let real_shared_wal = WalFileShared::new_shared(header.get_page_size(), &self.io, file)?;
let real_shared_wal = WalFileShared::new_shared(page_size, &self.io, file)?;
// Modify Database::maybe_shared_wal to point to the new WAL file so that other connections
// can open the existing WAL.
*self.maybe_shared_wal.write() = Some(real_shared_wal.clone());
@@ -276,7 +278,7 @@ impl Database {
total_changes: Cell::new(0),
syms: RefCell::new(SymbolTable::new()),
_shared_cache: false,
cache_size: Cell::new(header.default_page_cache_size as i32),
cache_size: Cell::new(default_cache_size),
});
if let Err(e) = conn.register_builtins() {

View File

@@ -3,6 +3,7 @@ use tracing::{instrument, Level};
use crate::{
schema::Index,
storage::{
header_accessor,
pager::{BtreePageAllocMode, Pager},
sqlite3_ondisk::{
read_u32, read_varint, BTreeCell, PageContent, PageType, TableInteriorCell,
@@ -3026,7 +3027,7 @@ impl BTreeCursor {
assert_eq!(left_pointer, page.get().get().id as u32);
// FIXME: remove this lock
assert!(
left_pointer <= self.pager.db_header()?.database_size,
left_pointer <= header_accessor::get_database_size(&self.pager)?,
"invalid page number divider left pointer {} > database number of pages",
left_pointer,
);
@@ -4660,7 +4661,8 @@ impl BTreeCursor {
}
OverflowState::ProcessPage { next_page } => {
if next_page < 2
|| next_page as usize > self.pager.db_header()?.database_size as usize
|| next_page as usize
> header_accessor::get_database_size(&self.pager)? as usize
{
self.overflow_state = None;
return Err(LimboError::Corrupt("Invalid overflow page number".into()));
@@ -7403,17 +7405,7 @@ mod tests {
pager.allocate_page().unwrap();
}
let mut db_header = pager.db_header().unwrap();
db_header.page_size = page_size as u16;
let page1 = pager.read_page(1).unwrap();
while page1.is_locked() {
pager.io.run_once().unwrap();
}
page1.set_dirty();
let page1 = page1.get();
let contents = page1.contents.as_mut().unwrap();
contents.write_database_header(&db_header);
pager.add_dirty(page1.id);
header_accessor::set_page_size(&pager, page_size as u16).unwrap();
pager
}
@@ -7436,7 +7428,7 @@ mod tests {
let drop_fn = Rc::new(|_buf| {});
#[allow(clippy::arc_with_non_send_sync)]
let buf = Arc::new(RefCell::new(Buffer::allocate(
pager.db_header().unwrap().get_page_size() as usize,
header_accessor::get_page_size(&pager)? as usize,
drop_fn,
)));
let write_complete = Box::new(|_| {});
@@ -7479,20 +7471,20 @@ mod tests {
payload_size: large_payload.len() as u64,
});
let initial_freelist_pages = pager.db_header().unwrap().freelist_pages;
let initial_freelist_pages = header_accessor::get_freelist_pages(&pager)?;
// Clear overflow pages
let clear_result = cursor.clear_overflow_pages(&leaf_cell)?;
match clear_result {
CursorResult::Ok(_) => {
// Verify proper number of pages were added to freelist
assert_eq!(
pager.db_header().unwrap().freelist_pages,
header_accessor::get_freelist_pages(&pager)?,
initial_freelist_pages + 3,
"Expected 3 pages to be added to freelist"
);
// If this is first trunk page
let trunk_page_id = pager.db_header().unwrap().freelist_trunk_page;
let trunk_page_id = header_accessor::get_freelist_trunk_page(&pager)?;
if trunk_page_id > 0 {
// Verify trunk page structure
let trunk_page = cursor.read_page(trunk_page_id as usize)?;
@@ -7535,7 +7527,7 @@ mod tests {
payload_size: small_payload.len() as u64,
});
let initial_freelist_pages = pager.db_header().unwrap().freelist_pages;
let initial_freelist_pages = header_accessor::get_freelist_pages(&pager)?;
// Try to clear non-existent overflow pages
let clear_result = cursor.clear_overflow_pages(&leaf_cell)?;
@@ -7543,14 +7535,14 @@ mod tests {
CursorResult::Ok(_) => {
// Verify freelist was not modified
assert_eq!(
pager.db_header().unwrap().freelist_pages,
header_accessor::get_freelist_pages(&pager)?,
initial_freelist_pages,
"Freelist should not change when no overflow pages exist"
);
// Verify trunk page wasn't created
assert_eq!(
pager.db_header().unwrap().freelist_trunk_page,
header_accessor::get_freelist_trunk_page(&pager)?,
0,
"No trunk page should be created when no overflow pages exist"
);
@@ -7620,18 +7612,18 @@ mod tests {
// Verify structure before destruction
assert_eq!(
pager.db_header().unwrap().database_size,
header_accessor::get_database_size(&pager)?,
4, // We should have pages 1-4
"Database should have 4 pages total"
);
// Track freelist state before destruction
let initial_free_pages = pager.db_header().unwrap().freelist_pages;
let initial_free_pages = header_accessor::get_freelist_pages(&pager)?;
assert_eq!(initial_free_pages, 0, "should start with no free pages");
run_until_done(|| cursor.btree_destroy(), pager.deref())?;
let pages_freed = pager.db_header().unwrap().freelist_pages - initial_free_pages;
let pages_freed = header_accessor::get_freelist_pages(&pager)? - initial_free_pages;
assert_eq!(pages_freed, 3, "should free 3 pages (root + 2 leaves)");
Ok(())

View File

@@ -0,0 +1,162 @@
use crate::{
storage::{
self,
pager::{PageRef, Pager},
sqlite3_ondisk::DATABASE_HEADER_PAGE_ID,
},
LimboError, Result,
};
use paste;
use std::sync::atomic::Ordering;
// Byte offsets of fields within the database header, which occupies the
// first 100 bytes of page 1. The layout matches the SQLite file format's
// database header ("The Database Header" section of the SQLite file
// format documentation); all multi-byte fields are stored big-endian.
// const HEADER_OFFSET_MAGIC: usize = 0;
const HEADER_OFFSET_PAGE_SIZE: usize = 16;
const HEADER_OFFSET_WRITE_VERSION: usize = 18;
const HEADER_OFFSET_READ_VERSION: usize = 19;
// Bytes of unused "reserved" space at the end of each page.
const HEADER_OFFSET_RESERVED_SPACE: usize = 20;
const HEADER_OFFSET_MAX_EMBED_FRAC: usize = 21;
const HEADER_OFFSET_MIN_EMBED_FRAC: usize = 22;
const HEADER_OFFSET_MIN_LEAF_FRAC: usize = 23;
const HEADER_OFFSET_CHANGE_COUNTER: usize = 24;
// Size of the database file in pages.
const HEADER_OFFSET_DATABASE_SIZE: usize = 28;
const HEADER_OFFSET_FREELIST_TRUNK_PAGE: usize = 32;
const HEADER_OFFSET_FREELIST_PAGES: usize = 36;
const HEADER_OFFSET_SCHEMA_COOKIE: usize = 40;
const HEADER_OFFSET_SCHEMA_FORMAT: usize = 44;
const HEADER_OFFSET_DEFAULT_PAGE_CACHE_SIZE: usize = 48;
const HEADER_OFFSET_VACUUM_MODE_LARGEST_ROOT_PAGE: usize = 52;
const HEADER_OFFSET_TEXT_ENCODING: usize = 56;
const HEADER_OFFSET_USER_VERSION: usize = 60;
const HEADER_OFFSET_INCREMENTAL_VACUUM_ENABLED: usize = 64;
const HEADER_OFFSET_APPLICATION_ID: usize = 68;
// Bytes 72..92 are reserved for expansion and must be zero.
//const HEADER_OFFSET_RESERVED_FOR_EXPANSION: usize = 72;
const HEADER_OFFSET_VERSION_VALID_FOR: usize = 92;
const HEADER_OFFSET_VERSION_NUMBER: usize = 96;
// Helper to get a read-only reference to the header page.
fn get_header_page(pager: &Pager) -> Result<PageRef> {
if pager.is_empty.load(Ordering::SeqCst) {
return Err(LimboError::InternalError(
"Database is empty, header does not exist - page 1 should've been allocated before this".to_string(),
));
}
let page = pager.read_page(DATABASE_HEADER_PAGE_ID)?;
while !page.is_loaded() || page.is_locked() {
// FIXME: LETS STOP DOING THESE SYNCHRONOUS IO HACKS
pager.io.run_once()?;
}
Ok(page)
}
// Helper to get a writable reference to the header page and mark it dirty.
fn get_header_page_for_write(pager: &Pager) -> Result<PageRef> {
if pager.is_empty.load(Ordering::SeqCst) {
// This should not be called on an empty DB for writing, as page 1 is allocated on first transaction.
return Err(LimboError::InternalError(
"Cannot write to header of an empty database - page 1 should've been allocated before this".to_string(),
));
}
let page = pager.read_page(DATABASE_HEADER_PAGE_ID)?;
while !page.is_loaded() || page.is_locked() {
// FIXME: LETS STOP DOING THESE SYNCHRONOUS IO HACKS
pager.io.run_once()?;
}
page.set_dirty();
pager.add_dirty(DATABASE_HEADER_PAGE_ID);
Ok(page)
}
/// Helper macro to implement getters and setters for header fields.
/// For example, `impl_header_field_accessor!(page_size, u16, HEADER_OFFSET_PAGE_SIZE);`
/// will generate the following functions:
/// - `pub fn get_page_size(pager: &Pager) -> Result<u16>`
/// - `pub fn set_page_size(pager: &Pager, value: u16) -> Result<()>`
///
/// The macro takes three required arguments:
/// - `$field_name`: The name of the field to implement.
/// - `$type`: The type of the field.
/// - `$offset`: The offset of the field in the header page.
///
/// And a fourth optional argument:
/// - `$ifzero`: A value to return if the field is 0.
///
/// The macro will generate the following functions:
/// - `pub fn get_<field_name>(pager: &Pager) -> Result<T>`
/// - `pub fn set_<field_name>(pager: &Pager, value: T) -> Result<()>`
///
/// Helper macro to implement getters and setters for header fields.
/// For example, `impl_header_field_accessor!(page_size, u16, HEADER_OFFSET_PAGE_SIZE);`
/// will generate the following functions:
/// - `pub fn get_page_size(pager: &Pager) -> Result<u16>`
/// - `pub fn set_page_size(pager: &Pager, value: u16) -> Result<()>`
///
/// The macro takes three required arguments:
/// - `$field_name`: The name of the field to implement.
/// - `$type`: The type of the field (must be an integer type providing
///   `from_be_bytes`/`to_be_bytes`; header fields are stored big-endian).
/// - `$offset`: The byte offset of the field within the header page.
///
/// And a fourth optional argument:
/// - `$ifzero`: A value the getter returns instead when the stored field is 0
///   (some header fields treat 0 as "use the default").
macro_rules! impl_header_field_accessor {
    ($field_name:ident, $type:ty, $offset:expr $(, $ifzero:expr)?) => {
        paste::paste! {
            /// Reads this header field from page 1.
            #[allow(dead_code)]
            pub fn [<get_ $field_name>](pager: &Pager) -> Result<$type> {
                // get_header_page already rejects an empty database with an
                // InternalError, so no separate is_empty check is needed here.
                let page = get_header_page(pager)?;
                let page_inner = page.get();
                let page_content = page_inner.contents.as_ref().unwrap();
                let buf = page_content.buffer.borrow();
                let buf_slice = buf.as_slice();
                let mut bytes = [0; std::mem::size_of::<$type>()];
                bytes.copy_from_slice(&buf_slice[$offset..$offset + std::mem::size_of::<$type>()]);
                let value = <$type>::from_be_bytes(bytes);
                $(
                    // A stored 0 means "fall back to the default" for this field.
                    if value == 0 {
                        return Ok($ifzero);
                    }
                )?
                Ok(value)
            }
            /// Writes this header field to page 1 (big-endian). The page is
            /// marked dirty so the change is flushed by the pager.
            #[allow(dead_code)]
            pub fn [<set_ $field_name>](pager: &Pager, value: $type) -> Result<()> {
                // get_header_page_for_write already calls set_dirty() and
                // add_dirty(DATABASE_HEADER_PAGE_ID), so we don't repeat that here.
                let page = get_header_page_for_write(pager)?;
                let page_inner = page.get();
                let page_content = page_inner.contents.as_ref().unwrap();
                let mut buf = page_content.buffer.borrow_mut();
                let buf_slice = buf.as_mut_slice();
                buf_slice[$offset..$offset + std::mem::size_of::<$type>()].copy_from_slice(&value.to_be_bytes());
                Ok(())
            }
        }
    };
}
// impl_header_field_accessor!(magic, [u8; 16], HEADER_OFFSET_MAGIC);
// Generate `get_<field>` / `set_<field>` accessor pairs for each header field.
// Commented-out entries ([u8; N] fields) are unsupported because the macro
// relies on `from_be_bytes`/`to_be_bytes`, which byte arrays do not provide.
// impl_header_field_accessor!(magic, [u8; 16], HEADER_OFFSET_MAGIC);
impl_header_field_accessor!(page_size, u16, HEADER_OFFSET_PAGE_SIZE);
impl_header_field_accessor!(write_version, u8, HEADER_OFFSET_WRITE_VERSION);
impl_header_field_accessor!(read_version, u8, HEADER_OFFSET_READ_VERSION);
impl_header_field_accessor!(reserved_space, u8, HEADER_OFFSET_RESERVED_SPACE);
impl_header_field_accessor!(max_embed_frac, u8, HEADER_OFFSET_MAX_EMBED_FRAC);
impl_header_field_accessor!(min_embed_frac, u8, HEADER_OFFSET_MIN_EMBED_FRAC);
impl_header_field_accessor!(min_leaf_frac, u8, HEADER_OFFSET_MIN_LEAF_FRAC);
impl_header_field_accessor!(change_counter, u32, HEADER_OFFSET_CHANGE_COUNTER);
impl_header_field_accessor!(database_size, u32, HEADER_OFFSET_DATABASE_SIZE);
impl_header_field_accessor!(freelist_trunk_page, u32, HEADER_OFFSET_FREELIST_TRUNK_PAGE);
impl_header_field_accessor!(freelist_pages, u32, HEADER_OFFSET_FREELIST_PAGES);
impl_header_field_accessor!(schema_cookie, u32, HEADER_OFFSET_SCHEMA_COOKIE);
impl_header_field_accessor!(schema_format, u32, HEADER_OFFSET_SCHEMA_FORMAT);
// A stored 0 means "use the default cache size", so the getter substitutes
// DEFAULT_CACHE_SIZE via the optional $ifzero argument.
impl_header_field_accessor!(
    default_page_cache_size,
    i32,
    HEADER_OFFSET_DEFAULT_PAGE_CACHE_SIZE,
    storage::sqlite3_ondisk::DEFAULT_CACHE_SIZE as i32
);
impl_header_field_accessor!(
    vacuum_mode_largest_root_page,
    u32,
    HEADER_OFFSET_VACUUM_MODE_LARGEST_ROOT_PAGE
);
impl_header_field_accessor!(text_encoding, u32, HEADER_OFFSET_TEXT_ENCODING);
impl_header_field_accessor!(user_version, i32, HEADER_OFFSET_USER_VERSION);
impl_header_field_accessor!(
    incremental_vacuum_enabled,
    u32,
    HEADER_OFFSET_INCREMENTAL_VACUUM_ENABLED
);
impl_header_field_accessor!(application_id, u32, HEADER_OFFSET_APPLICATION_ID);
//impl_header_field_accessor!(reserved_for_expansion, [u8; 20], HEADER_OFFSET_RESERVED_FOR_EXPANSION);
impl_header_field_accessor!(version_valid_for, u32, HEADER_OFFSET_VERSION_VALID_FOR);
impl_header_field_accessor!(version_number, u32, HEADER_OFFSET_VERSION_NUMBER);

View File

@@ -13,6 +13,7 @@
pub(crate) mod btree;
pub(crate) mod buffer_pool;
pub(crate) mod database;
pub(crate) mod header_accessor;
pub(crate) mod page_cache;
#[allow(clippy::arc_with_non_send_sync)]
pub(crate) mod pager;

View File

@@ -2,9 +2,8 @@ use crate::result::LimboResult;
use crate::storage::btree::BTreePageInner;
use crate::storage::buffer_pool::BufferPool;
use crate::storage::database::DatabaseStorage;
use crate::storage::sqlite3_ondisk::{
self, DatabaseHeader, PageContent, PageType, DATABASE_HEADER_PAGE_ID, DEFAULT_CACHE_SIZE,
};
use crate::storage::header_accessor;
use crate::storage::sqlite3_ondisk::{self, DatabaseHeader, PageContent, PageType};
use crate::storage::wal::{CheckpointResult, Wal, WalFsyncStatus};
use crate::types::CursorResult;
use crate::{Buffer, LimboError, Result};
@@ -267,56 +266,6 @@ impl Pager {
self.wal = wal;
}
pub fn db_header(&self) -> Result<DatabaseHeader> {
if self.is_empty.load(Ordering::SeqCst) {
return Ok(DatabaseHeader::default());
}
// read page 1
let page = self.read_page(1)?;
// TODO: let's not create a new DatabaseHeader struct every time we read the header
// instead let's have accessor methods for reading and writing the header fields directly
let mut header = DatabaseHeader::default();
while !page.is_loaded() || page.is_locked() {
// FIXME: LETS STOP DOING THESE SYNCHRONOUS IO HACKS
self.io.run_once()?;
}
let page_inner = page.get();
let page_content = page_inner.contents.as_ref().unwrap();
let buf = page_content.buffer.borrow();
let buf = buf.as_slice();
header.magic.copy_from_slice(&buf[0..16]);
header.page_size = u16::from_be_bytes([buf[16], buf[17]]);
header.write_version = buf[18];
header.read_version = buf[19];
header.reserved_space = buf[20];
header.max_embed_frac = buf[21];
header.min_embed_frac = buf[22];
header.min_leaf_frac = buf[23];
header.change_counter = u32::from_be_bytes([buf[24], buf[25], buf[26], buf[27]]);
header.database_size = u32::from_be_bytes([buf[28], buf[29], buf[30], buf[31]]);
header.freelist_trunk_page = u32::from_be_bytes([buf[32], buf[33], buf[34], buf[35]]);
header.freelist_pages = u32::from_be_bytes([buf[36], buf[37], buf[38], buf[39]]);
header.schema_cookie = u32::from_be_bytes([buf[40], buf[41], buf[42], buf[43]]);
header.schema_format = u32::from_be_bytes([buf[44], buf[45], buf[46], buf[47]]);
header.default_page_cache_size = i32::from_be_bytes([buf[48], buf[49], buf[50], buf[51]]);
if header.default_page_cache_size == 0 {
header.default_page_cache_size = DEFAULT_CACHE_SIZE;
}
header.vacuum_mode_largest_root_page =
u32::from_be_bytes([buf[52], buf[53], buf[54], buf[55]]);
header.text_encoding = u32::from_be_bytes([buf[56], buf[57], buf[58], buf[59]]);
header.user_version = i32::from_be_bytes([buf[60], buf[61], buf[62], buf[63]]);
header.incremental_vacuum_enabled =
u32::from_be_bytes([buf[64], buf[65], buf[66], buf[67]]);
header.application_id = u32::from_be_bytes([buf[68], buf[69], buf[70], buf[71]]);
header.reserved_for_expansion.copy_from_slice(&buf[72..92]);
header.version_valid_for = u32::from_be_bytes([buf[92], buf[93], buf[94], buf[95]]);
header.version_number = u32::from_be_bytes([buf[96], buf[97], buf[98], buf[99]]);
Ok(header)
}
pub fn get_auto_vacuum_mode(&self) -> AutoVacuumMode {
*self.auto_vacuum_mode.borrow()
}
@@ -331,7 +280,7 @@ impl Pager {
#[cfg(not(feature = "omit_autovacuum"))]
pub fn ptrmap_get(&self, target_page_num: u32) -> Result<CursorResult<Option<PtrmapEntry>>> {
tracing::trace!("ptrmap_get(page_idx = {})", target_page_num);
let configured_page_size = self.db_header()?.get_page_size() as usize;
let configured_page_size = header_accessor::get_page_size(self)? as usize;
if target_page_num < FIRST_PTRMAP_PAGE_NO
|| is_ptrmap_page(target_page_num, configured_page_size)
@@ -417,7 +366,7 @@ impl Pager {
parent_page_no
);
let page_size = self.db_header()?.get_page_size() as usize;
let page_size = header_accessor::get_page_size(self)? as usize;
if db_page_no_to_update < FIRST_PTRMAP_PAGE_NO
|| is_ptrmap_page(db_page_no_to_update, page_size)
@@ -512,13 +461,16 @@ impl Pager {
Ok(CursorResult::Ok(page_id as u32))
}
AutoVacuumMode::Full => {
let mut root_page_num = self.db_header()?.vacuum_mode_largest_root_page;
let mut root_page_num =
header_accessor::get_vacuum_mode_largest_root_page(self)?;
assert!(root_page_num > 0); // Largest root page number cannot be 0 because that is set to 1 when creating the database with autovacuum enabled
root_page_num += 1;
assert!(root_page_num >= FIRST_PTRMAP_PAGE_NO); // can never be less than 2 because we have already incremented
while is_ptrmap_page(root_page_num, self.db_header()?.get_page_size() as usize)
{
while is_ptrmap_page(
root_page_num,
header_accessor::get_page_size(self)? as usize,
) {
root_page_num += 1;
}
assert!(root_page_num >= 3); // the very first root page is page 3
@@ -577,7 +529,7 @@ impl Pager {
let page = Arc::new(BTreePageInner {
page: RefCell::new(page),
});
crate::btree_init_page(&page, page_type, offset, self.usable_space() as u16);
btree_init_page(&page, page_type, offset, self.usable_space() as u16);
tracing::debug!(
"do_allocate_page(id={}, page_type={:?})",
page.get().get().id,
@@ -591,8 +543,9 @@ impl Pager {
/// The usable size of a page might be an odd number. However, the usable size is not allowed to be less than 480.
/// In other words, if the page size is 512, then the reserved space size cannot exceed 32.
pub fn usable_space(&self) -> usize {
let db_header = self.db_header().unwrap();
(db_header.get_page_size() - db_header.reserved_space as u32) as usize
let page_size = header_accessor::get_page_size(self).unwrap_or_default() as u32;
let reserved_space = header_accessor::get_reserved_space(self).unwrap_or_default() as u32;
(page_size - reserved_space) as usize
}
#[inline(always)]
@@ -690,22 +643,6 @@ impl Pager {
Ok(page)
}
/// Writes the database header.
pub fn write_database_header(&self, header: &DatabaseHeader) -> Result<()> {
let header_page = self.read_page(DATABASE_HEADER_PAGE_ID)?;
while header_page.is_locked() {
// FIXME: we should never run io here!
self.io.run_once()?;
}
header_page.set_dirty();
self.add_dirty(DATABASE_HEADER_PAGE_ID);
let contents = header_page.get().contents.as_ref().unwrap();
contents.write_database_header(header);
Ok(())
}
/// Changes the size of the page cache.
pub fn change_page_cache_size(&self, capacity: usize) -> Result<CacheResizeResult> {
let mut page_cache = self.page_cache.write();
@@ -733,7 +670,7 @@ impl Pager {
trace!("cacheflush {:?}", state);
match state {
FlushState::Start => {
let db_size = self.db_header()?.database_size;
let db_size = header_accessor::get_database_size(self)?;
for page_id in self.dirty_pages.borrow().iter() {
let mut cache = self.page_cache.write();
let page_key = PageCacheKey::new(*page_id);
@@ -932,7 +869,7 @@ impl Pager {
const TRUNK_PAGE_NEXT_PAGE_OFFSET: usize = 0; // Offset to next trunk page pointer
const TRUNK_PAGE_LEAF_COUNT_OFFSET: usize = 4; // Offset to leaf count
if page_id < 2 || page_id > self.db_header()?.database_size as usize {
if page_id < 2 || page_id > header_accessor::get_database_size(self)? as usize {
return Err(LimboError::Corrupt(format!(
"Invalid page number {} for free operation",
page_id
@@ -947,11 +884,9 @@ impl Pager {
None => self.read_page(page_id)?,
};
let mut header = self.db_header()?;
header.freelist_pages += 1;
self.write_database_header(&header)?;
header_accessor::set_freelist_pages(self, header_accessor::get_freelist_pages(self)? + 1)?;
let trunk_page_id = header.freelist_trunk_page;
let trunk_page_id = header_accessor::get_freelist_trunk_page(self)?;
if trunk_page_id != 0 {
// Add as leaf to current trunk
@@ -960,7 +895,7 @@ impl Pager {
let number_of_leaf_pages = trunk_page_contents.read_u32(TRUNK_PAGE_LEAF_COUNT_OFFSET);
// Reserve 2 slots for the trunk page header which is 8 bytes or 2*LEAF_ENTRY_SIZE
let max_free_list_entries = (self.usable_size() / LEAF_ENTRY_SIZE) - RESERVED_SLOTS;
let max_free_list_entries = (self.usable_space() / LEAF_ENTRY_SIZE) - RESERVED_SLOTS;
if number_of_leaf_pages < max_free_list_entries as u32 {
trunk_page.set_dirty();
@@ -989,9 +924,7 @@ impl Pager {
// Zero leaf count
contents.write_u32(TRUNK_PAGE_LEAF_COUNT_OFFSET, 0);
// Update page 1 to point to new trunk
let mut header = self.db_header()?;
header.freelist_trunk_page = page_id as u32;
self.write_database_header(&header)?;
header_accessor::set_freelist_trunk_page(self, page_id as u32)?;
// Clear flags
page.clear_uptodate();
page.clear_loaded();
@@ -1017,17 +950,17 @@ impl Pager {
(default_header.get_page_size() - default_header.reserved_space as u32) as u16,
);
let page1 = page1.get();
let contents = page1.get().contents.as_mut().unwrap();
let page1_ref = page1.get();
let contents = page1_ref.get().contents.as_mut().unwrap();
contents.write_database_header(&default_header);
page1.set_dirty();
self.add_dirty(page1.get().id);
let page_key = PageCacheKey::new(page1.get().id);
page1_ref.set_dirty();
self.add_dirty(page1_ref.get().id);
let page_key = PageCacheKey::new(page1_ref.get().id);
let mut cache = self.page_cache.write();
cache.insert(page_key, page1.clone()).map_err(|e| {
cache.insert(page_key, page1_ref.clone()).map_err(|e| {
LimboError::InternalError(format!("Failed to insert page 1 into cache: {:?}", e))
})?;
Ok(page1)
Ok(page1_ref.clone())
}
/*
@@ -1037,11 +970,11 @@ impl Pager {
// FIXME: handle no room in page cache
#[allow(clippy::readonly_write_lock)]
pub fn allocate_page(&self) -> Result<PageRef> {
let mut header = self.db_header()?;
header.database_size += 1;
let old_db_size = header_accessor::get_database_size(self)?;
let mut new_db_size = old_db_size + 1;
self.is_empty.store(false, Ordering::SeqCst);
tracing::debug!("allocate_page(database_size={})", header.database_size);
tracing::debug!("allocate_page(database_size={})", new_db_size);
#[cfg(not(feature = "omit_autovacuum"))]
{
@@ -1049,9 +982,9 @@ impl Pager {
// - autovacuum is enabled
// - the last page is a pointer map page
if matches!(*self.auto_vacuum_mode.borrow(), AutoVacuumMode::Full)
&& is_ptrmap_page(header.database_size, header.get_page_size() as usize)
&& is_ptrmap_page(new_db_size, header_accessor::get_page_size(self)? as usize)
{
let page = allocate_page(header.database_size as usize, &self.buffer_pool, 0);
let page = allocate_page(new_db_size as usize, &self.buffer_pool, 0);
page.set_dirty();
self.add_dirty(page.get().id);
@@ -1066,16 +999,15 @@ impl Pager {
))
}
}
// why do we increment here?
header.database_size += 1;
// we allocated a ptrmap page, so the next data page will be at new_db_size + 1
new_db_size += 1;
}
}
// update database size
self.write_database_header(&header)?;
header_accessor::set_database_size(self, new_db_size)?;
// FIXME: should reserve page cache entry before modifying the database
let page = allocate_page(header.database_size as usize, &self.buffer_pool, 0);
let page = allocate_page(new_db_size as usize, &self.buffer_pool, 0);
{
// setup page and add to cache
page.set_dirty();
@@ -1116,8 +1048,9 @@ impl Pager {
}
pub fn usable_size(&self) -> usize {
let db_header = self.db_header().unwrap();
(db_header.get_page_size() - db_header.reserved_space as u32) as usize
let page_size = header_accessor::get_page_size(self).unwrap_or_default() as u32;
let reserved_space = header_accessor::get_reserved_space(self).unwrap_or_default() as u32;
(page_size - reserved_space) as usize
}
}
@@ -1413,9 +1346,7 @@ mod ptrmap_tests {
let pager = Pager::new(db_file, wal, io, page_cache, buffer_pool, true).unwrap();
pager.allocate_page1().unwrap();
let mut header = pager.db_header().unwrap();
header.vacuum_mode_largest_root_page = 1;
pager.write_database_header(&header).unwrap();
header_accessor::set_vacuum_mode_largest_root_page(&pager, 1).unwrap();
pager.set_auto_vacuum_mode(AutoVacuumMode::Full);
// Allocate all the pages as btree root pages
@@ -1452,7 +1383,7 @@ mod ptrmap_tests {
// Ensure that the database header size is correctly reflected
assert_eq!(
pager.db_header().unwrap().database_size,
header_accessor::get_database_size(&pager).unwrap(),
initial_db_pages + 2
); // (1+1) -> (header + ptrmap)

View File

@@ -82,7 +82,7 @@ pub const MIN_PAGE_SIZE: u32 = 512;
const MAX_PAGE_SIZE: u32 = 65536;
/// The default page size in bytes.
const DEFAULT_PAGE_SIZE: u16 = 4096;
pub const DEFAULT_PAGE_SIZE: u16 = 4096;
pub const DATABASE_HEADER_PAGE_ID: usize = 1;

View File

@@ -13,11 +13,13 @@ use crate::storage::wal::CheckpointMode;
use crate::util::{normalize_ident, parse_signed_number};
use crate::vdbe::builder::{ProgramBuilder, ProgramBuilderOpts, QueryMode};
use crate::vdbe::insn::{Cookie, Insn};
use crate::{bail_parse_error, LimboError, Pager, Value};
use crate::{bail_parse_error, storage, LimboError, Value};
use std::str::FromStr;
use strum::IntoEnumIterator;
use super::integrity_check::translate_integrity_check;
use crate::storage::header_accessor;
use crate::storage::pager::Pager;
fn list_pragmas(program: &mut ProgramBuilder) {
for x in PragmaName::iter() {
@@ -340,7 +342,11 @@ fn query_pragma(
program.emit_result_row(register, 1);
}
PragmaName::PageSize => {
program.emit_int(pager.db_header()?.get_page_size().into(), register);
program.emit_int(
header_accessor::get_page_size(&pager)
.unwrap_or(storage::sqlite3_ondisk::DEFAULT_PAGE_SIZE) as i64,
register,
);
program.emit_result_row(register, 1);
program.add_pragma_result_column(pragma.to_string());
}
@@ -373,10 +379,8 @@ fn update_auto_vacuum_mode(
largest_root_page_number: u32,
pager: Rc<Pager>,
) -> crate::Result<()> {
let mut header_guard = pager.db_header()?;
header_guard.vacuum_mode_largest_root_page = largest_root_page_number;
header_accessor::set_vacuum_mode_largest_root_page(&pager, largest_root_page_number)?;
pager.set_auto_vacuum_mode(auto_vacuum_mode);
pager.write_database_header(&header_guard)?;
Ok(())
}
@@ -388,7 +392,8 @@ fn update_cache_size(
let mut cache_size_unformatted: i64 = value;
let mut cache_size = if cache_size_unformatted < 0 {
let kb = cache_size_unformatted.abs() * 1024;
let page_size = pager.db_header()?.get_page_size();
let page_size = header_accessor::get_page_size(&pager)
.unwrap_or(storage::sqlite3_ondisk::DEFAULT_PAGE_SIZE) as i64;
kb / page_size as i64
} else {
value

View File

@@ -7,6 +7,7 @@ use crate::storage::database::FileMemoryStorage;
use crate::storage::page_cache::DumbLruPageCache;
use crate::storage::pager::CreateBTreeFlags;
use crate::storage::wal::DummyWAL;
use crate::storage::{self, header_accessor};
use crate::translate::collate::CollationSeq;
use crate::types::{ImmutableRecord, Text};
use crate::util::normalize_ident;
@@ -3697,8 +3698,7 @@ pub fn op_function(
}
}
ScalarFunc::SqliteVersion => {
let header = pager.db_header()?;
let version_integer: i64 = header.version_number as i64;
let version_integer: i64 = header_accessor::get_version_number(&pager)? as i64;
let version = execute_sqlite_version(version_integer);
state.registers[*dest] = Register::Value(Value::build_text(version));
}
@@ -4903,7 +4903,7 @@ pub fn op_page_count(
// TODO: implement temp databases
todo!("temp databases not implemented yet");
}
let count = pager.db_header()?.database_size.into();
let count = header_accessor::get_database_size(pager)?.into();
state.registers[*dest] = Register::Value(Value::Integer(count));
state.pc += 1;
Ok(InsnFunctionStepResult::Step)
@@ -4980,9 +4980,11 @@ pub fn op_read_cookie(
todo!("temp databases not implemented yet");
}
let cookie_value = match cookie {
Cookie::UserVersion => pager.db_header()?.user_version.into(),
Cookie::SchemaVersion => pager.db_header()?.schema_cookie.into(),
Cookie::LargestRootPageNumber => pager.db_header()?.vacuum_mode_largest_root_page.into(),
Cookie::UserVersion => header_accessor::get_user_version(pager)?.into(),
Cookie::SchemaVersion => header_accessor::get_schema_cookie(pager)?.into(),
Cookie::LargestRootPageNumber => {
header_accessor::get_vacuum_mode_largest_root_page(pager)?.into()
}
cookie => todo!("{cookie:?} is not yet implement for ReadCookie"),
};
state.registers[*dest] = Register::Value(Value::Integer(cookie_value));
@@ -5011,19 +5013,13 @@ pub fn op_set_cookie(
}
match cookie {
Cookie::UserVersion => {
let mut header_guard = pager.db_header()?;
header_guard.user_version = *value;
pager.write_database_header(&header_guard)?;
header_accessor::set_user_version(pager, *value as i32)?;
}
Cookie::LargestRootPageNumber => {
let mut header_guard = pager.db_header()?;
header_guard.vacuum_mode_largest_root_page = *value as u32;
pager.write_database_header(&header_guard)?;
header_accessor::set_vacuum_mode_largest_root_page(pager, *value as u32)?;
}
Cookie::IncrementalVacuum => {
let mut header_guard = pager.db_header()?;
header_guard.incremental_vacuum_enabled = *value as u32;
pager.write_database_header(&header_guard)?;
header_accessor::set_incremental_vacuum_enabled(pager, *value as u32)?;
}
cookie => todo!("{cookie:?} is not yet implement for SetCookie"),
}
@@ -5225,8 +5221,9 @@ pub fn op_open_ephemeral(
true,
)?);
let header = pager.db_header()?;
buffer_pool.set_page_size(header.get_page_size() as usize);
let page_size = header_accessor::get_page_size(&pager)
.unwrap_or(storage::sqlite3_ondisk::DEFAULT_PAGE_SIZE) as usize;
buffer_pool.set_page_size(page_size);
let flag = if is_table {
&CreateBTreeFlags::new_table()