From 9017acd6225507246c0a4e5179af0ef133f00391 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 14 Aug 2025 19:23:00 -0400 Subject: [PATCH 1/2] Ensure we fsync the db file in all paths that we checkpoint --- core/storage/pager.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 988e07f6c..eda314fef 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1415,6 +1415,9 @@ impl Pager { }))?)?; } checkpoint_result.release_guard(); + } else if checkpoint_result.num_backfilled != 0 { + self.io + .wait_for_completion(self.db_file.sync(Completion::new_sync(move |_| {}))?)?; } // TODO: only clear cache of things that are really invalidated From 2ad479f413a7d2290ac3898e0606a7c6d16b2ce8 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Thu, 14 Aug 2025 19:47:10 -0400 Subject: [PATCH 2/2] don't forget other instance where db file is expected size --- core/storage/pager.rs | 56 ++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/core/storage/pager.rs b/core/storage/pager.rs index eda314fef..2e6c2bf8a 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -1392,34 +1392,40 @@ impl Pager { let mut checkpoint_result = self.io.block(|| wal.borrow_mut().checkpoint(self, mode))?; - if checkpoint_result.everything_backfilled() && checkpoint_result.num_backfilled != 0 { - let db_size = self - .io - .block(|| self.with_header(|header| header.database_size))? - .get(); - let page_size = self.page_size.get().unwrap_or_default(); - let expected = (db_size * page_size.get()) as u64; - if expected < self.db_file.size()? 
{ - self.io.wait_for_completion(self.db_file.truncate( - expected as usize, - Completion::new_trunc(move |_| { - tracing::trace!( - "Database file truncated to expected size: {} bytes", - expected - ); - }), - )?)?; + 'ensure_sync: { + if checkpoint_result.num_backfilled != 0 { + if checkpoint_result.everything_backfilled() { + let db_size = self + .io + .block(|| self.with_header(|header| header.database_size))? + .get(); + let page_size = self.page_size.get().unwrap_or_default(); + let expected = (db_size * page_size.get()) as u64; + if expected < self.db_file.size()? { + self.io.wait_for_completion(self.db_file.truncate( + expected as usize, + Completion::new_trunc(move |_| { + tracing::trace!( + "Database file truncated to expected size: {} bytes", + expected + ); + }), + )?)?; + self.io + .wait_for_completion(self.db_file.sync(Completion::new_sync( + move |_| { + tracing::trace!("Database file syncd after truncation"); + }, + ))?)?; + break 'ensure_sync; + } + } + // if we backfilled at all, we have to sync the db-file here self.io - .wait_for_completion(self.db_file.sync(Completion::new_sync(move |_| { - tracing::trace!("Database file syncd after truncation"); - }))?)?; + .wait_for_completion(self.db_file.sync(Completion::new_sync(move |_| {}))?)?; } - checkpoint_result.release_guard(); - } else if checkpoint_result.num_backfilled != 0 { - self.io - .wait_for_completion(self.db_file.sync(Completion::new_sync(move |_| {}))?)?; } - + checkpoint_result.release_guard(); // TODO: only clear cache of things that are really invalidated self.page_cache .write()