diff --git a/core/storage/page_cache.rs b/core/storage/page_cache.rs
index 743832d12..344280fe1 100644
--- a/core/storage/page_cache.rs
+++ b/core/storage/page_cache.rs
@@ -7,17 +7,9 @@ use super::pager::PageRef;
 
 const DEFAULT_PAGE_CACHE_SIZE_IN_PAGES: usize = 2000;
 
-// In limbo, page cache is shared by default, meaning that multiple frames from WAL can reside in
-// the cache, meaning, we need a way to differentiate between pages cached in different
-// connections. For this we include the max_frame that a connection will read from so that if two
-// connections have different max_frames, they might or not have different frame read from WAL.
-//
-// WAL was introduced after Shared cache in SQLite, so this is why these two features don't work
-// well together because pages with different snapshots may collide.
 #[derive(Debug, Eq, Hash, PartialEq, Clone)]
 pub struct PageCacheKey {
     pgno: usize,
-    max_frame: Option<u64>,
 }
 
 #[allow(dead_code)]
@@ -67,8 +59,8 @@ pub enum CacheResizeResult {
 }
 
 impl PageCacheKey {
-    pub fn new(pgno: usize, max_frame: Option<u64>) -> Self {
-        Self { pgno, max_frame }
+    pub fn new(pgno: usize) -> Self {
+        Self { pgno }
     }
 }
 impl DumbLruPageCache {
@@ -604,7 +596,7 @@ mod tests {
     };
 
     fn create_key(id: usize) -> PageCacheKey {
-        PageCacheKey::new(id, Some(id as u64))
+        PageCacheKey::new(id)
     }
 
     #[allow(clippy::arc_with_non_send_sync)]
@@ -840,20 +832,6 @@
         cache.verify_list_integrity();
     }
 
-    #[test]
-    fn test_insert_same_id_different_frame() {
-        let mut cache = DumbLruPageCache::default();
-        let key1_1 = PageCacheKey::new(1, Some(1 as u64));
-        let key1_2 = PageCacheKey::new(1, Some(2 as u64));
-        let page1_1 = page_with_content(1);
-        let page1_2 = page_with_content(1);
-
-        assert!(cache.insert(key1_1.clone(), page1_1.clone()).is_ok());
-        assert!(cache.insert(key1_2.clone(), page1_2.clone()).is_ok());
-        assert_eq!(cache.len(), 2);
-        cache.verify_list_integrity();
-    }
-
     #[test]
     #[should_panic(expected = "Attempted to insert different page with same key")]
     fn test_insert_existing_key_fail() {
@@ -1015,8 +993,7 @@
             0 => {
                 // add
                 let id_page = rng.next_u64() % max_pages;
-                let id_frame = rng.next_u64() % max_pages;
-                let key = PageCacheKey::new(id_page as usize, Some(id_frame));
+                let key = PageCacheKey::new(id_page as usize);
                 #[allow(clippy::arc_with_non_send_sync)]
                 let page = Arc::new(Page::new(id_page as usize));
                 if let Some(_) = cache.peek(&key, false) {
@@ -1040,8 +1017,7 @@
                 let random = rng.next_u64() % 2 == 0;
                 let key = if random || lru.is_empty() {
                     let id_page: u64 = rng.next_u64() % max_pages;
-                    let id_frame = rng.next_u64() % max_pages;
-                    let key = PageCacheKey::new(id_page as usize, Some(id_frame));
+                    let key = PageCacheKey::new(id_page as usize);
                     key
                 } else {
                     let i = rng.next_u64() as usize % lru.len();
@@ -1066,7 +1042,6 @@
                 assert_eq!(page.get().id, key.pgno);
             }
         }
-        assert!(lru.len() > 0); // Check it inserted some
     }
 
     pub fn compare_to_lru(cache: &mut DumbLruPageCache, lru: &LruCache) {
diff --git a/core/storage/pager.rs b/core/storage/pager.rs
index 43af87f89..b36ce6f39 100644
--- a/core/storage/pager.rs
+++ b/core/storage/pager.rs
@@ -326,8 +326,7 @@
     pub fn read_page(&self, page_idx: usize) -> Result<PageRef> {
         tracing::trace!("read_page(page_idx = {})", page_idx);
         let mut page_cache = self.page_cache.write();
-        let max_frame = self.wal.borrow().get_max_frame();
-        let page_key = PageCacheKey::new(page_idx, Some(max_frame));
+        let page_key = PageCacheKey::new(page_idx);
         if let Some(page) = page_cache.get(&page_key) {
tracing::trace!("read_page(page_idx = {}) = cached", page_idx); return Ok(page.clone()); @@ -415,10 +414,9 @@ impl Pager { match state { FlushState::Start => { let db_size = self.db_header.lock().database_size; - let max_frame = self.wal.borrow().get_max_frame(); for page_id in self.dirty_pages.borrow().iter() { let mut cache = self.page_cache.write(); - let page_key = PageCacheKey::new(*page_id, Some(max_frame)); + let page_key = PageCacheKey::new(*page_id); let page = cache.get(&page_key).expect("we somehow added a page to dirty list but we didn't mark it as dirty, causing cache to drop it."); let page_type = page.get().contents.as_ref().unwrap().maybe_page_type(); trace!("cacheflush(page={}, page_type={:?}", page_id, page_type); @@ -673,6 +671,7 @@ impl Pager { loop { let first_page_ref = self.read_page(1)?; if first_page_ref.is_locked() { + // FIXME: we should never run io here! self.io.run_once()?; continue; } @@ -691,9 +690,8 @@ impl Pager { // setup page and add to cache page.set_dirty(); self.add_dirty(page.get().id); - let max_frame = self.wal.borrow().get_max_frame(); - let page_key = PageCacheKey::new(page.get().id, Some(max_frame)); + let page_key = PageCacheKey::new(page.get().id); let mut cache = self.page_cache.write(); match cache.insert(page_key, page.clone()) { Err(CacheError::Full) => return Err(LimboError::CacheFull), @@ -713,8 +711,7 @@ impl Pager { page: PageRef, ) -> Result<(), LimboError> { let mut cache = self.page_cache.write(); - let max_frame = self.wal.borrow().get_max_frame(); - let page_key = PageCacheKey::new(id, Some(max_frame)); + let page_key = PageCacheKey::new(id); // FIXME: use specific page key for writer instead of max frame, this will make readers not conflict assert!(page.is_dirty()); @@ -797,13 +794,13 @@ mod tests { let cache = cache.clone(); std::thread::spawn(move || { let mut cache = cache.write(); - let page_key = PageCacheKey::new(1, None); + let page_key = PageCacheKey::new(1); cache.insert(page_key, Arc::new(Page::new(1))).unwrap(); }) }; let _ = thread.join(); let mut cache = cache.write(); - let page_key = PageCacheKey::new(1, None); + let page_key = PageCacheKey::new(1); let page = cache.get(&page_key); assert_eq!(page.unwrap().get().id, 1); }