From e67089b37709671fa73ab71448002ffb06fa9ba9 Mon Sep 17 00:00:00 2001
From: Pere Diaz Bou
Date: Fri, 6 Jun 2025 12:16:09 +0200
Subject: [PATCH 1/2] fix false double acquire on write lock

---
 core/storage/wal.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/storage/wal.rs b/core/storage/wal.rs
index 7f218490c..0d9769e3a 100644
--- a/core/storage/wal.rs
+++ b/core/storage/wal.rs
@@ -133,7 +133,7 @@ impl LimboRwLock {
                 // no op
                 false
             }
-            WRITE_LOCK => true,
+            WRITE_LOCK => false,
             _ => unreachable!(),
         };
         tracing::trace!("write_lock({})", ok);

From 914c1a44071236588b8f64944fe68cded07cf765 Mon Sep 17 00:00:00 2001
From: Pere Diaz Bou
Date: Fri, 6 Jun 2025 12:16:35 +0200
Subject: [PATCH 2/2] fix race condition with read lock in between unlock

---
 core/storage/wal.rs | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/core/storage/wal.rs b/core/storage/wal.rs
index 0d9769e3a..eb54e7eb8 100644
--- a/core/storage/wal.rs
+++ b/core/storage/wal.rs
@@ -106,8 +106,30 @@ impl LimboRwLock {
                 ok
             }
             SHARED_LOCK => {
+                // There is this race condition where we could've unlocked after loading lock ==
+                // SHARED_LOCK.
                 self.nreads.fetch_add(1, Ordering::SeqCst);
-                true
+                let lock_after_load = self.lock.load(Ordering::SeqCst);
+                if lock_after_load != lock {
+                    // try to lock it again
+                    let res = self.lock.compare_exchange(
+                        lock_after_load,
+                        SHARED_LOCK,
+                        Ordering::SeqCst,
+                        Ordering::SeqCst,
+                    );
+                    let ok = res.is_ok();
+                    if ok {
+                        // we were able to acquire it back
+                        true
+                    } else {
+                        // we couldn't acquire it back, reduce number again
+                        self.nreads.fetch_sub(1, Ordering::SeqCst);
+                        false
+                    }
+                } else {
+                    true
+                }
             }
             WRITE_LOCK => false,
             _ => unreachable!(),
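
To make the intent of the two patches easier to review, here is a minimal, self-contained sketch of the locking logic as read from the diff. It is not the actual LimboRwLock from core/storage/wal.rs: the state constants (NO_LOCK, SHARED_LOCK, WRITE_LOCK), their numeric values, the field types, and the unlock behaviour are assumptions made so the example compiles on its own. The SHARED_LOCK arm of read_lock mirrors patch 2 (re-check the state after bumping nreads and roll back if the shared lock cannot be reclaimed), and the WRITE_LOCK arm returning false mirrors patch 1 (an already write-locked lock must not report a successful acquire).

```rust
use std::sync::atomic::{AtomicU32, AtomicUsize, Ordering};

// Assumed state encoding; the real constants in core/storage/wal.rs may differ.
const NO_LOCK: u32 = 0;
const SHARED_LOCK: u32 = 1;
const WRITE_LOCK: u32 = 2;

// Simplified stand-in for LimboRwLock: an atomic state word plus a reader count.
struct RwLockSketch {
    lock: AtomicU32,
    nreads: AtomicUsize,
}

impl RwLockSketch {
    fn new() -> Self {
        Self {
            lock: AtomicU32::new(NO_LOCK),
            nreads: AtomicUsize::new(0),
        }
    }

    // Patch 2: after bumping `nreads`, re-read the state; if it changed under us
    // (e.g. the last reader unlocked in between), try to CAS it back to
    // SHARED_LOCK, and undo the increment if that fails.
    fn read_lock(&self) -> bool {
        let lock = self.lock.load(Ordering::SeqCst);
        match lock {
            NO_LOCK => {
                let acquired = self.lock
                    .compare_exchange(NO_LOCK, SHARED_LOCK, Ordering::SeqCst, Ordering::SeqCst)
                    .is_ok();
                if acquired {
                    self.nreads.fetch_add(1, Ordering::SeqCst);
                }
                acquired
            }
            SHARED_LOCK => {
                self.nreads.fetch_add(1, Ordering::SeqCst);
                let lock_after_load = self.lock.load(Ordering::SeqCst);
                if lock_after_load == lock {
                    return true;
                }
                // The state moved between our first load and the increment:
                // try to reclaim the shared lock.
                let reclaimed = self.lock
                    .compare_exchange(lock_after_load, SHARED_LOCK, Ordering::SeqCst, Ordering::SeqCst)
                    .is_ok();
                if !reclaimed {
                    // Could not reclaim it (e.g. a writer won the race): roll back.
                    self.nreads.fetch_sub(1, Ordering::SeqCst);
                }
                reclaimed
            }
            // A writer holds the lock, so a reader cannot acquire it.
            WRITE_LOCK => false,
            _ => unreachable!(),
        }
    }

    // Patch 1: when the lock is already write-locked, report failure instead of
    // pretending the second acquire succeeded.
    fn write_lock(&self) -> bool {
        match self.lock.load(Ordering::SeqCst) {
            NO_LOCK => self.lock
                .compare_exchange(NO_LOCK, WRITE_LOCK, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok(),
            SHARED_LOCK => false, // readers present, writer cannot enter
            WRITE_LOCK => false,  // already held: no false double acquire
            _ => unreachable!(),
        }
    }

    // Assumed unlock behaviour: the last reader resets the state to NO_LOCK.
    fn read_unlock(&self) {
        if self.nreads.fetch_sub(1, Ordering::SeqCst) == 1 {
            self.lock.store(NO_LOCK, Ordering::SeqCst);
        }
    }
}

fn main() {
    let l = RwLockSketch::new();
    assert!(l.read_lock());
    assert!(l.read_lock());   // second reader piggybacks on the shared lock
    assert!(!l.write_lock()); // writer blocked while readers are active
    l.read_unlock();
    l.read_unlock();
    assert!(l.write_lock());  // now the writer can take the lock
}
```

The property patch 2 restores is that `nreads` is only left incremented when the lock state is actually SHARED_LOCK: without the re-check, a reader racing with the last unlock could bump `nreads` after the state had already dropped back to an unlocked state (or been taken by a writer), leaving the counter and the state word inconsistent.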