Merge 'core: Rename Connection::_db to db' from Pekka Enberg
Reviewed-by: Preston Thorpe <preston@turso.tech>

Closes #3196
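The change is mechanical: the Connection::_db field is renamed to db, and every access site is updated to match. The following is a minimal illustrative sketch of the before/after shape; the types and method body here are abbreviated stand-ins, not the real turso definitions, which carry schema, IO, WAL state and much more.

// Illustrative sketch only; `Database` is a hypothetical stand-in, not the turso type.
use std::sync::Arc;

struct Database {
    path: String,
}

pub struct Connection {
    // Previously spelled `_db`. A leading underscore conventionally marks an
    // intentionally unused binding, which no longer fits a field that is read
    // throughout the implementation, hence the rename.
    db: Arc<Database>,
}

impl Connection {
    pub fn get_database_canonical_path(&self) -> String {
        // Call sites change from `self._db.path` to `self.db.path`.
        if self.db.path == ":memory:" {
            String::new()
        } else {
            self.db.path.clone()
        }
    }
}

fn main() {
    let conn = Connection {
        db: Arc::new(Database { path: ":memory:".to_string() }),
    };
    assert_eq!(conn.get_database_canonical_path(), "");
}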
@@ -219,7 +219,7 @@ impl Connection {
     ///```
     pub unsafe fn _build_turso_ext(&self) -> ExtensionApi {
         let schema_mutex_ptr =
-            &self._db.schema as *const Mutex<Arc<Schema>> as *mut Mutex<Arc<Schema>>;
+            &self.db.schema as *const Mutex<Arc<Schema>> as *mut Mutex<Arc<Schema>>;
         let ctx = ExtensionCtx {
             syms: self.syms.data_ptr(),
             schema: schema_mutex_ptr as *mut c_void,
core/lib.rs
@@ -484,7 +484,7 @@ impl Database {
             .unwrap_or_default()
             .get();
         let conn = Arc::new(Connection {
-            _db: self.clone(),
+            db: self.clone(),
             pager: RefCell::new(Arc::new(pager)),
             schema: RefCell::new(
                 self.schema
@@ -967,7 +967,7 @@ impl DatabaseCatalog {
 }

 pub struct Connection {
-    _db: Arc<Database>,
+    db: Arc<Database>,
     pager: RefCell<Arc<Pager>>,
     schema: RefCell<Arc<Schema>>,
     /// Per-database schema cache (database_index -> schema)
@@ -1015,7 +1015,7 @@ impl Drop for Connection {
     fn drop(&mut self) {
         if !self.closed.get() {
             // if connection wasn't properly closed, decrement the connection counter
-            self._db
+            self.db
                 .n_connections
                 .fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
         }
@@ -1059,7 +1059,7 @@ impl Connection {
         )?;
         Ok(Statement::new(
             program,
-            self._db.mv_store.clone(),
+            self.db.mv_store.clone(),
             pager,
             mode,
         ))
@@ -1090,10 +1090,10 @@ impl Connection {
         };
         pager.end_read_tx().expect("read txn must be finished");

-        let db_schema_version = self._db.schema.lock().unwrap().schema_version;
+        let db_schema_version = self.db.schema.lock().unwrap().schema_version;
         tracing::debug!(
             "path: {}, db_schema_version={} vs on_disk_schema_version={}",
-            self._db.path,
+            self.db.path,
             db_schema_version,
             on_disk_schema_version
         );
@@ -1130,7 +1130,7 @@ impl Connection {
         reparse_result?;

         let schema = self.schema.borrow().clone();
-        self._db.update_schema_if_newer(schema)
+        self.db.update_schema_if_newer(schema)
     }

     fn reparse_schema(self: &Arc<Connection>) -> Result<()> {
@@ -1207,7 +1207,7 @@ impl Connection {
                 mode,
                 input,
             )?;
-            Statement::new(program, self._db.mv_store.clone(), pager.clone(), mode)
+            Statement::new(program, self.db.mv_store.clone(), pager.clone(), mode)
                 .run_ignore_rows()?;
         }
         Ok(())
@@ -1255,7 +1255,7 @@ impl Connection {
             mode,
             input,
         )?;
-        let stmt = Statement::new(program, self._db.mv_store.clone(), pager, mode);
+        let stmt = Statement::new(program, self.db.mv_store.clone(), pager, mode);
         Ok(Some(stmt))
     }

@@ -1291,7 +1291,7 @@ impl Connection {
                 mode,
                 input,
             )?;
-            Statement::new(program, self._db.mv_store.clone(), pager.clone(), mode)
+            Statement::new(program, self.db.mv_store.clone(), pager.clone(), mode)
                 .run_ignore_rows()?;
         }
         Ok(())
@@ -1394,7 +1394,7 @@ impl Connection {
     pub fn maybe_update_schema(&self) -> Result<()> {
         let current_schema_version = self.schema.borrow().schema_version;
         let schema = self
-            ._db
+            .db
             .schema
             .lock()
             .map_err(|_| LimboError::SchemaLocked)?;
@@ -1495,7 +1495,7 @@ impl Connection {
         use crate::storage::sqlite3_ondisk::parse_wal_frame_header;

         let c = self.pager.borrow().wal_get_frame(frame_no, frame)?;
-        self._db.io.wait_for_completion(c)?;
+        self.db.io.wait_for_completion(c)?;
         let (header, _) = parse_wal_frame_header(frame);
         Ok(WalFrameInfo {
             page_no: header.page_number,
@@ -1625,7 +1625,7 @@ impl Connection {
         }

         if self
-            ._db
+            .db
             .n_connections
             .fetch_sub(1, std::sync::atomic::Ordering::SeqCst)
             .eq(&1)
@@ -1681,14 +1681,14 @@ impl Connection {
     }

     pub fn get_database_canonical_path(&self) -> String {
-        if self._db.path == ":memory:" {
+        if self.db.path == ":memory:" {
             // For in-memory databases, SQLite shows empty string
             String::new()
         } else {
             // For file databases, try show the full absolute path if that doesn't fail
-            match std::fs::canonicalize(&self._db.path) {
+            match std::fs::canonicalize(&self.db.path) {
                 Ok(abs_path) => abs_path.to_string_lossy().to_string(),
-                Err(_) => self._db.path.to_string(),
+                Err(_) => self.db.path.to_string(),
             }
         }
     }
@@ -1696,7 +1696,7 @@ impl Connection {
     /// Check if a specific attached database is read only or not, by its index
     pub fn is_readonly(&self, index: usize) -> bool {
         if index == 0 {
-            self._db.is_readonly()
+            self.db.is_readonly()
         } else {
             let db = self
                 .attached_databases
@@ -1719,17 +1719,17 @@ impl Connection {
         };

         self.page_size.set(size);
-        if self._db.db_state.get() != DbState::Uninitialized {
+        if self.db.db_state.get() != DbState::Uninitialized {
             return Ok(());
         }

         {
-            let mut shared_wal = self._db.shared_wal.write();
+            let mut shared_wal = self.db.shared_wal.write();
             shared_wal.enabled.store(false, Ordering::SeqCst);
             shared_wal.file = None;
         }
         self.pager.borrow_mut().clear_page_cache();
-        let pager = self._db.init_pager(Some(size.get() as usize))?;
+        let pager = self.db.init_pager(Some(size.get() as usize))?;
         self.pager.replace(Arc::new(pager));
         self.pager.borrow().set_initial_page_size(size);

@@ -1738,7 +1738,7 @@ impl Connection {

     #[cfg(feature = "fs")]
     pub fn open_new(&self, path: &str, vfs: &str) -> Result<(Arc<dyn IO>, Arc<Database>)> {
-        Database::open_with_vfs(&self._db, path, vfs)
+        Database::open_with_vfs(&self.db, path, vfs)
     }

     pub fn list_vfs(&self) -> Vec<String> {
@@ -1813,11 +1813,11 @@ impl Connection {
     }

     pub fn experimental_views_enabled(&self) -> bool {
-        self._db.experimental_views_enabled()
+        self.db.experimental_views_enabled()
     }

     pub fn experimental_strict_enabled(&self) -> bool {
-        self._db.experimental_strict_enabled()
+        self.db.experimental_strict_enabled()
     }

     /// Query the current value(s) of `pragma_name` associated to
@@ -1861,7 +1861,7 @@ impl Connection {
     }

     pub fn is_db_initialized(&self) -> bool {
-        self._db.db_state.is_initialized()
+        self.db.db_state.is_initialized()
     }

     fn get_pager_from_database_index(&self, index: &usize) -> Arc<Pager> {
@@ -1909,21 +1909,21 @@ impl Connection {
         }

         let use_indexes = self
-            ._db
+            .db
             .schema
             .lock()
             .map_err(|_| LimboError::SchemaLocked)?
             .indexes_enabled();
-        let use_mvcc = self._db.mv_store.is_some();
-        let use_views = self._db.experimental_views_enabled();
-        let use_strict = self._db.experimental_strict_enabled();
+        let use_mvcc = self.db.mv_store.is_some();
+        let use_views = self.db.experimental_views_enabled();
+        let use_strict = self.db.experimental_strict_enabled();

         let db_opts = DatabaseOpts::new()
             .with_mvcc(use_mvcc)
             .with_indexes(use_indexes)
             .with_views(use_views)
             .with_strict(use_strict);
-        let db = Self::from_uri_attached(path, db_opts, self._db.io.clone())?;
+        let db = Self::from_uri_attached(path, db_opts, self.db.io.clone())?;
         let pager = Arc::new(db.init_pager(None)?);

         self.attached_databases
@@ -2059,7 +2059,7 @@ impl Connection {
         let mut databases = Vec::new();

         // Add main database (always seq=0, name="main")
-        let main_path = Self::get_canonical_path_for_database(&self._db);
+        let main_path = Self::get_canonical_path_for_database(&self.db);
         databases.push((0, "main".to_string(), main_path));

         // Add attached databases
@@ -2386,7 +2386,7 @@ impl Statement {
     fn reprepare(&mut self) -> Result<()> {
         tracing::trace!("repreparing statement");
         let conn = self.program.connection.clone();
-        *conn.schema.borrow_mut() = conn._db.clone_schema()?;
+        *conn.schema.borrow_mut() = conn.db.clone_schema()?;
         self.program = {
             let mut parser = Parser::new(self.program.sql.as_bytes());
             let cmd = parser.next_cmd()?;
@@ -804,7 +804,7 @@ pub(crate) fn commit_tx(
         let res = sm.step(&mv_store)?;
         match res {
             crate::state_machine::TransitionResult::Io(io) => {
-                io.wait(conn._db.io.as_ref())?;
+                io.wait(conn.db.io.as_ref())?;
             }
             crate::state_machine::TransitionResult::Continue => continue,
             crate::state_machine::TransitionResult::Done(_) => break,
@@ -828,7 +828,7 @@ pub(crate) fn commit_tx_no_conn(
         let res = sm.step(&mv_store)?;
         match res {
             crate::state_machine::TransitionResult::Io(io) => {
-                io.wait(conn._db.io.as_ref())?;
+                io.wait(conn.db.io.as_ref())?;
             }
             crate::state_machine::TransitionResult::Continue => continue,
             crate::state_machine::TransitionResult::Done(_) => break,
@@ -1143,7 +1143,7 @@ impl Pager {

         if schema_did_change {
             let schema = connection.schema.borrow().clone();
-            connection._db.update_schema_if_newer(schema)?;
+            connection.db.update_schema_if_newer(schema)?;
         }
         Ok(IOResult::Done(commit_status))
     }
@@ -2313,7 +2313,7 @@ impl Pager {
         }
         self.reset_internal_states();
         if schema_did_change {
-            connection.schema.replace(connection._db.clone_schema()?);
+            connection.schema.replace(connection.db.clone_schema()?);
         }
         if is_write {
             if let Some(wal) = self.wal.as_ref() {
@@ -2189,14 +2189,13 @@ pub fn op_transaction_inner(
         },
         insn
     );

     let pager = program.get_pager_from_database_index(db);
     loop {
         match state.op_transaction_state {
             OpTransactionState::Start => {
                 let conn = program.connection.clone();
                 let write = matches!(tx_mode, TransactionMode::Write);
-                if write && conn._db.open_flags.contains(OpenFlags::ReadOnly) {
+                if write && conn.db.open_flags.contains(OpenFlags::ReadOnly) {
                     return Err(LimboError::ReadOnly);
                 }

@@ -7219,7 +7218,7 @@ pub fn op_open_ephemeral(
         db_file_io = io;
     }

-    let buffer_pool = program.connection._db.buffer_pool.clone();
+    let buffer_pool = program.connection.db.buffer_pool.clone();
     let page_cache = Arc::new(RwLock::new(PageCache::default()));

     let pager = Arc::new(Pager::new(