Merge 'Support ATTACH (read only)' from Glauber Costa

Support for attaching databases. The main difference from SQLite is that
we support an arbitrary number of attached databases rather than being
bound to a hard limit of around 100.
For now, we only support read-only attached databases. We open them as
read-only, and, to keep things simple, we don't patch any of the insert
machinery to resolve tables in attached databases. So if an insert
targets an attached database, it simply fails with a "no such table"
error, which is acceptable for now.
The code in core/translate/attach.rs was written by Claude, who also did
much of the boilerplate for the .databases command and for extending
pragma database_list, and helped with the test cases.

Closes #2235
Pekka Enberg
2025-07-25 10:33:36 +03:00
19 changed files with 920 additions and 82 deletions
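Before the diffs, a minimal sketch of the behavior described above, written against the stepping API the CLI changes below already use (conn.query, StepResult, run_once). The file name other.db, the table name t, and the exact module paths are assumptions for illustration, not part of this commit:

use turso_core::{Connection, StepResult};

// Prepare a statement and step it to completion; a sketch of the loop the CLI uses.
fn run(conn: &Connection, sql: &str) -> anyhow::Result<()> {
    if let Some(mut rows) = conn.query(sql)? {
        loop {
            match rows.step()? {
                StepResult::Row => {
                    let _row = rows.row().unwrap(); // ignore row contents in this sketch
                }
                StepResult::IO => rows.run_once()?,
                _ => break,
            }
        }
    }
    Ok(())
}

// Assumes `conn` is an open connection to the main database and other.db exists on disk.
fn attach_demo(conn: &Connection) -> anyhow::Result<()> {
    // Attach a second database under the alias "other" (read-only in this commit).
    run(conn, "ATTACH DATABASE 'other.db' AS other")?;
    // Reads through the qualified name work as usual.
    run(conn, "SELECT * FROM other.t")?;
    // The insert machinery is deliberately not patched to resolve tables in attached
    // databases, so a write fails (at prepare or execution) with "no such table".
    assert!(run(conn, "INSERT INTO other.t VALUES (1)").is_err());
    Ok(())
}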

View File

@@ -54,7 +54,7 @@ Turso aims to be fully compatible with SQLite, with opt-in features not supporte
|---------------------------|---------|-----------------------------------------------------------------------------------|
| ALTER TABLE | Yes | |
| ANALYZE | No | |
| ATTACH DATABASE | No | |
| ATTACH DATABASE | Partial | Read-only for now; writes to attached databases currently fail with a "no such table" error |
| BEGIN TRANSACTION | Partial | Transaction names are not supported. |
| COMMIT TRANSACTION | Partial | Transaction names are not supported. |
| CREATE INDEX | Partial | Disabled by default. |

View File

@@ -944,6 +944,119 @@ impl Limbo {
Ok(guard)
}
fn print_schema_entry(&mut self, db_display_name: &str, row: &turso_core::Row) -> bool {
if let Ok(Value::Text(schema)) = row.get::<&Value>(0) {
let modified_schema = if db_display_name == "main" {
schema.as_str().to_string()
} else {
// We need to modify the SQL to include the database prefix in table names
// This is a simple approach - for CREATE TABLE statements, insert db name after "TABLE "
// For CREATE INDEX statements, insert db name after "ON "
let schema_str = schema.as_str();
if schema_str.to_uppercase().contains("CREATE TABLE ") {
// Find "CREATE TABLE " and insert database name after it
if let Some(pos) = schema_str.to_uppercase().find("CREATE TABLE ") {
let before = &schema_str[..pos + "CREATE TABLE ".len()];
let after = &schema_str[pos + "CREATE TABLE ".len()..];
format!("{before}{db_display_name}.{after}")
} else {
schema_str.to_string()
}
} else if schema_str.to_uppercase().contains(" ON ") {
// For indexes, find " ON " and insert database name after it
if let Some(pos) = schema_str.to_uppercase().find(" ON ") {
let before = &schema_str[..pos + " ON ".len()];
let after = &schema_str[pos + " ON ".len()..];
format!("{before}{db_display_name}.{after}")
} else {
schema_str.to_string()
}
} else {
schema_str.to_string()
}
};
let _ = self.write_fmt(format_args!("{modified_schema};"));
true
} else {
false
}
}
fn query_one_table_schema(
&mut self,
db_prefix: &str,
db_display_name: &str,
table_name: &str,
) -> anyhow::Result<bool> {
let sql = format!(
"SELECT sql FROM {db_prefix}.sqlite_schema WHERE type IN ('table', 'index') AND tbl_name = '{table_name}' AND name NOT LIKE 'sqlite_%'"
);
let mut found = false;
match self.conn.query(&sql) {
Ok(Some(ref mut rows)) => loop {
match rows.step()? {
StepResult::Row => {
let row = rows.row().unwrap();
found |= self.print_schema_entry(db_display_name, row);
}
StepResult::IO => {
rows.run_once()?;
}
StepResult::Interrupt => break,
StepResult::Done => break,
StepResult::Busy => {
let _ = self.writeln("database is busy");
break;
}
}
},
Ok(None) => {}
Err(_) => {} // Table not found in this database
}
Ok(found)
}
fn query_all_tables_schema(
&mut self,
db_prefix: &str,
db_display_name: &str,
) -> anyhow::Result<()> {
let sql = format!(
"SELECT sql FROM {db_prefix}.sqlite_schema WHERE type IN ('table', 'index') AND name NOT LIKE 'sqlite_%'"
);
match self.conn.query(&sql) {
Ok(Some(ref mut rows)) => loop {
match rows.step()? {
StepResult::Row => {
let row = rows.row().unwrap();
self.print_schema_entry(db_display_name, row);
}
StepResult::IO => {
rows.run_once()?;
}
StepResult::Interrupt => break,
StepResult::Done => break,
StepResult::Busy => {
let _ = self.writeln("database is busy");
break;
}
}
},
Ok(None) => {}
Err(err) => {
// If we can't access this database's schema, just skip it
if !err.to_string().contains("no such table") {
eprintln!(
"Warning: Could not query schema for database '{db_display_name}': {err}"
);
}
}
}
Ok(())
}
fn display_schema(&mut self, table: Option<&str>) -> anyhow::Result<()> {
if !self.conn.is_db_initialized() {
if let Some(table_name) = table {
@@ -954,55 +1067,51 @@ impl Limbo {
return Ok(());
}
let sql = match table {
Some(table_name) => format!(
"SELECT sql FROM sqlite_schema WHERE type IN ('table', 'index') AND tbl_name = '{table_name}' AND name NOT LIKE 'sqlite_%'"
),
None => String::from(
"SELECT sql FROM sqlite_schema WHERE type IN ('table', 'index') AND name NOT LIKE 'sqlite_%'"
),
};
match table {
Some(table_spec) => {
// Parse table name to handle database prefixes (e.g., "db.table")
let clean_table_spec = table_spec.trim_end_matches(';');
match self.conn.query(&sql) {
Ok(Some(ref mut rows)) => {
let mut found = false;
loop {
match rows.step()? {
StepResult::Row => {
let row = rows.row().unwrap();
if let Ok(Value::Text(schema)) = row.get::<&Value>(0) {
let _ = self.write_fmt(format_args!("{};", schema.as_str()));
found = true;
}
}
StepResult::IO => {
rows.run_once()?;
}
StepResult::Interrupt => break,
StepResult::Done => break,
StepResult::Busy => {
let _ = self.writeln("database is busy");
break;
}
}
}
if !found {
if let Some(table_name) = table {
let _ = self
.write_fmt(format_args!("-- Error: Table '{table_name}' not found."));
let (target_db, table_name) =
if let Some((db, tbl)) = clean_table_spec.split_once('.') {
(db, tbl)
} else {
let _ = self.writeln("-- No tables or indexes found in the database.");
("main", clean_table_spec)
};
// Query only the specific table in the specific database
let found = if target_db == "main" {
self.query_one_table_schema("main", "main", table_name)?
} else {
// Check if the database is attached
let attached_databases = self.conn.list_attached_databases();
if attached_databases.contains(&target_db.to_string()) {
self.query_one_table_schema(target_db, target_db, table_name)?
} else {
false
}
};
if !found {
let table_display = if target_db == "main" {
table_name.to_string()
} else {
format!("{target_db}.{table_name}")
};
let _ = self
.write_fmt(format_args!("-- Error: Table '{table_display}' not found."));
}
}
Ok(None) => {
let _ = self.writeln("No results returned from the query.");
}
Err(err) => {
if err.to_string().contains("no such table: sqlite_schema") {
return Err(anyhow::anyhow!("Unable to access database schema. The database may be using an older SQLite version or may not be properly initialized."));
} else {
return Err(anyhow::anyhow!("Error querying schema: {}", err));
None => {
// Show schema for all tables in all databases
let attached_databases = self.conn.list_attached_databases();
// Query main database first
self.query_all_tables_schema("main", "main")?;
// Query all attached databases
for db_name in attached_databases {
self.query_all_tables_schema(&db_name, &db_name)?;
}
}
}

View File

@@ -323,6 +323,8 @@ pub enum ScalarFunc {
Likelihood,
TableColumnsJsonArray,
BinRecordJsonObject,
Attach,
Detach,
}
impl ScalarFunc {
@@ -382,6 +384,8 @@ impl ScalarFunc {
ScalarFunc::Likelihood => true,
ScalarFunc::TableColumnsJsonArray => true, // while columns of the table can change with DDL statements, within single query plan it's static
ScalarFunc::BinRecordJsonObject => true,
ScalarFunc::Attach => false, // changes database state
ScalarFunc::Detach => false, // changes database state
}
}
}
@@ -443,6 +447,8 @@ impl Display for ScalarFunc {
Self::Likelihood => "likelihood".to_string(),
Self::TableColumnsJsonArray => "table_columns_json_array".to_string(),
Self::BinRecordJsonObject => "bin_record_json_object".to_string(),
Self::Attach => "attach".to_string(),
Self::Detach => "detach".to_string(),
};
write!(f, "{str}")
}

View File

@@ -303,6 +303,7 @@ impl Database {
.map_err(|_| LimboError::SchemaLocked)?
.clone(),
),
database_schemas: RefCell::new(std::collections::HashMap::new()),
auto_commit: Cell::new(true),
mv_transactions: RefCell::new(Vec::new()),
transaction_state: Cell::new(TransactionState::None),
@@ -316,6 +317,7 @@ impl Database {
wal_checkpoint_disabled: Cell::new(false),
capture_data_changes: RefCell::new(CaptureDataChangesMode::Off),
closed: Cell::new(false),
attached_databases: RefCell::new(DatabaseIndexer::new()),
});
let builtin_syms = self.builtin_syms.borrow();
// add built-in extensions symbols to the connection to prevent having to load each time
@@ -579,10 +581,120 @@ impl CaptureDataChangesMode {
}
}
// Optimized for fast get() operations and supports unlimited attached databases.
struct DatabaseIndexer {
name_to_index: HashMap<String, usize>,
allocated: Vec<u64>,
index_to_data: HashMap<usize, (Arc<Database>, Rc<Pager>)>,
}
#[allow(unused)]
impl DatabaseIndexer {
fn new() -> Self {
Self {
name_to_index: HashMap::new(),
index_to_data: HashMap::new(),
allocated: vec![3], // bits 0 and 1 set (0b11): indices 0 and 1 are reserved for main and temp
}
}
fn get_database_by_index(&self, index: usize) -> Option<Arc<Database>> {
self.index_to_data
.get(&index)
.map(|(db, _pager)| db.clone())
}
fn get_database_by_name(&self, s: &str) -> Option<(usize, Arc<Database>)> {
match self.name_to_index.get(s) {
None => None,
Some(idx) => self
.index_to_data
.get(idx)
.map(|(db, _pager)| (*idx, db.clone())),
}
}
fn get_pager_by_index(&self, idx: &usize) -> Rc<Pager> {
let (_db, pager) = self
.index_to_data
.get(idx)
.expect("If we are looking up a database by index, it must exist.");
pager.clone()
}
fn add(&mut self, s: &str) -> usize {
assert_eq!(self.name_to_index.get(s), None);
let index = self.allocate_index();
self.name_to_index.insert(s.to_string(), index);
index
}
fn insert(&mut self, s: &str, data: (Arc<Database>, Rc<Pager>)) -> usize {
let idx = self.add(s);
self.index_to_data.insert(idx, data);
idx
}
fn remove(&mut self, s: &str) -> Option<usize> {
if let Some(index) = self.name_to_index.remove(s) {
// Should be impossible to remove main or temp.
assert!(index >= 2);
self.deallocate_index(index);
self.index_to_data.remove(&index);
Some(index)
} else {
None
}
}
#[inline(always)]
fn deallocate_index(&mut self, index: usize) {
let word_idx = index / 64;
let bit_idx = index % 64;
if word_idx < self.allocated.len() {
self.allocated[word_idx] &= !(1u64 << bit_idx);
}
}
fn allocate_index(&mut self) -> usize {
for word_idx in 0..self.allocated.len() {
let word = self.allocated[word_idx];
if word != u64::MAX {
let free_bit = Self::find_first_zero_bit(word);
let index = word_idx * 64 + free_bit;
self.allocated[word_idx] |= 1u64 << free_bit;
return index;
}
}
// Need to expand bitmap
let word_idx = self.allocated.len();
self.allocated.push(1u64); // Mark first bit as allocated
word_idx * 64
}
#[inline(always)]
fn find_first_zero_bit(word: u64) -> usize {
// Invert to find first zero as first one
let inverted = !word;
// Use trailing zeros count (compiles to single instruction on most CPUs)
inverted.trailing_zeros() as usize
}
}
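// --- Editor's illustration (not part of the commit): a standalone, runnable sketch
// --- of the bitmap arithmetic DatabaseIndexer uses above. The initial word 3 has
// --- bits 0 and 1 set for main and temp, so the first ATTACH gets index 2, and a
// --- detached index becomes reusable once its bit is cleared.
#[cfg(test)]
mod database_indexer_bitmap_sketch {
    fn find_first_zero_bit(word: u64) -> usize {
        (!word).trailing_zeros() as usize
    }
    #[test]
    fn first_attach_gets_index_2_and_indices_are_reused() {
        let mut allocated: Vec<u64> = vec![3]; // bits 0 and 1 reserved for main and temp
        // First ATTACH: the lowest clear bit is bit 2, so the database gets index 2.
        let idx = find_first_zero_bit(allocated[0]);
        allocated[0] |= 1u64 << idx;
        assert_eq!(idx, 2);
        // DETACH clears the bit, so the same index is handed out on the next ATTACH.
        allocated[0] &= !(1u64 << idx);
        assert_eq!(find_first_zero_bit(allocated[0]), 2);
        // A full word inverts to 0, whose trailing_zeros() is 64; that is why
        // allocate_index() skips words equal to u64::MAX and grows the bitmap
        // instead of calling find_first_zero_bit on them.
        assert_eq!((!u64::MAX).trailing_zeros(), 64);
    }
}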
pub struct Connection {
_db: Arc<Database>,
pager: RefCell<Rc<Pager>>,
schema: RefCell<Arc<Schema>>,
/// Per-database schema cache (database_index -> schema)
/// Loaded lazily to avoid copying all schemas on connection open
database_schemas: RefCell<std::collections::HashMap<usize, Arc<Schema>>>,
/// Whether to automatically commit transaction
auto_commit: Cell<bool>,
mv_transactions: RefCell<Vec<crate::mvcc::database::TxID>>,
@@ -599,6 +711,8 @@ pub struct Connection {
wal_checkpoint_disabled: Cell<bool>,
capture_data_changes: RefCell<CaptureDataChangesMode>,
closed: Cell<bool>,
/// Attached databases
attached_databases: RefCell<DatabaseIndexer>,
}
impl Connection {
@@ -727,6 +841,7 @@ impl Connection {
&[],
&mut table_ref_counter,
translate::plan::QueryDestination::ResultRows,
&self.clone(),
)?;
optimize_plan(&mut plan, self.schema.borrow().deref())?;
let _ = std::io::stdout().write_all(plan.to_string().as_bytes());
@@ -836,6 +951,21 @@ impl Connection {
Ok((io, conn))
}
#[cfg(feature = "fs")]
fn from_uri_attached(uri: &str, use_indexes: bool, use_mvcc: bool) -> Result<Arc<Database>> {
let mut opts = OpenOptions::parse(uri)?;
// FIXME: for now, we only support read-only ATTACH
opts.mode = OpenMode::ReadOnly;
let flags = opts.get_flags()?;
let (_io, db) =
Database::open_new(&opts.path, opts.vfs.as_ref(), flags, use_indexes, use_mvcc)?;
if let Some(modeof) = opts.modeof {
let perms = std::fs::metadata(modeof)?;
std::fs::set_permissions(&opts.path, perms.permissions())?;
}
Ok(db)
}
pub fn maybe_update_schema(&self) -> Result<()> {
let current_schema_version = self.schema.borrow().schema_version;
let schema = self
@@ -998,10 +1128,16 @@ impl Connection {
/// Check if a specific attached database is read only or not, by its index
pub fn is_readonly(&self, index: usize) -> bool {
// Only internal callers for now. Nobody should be passing
// anything else here
assert_eq!(index, 0);
self._db.is_readonly()
if index == 0 {
self._db.is_readonly()
} else {
let db = self
.attached_databases
.borrow()
.get_database_by_index(index);
db.expect("Should never have called this without being sure the database exists")
.is_readonly()
}
}
/// Reset the page size for the current connection.
@@ -1171,6 +1307,212 @@ impl Connection {
pub fn is_db_initialized(&self) -> bool {
self._db.db_state.is_initialized()
}
fn get_pager_from_database_index(&self, index: &usize) -> Rc<Pager> {
if *index < 2 {
self.pager.borrow().clone()
} else {
self.attached_databases.borrow().get_pager_by_index(index)
}
}
#[cfg(feature = "fs")]
fn is_attached(&self, alias: &str) -> bool {
self.attached_databases
.borrow()
.name_to_index
.contains_key(alias)
}
/// Attach a database file with the given alias name
#[cfg(not(feature = "fs"))]
pub(crate) fn attach_database(&self, _path: &str, _alias: &str) -> Result<()> {
return Err(LimboError::InvalidArgument(format!(
"attach not available in this build (no-fs)"
)));
}
/// Attach a database file with the given alias name
#[cfg(feature = "fs")]
pub(crate) fn attach_database(&self, path: &str, alias: &str) -> Result<()> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
if self.is_attached(alias) {
return Err(LimboError::InvalidArgument(format!(
"database {alias} is already in use"
)));
}
// Check for reserved database names
if alias.eq_ignore_ascii_case("main") || alias.eq_ignore_ascii_case("temp") {
return Err(LimboError::InvalidArgument(format!(
"reserved name {alias} is already in use"
)));
}
let use_indexes = self
._db
.schema
.lock()
.map_err(|_| LimboError::SchemaLocked)?
.indexes_enabled();
let use_mvcc = self._db.mv_store.is_some();
let db = Self::from_uri_attached(path, use_indexes, use_mvcc)?;
let pager = Rc::new(db.init_pager(None)?);
self.attached_databases
.borrow_mut()
.insert(alias, (db, pager));
Ok(())
}
// Detach a database by alias name
fn detach_database(&self, alias: &str) -> Result<()> {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
if alias == "main" || alias == "temp" {
return Err(LimboError::InvalidArgument(format!(
"cannot detach database: {alias}"
)));
}
// Remove from attached databases
let mut attached_dbs = self.attached_databases.borrow_mut();
if attached_dbs.remove(alias).is_none() {
return Err(LimboError::InvalidArgument(format!(
"no such database: {alias}"
)));
}
Ok(())
}
// Get an attached database by alias name
fn get_attached_database(&self, alias: &str) -> Option<(usize, Arc<Database>)> {
self.attached_databases.borrow().get_database_by_name(alias)
}
/// List all attached database aliases
pub fn list_attached_databases(&self) -> Vec<String> {
self.attached_databases
.borrow()
.name_to_index
.keys()
.cloned()
.collect()
}
/// Resolve database ID from a qualified name
pub(crate) fn resolve_database_id(&self, qualified_name: &ast::QualifiedName) -> Result<usize> {
use crate::util::normalize_ident;
// Check if this is a qualified name (database.table) or unqualified
if let Some(db_name) = &qualified_name.db_name {
let db_name_normalized = normalize_ident(&db_name.0);
if db_name_normalized.eq_ignore_ascii_case("main") {
Ok(0)
} else if db_name_normalized.eq_ignore_ascii_case("temp") {
Ok(1)
} else {
// Look up attached database
if let Some((idx, _attached_db)) = self.get_attached_database(&db_name_normalized) {
Ok(idx)
} else {
Err(LimboError::InvalidArgument(format!(
"no such database: {db_name_normalized}"
)))
}
}
} else {
// Unqualified table name - use main database
Ok(0)
}
}
/// Access schema for a database using a closure pattern to avoid cloning
pub(crate) fn with_schema<T>(&self, database_id: usize, f: impl FnOnce(&Schema) -> T) -> T {
if database_id == 0 {
// Main database - use connection's schema which should be kept in sync
let schema = self.schema.borrow();
f(&schema)
} else if database_id == 1 {
// Temp database - uses same schema as main for now, but this will change later.
let schema = self.schema.borrow();
f(&schema)
} else {
// Attached database - check cache first, then load from database
let mut schemas = self.database_schemas.borrow_mut();
if let Some(cached_schema) = schemas.get(&database_id) {
return f(cached_schema);
}
// Schema not cached, load it lazily from the attached database
let attached_dbs = self.attached_databases.borrow();
let (db, _pager) = attached_dbs
.index_to_data
.get(&database_id)
.expect("Database ID should be valid after resolve_database_id");
let schema = db
.schema
.lock()
.expect("Schema lock should not fail")
.clone();
// Cache the schema for future use
schemas.insert(database_id, schema.clone());
f(&schema)
}
}
// Get the canonical path for a database given its Database object
fn get_canonical_path_for_database(db: &Database) -> String {
if db.path == ":memory:" {
// For in-memory databases, SQLite shows empty string
String::new()
} else {
// For file databases, try to show the full absolute path if that doesn't fail
match std::fs::canonicalize(&db.path) {
Ok(abs_path) => abs_path.to_string_lossy().to_string(),
Err(_) => db.path.to_string(),
}
}
}
/// List all databases (main + attached) with their sequence numbers, names, and file paths
/// Returns a vector of tuples: (seq_number, name, file_path)
pub fn list_all_databases(&self) -> Vec<(usize, String, String)> {
let mut databases = Vec::new();
// Add main database (always seq=0, name="main")
let main_path = Self::get_canonical_path_for_database(&self._db);
databases.push((0, "main".to_string(), main_path));
// Add attached databases
let attached_dbs = self.attached_databases.borrow();
for (alias, &seq_number) in attached_dbs.name_to_index.iter() {
let file_path = if let Some((db, _pager)) = attached_dbs.index_to_data.get(&seq_number)
{
Self::get_canonical_path_for_database(db)
} else {
String::new()
};
databases.push((seq_number, alias.clone(), file_path));
}
// Sort by sequence number to ensure consistent ordering
databases.sort_by_key(|&(seq, _, _)| seq);
databases
}
}
pub struct Statement {

core/translate/attach.rs (new file, +183 lines)
View File

@@ -0,0 +1,183 @@
use crate::function::{Func, ScalarFunc};
use crate::schema::Schema;
use crate::translate::emitter::Resolver;
use crate::translate::expr::{sanitize_string, translate_expr};
use crate::translate::{ProgramBuilder, ProgramBuilderOpts};
use crate::util::normalize_ident;
use crate::vdbe::insn::Insn;
use crate::{Result, SymbolTable};
use turso_sqlite3_parser::ast::{Expr, Literal};
/// Translate ATTACH statement
/// SQLite implements ATTACH as a function call to sqlite_attach()
pub fn translate_attach(
expr: &Expr,
db_name: &Expr,
key: &Option<Box<Expr>>,
schema: &Schema,
syms: &SymbolTable,
mut program: ProgramBuilder,
) -> Result<ProgramBuilder> {
// SQLite treats ATTACH as a function call to sqlite_attach(filename, dbname, key)
// We'll allocate registers for the arguments and call the function
program.extend(&ProgramBuilderOpts {
num_cursors: 0,
approx_num_insns: 10,
approx_num_labels: 0,
});
let arg_reg = program.alloc_registers(4); // 3 for args + 1 for result
let resolver = Resolver::new(schema, syms);
// Load filename argument
// Handle different expression types as string literals for filenames
match expr {
Expr::Literal(Literal::String(s)) => {
// For ATTACH, string literals should be used directly (without quotes)
program.emit_insn(Insn::String8 {
value: sanitize_string(s),
dest: arg_reg,
});
}
Expr::Qualified(_, _) => {
// For ATTACH, qualified expressions like "foo.db" should be treated as filename strings
let filename = format!("{expr}");
program.emit_insn(Insn::String8 {
value: filename,
dest: arg_reg,
});
}
Expr::Id(id) => {
// For ATTACH, identifiers should be treated as filename strings
// Use normalize_ident to strip quotes from double-quoted identifiers
program.emit_insn(Insn::String8 {
value: normalize_ident(&id.0),
dest: arg_reg,
});
}
_ => {
translate_expr(&mut program, None, expr, arg_reg, &resolver)?;
}
}
// Load database name argument
// Handle different expression types as string literals for database names
match db_name {
Expr::Literal(Literal::String(s)) => {
// For ATTACH, string literals should be used directly (without quotes)
program.emit_insn(Insn::String8 {
value: sanitize_string(s),
dest: arg_reg + 1,
});
}
Expr::Qualified(_, _) => {
// For ATTACH, qualified expressions should be treated as name strings
let db_name_str = format!("{db_name}");
program.emit_insn(Insn::String8 {
value: db_name_str,
dest: arg_reg + 1,
});
}
Expr::Id(id) => {
// For ATTACH, identifiers should be treated as name strings
// Use normalize_ident to strip quotes from double-quoted identifiers
program.emit_insn(Insn::String8 {
value: normalize_ident(&id.0),
dest: arg_reg + 1,
});
}
_ => {
translate_expr(&mut program, None, db_name, arg_reg + 1, &resolver)?;
}
}
// Load key argument (NULL if not provided)
if let Some(key_expr) = key {
translate_expr(&mut program, None, key_expr, arg_reg + 2, &resolver)?;
} else {
program.emit_insn(Insn::Null {
dest: arg_reg + 2,
dest_end: None,
});
}
// Call sqlite_attach function
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: arg_reg,
dest: arg_reg + 3, // Result register (not used but required)
func: crate::function::FuncCtx {
func: Func::Scalar(ScalarFunc::Attach),
arg_count: 3,
},
});
program.epilogue(super::emitter::TransactionMode::None);
Ok(program)
}
/// Translate DETACH statement
/// SQLite implements DETACH as a function call to sqlite_detach()
pub fn translate_detach(
expr: &Expr,
schema: &Schema,
syms: &SymbolTable,
mut program: ProgramBuilder,
) -> Result<ProgramBuilder> {
// SQLite treats DETACH as a function call to sqlite_detach(dbname)
program.extend(&ProgramBuilderOpts {
num_cursors: 0,
approx_num_insns: 5,
approx_num_labels: 0,
});
let arg_reg = program.alloc_registers(2); // 1 for arg + 1 for result
let resolver = Resolver::new(schema, syms);
// Load database name argument
// Handle different expression types as string literals for database names
match expr {
Expr::Literal(Literal::String(s)) => {
// For DETACH, string literals should be used directly (without quotes)
program.emit_insn(Insn::String8 {
value: sanitize_string(s),
dest: arg_reg,
});
}
Expr::Qualified(_, _) => {
// For DETACH, qualified expressions should be treated as name strings
let db_name_str = format!("{expr}");
program.emit_insn(Insn::String8 {
value: db_name_str,
dest: arg_reg,
});
}
Expr::Id(id) => {
// For DETACH, identifiers should be treated as name strings
// Use normalize_ident to strip quotes from double-quoted identifiers
program.emit_insn(Insn::String8 {
value: normalize_ident(&id.0),
dest: arg_reg,
});
}
_ => {
translate_expr(&mut program, None, expr, arg_reg, &resolver)?;
}
}
// Call sqlite_detach function
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: arg_reg,
dest: arg_reg + 1, // Result register (not used but required)
func: crate::function::FuncCtx {
func: Func::Scalar(ScalarFunc::Detach),
arg_count: 1,
},
});
program.epilogue(super::emitter::TransactionMode::None);
Ok(program)
}

View File

@@ -75,6 +75,7 @@ pub fn prepare_delete_plan(
},
join_info: None,
col_used_mask: ColumnUsedMask::default(),
database_id: 0,
}];
let mut table_references = TableReferences::new(joined_tables, vec![]);

View File

@@ -1808,6 +1808,18 @@ pub fn translate_expr(
});
Ok(target_register)
}
ScalarFunc::Attach => {
// ATTACH is handled by the attach.rs module, not here
crate::bail_parse_error!(
"ATTACH should be handled at statement level, not as expression"
);
}
ScalarFunc::Detach => {
// DETACH is handled by the attach.rs module, not here
crate::bail_parse_error!(
"DETACH should be handled at statement level, not as expression"
);
}
}
}
Func::Math(math_func) => match math_func.arity() {

View File

@@ -1,4 +1,5 @@
use std::rc::Rc;
use std::sync::Arc;
use turso_sqlite3_parser::ast::{
DistinctNames, Expr, InsertBody, OneSelect, QualifiedName, ResolveType, ResultColumn, With,
@@ -44,6 +45,7 @@ pub fn translate_insert(
_returning: Option<Vec<ResultColumn>>,
syms: &SymbolTable,
mut program: ProgramBuilder,
connection: &Arc<crate::Connection>,
) -> Result<ProgramBuilder> {
let opts = ProgramBuilderOpts {
num_cursors: 1,
@@ -168,7 +170,14 @@ pub fn translate_insert(
coroutine_implementation_start: halt_label,
};
program.incr_nesting();
let result = translate_select(schema, *select, syms, program, query_destination)?;
let result = translate_select(
schema,
*select,
syms,
program,
query_destination,
connection,
)?;
program = result.program;
program.decr_nesting();

View File

@@ -207,14 +207,14 @@ pub fn init_loop(
program.emit_insn(Insn::OpenRead {
cursor_id,
root_page,
db: 0,
db: table.database_id,
});
}
if let Some(index_cursor_id) = index_cursor_id {
program.emit_insn(Insn::OpenRead {
cursor_id: index_cursor_id,
root_page: index.as_ref().unwrap().root_page,
db: 0,
db: table.database_id,
});
}
}
@@ -224,13 +224,13 @@ pub fn init_loop(
cursor_id: table_cursor_id
.expect("table cursor is always opened in OperationMode::DELETE"),
root_page: root_page.into(),
db: 0,
db: table.database_id,
});
if let Some(index_cursor_id) = index_cursor_id {
program.emit_insn(Insn::OpenWrite {
cursor_id: index_cursor_id,
root_page: index.as_ref().unwrap().root_page.into(),
db: 0,
db: table.database_id,
});
}
// For delete, we need to open all the other indexes too for writing
@@ -250,7 +250,7 @@ pub fn init_loop(
program.emit_insn(Insn::OpenWrite {
cursor_id,
root_page: index.root_page.into(),
db: 0,
db: table.database_id,
});
}
}
@@ -261,13 +261,13 @@ pub fn init_loop(
cursor_id: table_cursor_id
.expect("table cursor is always opened in OperationMode::UPDATE"),
root_page: root_page.into(),
db: 0,
db: table.database_id,
});
if let Some(index_cursor_id) = index_cursor_id {
program.emit_insn(Insn::OpenWrite {
cursor_id: index_cursor_id,
root_page: index.as_ref().unwrap().root_page.into(),
db: 0,
db: table.database_id,
});
}
}
@@ -292,7 +292,7 @@ pub fn init_loop(
program.emit_insn(Insn::OpenRead {
cursor_id: table_cursor_id,
root_page: table.table.get_root_page(),
db: 0,
db: table.database_id,
});
}
}
@@ -304,7 +304,7 @@ pub fn init_loop(
program.emit_insn(Insn::OpenWrite {
cursor_id: table_cursor_id,
root_page: table.table.get_root_page().into(),
db: 0,
db: table.database_id,
});
// For DELETE, we need to open all the indexes for writing
@@ -328,7 +328,7 @@ pub fn init_loop(
program.emit_insn(Insn::OpenWrite {
cursor_id,
root_page: index.root_page.into(),
db: 0,
db: table.database_id,
});
}
}
@@ -351,7 +351,7 @@ pub fn init_loop(
cursor_id: index_cursor_id
.expect("index cursor is always opened in Seek with index"),
root_page: index.root_page,
db: 0,
db: table.database_id,
});
}
OperationMode::UPDATE | OperationMode::DELETE => {
@@ -359,7 +359,7 @@ pub fn init_loop(
cursor_id: index_cursor_id
.expect("index cursor is always opened in Seek with index"),
root_page: index.root_page.into(),
db: 0,
db: table.database_id,
});
}
_ => {

View File

@@ -9,6 +9,7 @@
pub(crate) mod aggregation;
pub(crate) mod alter;
pub(crate) mod attach;
pub(crate) mod collate;
mod compound_select;
pub(crate) mod delete;
@@ -96,7 +97,7 @@ pub fn translate(
connection.clone(),
program,
)?,
stmt => translate_inner(schema, stmt, syms, program)?,
stmt => translate_inner(schema, stmt, syms, program, &connection)?,
};
// TODO: bring epilogue here when I can sort out what instructions correspond to a Write or a Read transaction
@@ -112,11 +113,14 @@ pub fn translate_inner(
stmt: ast::Stmt,
syms: &SymbolTable,
program: ProgramBuilder,
connection: &Arc<Connection>,
) -> Result<ProgramBuilder> {
let program = match stmt {
ast::Stmt::AlterTable(alter) => translate_alter_table(*alter, syms, schema, program)?,
ast::Stmt::Analyze(_) => bail_parse_error!("ANALYZE not supported yet"),
ast::Stmt::Attach { .. } => bail_parse_error!("ATTACH not supported yet"),
ast::Stmt::Attach { expr, db_name, key } => {
attach::translate_attach(&expr, &db_name, &key, schema, syms, program)?
}
ast::Stmt::Begin(tx_type, tx_name) => translate_tx_begin(tx_type, tx_name, program)?,
ast::Stmt::Commit(tx_name) => translate_tx_commit(tx_name, program)?,
ast::Stmt::CreateIndex {
@@ -154,7 +158,7 @@ pub fn translate_inner(
} = *delete;
translate_delete(schema, &tbl_name, where_clause, limit, syms, program)?
}
ast::Stmt::Detach(_) => bail_parse_error!("DETACH not supported yet"),
ast::Stmt::Detach(expr) => attach::translate_detach(&expr, schema, syms, program)?,
ast::Stmt::DropIndex {
if_exists,
idx_name,
@@ -182,6 +186,7 @@ pub fn translate_inner(
syms,
program,
plan::QueryDestination::ResultRows,
connection,
)?
.program
}
@@ -206,6 +211,7 @@ pub fn translate_inner(
returning,
syms,
program,
connection,
)?
}
};

View File

@@ -1310,6 +1310,7 @@ mod tests {
identifier: "t1".to_string(),
join_info: None,
col_used_mask: ColumnUsedMask::default(),
database_id: 0,
});
// Create where clause that only references second column
@@ -1401,6 +1402,7 @@ mod tests {
identifier: "t1".to_string(),
join_info: None,
col_used_mask: ColumnUsedMask::default(),
database_id: 0,
});
// Create where clause that references first and third columns
@@ -1517,6 +1519,7 @@ mod tests {
identifier: "t1".to_string(),
join_info: None,
col_used_mask: ColumnUsedMask::default(),
database_id: 0,
});
// Create where clause: c1 = 5 AND c2 > 10 AND c3 = 7
@@ -1666,6 +1669,7 @@ mod tests {
internal_id,
join_info,
col_used_mask: ColumnUsedMask::default(),
database_id: 0,
}
}

View File

@@ -617,6 +617,8 @@ pub struct JoinedTable {
/// Bitmask of columns that are referenced in the query.
/// Used to decide whether a covering index can be used.
pub col_used_mask: ColumnUsedMask,
/// The index of the database. "main" is always zero.
pub database_id: usize,
}
#[derive(Debug, Clone)]
@@ -938,6 +940,7 @@ impl JoinedTable {
internal_id,
join_info,
col_used_mask: ColumnUsedMask::default(),
database_id: 0,
}
}

View File

@@ -1,4 +1,5 @@
use std::cell::Cell;
use std::sync::Arc;
use super::{
expr::walk_expr,
@@ -249,6 +250,7 @@ pub fn bind_column_references(
})
}
#[allow(clippy::too_many_arguments)]
fn parse_from_clause_table(
schema: &Schema,
table: ast::SelectTable,
@@ -257,10 +259,10 @@ fn parse_from_clause_table(
ctes: &mut Vec<JoinedTable>,
syms: &SymbolTable,
table_ref_counter: &mut TableRefIdCounter,
connection: &Arc<crate::Connection>,
) -> Result<()> {
match table {
ast::SelectTable::Table(qualified_name, maybe_alias, _) => parse_table(
schema,
table_references,
ctes,
table_ref_counter,
@@ -268,6 +270,7 @@ fn parse_from_clause_table(
qualified_name,
maybe_alias,
None,
connection,
),
ast::SelectTable::Select(subselect, maybe_alias) => {
let Plan::Select(subplan) = prepare_select_plan(
@@ -280,6 +283,7 @@ fn parse_from_clause_table(
yield_reg: usize::MAX, // will be set later in bytecode emission
coroutine_implementation_start: BranchOffset::Placeholder, // will be set later in bytecode emission
},
connection,
)?
else {
crate::bail_parse_error!("Only non-compound SELECT queries are currently supported in FROM clause subqueries");
@@ -300,7 +304,6 @@ fn parse_from_clause_table(
Ok(())
}
ast::SelectTable::TableCall(qualified_name, maybe_args, maybe_alias) => parse_table(
schema,
table_references,
ctes,
table_ref_counter,
@@ -308,6 +311,7 @@ fn parse_from_clause_table(
qualified_name,
maybe_alias,
maybe_args,
connection,
),
_ => todo!(),
}
@@ -315,7 +319,6 @@ fn parse_from_clause_table(
#[allow(clippy::too_many_arguments)]
fn parse_table(
schema: &Schema,
table_references: &mut TableReferences,
ctes: &mut Vec<JoinedTable>,
table_ref_counter: &mut TableRefIdCounter,
@@ -323,8 +326,12 @@ fn parse_table(
qualified_name: QualifiedName,
maybe_alias: Option<As>,
maybe_args: Option<Vec<Expr>>,
connection: &Arc<crate::Connection>,
) -> Result<()> {
let normalized_qualified_name = normalize_ident(qualified_name.name.0.as_str());
let database_id = connection.resolve_database_id(&qualified_name)?;
let table_name = qualified_name.name;
// Check if the FROM clause table is referring to a CTE in the current scope.
if let Some(cte_idx) = ctes
.iter()
@@ -336,8 +343,10 @@ fn parse_table(
return Ok(());
};
// Check if our top level schema has this table.
if let Some(table) = schema.get_table(&normalized_qualified_name) {
// Resolve table using connection's with_schema method
let table = connection.with_schema(database_id, |schema| schema.get_table(&table_name.0));
if let Some(table) = table {
let alias = maybe_alias
.map(|a| match a {
ast::As::As(id) => id,
@@ -372,6 +381,7 @@ fn parse_table(
internal_id,
join_info: None,
col_used_mask: ColumnUsedMask::default(),
database_id,
});
return Ok(());
};
@@ -397,6 +407,7 @@ fn parse_table(
internal_id: table_ref_counter.next(),
join_info: None,
col_used_mask: ColumnUsedMask::default(),
database_id,
});
return Ok(());
}
@@ -471,6 +482,7 @@ fn contains_column_reference(top_level_expr: &Expr) -> Result<bool> {
Ok(contains)
}
#[allow(clippy::too_many_arguments)]
pub fn parse_from(
schema: &Schema,
mut from: Option<FromClause>,
@@ -479,6 +491,7 @@ pub fn parse_from(
out_where_clause: &mut Vec<WhereTerm>,
table_references: &mut TableReferences,
table_ref_counter: &mut TableRefIdCounter,
connection: &Arc<crate::Connection>,
) -> Result<()> {
if from.as_ref().and_then(|f| f.select.as_ref()).is_none() {
return Ok(());
@@ -541,6 +554,7 @@ pub fn parse_from(
yield_reg: usize::MAX, // will be set later in bytecode emission
coroutine_implementation_start: BranchOffset::Placeholder, // will be set later in bytecode emission
},
connection,
)?;
let Plan::Select(cte_plan) = cte_plan else {
crate::bail_parse_error!("Only SELECT queries are currently supported in CTEs");
@@ -565,6 +579,7 @@ pub fn parse_from(
&mut ctes_as_subqueries,
syms,
table_ref_counter,
connection,
)?;
for join in joins_owned.into_iter() {
@@ -576,6 +591,7 @@ pub fn parse_from(
out_where_clause,
table_references,
table_ref_counter,
connection,
)?;
}
@@ -781,6 +797,7 @@ pub fn determine_where_to_eval_expr(
Ok(eval_at)
}
#[allow(clippy::too_many_arguments)]
fn parse_join(
schema: &Schema,
join: ast::JoinedSelectTable,
@@ -789,6 +806,7 @@ fn parse_join(
out_where_clause: &mut Vec<WhereTerm>,
table_references: &mut TableReferences,
table_ref_counter: &mut TableRefIdCounter,
connection: &Arc<crate::Connection>,
) -> Result<()> {
let ast::JoinedSelectTable {
operator: join_operator,
@@ -804,6 +822,7 @@ fn parse_join(
ctes,
syms,
table_ref_counter,
connection,
)?;
let (outer, natural) = match join_operator {

View File

@@ -284,17 +284,21 @@ fn query_pragma(
let base_reg = register;
program.alloc_registers(2);
// For now, we only show the main database (seq=0)
// seq (sequence number)
program.emit_int(0, base_reg);
// Get all databases (main + attached) and emit a row for each
let all_databases = connection.list_all_databases();
for (seq_number, name, file_path) in all_databases {
// seq (sequence number)
program.emit_int(seq_number as i64, base_reg);
// name
program.emit_string8("main".into(), base_reg + 1);
// name (alias)
program.emit_string8(name, base_reg + 1);
let file_path = connection.get_database_canonical_path();
program.emit_string8(file_path, base_reg + 2);
// file path
program.emit_string8(file_path, base_reg + 2);
program.emit_result_row(base_reg, 3);
}
program.emit_result_row(base_reg, 3);
let pragma = pragma_for(&pragma);
for col_name in pragma.columns.iter() {
program.add_pragma_result_column(col_name.to_string());

View File

@@ -16,6 +16,7 @@ use crate::vdbe::builder::{ProgramBuilderOpts, TableRefIdCounter};
use crate::vdbe::insn::Insn;
use crate::SymbolTable;
use crate::{schema::Schema, vdbe::builder::ProgramBuilder, Result};
use std::sync::Arc;
use turso_sqlite3_parser::ast::{self, CompoundSelect, SortOrder};
use turso_sqlite3_parser::ast::{ResultColumn, SelectInner};
@@ -30,6 +31,7 @@ pub fn translate_select(
syms: &SymbolTable,
mut program: ProgramBuilder,
query_destination: QueryDestination,
connection: &Arc<crate::Connection>,
) -> Result<TranslateSelectResult> {
let mut select_plan = prepare_select_plan(
schema,
@@ -38,6 +40,7 @@ pub fn translate_select(
&[],
&mut program.table_reference_counter,
query_destination,
connection,
)?;
optimize_plan(&mut select_plan, schema)?;
let num_result_cols;
@@ -92,6 +95,7 @@ pub fn prepare_select_plan(
outer_query_refs: &[OuterQueryReference],
table_ref_counter: &mut TableRefIdCounter,
query_destination: QueryDestination,
connection: &Arc<crate::Connection>,
) -> Result<Plan> {
let compounds = select.body.compounds.take();
match compounds {
@@ -107,6 +111,7 @@ pub fn prepare_select_plan(
outer_query_refs,
table_ref_counter,
query_destination,
connection,
)?))
}
Some(compounds) => {
@@ -120,6 +125,7 @@ pub fn prepare_select_plan(
outer_query_refs,
table_ref_counter,
query_destination.clone(),
connection,
)?;
let mut left = Vec::with_capacity(compounds.len());
@@ -135,6 +141,7 @@ pub fn prepare_select_plan(
outer_query_refs,
table_ref_counter,
query_destination.clone(),
connection,
)?;
}
@@ -181,6 +188,7 @@ fn prepare_one_select_plan(
outer_query_refs: &[OuterQueryReference],
table_ref_counter: &mut TableRefIdCounter,
query_destination: QueryDestination,
connection: &Arc<crate::Connection>,
) -> Result<SelectPlan> {
match select {
ast::OneSelect::Select(select_inner) => {
@@ -223,6 +231,7 @@ fn prepare_one_select_plan(
&mut where_predicates,
&mut table_references,
table_ref_counter,
connection,
)?;
// Preallocate space for the result columns

View File

@@ -140,6 +140,7 @@ pub fn prepare_update_plan(
},
join_info: None,
col_used_mask: ColumnUsedMask::default(),
database_id: 0,
}];
let mut table_references = TableReferences::new(joined_tables, vec![]);
@@ -222,6 +223,7 @@ pub fn prepare_update_plan(
},
join_info: None,
col_used_mask: ColumnUsedMask::default(),
database_id: 0,
}];
let mut table_references = TableReferences::new(joined_tables, vec![]);

View File

@@ -873,7 +873,7 @@ pub fn op_open_read(
program: &Program,
state: &mut ProgramState,
insn: &Insn,
pager: &Rc<Pager>,
_pager: &Rc<Pager>,
mv_store: Option<&Rc<MvStore>>,
) -> Result<InsnFunctionStepResult> {
let Insn::OpenRead {
@@ -884,6 +884,9 @@ pub fn op_open_read(
else {
unreachable!("unexpected Insn {:?}", insn)
};
let pager = program.get_pager_from_database_index(db);
let (_, cursor_type) = program.cursor_ref.get(*cursor_id).unwrap();
let mv_cursor = match state.mv_tx_id {
Some(tx_id) => {
@@ -1916,10 +1919,10 @@ pub fn op_transaction(
program: &Program,
state: &mut ProgramState,
insn: &Insn,
pager: &Rc<Pager>,
_pager: &Rc<Pager>,
mv_store: Option<&Rc<MvStore>>,
) -> Result<InsnFunctionStepResult> {
let Insn::Transaction { db: 0, write } = insn else {
let Insn::Transaction { db, write } = insn else {
unreachable!("unexpected Insn {:?}", insn)
};
let conn = program.connection.clone();
@@ -1927,6 +1930,8 @@ pub fn op_transaction(
return Err(LimboError::ReadOnly);
}
let pager = program.get_pager_from_database_index(db);
if let Some(mv_store) = &mv_store {
if state.mv_tx_id.is_none() {
// We allocate the first page lazily in the first transaction.
@@ -4412,6 +4417,46 @@ pub fn op_function(
)?);
}
}
ScalarFunc::Attach => {
assert_eq!(arg_count, 3);
let filename = state.registers[*start_reg].get_owned_value();
let dbname = state.registers[*start_reg + 1].get_owned_value();
let _key = state.registers[*start_reg + 2].get_owned_value(); // Not used in read-only implementation
let Value::Text(filename_str) = filename else {
return Err(LimboError::InvalidArgument(
"attach: filename argument must be text".to_string(),
));
};
let Value::Text(dbname_str) = dbname else {
return Err(LimboError::InvalidArgument(
"attach: database name argument must be text".to_string(),
));
};
program
.connection
.attach_database(filename_str.as_str(), dbname_str.as_str())?;
state.registers[*dest] = Register::Value(Value::Null);
}
ScalarFunc::Detach => {
assert_eq!(arg_count, 1);
let dbname = state.registers[*start_reg].get_owned_value();
let Value::Text(dbname_str) = dbname else {
return Err(LimboError::InvalidArgument(
"detach: database name argument must be text".to_string(),
));
};
// Call the detach_database method on the connection
program.connection.detach_database(dbname_str.as_str())?;
// Set result to NULL (detach doesn't return a value)
state.registers[*dest] = Register::Value(Value::Null);
}
},
crate::function::Func::Vector(vector_func) => match vector_func {
VectorFunc::Vector => {
@@ -5623,7 +5668,7 @@ pub fn op_open_write(
program: &Program,
state: &mut ProgramState,
insn: &Insn,
pager: &Rc<Pager>,
_pager: &Rc<Pager>,
mv_store: Option<&Rc<MvStore>>,
) -> Result<InsnFunctionStepResult> {
let Insn::OpenWrite {
@@ -5637,6 +5682,8 @@ pub fn op_open_write(
if program.connection.is_readonly(*db) {
return Err(LimboError::ReadOnly);
}
let pager = program.get_pager_from_database_index(db);
let root_page = match root_page {
RegisterOrLiteral::Literal(lit) => *lit as u64,
RegisterOrLiteral::Register(reg) => match &state.registers[*reg].get_owned_value() {
@@ -5733,6 +5780,9 @@ pub fn op_create_btree(
let Insn::CreateBtree { db, root, flags } = insn else {
unreachable!("unexpected Insn {:?}", insn)
};
assert_eq!(*db, 0);
if program.connection.is_readonly(*db) {
return Err(LimboError::ReadOnly);
}

View File

@@ -382,6 +382,10 @@ pub struct Program {
}
impl Program {
fn get_pager_from_database_index(&self, idx: &usize) -> Rc<Pager> {
self.connection.get_pager_from_database_index(idx)
}
#[instrument(skip_all, level = Level::DEBUG)]
pub fn step(
&self,

testing/attach.test (new executable file, +75 lines)
View File

@@ -0,0 +1,75 @@
#!/usr/bin/env tclsh
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Test qualified table name access to main database
do_execsql_test_small attach-main-qualified {
SELECT count(id) FROM main.demo;
} {5}
# Test unqualified vs qualified access
do_execsql_test_small attach-unqualified-vs-qualified {
SELECT COUNT(*) FROM demo;
SELECT COUNT(*) FROM main.demo;
} {5
5}
# Test attach reserved name - main (should fail)
do_execsql_test_error attach-reserved-main {
ATTACH DATABASE "testing/testing_small.db" AS main
} {(.*in use.*)}
# Test attach reserved name - temp (should fail)
do_execsql_test_error attach-reserved-temp {
ATTACH DATABASE "testing/testing_small.db" AS temp
} {(.*in use.*)}
# Test attach duplicate database name - arbitrary (should fail)
do_execsql_test_error attach-duplicate-name {
ATTACH DATABASE "testing/testing_small.db" as small;
ATTACH DATABASE "testing/testing_small.db" as small;
} {(.*in use.*)}
# Test querying attached file database
do_execsql_test_on_specific_db {:memory:} attach-db-query {
ATTACH DATABASE "testing/testing_small.db" AS small;
SELECT value FROM small.demo where id = 1;
} {A}
# Test detach database
do_execsql_test_on_specific_db {:memory:} detach-database {
ATTACH DATABASE "testing/testing_small.db" AS small;
DETACH DATABASE small;
pragma database_list;
} {0|main|}
# Test detach non-existent database (should fail)
do_execsql_test_error detach-non-existent {
DETACH DATABASE nonexistent;
} {(.*no such database.*)}
# Test attach in-memory database
do_execsql_test_on_specific_db {:memory:} attach-memory-database {
ATTACH DATABASE ':memory:' AS mem;
pragma database_list;
} {0|main|
2|mem|}
# Test join between main and attached database
do_execsql_test_on_specific_db {:memory:} attach-cross-database-join {
ATTACH DATABASE "testing/testing_small.db" as small;
create table joiners (id int, otherid int);
insert into joiners (id, otherid) values (1,1);
insert into joiners (id, otherid) values (3,3);
select s.value from joiners j inner join small.demo s where j.otherid = s.id;
} {A
B}
# Test queries after detach (should fail for detached database)
do_execsql_test_error query-after-detach {
ATTACH DATABASE "testing/testing_small.db" as small;
DETACH DATABASE small;
select * from small.sqlite_schema;
} {(.*no such.*)}