support doubly qualified identifiers

Glauber Costa
2025-07-25 11:10:22 -05:00
parent 76cf3444c6
commit b5927dcfd5
8 changed files with 204 additions and 68 deletions
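A doubly qualified identifier names a column as database.table.column rather than just table.column. The parser's AST already carries an Expr::DoublyQualified variant for this form; this commit teaches bind_column_references to handle it, resolving the database name through the connection (resolve_database_id / with_schema) and rewriting the reference to an ordinary Expr::Column. To make the connection available at bind time, a connection: &Arc<crate::Connection> parameter is threaded through translate_alter_table, translate_delete, translate_update, and the planner helpers that eventually call the binder. The new tests at the bottom exercise the feature, for example:

    SELECT main.users.id FROM users;
    DELETE FROM test WHERE main.test.col = 2;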

View File

@@ -1,4 +1,5 @@
use fallible_iterator::FallibleIterator as _;
use std::sync::Arc;
use turso_sqlite3_parser::{ast, lexer::sql::Parser};
use crate::{
@@ -21,6 +22,7 @@ pub fn translate_alter_table(
syms: &SymbolTable,
schema: &Schema,
mut program: ProgramBuilder,
connection: &Arc<crate::Connection>,
) -> Result<ProgramBuilder> {
let (table_name, alter_table) = alter;
let table_name = table_name.name.as_str();
@@ -93,64 +95,72 @@ pub fn translate_alter_table(
unreachable!();
};
translate_update_with_after(schema, &mut update, syms, program, |program| {
    let column_count = btree.columns.len();
    let root_page = btree.root_page;
    let table_name = btree.name.clone();
    let cursor_id = program
        .alloc_cursor_id(crate::vdbe::builder::CursorType::BTreeTable(original_btree));
    program.emit_insn(Insn::OpenWrite {
        cursor_id,
        root_page: RegisterOrLiteral::Literal(root_page),
        db: 0,
    });
    program.cursor_loop(cursor_id, |program, rowid| {
        let first_column = program.alloc_registers(column_count);
        let mut iter = first_column;
        for i in 0..(column_count + 1) {
            if i == dropped_index {
                continue;
            }
            program.emit_column(cursor_id, i, iter);
            iter += 1;
        }
        let record = program.alloc_register();
        program.emit_insn(Insn::MakeRecord {
            start_reg: first_column,
            count: column_count,
            dest_reg: record,
            index_name: None,
        });
        program.emit_insn(Insn::SetCookie {
            db: 0,
            cookie: Cookie::SchemaVersion,
            value: schema.schema_version as i32 + 1,
            p5: 0,
        });
        program.emit_insn(Insn::Insert {
            cursor: cursor_id,
            key_reg: rowid,
            record_reg: record,
            flag: crate::vdbe::insn::InsertFlags(0),
            table_name: table_name.clone(),
        });
    });
    program.emit_insn(Insn::ParseSchema {
        db: usize::MAX, // TODO: This value is unused, change when we do something with it
        where_clause: None,
    })
})?
translate_update_with_after(
    schema,
    &mut update,
    syms,
    program,
    connection,
    |program| {
        let column_count = btree.columns.len();
        let root_page = btree.root_page;
        let table_name = btree.name.clone();
        let cursor_id = program.alloc_cursor_id(
            crate::vdbe::builder::CursorType::BTreeTable(original_btree),
        );
        program.emit_insn(Insn::OpenWrite {
            cursor_id,
            root_page: RegisterOrLiteral::Literal(root_page),
            db: 0,
        });
        program.cursor_loop(cursor_id, |program, rowid| {
            let first_column = program.alloc_registers(column_count);
            let mut iter = first_column;
            for i in 0..(column_count + 1) {
                if i == dropped_index {
                    continue;
                }
                program.emit_column(cursor_id, i, iter);
                iter += 1;
            }
            let record = program.alloc_register();
            program.emit_insn(Insn::MakeRecord {
                start_reg: first_column,
                count: column_count,
                dest_reg: record,
                index_name: None,
            });
            program.emit_insn(Insn::SetCookie {
                db: 0,
                cookie: Cookie::SchemaVersion,
                value: schema.schema_version as i32 + 1,
                p5: 0,
            });
            program.emit_insn(Insn::Insert {
                cursor: cursor_id,
                key_reg: rowid,
                record_reg: record,
                flag: crate::vdbe::insn::InsertFlags(0),
                table_name: table_name.clone(),
            });
        });
        program.emit_insn(Insn::ParseSchema {
            db: usize::MAX, // TODO: This value is unused, change when we do something with it
            where_clause: None,
        })
    },
)?
}
ast::AlterTableBody::AddColumn(col_def) => {
let column = Column::from(col_def);
@@ -198,18 +208,25 @@ pub fn translate_alter_table(
unreachable!();
};
translate_update_with_after(schema, &mut update, syms, program, |program| {
program.emit_insn(Insn::SetCookie {
db: 0,
cookie: Cookie::SchemaVersion,
value: schema.schema_version as i32 + 1,
p5: 0,
});
program.emit_insn(Insn::ParseSchema {
db: usize::MAX, // TODO: This value is unused, change when we do something with it
where_clause: None,
});
})?
translate_update_with_after(
schema,
&mut update,
syms,
program,
connection,
|program| {
program.emit_insn(Insn::SetCookie {
db: 0,
cookie: Cookie::SchemaVersion,
value: schema.schema_version as i32 + 1,
p5: 0,
});
program.emit_insn(Insn::ParseSchema {
db: usize::MAX, // TODO: This value is unused, change when we do something with it
where_clause: None,
});
},
)?
}
ast::AlterTableBody::RenameColumn { old, new } => {
let rename_from = old.as_str();

View File

@@ -5,6 +5,7 @@ use crate::translate::plan::{DeletePlan, Operation, Plan};
use crate::translate::planner::{parse_limit, parse_where};
use crate::vdbe::builder::{ProgramBuilder, ProgramBuilderOpts, TableRefIdCounter};
use crate::{schema::Schema, Result, SymbolTable};
use std::sync::Arc;
use turso_sqlite3_parser::ast::{Expr, Limit, QualifiedName};
use super::plan::{ColumnUsedMask, IterationDirection, JoinedTable, TableReferences};
@@ -16,6 +17,7 @@ pub fn translate_delete(
limit: Option<Box<Limit>>,
syms: &SymbolTable,
mut program: ProgramBuilder,
connection: &Arc<crate::Connection>,
) -> Result<ProgramBuilder> {
if schema.table_has_indexes(&tbl_name.name.to_string()) && !schema.indexes_enabled() {
// Let's disable altering a table with indices altogether instead of checking column by
@@ -30,6 +32,7 @@ pub fn translate_delete(
where_clause,
limit,
&mut program.table_reference_counter,
connection,
)?;
optimize_plan(&mut delete_plan, schema)?;
let Plan::Delete(ref delete) = delete_plan else {
@@ -51,6 +54,7 @@ pub fn prepare_delete_plan(
where_clause: Option<Box<Expr>>,
limit: Option<Box<Limit>>,
table_ref_counter: &mut TableRefIdCounter,
connection: &Arc<crate::Connection>,
) -> Result<Plan> {
let table = match schema.get_table(tbl_name.name.as_str()) {
Some(table) => table,
@@ -87,6 +91,7 @@ pub fn prepare_delete_plan(
&mut table_references,
None,
&mut where_predicates,
connection,
)?;
// Parse the LIMIT/OFFSET clause

View File

@@ -116,7 +116,9 @@ pub fn translate_inner(
connection: &Arc<Connection>,
) -> Result<ProgramBuilder> {
let program = match stmt {
ast::Stmt::AlterTable(alter) => translate_alter_table(*alter, syms, schema, program)?,
ast::Stmt::AlterTable(alter) => {
translate_alter_table(*alter, syms, schema, program, connection)?
}
ast::Stmt::Analyze(_) => bail_parse_error!("ANALYZE not supported yet"),
ast::Stmt::Attach { expr, db_name, key } => {
attach::translate_attach(&expr, &db_name, &key, schema, syms, program)?
@@ -156,7 +158,15 @@ pub fn translate_inner(
limit,
..
} = *delete;
translate_delete(schema, &tbl_name, where_clause, limit, syms, program)?
translate_delete(
schema,
&tbl_name,
where_clause,
limit,
syms,
program,
connection,
)?
}
ast::Stmt::Detach(expr) => attach::translate_detach(&expr, schema, syms, program)?,
ast::Stmt::DropIndex {
@@ -190,7 +200,9 @@ pub fn translate_inner(
)?
.program
}
ast::Stmt::Update(mut update) => translate_update(schema, &mut update, syms, program)?,
ast::Stmt::Update(mut update) => {
translate_update(schema, &mut update, syms, program, connection)?
}
ast::Stmt::Vacuum(_, _) => bail_parse_error!("VACUUM not supported yet"),
ast::Stmt::Insert(insert) => {
let Insert {

View File

@@ -109,6 +109,7 @@ pub fn bind_column_references(
top_level_expr: &mut Expr,
referenced_tables: &mut TableReferences,
result_columns: Option<&[ResultSetColumn]>,
connection: &Arc<crate::Connection>,
) -> Result<()> {
walk_expr_mut(top_level_expr, &mut |expr: &mut Expr| -> Result<()> {
match expr {
@@ -247,6 +248,75 @@ pub fn bind_column_references(
referenced_tables.mark_column_used(tbl_id, col_idx);
Ok(())
}
Expr::DoublyQualified(db_name, tbl_name, col_name) => {
let normalized_col_name = normalize_ident(col_name.as_str());
// Create a QualifiedName and use existing resolve_database_id method
let qualified_name = ast::QualifiedName {
db_name: Some(db_name.clone()),
name: tbl_name.clone(),
alias: None,
};
let database_id = connection.resolve_database_id(&qualified_name)?;
// Get the table from the specified database
let table = connection
.with_schema(database_id, |schema| schema.get_table(tbl_name.as_str()))
.ok_or_else(|| {
crate::LimboError::ParseError(format!(
"no such table: {}.{}",
db_name.as_str(),
tbl_name.as_str()
))
})?;
// Find the column in the table
let col_idx = table
.columns()
.iter()
.position(|c| {
c.name
.as_ref()
.is_some_and(|name| name.eq_ignore_ascii_case(&normalized_col_name))
})
.ok_or_else(|| {
crate::LimboError::ParseError(format!(
"Column: {}.{}.{} not found",
db_name.as_str(),
tbl_name.as_str(),
col_name.as_str()
))
})?;
let col = table.columns().get(col_idx).unwrap();
// Check if this is a rowid alias
let is_rowid_alias = col.is_rowid_alias;
// Convert to Column expression - since this is a cross-database reference,
// we need to create a synthetic table reference for it
// For now, we'll error if the table isn't already in the referenced tables
let normalized_tbl_name = normalize_ident(tbl_name.as_str());
let matching_tbl = referenced_tables
.find_table_and_internal_id_by_identifier(&normalized_tbl_name);
if let Some((tbl_id, _)) = matching_tbl {
// Table is already in referenced tables, use existing internal ID
*expr = Expr::Column {
database: Some(database_id),
table: tbl_id,
column: col_idx,
is_rowid_alias,
};
referenced_tables.mark_column_used(tbl_id, col_idx);
} else {
return Err(crate::LimboError::ParseError(format!(
"table {normalized_tbl_name} is not in FROM clause - cross-database column references require the table to be explicitly joined"
)));
}
Ok(())
}
_ => Ok(()),
}
})
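As the new arm above shows, a doubly qualified reference is only rewritten when its table is already present in the FROM clause of the current statement; otherwise binding fails with the "is not in FROM clause" parse error instead of synthesizing a table reference. On success the expression is lowered to the same Expr::Column shape used for singly qualified names, with the resolved database id recorded (a restatement of the arm above, annotated):

    // e.g. main.users.id, with users already joined in the FROM clause
    *expr = Expr::Column {
        database: Some(database_id), // from connection.resolve_database_id(&qualified_name)
        table: tbl_id,               // internal id of the matching FROM-clause table
        column: col_idx,             // position of the column in the table definition
        is_rowid_alias,              // whether the column aliases the rowid
    };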
@@ -605,12 +675,13 @@ pub fn parse_where(
table_references: &mut TableReferences,
result_columns: Option<&[ResultSetColumn]>,
out_where_clause: &mut Vec<WhereTerm>,
connection: &Arc<crate::Connection>,
) -> Result<()> {
if let Some(where_expr) = where_clause {
let mut predicates = vec![];
break_predicate_at_and_boundaries(where_expr, &mut predicates);
for expr in predicates.iter_mut() {
bind_column_references(expr, table_references, result_columns)?;
bind_column_references(expr, table_references, result_columns, connection)?;
}
for expr in predicates {
out_where_clause.push(WhereTerm {
@@ -893,7 +964,7 @@ fn parse_join(
let mut preds = vec![];
break_predicate_at_and_boundaries(expr, &mut preds);
for predicate in preds.iter_mut() {
bind_column_references(predicate, table_references, None)?;
bind_column_references(predicate, table_references, None, connection)?;
}
for pred in preds {
out_where_clause.push(WhereTerm {

View File

@@ -337,6 +337,7 @@ fn prepare_one_select_plan(
expr,
&mut plan.table_references,
Some(&plan.result_columns),
connection,
)?;
match expr {
ast::Expr::FunctionCall {
@@ -530,6 +531,7 @@ fn prepare_one_select_plan(
&mut plan.table_references,
Some(&plan.result_columns),
&mut plan.where_clause,
connection,
)?;
if let Some(mut group_by) = group_by {
@@ -539,6 +541,7 @@ fn prepare_one_select_plan(
expr,
&mut plan.table_references,
Some(&plan.result_columns),
connection,
)?;
}
@@ -553,6 +556,7 @@ fn prepare_one_select_plan(
expr,
&mut plan.table_references,
Some(&plan.result_columns),
connection,
)?;
let contains_aggregates =
resolve_aggregates(schema, expr, &mut aggregate_expressions)?;
@@ -589,6 +593,7 @@ fn prepare_one_select_plan(
&mut o.expr,
&mut plan.table_references,
Some(&plan.result_columns),
connection,
)?;
resolve_aggregates(schema, &o.expr, &mut plan.aggregates)?;

View File

@@ -1,5 +1,6 @@
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::Arc;
use crate::schema::{BTreeTable, Column, Type};
use crate::translate::optimizer::optimize_select_plan;
@@ -56,8 +57,9 @@ pub fn translate_update(
body: &mut Update,
syms: &SymbolTable,
mut program: ProgramBuilder,
connection: &Arc<crate::Connection>,
) -> crate::Result<ProgramBuilder> {
let mut plan = prepare_update_plan(&mut program, schema, body)?;
let mut plan = prepare_update_plan(&mut program, schema, body, connection)?;
optimize_plan(&mut plan, schema)?;
// TODO: freestyling these numbers
let opts = ProgramBuilderOpts {
@@ -75,9 +77,10 @@ pub fn translate_update_with_after(
body: &mut Update,
syms: &SymbolTable,
mut program: ProgramBuilder,
connection: &Arc<crate::Connection>,
after: impl FnOnce(&mut ProgramBuilder),
) -> crate::Result<ProgramBuilder> {
let mut plan = prepare_update_plan(&mut program, schema, body)?;
let mut plan = prepare_update_plan(&mut program, schema, body, connection)?;
optimize_plan(&mut plan, schema)?;
// TODO: freestyling these numbers
let opts = ProgramBuilderOpts {
@@ -94,6 +97,7 @@ pub fn prepare_update_plan(
program: &mut ProgramBuilder,
schema: &Schema,
body: &mut Update,
connection: &Arc<crate::Connection>,
) -> crate::Result<Plan> {
if body.with.is_some() {
bail_parse_error!("WITH clause is not supported");
@@ -158,7 +162,7 @@ pub fn prepare_update_plan(
bail_parse_error!("Parse error: no such column: {}", ident);
};
let _ = bind_column_references(&mut set.expr, &mut table_references, None);
let _ = bind_column_references(&mut set.expr, &mut table_references, None, connection);
if let Some(idx) = set_clauses.iter().position(|(idx, _)| *idx == *col_index) {
set_clauses[idx].1 = set.expr.clone();
@@ -171,7 +175,7 @@ pub fn prepare_update_plan(
if let Some(returning) = &mut body.returning {
for rc in returning.iter_mut() {
if let ResultColumn::Expr(expr, alias) = rc {
bind_column_references(expr, &mut table_references, None)?;
bind_column_references(expr, &mut table_references, None, connection)?;
result_columns.push(ResultSetColumn {
expr: expr.clone(),
alias: alias.as_ref().and_then(|a| {
@@ -233,6 +237,7 @@ pub fn prepare_update_plan(
&mut table_references,
Some(&result_columns),
&mut where_clause,
connection,
)?;
let table = Rc::new(BTreeTable {
@@ -307,6 +312,7 @@ pub fn prepare_update_plan(
&mut table_references,
Some(&result_columns),
&mut where_clause,
connection,
)?;
};

View File

@@ -78,3 +78,11 @@ passionate_robin
faithful_thomas
vibrant_miroslav
sparkling_gray}
do_execsql_test_on_specific_db {:memory:} doubly-qualified-delete {
create table test(col);
insert into test(col) values (1);
insert into test(col) values (2);
delete from test where main.test.col = 2;
select col from test;
} {1}

View File

@@ -47,6 +47,18 @@ do_execsql_test select-limit-0 {
SELECT id FROM users LIMIT 0;
} {}
do_execsql_test select-doubly-qualified {
SELECT main.users.id FROM users LIMIT 0;
} {}
do_execsql_test_error select-doubly-qualified-wrong-table {
SELECT main.wrong.id FROM users LIMIT 0;
} {.*}
do_execsql_test_error select-doubly-qualified-wrong-column {
SELECT main.users.wrong FROM users LIMIT 0;
} {.*}
# ORDER BY id here because sqlite uses age_idx here and we (yet) don't so force it to evaluate in ID order
do_execsql_test select-limit-true {
SELECT id FROM users ORDER BY id LIMIT true;