inserting ain't working

hell yeah

concurrency tests passing now woosh

finally, write tests passed

Most of the CDC tests are passing, yay

autoincrement draft

remove shared schema code that broke transactions

sequence table should reset if the table is dropped

fmt

fmt

fmt
Pavan-Nambi
2025-09-04 15:00:27 +05:30
parent caaf60a7ea
commit b833e71c20
14 changed files with 442 additions and 75 deletions
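The behaviour these commits work toward, for reference, is SQLite's AUTOINCREMENT contract: the new rowid is max(largest existing rowid, sqlite_sequence.seq) + 1, rowids are never reused even after deletes, and dropping the table clears its sqlite_sequence row. A minimal sketch of that contract, run against stock SQLite through the rusqlite crate (rusqlite and the exact assertions are illustrative assumptions, not part of this repository's test suite):

use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    // AUTOINCREMENT forces creation of the internal sqlite_sequence table.
    conn.execute_batch(
        "CREATE TABLE t (id INTEGER PRIMARY KEY AUTOINCREMENT, v TEXT);
         INSERT INTO t (v) VALUES ('a'), ('b');",
    )?;
    let seq: i64 =
        conn.query_row("SELECT seq FROM sqlite_sequence WHERE name = 't'", [], |r| r.get(0))?;
    assert_eq!(seq, 2); // seq tracks the largest rowid handed out so far

    // Rowids are not reused: delete the max row, the next insert still gets 3.
    conn.execute("DELETE FROM t WHERE id = 2", [])?;
    conn.execute("INSERT INTO t (v) VALUES ('c')", [])?;
    let max_id: i64 = conn.query_row("SELECT max(id) FROM t", [], |r| r.get(0))?;
    assert_eq!(max_id, 3);

    // DROP TABLE removes the table's row from sqlite_sequence (the "reset" in the commit message).
    conn.execute("DROP TABLE t", [])?;
    let rows: i64 = conn.query_row(
        "SELECT count(*) FROM sqlite_sequence WHERE name = 't'",
        [],
        |r| r.get(0),
    )?;
    assert_eq!(rows, 0);
    Ok(())
}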

aqlitedb — new binary file (content not shown)

View File

@@ -438,6 +438,7 @@ impl Limbo {
}
self.reset_input();
}
fn print_query_performance_stats(&mut self, start: Instant, stats: QueryStatistics) {

View File

@@ -981,6 +981,7 @@ mod tests {
],
has_rowid: true,
is_strict: false,
has_autoincrement: false,
unique_sets: None,
};
schema.add_btree_table(Arc::new(users_table));
@@ -2352,6 +2353,7 @@ mod tests {
],
has_rowid: true,
is_strict: false,
has_autoincrement: false,
unique_sets: None,
};
schema.add_btree_table(Arc::new(sales_table));

View File

@@ -1169,6 +1169,8 @@ impl Connection {
if self.closed.get() {
return Err(LimboError::InternalError("Connection closed".to_string()));
}
// maybe_update_schema() was removed here: refreshing the shared schema at this point broke transactions.
let syms = self.syms.borrow();
let pager = self.pager.borrow().clone();
match cmd {
@@ -1336,6 +1338,7 @@ impl Connection {
if matches!(self.transaction_state.get(), TransactionState::None)
&& current_schema_version != schema.schema_version
{
// Only adopt the newer shared schema when no transaction is open; doing it unconditionally broke transactions.
self.schema.replace(schema.clone());
}
@@ -1346,10 +1349,24 @@ impl Connection {
#[cfg(all(feature = "fs", feature = "conn_raw_api"))]
pub fn read_schema_version(&self) -> Result<u32> {
let pager = self.pager.borrow();
pager
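// Reading the header requires a read transaction; open a short-lived one if the
// caller has not already started one.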
let was_in_tx = self.transaction_state.get() != TransactionState::None;
if !was_in_tx {
pager.begin_read_tx()?;
}
let result = pager
.io
.block(|| pager.with_header(|header| header.schema_cookie))
.map(|version| version.get())
.block(|| pager.with_header(|header| header.schema_cookie));
if !was_in_tx {
pager.end_read_tx()?;
}
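// Page 1 not being allocated yet means an empty database file, so report schema version 0.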
match result {
Ok(version) => Ok(version.get()),
Err(LimboError::Page1NotAlloc) => Ok(0),
Err(e) => Err(e),
}
}
/// Update schema version to the new value within opened write transaction
@@ -2084,6 +2101,7 @@ impl Statement {
pub fn new(program: vdbe::Program, mv_store: Option<Arc<MvStore>>, pager: Rc<Pager>) -> Self {
let accesses_db = program.accesses_db;
let state = vdbe::ProgramState::new(program.max_registers, program.cursor_ref.len());
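// Remember the schema version this statement was prepared against.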
let schema_version = program.connection.schema.borrow().schema_version;
Self {
program,
state,
@@ -2193,6 +2211,7 @@ impl Statement {
Cmd::ExplainQueryPlan(_stmt) => todo!(),
}
};
// Save parameters before they are reset
let parameters = std::mem::take(&mut self.state.parameters);
self.reset();

View File

@@ -672,6 +672,7 @@ pub struct BTreeTable {
pub columns: Vec<Column>,
pub has_rowid: bool,
pub is_strict: bool,
pub has_autoincrement: bool,
pub unique_sets: Option<Vec<Vec<(String, SortOrder)>>>,
}
@@ -830,6 +831,7 @@ fn create_table(
let table_name = normalize_ident(tbl_name.name.as_str());
trace!("Creating table {}", table_name);
let mut has_rowid = true;
let mut has_autoincrement = false;
let mut primary_key_columns = vec![];
let mut cols = vec![];
let is_strict: bool;
@@ -922,12 +924,25 @@ fn create_table(
let mut collation = None;
for c_def in constraints {
match c_def.constraint {
ast::ColumnConstraint::PrimaryKey { order: o, .. } => {
ast::ColumnConstraint::PrimaryKey {
order: o,
auto_increment,
..
} => {
primary_key = true;
if auto_increment {
has_autoincrement = true;
}
if let Some(o) = o {
order = o;
}
}
ast::ColumnConstraint::NotNull { nullable, .. } => {
notnull = !nullable;
}
@@ -986,6 +1001,7 @@ fn create_table(
name: table_name,
has_rowid,
primary_key_columns,
has_autoincrement,
columns: cols,
is_strict,
unique_sets: if unique_sets.is_empty() {
@@ -1302,6 +1318,7 @@ pub fn sqlite_schema_table() -> BTreeTable {
name: "sqlite_schema".to_string(),
has_rowid: true,
is_strict: false,
has_autoincrement: false,
primary_key_columns: vec![],
columns: vec![
Column {
@@ -1984,6 +2001,7 @@ mod tests {
name: "t1".to_string(),
has_rowid: true,
is_strict: false,
has_autoincrement: false,
primary_key_columns: vec![("nonexistent".to_string(), SortOrder::Asc)],
columns: vec![Column {
name: Some("a".to_string()),

View File

@@ -34,6 +34,7 @@ use super::expr::{translate_expr, translate_expr_no_constant_opt, NoConstantOptR
use super::optimizer::rewrite_expr;
use super::plan::QueryDestination;
use super::select::translate_select;
use crate::schema::sqlite_schema_table;
struct TempTableCtx {
cursor_id: usize,
@@ -362,11 +363,28 @@ pub fn translate_insert(
}
// Create new rowid if a) not provided by user or b) provided by user but is NULL
program.emit_insn(Insn::NewRowid {
cursor: cursor_id,
rowid_reg: insertion.key_register(),
prev_largest_reg: 0,
});
if btree_table.has_autoincrement {
// Table has AUTOINCREMENT, use the sqlite_sequence logic.
emit_autoincrement_logic(
&mut program,
schema,
&btree_table,
cursor_id,
insertion.key_register(),
)?;
} else {
program.emit_insn(Insn::NewRowid {
cursor: cursor_id,
rowid_reg: insertion.key_register(),
prev_largest_reg: 0,
});
}
if let Some(must_be_int_label) = check_rowid_is_integer_label {
program.resolve_label(must_be_int_label, program.offset());
@@ -1132,3 +1150,191 @@ fn translate_virtual_table_insert(
Ok(program)
}
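/// Emits the rowid selection for an AUTOINCREMENT table, mirroring SQLite's
/// sqlite_sequence protocol: scan sqlite_sequence for this table's row, take the
/// larger of that `seq` value and the table's current rowid high-water mark, add 1
/// into `key_register`, then insert or replace the table's sqlite_sequence row with
/// the new value.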
fn emit_autoincrement_logic(
program: &mut ProgramBuilder,
schema: &Schema,
table: &schema::BTreeTable,
main_cursor_id: usize,
key_register: usize,
) -> Result<()> {
let seq_table = schema.get_btree_table("sqlite_sequence").ok_or_else(|| {
crate::error::LimboError::InternalError("sqlite_sequence table not found".to_string())
})?;
let seq_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(seq_table.clone()));
program.emit_insn(Insn::OpenWrite {
cursor_id: seq_cursor_id,
root_page: seq_table.root_page.into(),
db: 0,
});
let table_name_reg = program.emit_string8_new_reg(table.name.clone());
let r_seq = program.alloc_register();
let r_max = program.alloc_register();
let r_seq_rowid = program.alloc_register();
program.emit_insn(Insn::Integer {
dest: r_seq,
value: 0,
});
program.emit_insn(Insn::Null {
dest: r_seq_rowid,
dest_end: None,
});
let loop_start_label = program.allocate_label();
let loop_end_label = program.allocate_label();
let next_row_label = program.allocate_label();
program.emit_insn(Insn::Rewind {
cursor_id: seq_cursor_id,
pc_if_empty: loop_end_label,
});
program.preassign_label_to_next_insn(loop_start_label);
let name_col_reg = program.alloc_register();
// Read 'name' column (index 0)
program.emit_column_or_rowid(seq_cursor_id, 0, name_col_reg);
program.emit_insn(Insn::Ne {
// If name != our_table, continue
lhs: table_name_reg,
rhs: name_col_reg,
target_pc: next_row_label,
flags: Default::default(),
collation: None,
});
// Read 'seq' column (index 1)
program.emit_column_or_rowid(seq_cursor_id, 1, r_seq);
program.emit_insn(Insn::RowId {
cursor_id: seq_cursor_id,
dest: r_seq_rowid,
});
program.emit_insn(Insn::Goto {
target_pc: loop_end_label,
});
program.preassign_label_to_next_insn(next_row_label);
program.emit_insn(Insn::Next {
cursor_id: seq_cursor_id,
pc_if_next: loop_start_label,
});
program.preassign_label_to_next_insn(loop_end_label);
// Get the table's largest existing rowid into r_max; the rowid generated into dummy_reg is unused.
let dummy_reg = program.alloc_register();
program.emit_insn(Insn::NewRowid {
cursor: main_cursor_id,
rowid_reg: dummy_reg,
prev_largest_reg: r_max,
});
let r_max_is_bigger_label = program.allocate_label();
let continue_label = program.allocate_label();
program.emit_insn(Insn::Ge {
lhs: r_max,
rhs: r_seq,
target_pc: r_max_is_bigger_label,
flags: Default::default(),
collation: None,
});
program.emit_insn(Insn::Copy {
src_reg: r_seq,
dst_reg: key_register,
extra_amount: 0,
});
program.emit_insn(Insn::Goto {
target_pc: continue_label,
});
program.preassign_label_to_next_insn(r_max_is_bigger_label);
program.emit_insn(Insn::Copy {
src_reg: r_max,
dst_reg: key_register,
extra_amount: 0,
});
program.preassign_label_to_next_insn(continue_label);
program.emit_insn(Insn::AddImm {
register: key_register,
value: 1,
});
let record_reg = program.alloc_register();
let record_start_reg = program.alloc_registers(2);
program.emit_insn(Insn::Copy {
src_reg: table_name_reg,
dst_reg: record_start_reg,
extra_amount: 0,
});
program.emit_insn(Insn::Copy {
src_reg: key_register,
dst_reg: record_start_reg + 1,
extra_amount: 0,
});
program.emit_insn(Insn::MakeRecord {
start_reg: record_start_reg,
count: 2,
dest_reg: record_reg,
index_name: None,
});
let update_existing_label = program.allocate_label();
let end_update_label = program.allocate_label();
program.emit_insn(Insn::NotNull {
reg: r_seq_rowid,
target_pc: update_existing_label,
});
program.emit_insn(Insn::NewRowid {
cursor: seq_cursor_id,
rowid_reg: r_seq_rowid,
prev_largest_reg: 0,
});
program.emit_insn(Insn::Insert {
cursor: seq_cursor_id,
key_reg: r_seq_rowid,
record_reg,
flag: InsertFlags::new(),
table_name: "sqlite_sequence".to_string(),
});
program.emit_insn(Insn::Goto {
target_pc: end_update_label,
});
program.preassign_label_to_next_insn(update_existing_label);
program.emit_insn(Insn::Insert {
cursor: seq_cursor_id,
key_reg: r_seq_rowid, // Use the rowid we found
record_reg,
flag: InsertFlags(turso_parser::ast::ResolveType::Replace.bit_value() as u8),
table_name: "sqlite_sequence".to_string(),
});
program.preassign_label_to_next_insn(end_update_label);
program.emit_insn(Insn::Close {
cursor_id: seq_cursor_id,
});
Ok(())
}
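For reference, the rowid rule the bytecode above encodes, written as plain Rust (an illustrative sketch, not code from this crate; `seq_entry` stands for the value read out of sqlite_sequence, if a row for the table exists):

fn next_autoincrement_rowid(table_max_rowid: i64, seq_entry: Option<i64>) -> i64 {
    // r_seq starts at 0 and is only overwritten when a matching sqlite_sequence row is found.
    let seq = seq_entry.unwrap_or(0);
    // Ge/Copy pick the larger candidate into key_register, and AddImm then bumps it by one.
    std::cmp::max(table_max_rowid, seq) + 1
}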

View File

@@ -1903,6 +1903,7 @@ mod tests {
],
has_rowid: true,
is_strict: false,
has_autoincrement: false,
unique_sets: None,
};
schema.add_btree_table(Arc::new(users_table));
@@ -1964,6 +1965,7 @@ mod tests {
],
has_rowid: true,
is_strict: false,
has_autoincrement: false,
unique_sets: None,
};
schema.add_btree_table(Arc::new(orders_table));

View File

@@ -185,12 +185,12 @@ pub fn translate_inner(
} => translate_create_table(
tbl_name,
temporary,
body,
if_not_exists,
body,
schema,
syms,
connection,
program,
connection,
)?,
ast::Stmt::CreateTrigger { .. } => bail_parse_error!("CREATE TRIGGER not supported yet"),
ast::Stmt::CreateView {

View File

@@ -1650,6 +1650,7 @@ mod tests {
Arc::new(BTreeTable {
root_page: 1, // Page number doesn't matter for tests
name: name.to_string(),
has_autoincrement: false,
primary_key_columns: vec![],
columns,
has_rowid: true,

View File

@@ -274,25 +274,26 @@ fn update_pragma(
// but for now, let's keep it as is...
let opts = CaptureDataChangesMode::parse(&value)?;
if let Some(table) = &opts.table() {
// make sure that the CDC table has been created
program = translate_create_table(
QualifiedName {
db_name: None,
name: ast::Name::new(table),
alias: None,
},
false,
ast::CreateTableBody::ColumnsAndConstraints {
columns: turso_cdc_table_columns(),
constraints: vec![],
options: ast::TableOptions::NONE,
},
true,
schema,
syms,
&connection,
program,
)?;
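// Only emit the CREATE TABLE when the CDC table is not already present in the schema.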
if schema.get_table(table).is_none() {
program = translate_create_table(
QualifiedName {
db_name: None,
name: ast::Name::new(table),
alias: None,
},
false,
true, // if_not_exists
ast::CreateTableBody::ColumnsAndConstraints {
columns: turso_cdc_table_columns(),
constraints: vec![],
options: ast::TableOptions::NONE,
},
schema,
syms,
program,
&connection,
)?;
}
}
connection.set_capture_data_changes(opts);
Ok((program, TransactionMode::Write))

View File

@@ -23,6 +23,7 @@ use crate::util::PRIMARY_KEY_AUTOMATIC_INDEX_NAME_PREFIX;
use crate::vdbe::builder::CursorType;
use crate::vdbe::insn::Cookie;
use crate::vdbe::insn::{CmpInsFlags, InsertFlags, Insn};
use crate::Connection;
use crate::LimboError;
use crate::SymbolTable;
use crate::{bail_parse_error, Result};
@@ -33,13 +34,14 @@ use turso_ext::VTabKind;
pub fn translate_create_table(
tbl_name: ast::QualifiedName,
temporary: bool,
body: ast::CreateTableBody,
if_not_exists: bool,
body: ast::CreateTableBody,
schema: &Schema,
syms: &SymbolTable,
connection: &Arc<crate::Connection>,
mut program: ProgramBuilder,
connection: &Connection,
) -> Result<ProgramBuilder> {
let normalized_tbl_name = normalize_ident(tbl_name.name.as_str());
if temporary {
bail_parse_error!("TEMPORARY table not supported yet");
}
@@ -59,7 +61,6 @@ pub fn translate_create_table(
approx_num_labels: 1,
};
program.extend(&opts);
let normalized_tbl_name = normalize_ident(tbl_name.name.as_str());
if schema.get_table(&normalized_tbl_name).is_some() {
if if_not_exists {
return Ok(program);
@@ -67,6 +68,79 @@ pub fn translate_create_table(
bail_parse_error!("Table {} already exists", normalized_tbl_name);
}
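// Detect AUTOINCREMENT declared either on a column or in a table-level PRIMARY KEY
// constraint; it determines whether sqlite_sequence has to exist.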
let mut has_autoincrement = false;
if let ast::CreateTableBody::ColumnsAndConstraints {
columns,
constraints,
..
} = &body
{
for col in columns {
for constraint in &col.constraints {
if let ast::ColumnConstraint::PrimaryKey { auto_increment, .. } =
constraint.constraint
{
if auto_increment {
has_autoincrement = true;
break;
}
}
}
if has_autoincrement {
break;
}
}
if !has_autoincrement {
for constraint in constraints {
if let ast::TableConstraint::PrimaryKey { auto_increment, .. } =
constraint.constraint
{
if auto_increment {
has_autoincrement = true;
break;
}
}
}
}
}
let schema_master_table = schema.get_btree_table(SQLITE_TABLEID).unwrap();
let sqlite_schema_cursor_id =
program.alloc_cursor_id(CursorType::BTreeTable(schema_master_table.clone()));
program.emit_insn(Insn::OpenWrite {
cursor_id: sqlite_schema_cursor_id,
root_page: 1usize.into(),
db: 0,
});
let resolver = Resolver::new(schema, syms);
let cdc_table = prepare_cdc_if_necessary(&mut program, schema, SQLITE_TABLEID)?;
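// First AUTOINCREMENT table in this database: create sqlite_sequence and register
// it in sqlite_schema before the user table itself is written.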
let created_sequence_table =
if has_autoincrement && schema.get_table("sqlite_sequence").is_none() {
let seq_table_root_reg = program.alloc_register();
program.emit_insn(Insn::CreateBtree {
db: 0,
root: seq_table_root_reg,
flags: CreateBTreeFlags::new_table(),
});
let seq_sql = "CREATE TABLE sqlite_sequence(name,seq)";
emit_schema_entry(
&mut program,
&resolver,
sqlite_schema_cursor_id,
cdc_table.as_ref().map(|x| x.0),
SchemaEntryType::Table,
"sqlite_sequence",
"sqlite_sequence",
seq_table_root_reg,
Some(seq_sql.to_string()),
)?;
true
} else {
false
};
let sql = create_table_body_to_str(&tbl_name, &body);
let parse_schema_label = program.allocate_label();
@@ -75,7 +149,6 @@ pub fn translate_create_table(
// TODO: SetCookie
// TODO: SetCookie
// Create the table B-tree
let table_root_reg = program.alloc_register();
program.emit_insn(Insn::CreateBtree {
db: 0,
@@ -110,7 +183,7 @@ pub fn translate_create_table(
let index_regs =
check_automatic_pk_index_required(&body, &mut program, tbl_name.name.as_str())?;
if let Some(index_regs) = index_regs.as_ref() {
if !schema.indexes_enabled() {
bail_parse_error!("Constraints UNIQUE and PRIMARY KEY (unless INTEGER PRIMARY KEY) on table are not supported without indexes");
}
for index_reg in index_regs.clone() {
@@ -122,22 +195,11 @@ pub fn translate_create_table(
}
}
let table = schema.get_btree_table(SQLITE_TABLEID).unwrap();
let sqlite_schema_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(table.clone()));
program.emit_insn(Insn::OpenWrite {
cursor_id: sqlite_schema_cursor_id,
root_page: 1usize.into(),
db: 0,
});
let cdc_table = prepare_cdc_if_necessary(&mut program, schema, SQLITE_TABLEID)?;
let resolver = Resolver::new(schema, syms);
// Add the table entry to sqlite_schema
emit_schema_entry(
&mut program,
&resolver,
sqlite_schema_cursor_id,
cdc_table.map(|x| x.0),
cdc_table.as_ref().map(|x| x.0),
SchemaEntryType::Table,
&normalized_tbl_name,
&normalized_tbl_name,
@@ -145,15 +207,9 @@ pub fn translate_create_table(
Some(sql),
)?;
// If we need an automatic index, add its entry to sqlite_schema
if let Some(index_regs) = index_regs {
for (idx, index_reg) in index_regs.into_iter().enumerate() {
let index_name = format!(
"{}{}_{}",
PRIMARY_KEY_AUTOMATIC_INDEX_NAME_PREFIX,
tbl_name.name.as_str(),
idx + 1
);
for (i, index_reg) in index_regs.enumerate() {
let index_name = format!("sqlite_autoindex_{}_{}", tbl_name.name.as_str(), i + 1);
emit_schema_entry(
&mut program,
&resolver,
@@ -176,9 +232,14 @@ pub fn translate_create_table(
value: schema.schema_version as i32 + 1,
p5: 0,
});
// TODO: remove format, it sucks for performance but is convenient
let parse_schema_where_clause =
let mut parse_schema_where_clause =
format!("tbl_name = '{normalized_tbl_name}' AND type != 'trigger'");
if created_sequence_table {
parse_schema_where_clause.push_str(" OR tbl_name = 'sqlite_sequence'");
}
program.emit_insn(Insn::ParseSchema {
db: sqlite_schema_cursor_id,
where_clause: Some(parse_schema_where_clause),
@@ -230,16 +291,16 @@ pub fn emit_schema_entry(
program.emit_string8_new_reg(name.to_string());
program.emit_string8_new_reg(tbl_name.to_string());
let rootpage_reg = program.alloc_register();
let table_root_reg = program.alloc_register();
if root_page_reg == 0 {
program.emit_insn(Insn::Integer {
dest: rootpage_reg,
dest: table_root_reg,
value: 0, // virtual tables in sqlite always have rootpage=0
});
} else {
program.emit_insn(Insn::Copy {
src_reg: root_page_reg,
dst_reg: rootpage_reg,
dst_reg: table_root_reg,
extra_amount: 0,
});
}
@@ -300,7 +361,7 @@ struct PrimaryKeyColumnInfo<'a> {
/// An automatic PRIMARY KEY index is not required if:
/// - The table has no PRIMARY KEY
/// - The table has a single-column PRIMARY KEY whose typename is _exactly_ "INTEGER" e.g. not "INT".
/// In this case, the PRIMARY KEY column becomes an alias for the rowid.
///
/// Otherwise, an automatic PRIMARY KEY index is required.
fn check_automatic_pk_index_required(
@@ -675,8 +736,9 @@ pub fn translate_drop_table(
"DROP TABLE with indexes on the table is disabled by default. Omit the `--experimental-indexes=false` flag to enable this feature."
);
}
let opts = ProgramBuilderOpts {
num_cursors: 3,
num_cursors: 4,
approx_num_insns: 40,
approx_num_labels: 4,
};
@@ -722,7 +784,7 @@ pub fn translate_drop_table(
});
program.preassign_label_to_next_insn(metadata_loop);
// start loop on schema table
program.emit_column_or_rowid(
sqlite_schema_cursor_id_0,
2,
@@ -753,7 +815,7 @@ pub fn translate_drop_table(
dest: row_id_reg,
});
if let Some((cdc_cursor_id, _)) = cdc_table {
let table_type = program.emit_string8_new_reg("table".to_string()); // r4
program.mark_last_insn_constant();
let skip_cdc_label = program.allocate_label();
@@ -802,7 +864,7 @@ pub fn translate_drop_table(
pc_if_next: metadata_loop,
});
program.preassign_label_to_next_insn(end_metadata_label);
// end of loop on schema table
// 2. Destroy the indices within a loop
let indices = schema.get_indices(tbl_name.name.as_str());
@@ -851,17 +913,18 @@ pub fn translate_drop_table(
let schema_row_id_register = program.alloc_register();
program.emit_null(schema_data_register, Some(schema_row_id_register));
// All of the following processing needs to be done only if the table is not a virtual table
if table.btree().is_some() {
// 4. Open an ephemeral table, and read over the entry from the schema table whose root page was moved in the destroy operation
// cursor id 1
let sqlite_schema_cursor_id_1 =
program.alloc_cursor_id(CursorType::BTreeTable(schema_table.clone()));
let simple_table_rc = Arc::new(BTreeTable {
root_page: 0, // Not relevant for ephemeral table definition
name: "ephemeral_scratch".to_string(),
has_rowid: true,
has_autoincrement: false,
primary_key_columns: vec![],
columns: vec![Column {
name: Some("rowid".to_string()),
@@ -878,7 +941,7 @@ pub fn translate_drop_table(
is_strict: false,
unique_sets: None,
});
// cursor id 2
let ephemeral_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(simple_table_rc));
program.emit_insn(Insn::OpenEphemeral {
cursor_id: ephemeral_cursor_id,
@@ -913,9 +976,9 @@ pub fn translate_drop_table(
pc_if_empty: copy_schema_to_temp_table_loop_end_label,
});
program.preassign_label_to_next_insn(copy_schema_to_temp_table_loop);
// start loop on schema table
program.emit_column_or_rowid(sqlite_schema_cursor_id_1, 3, prev_root_page_register);
// The label and Insn::Ne are used to skip over any rows in the schema table that don't have the root page that was moved
let next_label = program.allocate_label();
program.emit_insn(Insn::Ne {
lhs: prev_root_page_register,
@@ -942,18 +1005,18 @@ pub fn translate_drop_table(
pc_if_next: copy_schema_to_temp_table_loop,
});
program.preassign_label_to_next_insn(copy_schema_to_temp_table_loop_end_label);
// End loop to copy over row id's from the schema table for rows that have the same root page as the one that was moved
program.resolve_label(if_not_label, program.offset());
// 5. Open a write cursor to the schema table and re-insert the records placed in the ephemeral table but insert the correct root page now
program.emit_insn(Insn::OpenWrite {
cursor_id: sqlite_schema_cursor_id_1,
root_page: 1usize.into(),
db: 0,
});
// Loop to copy over row id's from the ephemeral table and then re-insert into the schema table with the correct root page
let copy_temp_table_to_schema_loop_end_label = program.allocate_label();
let copy_temp_table_to_schema_loop = program.allocate_label();
program.emit_insn(Insn::Rewind {
@@ -1009,10 +1072,59 @@ pub fn translate_drop_table(
pc_if_next: copy_temp_table_to_schema_loop,
});
program.preassign_label_to_next_insn(copy_temp_table_to_schema_loop_end_label);
// End loop to copy over row id's from the ephemeral table and then re-insert into the schema table with the correct root page
}
// Drop the in-memory structures for the table
// When a table is dropped, its row in sqlite_sequence must be removed so the sequence resets.
if let Some(seq_table) = schema.get_table("sqlite_sequence").and_then(|t| t.btree()) {
let seq_cursor_id = program.alloc_cursor_id(CursorType::BTreeTable(seq_table.clone()));
let seq_table_name_reg = program.alloc_register();
let dropped_table_name_reg =
program.emit_string8_new_reg(tbl_name.name.as_str().to_string());
program.mark_last_insn_constant();
program.emit_insn(Insn::OpenWrite {
cursor_id: seq_cursor_id,
root_page: seq_table.root_page.into(),
db: 0,
});
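// Scan sqlite_sequence and delete any row whose name matches the dropped table.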
let end_loop_label = program.allocate_label();
let loop_start_label = program.allocate_label();
program.emit_insn(Insn::Rewind {
cursor_id: seq_cursor_id,
pc_if_empty: end_loop_label,
});
program.preassign_label_to_next_insn(loop_start_label);
program.emit_column_or_rowid(seq_cursor_id, 0, seq_table_name_reg);
let continue_loop_label = program.allocate_label();
program.emit_insn(Insn::Ne {
lhs: seq_table_name_reg,
rhs: dropped_table_name_reg,
target_pc: continue_loop_label,
flags: CmpInsFlags::default(),
collation: None,
});
program.emit_insn(Insn::Delete {
cursor_id: seq_cursor_id,
table_name: "sqlite_sequence".to_string(),
});
program.resolve_label(continue_loop_label, program.offset());
program.emit_insn(Insn::Next {
cursor_id: seq_cursor_id,
pc_if_next: loop_start_label,
});
program.preassign_label_to_next_insn(end_loop_label);
}
// Drop the in-memory structures for the table
program.emit_insn(Insn::DropTable {
db: 0,
_p2: 0,

View File

@@ -258,6 +258,7 @@ pub fn prepare_update_plan(
root_page: 0, // Not relevant for ephemeral table definition
name: "ephemeral_scratch".to_string(),
has_rowid: true,
has_autoincrement: false,
primary_key_columns: vec![],
columns: vec![Column {
name: Some("rowid".to_string()),

View File

@@ -6493,6 +6493,10 @@ pub fn op_parse_schema(
conn.is_nested_stmt.set(false);
conn.auto_commit.set(previous_auto_commit);
maybe_nested_stmt_err?;
// Pushing the reparsed schema back into the shared Database schema was removed here because it broke transactions.
state.pc += 1;
Ok(InsnFunctionStepResult::Step)
}