Merge 'Adds Drop Table' from Zaid Humayun

This PR adds support for `DROP TABLE` and addresses issue
https://github.com/tursodatabase/limbo/issues/894
It depends on https://github.com/tursodatabase/limbo/pull/785 being
merged in because it requires the implementation of `free_page`.
EDIT: The PR above has been merged.
It adds the following:
* an implementation for the `DropTable` AST instruction via a method
called `translate_drop_table`
* a couple of new instructions - `Destroy` and `DropTable`. The former
is to modify physical b-tree pages and the latter is to modify in-memory
structures like the schema hash table.
* `btree_destroy` on `BTreeCursor` to walk the tree of pages for this
table and place them in the free list.
* state machine traversal for both `btree_destroy` and
`clear_overflow_pages` to ensure performant, correct code.
* unit & tcl tests
* modifies the `Null` instruction to follow SQLite semantics and accept
a second register. It will set all registers in this range to null. This
is required for `DROP TABLE`.
The screenshots below have a comparison of the bytecodes generated via
SQLite & Limbo.
Limbo has the same instruction set except for the subroutines which
involve opening an ephemeral table, copying over the triggers from the
`sqlite_schema` table and then re-inserting them back into the
`sqlite_schema` table.
This is because `OpenEphemeral` is still a WIP and is being tracked at
https://github.com/tursodatabase/limbo/pull/768
![Screenshot 2025-02-09 at 7 05 03 PM](https://github.com/user-
attachments/assets/1d597001-a60c-4a76-89fd-8b90881c77c9)
![Screenshot 2025-02-09 at 7 05 35 PM](https://github.com/user-
attachments/assets/ecfd2a7a-2edc-49cd-a8d1-7b4db8657444)

Reviewed-by: Pere Diaz Bou <pere-altea@homail.com>

Closes #897
This commit is contained in:
Pekka Enberg
2025-03-06 18:27:41 +02:00
9 changed files with 713 additions and 87 deletions

View File

@@ -45,6 +45,11 @@ impl Schema {
self.tables.get(&name).cloned()
}
/// Drop the table entry for `table_name` (normalized first) from the schema map.
/// Removing a name that is not present is a no-op.
pub fn remove_table(&mut self, table_name: &str) {
    self.tables.remove(&normalize_ident(table_name));
}
pub fn get_btree_table(&self, name: &str) -> Option<Rc<BTreeTable>> {
let name = normalize_ident(name);
if let Some(table) = self.tables.get(&name) {
@@ -61,6 +66,18 @@ impl Schema {
.or_default()
.push(index.clone())
}
/// Return the indices registered for `table_name` (normalized first),
/// or an empty slice when the table has no indices.
pub fn get_indices(&self, table_name: &str) -> &[Arc<Index>] {
    let normalized = normalize_ident(table_name);
    match self.indexes.get(&normalized) {
        Some(indices) => indices.as_slice(),
        None => &[],
    }
}
/// Drop every index associated with `table_name` (normalized first)
/// from the in-memory index map. No-op when the table has no indices.
pub fn remove_indices_for_table(&mut self, table_name: &str) {
    self.indexes.remove(&normalize_ident(table_name));
}
}
#[derive(Clone, Debug)]

View File

@@ -12,11 +12,11 @@ use crate::{return_corrupt, LimboError, Result};
use std::cell::{Cell, Ref, RefCell};
use std::pin::Pin;
use std::rc::Rc;
use std::sync::Arc;
use super::pager::PageRef;
use super::sqlite3_ondisk::{
payload_overflows, write_varint_to_vec, IndexInteriorCell, IndexLeafCell, OverflowCell,
DATABASE_HEADER_SIZE,
write_varint_to_vec, IndexInteriorCell, IndexLeafCell, OverflowCell, DATABASE_HEADER_SIZE,
};
/*
@@ -74,6 +74,21 @@ macro_rules! return_if_locked {
}};
}
/// State machine of destroy operations.
/// Keeps track of traversal so that it can be resumed when IO is encountered.
#[derive(Debug, Clone)]
enum DestroyState {
    /// Initial state; nothing has been visited yet.
    Start,
    /// Load (possibly waiting on I/O for) the page on top of the traversal stack.
    LoadPage,
    /// Walk the cells of the loaded page, descending into child pages as needed.
    ProcessPage,
    /// Free the overflow chain referenced by `cell` before continuing traversal.
    ClearOverflowPages { cell: BTreeCell },
    /// Put the current page on the freelist, then pop back to its parent.
    FreePage,
}
/// Resumable bookkeeping carried by a cursor while a destroy operation is in flight.
struct DestroyInfo {
    state: DestroyState,
}
/// State machine of a write operation.
/// May involve balancing due to overflow.
#[derive(Debug, Clone, Copy)]
@@ -118,6 +133,7 @@ impl WriteInfo {
enum CursorState {
None,
Write(WriteInfo),
Destroy(DestroyInfo),
}
impl CursorState {
@@ -133,6 +149,25 @@ impl CursorState {
_ => None,
}
}
/// Borrow the destroy bookkeeping when the cursor is mid-destroy; `None` otherwise.
fn destroy_info(&self) -> Option<&DestroyInfo> {
    if let CursorState::Destroy(info) = self {
        Some(info)
    } else {
        None
    }
}
/// Mutably borrow the destroy bookkeeping when the cursor is mid-destroy; `None` otherwise.
fn mut_destroy_info(&mut self) -> Option<&mut DestroyInfo> {
    if let CursorState::Destroy(info) = self {
        Some(info)
    } else {
        None
    }
}
}
/// State machine used while freeing a chain of overflow pages, so the walk
/// can resume after an I/O interruption.
enum OverflowState {
    /// Nothing processed yet; inspect the cell to find the first overflow page.
    Start,
    /// Free `next_page`, then follow its forward link to the rest of the chain.
    ProcessPage { next_page: u32 },
    /// The entire chain has been freed.
    Done,
}
pub struct BTreeCursor {
@@ -152,6 +187,9 @@ pub struct BTreeCursor {
going_upwards: bool,
/// Information maintained across execution attempts when an operation yields due to I/O.
state: CursorState,
/// Information maintained while freeing overflow pages. Maintained separately from cursor state since
/// any method could require freeing overflow pages
overflow_state: Option<OverflowState>,
/// Page stack used to traverse the btree.
/// Each cursor has a stack because each cursor traverses the btree independently.
stack: PageStack,
@@ -196,6 +234,7 @@ impl BTreeCursor {
null_flag: false,
going_upwards: false,
state: CursorState::None,
overflow_state: None,
stack: PageStack {
current_page: Cell::new(-1),
cell_indices: RefCell::new([0; BTCURSOR_MAX_DEPTH + 1]),
@@ -1895,90 +1934,269 @@ impl BTreeCursor {
}
}
fn clear_overflow_pages(&self, cell: &BTreeCell) -> Result<CursorResult<()>> {
// Get overflow info based on cell type
let (first_overflow_page, n_overflow) = match cell {
BTreeCell::TableLeafCell(leaf_cell) => {
match self.calculate_overflow_info(leaf_cell._payload.len(), PageType::TableLeaf)? {
Some(n_overflow) => (leaf_cell.first_overflow_page, n_overflow),
None => return Ok(CursorResult::Ok(())),
/// Clear the overflow pages linked to a specific page provided by the leaf cell
/// Uses a state machine to keep track of it's operations so that traversal can be
/// resumed from last point after IO interruption
fn clear_overflow_pages(&mut self, cell: &BTreeCell) -> Result<CursorResult<()>> {
loop {
let state = self.overflow_state.take().unwrap_or(OverflowState::Start);
match state {
OverflowState::Start => {
let first_overflow_page = match cell {
BTreeCell::TableLeafCell(leaf_cell) => leaf_cell.first_overflow_page,
BTreeCell::IndexLeafCell(leaf_cell) => leaf_cell.first_overflow_page,
BTreeCell::IndexInteriorCell(interior_cell) => {
interior_cell.first_overflow_page
}
BTreeCell::TableInteriorCell(_) => return Ok(CursorResult::Ok(())), // No overflow pages
};
if let Some(page) = first_overflow_page {
self.overflow_state = Some(OverflowState::ProcessPage { next_page: page });
continue;
} else {
self.overflow_state = Some(OverflowState::Done);
}
}
}
BTreeCell::IndexLeafCell(leaf_cell) => {
match self.calculate_overflow_info(leaf_cell.payload.len(), PageType::IndexLeaf)? {
Some(n_overflow) => (leaf_cell.first_overflow_page, n_overflow),
None => return Ok(CursorResult::Ok(())),
OverflowState::ProcessPage { next_page } => {
if next_page < 2
|| next_page as usize
> self.pager.db_header.lock().unwrap().database_size as usize
{
self.overflow_state = None;
return Err(LimboError::Corrupt("Invalid overflow page number".into()));
}
let page = self.pager.read_page(next_page as usize)?;
return_if_locked!(page);
let contents = page.get().contents.as_ref().unwrap();
let next = contents.read_u32(0);
self.pager.free_page(Some(page), next_page as usize)?;
if next != 0 {
self.overflow_state = Some(OverflowState::ProcessPage { next_page: next });
} else {
self.overflow_state = Some(OverflowState::Done);
}
}
}
BTreeCell::IndexInteriorCell(interior_cell) => {
match self
.calculate_overflow_info(interior_cell.payload.len(), PageType::IndexInterior)?
{
Some(n_overflow) => (interior_cell.first_overflow_page, n_overflow),
None => return Ok(CursorResult::Ok(())),
OverflowState::Done => {
self.overflow_state = None;
return Ok(CursorResult::Ok(()));
}
}
BTreeCell::TableInteriorCell(_) => return Ok(CursorResult::Ok(())), // No overflow pages
};
let Some(first_page) = first_overflow_page else {
return Ok(CursorResult::Ok(()));
};
let page_count = self.pager.db_header.lock().unwrap().database_size as usize;
let mut pages_left = n_overflow;
let mut current_page = first_page;
// Clear overflow pages
while pages_left > 0 {
pages_left -= 1;
// Validate overflow page number
if current_page < 2 || current_page as usize > page_count {
return_corrupt!("Invalid overflow page number");
}
let page = self.pager.read_page(current_page as usize)?;
return_if_locked!(page);
let contents = page.get().contents.as_ref().unwrap();
let next_page = if pages_left > 0 {
contents.read_u32(0)
} else {
0
};
// Free the current page
self.pager.free_page(Some(page), current_page as usize)?;
current_page = next_page;
}
Ok(CursorResult::Ok(()))
}
fn calculate_overflow_info(
&self,
payload_len: usize,
page_type: PageType,
) -> Result<Option<usize>> {
let max_local = payload_overflow_threshold_max(page_type, self.usable_space() as u16);
let min_local = payload_overflow_threshold_min(page_type, self.usable_space() as u16);
let usable_size = self.usable_space();
let (_, local_size) = payload_overflows(payload_len, max_local, min_local, usable_size);
assert!(
local_size != payload_len,
"Trying to clear overflow pages when there are no overflow pages"
);
// Calculate expected overflow pages
let overflow_page_size = self.usable_space() - 4;
let n_overflow =
(payload_len - local_size + overflow_page_size).div_ceil(overflow_page_size);
if n_overflow == 0 {
return_corrupt!("Invalid overflow page count");
/// Destroys a B-tree by freeing all its pages in an iterative depth-first order.
/// This ensures child pages are freed before their parents
/// Uses a state machine to keep track of the operation to ensure IO doesn't cause repeated traversals
///
/// # Example
/// For a B-tree with this structure (where 4' is an overflow page):
/// ```text
/// 1 (root)
/// / \
/// 2 3
/// / \ / \
/// 4' <- 4 5 6 7
/// ```
///
/// The destruction order would be: [4',4,5,2,6,7,3,1]
pub fn btree_destroy(&mut self) -> Result<CursorResult<()>> {
if let CursorState::None = &self.state {
self.move_to_root();
self.state = CursorState::Destroy(DestroyInfo {
state: DestroyState::Start,
});
}
Ok(Some(n_overflow))
loop {
let destroy_state = {
let destroy_info = self
.state
.destroy_info()
.expect("unable to get a mut reference to destroy state in cursor");
destroy_info.state.clone()
};
match destroy_state {
DestroyState::Start => {
let destroy_info = self
.state
.mut_destroy_info()
.expect("unable to get a mut reference to destroy state in cursor");
destroy_info.state = DestroyState::LoadPage;
}
DestroyState::LoadPage => {
let page = self.stack.top();
return_if_locked!(page);
if !page.is_loaded() {
self.pager.load_page(Arc::clone(&page))?;
return Ok(CursorResult::IO);
}
let destroy_info = self
.state
.mut_destroy_info()
.expect("unable to get a mut reference to destroy state in cursor");
destroy_info.state = DestroyState::ProcessPage;
}
DestroyState::ProcessPage => {
let page = self.stack.top();
assert!(page.is_loaded()); // page should be loaded at this time
let contents = page.get().contents.as_ref().unwrap();
let cell_idx = self.stack.current_cell_index();
// If we've processed all cells in this page, figure out what to do with this page
if cell_idx >= contents.cell_count() as i32 {
match (contents.is_leaf(), cell_idx) {
// Leaf pages with all cells processed
(true, n) if n >= contents.cell_count() as i32 => {
let destroy_info = self.state.mut_destroy_info().expect(
"unable to get a mut reference to destroy state in cursor",
);
destroy_info.state = DestroyState::FreePage;
continue;
}
// Non-leaf page which has processed all children but not it's potential right child
(false, n) if n == contents.cell_count() as i32 => {
if let Some(rightmost) = contents.rightmost_pointer() {
let rightmost_page =
self.pager.read_page(rightmost as usize)?;
self.stack.advance();
self.stack.push(rightmost_page);
let destroy_info = self.state.mut_destroy_info().expect(
"unable to get a mut reference to destroy state in cursor",
);
destroy_info.state = DestroyState::LoadPage;
} else {
let destroy_info = self.state.mut_destroy_info().expect(
"unable to get a mut reference to destroy state in cursor",
);
destroy_info.state = DestroyState::FreePage;
}
continue;
}
// Non-leaf page which has processed all children and it's right child
(false, n) if n > contents.cell_count() as i32 => {
let destroy_info = self.state.mut_destroy_info().expect(
"unable to get a mut reference to destroy state in cursor",
);
destroy_info.state = DestroyState::FreePage;
continue;
}
_ => unreachable!("Invalid cell idx state"),
}
}
// We have not yet processed all cells in this page
// Get the current cell
let cell = contents.cell_get(
cell_idx as usize,
Rc::clone(&self.pager),
payload_overflow_threshold_max(
contents.page_type(),
self.usable_space() as u16,
),
payload_overflow_threshold_min(
contents.page_type(),
self.usable_space() as u16,
),
self.usable_space(),
)?;
match contents.is_leaf() {
// For a leaf cell, clear the overflow pages associated with this cell
true => {
let destroy_info = self
.state
.mut_destroy_info()
.expect("unable to get a mut reference to destroy state in cursor");
destroy_info.state = DestroyState::ClearOverflowPages { cell };
continue;
}
// For interior cells, check the type of cell to determine what to do
false => match &cell {
// For index interior cells, remove the overflow pages
BTreeCell::IndexInteriorCell(_) => {
let destroy_info = self.state.mut_destroy_info().expect(
"unable to get a mut reference to destroy state in cursor",
);
destroy_info.state = DestroyState::ClearOverflowPages { cell };
continue;
}
// For all other interior cells, load the left child page
_ => {
let child_page_id = match &cell {
BTreeCell::TableInteriorCell(cell) => cell._left_child_page,
BTreeCell::IndexInteriorCell(cell) => cell.left_child_page,
_ => panic!("expected interior cell"),
};
let child_page = self.pager.read_page(child_page_id as usize)?;
self.stack.advance();
self.stack.push(child_page);
let destroy_info = self.state.mut_destroy_info().expect(
"unable to get a mut reference to destroy state in cursor",
);
destroy_info.state = DestroyState::LoadPage;
continue;
}
},
}
}
DestroyState::ClearOverflowPages { cell } => {
match self.clear_overflow_pages(&cell)? {
CursorResult::Ok(_) => match cell {
// For an index interior cell, clear the left child page now that overflow pages have been cleared
BTreeCell::IndexInteriorCell(index_int_cell) => {
let child_page = self
.pager
.read_page(index_int_cell.left_child_page as usize)?;
self.stack.advance();
self.stack.push(child_page);
let destroy_info = self.state.mut_destroy_info().expect(
"unable to get a mut reference to destroy state in cursor",
);
destroy_info.state = DestroyState::LoadPage;
continue;
}
// For any leaf cell, advance the index now that overflow pages have been cleared
BTreeCell::TableLeafCell(_) | BTreeCell::IndexLeafCell(_) => {
self.stack.advance();
let destroy_info = self.state.mut_destroy_info().expect(
"unable to get a mut reference to destroy state in cursor",
);
destroy_info.state = DestroyState::LoadPage;
}
_ => panic!("unexpected cell type"),
},
CursorResult::IO => return Ok(CursorResult::IO),
}
}
DestroyState::FreePage => {
let page = self.stack.top();
let page_id = page.get().id;
self.pager.free_page(Some(page), page_id)?;
if self.stack.has_parent() {
self.stack.pop();
let destroy_info = self
.state
.mut_destroy_info()
.expect("unable to get a mut reference to destroy state in cursor");
destroy_info.state = DestroyState::ProcessPage;
} else {
self.state = CursorState::None;
return Ok(CursorResult::Ok(()));
}
}
}
}
}
pub fn table_id(&self) -> usize {
@@ -3389,7 +3607,7 @@ mod tests {
#[ignore]
pub fn test_clear_overflow_pages() -> Result<()> {
let (pager, db_header) = setup_test_env(5);
let cursor = BTreeCursor::new(None, pager.clone(), 1);
let mut cursor = BTreeCursor::new(None, pager.clone(), 1);
let max_local = payload_overflow_threshold_max(PageType::TableLeaf, 4096);
let usable_size = cursor.usable_space();
@@ -3487,7 +3705,7 @@ mod tests {
#[test]
pub fn test_clear_overflow_pages_no_overflow() -> Result<()> {
let (pager, db_header) = setup_test_env(5);
let cursor = BTreeCursor::new(None, pager.clone(), 1);
let mut cursor = BTreeCursor::new(None, pager.clone(), 1);
let small_payload = vec![b'A'; 10];
@@ -3525,6 +3743,90 @@ mod tests {
Ok(())
}
#[test]
/// Build a tiny 3-page b-tree (interior root on page 2 with two leaf
/// children) and verify that `btree_destroy` puts all three pages on the
/// freelist.
fn test_btree_destroy() -> Result<()> {
    let initial_size = 3;
    let (pager, db_header) = setup_test_env(initial_size);
    // Match the other cursor constructions in this module
    // (e.g. `BTreeCursor::new(None, pager.clone(), 1)`): no MVCC cursor,
    // root page 2.
    let mut cursor = BTreeCursor::new(None, pager.clone(), 2);

    assert_eq!(
        db_header.borrow().database_size,
        initial_size,
        "Database should initially have 3 pages"
    );

    // Initialize page 2 as a root page (interior)
    let root_page = cursor.pager.read_page(2)?;
    {
        btree_init_page(&root_page, PageType::TableInterior, 0, 512); // Use proper page size
    }

    // Allocate two leaf pages
    let page3 = cursor.pager.allocate_page()?;
    btree_init_page(&page3, PageType::TableLeaf, 0, 512);
    let page4 = cursor.pager.allocate_page()?;
    btree_init_page(&page4, PageType::TableLeaf, 0, 512);

    // Configure the root page to point to the two leaf pages
    {
        let contents = root_page.get().contents.as_mut().unwrap();

        // Set rightmost pointer to page4
        contents.write_u32(PAGE_HEADER_OFFSET_RIGHTMOST_PTR, page4.get().id as u32);

        // Create a cell with pointer to page3
        let cell_content = vec![
            // First 4 bytes: left child pointer (page3)
            (page3.get().id >> 24) as u8,
            (page3.get().id >> 16) as u8,
            (page3.get().id >> 8) as u8,
            page3.get().id as u8,
            // Next byte: rowid as varint (simple value 100)
            100,
        ];

        // Insert the cell
        insert_into_cell(contents, &cell_content, 0, 512)?;
    }

    // Add a simple record to each leaf page
    for page in [&page3, &page4] {
        let contents = page.get().contents.as_mut().unwrap();

        // Simple record with just a rowid and payload
        let record_bytes = vec![
            5,                   // Payload length (varint)
            page.get().id as u8, // Rowid (varint)
            b'h',
            b'e',
            b'l',
            b'l',
            b'o', // Payload
        ];

        insert_into_cell(contents, &record_bytes, 0, 512)?;
    }

    // Verify structure before destruction: initial 3 pages + 2 allocated leaves.
    assert_eq!(
        db_header.borrow().database_size,
        5, // We should have 5 pages total now
        "Database should have 5 pages total"
    );

    // Track freelist state before destruction
    let initial_free_pages = db_header.borrow().freelist_pages;
    assert_eq!(initial_free_pages, 0, "should start with no free pages");

    run_until_done(|| cursor.btree_destroy(), pager.deref())?;

    let pages_freed = db_header.borrow().freelist_pages - initial_free_pages;
    assert_eq!(pages_freed, 3, "should free 3 pages (root + 2 leaves)");

    Ok(())
}
#[test]
pub fn test_defragment() {
let db = get_database();

View File

@@ -30,6 +30,7 @@ use crate::storage::sqlite3_ondisk::DatabaseHeader;
use crate::translate::delete::translate_delete;
use crate::util::PRIMARY_KEY_AUTOMATIC_INDEX_NAME_PREFIX;
use crate::vdbe::builder::{CursorType, ProgramBuilderOpts, QueryMode};
use crate::vdbe::insn::CmpInsFlags;
use crate::vdbe::{builder::ProgramBuilder, insn::Insn, Program};
use crate::{bail_parse_error, Connection, LimboError, Result, SymbolTable};
use insert::translate_insert;
@@ -88,7 +89,10 @@ pub fn translate(
}
ast::Stmt::Detach(_) => bail_parse_error!("DETACH not supported yet"),
ast::Stmt::DropIndex { .. } => bail_parse_error!("DROP INDEX not supported yet"),
ast::Stmt::DropTable { .. } => bail_parse_error!("DROP TABLE not supported yet"),
ast::Stmt::DropTable {
if_exists,
tbl_name,
} => translate_drop_table(query_mode, tbl_name, if_exists, schema)?,
ast::Stmt::DropTrigger { .. } => bail_parse_error!("DROP TRIGGER not supported yet"),
ast::Stmt::DropView { .. } => bail_parse_error!("DROP VIEW not supported yet"),
ast::Stmt::Pragma(name, body) => pragma::translate_pragma(
@@ -226,7 +230,7 @@ fn emit_schema_entry(
if let Some(sql) = sql {
program.emit_string8(sql, sql_reg);
} else {
program.emit_null(sql_reg);
program.emit_null(sql_reg, None);
}
let record_reg = program.alloc_register();
@@ -521,6 +525,173 @@ fn translate_create_table(
Ok(program)
}
/// Build the bytecode program for `DROP TABLE [IF EXISTS] <tbl_name>`.
///
/// The generated program: (1) deletes the table's rows from `sqlite_schema`
/// (skipping trigger rows), (2) emits `Destroy` for each index b-tree,
/// (3) emits `Destroy` for the table b-tree, and (4) emits `DropTable` to
/// evict the in-memory schema entries. Trigger preservation via an
/// ephemeral table is still TODO (blocked on `OpenEphemeral`, see
/// https://github.com/tursodatabase/limbo/pull/768).
///
/// Errors with a parse error if the table does not exist and `if_exists`
/// was not given; with `if_exists` a no-op program is returned instead.
fn translate_drop_table(
    query_mode: QueryMode,
    tbl_name: ast::QualifiedName,
    if_exists: bool,
    schema: &Schema,
) -> Result<ProgramBuilder> {
    let mut program = ProgramBuilder::new(ProgramBuilderOpts {
        query_mode,
        num_cursors: 1,
        approx_num_insns: 30,
        approx_num_labels: 1,
    });
    let table = schema.get_btree_table(tbl_name.name.0.as_str());
    if table.is_none() {
        if if_exists {
            // Table is absent but IF EXISTS was given: emit a minimal
            // program that still wraps itself in a transaction and halts.
            let init_label = program.emit_init();
            let start_offset = program.offset();
            program.emit_halt();
            program.resolve_label(init_label, program.offset());
            program.emit_transaction(true);
            program.emit_constant_insns();
            program.emit_goto(start_offset);
            return Ok(program);
        }
        bail_parse_error!("No such table: {}", tbl_name.name.0.as_str());
    }
    let table = table.unwrap(); // safe since we just checked for None

    let init_label = program.emit_init();
    let start_offset = program.offset();

    // Register layout mirrors SQLite's codegen for DROP TABLE.
    let null_reg = program.alloc_register(); // r1
    program.emit_null(null_reg, None);
    let tbl_name_reg = program.alloc_register(); // r2 — scratch for Column reads
    let table_reg = program.emit_string8_new_reg(tbl_name.name.0.clone()); // r3 — the table's name
    program.mark_last_insn_constant();
    let table_type = program.emit_string8_new_reg("trigger".to_string()); // r4 — schema type to skip
    program.mark_last_insn_constant();
    let row_id_reg = program.alloc_register(); // r5 — rowid of the schema row to delete

    // Open a write cursor on sqlite_schema (root page 1).
    let table_name = "sqlite_schema";
    let schema_table = schema.get_btree_table(&table_name).unwrap();
    let sqlite_schema_cursor_id = program.alloc_cursor_id(
        Some(table_name.to_string()),
        CursorType::BTreeTable(schema_table.clone()),
    );
    program.emit_insn(Insn::OpenWriteAsync {
        cursor_id: sqlite_schema_cursor_id,
        root_page: 1,
    });
    program.emit_insn(Insn::OpenWriteAwait {});

    // 1. Remove all entries from the schema table related to the table we are dropping,
    //    except for triggers.
    // Rewind to the beginning of the schema table.
    program.emit_insn(Insn::RewindAsync {
        cursor_id: sqlite_schema_cursor_id,
    });
    let end_metadata_label = program.allocate_label();
    program.emit_insn(Insn::RewindAwait {
        cursor_id: sqlite_schema_cursor_id,
        pc_if_empty: end_metadata_label,
    });
    // Start of the loop over schema table rows.
    let metadata_loop = program.allocate_label();
    program.resolve_label(metadata_loop, program.offset());
    // Column 2 (tbl_name): skip rows belonging to other tables.
    program.emit_insn(Insn::Column {
        cursor_id: sqlite_schema_cursor_id,
        column: 2,
        dest: tbl_name_reg,
    });
    let next_label = program.allocate_label();
    program.emit_insn(Insn::Ne {
        lhs: tbl_name_reg,
        rhs: table_reg,
        target_pc: next_label,
        flags: CmpInsFlags::default(),
    });
    // Column 0 (type): skip trigger rows — they are meant to be handled
    // separately via an ephemeral table (TODO below).
    program.emit_insn(Insn::Column {
        cursor_id: sqlite_schema_cursor_id,
        column: 0,
        dest: tbl_name_reg,
    });
    program.emit_insn(Insn::Eq {
        lhs: tbl_name_reg,
        rhs: table_type,
        target_pc: next_label,
        flags: CmpInsFlags::default(),
    });
    // Delete the current schema row.
    program.emit_insn(Insn::RowId {
        cursor_id: sqlite_schema_cursor_id,
        dest: row_id_reg,
    });
    program.emit_insn(Insn::DeleteAsync {
        cursor_id: sqlite_schema_cursor_id,
    });
    program.emit_insn(Insn::DeleteAwait {
        cursor_id: sqlite_schema_cursor_id,
    });
    program.resolve_label(next_label, program.offset());
    program.emit_insn(Insn::NextAsync {
        cursor_id: sqlite_schema_cursor_id,
    });
    program.emit_insn(Insn::NextAwait {
        cursor_id: sqlite_schema_cursor_id,
        pc_if_next: metadata_loop,
    });
    program.resolve_label(end_metadata_label, program.offset());
    // End of the loop over the schema table.

    // 2. Destroy each index b-tree belonging to the table.
    let indices = schema.get_indices(&tbl_name.name.0);
    for index in indices {
        program.emit_insn(Insn::Destroy {
            root: index.root_page,
            former_root_reg: 0, // no autovacuum (https://www.sqlite.org/opcode.html#Destroy)
            is_temp: 0,
        });
        let null_reg_1 = program.alloc_register();
        let null_reg_2 = program.alloc_register();
        program.emit_null(null_reg_1, Some(null_reg_2));
        // TODO: Open an ephemeral table, and read over triggers from schema table into ephemeral table.
        // Requires support via https://github.com/tursodatabase/limbo/pull/768
        // TODO: Open a write cursor to the schema table and re-insert all triggers into the
        // sqlite schema table from the ephemeral table and delete old trigger.
        // Requires support via https://github.com/tursodatabase/limbo/pull/768
    }

    // 3. Destroy the table b-tree itself.
    program.emit_insn(Insn::Destroy {
        root: table.root_page,
        former_root_reg: 0, // no autovacuum (https://www.sqlite.org/opcode.html#Destroy)
        is_temp: 0,
    });
    let r6 = program.alloc_register();
    let r7 = program.alloc_register();
    program.emit_null(r6, Some(r7));
    // 4. TODO: Open an ephemeral table, and read over triggers from schema table into ephemeral table.
    // Requires support via https://github.com/tursodatabase/limbo/pull/768
    // 5. TODO: Open a write cursor to the schema table and re-insert all triggers into the
    // sqlite schema table from the ephemeral table and delete old trigger.
    // Requires support via https://github.com/tursodatabase/limbo/pull/768

    // Drop the in-memory structures for the table.
    program.emit_insn(Insn::DropTable {
        db: 0,
        _p2: 0,
        _p3: 0,
        table_name: tbl_name.name.0,
    });

    // End of the program.
    program.emit_halt();
    program.resolve_label(init_label, program.offset());
    program.emit_transaction(true);
    program.emit_constant_insns();
    program.emit_goto(start_offset);
    Ok(program)
}
enum PrimaryKeyDefinitionType<'a> {
Simple {
typename: Option<&'a str>,

View File

@@ -237,7 +237,7 @@ fn query_pragma(
// dflt_value
match &column.default {
None => {
program.emit_null(base_reg + 4);
program.emit_null(base_reg + 4, None);
}
Some(expr) => {
program.emit_string8(expr.to_string(), base_reg + 4);

View File

@@ -139,11 +139,8 @@ impl ProgramBuilder {
});
}
pub fn emit_null(&mut self, dest: usize) {
self.emit_insn(Insn::Null {
dest,
dest_end: None,
});
/// Emit a `Null` instruction writing NULL into register `dest`; when
/// `dest_end` is `Some`, the instruction covers the whole register range
/// (per the `Null` opcode semantics, as used by DROP TABLE codegen).
pub fn emit_null(&mut self, dest: usize, dest_end: Option<usize>) {
    let insn = Insn::Null { dest, dest_end };
    self.emit_insn(insn);
}
pub fn emit_result_row(&mut self, start_reg: usize, count: usize) {

View File

@@ -1137,6 +1137,36 @@ pub fn insn_to_str(
0,
format!("r[{}]=root iDb={} flags={}", root, db, flags),
),
Insn::Destroy {
root,
former_root_reg,
is_temp,
} => (
"Destroy",
*root as i32,
*former_root_reg as i32,
*is_temp as i32,
OwnedValue::build_text(&Rc::new("".to_string())),
0,
format!(
"root iDb={} former_root={} is_temp={}",
root, former_root_reg, is_temp
),
),
Insn::DropTable {
db,
_p2,
_p3,
table_name,
} => (
"DropTable",
*db as i32,
0,
0,
OwnedValue::build_text(&Rc::new(table_name.clone())),
0,
format!("DROP TABLE {}", table_name),
),
Insn::Close { cursor_id } => (
"Close",
*cursor_id as i32,

View File

@@ -618,6 +618,28 @@ pub enum Insn {
flags: usize,
},
/// Deletes an entire database table or index whose root page in the database file is given by P1.
Destroy {
/// The root page of the table/index to destroy
root: usize,
/// Register to store the former value of any moved root page (for AUTOVACUUM)
former_root_reg: usize,
/// Whether this is a temporary table (1) or main database table (0)
is_temp: usize,
},
// Drop a table
DropTable {
/// The database within which this b-tree needs to be dropped (P1).
db: usize,
/// unused register p2
_p2: usize,
/// unused register p3
_p3: usize,
// The name of the table being dropped
table_name: String,
},
/// Close a cursor.
Close {
cursor_id: CursorID,

View File

@@ -3015,6 +3015,37 @@ impl Program {
state.registers[*root] = OwnedValue::Integer(root_page as i64);
state.pc += 1;
}
Insn::Destroy {
root,
former_root_reg: _,
is_temp,
} => {
if *is_temp == 1 {
todo!("temp databases not implemented yet.");
}
{
let mut cursor = state.get_cursor(*root);
let cursor = cursor.as_btree_mut();
cursor.btree_destroy()?;
}
state.pc += 1;
}
Insn::DropTable {
db,
_p2,
_p3,
table_name,
} => {
if *db > 0 {
todo!("temp databases not implemented yet");
}
if let Some(conn) = self.connection.upgrade() {
let mut schema = conn.schema.write();
schema.remove_indices_for_table(table_name);
schema.remove_table(table_name);
}
state.pc += 1;
}
Insn::Close { cursor_id } => {
let mut cursors = state.cursors.borrow_mut();
cursors.get_mut(*cursor_id).unwrap().take();

56
testing/drop_table.test Executable file
View File

@@ -0,0 +1,56 @@
#!/usr/bin/env tclsh
# Regression tests for DROP TABLE and DROP TABLE IF EXISTS.
# Each test runs against a fresh in-memory database and checks that the
# dropped table (and its indices) no longer appear in sqlite_schema.

set testdir [file dirname $argv0]
source $testdir/tester.tcl

# Basic DROP TABLE functionality
do_execsql_test_on_specific_db {:memory:} drop-table-basic-1 {
    CREATE TABLE t1(x INTEGER PRIMARY KEY);
    INSERT INTO t1 VALUES (1);
    INSERT INTO t1 VALUES (2);
    DROP TABLE t1;
    SELECT count(*) FROM sqlite_schema WHERE type='table' AND name='t1';
} {0}

# Test DROP TABLE IF EXISTS on existing table
do_execsql_test_on_specific_db {:memory:} drop-table-if-exists-1 {
    CREATE TABLE t2(x INTEGER PRIMARY KEY);
    DROP TABLE IF EXISTS t2;
    SELECT count(*) FROM sqlite_schema WHERE type='table' AND name='t2';
} {0}

# Test DROP TABLE IF EXISTS on non-existent table (must be a no-op, not an error)
do_execsql_test_on_specific_db {:memory:} drop-table-if-exists-2 {
    DROP TABLE IF EXISTS nonexistent_table;
    SELECT 'success';
} {success}

# Test dropping table with index
do_execsql_test_on_specific_db {:memory:} drop-table-with-index-1 {
    CREATE TABLE t3(x INTEGER PRIMARY KEY, y TEXT);
    CREATE INDEX idx_t3_y ON t3(y);
    INSERT INTO t3 VALUES(1, 'one');
    DROP TABLE t3;
    SELECT count(*) FROM sqlite_schema WHERE tbl_name='t3';
} {0}

# Test dropping table cleans up related schema entries (table row + both index rows)
do_execsql_test_on_specific_db {:memory:} drop-table-schema-cleanup-1 {
    CREATE TABLE t4(x INTEGER PRIMARY KEY, y TEXT);
    CREATE INDEX idx1_t4 ON t4(x);
    CREATE INDEX idx2_t4 ON t4(y);
    INSERT INTO t4 VALUES(1, 'one');
    DROP TABLE t4;
    SELECT count(*) FROM sqlite_schema WHERE tbl_name='t4';
} {0}

# Test dropping table after multiple inserts and deletes
do_execsql_test_on_specific_db {:memory:} drop-table-after-ops-1 {
    CREATE TABLE t6(x INTEGER PRIMARY KEY);
    INSERT INTO t6 VALUES (1);
    INSERT INTO t6 VALUES (2);
    DELETE FROM t6 WHERE x = 1;
    INSERT INTO t6 VALUES (3);
    DROP TABLE t6;
    SELECT count(*) FROM sqlite_schema WHERE type='table' AND name='t6';
} {0}