Merge 'index method syntax extension' from Nikita Sivukhin

Add support for the index method syntax extension (similar to PostgreSQL's)
and hide it behind the `--experimental-index-method` flag for now:
```sh
$> cargo run --package turso_cli -- --experimental-index-method
turso> CREATE TABLE t(x);
turso> CREATE INDEX t_idx ON t USING index_method (x) WITH (a = 1, b = '2');
turso> SELECT * FROM sqlite_master;
┌───────┬───────┬──────────┬──────────┬───────────────────────────────────────────────────────────────────────┐
│ type  │ name  │ tbl_name │ rootpage │ sql                                                                   │
├───────┼───────┼──────────┼──────────┼───────────────────────────────────────────────────────────────────────┤
│ table │ t     │ t        │        2 │ CREATE TABLE t (x)                                                    │
├───────┼───────┼──────────┼──────────┼───────────────────────────────────────────────────────────────────────┤
│ index │ t_idx │ t        │        3 │ CREATE INDEX t_idx ON t USING index_method (x) WITH (a = 1, b = '2') │
└───────┴───────┴──────────┴──────────┴───────────────────────────────────────────────────────────────────────┘
```

Reviewed-by: Jussi Saurio <jussi.saurio@gmail.com>
Reviewed-by: Preston Thorpe <preston@turso.tech>

Closes #3842
Committed by Preston Thorpe via GitHub on 2025-10-27 14:03:22 -04:00
15 changed files with 307 additions and 161 deletions

View File

@@ -317,7 +317,7 @@ impl Drop for Connection {
#[allow(clippy::arc_with_non_send_sync)]
#[pyfunction(signature = (path))]
pub fn connect(path: &str) -> Result<Connection> {
match turso_core::Connection::from_uri(path, true, false, false, false, false) {
match turso_core::Connection::from_uri(path, true, false, false, false, false, false) {
Ok((io, conn)) => Ok(Connection { conn, _io: io }),
Err(e) => Err(PyErr::new::<ProgrammingError, _>(format!(
"Failed to create connection: {e:?}"

View File

@@ -78,6 +78,8 @@ pub struct Opts {
pub mcp: bool,
#[clap(long, help = "Enable experimental encryption feature")]
pub experimental_encryption: bool,
#[clap(long, help = "Enable experimental index method feature")]
pub experimental_index_method: bool,
}
const PROMPT: &str = "turso> ";
@@ -192,6 +194,7 @@ impl Limbo {
opts.experimental_views,
opts.experimental_strict,
opts.experimental_encryption,
opts.experimental_index_method,
)?
} else {
let flags = if opts.readonly {
@@ -209,6 +212,7 @@ impl Limbo {
.with_views(opts.experimental_views)
.with_strict(opts.experimental_strict)
.with_encryption(opts.experimental_encryption)
.with_index_method(opts.experimental_index_method)
.turso_cli(),
None,
)?;
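
As a reference, here is a minimal standalone sketch of the flag wiring, assuming clap with the derive feature enabled; only the `experimental_index_method` field mirrors the `Opts` struct above, the rest is illustrative:

```rust
use clap::Parser;

// Stand-in for the CLI Opts struct; only the new field is taken from the diff.
#[derive(Parser)]
struct Opts {
    #[clap(long, help = "Enable experimental index method feature")]
    experimental_index_method: bool,
}

fn main() {
    // Simulate `turso_cli --experimental-index-method` without touching real argv.
    let opts = Opts::parse_from(["turso_cli", "--experimental-index-method"]);
    assert!(opts.experimental_index_method);
    // The boolean is then threaded into DatabaseOpts::with_index_method (see below).
}
```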

View File

@@ -408,7 +408,7 @@ impl TursoMcpServer {
// Open the new database connection
let conn = if path == ":memory:" || path.contains([':', '?', '&', '#']) {
match Connection::from_uri(&path, true, false, false, false, false) {
match Connection::from_uri(&path, true, false, false, false, false, false) {
Ok((_io, c)) => c,
Err(e) => return format!("Failed to open database '{path}': {e}"),
}

View File

@@ -316,6 +316,7 @@ mod tests {
enable_strict: false,
enable_load_extension: false,
enable_encryption: false,
enable_index_method: false,
},
None,
)?;

View File

@@ -113,6 +113,7 @@ pub struct DatabaseOpts {
pub enable_views: bool,
pub enable_strict: bool,
pub enable_encryption: bool,
pub enable_index_method: bool,
enable_load_extension: bool,
}
@@ -124,6 +125,7 @@ impl Default for DatabaseOpts {
enable_views: false,
enable_strict: false,
enable_encryption: false,
enable_index_method: false,
enable_load_extension: false,
}
}
@@ -164,6 +166,11 @@ impl DatabaseOpts {
self.enable_encryption = enable;
self
}
pub fn with_index_method(mut self, enable: bool) -> Self {
self.enable_index_method = enable;
self
}
}
#[derive(Clone, Debug, Default)]
@@ -580,7 +587,7 @@ impl Database {
mv_tx: RwLock::new(None),
view_transaction_states: AllViewsTxState::new(),
metrics: RwLock::new(ConnectionMetrics::new()),
is_nested_stmt: AtomicBool::new(false),
nestedness: AtomicI32::new(0),
encryption_key: RwLock::new(None),
encryption_cipher_mode: AtomicCipherMode::new(CipherMode::None),
sync_mode: AtomicSyncMode::new(SyncMode::Full),
@@ -870,6 +877,10 @@ impl Database {
self.opts.enable_views
}
pub fn experimental_index_method_enabled(&self) -> bool {
self.opts.enable_index_method
}
pub fn experimental_strict_enabled(&self) -> bool {
self.opts.enable_strict
}
@@ -1094,9 +1105,13 @@ pub struct Connection {
view_transaction_states: AllViewsTxState,
/// Connection-level metrics aggregation
pub metrics: RwLock<ConnectionMetrics>,
/// Whether the connection is executing a statement initiated by another statement.
/// Generally this is only true for ParseSchema.
is_nested_stmt: AtomicBool,
/// Greater than zero if the connection is executing a program within another program.
/// This is necessary so the connection does not "finalize" the transaction (commit/abort) when its program ends
/// (the parent program is still pending and will handle finalization instead).
///
/// The state is an integer because nested programs may themselves spawn further nested programs
/// (e.g. Root -[run]-> S1 -[run]-> S2 -[run]-> ...), so we track the current nesting depth to know when we are back at the root.
nestedness: AtomicI32,
encryption_key: RwLock<Option<EncryptionKey>>,
encryption_cipher_mode: AtomicCipherMode,
sync_mode: AtomicSyncMode,
@@ -1128,6 +1143,18 @@ impl Drop for Connection {
}
impl Connection {
/// Check whether the connection is executing a nested program (in which case it must not do any "finalization" work, as the parent program will handle it)
pub fn is_nested_stmt(&self) -> bool {
self.nestedness.load(Ordering::SeqCst) > 0
}
/// starts nested program execution
pub fn start_nested(&self) {
self.nestedness.fetch_add(1, Ordering::SeqCst);
}
/// ends nested program execution
pub fn end_nested(&self) {
self.nestedness.fetch_add(-1, Ordering::SeqCst);
}
pub fn prepare(self: &Arc<Connection>, sql: impl AsRef<str>) -> Result<Statement> {
if self.is_mvcc_bootstrap_connection() {
// Never use MV store for bootstrapping - we read state directly from sqlite_schema in the DB file.
@@ -1458,6 +1485,8 @@ impl Connection {
strict: bool,
// flag to opt-in encryption support
encryption: bool,
// flag to opt-in custom modules support
custom_modules: bool,
) -> Result<(Arc<dyn IO>, Arc<Connection>)> {
use crate::util::MEMORY_PATH;
let opts = OpenOptions::parse(uri)?;
@@ -1473,7 +1502,8 @@ impl Connection {
.with_indexes(use_indexes)
.with_views(views)
.with_strict(strict)
.with_encryption(encryption),
.with_encryption(encryption)
.with_index_method(custom_modules),
None,
)?;
let conn = db.connect()?;
@@ -1502,7 +1532,8 @@ impl Connection {
.with_indexes(use_indexes)
.with_views(views)
.with_strict(strict)
.with_encryption(encryption),
.with_encryption(encryption)
.with_index_method(custom_modules),
encryption_opts.clone(),
)?;
if let Some(modeof) = opts.modeof {
@@ -1981,6 +2012,10 @@ impl Connection {
self.db.experimental_views_enabled()
}
pub fn experimental_index_method_enabled(&self) -> bool {
self.db.experimental_index_method_enabled()
}
pub fn experimental_strict_enabled(&self) -> bool {
self.db.experimental_strict_enabled()
}
@@ -2681,12 +2716,7 @@ impl Statement {
pub fn run_once(&self) -> Result<()> {
let res = self.pager.io.step();
if self
.program
.connection
.is_nested_stmt
.load(Ordering::SeqCst)
{
if self.program.connection.is_nested_stmt() {
return res;
}
if res.is_err() {
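
To make the nestedness counter introduced in this file easier to follow, here is a self-contained sketch of the same pattern; `Conn` is a stand-in, not the crate's actual `Connection` type:

```rust
use std::sync::atomic::{AtomicI32, Ordering};

// Stand-in for the Connection fields/methods shown in the diff above.
struct Conn {
    nestedness: AtomicI32,
}

impl Conn {
    fn is_nested_stmt(&self) -> bool {
        self.nestedness.load(Ordering::SeqCst) > 0
    }
    fn start_nested(&self) {
        self.nestedness.fetch_add(1, Ordering::SeqCst);
    }
    fn end_nested(&self) {
        self.nestedness.fetch_add(-1, Ordering::SeqCst);
    }
}

fn main() {
    let conn = Conn { nestedness: AtomicI32::new(0) };

    conn.start_nested(); // e.g. ParseSchema runs a nested program
    conn.start_nested(); // nested programs may nest further
    assert!(conn.is_nested_stmt()); // finalization is deferred to the parent

    conn.end_nested();
    assert!(conn.is_nested_stmt()); // still inside the outer nested program

    conn.end_nested();
    assert!(!conn.is_nested_stmt()); // back at the root: commit/rollback may run
}
```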

View File

@@ -3,6 +3,7 @@ use crate::incremental::view::IncrementalView;
use crate::translate::expr::{
bind_and_rewrite_expr, walk_expr, BindingBehavior, ParamState, WalkControl,
};
use crate::translate::index::resolve_sorted_columns;
use crate::translate::planner::ROWID_STRS;
use parking_lot::RwLock;
@@ -2449,27 +2450,7 @@ impl Index {
..
})) => {
let index_name = normalize_ident(idx_name.name.as_str());
let mut index_columns = Vec::with_capacity(columns.len());
for col in columns.into_iter() {
let name = normalize_ident(match col.expr.as_ref() {
Expr::Id(col_name) | Expr::Name(col_name) => col_name.as_str(),
_ => crate::bail_parse_error!("cannot use expressions in CREATE INDEX"),
});
let Some((pos_in_table, _)) = table.get_column(&name) else {
return Err(crate::LimboError::InternalError(format!(
"Column {} is in index {} but not found in table {}",
name, index_name, table.name
)));
};
let (_, column) = table.get_column(&name).unwrap();
index_columns.push(IndexColumn {
name,
order: col.order.unwrap_or(SortOrder::Asc),
pos_in_table,
collation: column.collation,
default: column.default.clone(),
});
}
let index_columns = resolve_sorted_columns(table, &columns)?;
Ok(Index {
name: index_name,
table_name: normalize_ident(tbl_name.as_str()),

View File

@@ -1443,8 +1443,8 @@ impl Pager {
#[instrument(skip_all, level = Level::DEBUG)]
pub fn commit_tx(&self, connection: &Connection) -> Result<IOResult<PagerCommitResult>> {
if connection.is_nested_stmt.load(Ordering::SeqCst) {
// Parent statement will handle the transaction rollback.
if connection.is_nested_stmt() {
// Parent statement will handle the transaction commit.
return Ok(IOResult::Done(PagerCommitResult::Rollback));
}
let Some(wal) = self.wal.as_ref() else {
@@ -1473,7 +1473,7 @@ impl Pager {
#[instrument(skip_all, level = Level::DEBUG)]
pub fn rollback_tx(&self, connection: &Connection) {
if connection.is_nested_stmt.load(Ordering::SeqCst) {
if connection.is_nested_stmt() {
// Parent statement will handle the transaction rollback.
return;
}

View File

@@ -15,7 +15,7 @@ use crate::vdbe::builder::CursorKey;
use crate::vdbe::insn::{CmpInsFlags, Cookie};
use crate::vdbe::BranchOffset;
use crate::{
schema::{BTreeTable, Column, Index, IndexColumn, PseudoCursorType},
schema::{BTreeTable, Index, IndexColumn, PseudoCursorType},
storage::pager::CreateBTreeFlags,
util::normalize_ident,
vdbe::{
@@ -23,24 +23,42 @@ use crate::{
insn::{IdxInsertFlags, Insn, RegisterOrLiteral},
},
};
use turso_parser::ast::{Expr, Name, SortOrder, SortedColumn};
use turso_parser::ast::{self, Expr, SortOrder, SortedColumn};
use super::schema::{emit_schema_entry, SchemaEntryType, SQLITE_TABLEID};
#[allow(clippy::too_many_arguments)]
pub fn translate_create_index(
unique_if_not_exists: (bool, bool),
resolver: &Resolver,
idx_name: &Name,
tbl_name: &Name,
columns: &[SortedColumn],
mut program: ProgramBuilder,
connection: &Arc<crate::Connection>,
where_clause: Option<Box<Expr>>,
resolver: &Resolver,
stmt: ast::Stmt,
) -> crate::Result<ProgramBuilder> {
let sql = stmt.to_string();
let ast::Stmt::CreateIndex {
unique,
if_not_exists,
idx_name,
tbl_name,
columns,
where_clause,
with_clause,
using,
} = stmt
else {
panic!("translate_create_index must be called with CreateIndex AST node");
};
if !connection.experimental_index_method_enabled()
&& (using.is_some() || !with_clause.is_empty())
{
bail_parse_error!(
"index method is an experimental feature. Enable with --experimental-index-method flag"
)
}
let original_idx_name = idx_name;
let original_tbl_name = tbl_name;
let idx_name = normalize_ident(idx_name.as_str());
let idx_name = normalize_ident(original_idx_name.name.as_str());
let tbl_name = normalize_ident(tbl_name.as_str());
if tbl_name.eq_ignore_ascii_case("sqlite_sequence") {
@@ -80,7 +98,7 @@ pub fn translate_create_index(
// the name is globally unique in the schema.
if !resolver.schema.is_unique_idx_name(&idx_name) {
// If IF NOT EXISTS is specified, silently return without error
if unique_if_not_exists.1 {
if if_not_exists {
return Ok(program);
}
crate::bail_parse_error!("Error: index with name '{idx_name}' already exists.");
@@ -91,24 +109,18 @@ pub fn translate_create_index(
let Some(tbl) = table.btree() else {
crate::bail_parse_error!("Error: table '{tbl_name}' is not a b-tree table.");
};
let original_columns = columns;
let columns = resolve_sorted_columns(&tbl, columns)?;
let unique = unique_if_not_exists.0;
let columns = resolve_sorted_columns(&tbl, &columns)?;
let custom_module = using.is_some();
if !with_clause.is_empty() && !custom_module {
crate::bail_parse_error!(
"Error: additional parameters are allowed only for custom module indices: '{idx_name}' is not custom module index"
);
}
let idx = Arc::new(Index {
name: idx_name.clone(),
table_name: tbl.name.clone(),
root_page: 0, // we don't have access until it's created, after we parse the schema table
columns: columns
.iter()
.map(|((pos_in_table, col), order)| IndexColumn {
name: col.name.as_ref().unwrap().clone(),
order: *order,
pos_in_table: *pos_in_table,
collation: col.collation,
default: col.default.clone(),
})
.collect(),
columns: columns.clone(),
unique,
ephemeral: false,
has_rowid: tbl.has_rowid,
@@ -121,6 +133,7 @@ pub fn translate_create_index(
crate::bail_parse_error!(
"Error: cannot use aggregate, window functions or reference other tables in WHERE clause of CREATE INDEX:\n {}",
where_clause
.as_ref()
.expect("where expr has to exist in order to fail")
.to_string()
);
@@ -178,13 +191,6 @@ pub fn translate_create_index(
root_page: RegisterOrLiteral::Literal(sqlite_table.root_page),
db: 0,
});
let sql = create_idx_stmt_to_sql(
&original_tbl_name.as_ident(),
&original_idx_name.as_ident(),
unique_if_not_exists,
original_columns,
&idx.where_clause.clone(),
);
let cdc_table = prepare_cdc_if_necessary(&mut program, resolver.schema, SQLITE_TABLEID)?;
emit_schema_entry(
&mut program,
@@ -253,8 +259,8 @@ pub fn translate_create_index(
}
let start_reg = program.alloc_registers(columns.len() + 1);
for (i, (col, _)) in columns.iter().enumerate() {
program.emit_column_or_rowid(table_cursor_id, col.0, start_reg + i);
for (i, col) in columns.iter().enumerate() {
program.emit_column_or_rowid(table_cursor_id, col.pos_in_table, start_reg + i);
}
let rowid_reg = start_reg + columns.len();
program.emit_insn(Insn::RowId {
@@ -376,10 +382,10 @@ pub fn translate_create_index(
Ok(program)
}
fn resolve_sorted_columns<'a>(
table: &'a BTreeTable,
pub fn resolve_sorted_columns(
table: &BTreeTable,
cols: &[SortedColumn],
) -> crate::Result<Vec<((usize, &'a Column), SortOrder)>> {
) -> crate::Result<Vec<IndexColumn>> {
let mut resolved = Vec::with_capacity(cols.len());
for sc in cols {
let ident = match sc.expr.as_ref() {
@@ -394,52 +400,17 @@ fn resolve_sorted_columns<'a>(
table.name
);
};
resolved.push((col, sc.order.unwrap_or(SortOrder::Asc)));
resolved.push(IndexColumn {
name: col.1.name.as_ref().unwrap().clone(),
order: sc.order.unwrap_or(SortOrder::Asc),
pos_in_table: col.0,
collation: col.1.collation,
default: col.1.default.clone(),
});
}
Ok(resolved)
}
fn create_idx_stmt_to_sql(
tbl_name: &str,
idx_name: &str,
unique_if_not_exists: (bool, bool),
cols: &[SortedColumn],
where_clause: &Option<Box<Expr>>,
) -> String {
let mut sql = String::with_capacity(128);
sql.push_str("CREATE ");
if unique_if_not_exists.0 {
sql.push_str("UNIQUE ");
}
sql.push_str("INDEX ");
if unique_if_not_exists.1 {
sql.push_str("IF NOT EXISTS ");
}
sql.push_str(idx_name);
sql.push_str(" ON ");
sql.push_str(tbl_name);
sql.push_str(" (");
for (i, col) in cols.iter().enumerate() {
if i > 0 {
sql.push_str(", ");
}
let col_ident = match col.expr.as_ref() {
Expr::Id(name) | Expr::Name(name) => name.as_ident(),
_ => unreachable!("expressions in CREATE INDEX should have been rejected earlier"),
};
sql.push_str(&col_ident);
if col.order.unwrap_or(SortOrder::Asc) == SortOrder::Desc {
sql.push_str(" DESC");
}
}
sql.push(')');
if let Some(where_clause) = where_clause {
sql.push_str(" WHERE ");
sql.push_str(&where_clause.to_string());
}
sql
}
pub fn translate_drop_index(
idx_name: &str,
resolver: &Resolver,
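
A minimal standalone sketch of the two checks added in `translate_create_index` above (the experimental gate and the WITH-without-USING rejection); the error strings mirror the diff, the function itself is illustrative:

```rust
// Illustrative stand-alone version of the gating logic in translate_create_index.
fn check_index_method_clauses(
    index_method_enabled: bool,
    using: Option<&str>,
    with_clause: &[(String, String)],
) -> Result<(), String> {
    // USING/WITH are only accepted when the experimental flag is on.
    if !index_method_enabled && (using.is_some() || !with_clause.is_empty()) {
        return Err(
            "index method is an experimental feature. Enable with --experimental-index-method flag"
                .to_string(),
        );
    }
    // WITH parameters are only meaningful for custom module indices.
    if !with_clause.is_empty() && using.is_none() {
        return Err("additional parameters are allowed only for custom module indices".to_string());
    }
    Ok(())
}

fn main() {
    let with = vec![("a".to_string(), "1".to_string())];
    assert!(check_index_method_clauses(false, Some("index_method"), &[]).is_err());
    assert!(check_index_method_clauses(true, None, &with).is_err());
    assert!(check_index_method_clauses(true, Some("index_method"), &with).is_ok());
}
```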

View File

@@ -152,23 +152,9 @@ pub fn translate_inner(
}
ast::Stmt::Begin { typ, name } => translate_tx_begin(typ, name, resolver.schema, program)?,
ast::Stmt::Commit { name } => translate_tx_commit(name, program)?,
ast::Stmt::CreateIndex {
unique,
if_not_exists,
idx_name,
tbl_name,
columns,
where_clause,
} => translate_create_index(
(unique, if_not_exists),
resolver,
&idx_name.name,
&tbl_name,
&columns,
program,
connection,
where_clause,
)?,
ast::Stmt::CreateIndex { .. } => {
translate_create_index(program, connection, resolver, stmt)?
}
ast::Stmt::CreateTable {
temporary,
if_not_exists,

View File

@@ -2274,8 +2274,7 @@ pub fn op_transaction_inner(
// 1. We try to upgrade current version
let current_state = conn.get_tx_state();
let (new_transaction_state, updated) = if conn.is_nested_stmt.load(Ordering::SeqCst)
{
let (new_transaction_state, updated) = if conn.is_nested_stmt() {
(current_state, false)
} else {
match (current_state, write) {
@@ -2365,7 +2364,7 @@ pub fn op_transaction_inner(
}
if updated && matches!(current_state, TransactionState::None) {
turso_assert!(
!conn.is_nested_stmt.load(Ordering::SeqCst),
!conn.is_nested_stmt(),
"nested stmt should not begin a new read transaction"
);
pager.begin_read_tx()?;
@@ -2374,7 +2373,7 @@ pub fn op_transaction_inner(
if updated && matches!(new_transaction_state, TransactionState::Write { .. }) {
turso_assert!(
!conn.is_nested_stmt.load(Ordering::SeqCst),
!conn.is_nested_stmt(),
"nested stmt should not begin a new write transaction"
);
let begin_w_tx_res = pager.begin_write_tx();
@@ -5397,6 +5396,8 @@ pub fn op_function(
idx_name,
columns,
where_clause,
using,
with_clause,
} => {
let table_name = normalize_ident(tbl_name.as_str());
@@ -5412,6 +5413,8 @@ pub fn op_function(
idx_name,
columns,
where_clause,
using,
with_clause,
}
.to_string(),
)
@@ -5495,6 +5498,8 @@ pub fn op_function(
if_not_exists,
idx_name,
where_clause,
using,
with_clause,
} => {
if table != normalize_ident(tbl_name.as_str()) {
break 'sql None;
@@ -5521,6 +5526,8 @@ pub fn op_function(
if_not_exists,
idx_name,
where_clause,
using,
with_clause,
}
.to_string(),
)
@@ -7303,7 +7310,7 @@ pub fn op_parse_schema(
conn.with_schema_mut(|schema| {
// TODO: This function below is synchronous, make it async
let existing_views = schema.incremental_views.clone();
conn.is_nested_stmt.store(true, Ordering::SeqCst);
conn.start_nested();
parse_schema_rows(
stmt,
schema,
@@ -7318,7 +7325,7 @@ pub fn op_parse_schema(
conn.with_schema_mut(|schema| {
// TODO: This function below is synchronous, make it async
let existing_views = schema.incremental_views.clone();
conn.is_nested_stmt.store(true, Ordering::SeqCst);
conn.start_nested();
parse_schema_rows(
stmt,
schema,
@@ -7328,7 +7335,7 @@ pub fn op_parse_schema(
)
})
};
conn.is_nested_stmt.store(false, Ordering::SeqCst);
conn.end_nested();
conn.auto_commit
.store(previous_auto_commit, Ordering::SeqCst);
maybe_nested_stmt_err?;

View File

@@ -917,11 +917,11 @@ impl Program {
// hence the mv_store.is_none() check.
return Ok(IOResult::Done(()));
}
if let Some(mv_store) = mv_store {
if self.connection.is_nested_stmt.load(Ordering::SeqCst) {
if self.connection.is_nested_stmt() {
// We don't want to commit on nested statements. Let parent handle it.
return Ok(IOResult::Done(()));
}
if let Some(mv_store) = mv_store {
let conn = self.connection.clone();
let auto_commit = conn.auto_commit.load(Ordering::SeqCst);
if auto_commit {
@@ -1050,7 +1050,7 @@ impl Program {
cleanup: &mut TxnCleanup,
) {
// Errors from nested statements are handled by the parent statement.
if !self.connection.is_nested_stmt.load(Ordering::SeqCst) {
if !self.connection.is_nested_stmt() {
match err {
// Transaction errors, e.g. trying to start a nested transaction, do not cause a rollback.
Some(LimboError::TxError(_)) => {}

View File

@@ -114,8 +114,12 @@ pub enum Stmt {
idx_name: QualifiedName,
/// table name
tbl_name: Name,
/// USING module
using: Option<Name>,
/// indexed columns or expressions
columns: Vec<SortedColumn>,
/// WITH parameters
with_clause: Vec<(Name, Box<Expr>)>,
/// partial index
where_clause: Option<Box<Expr>>,
},

View File

@@ -294,7 +294,9 @@ impl ToTokens for Stmt {
if_not_exists,
idx_name,
tbl_name,
using,
columns,
with_clause,
where_clause,
} => {
s.append(TK_CREATE, None)?;
@@ -310,9 +312,28 @@ impl ToTokens for Stmt {
idx_name.to_tokens(s, context)?;
s.append(TK_ON, None)?;
tbl_name.to_tokens(s, context)?;
if let Some(using) = using {
s.append(TK_USING, None)?;
using.to_tokens(s, context)?;
}
s.append(TK_LP, None)?;
comma(columns, s, context)?;
s.append(TK_RP, None)?;
if !with_clause.is_empty() {
s.append(TK_WITH, None)?;
s.append(TK_LP, None)?;
let mut first = true;
for (name, value) in with_clause.iter() {
if !first {
s.append(TK_COMMA, None)?;
}
first = false;
name.to_tokens(s, context)?;
s.append(TK_EQ, None)?;
value.to_tokens(s, context)?;
}
s.append(TK_RP, None)?;
}
if let Some(where_clause) = where_clause {
s.append(TK_WHERE, None)?;
where_clause.to_tokens(s, context)?;
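
The serialization above is what turns the parsed clauses back into the SQL text stored in `sqlite_master` (as shown in the commit message example). A toy string-based equivalent of just the WITH-clause branch:

```rust
// Toy equivalent of the ToTokens WITH-clause branch above: emit nothing when
// the clause is empty, otherwise ` WITH (name = value, ...)`.
fn render_with_clause(params: &[(String, String)]) -> String {
    if params.is_empty() {
        return String::new();
    }
    let body: Vec<String> = params
        .iter()
        .map(|(name, value)| format!("{name} = {value}"))
        .collect();
    format!(" WITH ({})", body.join(", "))
}

fn main() {
    let params = vec![
        ("a".to_string(), "1".to_string()),
        ("b".to_string(), "'2'".to_string()),
    ];
    assert_eq!(render_with_clause(&params), " WITH (a = 1, b = '2')");
}
```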

View File

@@ -3579,6 +3579,57 @@ impl<'a> Parser<'a> {
}
}
fn parse_create_index_using(&mut self) -> Result<Option<Name>> {
if let Some(tok) = self.peek()? {
if tok.token_type == Some(TK_USING) {
eat_assert!(self, TK_USING);
} else {
return Ok(None);
}
} else {
return Ok(None);
}
Ok(Some(self.parse_nm()?))
}
fn parse_with_parameter(&mut self) -> Result<(Name, Box<Expr>)> {
let name = self.parse_nm()?;
eat_expect!(self, TK_EQ);
let value = self.parse_term()?;
Ok((name, value))
}
fn parse_with_parameters(&mut self) -> Result<Vec<(Name, Box<Expr>)>> {
if let Some(tok) = self.peek()? {
if tok.token_type == Some(TK_WITH) {
eat_assert!(self, TK_WITH);
} else {
return Ok(Vec::new());
}
} else {
return Ok(Vec::new());
}
eat_expect!(self, TK_LP);
let mut parameters = vec![];
let mut first = true;
loop {
let tok = peek_expect!(self, TK_RP, TK_COMMA, TK_ID);
if tok.token_type == Some(TK_RP) {
break;
}
if !first {
eat_expect!(self, TK_COMMA);
}
first = false;
parameters.push(self.parse_with_parameter()?);
}
eat_expect!(self, TK_RP);
Ok(parameters)
}
fn parse_create_index(&mut self) -> Result<Stmt> {
let tok = eat_assert!(self, TK_INDEX, TK_UNIQUE);
let has_unique = tok.token_type == Some(TK_UNIQUE);
@@ -3590,16 +3641,23 @@ impl<'a> Parser<'a> {
let idx_name = self.parse_fullname(false)?;
eat_expect!(self, TK_ON);
let tbl_name = self.parse_nm()?;
let using = self.parse_create_index_using()?;
eat_expect!(self, TK_LP);
let columns = self.parse_sort_list()?;
eat_expect!(self, TK_RP);
let with_clause = self.parse_with_parameters()?;
let where_clause = self.parse_where()?;
Ok(Stmt::CreateIndex {
if_not_exists,
idx_name,
tbl_name,
using,
columns,
with_clause,
where_clause,
unique: has_unique,
})
@@ -9998,6 +10056,8 @@ mod tests {
nulls: None,
}],
where_clause: None,
using: None,
with_clause: Vec::new(),
})],
),
(
@@ -10019,6 +10079,8 @@ mod tests {
where_clause: Some(Box::new(
Expr::Literal(Literal::Numeric("1".to_owned()))
)),
using: None,
with_clause: Vec::new(),
})],
),
// parse create table
@@ -11559,6 +11621,32 @@ mod tests {
options: TableOptions::NONE,
},
})],
),
(
b"CREATE INDEX t_idx ON t USING custom_index (x) WITH (a = 1, b = 'test', c = x'deadbeef', d = NULL)".as_slice(),
vec![Cmd::Stmt(Stmt::CreateIndex {
unique: false,
if_not_exists: false,
idx_name: QualifiedName {
db_name: None,
name: Name::exact("t_idx".to_owned()),
alias: None,
},
tbl_name: Name::exact("t".to_owned()),
columns: vec![SortedColumn {
expr: Box::new(Expr::Id(Name::exact("x".to_owned()))),
order: None,
nulls: None,
}],
where_clause: None,
using: Some(Name::exact("custom_index".to_owned())),
with_clause: vec![
(Name::exact("a".to_string()), Box::new(Expr::Literal(Literal::Numeric("1".to_string())))),
(Name::exact("b".to_string()), Box::new(Expr::Literal(Literal::String("'test'".to_string())))),
(Name::exact("c".to_string()), Box::new(Expr::Literal(Literal::Blob("deadbeef".to_string())))),
(Name::exact("d".to_string()), Box::new(Expr::Literal(Literal::Null))),
],
})],
)
];
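
For readers unfamiliar with the grammar being added, here is a self-contained toy parser for the optional `WITH (name = value, ...)` clause over a pre-split token list; the crate's real `Parser` works on lexer tokens (`TK_WITH`, `TK_LP`, ...), so this is only a sketch of the shape of the grammar:

```rust
// Toy version of parse_with_parameters over string tokens; purely illustrative.
fn parse_with_parameters(tokens: &[&str]) -> Result<Vec<(String, String)>, String> {
    let mut it = tokens.iter().copied().peekable();

    // The WITH clause is optional: no WITH token means no parameters.
    if it.peek() == Some(&"WITH") {
        it.next();
    } else {
        return Ok(Vec::new());
    }

    if it.next() != Some("(") {
        return Err("expected '(' after WITH".to_string());
    }

    let mut params = Vec::new();
    let mut first = true;
    loop {
        if it.peek() == Some(&")") {
            it.next();
            break;
        }
        if !first && it.next() != Some(",") {
            return Err("expected ',' between parameters".to_string());
        }
        first = false;

        let name = it.next().ok_or("expected parameter name".to_string())?;
        if it.next() != Some("=") {
            return Err("expected '=' after parameter name".to_string());
        }
        let value = it.next().ok_or("expected parameter value".to_string())?;
        params.push((name.to_string(), value.to_string()));
    }
    Ok(params)
}

fn main() {
    let tokens = ["WITH", "(", "a", "=", "1", ",", "b", "=", "'2'", ")"];
    assert_eq!(
        parse_with_parameters(&tokens).unwrap(),
        vec![
            ("a".to_string(), "1".to_string()),
            ("b".to_string(), "'2'".to_string()),
        ]
    );
    // No WITH clause at all is fine and yields an empty parameter list.
    assert!(parse_with_parameters(&["WHERE", "x"]).unwrap().is_empty());
}
```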

View File

@@ -46,8 +46,15 @@ fn test_per_page_encryption() -> anyhow::Result<()> {
"file:{}?cipher=aegis256&hexkey=b1bbfda4f589dc9daaf004fe21111e00dc00c98237102f5c7002a5669fc76327",
db_path.to_str().unwrap()
);
let (_io, conn) =
turso_core::Connection::from_uri(&uri, true, false, false, false, ENABLE_ENCRYPTION)?;
let (_io, conn) = turso_core::Connection::from_uri(
&uri,
true,
false,
false,
false,
ENABLE_ENCRYPTION,
false,
)?;
let mut row_count = 0;
run_query_on_row(&tmp_db, &conn, "SELECT * FROM test", |row: &Row| {
assert_eq!(row.get::<i64>(0).unwrap(), 1);
@@ -62,8 +69,15 @@ fn test_per_page_encryption() -> anyhow::Result<()> {
"file:{}?cipher=aegis256&hexkey=b1bbfda4f589dc9daaf004fe21111e00dc00c98237102f5c7002a5669fc76327",
db_path.to_str().unwrap()
);
let (_io, conn) =
turso_core::Connection::from_uri(&uri, true, false, false, false, ENABLE_ENCRYPTION)?;
let (_io, conn) = turso_core::Connection::from_uri(
&uri,
true,
false,
false,
false,
ENABLE_ENCRYPTION,
false,
)?;
run_query(
&tmp_db,
&conn,
@@ -77,8 +91,15 @@ fn test_per_page_encryption() -> anyhow::Result<()> {
"file:{}?cipher=aegis256&hexkey=b1bbfda4f589dc9daaf004fe21111e00dc00c98237102f5c7002a5669fc76327",
db_path.to_str().unwrap()
);
let (_io, conn) =
turso_core::Connection::from_uri(&uri, true, false, false, false, ENABLE_ENCRYPTION)?;
let (_io, conn) = turso_core::Connection::from_uri(
&uri,
true,
false,
false,
false,
ENABLE_ENCRYPTION,
false,
)?;
run_query(
&tmp_db,
&conn,
@@ -100,8 +121,15 @@ fn test_per_page_encryption() -> anyhow::Result<()> {
"file:{}?cipher=aegis256&hexkey=b1bbfda4f589dc9daaf004fe21111e00dc00c98237102f5c7002a5669fc76377",
db_path.to_str().unwrap()
);
let (_io, conn) =
turso_core::Connection::from_uri(&uri, true, false, false, false, ENABLE_ENCRYPTION)?;
let (_io, conn) = turso_core::Connection::from_uri(
&uri,
true,
false,
false,
false,
ENABLE_ENCRYPTION,
false,
)?;
let should_panic = panic::catch_unwind(panic::AssertUnwindSafe(|| {
run_query_on_row(&tmp_db, &conn, "SELECT * FROM test", |_row: &Row| {}).unwrap();
}));
@@ -114,7 +142,15 @@ fn test_per_page_encryption() -> anyhow::Result<()> {
// Test connecting to an encrypted db using insufficient encryption parameters in the URI. This should panic.
let uri = format!("file:{}?cipher=aegis256", db_path.to_str().unwrap());
let should_panic = panic::catch_unwind(panic::AssertUnwindSafe(|| {
turso_core::Connection::from_uri(&uri, true, false, false, false, ENABLE_ENCRYPTION)
turso_core::Connection::from_uri(
&uri,
true,
false,
false,
false,
ENABLE_ENCRYPTION,
false,
)
.unwrap();
}));
assert!(
@@ -128,7 +164,15 @@ fn test_per_page_encryption() -> anyhow::Result<()> {
db_path.to_str().unwrap()
);
let should_panic = panic::catch_unwind(panic::AssertUnwindSafe(|| {
turso_core::Connection::from_uri(&uri, true, false, false, false, ENABLE_ENCRYPTION)
turso_core::Connection::from_uri(
&uri,
true,
false,
false,
false,
ENABLE_ENCRYPTION,
false,
)
.unwrap();
}));
assert!(
@@ -195,8 +239,15 @@ fn test_non_4k_page_size_encryption() -> anyhow::Result<()> {
"file:{}?cipher=aegis256&hexkey=b1bbfda4f589dc9daaf004fe21111e00dc00c98237102f5c7002a5669fc76327",
db_path.to_str().unwrap()
);
let (_io, conn) =
turso_core::Connection::from_uri(&uri, true, false, false, false, ENABLE_ENCRYPTION)?;
let (_io, conn) = turso_core::Connection::from_uri(
&uri,
true,
false,
false,
false,
ENABLE_ENCRYPTION,
false,
)?;
run_query_on_row(&tmp_db, &conn, "SELECT * FROM test", |row: &Row| {
assert_eq!(row.get::<i64>(0).unwrap(), 1);
assert_eq!(row.get::<String>(1).unwrap(), "Hello, World!");
@@ -261,6 +312,7 @@ fn test_corruption_turso_magic_bytes() -> anyhow::Result<()> {
false,
false,
ENABLE_ENCRYPTION,
false,
)
.unwrap();
run_query_on_row(&tmp_db, &conn, "SELECT * FROM test", |_row: &Row| {}).unwrap();
@@ -353,6 +405,7 @@ fn test_corruption_associated_data_bytes() -> anyhow::Result<()> {
false,
false,
ENABLE_ENCRYPTION,
false,
)
.unwrap();
run_query_on_row(&test_tmp_db, &conn, "SELECT * FROM test", |_row: &Row| {})