Merge branch 'main' into feat/db-open-js-impl

Forato
2025-06-26 11:34:33 -03:00
47 changed files with 372 additions and 292 deletions

View File

@@ -29,7 +29,7 @@ jobs:
env:
RUST_BACKTRACE: 1
- name: Run ignored long tests with index
run: cargo test --features index_experimental -- --ignored fuzz_long
run: cargo test -- --ignored fuzz_long
env:
RUST_BACKTRACE: 1

View File

@@ -41,12 +41,6 @@ jobs:
RUST_LOG: ${{ runner.debug && 'limbo_core::storage=trace' || '' }}
run: cargo test --verbose
timeout-minutes: 20
- name: Tests with indexes
env:
RUST_LOG: ${{ runner.debug && 'limbo_core::storage=trace' || '' }}
run: cargo test --verbose --features index_experimental
timeout-minutes: 20
clippy:
runs-on: blacksmith-4vcpu-ubuntu-2404

View File

@@ -100,63 +100,63 @@ jobs:
never-fail: true
nyrkio-public: true
# tpc-h-criterion:
# runs-on: blacksmith-4vcpu-ubuntu-2404
# env:
# DB_FILE: "perf/tpc-h/TPC-H.db"
# steps:
# - uses: actions/checkout@v3
# - uses: useblacksmith/rust-cache@v3
# with:
# prefix-key: "v1-rust" # can be updated if we need to reset caches due to a non-trivial change in the dependencies (for example, custom env vars were set for a single workspace project)
#
# - name: Cache TPC-H
# id: cache-primes
# uses: useblacksmith/cache@v5
# with:
# path: ${{ env.DB_FILE }}
# key: tpc-h
# - name: Download TPC-H
# if: steps.cache-primes.outputs.cache-hit != 'true'
# env:
# DB_URL: "https://github.com/lovasoa/TPCH-sqlite/releases/download/v1.0/TPC-H.db"
# run: wget -O $DB_FILE --no-verbose $DB_URL
#
# - name: Bench
# run: cargo bench --bench tpc_h_benchmark 2>&1 | tee output.txt
# - name: Analyze benchmark result with Nyrkiö
# uses: nyrkio/change-detection@HEAD
# with:
# name: tpc-h
# tool: criterion
# output-file-path: output.txt
#
# # What to do if a change is immediately detected by Nyrkiö.
# # Note that smaller changes are only detected with delay, usually after a change
# # persisted over 2-7 commits. Go to nyrkiö.com to view those or configure alerts.
# # Note that Nyrkiö will find all changes, also improvements. This means fail-on-alert
# # on pull events isn't compatible with this workflow being required to pass branch protection.
# fail-on-alert: false
# comment-on-alert: true
# comment-always: false
# # Nyrkiö configuration
# # Get yours from https://nyrkio.com/docs/getting-started
# nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }}
# # HTTP requests will fail for all non-core contributors that don't have their own token.
# # Don't want that to spoil the build, so:
# never-fail: true
# # Make results and change points public, so that any oss contributor can see them
# nyrkio-public: true
#
# # parameters of the algorithm. Note: These are global, so we only set them once and for all.
# # Smaller p-value = less change points found. Larger p-value = more, but also more false positives.
# nyrkio-settings-pvalue: 0.0001
# # Ignore changes smaller than this.
# nyrkio-settings-threshold: 0%
#
# tpc-h:
# runs-on: blacksmith-4vcpu-ubuntu-2404
# steps:
# - uses: actions/checkout@v3
# - name: TPC-H
# run: ./perf/tpc-h/benchmark.sh
tpc-h-criterion:
runs-on: blacksmith-4vcpu-ubuntu-2404
env:
DB_FILE: "perf/tpc-h/TPC-H.db"
steps:
- uses: actions/checkout@v3
- uses: useblacksmith/rust-cache@v3
with:
prefix-key: "v1-rust" # can be updated if we need to reset caches due to a non-trivial change in the dependencies (for example, custom env vars were set for a single workspace project)
- name: Cache TPC-H
id: cache-primes
uses: useblacksmith/cache@v5
with:
path: ${{ env.DB_FILE }}
key: tpc-h
- name: Download TPC-H
if: steps.cache-primes.outputs.cache-hit != 'true'
env:
DB_URL: "https://github.com/lovasoa/TPCH-sqlite/releases/download/v1.0/TPC-H.db"
run: wget -O $DB_FILE --no-verbose $DB_URL
- name: Bench
run: cargo bench --bench tpc_h_benchmark 2>&1 | tee output.txt
- name: Analyze benchmark result with Nyrkiö
uses: nyrkio/change-detection@HEAD
with:
name: tpc-h
tool: criterion
output-file-path: output.txt
# What to do if a change is immediately detected by Nyrkiö.
# Note that smaller changes are only detected with delay, usually after a change
# persisted over 2-7 commits. Go to nyrkiö.com to view those or configure alerts.
# Note that Nyrkiö will find all changes, also improvements. This means fail-on-alert
# on pull events isn't compatible with this workflow being required to pass branch protection.
fail-on-alert: false
comment-on-alert: true
comment-always: false
# Nyrkiö configuration
# Get yours from https://nyrkio.com/docs/getting-started
nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }}
# HTTP requests will fail for all non-core contributors that don't have their own token.
# Don't want that to spoil the build, so:
never-fail: true
# Make results and change points public, so that any oss contributor can see them
nyrkio-public: true
# parameters of the algorithm. Note: These are global, so we only set them once and for all.
# Smaller p-value = less change points found. Larger p-value = more, but also more false positives.
nyrkio-settings-pvalue: 0.0001
# Ignore changes smaller than this.
nyrkio-settings-threshold: 0%
tpc-h:
runs-on: blacksmith-4vcpu-ubuntu-2404
steps:
- uses: actions/checkout@v3
- name: TPC-H
run: ./perf/tpc-h/benchmark.sh

View File

@@ -24,7 +24,7 @@ pub unsafe extern "C" fn db_open(path: *const c_char) -> *mut c_void {
p if p.contains(":memory:") => Arc::new(limbo_core::MemoryIO::new()),
_ => Arc::new(limbo_core::PlatformIO::new().expect("Failed to create IO")),
};
let db = Database::open_file(io.clone(), path, false);
let db = Database::open_file(io.clone(), path, false, false);
match db {
Ok(db) => {
let conn = db.connect().unwrap();

View File

@@ -68,7 +68,7 @@ pub extern "system" fn Java_tech_turso_core_LimboDB_openUtf8<'local>(
}
};
let db = match Database::open_file(io.clone(), &path, false) {
let db = match Database::open_file(io.clone(), &path, false, false) {
Ok(db) => db,
Err(e) => {
set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string());

View File

@@ -71,7 +71,7 @@ impl Database {
let file = io.open_file(&path, flag, false).map_err(into_napi_error)?;
let db_file = Arc::new(DatabaseFile::new(file));
let db = limbo_core::Database::open(io.clone(), &path, db_file, false)
let db = limbo_core::Database::open(io.clone(), &path, db_file, false, false)
.map_err(into_napi_error)?;
let conn = db.connect().map_err(into_napi_error)?;

View File

@@ -305,7 +305,7 @@ pub fn connect(path: &str) -> Result<Connection> {
io: Arc<dyn limbo_core::IO>,
path: &str,
) -> std::result::Result<Arc<limbo_core::Database>, PyErr> {
limbo_core::Database::open_file(io, path, false).map_err(|e| {
limbo_core::Database::open_file(io, path, false, false).map_err(|e| {
PyErr::new::<DatabaseError, _>(format!("Failed to open database: {:?}", e))
})
}

View File

@@ -9,9 +9,6 @@ license.workspace = true
repository.workspace = true
description = "Limbo Rust API"
[features]
index_experimental = ["limbo_core/index_experimental"]
[dependencies]
limbo_core = { workspace = true, features = ["io_uring"] }
thiserror = "2.0.9"

View File

@@ -1,3 +1,37 @@
//! # Limbo bindings for Rust
//!
//! Limbo is an in-process SQL database engine, compatible with SQLite.
//!
//! ## Getting Started
//!
//! To get started, you first need to create a [`Database`] object and then open a [`Connection`] to it, which you use to query:
//!
//! ```rust,no_run
//! # async fn run() {
//! use limbo::Builder;
//!
//! let db = Builder::new_local(":memory:").build().await.unwrap();
//! let conn = db.connect().unwrap();
//! conn.execute("CREATE TABLE IF NOT EXISTS users (email TEXT)", ()).await.unwrap();
//! conn.execute("INSERT INTO users (email) VALUES ('alice@example.org')", ()).await.unwrap();
//! # }
//! ```
//!
//! You can also prepare statements with the [`Connection`] object and then execute the [`Statement`] objects:
//!
//! ```rust,no_run
//! # async fn run() {
//! # use limbo::Builder;
//! # let db = Builder::new_local(":memory:").build().await.unwrap();
//! # let conn = db.connect().unwrap();
//! let mut stmt = conn.prepare("SELECT * FROM users WHERE email = ?1").await.unwrap();
//! let mut rows = stmt.query(["foo@example.com"]).await.unwrap();
//! let row = rows.next().await.unwrap().unwrap();
//! let value = row.get_value(0).unwrap();
//! println!("Row: {:?}", value);
//! # }
//! ```
pub mod params;
pub mod value;
@@ -29,34 +63,41 @@ impl From<limbo_core::LimboError> for Error {
pub(crate) type BoxError = Box<dyn std::error::Error + Send + Sync>;
pub type Result<T> = std::result::Result<T, Error>;
/// A builder for `Database`.
pub struct Builder {
path: String,
}
impl Builder {
/// Create a new local database.
pub fn new_local(path: &str) -> Self {
Self {
path: path.to_string(),
}
}
/// Build the database.
#[allow(unused_variables, clippy::arc_with_non_send_sync)]
pub async fn build(self) -> Result<Database> {
match self.path.as_str() {
":memory:" => {
let io: Arc<dyn limbo_core::IO> = Arc::new(limbo_core::MemoryIO::new());
let db = limbo_core::Database::open_file(io, self.path.as_str(), false)?;
let db = limbo_core::Database::open_file(io, self.path.as_str(), false, false)?;
Ok(Database { inner: db })
}
path => {
let io: Arc<dyn limbo_core::IO> = Arc::new(limbo_core::PlatformIO::new()?);
let db = limbo_core::Database::open_file(io, path, false)?;
let db = limbo_core::Database::open_file(io, path, false, false)?;
Ok(Database { inner: db })
}
}
}
}
/// A database.
///
/// The `Database` object points to a database and allows you to connect to it.
#[derive(Clone)]
pub struct Database {
inner: Arc<limbo_core::Database>,
@@ -72,6 +113,7 @@ impl Debug for Database {
}
impl Database {
/// Connect to the database.
pub fn connect(&self) -> Result<Connection> {
let conn = self.inner.connect()?;
#[allow(clippy::arc_with_non_send_sync)]
@@ -82,6 +124,7 @@ impl Database {
}
}
/// A database connection.
pub struct Connection {
inner: Arc<Mutex<Arc<limbo_core::Connection>>>,
}
@@ -98,16 +141,19 @@ unsafe impl Send for Connection {}
unsafe impl Sync for Connection {}
impl Connection {
/// Query the database with SQL.
pub async fn query(&self, sql: &str, params: impl IntoParams) -> Result<Rows> {
let mut stmt = self.prepare(sql).await?;
stmt.query(params).await
}
/// Execute a SQL statement on the database.
pub async fn execute(&self, sql: &str, params: impl IntoParams) -> Result<u64> {
let mut stmt = self.prepare(sql).await?;
stmt.execute(params).await
}
/// Prepare a SQL statement for later execution.
pub async fn prepare(&self, sql: &str) -> Result<Statement> {
let conn = self
.inner
@@ -123,6 +169,7 @@ impl Connection {
Ok(statement)
}
/// Query a pragma.
pub fn pragma_query<F>(&self, pragma_name: &str, mut f: F) -> Result<()>
where
F: FnMut(&Row) -> limbo_core::Result<()>,
@@ -154,6 +201,7 @@ impl Debug for Connection {
}
}
/// A prepared statement.
pub struct Statement {
inner: Arc<Mutex<limbo_core::Statement>>,
}
@@ -170,6 +218,7 @@ unsafe impl Send for Statement {}
unsafe impl Sync for Statement {}
impl Statement {
/// Query the database with this prepared statement.
pub async fn query(&mut self, params: impl IntoParams) -> Result<Rows> {
let params = params.into_params()?;
match params {
@@ -189,6 +238,7 @@ impl Statement {
Ok(rows)
}
/// Execute this prepared statement.
pub async fn execute(&mut self, params: impl IntoParams) -> Result<u64> {
{
// Reset the statement before executing
@@ -232,6 +282,7 @@ impl Statement {
}
}
/// Returns the columns of the result of this prepared statement.
pub fn columns(&self) -> Vec<Column> {
let stmt = self.inner.lock().unwrap();
@@ -251,16 +302,19 @@ impl Statement {
}
}
/// Column information.
pub struct Column {
name: String,
decl_type: Option<String>,
}
impl Column {
/// Returns the name of the column.
pub fn name(&self) -> &str {
&self.name
}
/// Returns the declared type of the column, if any.
pub fn decl_type(&self) -> Option<&str> {
self.decl_type.as_deref()
}
@@ -276,8 +330,10 @@ pub enum Params {
Positional(Vec<Value>),
Named(Vec<(String, Value)>),
}
pub struct Transaction {}
/// Results of a prepared statement query.
pub struct Rows {
inner: Arc<Mutex<limbo_core::Statement>>,
}
@@ -294,6 +350,7 @@ unsafe impl Send for Rows {}
unsafe impl Sync for Rows {}
impl Rows {
/// Fetch the next row of this result set.
pub async fn next(&mut self) -> Result<Option<Row>> {
loop {
let mut stmt = self
@@ -322,6 +379,7 @@ impl Rows {
}
}
/// Query result row.
#[derive(Debug)]
pub struct Row {
values: Vec<limbo_core::Value>,

View File

@@ -22,7 +22,7 @@ impl Database {
let io: Arc<dyn limbo_core::IO> = Arc::new(PlatformIO { vfs: VFS::new() });
let file = io.open_file(path, OpenFlags::Create, false).unwrap();
let db_file = Arc::new(DatabaseFile::new(file));
let db = limbo_core::Database::open(io, path, db_file, false).unwrap();
let db = limbo_core::Database::open(io, path, db_file, false, false).unwrap();
let conn = db.connect().unwrap();
Database { db, conn }
}

View File

@@ -50,7 +50,6 @@ toml_edit = {version = "0.22.24", features = ["serde"]}
[features]
default = ["io_uring"]
io_uring = ["limbo_core/io_uring"]
index_experimental = ["limbo_core/index_experimental"]
[build-dependencies]
syntect = "5.2.0"

View File

@@ -57,6 +57,8 @@ pub struct Opts {
pub vfs: Option<String>,
#[clap(long, help = "Enable experimental MVCC feature")]
pub experimental_mvcc: bool,
#[clap(long, help = "Enable experimental indexing feature")]
pub experimental_indexes: bool,
#[clap(short = 't', long, help = "specify output file for log traces")]
pub tracing_output: Option<String>,
}
@@ -129,7 +131,12 @@ impl Limbo {
};
(
io.clone(),
Database::open_file(io.clone(), &db_file, opts.experimental_mvcc)?,
Database::open_file(
io.clone(),
&db_file,
opts.experimental_mvcc,
opts.experimental_indexes,
)?,
)
};
let conn = db.connect()?;
@@ -356,7 +363,10 @@ impl Limbo {
_path => get_io(DbLocation::Path, &self.opts.io.to_string())?,
}
};
(io.clone(), Database::open_file(io.clone(), path, false)?)
(
io.clone(),
Database::open_file(io.clone(), path, false, false)?,
)
};
self.io = io;
self.conn = db.connect()?;

View File

@@ -15,7 +15,6 @@ path = "lib.rs"
[features]
default = ["fs", "uuid", "time", "json", "static"]
index_experimental = []
fs = ["limbo_ext/vfs"]
json = []
uuid = ["limbo_uuid/static"]

View File

@@ -18,7 +18,7 @@ fn bench_prepare_query(criterion: &mut Criterion) {
#[allow(clippy::arc_with_non_send_sync)]
let io = Arc::new(PlatformIO::new().unwrap());
let db = Database::open_file(io.clone(), "../testing/testing.db", false).unwrap();
let db = Database::open_file(io.clone(), "../testing/testing.db", false, false).unwrap();
let limbo_conn = db.connect().unwrap();
let queries = [
@@ -65,7 +65,7 @@ fn bench_execute_select_rows(criterion: &mut Criterion) {
#[allow(clippy::arc_with_non_send_sync)]
let io = Arc::new(PlatformIO::new().unwrap());
let db = Database::open_file(io.clone(), "../testing/testing.db", false).unwrap();
let db = Database::open_file(io.clone(), "../testing/testing.db", false, false).unwrap();
let limbo_conn = db.connect().unwrap();
let mut group = criterion.benchmark_group("Execute `SELECT * FROM users LIMIT ?`");
@@ -134,7 +134,7 @@ fn bench_execute_select_1(criterion: &mut Criterion) {
#[allow(clippy::arc_with_non_send_sync)]
let io = Arc::new(PlatformIO::new().unwrap());
let db = Database::open_file(io.clone(), "../testing/testing.db", false).unwrap();
let db = Database::open_file(io.clone(), "../testing/testing.db", false, false).unwrap();
let limbo_conn = db.connect().unwrap();
let mut group = criterion.benchmark_group("Execute `SELECT 1`");
@@ -187,7 +187,7 @@ fn bench_execute_select_count(criterion: &mut Criterion) {
#[allow(clippy::arc_with_non_send_sync)]
let io = Arc::new(PlatformIO::new().unwrap());
let db = Database::open_file(io.clone(), "../testing/testing.db", false).unwrap();
let db = Database::open_file(io.clone(), "../testing/testing.db", false, false).unwrap();
let limbo_conn = db.connect().unwrap();
let mut group = criterion.benchmark_group("Execute `SELECT count() FROM users`");

View File

@@ -22,7 +22,7 @@ fn bench(criterion: &mut Criterion) {
#[allow(clippy::arc_with_non_send_sync)]
let io = Arc::new(PlatformIO::new().unwrap());
let db = Database::open_file(io.clone(), "../testing/testing.db", false).unwrap();
let db = Database::open_file(io.clone(), "../testing/testing.db", false, false).unwrap();
let limbo_conn = db.connect().unwrap();
// Benchmark JSONB with different payload sizes
@@ -491,7 +491,7 @@ fn bench_sequential_jsonb(criterion: &mut Criterion) {
#[allow(clippy::arc_with_non_send_sync)]
let io = Arc::new(PlatformIO::new().unwrap());
let db = Database::open_file(io.clone(), "../testing/testing.db", false).unwrap();
let db = Database::open_file(io.clone(), "../testing/testing.db", false, false).unwrap();
let limbo_conn = db.connect().unwrap();
// Select a subset of JSON payloads to use in the sequential test
@@ -648,7 +648,7 @@ fn bench_json_patch(criterion: &mut Criterion) {
#[allow(clippy::arc_with_non_send_sync)]
let io = Arc::new(PlatformIO::new().unwrap());
let db = Database::open_file(io.clone(), "../testing/testing.db", false).unwrap();
let db = Database::open_file(io.clone(), "../testing/testing.db", false, false).unwrap();
let limbo_conn = db.connect().unwrap();
let json_patch_cases = [

View File

@@ -30,7 +30,7 @@ fn bench_tpc_h_queries(criterion: &mut Criterion) {
#[allow(clippy::arc_with_non_send_sync)]
let io = Arc::new(PlatformIO::new().unwrap());
let db = Database::open_file(io.clone(), TPC_H_PATH, false).unwrap();
let db = Database::open_file(io.clone(), TPC_H_PATH, false, true).unwrap();
let limbo_conn = db.connect().unwrap();
let queries = [

View File

@@ -109,7 +109,7 @@ impl Database {
}
},
};
let db = Self::open_file(io.clone(), path, false)?;
let db = Self::open_file(io.clone(), path, false, false)?;
Ok((io, db))
}
}

View File

@@ -114,8 +114,13 @@ unsafe impl Sync for Database {}
impl Database {
#[cfg(feature = "fs")]
pub fn open_file(io: Arc<dyn IO>, path: &str, enable_mvcc: bool) -> Result<Arc<Database>> {
Self::open_file_with_flags(io, path, OpenFlags::default(), enable_mvcc)
pub fn open_file(
io: Arc<dyn IO>,
path: &str,
enable_mvcc: bool,
enable_indexes: bool,
) -> Result<Arc<Database>> {
Self::open_file_with_flags(io, path, OpenFlags::default(), enable_mvcc, enable_indexes)
}
#[cfg(feature = "fs")]
@@ -124,10 +129,11 @@ impl Database {
path: &str,
flags: OpenFlags,
enable_mvcc: bool,
enable_indexes: bool,
) -> Result<Arc<Database>> {
let file = io.open_file(path, flags, true)?;
let db_file = Arc::new(DatabaseFile::new(file));
Self::open_with_flags(io, path, db_file, flags, enable_mvcc)
Self::open_with_flags(io, path, db_file, flags, enable_mvcc, enable_indexes)
}
#[allow(clippy::arc_with_non_send_sync)]
@@ -136,8 +142,16 @@ impl Database {
path: &str,
db_file: Arc<dyn DatabaseStorage>,
enable_mvcc: bool,
enable_indexes: bool,
) -> Result<Arc<Database>> {
Self::open_with_flags(io, path, db_file, OpenFlags::default(), enable_mvcc)
Self::open_with_flags(
io,
path,
db_file,
OpenFlags::default(),
enable_mvcc,
enable_indexes,
)
}
#[allow(clippy::arc_with_non_send_sync)]
@@ -147,6 +161,7 @@ impl Database {
db_file: Arc<dyn DatabaseStorage>,
flags: OpenFlags,
enable_mvcc: bool,
enable_indexes: bool,
) -> Result<Arc<Database>> {
let wal_path = format!("{}-wal", path);
let maybe_shared_wal = WalFileShared::open_shared_if_exists(&io, wal_path.as_str())?;
@@ -167,7 +182,7 @@ impl Database {
let is_empty = db_size == 0 && !wal_has_frames;
let shared_page_cache = Arc::new(RwLock::new(DumbLruPageCache::default()));
let schema = Arc::new(RwLock::new(Schema::new()));
let schema = Arc::new(RwLock::new(Schema::new(enable_indexes)));
let db = Database {
mv_store,
path: path.to_string(),
@@ -319,7 +334,7 @@ impl Database {
}
},
};
let db = Self::open_file(io.clone(), path, false)?;
let db = Self::open_file(io.clone(), path, false, false)?;
Ok((io, db))
}
}
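
Every caller in this commit gains the same fourth argument. A minimal sketch of the new call shape, modeled on the benchmark call sites elsewhere in this diff (the "app.db" path is illustrative):

```rust
use std::sync::Arc;
use limbo_core::{Database, PlatformIO};

fn main() {
    // open_file now takes (io, path, enable_mvcc, enable_indexes); the final
    // `true` opts this one database into experimental index support at
    // runtime, replacing the old compile-time feature flag.
    let io = Arc::new(PlatformIO::new().unwrap());
    let db = Database::open_file(io.clone(), "app.db", false, true).unwrap();
    let _conn = db.connect().unwrap();
}
```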

View File

@@ -21,18 +21,13 @@ pub struct Schema {
pub tables: HashMap<String, Arc<Table>>,
/// table_name to list of indexes for the table
pub indexes: HashMap<String, Vec<Arc<Index>>>,
/// Used for index_experimental feature flag to track whether a table has an index.
/// This is necessary because we won't populate indexes so that we don't use them but
/// we still need to know if a table has an index to disallow any write operation that requires
/// indexes.
#[cfg(not(feature = "index_experimental"))]
pub has_indexes: std::collections::HashSet<String>,
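/// Runtime switch that replaces the old `index_experimental` compile-time feature flag.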
pub indexes_enabled: bool,
}
impl Schema {
pub fn new() -> Self {
pub fn new(indexes_enabled: bool) -> Self {
let mut tables: HashMap<String, Arc<Table>> = HashMap::new();
#[cfg(not(feature = "index_experimental"))]
let has_indexes = std::collections::HashSet::new();
let indexes: HashMap<String, Vec<Arc<Index>>> = HashMap::new();
#[allow(clippy::arc_with_non_send_sync)]
@@ -43,8 +38,8 @@ impl Schema {
Self {
tables,
indexes,
#[cfg(not(feature = "index_experimental"))]
has_indexes,
indexes_enabled,
}
}
@@ -89,7 +84,6 @@ impl Schema {
}
}
#[cfg(feature = "index_experimental")]
pub fn add_index(&mut self, index: Arc<Index>) {
let table_name = normalize_ident(&index.table_name);
self.indexes
@@ -126,15 +120,17 @@ impl Schema {
.retain_mut(|other_idx| other_idx.name != idx.name);
}
#[cfg(not(feature = "index_experimental"))]
pub fn table_has_indexes(&self, table_name: &str) -> bool {
self.has_indexes.contains(table_name)
}
#[cfg(not(feature = "index_experimental"))]
pub fn table_set_has_index(&mut self, table_name: &str) {
self.has_indexes.insert(table_name.to_string());
}
pub fn indexes_enabled(&self) -> bool {
self.indexes_enabled
}
}
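
The net effect: even with indexes disabled, the schema still records which tables have indexes, so write paths can reject statements they cannot honor. A hedged sketch of the gate the translator changes below rely on (the module path and table name are assumptions, not from this diff):

```rust
use limbo_core::schema::Schema; // assumed path; the diff does not show the module

fn sketch() {
    // With indexes disabled we still record *that* a table has an index,
    // without loading index definitions we will not use.
    let mut schema = Schema::new(false);
    schema.table_set_has_index("users");

    // The shape of the gate used by the INSERT/UPDATE/DELETE/ALTER changes below.
    if schema.table_has_indexes("users") && !schema.indexes_enabled() {
        // bail_parse_error!: the write would have to maintain an unloaded index
    }
}
```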
#[derive(Clone, Debug)]

View File

@@ -6583,7 +6583,7 @@ mod tests {
.unwrap();
}
let io: Arc<dyn IO> = Arc::new(PlatformIO::new().unwrap());
let db = Database::open_file(io.clone(), path.to_str().unwrap(), false).unwrap();
let db = Database::open_file(io.clone(), path.to_str().unwrap(), false, false).unwrap();
db
}
@@ -7123,7 +7123,6 @@ mod tests {
}
}
#[cfg(feature = "index_experimental")]
fn btree_index_insert_fuzz_run(attempts: usize, inserts: usize) {
use crate::storage::pager::CreateBTreeFlags;
@@ -7311,7 +7310,6 @@ mod tests {
}
#[test]
#[cfg(feature = "index_experimental")]
pub fn btree_index_insert_fuzz_run_equal_size() {
btree_index_insert_fuzz_run(2, 1024);
}
@@ -7347,7 +7345,6 @@ mod tests {
#[test]
#[ignore]
#[cfg(feature = "index_experimental")]
pub fn fuzz_long_btree_index_insert_fuzz_run_equal_size() {
btree_index_insert_fuzz_run(2, 10_000);
}

View File

@@ -24,15 +24,12 @@ pub fn translate_alter_table(
) -> Result<ProgramBuilder> {
let (table_name, alter_table) = alter;
let ast::Name(table_name) = table_name.name;
#[cfg(not(feature = "index_experimental"))]
{
if schema.table_has_indexes(&table_name) && cfg!(not(feature = "index_experimental")) {
// Let's disable altering a table with indices altogether instead of checking column by
// column to be extra safe.
crate::bail_parse_error!(
"Alter table disabled for table with indexes without index_experimental feature flag"
);
}
if schema.table_has_indexes(&table_name) && !schema.indexes_enabled() {
// Let's disable altering a table with indices altogether instead of checking column by
// column to be extra safe.
crate::bail_parse_error!(
"ALTER TABLE for table with indexes is disabled by default. Run with `--experimental-indexes` to enable this feature."
);
}
let Some(original_btree) = schema

View File

@@ -154,7 +154,7 @@ fn emit_compound_select(
(cursor_id, index.clone())
}
_ => {
if cfg!(not(feature = "index_experimental")) {
if !schema.indexes_enabled() {
crate::bail_parse_error!("UNION not supported without indexes");
} else {
new_dedupe_index = true;

View File

@@ -18,15 +18,12 @@ pub fn translate_delete(
syms: &SymbolTable,
mut program: ProgramBuilder,
) -> Result<ProgramBuilder> {
#[cfg(not(feature = "index_experimental"))]
{
if schema.table_has_indexes(&tbl_name.name.to_string()) {
// Let's disable altering a table with indices altogether instead of checking column by
// column to be extra safe.
crate::bail_parse_error!(
"DELETE into table disabled for table with indexes and without index_experimental feature flag"
);
}
if schema.table_has_indexes(&tbl_name.name.to_string()) && !schema.indexes_enabled() {
// Let's disable deleting from a table with indices altogether instead of
// checking column by column, to be extra safe.
crate::bail_parse_error!(
"DELETE for table with indexes is disabled by default. Run with `--experimental-indexes` to enable this feature."
);
}
let mut delete_plan = prepare_delete_plan(
schema,

View File

@@ -23,8 +23,10 @@ pub fn translate_create_index(
schema: &Schema,
mut program: ProgramBuilder,
) -> crate::Result<ProgramBuilder> {
if cfg!(not(feature = "index_experimental")) {
crate::bail_parse_error!("CREATE INDEX enabled only with index_experimental feature");
if !schema.indexes_enabled() {
crate::bail_parse_error!(
"CREATE INDEX is disabled by default. Run with `--experimental-indexes` to enable this feature."
);
}
let idx_name = normalize_ident(idx_name);
let tbl_name = normalize_ident(tbl_name);
@@ -299,8 +301,10 @@ pub fn translate_drop_index(
schema: &Schema,
mut program: ProgramBuilder,
) -> crate::Result<ProgramBuilder> {
if cfg!(not(feature = "index_experimental")) {
crate::bail_parse_error!("DROP INDEX enabled only with index_experimental feature");
if !schema.indexes_enabled() {
crate::bail_parse_error!(
"DROP INDEX is disabled by default. Run with `--experimental-indexes` to enable this feature."
);
}
let idx_name = normalize_ident(idx_name);
let opts = crate::vdbe::builder::ProgramBuilderOpts {
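
At the user level this gate surfaces as a parse error at translation time. A hedged sketch using the TempDatabase helper updated later in this commit (the test name, table, and index are illustrative):

```rust
#[test]
fn create_index_requires_runtime_flag() {
    // Assumes the TempDatabase integration-test helper shown further down.
    let db = TempDatabase::new_empty(false); // opened with enable_indexes = false
    let conn = db.connect_limbo();
    conn.execute("CREATE TABLE t(x)").unwrap();
    // Translation should refuse this with the "disabled by default" error above.
    assert!(conn.execute("CREATE INDEX t_x ON t(x)").is_err());
}
```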

View File

@@ -58,15 +58,12 @@ pub fn translate_insert(
crate::bail_parse_error!("ON CONFLICT clause is not supported");
}
#[cfg(not(feature = "index_experimental"))]
{
if schema.table_has_indexes(&tbl_name.name.to_string()) {
// Let's disable altering a table with indices altogether instead of checking column by
// column to be extra safe.
crate::bail_parse_error!(
"INSERT table disabled for table with indexes and without index_experimental feature flag"
);
}
if schema.table_has_indexes(&tbl_name.name.to_string()) && !schema.indexes_enabled() {
// Let's disable inserting into a table with indices altogether instead of
// checking column by column, to be extra safe.
crate::bail_parse_error!(
"INSERT into a table with indexes is disabled by default. Run with `--experimental-indexes` to enable this feature."
);
}
let table_name = &tbl_name.name;
let table = match schema.get_table(table_name.0.as_str()) {

View File

@@ -71,6 +71,7 @@ pub fn optimize_select_plan(plan: &mut SelectPlan, schema: &Schema) -> Result<()
}
let best_join_order = optimize_table_access(
schema,
&mut plan.table_references,
&schema.indexes,
&mut plan.where_clause,
@@ -119,6 +120,7 @@ fn optimize_update_plan(plan: &mut UpdatePlan, schema: &Schema) -> Result<()> {
optimize_select_plan(ephemeral_plan, schema)?;
}
let _ = optimize_table_access(
schema,
&mut plan.table_references,
&schema.indexes,
&mut plan.where_clause,
@@ -150,6 +152,7 @@ fn optimize_subqueries(plan: &mut SelectPlan, schema: &Schema) -> Result<()> {
///
/// Returns the join order if it was optimized, or None if the default join order was considered best.
fn optimize_table_access(
schema: &Schema,
table_references: &mut TableReferences,
available_indexes: &HashMap<String, Vec<Arc<Index>>>,
where_clause: &mut [WhereTerm],
@@ -240,8 +243,7 @@ fn optimize_table_access(
let table_idx = join_order_member.original_idx;
let access_method = &access_methods_arena.borrow()[best_access_methods[i]];
if access_method.is_scan() {
#[cfg(feature = "index_experimental")]
let try_to_build_ephemeral_index = {
let try_to_build_ephemeral_index = if schema.indexes_enabled() {
let is_leftmost_table = i == 0;
let uses_index = access_method.index.is_some();
let source_table_is_from_clause_subquery = matches!(
@@ -249,9 +251,9 @@ fn optimize_table_access(
Table::FromClauseSubquery(_)
);
!is_leftmost_table && !uses_index && !source_table_is_from_clause_subquery
} else {
false
};
#[cfg(not(feature = "index_experimental"))]
let try_to_build_ephemeral_index = false;
if !try_to_build_ephemeral_index {
joined_tables[table_idx].op = Operation::Scan {

View File

@@ -25,7 +25,11 @@ use limbo_sqlite3_parser::ast::{
pub const ROWID: &str = "rowid";
pub fn resolve_aggregates(top_level_expr: &Expr, aggs: &mut Vec<Aggregate>) -> Result<bool> {
pub fn resolve_aggregates(
schema: &Schema,
top_level_expr: &Expr,
aggs: &mut Vec<Aggregate>,
) -> Result<bool> {
let mut contains_aggregates = false;
walk_expr(top_level_expr, &mut |expr: &Expr| -> Result<WalkControl> {
if aggs
@@ -51,13 +55,10 @@ pub fn resolve_aggregates(top_level_expr: &Expr, aggs: &mut Vec<Aggregate>) -> R
{
Ok(Func::Agg(f)) => {
let distinctness = Distinctness::from_ast(distinctness.as_ref());
#[cfg(not(feature = "index_experimental"))]
{
if distinctness.is_distinct() {
crate::bail_parse_error!(
"SELECT with DISTINCT is not allowed without indexes enabled"
);
}
if !schema.indexes_enabled() && distinctness.is_distinct() {
crate::bail_parse_error!(
"SELECT with DISTINCT is not allowed without indexes enabled"
);
}
let num_args = args.as_ref().map_or(0, |args| args.len());
if distinctness.is_distinct() && num_args != 1 {
@@ -76,7 +77,7 @@ pub fn resolve_aggregates(top_level_expr: &Expr, aggs: &mut Vec<Aggregate>) -> R
_ => {
if let Some(args) = args {
for arg in args.iter() {
contains_aggregates |= resolve_aggregates(arg, aggs)?;
contains_aggregates |= resolve_aggregates(schema, arg, aggs)?;
}
}
}

View File

@@ -93,7 +93,7 @@ pub fn translate_create_table(
let index_regs = check_automatic_pk_index_required(&body, &mut program, &tbl_name.name.0)?;
if let Some(index_regs) = index_regs.as_ref() {
if cfg!(not(feature = "index_experimental")) {
if !schema.indexes_enabled() {
bail_parse_error!("Constraints UNIQUE and PRIMARY KEY (unless INTEGER PRIMARY KEY) on table are not supported without indexes");
}
for index_reg in index_regs.clone() {
@@ -613,13 +613,10 @@ pub fn translate_drop_table(
schema: &Schema,
mut program: ProgramBuilder,
) -> Result<ProgramBuilder> {
#[cfg(not(feature = "index_experimental"))]
{
if schema.table_has_indexes(&tbl_name.name.to_string()) {
bail_parse_error!(
"DROP Table with indexes on the table enabled only with index_experimental feature"
);
}
if !schema.indexes_enabled() && schema.table_has_indexes(&tbl_name.name.to_string()) {
bail_parse_error!(
"DROP TABLE with indexes on the table is disabled by default. Run with `--experimental-indexes` to enable this feature."
);
}
let opts = ProgramBuilderOpts {
query_mode,

View File

@@ -203,13 +203,10 @@ fn prepare_one_select_plan(
distinctness,
..
} = *select_inner;
#[cfg(not(feature = "index_experimental"))]
{
if distinctness.is_some() {
crate::bail_parse_error!(
"SELECT with DISTINCT is not allowed without indexes enabled"
);
}
if !schema.indexes_enabled() && distinctness.is_some() {
crate::bail_parse_error!(
"SELECT with DISTINCT is not allowed without indexes enabled"
);
}
let col_count = columns.len();
if col_count == 0 {
@@ -346,13 +343,10 @@ fn prepare_one_select_plan(
};
let distinctness = Distinctness::from_ast(distinctness.as_ref());
#[cfg(not(feature = "index_experimental"))]
{
if distinctness.is_distinct() {
crate::bail_parse_error!(
"SELECT with DISTINCT is not allowed without indexes enabled"
);
}
if !schema.indexes_enabled() && distinctness.is_distinct() {
crate::bail_parse_error!(
"SELECT with DISTINCT is not allowed without indexes enabled"
);
}
if distinctness.is_distinct() && args_count != 1 {
crate::bail_parse_error!("DISTINCT aggregate functions must have exactly one argument");
@@ -393,8 +387,11 @@ fn prepare_one_select_plan(
});
}
Ok(_) => {
let contains_aggregates =
resolve_aggregates(expr, &mut aggregate_expressions)?;
let contains_aggregates = resolve_aggregates(
schema,
expr,
&mut aggregate_expressions,
)?;
plan.result_columns.push(ResultSetColumn {
alias: maybe_alias.as_ref().map(|alias| match alias {
ast::As::Elided(alias) => alias.0.clone(),
@@ -409,6 +406,7 @@ fn prepare_one_select_plan(
{
if let ExtFunc::Scalar(_) = f.as_ref().func {
let contains_aggregates = resolve_aggregates(
schema,
expr,
&mut aggregate_expressions,
)?;
@@ -486,7 +484,7 @@ fn prepare_one_select_plan(
}
expr => {
let contains_aggregates =
resolve_aggregates(expr, &mut aggregate_expressions)?;
resolve_aggregates(schema, expr, &mut aggregate_expressions)?;
plan.result_columns.push(ResultSetColumn {
alias: maybe_alias.as_ref().map(|alias| match alias {
ast::As::Elided(alias) => alias.0.clone(),
@@ -532,7 +530,7 @@ fn prepare_one_select_plan(
Some(&plan.result_columns),
)?;
let contains_aggregates =
resolve_aggregates(expr, &mut aggregate_expressions)?;
resolve_aggregates(schema, expr, &mut aggregate_expressions)?;
if !contains_aggregates {
// TODO: sqlite allows HAVING clauses with non aggregate expressions like
// HAVING id = 5. We should support this too eventually (I guess).
@@ -567,7 +565,7 @@ fn prepare_one_select_plan(
&mut plan.table_references,
Some(&plan.result_columns),
)?;
resolve_aggregates(&o.expr, &mut plan.aggregates)?;
resolve_aggregates(schema, &o.expr, &mut plan.aggregates)?;
key.push((o.expr, o.order.unwrap_or(ast::SortOrder::Asc)));
}

View File

@@ -104,15 +104,12 @@ pub fn prepare_update_plan(
bail_parse_error!("ON CONFLICT clause is not supported");
}
let table_name = &body.tbl_name.name;
#[cfg(not(feature = "index_experimental"))]
{
if schema.table_has_indexes(&table_name.to_string()) {
// Let's disable altering a table with indices altogether instead of checking column by
// column to be extra safe.
bail_parse_error!(
"UPDATE table disabled for table with indexes and without index_experimental feature flag"
);
}
if schema.table_has_indexes(&table_name.to_string()) && !schema.indexes_enabled() {
// Let's disable updating a table with indices altogether instead of
// checking column by column, to be extra safe.
bail_parse_error!(
"UPDATE for a table with indexes is disabled by default. Run with `--experimental-indexes` to enable this feature."
);
}
let table = match schema.get_table(table_name.0.as_str()) {
Some(table) => table,

View File

@@ -138,10 +138,9 @@ pub fn parse_schema_rows(
}
}
for unparsed_sql_from_index in from_sql_indexes {
#[cfg(not(feature = "index_experimental"))]
schema.table_set_has_index(&unparsed_sql_from_index.table_name);
#[cfg(feature = "index_experimental")]
{
if !schema.indexes_enabled() {
schema.table_set_has_index(&unparsed_sql_from_index.table_name);
} else {
let table = schema
.get_btree_table(&unparsed_sql_from_index.table_name)
.unwrap();
@@ -154,10 +153,9 @@ pub fn parse_schema_rows(
}
}
for automatic_index in automatic_indices {
#[cfg(not(feature = "index_experimental"))]
schema.table_set_has_index(&automatic_index.0);
#[cfg(feature = "index_experimental")]
{
if !schema.indexes_enabled() {
schema.table_set_has_index(&automatic_index.0);
} else {
let table = schema.get_btree_table(&automatic_index.0).unwrap();
let ret_index = schema::Index::automatic_from_primary_key_and_unique(
table.as_ref(),

View File

@@ -4946,7 +4946,7 @@ pub fn op_parse_schema(
}
} else {
let stmt = conn.prepare("SELECT * FROM sqlite_schema")?;
let mut new = Schema::new();
let mut new = Schema::new(conn.schema.read().indexes_enabled());
// TODO: This function below is synchronous, make it async
{

View File

@@ -3,9 +3,6 @@ name = "limbo-multitenancy"
version = "0.1.0"
edition = "2021"
[features]
index_experimental = ["limbo_core/index_experimental"]
[dependencies]
clap = { version = "4.5", features = ["derive"] }
env_logger = "0.11.0"

View File

@@ -66,7 +66,7 @@ for query_file in $(ls "$QUERIES_DIR"/*.sql | sort -V); do
# Clear caches before Limbo run
clear_caches
# Run Limbo
limbo_output=$( { time -p "$LIMBO_BIN" "$DB_FILE" --quiet --output-mode list "$(cat $query_file)" 2>&1; } 2>&1)
limbo_output=$( { time -p "$LIMBO_BIN" "$DB_FILE" --experimental-indexes --quiet --output-mode list "$(cat $query_file)" 2>&1; } 2>&1)
limbo_non_time_lines=$(echo "$limbo_output" | grep -v -e "^real" -e "^user" -e "^sys")
limbo_real_time=$(echo "$limbo_output" | grep "^real" | awk '{print $2}')
echo "Running $query_name with SQLite3..." >&2
@@ -95,4 +95,4 @@ for query_file in $(ls "$QUERIES_DIR"/*.sql | sort -V); do
done
echo "-----------------------------------------------------------"
echo "TPC-H query timing comparison completed."
echo "TPC-H query timing comparison completed."

View File

@@ -2,7 +2,7 @@
# if RUST_LOG is non-empty, enable tracing output
if [ -n "$RUST_LOG" ]; then
target/debug/limbo_index_experimental -m list -t testing/test.log "$@"
target/debug/limbo --experimental-indexes -m list -t testing/test.log "$@"
else
target/debug/limbo_index_experimental -m list "$@"
target/debug/limbo --experimental-indexes -m list "$@"
fi

View File

@@ -648,6 +648,7 @@ impl Interaction {
env.io.clone(),
&db_path,
false,
false,
) {
Ok(db) => db,
Err(e) => {

View File

@@ -135,7 +135,7 @@ impl SimulatorEnv {
std::fs::remove_file(wal_path).unwrap();
}
let db = match Database::open_file(io.clone(), db_path.to_str().unwrap(), false) {
let db = match Database::open_file(io.clone(), db_path.to_str().unwrap(), false, false) {
Ok(db) => db,
Err(e) => {
panic!("error opening simulator test file {:?}: {:?}", db_path, e);

View File

@@ -126,7 +126,7 @@ pub unsafe extern "C" fn sqlite3_open(
Err(_) => return SQLITE_CANTOPEN,
},
};
match limbo_core::Database::open_file(io.clone(), filename, false) {
match limbo_core::Database::open_file(io.clone(), filename, false, false) {
Ok(db) => {
let conn = db.connect().unwrap();
*db_out = Box::leak(Box::new(sqlite3::new(io, db, conn)));

View File

@@ -14,8 +14,6 @@ publish = false
name = "limbo_stress"
path = "main.rs"
[features]
index_experimental = ["limbo/index_experimental"]
[dependencies]
antithesis_sdk = "0.2.5"
clap = { version = "4.5", features = ["derive"] }

View File

@@ -165,7 +165,8 @@ impl ArbitrarySchema {
.map(|col| {
let mut col_def =
format!(" {} {}", col.name, data_type_to_sql(&col.data_type));
if cfg!(feature = "index_experimental") {
if false {
/* FIXME: was cfg!(feature = "index_experimental"); constraints stay disabled until the runtime enable_indexes flag reaches this generator */
for constraint in &col.constraints {
col_def.push(' ');
col_def.push_str(&constraint_to_sql(constraint));
@@ -340,18 +341,18 @@ fn generate_plan(opts: &Opts) -> Result<Plan, Box<dyn std::error::Error + Send +
None
};
if let Some(tx) = tx {
queries.push(format!("{}", tx));
queries.push(tx.to_string());
}
let sql = generate_random_statement(&schema);
if !opts.skip_log {
writeln!(log_file, "{}", sql)?;
}
queries.push(sql);
if let Some(_) = tx {
if tx.is_some() {
if get_random() % 2 == 0 {
queries.push(format!("COMMIT"));
queries.push("COMMIT".to_string());
} else {
queries.push(format!("ROLLBACK"));
queries.push("ROLLBACK".to_string());
}
}
}

View File

@@ -14,9 +14,6 @@ path = "lib.rs"
name = "integration_tests"
path = "integration/mod.rs"
[features]
index_experimental = ["limbo_core/index_experimental"]
[dependencies]
anyhow.workspace = true
env_logger = "0.10.1"

View File

@@ -18,11 +18,11 @@ unsafe impl Send for TempDatabase {}
#[allow(dead_code, clippy::arc_with_non_send_sync)]
impl TempDatabase {
pub fn new_empty() -> Self {
Self::new(&format!("test-{}.db", rng().next_u32()))
pub fn new_empty(enable_indexes: bool) -> Self {
Self::new(&format!("test-{}.db", rng().next_u32()), enable_indexes)
}
pub fn new(db_name: &str) -> Self {
pub fn new(db_name: &str, enable_indexes: bool) -> Self {
let mut path = TempDir::new().unwrap().keep();
path.push(db_name);
let io: Arc<dyn IO + Send> = Arc::new(limbo_core::PlatformIO::new().unwrap());
@@ -31,20 +31,34 @@ impl TempDatabase {
path.to_str().unwrap(),
limbo_core::OpenFlags::default(),
false,
enable_indexes,
)
.unwrap();
Self { path, io, db }
}
pub fn new_with_existent(db_path: &Path) -> Self {
Self::new_with_existent_with_flags(db_path, limbo_core::OpenFlags::default())
pub fn new_with_existent(db_path: &Path, enable_indexes: bool) -> Self {
Self::new_with_existent_with_flags(
db_path,
limbo_core::OpenFlags::default(),
enable_indexes,
)
}
pub fn new_with_existent_with_flags(db_path: &Path, flags: limbo_core::OpenFlags) -> Self {
pub fn new_with_existent_with_flags(
db_path: &Path,
flags: limbo_core::OpenFlags,
enable_indexes: bool,
) -> Self {
let io: Arc<dyn IO + Send> = Arc::new(limbo_core::PlatformIO::new().unwrap());
let db =
Database::open_file_with_flags(io.clone(), db_path.to_str().unwrap(), flags, false)
.unwrap();
let db = Database::open_file_with_flags(
io.clone(),
db_path.to_str().unwrap(),
flags,
false,
enable_indexes,
)
.unwrap();
Self {
path: db_path.to_path_buf(),
io,
@@ -52,7 +66,7 @@ impl TempDatabase {
}
}
pub fn new_with_rusqlite(table_sql: &str) -> Self {
pub fn new_with_rusqlite(table_sql: &str, enable_indexes: bool) -> Self {
let _ = tracing_subscriber::fmt()
.with_max_level(tracing::Level::TRACE)
.finish();
@@ -71,6 +85,7 @@ impl TempDatabase {
path.to_str().unwrap(),
limbo_core::OpenFlags::default(),
false,
enable_indexes,
)
.unwrap();
@@ -85,9 +100,15 @@ impl TempDatabase {
conn
}
pub fn limbo_database(&self) -> Arc<limbo_core::Database> {
pub fn limbo_database(&self, enable_indexes: bool) -> Arc<limbo_core::Database> {
log::debug!("conneting to limbo");
Database::open_file(self.io.clone(), self.path.to_str().unwrap(), false).unwrap()
Database::open_file(
self.io.clone(),
self.path.to_str().unwrap(),
false,
enable_indexes,
)
.unwrap()
}
}
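
With the new boolean threaded through every constructor, tests opt into index support per database instead of per build. A brief illustrative sketch:

```rust
#[test]
fn unique_column_with_indexes_enabled() {
    // new_empty(true) opens a fresh database with enable_indexes = true, so
    // statements that must maintain an index (here, the UNIQUE constraint's
    // automatic index) are accepted rather than rejected at translation.
    let db = TempDatabase::new_empty(true);
    let conn = db.connect_limbo();
    conn.execute("CREATE TABLE t(x INTEGER UNIQUE)").unwrap();
}
```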
@@ -231,6 +252,7 @@ mod tests {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (foo integer, bar integer, baz integer);",
false,
);
let conn = tmp_db.connect_limbo();
@@ -268,8 +290,11 @@ mod tests {
fn test_limbo_open_read_only() -> anyhow::Result<()> {
let path = TempDir::new().unwrap().keep().join("temp_read_only");
{
let db =
TempDatabase::new_with_existent_with_flags(&path, limbo_core::OpenFlags::default());
let db = TempDatabase::new_with_existent_with_flags(
&path,
limbo_core::OpenFlags::default(),
false,
);
let conn = db.connect_limbo();
let ret = limbo_exec_rows(&db, &conn, "CREATE table t(a)");
assert!(ret.is_empty(), "{:?}", ret);
@@ -281,6 +306,7 @@ mod tests {
let db = TempDatabase::new_with_existent_with_flags(
&path,
limbo_core::OpenFlags::default() | limbo_core::OpenFlags::ReadOnly,
false,
);
let conn = db.connect_limbo();
let ret = limbo_exec_rows(&db, &conn, "SELECT * from t");
@@ -293,11 +319,10 @@ mod tests {
}
#[test]
#[cfg(feature = "index_experimental")]
fn test_unique_index_ordering() -> anyhow::Result<()> {
use rand::Rng;
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(true);
let conn = db.connect_limbo();
let _ = limbo_exec_rows(&db, &conn, "CREATE TABLE t(x INTEGER UNIQUE)");
@@ -336,10 +361,9 @@ mod tests {
}
#[test]
#[cfg(feature = "index_experimental")]
fn test_large_unique_blobs() -> anyhow::Result<()> {
let path = TempDir::new().unwrap().keep().join("temp_read_only");
let db = TempDatabase::new_with_existent(&path);
let db = TempDatabase::new_with_existent(&path, true);
let conn = db.connect_limbo();
let _ = limbo_exec_rows(&db, &conn, "CREATE TABLE t(x BLOB UNIQUE)");

View File

@@ -6,6 +6,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test_rowid (id INTEGER PRIMARY KEY, val TEXT);",
false,
);
let conn = tmp_db.connect_limbo();
@@ -90,7 +91,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
fn test_integer_primary_key() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test_rowid (id INTEGER PRIMARY KEY);");
TempDatabase::new_with_rusqlite("CREATE TABLE test_rowid (id INTEGER PRIMARY KEY);", false);
let conn = tmp_db.connect_limbo();
for query in &[

View File

@@ -2,9 +2,7 @@ pub mod grammar_generator;
#[cfg(test)]
mod tests {
#[cfg(feature = "index_experimental")]
use rand::seq::IndexedRandom;
#[cfg(feature = "index_experimental")]
use std::collections::HashSet;
use rand::{Rng, SeedableRng};
@@ -30,7 +28,7 @@ mod tests {
/// [See this issue for more info](https://github.com/tursodatabase/limbo/issues/1763)
#[test]
pub fn fuzz_failure_issue_1763() {
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(false);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
let offending_query = "SELECT ((ceil(pow((((2.0))), (-2.0 - -1.0) / log(0.5)))) - -2.0)";
@@ -45,7 +43,7 @@ mod tests {
#[test]
pub fn arithmetic_expression_fuzz_ex1() {
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(false);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -65,7 +63,7 @@ mod tests {
#[test]
pub fn rowid_seek_fuzz() {
let db = TempDatabase::new_with_rusqlite("CREATE TABLE t(x INTEGER PRIMARY KEY)"); // INTEGER PRIMARY KEY is a rowid alias, so an index is not created
let db = TempDatabase::new_with_rusqlite("CREATE TABLE t(x INTEGER PRIMARY KEY)", false); // INTEGER PRIMARY KEY is a rowid alias, so an index is not created
let sqlite_conn = rusqlite::Connection::open(db.path.clone()).unwrap();
let insert = format!(
@@ -184,9 +182,8 @@ mod tests {
}
#[test]
#[cfg(feature = "index_experimental")]
pub fn index_scan_fuzz() {
let db = TempDatabase::new_with_rusqlite("CREATE TABLE t(x PRIMARY KEY)");
let db = TempDatabase::new_with_rusqlite("CREATE TABLE t(x PRIMARY KEY)", true);
let sqlite_conn = rusqlite::Connection::open(db.path.clone()).unwrap();
let insert = format!(
@@ -232,7 +229,6 @@ mod tests {
}
#[test]
#[cfg(feature = "index_experimental")]
/// A test for verifying that index seek+scan works correctly for compound keys
/// on indexes with various column orderings.
pub fn index_scan_compound_key_fuzz() {
@@ -254,14 +250,14 @@ mod tests {
];
// Create all different 3-column primary key permutations
let dbs = [
TempDatabase::new_with_rusqlite(table_defs[0]),
TempDatabase::new_with_rusqlite(table_defs[1]),
TempDatabase::new_with_rusqlite(table_defs[2]),
TempDatabase::new_with_rusqlite(table_defs[3]),
TempDatabase::new_with_rusqlite(table_defs[4]),
TempDatabase::new_with_rusqlite(table_defs[5]),
TempDatabase::new_with_rusqlite(table_defs[6]),
TempDatabase::new_with_rusqlite(table_defs[7]),
TempDatabase::new_with_rusqlite(table_defs[0], true),
TempDatabase::new_with_rusqlite(table_defs[1], true),
TempDatabase::new_with_rusqlite(table_defs[2], true),
TempDatabase::new_with_rusqlite(table_defs[3], true),
TempDatabase::new_with_rusqlite(table_defs[4], true),
TempDatabase::new_with_rusqlite(table_defs[5], true),
TempDatabase::new_with_rusqlite(table_defs[6], true),
TempDatabase::new_with_rusqlite(table_defs[7], true),
];
let mut pk_tuples = HashSet::new();
while pk_tuples.len() < 100000 {
@@ -512,7 +508,6 @@ mod tests {
}
#[test]
#[cfg(feature = "index_experimental")]
pub fn compound_select_fuzz() {
let _ = env_logger::try_init();
let (mut rng, seed) = rng_from_time();
@@ -528,7 +523,7 @@ mod tests {
const MAX_SELECTS_IN_UNION_EXTRA: usize = 2;
const MAX_LIMIT_VALUE: usize = 50;
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(true);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -672,7 +667,7 @@ mod tests {
let sql = g.create().concat(" ").push_str("SELECT").push(expr).build();
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(false);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -693,7 +688,7 @@ mod tests {
#[test]
pub fn fuzz_ex() {
let _ = env_logger::try_init();
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(false);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -793,7 +788,7 @@ mod tests {
let sql = g.create().concat(" ").push_str("SELECT").push(expr).build();
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(false);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -957,7 +952,7 @@ mod tests {
let sql = g.create().concat(" ").push_str("SELECT").push(expr).build();
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(false);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -991,7 +986,6 @@ mod tests {
pub cast_expr: SymbolHandle,
pub case_expr: SymbolHandle,
pub cmp_op: SymbolHandle,
#[cfg(feature = "index_experimental")]
pub number: SymbolHandle,
}
@@ -1226,12 +1220,10 @@ mod tests {
cast_expr,
case_expr,
cmp_op,
#[cfg(feature = "index_experimental")]
number,
}
}
#[cfg(feature = "index_experimental")]
fn predicate_builders(g: &GrammarGenerator, tables: Option<&[TestTable]>) -> PredicateBuilders {
let (in_op, in_op_builder) = g.create_handle();
let (column, column_builder) = g.create_handle();
@@ -1330,7 +1322,7 @@ mod tests {
.push(expr)
.build();
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(false);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
@@ -1365,7 +1357,7 @@ mod tests {
"SELECT * FROM t",
],
] {
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(false);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
for query in queries.iter() {
@@ -1381,7 +1373,6 @@ mod tests {
}
#[test]
#[cfg(feature = "index_experimental")]
pub fn table_logical_expression_fuzz_run() {
let _ = env_logger::try_init();
let g = GrammarGenerator::new();
@@ -1393,7 +1384,7 @@ mod tests {
let predicate = predicate_builders(&g, Some(&tables));
let expr = build_logical_expr(&g, &builders, Some(&predicate));
let db = TempDatabase::new_empty();
let db = TempDatabase::new_empty(true);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();
for table in tables.iter() {

View File

@@ -3,7 +3,7 @@ use limbo_core::{StepResult, Value};
#[test]
fn test_statement_reset_bind() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);");
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);", false);
let conn = tmp_db.connect_limbo();
let mut stmt = conn.prepare("select ?")?;
@@ -47,7 +47,7 @@ fn test_statement_reset_bind() -> anyhow::Result<()> {
#[test]
fn test_statement_bind() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);");
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);", false);
let conn = tmp_db.connect_limbo();
let mut stmt = conn.prepare("select ?, ?1, :named, ?3, ?4")?;
@@ -112,6 +112,7 @@ fn test_insert_parameter_remap() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
@@ -176,6 +177,7 @@ fn test_insert_parameter_remap_all_params() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (d, a, c, b) values (?, ?, ?, ?);")?;
@@ -243,6 +245,7 @@ fn test_insert_parameter_multiple_remap_backwards() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (d,c,b,a) values (?, ?, ?, ?);")?;
@@ -309,6 +312,7 @@ fn test_insert_parameter_multiple_no_remap() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (a,b,c,d) values (?, ?, ?, ?);")?;
@@ -375,6 +379,7 @@ fn test_insert_parameter_multiple_row() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b integer, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (b,a,d,c) values (?, ?, ?, ?), (?, ?, ?, ?);")?;
@@ -440,7 +445,7 @@ fn test_insert_parameter_multiple_row() -> anyhow::Result<()> {
#[test]
fn test_bind_parameters_update_query() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite("create table test (a integer, b text);");
let tmp_db = TempDatabase::new_with_rusqlite("create table test (a integer, b text);", false);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (a, b) values (3, 'test1');")?;
loop {
@@ -484,6 +489,7 @@ fn test_bind_parameters_update_query() -> anyhow::Result<()> {
fn test_bind_parameters_update_query_multiple_where() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"create table test (a integer, b text, c integer, d integer);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (a, b, c, d) values (3, 'test1', 4, 5);")?;
@@ -529,8 +535,10 @@ fn test_bind_parameters_update_query_multiple_where() -> anyhow::Result<()> {
#[test]
fn test_bind_parameters_update_rowid_alias() -> anyhow::Result<()> {
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT);");
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT);",
false,
);
let conn = tmp_db.connect_limbo();
let mut ins = conn.prepare("insert into test (id, name) values (1, 'test');")?;
loop {
@@ -588,6 +596,7 @@ fn test_bind_parameters_update_rowid_alias() -> anyhow::Result<()> {
fn test_bind_parameters_update_rowid_alias_seek_rowid() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT, age integer);",
false,
);
let conn = tmp_db.connect_limbo();
conn.execute("insert into test (id, name, age) values (1, 'test', 4);")?;
@@ -655,6 +664,7 @@ fn test_bind_parameters_update_rowid_alias_seek_rowid() -> anyhow::Result<()> {
fn test_bind_parameters_delete_rowid_alias_seek_out_of_order() -> anyhow::Result<()> {
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT, age integer);",
false,
);
let conn = tmp_db.connect_limbo();
conn.execute("insert into test (id, name, age) values (1, 'correct', 4);")?;

View File

@@ -20,8 +20,10 @@ macro_rules! change_state {
#[ignore]
fn test_simple_overflow_page() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY, t TEXT);");
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (x INTEGER PRIMARY KEY, t TEXT);",
false,
);
let conn = tmp_db.connect_limbo();
let mut huge_text = String::new();
@@ -82,8 +84,10 @@ fn test_simple_overflow_page() -> anyhow::Result<()> {
fn test_sequential_overflow_page() -> anyhow::Result<()> {
let _ = env_logger::try_init();
maybe_setup_tracing();
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY, t TEXT);");
let tmp_db = TempDatabase::new_with_rusqlite(
"CREATE TABLE test (x INTEGER PRIMARY KEY, t TEXT);",
false,
);
let conn = tmp_db.connect_limbo();
let iterations = 10_usize;
@@ -152,7 +156,8 @@ fn test_sequential_write() -> anyhow::Result<()> {
let _ = env_logger::try_init();
maybe_setup_tracing();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);");
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);", false);
let conn = tmp_db.connect_limbo();
let list_query = "SELECT * FROM test";
@@ -187,7 +192,7 @@ fn test_sequential_write() -> anyhow::Result<()> {
/// https://github.com/tursodatabase/limbo/pull/679
fn test_regression_multi_row_insert() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x REAL);");
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x REAL);", false);
let conn = tmp_db.connect_limbo();
let insert_query = "INSERT INTO test VALUES (-2), (-3), (-1)";
@@ -220,7 +225,7 @@ fn test_regression_multi_row_insert() -> anyhow::Result<()> {
#[test]
fn test_statement_reset() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);");
let tmp_db = TempDatabase::new_with_rusqlite("create table test (i integer);", false);
let conn = tmp_db.connect_limbo();
conn.execute("insert into test values (1)")?;
@@ -267,7 +272,8 @@ fn test_statement_reset() -> anyhow::Result<()> {
#[ignore]
fn test_wal_checkpoint() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);");
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);", false);
// threshold is 1000 by default
let iterations = 1001_usize;
let conn = tmp_db.connect_limbo();
@@ -294,7 +300,8 @@ fn test_wal_checkpoint() -> anyhow::Result<()> {
#[test]
fn test_wal_restart() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);");
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x INTEGER PRIMARY KEY);", false);
// threshold is 1000 by default
fn insert(i: usize, conn: &Arc<Connection>, tmp_db: &TempDatabase) -> anyhow::Result<()> {
@@ -339,7 +346,7 @@ fn test_wal_restart() -> anyhow::Result<()> {
#[test]
fn test_insert_after_big_blob() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE temp (t1 BLOB, t2 INTEGER)");
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE temp (t1 BLOB, t2 INTEGER)", false);
let conn = tmp_db.connect_limbo();
conn.execute("insert into temp(t1) values (zeroblob (262144))")?;
@@ -355,7 +362,7 @@ fn test_write_delete_with_index() -> anyhow::Result<()> {
maybe_setup_tracing();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x PRIMARY KEY);");
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x PRIMARY KEY);", false);
let conn = tmp_db.connect_limbo();
let list_query = "SELECT * FROM test";
@@ -404,13 +411,13 @@ fn test_write_delete_with_index() -> anyhow::Result<()> {
}
#[test]
#[cfg(feature = "index_experimental")]
fn test_update_with_index() -> anyhow::Result<()> {
let _ = env_logger::try_init();
maybe_setup_tracing();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE test (x REAL PRIMARY KEY, y TEXT);");
let tmp_db =
TempDatabase::new_with_rusqlite("CREATE TABLE test (x REAL PRIMARY KEY, y TEXT);", true);
let conn = tmp_db.connect_limbo();
run_query(&tmp_db, &conn, "INSERT INTO test VALUES (1.0, 'foo')")?;
@@ -446,7 +453,7 @@ fn test_delete_with_index() -> anyhow::Result<()> {
maybe_setup_tracing();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t(x UNIQUE)");
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t(x UNIQUE)", true);
let conn = tmp_db.connect_limbo();
run_query(&tmp_db, &conn, "INSERT INTO t VALUES (1), (2)")?;
@@ -462,7 +469,7 @@ fn test_delete_with_index() -> anyhow::Result<()> {
#[test]
fn test_update_regression() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE imaginative_baroja (blithesome_hall BLOB,remarkable_lester INTEGER,generous_balagun TEXT,ample_earth INTEGER,marvelous_khadzhiev BLOB,glowing_parissi TEXT,insightful_ryner BLOB)");
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE imaginative_baroja (blithesome_hall BLOB,remarkable_lester INTEGER,generous_balagun TEXT,ample_earth INTEGER,marvelous_khadzhiev BLOB,glowing_parissi TEXT,insightful_ryner BLOB)", false);
let conn = tmp_db.connect_limbo();
conn.execute("INSERT INTO imaginative_baroja VALUES (X'617070726F61636861626C655F6F6D6164', 5581285929211692372, 'approachable_podur', -4145754929970306534, X'666F72747569746F75735F7368617270', 'sensible_amesly', X'636F6D70657469746976655F6669746368'), (X'6D6972746866756C5F686F6673746565', -8554670009677647372, 'shimmering_modkraftdk', 4993627046425025026, X'636F6E73696465726174655F63616765', 'breathtaking_boggs', X'616D617A696E675F73696D6F6E65'), (X'7669766163696F75735F7363687761727A', 5860599187854155616, 'sparkling_aurora', 3757552048117668067, X'756E697175655F6769617A', 'lovely_leroy', X'68617264776F726B696E675F6D696C6C6572'), (X'677265676172696F75735F7061657065', -488992130149088413, 'focused_brinker', 4503849242092922100, X'66756E6E795F6A616B736963', 'competitive_communications', X'657863656C6C656E745F7873696C656E74'), (X'7374756E6E696E675F74616E6E656E6261756D', -5634782647279946253, 'fabulous_crute', -3978009805517476564, X'72656C617865645F63617272796F7574', 'spellbinding_erkan', X'66756E6E795F646F626273'), (X'696D6167696E61746976655F746F6C6F6B6F6E6E696B6F7661', 4236471363502323025, 'excellent_wolke', 7606168469334609395, X'736C65656B5F6D6361666565', 'magnificent_riley', X'616D6961626C655F706173736164616B6973'), (X'77696C6C696E675F736872657665', 5048296470820985219, 'ambitious_jeppesen', 6961857167361512834, X'70617469656E745F6272696E6B6572', 'giving_kramm', X'726573706F6E7369626C655F7363686D696474'), (X'73656E7369626C655F6D757865726573', -5519194136843846790, 'frank_ruggero', 4354855935194921345, X'76697669645F63617365', 'focused_lovecruft', X'6D61676E69666963656E745F736B79')")?;
@@ -568,7 +575,7 @@ fn test_write_concurrent_connections() -> anyhow::Result<()> {
maybe_setup_tracing();
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t(x)");
let tmp_db = TempDatabase::new_with_rusqlite("CREATE TABLE t(x)", false);
let num_connections = 4;
let num_inserts_per_connection = 100;
let mut connections = vec![];

View File

@@ -9,7 +9,7 @@ use std::sync::{Arc, Mutex};
#[test]
fn test_wal_checkpoint_result() -> Result<()> {
maybe_setup_tracing();
let tmp_db = TempDatabase::new("test_wal.db");
let tmp_db = TempDatabase::new("test_wal.db", false);
let conn = tmp_db.connect_limbo();
conn.execute("CREATE TABLE t1 (id text);")?;
@@ -36,8 +36,8 @@ fn test_wal_checkpoint_result() -> Result<()> {
#[ignore = "ignored for now because it's flaky"]
fn test_wal_1_writer_1_reader() -> Result<()> {
maybe_setup_tracing();
let tmp_db = Arc::new(Mutex::new(TempDatabase::new("test_wal.db")));
let db = tmp_db.lock().unwrap().limbo_database();
let tmp_db = Arc::new(Mutex::new(TempDatabase::new("test_wal.db", false)));
let db = tmp_db.lock().unwrap().limbo_database(false);
{
let conn = db.connect().unwrap();