Merge branch 'main' of https://github.com/tursodatabase/limbo
Cargo.lock (generated): 48 changed lines
@@ -571,7 +571,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
 
 [[package]]
 name = "core_tester"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "anyhow",
  "assert_cmd",
@@ -1879,14 +1879,14 @@ dependencies = [
 
 [[package]]
 name = "limbo-go"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "turso_core",
 ]
 
 [[package]]
 name = "limbo-wasm"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "console_error_panic_hook",
  "getrandom 0.2.15",
@@ -1899,7 +1899,7 @@ dependencies = [
 
 [[package]]
 name = "limbo_completion"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "mimalloc",
  "turso_ext",
@@ -1907,7 +1907,7 @@ dependencies = [
 
 [[package]]
 name = "limbo_crypto"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "blake3",
  "data-encoding",
@@ -1920,7 +1920,7 @@ dependencies = [
 
 [[package]]
 name = "limbo_csv"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "csv",
  "mimalloc",
@@ -1930,7 +1930,7 @@ dependencies = [
 
 [[package]]
 name = "limbo_ipaddr"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "ipnetwork",
  "mimalloc",
@@ -1939,7 +1939,7 @@ dependencies = [
 
 [[package]]
 name = "limbo_percentile"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "mimalloc",
  "turso_ext",
@@ -1947,7 +1947,7 @@ dependencies = [
 
 [[package]]
 name = "limbo_regexp"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "mimalloc",
  "regex",
@@ -1956,7 +1956,7 @@ dependencies = [
 
 [[package]]
 name = "limbo_sim"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "anarchist-readable-name-generator-lib",
  "anyhow",
@@ -1983,7 +1983,7 @@ dependencies = [
 
 [[package]]
 name = "limbo_sqlite3"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "env_logger 0.11.7",
  "libc",
@@ -1996,7 +1996,7 @@ dependencies = [
 
 [[package]]
 name = "limbo_sqlite_test_ext"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "cc",
 ]
@@ -2663,7 +2663,7 @@ dependencies = [
 
 [[package]]
 name = "py-turso"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "anyhow",
  "pyo3",
@@ -3768,7 +3768,7 @@ dependencies = [
 
 [[package]]
 name = "turso"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "tempfile",
  "thiserror 2.0.12",
@@ -3778,7 +3778,7 @@ dependencies = [
 
 [[package]]
 name = "turso-java"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "jni",
  "thiserror 2.0.12",
@@ -3787,7 +3787,7 @@ dependencies = [
 
 [[package]]
 name = "turso_cli"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "anyhow",
  "cfg-if",
@@ -3818,7 +3818,7 @@ dependencies = [
 
 [[package]]
 name = "turso_core"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "antithesis_sdk",
  "bitflags 2.9.0",
@@ -3871,7 +3871,7 @@ dependencies = [
 
 [[package]]
 name = "turso_dart"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "flutter_rust_bridge",
  "turso_core",
@@ -3879,7 +3879,7 @@ dependencies = [
 
 [[package]]
 name = "turso_ext"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "chrono",
  "getrandom 0.3.2",
@@ -3888,7 +3888,7 @@ dependencies = [
 
 [[package]]
 name = "turso_ext_tests"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "env_logger 0.11.7",
  "lazy_static",
@@ -3899,7 +3899,7 @@ dependencies = [
 
 [[package]]
 name = "turso_macros"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3908,7 +3908,7 @@ dependencies = [
 
 [[package]]
 name = "turso_node"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "napi",
  "napi-build",
@@ -3918,7 +3918,7 @@ dependencies = [
 
 [[package]]
 name = "turso_sqlite3_parser"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "bitflags 2.9.0",
  "cc",
@@ -3936,7 +3936,7 @@ dependencies = [
 
 [[package]]
 name = "turso_stress"
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 dependencies = [
  "anarchist-readable-name-generator-lib",
  "antithesis_sdk",
Cargo.toml: 26 changed lines
@@ -31,25 +31,25 @@ members = [
 exclude = ["perf/latency/limbo"]
 
 [workspace.package]
-version = "0.1.2-pre.2"
+version = "0.1.2-pre.3"
 authors = ["the Limbo authors"]
 edition = "2021"
 license = "MIT"
 repository = "https://github.com/tursodatabase/turso"
 
 [workspace.dependencies]
-limbo_completion = { path = "extensions/completion", version = "0.1.2-pre.2" }
-turso_core = { path = "core", version = "0.1.2-pre.2" }
-limbo_crypto = { path = "extensions/crypto", version = "0.1.2-pre.2" }
-limbo_csv = { path = "extensions/csv", version = "0.1.2-pre.2" }
-turso_ext = { path = "extensions/core", version = "0.1.2-pre.2" }
-turso_ext_tests = { path = "extensions/tests", version = "0.1.2-pre.2" }
-limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.2-pre.2" }
-turso_macros = { path = "macros", version = "0.1.2-pre.2" }
-limbo_percentile = { path = "extensions/percentile", version = "0.1.2-pre.2" }
-limbo_regexp = { path = "extensions/regexp", version = "0.1.2-pre.2" }
-turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.2-pre.2" }
-limbo_uuid = { path = "extensions/uuid", version = "0.1.2-pre.2" }
+limbo_completion = { path = "extensions/completion", version = "0.1.2-pre.3" }
+turso_core = { path = "core", version = "0.1.2-pre.3" }
+limbo_crypto = { path = "extensions/crypto", version = "0.1.2-pre.3" }
+limbo_csv = { path = "extensions/csv", version = "0.1.2-pre.3" }
+turso_ext = { path = "extensions/core", version = "0.1.2-pre.3" }
+turso_ext_tests = { path = "extensions/tests", version = "0.1.2-pre.3" }
+limbo_ipaddr = { path = "extensions/ipaddr", version = "0.1.2-pre.3" }
+turso_macros = { path = "macros", version = "0.1.2-pre.3" }
+limbo_percentile = { path = "extensions/percentile", version = "0.1.2-pre.3" }
+limbo_regexp = { path = "extensions/regexp", version = "0.1.2-pre.3" }
+turso_sqlite3_parser = { path = "vendored/sqlite3-parser", version = "0.1.2-pre.3" }
+limbo_uuid = { path = "extensions/uuid", version = "0.1.2-pre.3" }
 strum = { version = "0.26", features = ["derive"] }
 strum_macros = "0.26"
 serde = "1.0"
@@ -83,6 +83,8 @@ for i in range(tbl_count):
     CREATE TABLE tbl_{i} ({cols_str})
     """)
 
+    con_init.commit()
+
     con.commit()
 
     print(f"DB Schemas\n------------\n{json.dumps(schemas, indent=2)}")
@@ -21,7 +21,7 @@ pub unsafe extern "C" fn db_open(path: *const c_char) -> *mut c_void {
     let path = unsafe { std::ffi::CStr::from_ptr(path) };
     let path = path.to_str().unwrap();
     let Ok((io, conn)) = Connection::from_uri(path, false, false) else {
-        panic!("Failed to open connection with path: {}", path);
+        panic!("Failed to open connection with path: {path}");
     };
     LimboConn::new(conn, io).to_ptr()
 }
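
Note: most hunks in this commit are the same mechanical cleanup, replacing positional format arguments with inlined captures (`format!("{path}")` rather than `format!("{}", path)`). Inlined captures are stable since Rust 1.58 and apply to the whole format_args! family (println!, eprintln!, panic!, write!), including format specs such as `{e:?}` and `{i:04}`. A minimal sketch of the equivalence; the variable names here are illustrative, not taken from the diff:

fn main() {
    let path = "data.db";
    // positional argument (the old style in this diff)
    let old = format!("Failed to open connection with path: {}", path);
    // inlined capture (the new style), identical output
    let new = format!("Failed to open connection with path: {path}");
    assert_eq!(old, new);

    // captures combine with format specs:
    let i = 7;
    assert_eq!(format!("test_string_{i:04}_"), "test_string_0007_");
    let e = std::io::Error::from(std::io::ErrorKind::NotFound);
    println!("IO error: {e:?}");
}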
@@ -56,7 +56,7 @@ impl LimboConn {
 
     fn get_error(&mut self) -> *const c_char {
         if let Some(err) = &self.err {
-            let err = format!("{}", err);
+            let err = format!("{err}");
             let c_str = std::ffi::CString::new(err).unwrap();
             self.err = None;
             c_str.into_raw() as *const c_char
@@ -34,7 +34,7 @@ impl<'conn> LimboRows<'conn> {
 
     fn get_error(&mut self) -> *const c_char {
         if let Some(err) = &self.err {
-            let err = format!("{}", err);
+            let err = format!("{err}");
             let c_str = std::ffi::CString::new(err).unwrap();
             self.err = None;
             c_str.into_raw() as *const c_char
@@ -172,7 +172,7 @@ impl<'conn> LimboStatement<'conn> {
 
     fn get_error(&mut self) -> *const c_char {
         if let Some(err) = &self.err {
-            let err = format!("{}", err);
+            let err = format!("{err}");
             let c_str = std::ffi::CString::new(err).unwrap();
             self.err = None;
             c_str.into_raw() as *const c_char
@@ -29,7 +29,7 @@ impl From<TursoError> for JniError {
             | TursoError::InvalidDatabasePointer
             | TursoError::InvalidConnectionPointer
             | TursoError::JNIErrors(_) => {
-                eprintln!("Error occurred: {:?}", value);
+                eprintln!("Error occurred: {value:?}");
                 JniError::Other(-1)
             }
         }
@@ -118,7 +118,7 @@ fn row_to_obj_array<'local>(
             turso_core::Value::Blob(b) => env.byte_array_from_slice(b.as_slice())?.into(),
         };
         if let Err(e) = env.set_object_array_element(&obj_array, i as i32, obj) {
-            eprintln!("Error on parsing row: {:?}", e);
+            eprintln!("Error on parsing row: {e:?}");
         }
     }
@@ -1,6 +1,6 @@
 {
   "name": "@tursodatabase/turso-darwin-universal",
-  "version": "0.1.2-pre.2",
+  "version": "0.1.2-pre.3",
   "repository": {
     "type": "git",
     "url": "https://github.com/tursodatabase/turso"
@@ -1,6 +1,6 @@
 {
   "name": "@tursodatabase/turso-linux-x64-gnu",
-  "version": "0.1.2-pre.2",
+  "version": "0.1.2-pre.3",
   "repository": {
     "type": "git",
     "url": "https://github.com/tursodatabase/turso"
@@ -1,6 +1,6 @@
 {
   "name": "@tursodatabase/turso-win32-x64-msvc",
-  "version": "0.1.2-pre.2",
+  "version": "0.1.2-pre.3",
   "repository": {
     "type": "git",
     "url": "https://github.com/tursodatabase/turso"
bindings/javascript/package-lock.json (generated): 4 changed lines
@@ -1,12 +1,12 @@
 {
   "name": "@tursodatabase/turso",
-  "version": "0.1.2-pre.2",
+  "version": "0.1.2-pre.3",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@tursodatabase/turso",
-      "version": "0.1.2-pre.2",
+      "version": "0.1.2-pre.3",
       "license": "MIT",
       "devDependencies": {
         "@napi-rs/cli": "^2.18.4",
@@ -1,6 +1,6 @@
 {
   "name": "@tursodatabase/turso",
-  "version": "0.1.2-pre.2",
+  "version": "0.1.2-pre.3",
   "repository": {
     "type": "git",
     "url": "https://github.com/tursodatabase/turso"
@@ -107,7 +107,7 @@ impl Database {
         pragma_name: String,
         options: Option<PragmaOptions>,
     ) -> napi::Result<JsUnknown> {
-        let sql = format!("PRAGMA {}", pragma_name);
+        let sql = format!("PRAGMA {pragma_name}");
         let stmt = self.prepare(sql)?;
         match options {
             Some(PragmaOptions { simple: true, .. }) => {
@@ -129,7 +129,7 @@ impl Database {
             | step @ turso_core::StepResult::Busy => {
                 return Err(napi::Error::new(
                     napi::Status::GenericFailure,
-                    format!("{:?}", step),
+                    format!("{step:?}"),
                 ))
             }
         }
@@ -194,7 +194,7 @@ impl Database {
             Err(err) => {
                 return Err(napi::Error::new(
                     "SQLITE_ERROR".to_owned(),
-                    format!("Error executing SQL: {}", err),
+                    format!("Error executing SQL: {err}"),
                 ));
             }
         }
@@ -203,7 +203,7 @@ impl Database {
             Err(err) => {
                 return Err(napi::Error::new(
                     "SQLITE_ERROR".to_owned(),
-                    format!("Error executing SQL: {}", err),
+                    format!("Error executing SQL: {err}"),
                 ));
             }
         }
@@ -312,7 +312,7 @@ impl Statement {
             turso_core::StepResult::Interrupt | turso_core::StepResult::Busy => {
                 return Err(napi::Error::new(
                     napi::Status::GenericFailure,
-                    format!("{:?}", step),
+                    format!("{step:?}"),
                 ))
             }
         }
@@ -74,7 +74,7 @@ impl Cursor {
         let stmt_is_tx = stmt_is_tx(sql);
 
         let statement = self.conn.conn.prepare(sql).map_err(|e| {
-            PyErr::new::<ProgrammingError, _>(format!("Failed to prepare statement: {:?}", e))
+            PyErr::new::<ProgrammingError, _>(format!("Failed to prepare statement: {e:?}"))
         })?;
 
         let stmt = Rc::new(RefCell::new(statement));
@@ -96,8 +96,7 @@ impl Cursor {
         if stmt_is_dml && self.conn.conn.get_auto_commit() {
             self.conn.conn.execute("BEGIN").map_err(|e| {
                 PyErr::new::<OperationalError, _>(format!(
-                    "Failed to start transaction after DDL: {:?}",
-                    e
+                    "Failed to start transaction after DDL: {e:?}"
                 ))
             })?;
         }
@@ -108,10 +107,10 @@ impl Cursor {
             let mut stmt = stmt.borrow_mut();
             while let turso_core::StepResult::IO = stmt
                 .step()
-                .map_err(|e| PyErr::new::<OperationalError, _>(format!("Step error: {:?}", e)))?
+                .map_err(|e| PyErr::new::<OperationalError, _>(format!("Step error: {e:?}")))?
             {
                 stmt.run_once()
-                    .map_err(|e| PyErr::new::<OperationalError, _>(format!("IO error: {:?}", e)))?;
+                    .map_err(|e| PyErr::new::<OperationalError, _>(format!("IO error: {e:?}")))?;
             }
         }
@@ -130,9 +129,10 @@ impl Cursor {
         if let Some(smt) = &self.smt {
             loop {
                 let mut stmt = smt.borrow_mut();
-                match stmt.step().map_err(|e| {
-                    PyErr::new::<OperationalError, _>(format!("Step error: {:?}", e))
-                })? {
+                match stmt
+                    .step()
+                    .map_err(|e| PyErr::new::<OperationalError, _>(format!("Step error: {e:?}")))?
+                {
                     turso_core::StepResult::Row => {
                         let row = stmt.row().unwrap();
                         let py_row = row_to_py(py, row)?;
@@ -140,7 +140,7 @@ impl Cursor {
                     }
                     turso_core::StepResult::IO => {
                         stmt.run_once().map_err(|e| {
-                            PyErr::new::<OperationalError, _>(format!("IO error: {:?}", e))
+                            PyErr::new::<OperationalError, _>(format!("IO error: {e:?}"))
                         })?;
                     }
                     turso_core::StepResult::Interrupt => {
@@ -166,9 +166,10 @@ impl Cursor {
         if let Some(smt) = &self.smt {
             loop {
                 let mut stmt = smt.borrow_mut();
-                match stmt.step().map_err(|e| {
-                    PyErr::new::<OperationalError, _>(format!("Step error: {:?}", e))
-                })? {
+                match stmt
+                    .step()
+                    .map_err(|e| PyErr::new::<OperationalError, _>(format!("Step error: {e:?}")))?
+                {
                     turso_core::StepResult::Row => {
                         let row = stmt.row().unwrap();
                         let py_row = row_to_py(py, row)?;
@@ -176,7 +177,7 @@ impl Cursor {
                     }
                     turso_core::StepResult::IO => {
                         stmt.run_once().map_err(|e| {
-                            PyErr::new::<OperationalError, _>(format!("IO error: {:?}", e))
+                            PyErr::new::<OperationalError, _>(format!("IO error: {e:?}"))
                         })?;
                     }
                     turso_core::StepResult::Interrupt => {
@@ -257,7 +258,7 @@ impl Connection {
 
     pub fn close(&self) -> PyResult<()> {
         self.conn.close().map_err(|e| {
-            PyErr::new::<OperationalError, _>(format!("Failed to close connection: {:?}", e))
+            PyErr::new::<OperationalError, _>(format!("Failed to close connection: {e:?}"))
         })?;
 
         Ok(())
@@ -266,11 +267,11 @@ impl Connection {
     pub fn commit(&self) -> PyResult<()> {
         if !self.conn.get_auto_commit() {
             self.conn.execute("COMMIT").map_err(|e| {
-                PyErr::new::<OperationalError, _>(format!("Failed to commit: {:?}", e))
+                PyErr::new::<OperationalError, _>(format!("Failed to commit: {e:?}"))
             })?;
 
             self.conn.execute("BEGIN").map_err(|e| {
-                PyErr::new::<OperationalError, _>(format!("Failed to commit: {:?}", e))
+                PyErr::new::<OperationalError, _>(format!("Failed to commit: {e:?}"))
             })?;
         }
         Ok(())
@@ -279,11 +280,11 @@ impl Connection {
     pub fn rollback(&self) -> PyResult<()> {
         if !self.conn.get_auto_commit() {
             self.conn.execute("ROLLBACK").map_err(|e| {
-                PyErr::new::<OperationalError, _>(format!("Failed to commit: {:?}", e))
+                PyErr::new::<OperationalError, _>(format!("Failed to commit: {e:?}"))
             })?;
 
             self.conn.execute("BEGIN").map_err(|e| {
-                PyErr::new::<OperationalError, _>(format!("Failed to commit: {:?}", e))
+                PyErr::new::<OperationalError, _>(format!("Failed to commit: {e:?}"))
             })?;
         }
         Ok(())
@@ -319,8 +320,7 @@ pub fn connect(path: &str) -> Result<Connection> {
     match turso_core::Connection::from_uri(path, false, false) {
         Ok((io, conn)) => Ok(Connection { conn, _io: io }),
         Err(e) => Err(PyErr::new::<ProgrammingError, _>(format!(
-            "Failed to create connection: {:?}",
-            e
+            "Failed to create connection: {e:?}"
         ))
         .into()),
     }
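
Note: the Python-binding hunks above all touch the same stepping loop. For context, a condensed sketch of that loop, assuming only the turso_core statement API as it appears in this diff (step(), run_once(), row(), and the StepResult variants); the function name, the `Statement` type path, and the error type are illustrative:

fn drain_statement(stmt: &mut turso_core::Statement) -> Result<(), String> {
    loop {
        match stmt.step().map_err(|e| format!("Step error: {e:?}"))? {
            turso_core::StepResult::Row => {
                // a row is ready; the bindings convert it to a host value here
                let _row = stmt.row().unwrap();
            }
            turso_core::StepResult::IO => {
                // pending I/O: drive it once, then step again
                stmt.run_once().map_err(|e| format!("IO error: {e:?}"))?;
            }
            // Done, Interrupt, and Busy all end the loop in the bindings
            _ => break,
        }
    }
    Ok(())
}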
@@ -36,5 +36,5 @@ async fn main() {
 
     let value = row.get_value(0).unwrap();
 
-    println!("Row: {:?}", value);
+    println!("Row: {value:?}");
 }
@@ -200,7 +200,7 @@ impl Connection {
 
         rows.iter().try_for_each(|row| {
             f(row).map_err(|e| {
-                Error::SqlExecutionFailure(format!("Error executing user defined function: {}", e))
+                Error::SqlExecutionFailure(format!("Error executing user defined function: {e}"))
             })
         })?;
         Ok(())
@@ -500,10 +500,10 @@ mod tests {
 
         let mut original_data = Vec::with_capacity(NUM_INSERTS);
         for i in 0..NUM_INSERTS {
-            let prefix = format!("test_string_{:04}_", i);
+            let prefix = format!("test_string_{i:04}_");
             let padding_len = TARGET_STRING_LEN.saturating_sub(prefix.len());
             let padding: String = "A".repeat(padding_len);
-            original_data.push(format!("{}{}", prefix, padding));
+            original_data.push(format!("{prefix}{padding}"));
         }
 
         // First, create the database, a table, and insert many large strings
@@ -537,12 +537,11 @@ mod tests {
             let row = rows
                 .next()
                 .await?
-                .unwrap_or_else(|| panic!("Expected row {} but found None", i));
+                .unwrap_or_else(|| panic!("Expected row {i} but found None"));
             assert_eq!(
                 row.get_value(0)?,
                 Value::Text(value.clone()),
-                "Mismatch in retrieved data for row {}",
-                i
+                "Mismatch in retrieved data for row {i}"
             );
         }
 
@@ -552,9 +551,9 @@ mod tests {
         );
 
         // Delete the WAL file only and try to re-open and query
-        let wal_path = format!("{}-wal", db_path);
+        let wal_path = format!("{db_path}-wal");
         std::fs::remove_file(&wal_path)
-            .map_err(|e| eprintln!("Warning: Failed to delete WAL file for test: {}", e))
+            .map_err(|e| eprintln!("Warning: Failed to delete WAL file for test: {e}"))
             .unwrap();
 
         // Attempt to re-open the database after deleting WAL and assert that table is missing.
@@ -570,13 +569,11 @@ mod tests {
             Err(Error::SqlExecutionFailure(msg)) => {
                 assert!(
                     msg.contains("no such table: test_large_persistence"),
-                    "Expected 'test_large_persistence not found' error, but got: {}",
-                    msg
+                    "Expected 'test_large_persistence not found' error, but got: {msg}"
                 );
             }
             Err(e) => panic!(
-                "Expected SqlExecutionFailure for 'no such table', but got a different error: {:?}",
-                e
+                "Expected SqlExecutionFailure for 'no such table', but got a different error: {e:?}"
             ),
         }
@@ -26,11 +26,11 @@ use sealed::Sealed;
 /// These can be supplied in a few ways:
 ///
 /// - For heterogeneous parameter lists of 16 or less items a tuple syntax is supported
-/// by doing `(1, "foo")`.
+///   by doing `(1, "foo")`.
 /// - For hetergeneous parameter lists of 16 or greater, the [`turso::params!`] is supported
-/// by doing `turso::params![1, "foo"]`.
+///   by doing `turso::params![1, "foo"]`.
 /// - For homogeneous parameter types (where they are all the same type), const arrays are
-/// supported by doing `[1, 2, 3]`.
+///   supported by doing `[1, 2, 3]`.
 ///
 /// # Example (positional)
 ///
@@ -61,11 +61,11 @@ use sealed::Sealed;
 /// # Named parameters
 ///
 /// - For heterogeneous parameter lists of 16 or less items a tuple syntax is supported
-/// by doing `(("key1", 1), ("key2", "foo"))`.
+///   by doing `(("key1", 1), ("key2", "foo"))`.
 /// - For hetergeneous parameter lists of 16 or greater, the [`turso::params!`] is supported
-/// by doing `turso::named_params!["key1": 1, "key2": "foo"]`.
+///   by doing `turso::named_params!["key1": 1, "key2": "foo"]`.
 /// - For homogeneous parameter types (where they are all the same type), const arrays are
-/// supported by doing `[("key1", 1), ("key2, 2), ("key3", 3)]`.
+///   supported by doing `[("key1", 1), ("key2, 2), ("key3", 3)]`.
 ///
 /// # Example (named)
 ///
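
Note: the two doc-comment hunks above appear to change only leading whitespace, which this rendering cannot show: the continuation line of each /// list item gains enough indentation to sit under its bullet, so rustdoc keeps it inside the same list item instead of starting a lazy new paragraph (the fix recent clippy doc lints prompt for). A sketch, assuming that reading of the hunks:

/// These can be supplied in a few ways:
///
/// - For heterogeneous parameter lists of 16 or less items a tuple syntax is supported
///   by doing `(1, "foo")`. // continuation indented to stay in the bullet
pub fn params_doc_example() {}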
@@ -68,8 +68,9 @@ impl RowIterator {
             Ok(turso_core::StepResult::Done) | Ok(turso_core::StepResult::Interrupt) => {
                 JsValue::UNDEFINED
             }
-            Err(e) => panic!("Error: {:?}", e),
+            Ok(turso_core::StepResult::Busy) => JsValue::UNDEFINED,
+            Err(e) => panic!("Error: {e:?}"),
         }
     }
 }
@@ -104,11 +105,12 @@ impl Statement {
                 }
                 JsValue::from(row_array)
             }
             Ok(turso_core::StepResult::IO)
             | Ok(turso_core::StepResult::Done)
-            | Ok(turso_core::StepResult::Interrupt) => JsValue::UNDEFINED,
-            Err(e) => panic!("Error: {:?}", e),
+            | Ok(turso_core::StepResult::Interrupt)
+            | Ok(turso_core::StepResult::Busy) => JsValue::UNDEFINED,
+            Err(e) => panic!("Error: {e:?}"),
         }
     }
@@ -130,7 +132,7 @@ impl Statement {
                 Ok(turso_core::StepResult::Interrupt) => break,
                 Ok(turso_core::StepResult::Done) => break,
                 Ok(turso_core::StepResult::Busy) => break,
-                Err(e) => panic!("Error: {:?}", e),
+                Err(e) => panic!("Error: {e:?}"),
             }
         }
         array
bindings/wasm/package-lock.json (generated): 4 changed lines
@@ -1,12 +1,12 @@
 {
   "name": "limbo-wasm",
-  "version": "0.1.2-pre.2",
+  "version": "0.1.2-pre.3",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "limbo-wasm",
-      "version": "0.1.2-pre.2",
+      "version": "0.1.2-pre.3",
       "license": "MIT",
       "devDependencies": {
         "@playwright/test": "^1.49.1",
@@ -3,7 +3,7 @@
   "collaborators": [
     "the Limbo authors"
   ],
-  "version": "0.1.2-pre.2",
+  "version": "0.1.2-pre.3",
   "license": "MIT",
   "repository": {
     "type": "git",
cli/app.rs: 65 changed lines
@@ -216,7 +216,7 @@ impl Limbo {
         }) {
             n if n < 0 => String::from(")x!...>"),
             0 => String::from("   ...> "),
-            n if n < 10 => format!("(x{}...> ", n),
+            n if n < 10 => format!("(x{n}...> "),
             _ => String::from("(.....> "),
         };
     }
@@ -230,7 +230,7 @@ impl Limbo {
     }
 
     fn dump_table(&mut self, name: &str) -> Result<(), LimboError> {
-        let query = format!("pragma table_info={}", name);
+        let query = format!("pragma table_info={name}");
         let mut cols = vec![];
         let mut value_types = vec![];
         query_internal!(
@@ -248,7 +248,7 @@ impl Limbo {
         // it, but it requires pragma index_list, and it seems to be relevant
         // only for indexes.
         let cols_str = cols.join(", ");
-        let select = format!("select {} from {}", cols_str, name);
+        let select = format!("select {cols_str} from {name}");
         query_internal!(
             self,
             select,
@@ -273,14 +273,14 @@ impl Limbo {
                         fmt::Write::write_fmt(&mut output, format_args!("{b:02x}"));
                         output
                     });
-                    format!("X'{}'", hex_string)
+                    format!("X'{hex_string}'")
                 } else {
                     value.to_string()
                 }
             })
             .collect::<Vec<_>>()
             .join(",");
-        self.write_fmt(format_args!("INSERT INTO {} VALUES({});", name, values))?;
+        self.write_fmt(format_args!("INSERT INTO {name} VALUES({values});"))?;
         Ok(())
     }
 )?;
@@ -306,7 +306,7 @@ impl Limbo {
         |row: &turso_core::Row| -> Result<(), LimboError> {
             let sql: &str = row.get::<&str>(2)?;
             let name: &str = row.get::<&str>(0)?;
-            self.write_fmt(format_args!("{};", sql))?;
+            self.write_fmt(format_args!("{sql};"))?;
             self.dump_table(name)
         }
     );
@@ -484,7 +484,7 @@ impl Limbo {
         };
         let sample_stats_as_str = |name: &str, samples: Vec<Duration>| {
             if samples.is_empty() {
-                return format!("{}: No samples available", name);
+                return format!("{name}: No samples available");
             }
             let avg_time_spent = samples.iter().sum::<Duration>() / samples.len() as u32;
             let total_time = samples.iter().fold(Duration::ZERO, |acc, x| acc + *x);
@@ -561,7 +561,7 @@ impl Limbo {
             let buff = self.input_buff.clone();
             self.run_query(buff.as_str());
         } else {
-            self.buffer_input(format!("{}\n", line).as_str());
+            self.buffer_input(format!("{line}\n").as_str());
             self.set_multiline_prompt();
         }
         self.reset_line(line)?;
@@ -608,12 +608,12 @@ impl Limbo {
                 if let Some(opcode) = args.opcode {
                     for op in &OPCODE_DESCRIPTIONS {
                         if op.name.eq_ignore_ascii_case(opcode.trim()) {
-                            let _ = self.write_fmt(format_args!("{}", op));
+                            let _ = self.write_fmt(format_args!("{op}"));
                         }
                     }
                 } else {
                     for op in &OPCODE_DESCRIPTIONS {
-                        let _ = self.write_fmt(format_args!("{}\n", op));
+                        let _ = self.write_fmt(format_args!("{op}\n"));
                     }
                 }
             }
@@ -622,13 +622,13 @@ impl Limbo {
             }
             Command::OutputMode(args) => {
                 if let Err(e) = self.set_mode(args.mode) {
-                    let _ = self.write_fmt(format_args!("Error: {}", e));
+                    let _ = self.write_fmt(format_args!("Error: {e}"));
                 }
             }
             Command::SetOutput(args) => {
                 if let Some(path) = args.path {
                     if let Err(e) = self.set_output_file(&path) {
-                        let _ = self.write_fmt(format_args!("Error: {}", e));
+                        let _ = self.write_fmt(format_args!("Error: {e}"));
                     }
                 } else {
                     self.set_output_stdout();
@@ -655,7 +655,7 @@ impl Limbo {
             }
             Command::Dump => {
                 if let Err(e) = self.dump_database() {
-                    let _ = self.write_fmt(format_args!("/****** ERROR: {} ******/", e));
+                    let _ = self.write_fmt(format_args!("/****** ERROR: {e} ******/"));
                 }
             }
             Command::ListVfs => {
@@ -731,8 +731,7 @@ impl Limbo {
                             let _ =
                                 self.writer.write(self.opts.null_value.as_bytes())?;
                         } else {
-                            let _ =
-                                self.writer.write(format!("{}", value).as_bytes())?;
+                            let _ = self.writer.write(format!("{value}").as_bytes())?;
                         }
                     }
                     let _ = self.writeln("");
@@ -764,7 +763,7 @@ impl Limbo {
                     }
                     let report =
                         miette::Error::from(err).with_source_code(sql.to_owned());
-                    let _ = self.write_fmt(format_args!("{:?}", report));
+                    let _ = self.write_fmt(format_args!("{report:?}"));
                     break;
                 }
             }
@@ -808,17 +807,13 @@ impl Limbo {
                             (self.opts.null_value.clone(), CellAlignment::Left)
                         }
                         Value::Integer(_) => {
-                            (format!("{}", value), CellAlignment::Right)
+                            (format!("{value}"), CellAlignment::Right)
                         }
                         Value::Float(_) => {
-                            (format!("{}", value), CellAlignment::Right)
-                        }
-                        Value::Text(_) => {
-                            (format!("{}", value), CellAlignment::Left)
-                        }
-                        Value::Blob(_) => {
-                            (format!("{}", value), CellAlignment::Left)
+                            (format!("{value}"), CellAlignment::Right)
                         }
+                        Value::Text(_) => (format!("{value}"), CellAlignment::Left),
+                        Value::Blob(_) => (format!("{value}"), CellAlignment::Left),
                     };
                     row.add_cell(
                         Cell::new(content)
@@ -862,21 +857,21 @@ impl Limbo {
                     }
                     let report =
                         miette::Error::from(err).with_source_code(sql.to_owned());
-                    let _ = self.write_fmt(format_args!("{:?}", report));
+                    let _ = self.write_fmt(format_args!("{report:?}"));
                     break;
                 }
             }
         }
 
         if !table.is_empty() {
-            let _ = self.write_fmt(format_args!("{}", table));
+            let _ = self.write_fmt(format_args!("{table}"));
         }
     }
 },
 Ok(None) => {}
 Err(err) => {
     let report = miette::Error::from(err).with_source_code(sql.to_owned());
-    let _ = self.write_fmt(format_args!("{:?}", report));
+    let _ = self.write_fmt(format_args!("{report:?}"));
     anyhow::bail!("We have to throw here, even if we printed error");
 }
 }
@@ -915,7 +910,7 @@ impl Limbo {
         )
         .try_init()
         {
-            println!("Unable to setup tracing appender: {:?}", e);
+            println!("Unable to setup tracing appender: {e:?}");
         }
         Ok(guard)
     }
@@ -923,8 +918,7 @@ impl Limbo {
     fn display_schema(&mut self, table: Option<&str>) -> anyhow::Result<()> {
         let sql = match table {
             Some(table_name) => format!(
-                "SELECT sql FROM sqlite_schema WHERE type IN ('table', 'index') AND tbl_name = '{}' AND name NOT LIKE 'sqlite_%'",
-                table_name
+                "SELECT sql FROM sqlite_schema WHERE type IN ('table', 'index') AND tbl_name = '{table_name}' AND name NOT LIKE 'sqlite_%'"
             ),
             None => String::from(
                 "SELECT sql FROM sqlite_schema WHERE type IN ('table', 'index') AND name NOT LIKE 'sqlite_%'"
@@ -957,7 +951,7 @@ impl Limbo {
         if !found {
             if let Some(table_name) = table {
                 let _ = self
-                    .write_fmt(format_args!("-- Error: Table '{}' not found.", table_name));
+                    .write_fmt(format_args!("-- Error: Table '{table_name}' not found."));
             } else {
                 let _ = self.writeln("-- No tables or indexes found in the database.");
             }
@@ -981,8 +975,7 @@ impl Limbo {
     fn display_indexes(&mut self, maybe_table: Option<String>) -> anyhow::Result<()> {
         let sql = match maybe_table {
             Some(ref tbl_name) => format!(
-                "SELECT name FROM sqlite_schema WHERE type='index' AND tbl_name = '{}' ORDER BY 1",
-                tbl_name
+                "SELECT name FROM sqlite_schema WHERE type='index' AND tbl_name = '{tbl_name}' ORDER BY 1"
             ),
             None => String::from("SELECT name FROM sqlite_schema WHERE type='index' ORDER BY 1"),
         };
@@ -1030,8 +1023,7 @@ impl Limbo {
     fn display_tables(&mut self, pattern: Option<&str>) -> anyhow::Result<()> {
         let sql = match pattern {
             Some(pattern) => format!(
-                "SELECT name FROM sqlite_schema WHERE type='table' AND name NOT LIKE 'sqlite_%' AND name LIKE '{}' ORDER BY 1",
-                pattern
+                "SELECT name FROM sqlite_schema WHERE type='table' AND name NOT LIKE 'sqlite_%' AND name LIKE '{pattern}' ORDER BY 1"
            ),
             None => String::from(
                 "SELECT name FROM sqlite_schema WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY 1"
@@ -1066,8 +1058,7 @@ impl Limbo {
             let _ = self.writeln(tables.trim_end());
         } else if let Some(pattern) = pattern {
             let _ = self.write_fmt(format_args!(
-                "Error: Tables with pattern '{}' not found.",
-                pattern
+                "Error: Tables with pattern '{pattern}' not found."
             ));
         } else {
             let _ = self.writeln("No tables found in the database.");
@@ -37,7 +37,7 @@ impl<'a> ImportFile<'a> {
         let file = match File::open(args.file) {
             Ok(file) => file,
             Err(e) => {
-                let _ = self.writer.write_all(format!("{:?}\n", e).as_bytes());
+                let _ = self.writer.write_all(format!("{e:?}\n").as_bytes());
                 return;
             }
         };
@@ -96,7 +96,7 @@ impl TryFrom<&str> for LimboColor {
             "dark-cyan" => Color::Fixed(6),
             "grey" => Color::Fixed(7),
             "dark-grey" => Color::Fixed(8),
-            _ => return Err(format!("Could not parse color in string: {}", value)),
+            _ => return Err(format!("Could not parse color in string: {value}")),
         };
 
         trace!("Read predefined color: {}", value);
@@ -29,7 +29,7 @@ impl Display for Io {
             Io::Syscall => write!(f, "syscall"),
             #[cfg(all(target_os = "linux", feature = "io_uring"))]
             Io::IoUring => write!(f, "io_uring"),
-            Io::External(str) => write!(f, "{}", str),
+            Io::External(str) => write!(f, "{str}"),
         }
     }
 }
@@ -144,7 +144,7 @@ pub fn get_writer(output: &str) -> Box<dyn Write> {
         _ => match std::fs::File::create(output) {
             Ok(file) => Box::new(file),
             Err(e) => {
-                eprintln!("Error: {}", e);
+                eprintln!("Error: {e}");
                 Box::new(io::stdout())
             }
         },
@@ -51,7 +51,7 @@ fn main() -> anyhow::Result<()> {
             Ok(line) => match app.handle_input_line(line.trim()) {
                 Ok(_) => {}
                 Err(e) => {
-                    eprintln!("{}", e);
+                    eprintln!("{e}");
                 }
             },
             Err(ReadlineError::Interrupted) => {
@@ -11,6 +11,47 @@ fn rusqlite_open() -> rusqlite::Connection {
     sqlite_conn
 }
 
+fn bench_open(criterion: &mut Criterion) {
+    // https://github.com/tursodatabase/turso/issues/174
+    // The rusqlite benchmark crashes on Mac M1 when using the flamegraph features
+    let enable_rusqlite = std::env::var("DISABLE_RUSQLITE_BENCHMARK").is_err();
+
+    if !std::fs::exists("../testing/schema_5k.db").unwrap() {
+        #[allow(clippy::arc_with_non_send_sync)]
+        let io = Arc::new(PlatformIO::new().unwrap());
+        let db = Database::open_file(io.clone(), "../testing/schema_5k.db", false, false).unwrap();
+        let conn = db.connect().unwrap();
+
+        for i in 0..5000 {
+            conn.execute(
+                format!("CREATE TABLE table_{i} ( id INTEGER PRIMARY KEY, name TEXT, value INTEGER, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP )")
+            ).unwrap();
+        }
+    }
+
+    let mut group = criterion.benchmark_group("Open/Connect");
+
+    group.bench_function(BenchmarkId::new("limbo_schema", ""), |b| {
+        b.iter(|| {
+            #[allow(clippy::arc_with_non_send_sync)]
+            let io = Arc::new(PlatformIO::new().unwrap());
+            let db =
+                Database::open_file(io.clone(), "../testing/schema_5k.db", false, false).unwrap();
+            black_box(db.connect().unwrap());
+        });
+    });
+
+    if enable_rusqlite {
+        group.bench_function(BenchmarkId::new("sqlite_schema", ""), |b| {
+            b.iter(|| {
+                black_box(rusqlite::Connection::open("../testing/schema_5k.db").unwrap());
+            });
+        });
+    }
+
+    group.finish();
+}
+
 fn bench_prepare_query(criterion: &mut Criterion) {
     // https://github.com/tursodatabase/turso/issues/174
     // The rusqlite benchmark crashes on Mac M1 when using the flamegraph features
@@ -28,7 +69,7 @@ fn bench_prepare_query(criterion: &mut Criterion) {
     ];
 
     for query in queries.iter() {
-        let mut group = criterion.benchmark_group(format!("Prepare `{}`", query));
+        let mut group = criterion.benchmark_group(format!("Prepare `{query}`"));
 
         group.bench_with_input(
             BenchmarkId::new("limbo_parse_query", query),
@@ -233,6 +274,6 @@ fn bench_execute_select_count(criterion: &mut Criterion) {
 criterion_group! {
     name = benches;
     config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
-    targets = bench_prepare_query, bench_execute_select_1, bench_execute_select_rows, bench_execute_select_count
+    targets = bench_open, bench_prepare_query, bench_execute_select_1, bench_execute_select_rows, bench_execute_select_count
 }
 criterion_main!(benches);
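
Note: the new bench_open group above only needs to appear in the criterion_group! targets list for `cargo bench` to run it. A stripped-down sketch of the same registration pattern; the benchmark body here is illustrative:

use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn bench_example(criterion: &mut Criterion) {
    let mut group = criterion.benchmark_group("Example");
    group.bench_function("noop", |b| b.iter(|| black_box(1 + 1)));
    group.finish();
}

// adding a function to `targets` is the whole registration step
criterion_group!(benches, bench_example);
criterion_main!(benches);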
@@ -443,7 +443,7 @@ fn bench(criterion: &mut Criterion) {
     for (size_name, json_payload) in json_sizes.iter() {
         let query = format!("SELECT jsonb('{}')", json_payload.replace("'", "\\'"));
 
-        let mut group = criterion.benchmark_group(format!("JSONB Size - {}", size_name));
+        let mut group = criterion.benchmark_group(format!("JSONB Size - {size_name}"));
 
         group.bench_function("Limbo", |b| {
             let mut stmt = limbo_conn.prepare(&query).unwrap();
@@ -893,7 +893,7 @@ fn bench_json_patch(criterion: &mut Criterion) {
         patch_json.replace("'", "''")
     );
 
-    let mut group = criterion.benchmark_group(format!("JSON Patch - {}", case_name));
+    let mut group = criterion.benchmark_group(format!("JSON Patch - {case_name}"));
 
     group.bench_function("Limbo", |b| {
         let mut stmt = limbo_conn.prepare(&query).unwrap();
@@ -81,7 +81,7 @@ fn bench_tpc_h_queries(criterion: &mut Criterion) {
     ];
 
     for (idx, query) in queries.iter() {
-        let mut group = criterion.benchmark_group(format!("Query `{}` ", idx));
+        let mut group = criterion.benchmark_group(format!("Query `{idx}` "));
         group.sampling_mode(SamplingMode::Flat);
         group.sample_size(10);
@@ -102,10 +102,7 @@ impl Database {
             other => match get_vfs_modules().iter().find(|v| v.0 == vfs) {
                 Some((_, vfs)) => vfs.clone(),
                 None => {
-                    return Err(LimboError::InvalidArgument(format!(
-                        "no such VFS: {}",
-                        other
-                    )));
+                    return Err(LimboError::InvalidArgument(format!("no such VFS: {other}")));
                 }
             },
         };
@@ -173,7 +173,7 @@ impl Display for VectorFunc {
             Self::VectorExtract => "vector_extract".to_string(),
             Self::VectorDistanceCos => "vector_distance_cos".to_string(),
         };
-        write!(f, "{}", str)
+        write!(f, "{str}")
     }
 }
@@ -435,7 +435,7 @@ impl Display for ScalarFunc {
             Self::TimeDiff => "timediff".to_string(),
             Self::Likelihood => "likelihood".to_string(),
         };
-        write!(f, "{}", str)
+        write!(f, "{str}")
     }
 }
@@ -550,7 +550,7 @@ impl Display for MathFunc {
             Self::Tanh => "tanh".to_string(),
             Self::Trunc => "trunc".to_string(),
         };
-        write!(f, "{}", str)
+        write!(f, "{str}")
     }
 }
@@ -585,13 +585,13 @@ impl Display for Func {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Self::Agg(agg_func) => write!(f, "{}", agg_func.to_string()),
-            Self::Scalar(scalar_func) => write!(f, "{}", scalar_func),
-            Self::Math(math_func) => write!(f, "{}", math_func),
-            Self::Vector(vector_func) => write!(f, "{}", vector_func),
+            Self::Scalar(scalar_func) => write!(f, "{scalar_func}"),
+            Self::Math(math_func) => write!(f, "{math_func}"),
+            Self::Vector(vector_func) => write!(f, "{vector_func}"),
             #[cfg(feature = "json")]
-            Self::Json(json_func) => write!(f, "{}", json_func),
-            Self::External(generic_func) => write!(f, "{}", generic_func),
-            Self::AlterTable(alter_func) => write!(f, "{}", alter_func),
+            Self::Json(json_func) => write!(f, "{json_func}"),
+            Self::External(generic_func) => write!(f, "{generic_func}"),
+            Self::AlterTable(alter_func) => write!(f, "{alter_func}"),
         }
     }
 }
@@ -636,7 +636,7 @@ impl Func {
             }
             "group_concat" => {
                 if arg_count != 1 && arg_count != 2 {
-                    println!("{}", arg_count);
+                    println!("{arg_count}");
                     crate::bail_parse_error!("wrong number of arguments to function {}()", name)
                 }
                 Ok(Self::Agg(AggFunc::GroupConcat))
@@ -29,7 +29,7 @@ pub fn exec_strftime(values: &[Register]) -> Value {
 
     let value = &values[0].get_owned_value();
     let format_str = if matches!(value, Value::Text(_) | Value::Integer(_) | Value::Float(_)) {
-        format!("{}", value)
+        format!("{value}")
     } else {
         return Value::Null;
     };
@@ -416,8 +416,8 @@ fn get_date_time_from_time_value_string(value: &str) -> Option<NaiveDateTime> {
         // For time-only formats, assume date 2000-01-01
        // Ref: https://sqlite.org/lang_datefunc.html#tmval
         parse_datetime_with_optional_tz(
-            &format!("2000-01-01 {}", value),
-            &format!("%Y-%m-%d {}", format),
+            &format!("2000-01-01 {value}"),
+            &format!("%Y-%m-%d {format}"),
         )
     } else {
         parse_datetime_with_optional_tz(value, format)
@@ -463,10 +463,7 @@ fn get_date_time_from_time_value_float(value: f64) -> Option<NaiveDateTime> {
     if value.is_infinite() || value.is_nan() || !is_julian_day_value(value) {
         return None;
     }
-    match julian_day_converter::julian_day_to_datetime(value) {
-        Ok(dt) => Some(dt),
-        Err(_) => None,
-    }
+    julian_day_converter::julian_day_to_datetime(value).ok()
 }
 
 fn is_leap_second(dt: &NaiveDateTime) -> bool {
@@ -521,7 +518,7 @@ enum Modifier {
 fn parse_modifier_number(s: &str) -> Result<i64> {
     s.trim()
         .parse::<i64>()
-        .map_err(|_| InvalidModifier(format!("Invalid number: {}", s)))
+        .map_err(|_| InvalidModifier(format!("Invalid number: {s}")))
 }
 
 /// supports YYYY-MM-DD format for time shift modifiers
@@ -539,9 +536,9 @@ fn parse_modifier_time(s: &str) -> Result<NaiveTime> {
         5 => NaiveTime::parse_from_str(s, "%H:%M"),
         8 => NaiveTime::parse_from_str(s, "%H:%M:%S"),
         12 => NaiveTime::parse_from_str(s, "%H:%M:%S.%3f"),
-        _ => return Err(InvalidModifier(format!("Invalid time format: {}", s))),
+        _ => return Err(InvalidModifier(format!("Invalid time format: {s}"))),
     }
-    .map_err(|_| InvalidModifier(format!("Invalid time format: {}", s)))
+    .map_err(|_| InvalidModifier(format!("Invalid time format: {s}")))
 }
 
 fn parse_modifier(modifier: &str) -> Result<Modifier> {
@@ -811,8 +808,7 @@ mod tests {
         assert_eq!(
             result,
             Value::build_text(expected),
-            "Failed for input: {:?}",
-            input
+            "Failed for input: {input:?}"
         );
     }
 }
@@ -851,10 +847,7 @@ mod tests {
         let result = exec_date(&[Register::Value(case.clone())]);
         match result {
             Value::Text(ref result_str) if result_str.value.is_empty() => (),
-            _ => panic!(
-                "Expected empty string for input: {:?}, but got: {:?}",
-                case, result
-            ),
+            _ => panic!("Expected empty string for input: {case:?}, but got: {result:?}"),
         }
     }
 }
@@ -947,7 +940,7 @@ mod tests {
         if let Value::Text(result_str) = result {
             assert_eq!(result_str.as_str(), expected);
         } else {
-            panic!("Expected Value::Text, but got: {:?}", result);
+            panic!("Expected Value::Text, but got: {result:?}");
         }
     }
 }
@@ -986,10 +979,7 @@ mod tests {
         let result = exec_time(&[Register::Value(case.clone())]);
         match result {
             Value::Text(ref result_str) if result_str.value.is_empty() => (),
-            _ => panic!(
-                "Expected empty string for input: {:?}, but got: {:?}",
-                case, result
-            ),
+            _ => panic!("Expected empty string for input: {case:?}, but got: {result:?}"),
         }
    }
 }
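
Note: the Julian-day hunk above collapses a two-arm match into Result::ok, which performs the same Ok(v) -> Some(v), Err(_) -> None conversion. The equivalence in isolation, with an illustrative function:

fn parse_number(s: &str) -> Option<i64> {
    // before: match s.parse::<i64>() { Ok(v) => Some(v), Err(_) => None }
    s.parse::<i64>().ok()
}

fn main() {
    assert_eq!(parse_number("42"), Some(42));
    assert_eq!(parse_number("nope"), None);
}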
@@ -34,8 +34,8 @@ pub fn exec_printf(values: &[Register]) -> crate::Result<Value> {
                 }
                 let value = &values[args_index].get_owned_value();
                 match value {
-                    Value::Integer(_) => result.push_str(&format!("{}", value)),
-                    Value::Float(_) => result.push_str(&format!("{}", value)),
+                    Value::Integer(_) => result.push_str(&format!("{value}")),
+                    Value::Float(_) => result.push_str(&format!("{value}")),
                     _ => result.push('0'),
                 }
                 args_index += 1;
@@ -47,7 +47,7 @@ pub fn exec_printf(values: &[Register]) -> crate::Result<Value> {
                 match &values[args_index].get_owned_value() {
                     Value::Text(t) => result.push_str(t.as_str()),
                     Value::Null => result.push_str("(null)"),
-                    v => result.push_str(&format!("{}", v)),
+                    v => result.push_str(&format!("{v}")),
                 }
                 args_index += 1;
             }
@@ -57,7 +57,7 @@ pub fn exec_printf(values: &[Register]) -> crate::Result<Value> {
                 }
                 let value = &values[args_index].get_owned_value();
                 match value {
-                    Value::Float(f) => result.push_str(&format!("{:.6}", f)),
+                    Value::Float(f) => result.push_str(&format!("{f:.6}")),
                     Value::Integer(i) => result.push_str(&format!("{:.6}", *i as f64)),
                     _ => result.push_str("0.0"),
                 }
@@ -29,8 +29,7 @@ impl fmt::Display for UringIOError {
         match self {
             UringIOError::IOUringCQError(code) => write!(
                 f,
-                "IOUring completion queue error occurred with code {}",
-                code
+                "IOUring completion queue error occurred with code {code}",
             ),
         }
     }
@@ -254,7 +253,7 @@ impl File for UringFile {
                 ErrorKind::WouldBlock => {
                     "Failed locking file. File is locked by another process".to_string()
                 }
-                _ => format!("Failed locking file, {}", io_error),
+                _ => format!("Failed locking file, {io_error}"),
             };
             LimboError::LockingError(message)
         })?;
@@ -314,7 +314,7 @@ impl File for UnixFile<'_> {
                 ErrorKind::WouldBlock => {
                     "Failed locking file. File is locked by another process".to_string()
                 }
-                _ => format!("Failed locking file, {}", io_error),
+                _ => format!("Failed locking file, {io_error}"),
             };
             LimboError::LockingError(message)
         })?;
@@ -38,7 +38,7 @@ impl From<std::str::Utf8Error> for Error {
 impl Display for Error {
     fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
-            Self::Message { ref msg, .. } => write!(formatter, "{}", msg),
+            Self::Message { ref msg, .. } => write!(formatter, "{msg}"),
         }
     }
 }
@@ -1136,7 +1136,7 @@ impl Jsonb {
                 b'\r' => string.push_str("\\r"),
                 _ => {
                     // Format as \u00XX
-                    let hex = format!("\\u{:04x}", ch);
+                    let hex = format!("\\u{ch:04x}");
                     string.push_str(&hex);
                 }
             }
@@ -1304,7 +1304,7 @@ impl Jsonb {
 
                 value = value * 16 + ch.to_digit(16).unwrap_or(0) as u64;
             }
-            write!(string, "{}", value)
+            write!(string, "{value}")
                 .map_err(|_| LimboError::ParseError("Error writing string to json!".to_string()))?;
         } else {
             string.push_str(hex_str);
@@ -1336,7 +1336,7 @@ impl Jsonb {
             val if val
                 .chars()
                 .next()
-                .map_or(false, |c| c.is_ascii_alphanumeric() || c == '+' || c == '-') =>
+                .is_some_and(|c| c.is_ascii_alphanumeric() || c == '+' || c == '-') =>
             {
                 string.push_str(val);
                 string.push('0');
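
Note: Option::map_or(false, f) and Option::is_some_and(f) (stable since Rust 1.70) are equivalent; the latter states the intent directly and is what clippy now suggests. In isolation, with an illustrative function:

fn starts_like_number(val: &str) -> bool {
    // before: val.chars().next().map_or(false, |c| ...)
    val.chars()
        .next()
        .is_some_and(|c| c.is_ascii_alphanumeric() || c == '+' || c == '-')
}

fn main() {
    assert!(starts_like_number("+1.5"));
    assert!(!starts_like_number(""));
}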
@@ -1403,7 +1403,7 @@ impl Jsonb {
                 || c == b'-'
                 || c == b'+'
                 || c == b'.'
-                || c.to_ascii_lowercase() == b'i' =>
+                || c.eq_ignore_ascii_case(&b'i') =>
             {
                 pos = self.deserialize_number(input, pos)?;
             }
@@ -2113,7 +2113,7 @@ impl Jsonb {
             std::cmp::Ordering::Greater => {
                 self.data.splice(
                     cursor + old_len..cursor + old_len,
-                    std::iter::repeat(0).take(new_len - old_len),
+                    std::iter::repeat_n(0, new_len - old_len),
                 );
             }
             std::cmp::Ordering::Less => {
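
Note: std::iter::repeat_n(elem, n) (stable since Rust 1.82) replaces the repeat(elem).take(n) chain above. The two produce the same items, but repeat_n is an exact-size iterator, so its length is known up front:

fn main() {
    let a: Vec<u8> = std::iter::repeat(0).take(4).collect();
    let b: Vec<u8> = std::iter::repeat_n(0u8, 4).collect();
    assert_eq!(a, b);

    // repeat().take() cannot offer this; repeat_n knows its length
    assert_eq!(std::iter::repeat_n(0u8, 4).len(), 4);
}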
@@ -3562,7 +3562,7 @@ world""#,
         // Generate a large JSON with many elements
         let mut large_array = String::from("[");
         for i in 0..1000 {
-            large_array.push_str(&format!("{}", i));
+            large_array.push_str(&format!("{i}"));
             if i < 999 {
                 large_array.push(',');
             }
@@ -737,7 +737,7 @@ mod tests {
         let binary_json: Vec<u8> = vec![0xA2, 0x62, 0x6B, 0x31, 0x62, 0x76]; // Incomplete binary JSON
         let input = Value::Blob(binary_json);
         let result = get_json(&input, None);
-        println!("{:?}", result);
+        println!("{result:?}");
         match result {
             Ok(_) => panic!("Expected error for malformed JSON"),
             Err(e) => assert!(e.to_string().contains("malformed JSON")),
@@ -923,7 +923,7 @@ mod tests {
 
         match result {
             Ok(Value::Null) => (),
-            _ => panic!("Expected null result, got: {:?}", result),
+            _ => panic!("Expected null result, got: {result:?}"),
         }
     }
     #[test]
@@ -937,7 +937,7 @@ mod tests {
 
         match result {
             Ok(Value::Null) => (),
-            _ => panic!("Expected null result, got: {:?}", result),
+            _ => panic!("Expected null result, got: {result:?}"),
         }
     }
@@ -395,7 +395,7 @@ mod tests {
             Err(crate::error::LimboError::ParseError(_)) => {
                 // happy path
             }
-            _ => panic!("Expected error for: {:?}, got: {:?}", value, path),
+            _ => panic!("Expected error for: {value:?}, got: {path:?}"),
         }
     }
 }
core/lib.rs: 35 changed lines
@@ -176,7 +176,7 @@ impl Database {
         enable_mvcc: bool,
         enable_indexes: bool,
     ) -> Result<Arc<Database>> {
-        let wal_path = format!("{}-wal", path);
+        let wal_path = format!("{path}-wal");
         let maybe_shared_wal = WalFileShared::open_shared_if_exists(&io, wal_path.as_str())?;
         let db_size = db_file.size()?;
 
@@ -188,9 +188,9 @@ impl Database {
         } else {
             None
         };
-        let wal_has_frames = maybe_shared_wal.as_ref().map_or(false, |wal| {
-            unsafe { &*wal.get() }.max_frame.load(Ordering::SeqCst) > 0
-        });
+        let wal_has_frames = maybe_shared_wal
+            .as_ref()
+            .is_some_and(|wal| unsafe { &*wal.get() }.max_frame.load(Ordering::SeqCst) > 0);
 
         let is_empty = if db_size == 0 && !wal_has_frames {
             DB_STATE_UNITIALIZED
@@ -220,17 +220,19 @@ impl Database {
             let conn = db.connect()?;
             let schema_version = get_schema_version(&conn)?;
             schema.write().schema_version = schema_version;
-            let rows = conn.query("SELECT * FROM sqlite_schema")?;
+
+            let mut schema = schema
+                .try_write()
+                .expect("lock on schema should succeed first try");
+
             let syms = conn.syms.borrow();
+
             if let Err(LimboError::ExtensionError(e)) =
-                parse_schema_rows(rows, &mut schema, &syms, None)
+                schema.make_from_btree(None, conn.pager.clone(), &syms)
             {
                 // this means that a vtab exists and we no longer have the module loaded. we print
                 // a warning to the user to load the module
-                eprintln!("Warning: {}", e);
+                eprintln!("Warning: {e}");
             }
         }
         Ok(db)
@@ -374,8 +376,7 @@ impl Database {
                 "io_uring" => Arc::new(UringIO::new()?),
                 other => {
                     return Err(LimboError::InvalidArgument(format!(
-                        "no such VFS: {}",
-                        other
+                        "no such VFS: {other}"
                     )));
                 }
             },
@@ -846,7 +847,7 @@ impl Connection {
         {
             // this means that a vtab exists and we no longer have the module loaded. we print
             // a warning to the user to load the module
-            eprintln!("Warning: {}", e);
+            eprintln!("Warning: {e}");
         }
     }
     Ok(())
@@ -858,7 +859,7 @@ impl Connection {
         if self.closed.get() {
            return Err(LimboError::InternalError("Connection closed".to_string()));
         }
-        let pragma = format!("PRAGMA {}", pragma_name);
+        let pragma = format!("PRAGMA {pragma_name}");
         let mut stmt = self.prepare(pragma)?;
         let mut results = Vec::new();
         loop {
@@ -889,7 +890,7 @@ impl Connection {
         if self.closed.get() {
             return Err(LimboError::InternalError("Connection closed".to_string()));
         }
-        let pragma = format!("PRAGMA {} = {}", pragma_name, pragma_value);
+        let pragma = format!("PRAGMA {pragma_name} = {pragma_value}");
         let mut stmt = self.prepare(pragma)?;
         let mut results = Vec::new();
         loop {
@@ -922,7 +923,7 @@ impl Connection {
         if self.closed.get() {
             return Err(LimboError::InternalError("Connection closed".to_string()));
         }
-        let pragma = format!("PRAGMA {}({})", pragma_name, pragma_value);
+        let pragma = format!("PRAGMA {pragma_name}({pragma_value})");
         let mut stmt = self.prepare(pragma)?;
         let mut results = Vec::new();
         loop {
@@ -1047,7 +1048,7 @@ impl std::fmt::Debug for SymbolTable {
 
 fn is_shared_library(path: &std::path::Path) -> bool {
     path.extension()
-        .map_or(false, |ext| ext == "so" || ext == "dylib" || ext == "dll")
+        .is_some_and(|ext| ext == "so" || ext == "dylib" || ext == "dll")
 }
 
 pub fn resolve_ext_path(extpath: &str) -> Result<std::path::PathBuf> {
@@ -1055,8 +1056,7 @@ pub fn resolve_ext_path(extpath: &str) -> Result<std::path::PathBuf> {
     if !path.exists() {
         if is_shared_library(path) {
             return Err(LimboError::ExtensionError(format!(
-                "Extension file not found: {}",
-                extpath
+                "Extension file not found: {extpath}"
             )));
         };
         let maybe = path.with_extension(std::env::consts::DLL_EXTENSION);
@@ -1064,8 +1064,7 @@ pub fn resolve_ext_path(extpath: &str) -> Result<std::path::PathBuf> {
             .exists()
             .then_some(maybe)
             .ok_or(LimboError::ExtensionError(format!(
-                "Extension file not found: {}",
-                extpath
+                "Extension file not found: {extpath}"
             )))
     } else {
         Ok(path.to_path_buf())
@@ -686,7 +686,7 @@ fn setup_sequential_db() -> (Rc<MvStore<TestClock>>, u64) {
     let table_id = 1;
     for i in 1..6 {
         let id = RowID::new(table_id, i);
-        let data = format!("row{}", i).into_bytes();
+        let data = format!("row{i}").into_bytes();
         let row = Row::new(id, data);
         db.insert(tx_id, row).unwrap();
     }
@@ -144,8 +144,7 @@ impl PragmaVirtualTable {
 
     fn no_such_pragma(pragma_name: &str) -> LimboError {
         LimboError::ParseError(format!(
-            "No such table-valued function: pragma_{}",
-            pragma_name
+            "No such table-valued function: pragma_{pragma_name}"
         ))
     }
@@ -246,7 +245,7 @@ impl PragmaVirtualTableCursor {
 
         let mut sql = format!("PRAGMA {}", self.pragma_name);
         if let Some(arg) = &self.arg {
-            sql.push_str(&format!("=\"{}\"", arg));
+            sql.push_str(&format!("=\"{arg}\""));
         }
 
         self.stmt = Some(self.conn.prepare(sql)?);
core/schema.rs: 279 changed lines
@@ -1,9 +1,15 @@
use crate::result::LimboResult;
use crate::storage::btree::BTreeCursor;
use crate::translate::collate::CollationSeq;
use crate::translate::plan::SelectPlan;
use crate::types::CursorResult;
use crate::util::{module_args_from_sql, module_name_from_sql, UnparsedFromSqlIndex};
use crate::{util::normalize_ident, Result};
use crate::{LimboError, VirtualTable};
use crate::{LimboError, MvCursor, Pager, SymbolTable, VirtualTable};
use core::fmt;
use fallible_iterator::FallibleIterator;
use std::cell::RefCell;
use std::collections::hash_map::Entry;
use std::collections::{BTreeSet, HashMap};
use std::rc::Rc;
use std::sync::Arc;
@@ -134,6 +140,148 @@ impl Schema {
pub fn indexes_enabled(&self) -> bool {
self.indexes_enabled
}

/// Update [Schema] by scanning the first root page (sqlite_schema)
pub fn make_from_btree(
&mut self,
mv_cursor: Option<Rc<RefCell<MvCursor>>>,
pager: Rc<Pager>,
syms: &SymbolTable,
) -> Result<()> {
let mut cursor = BTreeCursor::new_table(mv_cursor, pager.clone(), 1);

let mut from_sql_indexes = Vec::with_capacity(10);
let mut automatic_indices: HashMap<String, Vec<(String, usize)>> =
HashMap::with_capacity(10);

match pager.begin_read_tx()? {
CursorResult::Ok(v) => {
if matches!(v, LimboResult::Busy) {
return Err(LimboError::Busy);
}
}
CursorResult::IO => pager.io.run_once()?,
}

match cursor.rewind()? {
CursorResult::Ok(v) => v,
CursorResult::IO => pager.io.run_once()?,
};

loop {
let Some(row) = (loop {
match cursor.record()? {
CursorResult::Ok(v) => break v,
CursorResult::IO => pager.io.run_once()?,
}
}) else {
break;
};

let ty = row.get::<&str>(0)?;
match ty {
"table" => {
let root_page = row.get::<i64>(3)?;
let sql = row.get::<&str>(4)?;
let create_virtual = "create virtual";
if root_page == 0
&& sql[0..create_virtual.len()].eq_ignore_ascii_case(create_virtual)
{
let name: &str = row.get::<&str>(1)?;
// a virtual table is found in the sqlite_schema, but it's no
// longer in the in-memory schema. We need to recreate it if
// the module is loaded in the symbol table.
let vtab = if let Some(vtab) = syms.vtabs.get(name) {
vtab.clone()
} else {
let mod_name = module_name_from_sql(sql)?;
crate::VirtualTable::table(
Some(name),
mod_name,
module_args_from_sql(sql)?,
syms,
)?
};
self.add_virtual_table(vtab);
continue;
}

let table = BTreeTable::from_sql(sql, root_page as usize)?;
self.add_btree_table(Rc::new(table));
}
"index" => {
let root_page = row.get::<i64>(3)?;
match row.get::<&str>(4) {
Ok(sql) => {
from_sql_indexes.push(UnparsedFromSqlIndex {
table_name: row.get::<&str>(2)?.to_string(),
root_page: root_page as usize,
sql: sql.to_string(),
});
}
_ => {
// Automatic index on primary key and/or unique constraint, e.g.
// table|foo|foo|2|CREATE TABLE foo (a text PRIMARY KEY, b)
// index|sqlite_autoindex_foo_1|foo|3|
let index_name = row.get::<&str>(1)?.to_string();
let table_name = row.get::<&str>(2)?.to_string();
let root_page = row.get::<i64>(3)?;
match automatic_indices.entry(table_name) {
Entry::Vacant(e) => {
e.insert(vec![(index_name, root_page as usize)]);
}
Entry::Occupied(mut e) => {
e.get_mut().push((index_name, root_page as usize));
}
};
}
}
}
_ => {}
};

drop(row);

if matches!(cursor.next()?, CursorResult::IO) {
pager.io.run_once()?;
};
}

pager.end_read_tx()?;

for unparsed_sql_from_index in from_sql_indexes {
if !self.indexes_enabled() {
self.table_set_has_index(&unparsed_sql_from_index.table_name);
} else {
let table = self
.get_btree_table(&unparsed_sql_from_index.table_name)
.unwrap();
let index = Index::from_sql(
&unparsed_sql_from_index.sql,
unparsed_sql_from_index.root_page,
table.as_ref(),
)?;
self.add_index(Arc::new(index));
}
}

for automatic_index in automatic_indices {
if !self.indexes_enabled() {
self.table_set_has_index(&automatic_index.0);
} else {
let table = self.get_btree_table(&automatic_index.0).unwrap();
let ret_index = Index::automatic_from_primary_key_and_unique(
table.as_ref(),
automatic_index.1,
)?;
for index in ret_index {
self.add_index(Arc::new(index));
}
}
}

Ok(())
}
}

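The Entry-based bucketing in `make_from_btree` groups automatic indexes per table. A self-contained sketch with invented sqlite_schema rows:

use std::collections::{hash_map::Entry, HashMap};

// Sketch of the Entry API grouping used above; the rows are illustrative only.
fn main() {
    let rows = [
        ("sqlite_autoindex_foo_1", "foo", 3usize),
        ("sqlite_autoindex_foo_2", "foo", 4),
        ("sqlite_autoindex_bar_1", "bar", 5),
    ];
    let mut automatic_indices: HashMap<String, Vec<(String, usize)>> = HashMap::new();
    for (index_name, table_name, root_page) in rows {
        match automatic_indices.entry(table_name.to_string()) {
            Entry::Vacant(e) => {
                e.insert(vec![(index_name.to_string(), root_page)]);
            }
            Entry::Occupied(mut e) => e.get_mut().push((index_name.to_string(), root_page)),
        }
    }
    assert_eq!(automatic_indices["foo"].len(), 2);
    assert_eq!(automatic_indices["bar"], vec![("sqlite_autoindex_bar_1".to_string(), 5)]);
}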
#[derive(Clone, Debug)]
@@ -261,10 +409,11 @@ impl BTreeTable {
sql.push_str(", ");
}
sql.push_str(column.name.as_ref().expect("column name is None"));
if !matches!(column.ty, Type::Null) {

if !column.ty_str.is_empty() {
sql.push(' ');
sql.push_str(&column.ty_str);
}
sql.push_str(&column.ty.to_string());

if column.unique {
sql.push_str(" UNIQUE");
@@ -419,43 +568,47 @@ fn create_table(
// A column defined as exactly INTEGER PRIMARY KEY is a rowid alias, meaning that the rowid
// and the value of this column are the same.
// https://www.sqlite.org/lang_createtable.html#rowids_and_the_integer_primary_key
let mut typename_exactly_integer = false;
let (ty, ty_str) = match col_def.col_type {
Some(data_type) => {
let s = data_type.name.as_str();
let ty_str = if matches!(
s.to_uppercase().as_str(),
"TEXT" | "INT" | "INTEGER" | "BLOB" | "REAL"
) {
s.to_uppercase().to_string()
} else {
s.to_string()
};
let ty_str = col_def
.col_type
.as_ref()
.map(|ast::Type { name, .. }| name.clone())
.unwrap_or_default();

let mut typename_exactly_integer = false;
let ty = match col_def.col_type {
Some(data_type) => 'ty: {
// https://www.sqlite.org/datatype3.html
let type_name = ty_str.to_uppercase();
if type_name.contains("INT") {
typename_exactly_integer = type_name == "INTEGER";
(Type::Integer, ty_str)
} else if type_name.contains("CHAR")
|| type_name.contains("CLOB")
|| type_name.contains("TEXT")
{
(Type::Text, ty_str)
} else if type_name.contains("BLOB") {
(Type::Blob, ty_str)
} else if type_name.is_empty() {
(Type::Blob, "".to_string())
} else if type_name.contains("REAL")
|| type_name.contains("FLOA")
|| type_name.contains("DOUB")
{
(Type::Real, ty_str)
} else {
(Type::Numeric, ty_str)
let mut type_name = data_type.name;
type_name.make_ascii_uppercase();

if type_name.is_empty() {
break 'ty Type::Blob;
}

if type_name == "INTEGER" {
typename_exactly_integer = true;
break 'ty Type::Integer;
}

if let Some(ty) = type_name.as_bytes().windows(3).find_map(|s| match s {
b"INT" => Some(Type::Integer),
_ => None,
}) {
break 'ty ty;
}

if let Some(ty) = type_name.as_bytes().windows(4).find_map(|s| match s {
b"CHAR" | b"CLOB" | b"TEXT" => Some(Type::Text),
b"BLOB" => Some(Type::Blob),
b"REAL" | b"FLOA" | b"DOUB" => Some(Type::Real),
_ => None,
}) {
break 'ty ty;
}

Type::Numeric
}
None => (Type::Null, "".to_string()),
None => Type::Null,
};

let mut default = None;
@@ -464,22 +617,22 @@ fn create_table(
let mut order = SortOrder::Asc;
let mut unique = false;
let mut collation = None;
for c_def in &col_def.constraints {
match &c_def.constraint {
for c_def in col_def.constraints {
match c_def.constraint {
turso_sqlite3_parser::ast::ColumnConstraint::PrimaryKey {
order: o,
..
} => {
primary_key = true;
if let Some(o) = o {
order = *o;
order = o;
}
}
turso_sqlite3_parser::ast::ColumnConstraint::NotNull { .. } => {
notnull = true;
}
turso_sqlite3_parser::ast::ColumnConstraint::Default(expr) => {
default = Some(expr.clone())
default = Some(expr)
}
// TODO: for now we don't check Resolve type of unique
turso_sqlite3_parser::ast::ColumnConstraint::Unique(on_conflict) => {
@@ -491,7 +644,6 @@ fn create_table(
turso_sqlite3_parser::ast::ColumnConstraint::Collate { collation_name } => {
collation = Some(CollationSeq::new(collation_name.0.as_str())?);
}
// Collate
_ => {}
}
}
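The rewritten type derivation follows SQLite's declared-type affinity rules (https://www.sqlite.org/datatype3.html): a substring match on INT wins first, then CHAR/CLOB/TEXT, then BLOB or an empty declaration, then REAL/FLOA/DOUB, and anything else falls back to NUMERIC. A standalone sketch of the same decision order (the real code scans byte windows; this version uses contains() for brevity):

#[derive(Debug, PartialEq)]
enum Affinity {
    Integer,
    Text,
    Blob,
    Real,
    Numeric,
}

// Sketch of SQLite's column type affinity rules, in the order the diff checks them.
fn affinity(declared: &str) -> Affinity {
    let t = declared.to_uppercase();
    if t.contains("INT") {
        Affinity::Integer
    } else if t.contains("CHAR") || t.contains("CLOB") || t.contains("TEXT") {
        Affinity::Text
    } else if t.is_empty() || t.contains("BLOB") {
        Affinity::Blob
    } else if t.contains("REAL") || t.contains("FLOA") || t.contains("DOUB") {
        Affinity::Real
    } else {
        Affinity::Numeric
    }
}

fn main() {
    assert_eq!(affinity("VARCHAR(255)"), Affinity::Text);
    assert_eq!(affinity("BIGINT"), Affinity::Integer);
    // Documented SQLite quirk: "POINT" contains "INT", so this is Integer.
    assert_eq!(affinity("FLOATING POINT"), Affinity::Integer);
    assert_eq!(affinity("someNonsenseName"), Affinity::Numeric);
}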
@@ -825,8 +977,7 @@ impl Affinity {
SQLITE_AFF_REAL => Ok(Affinity::Real),
SQLITE_AFF_NUMERIC => Ok(Affinity::Numeric),
_ => Err(LimboError::InternalError(format!(
"Invalid affinity character: {}",
char
"Invalid affinity character: {char}"
))),
}
}
@@ -858,7 +1009,7 @@ impl fmt::Display for Type {
Self::Real => "REAL",
Self::Blob => "BLOB",
};
write!(f, "{}", s)
write!(f, "{s}")
}
}

@@ -1453,49 +1604,13 @@ mod tests {
let sql = r#"CREATE TABLE t1 (a InTeGeR);"#;
let table = BTreeTable::from_sql(sql, 0)?;
let column = table.get_column("a").unwrap().1;
assert_eq!(column.ty_str, "INTEGER");
Ok(())
}

#[test]
pub fn test_col_type_string_int() -> Result<()> {
let sql = r#"CREATE TABLE t1 (a InT);"#;
let table = BTreeTable::from_sql(sql, 0)?;
let column = table.get_column("a").unwrap().1;
assert_eq!(column.ty_str, "INT");
Ok(())
}

#[test]
pub fn test_col_type_string_blob() -> Result<()> {
let sql = r#"CREATE TABLE t1 (a bLoB);"#;
let table = BTreeTable::from_sql(sql, 0)?;
let column = table.get_column("a").unwrap().1;
assert_eq!(column.ty_str, "BLOB");
Ok(())
}

#[test]
pub fn test_col_type_string_empty() -> Result<()> {
let sql = r#"CREATE TABLE t1 (a);"#;
let table = BTreeTable::from_sql(sql, 0)?;
let column = table.get_column("a").unwrap().1;
assert_eq!(column.ty_str, "");
Ok(())
}

#[test]
pub fn test_col_type_string_some_nonsense() -> Result<()> {
let sql = r#"CREATE TABLE t1 (a someNonsenseName);"#;
let table = BTreeTable::from_sql(sql, 0)?;
let column = table.get_column("a").unwrap().1;
assert_eq!(column.ty_str, "someNonsenseName");
assert_eq!(column.ty_str, "InTeGeR");
Ok(())
}

#[test]
pub fn test_sqlite_schema() {
let expected = r#"CREATE TABLE sqlite_schema (type TEXT, name TEXT, tbl_name TEXT, rootpage INTEGER, sql TEXT)"#;
let expected = r#"CREATE TABLE sqlite_schema (type TEXT, name TEXT, tbl_name TEXT, rootpage INT, sql TEXT)"#;
let actual = sqlite_schema_table().to_sql();
assert_eq!(expected, actual);
}

@@ -274,20 +274,13 @@ mod tests {
let stop = series.stop;
let step = series.step;
let values = collect_series(series.clone()).unwrap_or_else(|e| {
panic!(
"Failed to generate series for start={}, stop={}, step={}: {:?}",
start, stop, step, e
)
panic!("Failed to generate series for start={start}, stop={stop}, step={step}: {e:?}")
});

if series_is_invalid_or_empty(&series) {
assert!(
values.is_empty(),
"Series should be empty for invalid range: start={}, stop={}, step={}, got {:?}",
start,
stop,
step,
values
"Series should be empty for invalid range: start={start}, stop={stop}, step={step}, got {values:?}"
);
} else {
let expected_len = series_expected_length(&series);
@@ -316,19 +309,13 @@ mod tests {
let step = series.step;

let values = collect_series(series.clone()).unwrap_or_else(|e| {
panic!(
"Failed to generate series for start={}, stop={}, step={}: {:?}",
start, stop, step, e
)
panic!("Failed to generate series for start={start}, stop={stop}, step={step}: {e:?}")
});

if series_is_invalid_or_empty(&series) {
assert!(
values.is_empty(),
"Series should be empty for invalid range: start={}, stop={}, step={}",
start,
stop,
step
"Series should be empty for invalid range: start={start}, stop={stop}, step={step}"
);
} else {
assert!(
@@ -356,19 +343,13 @@ mod tests {
let step = series.step;

let values = collect_series(series.clone()).unwrap_or_else(|e| {
panic!(
"Failed to generate series for start={}, stop={}, step={}: {:?}",
start, stop, step, e
)
panic!("Failed to generate series for start={start}, stop={stop}, step={step}: {e:?}")
});

if series_is_invalid_or_empty(&series) {
assert!(
values.is_empty(),
"Series should be empty for invalid range: start={}, stop={}, step={}",
start,
stop,
step
"Series should be empty for invalid range: start={start}, stop={stop}, step={step}"
);
} else if !values.is_empty() {
assert!(
@@ -396,37 +377,27 @@ mod tests {
let step = series.step;

let values = collect_series(series.clone()).unwrap_or_else(|e| {
panic!(
"Failed to generate series for start={}, stop={}, step={}: {:?}",
start, stop, step, e
)
panic!("Failed to generate series for start={start}, stop={stop}, step={step}: {e:?}")
});

if series_is_invalid_or_empty(&series) {
assert!(
values.is_empty(),
"Series should be empty for invalid range: start={}, stop={}, step={}",
start,
stop,
step
"Series should be empty for invalid range: start={start}, stop={stop}, step={step}"
);
} else if !values.is_empty() {
assert_eq!(
values.first(),
Some(&start),
"Series doesn't start with start value: {:?} (expected start: {})",
values,
start
"Series doesn't start with start value: {values:?} (expected start: {start})"
);
assert!(
values.last().map_or(true, |&last| if step > 0 {
values.last().is_none_or(|&last| if step > 0 {
last <= stop
} else {
last >= stop
}),
"Series exceeds stop value: {:?} (stop: {})",
values,
stop
"Series exceeds stop value: {values:?} (stop: {stop})"
);
}
}
@@ -501,8 +472,7 @@ mod tests {
.expect("Failed to generate series");
assert!(
values.is_empty(),
"Invalid positive range should return empty series, got {:?}",
values
"Invalid positive range should return empty series, got {values:?}"
);

let values = collect_series(Series {
@@ -564,20 +534,15 @@ mod tests {
match cursor.next() {
ResultCode::OK => rowids.push(cur_rowid),
ResultCode::EOF => break,
err => panic!(
"Unexpected error {:?} for start={}, stop={}, step={}",
err, start, stop, step
),
err => {
panic!("Unexpected error {err:?} for start={start}, stop={stop}, step={step}")
}
}
}

assert!(
rowids.windows(2).all(|w| w[1] == w[0] + 1),
"Rowids not monotonically increasing: {:?} (start={}, stop={}, step={})",
rowids,
start,
stop,
step
"Rowids not monotonically increasing: {rowids:?} (start={start}, stop={stop}, step={step})"
);
}

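The monotonicity assertion above relies on slice::windows(2), which yields overlapping pairs, so the predicate compares each rowid with its successor. A tiny self-contained version:

// windows(2) over [1, 2, 3, 4] yields [1,2], [2,3], [3,4].
fn main() {
    let rowids = [1u64, 2, 3, 4];
    assert!(rowids.windows(2).all(|w| w[1] == w[0] + 1));
    let gapped = [1u64, 2, 4];
    assert!(!gapped.windows(2).all(|w| w[1] == w[0] + 1));
}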
@@ -589,19 +554,13 @@ mod tests {
let step = series.step;

let values = collect_series(series.clone()).unwrap_or_else(|e| {
panic!(
"Failed to generate series for start={}, stop={}, step={}: {:?}",
start, stop, step, e
)
panic!("Failed to generate series for start={start}, stop={stop}, step={step}: {e:?}")
});

if series_is_invalid_or_empty(&series) {
assert!(
values.is_empty(),
"Series should be empty for invalid range: start={}, stop={}, step={}",
start,
stop,
step
"Series should be empty for invalid range: start={start}, stop={stop}, step={step}"
);
} else if start == stop {
assert_eq!(

@@ -7,7 +7,8 @@ use crate::{
pager::{BtreePageAllocMode, Pager},
sqlite3_ondisk::{
read_u32, read_varint, BTreeCell, PageContent, PageType, TableInteriorCell,
TableLeafCell,
TableLeafCell, CELL_PTR_SIZE_BYTES, INTERIOR_PAGE_HEADER_SIZE_BYTES,
LEAF_PAGE_HEADER_SIZE_BYTES, LEFT_CHILD_PTR_SIZE_BYTES,
},
},
translate::{collate::CollationSeq, plan::IterationDirection},
@@ -100,6 +101,12 @@ pub mod offset {
/// assumed that the database is corrupt.
pub const BTCURSOR_MAX_DEPTH: usize = 20;

/// Maximum number of sibling pages that balancing is performed on.
pub const MAX_SIBLING_PAGES_TO_BALANCE: usize = 3;

/// We only need maximum 5 pages to balance 3 pages, because we can guarantee that cells from 3 pages will fit in 5 pages.
pub const MAX_NEW_SIBLING_PAGES_AFTER_BALANCE: usize = 5;
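On the two constants above: the bound mirrors upstream SQLite's balance_nonroot, which balances at most NB = 3 siblings into at most NB + 2 = 5 pages; even if every source page is completely full, two extra destination pages absorb the spill. The derived relation in this sketch is an illustration of that rule, not how the diff defines the constants:

// Back-of-the-envelope for the 3 -> 5 bound (the NB + 2 rule); the "+ 2" here
// is an assumption for illustration, the diff defines both values directly.
const MAX_SIBLING_PAGES_TO_BALANCE: usize = 3;
const MAX_NEW_SIBLING_PAGES_AFTER_BALANCE: usize = MAX_SIBLING_PAGES_TO_BALANCE + 2;

fn main() {
    assert_eq!(MAX_NEW_SIBLING_PAGES_AFTER_BALANCE, 5);
}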
/// Evaluate a Result<CursorResult<T>>, if IO return IO.
macro_rules! return_if_io {
($expr:expr) => {
@@ -309,11 +316,11 @@ impl BTreeKey<'_> {
#[derive(Clone)]
struct BalanceInfo {
/// Old pages being balanced. We can have maximum 3 pages being balanced at the same time.
pages_to_balance: [Option<BTreePage>; 3],
pages_to_balance: [Option<BTreePage>; MAX_SIBLING_PAGES_TO_BALANCE],
/// Bookkeeping of the rightmost pointer so the offset::BTREE_RIGHTMOST_PTR can be updated.
rightmost_pointer: *mut u8,
/// Divider cells of old pages. We can have maximum 2 divider cells because of 3 pages.
divider_cells: [Option<Vec<u8>>; 2],
divider_cell_payloads: [Option<Vec<u8>>; MAX_SIBLING_PAGES_TO_BALANCE - 1],
/// Number of siblings being used to balance
sibling_count: usize,
/// First divider cell to remove that marks the first sibling
@@ -1629,7 +1636,7 @@ impl BTreeCursor {

if matches!(
self.seek_state,
CursorSeekState::Start { .. }
CursorSeekState::Start
| CursorSeekState::MovingBetweenPages { .. }
| CursorSeekState::InteriorPageBinarySearch { .. }
) {
@@ -1747,7 +1754,7 @@ impl BTreeCursor {
) -> Result<CursorResult<bool>> {
if matches!(
self.seek_state,
CursorSeekState::Start { .. }
CursorSeekState::Start
| CursorSeekState::MovingBetweenPages { .. }
| CursorSeekState::InteriorPageBinarySearch { .. }
) {
@@ -2116,7 +2123,7 @@ impl BTreeCursor {
} else {
write_info.state = WriteState::BalanceStart;
// If we balance, we must save the cursor position and seek to it later.
// FIXME: we shouldn't have both DeleteState::SeekAfterBalancing and
// FIXME: we shouldn't have both DeleteState::SeekAfterBalancing and
// save_context()/restore/context(), they are practically the same thing.
self.save_context(CursorContext::TableRowId(bkey.to_rowid()));
}
@@ -2147,14 +2154,14 @@ impl BTreeCursor {
} else {
write_info.state = WriteState::BalanceStart;
// If we balance, we must save the cursor position and seek to it later.
// FIXME: we shouldn't have both DeleteState::SeekAfterBalancing and
// FIXME: we shouldn't have both DeleteState::SeekAfterBalancing and
// save_context()/restore/context(), they are practically the same thing.
self.save_context(CursorContext::IndexKeyRowId((*record).clone()));
}
continue;
}
}
other => panic!("unexpected cell type, expected TableLeaf or IndexLeaf, found: {:?}", other),
other => panic!("unexpected cell type, expected TableLeaf or IndexLeaf, found: {other:?}"),
}
}
// insert cell
@@ -2288,7 +2295,7 @@ impl BTreeCursor {
return_if_io!(self.balance_non_root());
}
WriteState::Finish => return Ok(CursorResult::Ok(())),
_ => panic!("unexpected state on balance {:?}", state),
_ => panic!("unexpected state on balance {state:?}"),
}
}
}
@@ -2338,7 +2345,8 @@ impl BTreeCursor {
"expected index or table interior page"
);
// Part 1: Find the sibling pages to balance
let mut pages_to_balance: [Option<BTreePage>; 3] = [const { None }; 3];
let mut pages_to_balance: [Option<BTreePage>; MAX_SIBLING_PAGES_TO_BALANCE] =
[const { None }; MAX_SIBLING_PAGES_TO_BALANCE];
let number_of_cells_in_parent =
parent_contents.cell_count() + parent_contents.overflow_cells.len();

@@ -2348,9 +2356,7 @@ impl BTreeCursor {
);
turso_assert!(
page_to_balance_idx <= parent_contents.cell_count(),
"page_to_balance_idx={} is out of bounds for parent cell count {}",
page_to_balance_idx,
number_of_cells_in_parent
"page_to_balance_idx={page_to_balance_idx} is out of bounds for parent cell count {number_of_cells_in_parent}"
);
// As there will be at maximum 3 pages used to balance:
// sibling_pointer is the index representing one of those 3 pages, and we initialize it to the last possible page.
@@ -2457,7 +2463,7 @@ impl BTreeCursor {
.replace(Some(BalanceInfo {
pages_to_balance,
rightmost_pointer: right_pointer,
divider_cells: [const { None }; 2],
divider_cell_payloads: [const { None }; MAX_SIBLING_PAGES_TO_BALANCE - 1],
sibling_count,
first_divider_cell: first_cell_divider,
}));
@@ -2492,8 +2498,9 @@ impl BTreeCursor {
// The count includes: all cells and overflow cells from the sibling pages, and divider cells from the parent page,
// excluding the rightmost divider, which will not be dropped from the parent; instead it will be updated at the end.
let mut total_cells_to_redistribute = 0;
// We only need maximum 5 pages to balance 3 pages, because we can guarantee that cells from 3 pages will fit in 5 pages.
let mut pages_to_balance_new: [Option<BTreePage>; 5] = [const { None }; 5];
let mut pages_to_balance_new: [Option<BTreePage>;
MAX_NEW_SIBLING_PAGES_AFTER_BALANCE] =
[const { None }; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE];
for i in (0..balance_info.sibling_count).rev() {
let sibling_page = balance_info.pages_to_balance[i].as_ref().unwrap();
let sibling_page = sibling_page.get();
@@ -2504,7 +2511,8 @@ impl BTreeCursor {

// Right pointer is not dropped, we simply update it at the end. This could be a divider cell that points
// to the last page in the list of pages to balance or this could be the rightmost pointer that points to a page.
if i == balance_info.sibling_count - 1 {
let is_last_sibling = i == balance_info.sibling_count - 1;
if is_last_sibling {
continue;
}
// Since we know we have a left sibling, take the divider that points to left sibling of this page
@@ -2524,7 +2532,7 @@ impl BTreeCursor {
);

// TODO(pere): make this reference and not copy
balance_info.divider_cells[i].replace(cell_buf.to_vec());
balance_info.divider_cell_payloads[i].replace(cell_buf.to_vec());
tracing::trace!(
"dropping divider cell from parent cell_idx={} count={}",
cell_idx,
@@ -2536,14 +2544,15 @@ impl BTreeCursor {
/* 2. Initialize CellArray with all the cells used for distribution, this includes divider cells if !leaf. */
let mut cell_array = CellArray {
cell_payloads: Vec::with_capacity(total_cells_to_redistribute),
cell_count_per_page_cumulative: [0; 5],
cell_count_per_page_cumulative: [0; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE],
};
let cells_capacity_start = cell_array.cell_payloads.capacity();

let mut total_cells_inserted = 0;
// This is otherwise identical to CellArray.cell_count_per_page_cumulative,
// but we exclusively track what the prefix sums were _before_ we started redistributing cells.
let mut old_cell_count_per_page_cumulative: [u16; 5] = [0; 5];
let mut old_cell_count_per_page_cumulative: [u16;
MAX_NEW_SIBLING_PAGES_AFTER_BALANCE] = [0; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE];

let page_type = balance_info.pages_to_balance[0]
.as_ref()
@@ -2585,10 +2594,11 @@ impl BTreeCursor {
let mut cells_inserted =
old_page_contents.cell_count() + old_page_contents.overflow_cells.len();

if i < balance_info.sibling_count - 1 && !is_table_leaf {
let is_last_sibling = i == balance_info.sibling_count - 1;
if !is_last_sibling && !is_table_leaf {
// If we are an index page or an interior table page we need to take the divider cell too.
// But we don't need the last divider as it will remain the same.
let mut divider_cell = balance_info.divider_cells[i]
let mut divider_cell = balance_info.divider_cell_payloads[i]
.as_mut()
.unwrap()
.as_mut_slice();
@@ -2598,12 +2608,16 @@ impl BTreeCursor {
if !is_leaf {
// This divider cell needs to be updated with new left pointer,
let right_pointer = old_page_contents.rightmost_pointer().unwrap();
divider_cell[..4].copy_from_slice(&right_pointer.to_be_bytes());
divider_cell[..LEFT_CHILD_PTR_SIZE_BYTES]
.copy_from_slice(&right_pointer.to_be_bytes());
} else {
// index leaf
turso_assert!(divider_cell.len() >= 4, "divider cell is too short");
turso_assert!(
divider_cell.len() >= LEFT_CHILD_PTR_SIZE_BYTES,
"divider cell is too short"
);
// let's strip the page pointer
divider_cell = &mut divider_cell[4..];
divider_cell = &mut divider_cell[LEFT_CHILD_PTR_SIZE_BYTES..];
}
cell_array.cell_payloads.push(to_static_buf(divider_cell));
}
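For context on the LEFT_CHILD_PTR_SIZE_BYTES slices above: an interior-page cell begins with a 4-byte big-endian left-child page number, which the balancer overwrites with the old page's rightmost pointer. A hedged sketch with invented buffer contents:

// Sketch of the divider-cell fixup; the cell bytes here are made up.
const LEFT_CHILD_PTR_SIZE_BYTES: usize = 4;

fn main() {
    let mut divider_cell = vec![0u8, 0, 0, 7, 0x02, 0x2a]; // left child = 7, then key bytes
    let right_pointer: u32 = 42;
    divider_cell[..LEFT_CHILD_PTR_SIZE_BYTES].copy_from_slice(&right_pointer.to_be_bytes());
    let fixed = u32::from_be_bytes(divider_cell[..4].try_into().unwrap());
    assert_eq!(fixed, 42);
}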
@@ -2632,11 +2646,16 @@ impl BTreeCursor {
validate_cells_after_insertion(&cell_array, is_table_leaf);

/* 3. Initialize current size of every page including overflow cells and divider cells that might be included. */
let mut new_page_sizes: [i64; 5] = [0; 5];
let leaf_correction = if is_leaf { 4 } else { 0 };
let mut new_page_sizes: [i64; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE] =
[0; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE];
let header_size = if is_leaf {
LEAF_PAGE_HEADER_SIZE_BYTES
} else {
INTERIOR_PAGE_HEADER_SIZE_BYTES
};
// number of bytes beyond header, different from global usableSpace which includes
// header
let usable_space = self.usable_space() - 12 + leaf_correction;
let usable_space = self.usable_space() - header_size;
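The old `- 12 + leaf_correction` expression and the new `- header_size` compute the same quantity: an SQLite leaf page header is 8 bytes and an interior header is 12 (the extra 4 bytes hold the rightmost child pointer). A quick equivalence check:

// Both forms agree for leaf and interior pages; 4096 is an example page size.
const LEAF_PAGE_HEADER_SIZE_BYTES: usize = 8;
const INTERIOR_PAGE_HEADER_SIZE_BYTES: usize = 12;

fn main() {
    for is_leaf in [true, false] {
        let usable_space = 4096usize;
        let leaf_correction = if is_leaf { 4 } else { 0 };
        let old = usable_space - 12 + leaf_correction;
        let header = if is_leaf {
            LEAF_PAGE_HEADER_SIZE_BYTES
        } else {
            INTERIOR_PAGE_HEADER_SIZE_BYTES
        };
        assert_eq!(old, usable_space - header);
    }
}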
for i in 0..balance_info.sibling_count {
cell_array.cell_count_per_page_cumulative[i] =
old_cell_count_per_page_cumulative[i];
@@ -2650,7 +2669,8 @@ impl BTreeCursor {
// 2 to account of pointer
new_page_sizes[i] += 2 + overflow.payload.len() as i64;
}
if !is_leaf && i < balance_info.sibling_count - 1 {
let is_last_sibling = i == balance_info.sibling_count - 1;
if !is_leaf && !is_last_sibling {
// Account for divider cell which is included in this page.
new_page_sizes[i] += cell_array.cell_payloads
[cell_array.cell_count_up_to_page(i)]
@@ -2695,8 +2715,9 @@ impl BTreeCursor {
{
// This means we move the divider cell to the right page and we
// promote the left cell to divider
2 + cell_array.cell_payloads[cell_array.cell_count_up_to_page(i)]
.len() as i64
CELL_PTR_SIZE_BYTES as i64
+ cell_array.cell_payloads[cell_array.cell_count_up_to_page(i)]
.len() as i64
} else {
0
}
@@ -2711,8 +2732,8 @@ impl BTreeCursor {
while cell_array.cell_count_per_page_cumulative[i]
< cell_array.cell_payloads.len() as u16
{
let size_of_cell_to_remove_from_right =
2 + cell_array.cell_payloads[cell_array.cell_count_up_to_page(i)].len()
let size_of_cell_to_remove_from_right = CELL_PTR_SIZE_BYTES as i64
+ cell_array.cell_payloads[cell_array.cell_count_up_to_page(i)].len()
as i64;
let can_take = new_page_sizes[i] + size_of_cell_to_remove_from_right
> usable_space as i64;
@@ -2726,8 +2747,9 @@ impl BTreeCursor {
if cell_array.cell_count_per_page_cumulative[i]
< cell_array.cell_payloads.len() as u16
{
2 + cell_array.cell_payloads[cell_array.cell_count_up_to_page(i)]
.len() as i64
CELL_PTR_SIZE_BYTES as i64
+ cell_array.cell_payloads[cell_array.cell_count_up_to_page(i)]
.len() as i64
} else {
0
}
@@ -2837,15 +2859,24 @@ impl BTreeCursor {
cell_array.cell_size_bytes(cell_right as usize) as i64;
// TODO: add assert nMaxCells

let pointer_size = if i == sibling_count_new - 1 { 0 } else { 2 };
let would_not_improve_balance = size_right_page + cell_right_size + 2
> size_left_page - (cell_left_size + pointer_size);
let is_last_sibling = i == sibling_count_new - 1;
let pointer_size = if is_last_sibling {
0
} else {
CELL_PTR_SIZE_BYTES as i64
};
// As mentioned, this step rebalances the siblings so that cells are moved from left to right, since the previous step just
// packed as much as possible to the left. However, if the right-hand-side page would become larger than the left-hand-side page,
// we stop.
let would_not_improve_balance =
size_right_page + cell_right_size + (CELL_PTR_SIZE_BYTES as i64)
> size_left_page - (cell_left_size + pointer_size);
if size_right_page != 0 && would_not_improve_balance {
break;
}

size_left_page -= cell_left_size + 2;
size_right_page += cell_right_size + 2;
size_left_page -= cell_left_size + (CELL_PTR_SIZE_BYTES as i64);
size_right_page += cell_right_size + (CELL_PTR_SIZE_BYTES as i64);
cell_array.cell_count_per_page_cumulative[i - 1] = cell_left;

if cell_left == 0 {
@@ -2886,7 +2917,8 @@ impl BTreeCursor {

// Reassign page numbers in increasing order
{
let mut page_numbers: [usize; 5] = [0; 5];
let mut page_numbers: [usize; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE] =
[0; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE];
for (i, page) in pages_to_balance_new
.iter()
.take(sibling_count_new)
@@ -2955,7 +2987,8 @@ impl BTreeCursor {
// that was originally on that place.
let is_leaf_page = matches!(page_type, PageType::TableLeaf | PageType::IndexLeaf);
if !is_leaf_page {
let last_page = balance_info.pages_to_balance[balance_info.sibling_count - 1]
let last_sibling_idx = balance_info.sibling_count - 1;
let last_page = balance_info.pages_to_balance[last_sibling_idx]
.as_ref()
.unwrap();
let right_pointer = last_page.get().get_contents().rightmost_pointer().unwrap();
@@ -2969,14 +3002,16 @@ impl BTreeCursor {
}
// TODO: pointer map update (vacuum support)
// Update divider cells in parent
for (i, page) in pages_to_balance_new
for (sibling_page_idx, page) in pages_to_balance_new
.iter()
.enumerate()
.take(sibling_count_new - 1)
/* do not take last page */
{
let page = page.as_ref().unwrap();
let divider_cell_idx = cell_array.cell_count_up_to_page(i);
// e.g. if we have 3 pages and the leftmost child page has 3 cells,
// then the divider cell idx is 3 in the flat cell array.
let divider_cell_idx = cell_array.cell_count_up_to_page(sibling_page_idx);
let mut divider_cell = &mut cell_array.cell_payloads[divider_cell_idx];
// FIXME: don't use auxiliary space, could be done without allocations
let mut new_divider_cell = Vec::new();
@@ -3018,7 +3053,7 @@ impl BTreeCursor {
new_divider_cell.extend_from_slice(divider_cell);
}

let left_pointer = read_u32(&new_divider_cell[..4], 0);
let left_pointer = read_u32(&new_divider_cell[..LEFT_CHILD_PTR_SIZE_BYTES], 0);
turso_assert!(
left_pointer != parent_page.get().id as u32,
"left pointer is the same as parent page id"
@@ -3028,7 +3063,7 @@ impl BTreeCursor {
tracing::debug!(
"balance_non_root(insert_divider_cell, first_divider_cell={}, divider_cell={}, left_pointer={})",
balance_info.first_divider_cell,
i,
sibling_page_idx,
left_pointer
);
turso_assert!(
@@ -3036,25 +3071,33 @@ impl BTreeCursor {
"left pointer is not the same as page id"
);
// FIXME: remove this lock
let database_size = header_accessor::get_database_size(&self.pager)?;
turso_assert!(
left_pointer <= header_accessor::get_database_size(&self.pager)?,
"invalid page number divider left pointer {} > database number of pages",
left_pointer <= database_size,
"invalid page number divider left pointer {} > database number of pages {}",
left_pointer,
database_size
);
// FIXME: defragment shouldn't be needed
// defragment_page(parent_contents, self.usable_space() as u16);
let divider_cell_insert_idx_in_parent =
balance_info.first_divider_cell + sibling_page_idx;
let overflow_cell_count_before = parent_contents.overflow_cells.len();
insert_into_cell(
parent_contents,
&new_divider_cell,
balance_info.first_divider_cell + i,
divider_cell_insert_idx_in_parent,
self.usable_space() as u16,
)
.unwrap();
)?;
let overflow_cell_count_after = parent_contents.overflow_cells.len();
let divider_cell_is_overflow_cell =
overflow_cell_count_after > overflow_cell_count_before;
#[cfg(debug_assertions)]
self.validate_balance_non_root_divider_cell_insertion(
balance_info,
parent_contents,
i,
divider_cell_insert_idx_in_parent,
divider_cell_is_overflow_cell,
&page.get(),
);
}
@@ -3098,8 +3141,10 @@ impl BTreeCursor {
** upwards pass simply processes pages that were missed on the downward
** pass.
*/
let mut done = [false; 5];
for i in (1 - sibling_count_new as i64)..sibling_count_new as i64 {
let mut done = [false; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE];
let rightmost_page_negative_idx = 1 - sibling_count_new as i64;
let rightmost_page_positive_idx = sibling_count_new as i64 - 1;
for i in rightmost_page_negative_idx..=rightmost_page_positive_idx {
// As mentioned above, we do two passes over the pages:
// 1. Downward pass: Process pages in decreasing order
// 2. Upward pass: Process pages in increasing order
@@ -3232,7 +3277,7 @@ impl BTreeCursor {
is_table_leaf,
cells_debug,
sibling_count_new,
rightmost_pointer,
right_page_id,
);

// We have to free pages that are not used anymore
@@ -3254,41 +3299,54 @@ impl BTreeCursor {
result
}

/// Validates that a divider cell was correctly inserted into the parent page
/// during B-tree balancing and that it points to the correct child page.
#[cfg(debug_assertions)]
fn validate_balance_non_root_divider_cell_insertion(
&self,
balance_info: &mut BalanceInfo,
parent_contents: &mut PageContent,
i: usize,
page: &std::sync::Arc<crate::Page>,
divider_cell_insert_idx_in_parent: usize,
divider_cell_is_overflow_cell: bool,
child_page: &std::sync::Arc<crate::Page>,
) {
let left_pointer = if parent_contents.overflow_cells.is_empty() {
let left_pointer = if divider_cell_is_overflow_cell {
parent_contents.overflow_cells
.iter()
.find(|cell| cell.index == divider_cell_insert_idx_in_parent)
.map(|cell| read_u32(&cell.payload, 0))
.unwrap_or_else(|| {
panic!(
"overflow cell with divider cell was not found (divider_cell_idx={}, balance_info.first_divider_cell={}, overflow_cells.len={})",
divider_cell_insert_idx_in_parent,
balance_info.first_divider_cell,
parent_contents.overflow_cells.len(),
)
})
} else if divider_cell_insert_idx_in_parent < parent_contents.cell_count() {
let (cell_start, cell_len) = parent_contents
.cell_get_raw_region(balance_info.first_divider_cell + i, self.usable_space());
tracing::debug!(
"balance_non_root(cell_start={}, cell_len={})",
cell_start,
cell_len
);

let left_pointer = read_u32(
.cell_get_raw_region(divider_cell_insert_idx_in_parent, self.usable_space());
read_u32(
&parent_contents.as_ptr()[cell_start..cell_start + cell_len],
0,
);
left_pointer
)
} else {
let mut left_pointer = None;
for cell in parent_contents.overflow_cells.iter() {
if cell.index == balance_info.first_divider_cell + i {
left_pointer = Some(read_u32(&cell.payload, 0))
}
}
left_pointer.expect("overflow cell with divider cell was not found")
panic!(
"divider cell is not in the parent page (divider_cell_idx={}, balance_info.first_divider_cell={}, overflow_cells.len={})",
divider_cell_insert_idx_in_parent,
balance_info.first_divider_cell,
parent_contents.overflow_cells.len(),
)
};
assert_eq!(left_pointer, page.get().id as u32, "the cell we just inserted doesn't point to the correct page. points to {}, should point to {}",
left_pointer,
page.get().id as u32
);

// Verify the left pointer points to the correct page
assert_eq!(
left_pointer,
child_page.get().id as u32,
"the cell we just inserted doesn't point to the correct page. points to {}, should point to {}",
left_pointer,
child_page.get().id as u32
);
}

#[cfg(debug_assertions)]
@@ -3298,12 +3356,12 @@ impl BTreeCursor {
parent_page: &BTreePage,
balance_info: &mut BalanceInfo,
parent_contents: &mut PageContent,
pages_to_balance_new: [Option<BTreePage>; 5],
pages_to_balance_new: [Option<BTreePage>; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE],
page_type: PageType,
leaf_data: bool,
mut cells_debug: Vec<Vec<u8>>,
sibling_count_new: usize,
rightmost_pointer: &mut [u8],
right_page_id: u32,
) {
let mut valid = true;
let mut current_index_cell = 0;
@@ -3416,7 +3474,6 @@ impl BTreeCursor {
if sibling_count_new == 0 {
// Balance-shallower case
// We need to check data in parent page
let rightmost = read_u32(rightmost_pointer, 0);
debug_validate_cells!(parent_contents, self.usable_space() as u16);

if pages_to_balance_new[0].is_none() {
@@ -3455,32 +3512,32 @@ impl BTreeCursor {
valid = false;
}

if rightmost == page.get().id as u32
|| rightmost == parent_page.get().get().id as u32
if right_page_id == page.get().id as u32
|| right_page_id == parent_page.get().get().id as u32
{
tracing::error!("balance_non_root(balance_shallower_rightmost_pointer, page_id={}, parent_page_id={}, rightmost={})",
page.get().id,
parent_page.get().get().id,
rightmost,
right_page_id,
);
valid = false;
}

if let Some(rm) = contents.rightmost_pointer() {
if rm != rightmost {
if rm != right_page_id {
tracing::error!("balance_non_root(balance_shallower_rightmost_pointer, page_rightmost={}, rightmost={})",
rm,
rightmost,
right_page_id,
);
valid = false;
}
}

if let Some(rm) = parent_contents.rightmost_pointer() {
if rm != rightmost {
if rm != right_page_id {
tracing::error!("balance_non_root(balance_shallower_rightmost_pointer, parent_rightmost={}, rightmost={})",
rm,
rightmost,
right_page_id,
);
valid = false;
}
@@ -3521,15 +3578,14 @@ impl BTreeCursor {
// We will only validate rightmost pointer of parent page, we will not validate rightmost if it's a cell and not the last pointer because,
// insert cell could've defragmented the page and invalidated the pointer.
// right pointer, we just check right pointer points to this page.
if cell_divider_idx == parent_contents.cell_count() {
let rightmost = read_u32(rightmost_pointer, 0);
if rightmost != page.get().id as u32 {
tracing::error!("balance_non_root(cell_divider_right_pointer, should point to {}, but points to {})",
if cell_divider_idx == parent_contents.cell_count()
&& right_page_id != page.get().id as u32
{
tracing::error!("balance_non_root(cell_divider_right_pointer, should point to {}, but points to {})",
page.get().id,
rightmost
right_page_id
);
valid = false;
}
valid = false;
}
} else {
// divider cell might be an overflow cell
@@ -3574,7 +3630,8 @@ impl BTreeCursor {
if leaf_data {
// If we are in a table leaf page, we just need to check that this cell that should be a divider cell is in the parent
// This means we already check cell in leaf pages but not on parent so we don't advance current_index_cell
if page_idx >= balance_info.sibling_count - 1 {
let last_sibling_idx = balance_info.sibling_count - 1;
if page_idx >= last_sibling_idx {
// This means we are in the last page and we don't need to check anything
continue;
}
@@ -3784,10 +3841,7 @@ impl BTreeCursor {
while low <= high && cell_count > 0 {
let mid = low + (high - low) / 2;
self.find_cell_state.set((low, high));
let cell = match page.cell_get(mid, self.usable_space()) {
Ok(c) => c,
Err(e) => return Err(e),
};
let cell = page.cell_get(mid, self.usable_space())?;

let comparison_result = match cell {
BTreeCell::TableLeafCell(cell) => key.to_rowid().cmp(&cell.rowid),
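The match removed above is the textbook expansion of the `?` operator: `Ok(c) => c` unwraps, `Err(e) => return Err(e)` propagates. A minimal sketch:

// `?` replaces the explicit Ok/Err match.
fn double(s: &str) -> Result<i32, std::num::ParseIntError> {
    let n = s.parse::<i32>()?;
    Ok(n * 2)
}

fn main() {
    assert_eq!(double("21"), Ok(42));
    assert!(double("not a number").is_err());
}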
@@ -4014,7 +4068,7 @@ impl BTreeCursor {
.reusable_immutable_record
.borrow()
.as_ref()
.map_or(true, |record| record.is_invalidated());
.is_none_or(|record| record.is_invalidated());
if !invalidated {
*self.parse_record_state.borrow_mut() = ParseRecordState::Init;
let record_ref =
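`map_or(true, ...)` on an Option is exactly `Option::is_none_or(...)`, stable since Rust 1.82: the result is true when the Option is None, otherwise the predicate applied to the value.

// is_none_or: None passes, Some is tested.
fn main() {
    let none: Option<i32> = None;
    assert!(none.is_none_or(|v| v > 0));
    assert!(Some(5).is_none_or(|v| v > 0));
    assert!(!Some(-1).is_none_or(|v| v > 0));
}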
@@ -5262,7 +5316,7 @@ fn validate_cells_after_insertion(cell_array: &CellArray, leaf_data: bool) {
assert!(cell.len() >= 4);

if leaf_data {
assert!(cell[0] != 0, "payload is {:?}", cell);
assert!(cell[0] != 0, "payload is {cell:?}");
}
}
}
@@ -5487,7 +5541,7 @@ struct CellArray {
/// Prefix sum of cells in each page.
/// For example, if three pages have 1, 2, and 3 cells, respectively,
/// then cell_count_per_page_cumulative will be [1, 3, 6].
cell_count_per_page_cumulative: [u16; 5],
cell_count_per_page_cumulative: [u16; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE],
}
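A runnable illustration of the prefix sums documented on CellArray, using the array length named above:

// For pages holding 1, 2, and 3 cells, the running totals are [1, 3, 6]:
// cumulative[i] is the number of cells up to and including page i.
fn main() {
    const MAX_NEW_SIBLING_PAGES_AFTER_BALANCE: usize = 5;
    let cells_per_page = [1u16, 2, 3];
    let mut cumulative = [0u16; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE];
    let mut acc = 0;
    for (i, n) in cells_per_page.iter().enumerate() {
        acc += n;
        cumulative[i] = acc;
    }
    assert_eq!(&cumulative[..3], &[1, 3, 6]);
}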

impl CellArray {
@@ -5981,10 +6035,7 @@ fn debug_validate_cells_core(page: &PageContent, usable_space: u16) {
// Rowid 1 (stored as SerialTypeKind::ConstInt1)
assert!(
size >= 2,
"cell size should be at least 2 bytes idx={}, cell={:?}, offset={}",
i,
buf,
offset
"cell size should be at least 2 bytes idx={i}, cell={buf:?}, offset={offset}"
);
if page.is_leaf() {
assert!(page.as_ptr()[offset] != 0);
@@ -6014,8 +6065,7 @@ fn insert_into_cell(
page.cell_count()
);
let free = compute_free_space(page, usable_space);
const CELL_POINTER_SIZE_BYTES: usize = 2;
let enough_space = payload.len() + CELL_POINTER_SIZE_BYTES <= free as usize;
let enough_space = payload.len() + CELL_PTR_SIZE_BYTES <= free as usize;
if !enough_space {
// add to overflow cell
page.overflow_cells.push(OverflowCell {
@@ -6040,15 +6090,15 @@ fn insert_into_cell(
.copy_from_slice(payload);
// memmove(pIns+2, pIns, 2*(pPage->nCell - i));
let (cell_pointer_array_start, _) = page.cell_pointer_array_offset_and_size();
let cell_pointer_cur_idx = cell_pointer_array_start + (CELL_POINTER_SIZE_BYTES * cell_idx);
let cell_pointer_cur_idx = cell_pointer_array_start + (CELL_PTR_SIZE_BYTES * cell_idx);

// move existing pointers forward by CELL_POINTER_SIZE_BYTES...
// move existing pointers forward by CELL_PTR_SIZE_BYTES...
let n_cells_forward = page.cell_count() - cell_idx;
let n_bytes_forward = CELL_POINTER_SIZE_BYTES * n_cells_forward;
let n_bytes_forward = CELL_PTR_SIZE_BYTES * n_cells_forward;
if n_bytes_forward > 0 {
buf.copy_within(
cell_pointer_cur_idx..cell_pointer_cur_idx + n_bytes_forward,
cell_pointer_cur_idx + CELL_POINTER_SIZE_BYTES,
cell_pointer_cur_idx + CELL_PTR_SIZE_BYTES,
);
}
// ...and insert new cell pointer at the current index
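The `copy_within` call above shifts the packed 2-byte cell pointer array forward to open a gap at cell_idx. A small sketch with an invented pointer array:

// Shift pointers [0x10, 0x20, 0x30] to make room for 0x15 at index 1.
const CELL_PTR_SIZE_BYTES: usize = 2;

fn main() {
    let mut buf = [0x00u8, 0x10, 0x00, 0x20, 0x00, 0x30, 0, 0];
    let cell_idx = 1; // insert before the second pointer
    let cur = cell_idx * CELL_PTR_SIZE_BYTES;
    let n_bytes_forward = (3 - cell_idx) * CELL_PTR_SIZE_BYTES;
    buf.copy_within(cur..cur + n_bytes_forward, cur + CELL_PTR_SIZE_BYTES);
    buf[cur..cur + 2].copy_from_slice(&0x15u16.to_be_bytes());
    assert_eq!(buf, [0x00, 0x10, 0x00, 0x15, 0x00, 0x20, 0x00, 0x30]);
}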
@@ -6518,7 +6568,7 @@ mod tests {
valid &= child_valid;
child_depth
}
_ => panic!("unsupported btree cell: {:?}", cell),
_ => panic!("unsupported btree cell: {cell:?}"),
};
if current_depth >= 100 {
tracing::error!("depth is too big");
@@ -6543,7 +6593,7 @@ mod tests {
}
previous_key = Some(rowid);
}
_ => panic!("unsupported btree cell: {:?}", cell),
_ => panic!("unsupported btree cell: {cell:?}"),
}
}
if let Some(right) = contents.rightmost_pointer() {
@@ -6621,7 +6671,7 @@ mod tests {
cell.first_overflow_page.is_some()
));
}
_ => panic!("unsupported btree cell: {:?}", cell),
_ => panic!("unsupported btree cell: {cell:?}"),
}
}
if let Some(rightmost) = contents.rightmost_pointer() {
@@ -6738,8 +6788,7 @@ mod tests {
cursor.seek(seek_key, SeekOp::GE { eq_only: true }).unwrap(),
CursorResult::Ok(true)
),
"key {} is not found",
key
"key {key} is not found"
);
}
}
@@ -6767,7 +6816,7 @@ mod tests {
) {
const VALIDATE_INTERVAL: usize = 1000;
let do_validate_btree = std::env::var("VALIDATE_BTREE")
.map_or(false, |v| v.parse().expect("validate should be bool"));
.is_ok_and(|v| v.parse().expect("validate should be bool"));
let (mut rng, seed) = rng_from_time_or_env();
let mut seen = HashSet::new();
tracing::info!("super seed: {}", seed);
@@ -6775,7 +6824,7 @@ mod tests {
let (pager, root_page, _db, conn) = empty_btree();
let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page);
let mut keys = SortedVec::new();
tracing::info!("seed: {}", seed);
tracing::info!("seed: {seed}");
for insert_id in 0..inserts {
let do_validate = do_validate_btree || (insert_id % VALIDATE_INTERVAL == 0);
run_until_done(|| pager.begin_read_tx(), &pager).unwrap();
@@ -6844,7 +6893,7 @@ mod tests {
.unwrap();
if *key != cursor_rowid {
valid = false;
println!("key {} is not found, got {}", key, cursor_rowid);
println!("key {key} is not found, got {cursor_rowid}");
break;
}
}
@@ -6854,8 +6903,8 @@ mod tests {
&& (!valid || matches!(validate_btree(pager.clone(), root_page), (_, false)))
{
let btree_after = format_btree(pager.clone(), root_page, 0);
println!("btree before:\n{}", btree_before);
println!("btree after:\n{}", btree_after);
println!("btree before:\n{btree_before}");
println!("btree after:\n{btree_after}");
panic!("invalid btree");
}
pager.end_read_tx().unwrap();
@@ -6877,8 +6926,7 @@ mod tests {
.unwrap();
assert_eq!(
*key, cursor_rowid,
"key {} is not found, got {}",
key, cursor_rowid
"key {key} is not found, got {cursor_rowid}"
);
}
pager.end_read_tx().unwrap();
@@ -6910,7 +6958,7 @@ mod tests {
};
let mut cursor = BTreeCursor::new_table(None, pager.clone(), index_root_page);
let mut keys = SortedVec::new();
tracing::info!("seed: {}", seed);
tracing::info!("seed: {seed}");
for i in 0..inserts {
pager.begin_read_tx().unwrap();
pager.begin_write_tx().unwrap();
@@ -6977,7 +7025,7 @@ mod tests {
pager.deref(),
)
.unwrap();
assert!(exists, "key {:?} is not found", key);
assert!(exists, "key {key:?} is not found");
}
// Check that key count is right
cursor.move_to_root().unwrap();
@@ -7003,13 +7051,11 @@ mod tests {
let cur = record.get_values().clone();
if let Some(prev) = prev {
if prev >= cur {
println!("Seed: {}", seed);
println!("Seed: {seed}");
}
assert!(
prev < cur,
"keys are not in ascending order: {:?} < {:?}",
prev,
cur
"keys are not in ascending order: {prev:?} < {cur:?}",
);
}
prev = Some(cur);
@@ -7272,8 +7318,7 @@ mod tests {
let leaf_page_id = contents.read_u32(8 + (i as usize * 4));
assert!(
(2..=4).contains(&leaf_page_id),
"Leaf page ID {} should be in range 2-4",
leaf_page_id
"Leaf page ID {leaf_page_id} should be in range 2-4"
);
}
}
@@ -8017,7 +8062,7 @@ mod tests {
let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page);
let key = Value::Integer(*key);
let exists = run_until_done(|| cursor.exists(&key), pager.deref()).unwrap();
assert!(exists, "key not found {}", key);
assert!(exists, "key not found {key}");
}
}

@@ -8112,7 +8157,7 @@ mod tests {
let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page);
let key = Value::Integer(i);
let exists = run_until_done(|| cursor.exists(&key), pager.deref()).unwrap();
assert!(exists, "Key {} should exist but doesn't", i);
assert!(exists, "Key {i} should exist but doesn't");
}

// Verify the deleted records don't exist.
@@ -8120,7 +8165,7 @@ mod tests {
let mut cursor = BTreeCursor::new_table(None, pager.clone(), root_page);
let key = Value::Integer(i);
let exists = run_until_done(|| cursor.exists(&key), pager.deref()).unwrap();
assert!(!exists, "Deleted key {} still exists", i);
assert!(!exists, "Deleted key {i} still exists");
}
}

@@ -8366,7 +8411,7 @@ mod tests {
for _ in 0..ITERATIONS {
let mut cell_array = CellArray {
cell_payloads: Vec::new(),
cell_count_per_page_cumulative: [0; 5],
cell_count_per_page_cumulative: [0; MAX_NEW_SIBLING_PAGES_AFTER_BALANCE],
};
let mut cells_cloned = Vec::new();
let (pager, _, _, _) = empty_btree();

@@ -102,8 +102,7 @@ impl DumbLruPageCache {
if let Some(existing_page_ref) = self.get(&key) {
assert!(
Arc::ptr_eq(&value, &existing_page_ref),
"Attempted to insert different page with same key: {:?}",
key
"Attempted to insert different page with same key: {key:?}"
);
return Err(CacheError::KeyExists);
}
@@ -418,15 +417,13 @@ impl DumbLruPageCache {

if forward_count > map_len + 5 {
panic!(
"Infinite loop suspected in forward integrity check. Size {}, count {}",
map_len, forward_count
"Infinite loop suspected in forward integrity check. Size {map_len}, count {forward_count}"
);
}
}
assert_eq!(
forward_count, map_len,
"Forward count mismatch (counted {}, map has {})",
forward_count, map_len
"Forward count mismatch (counted {forward_count}, map has {map_len})"
);
assert_eq!(
tail_ptr, last_ptr,
@@ -457,15 +454,13 @@ impl DumbLruPageCache {
}
if backward_count > map_len + 5 {
panic!(
"Infinite loop suspected in backward integrity check. Size {}, count {}",
map_len, backward_count
"Infinite loop suspected in backward integrity check. Size {map_len}, count {backward_count}"
);
}
}
assert_eq!(
backward_count, map_len,
"Backward count mismatch (counted {}, map has {})",
backward_count, map_len
"Backward count mismatch (counted {backward_count}, map has {map_len})"
);
assert_eq!(
head_ptr, last_ptr,
@@ -1018,7 +1013,7 @@ mod tests {
Err(CacheError::Full | CacheError::ActiveRefs) => {} // Ignore
Err(err) => {
// Any other error should fail the test
panic!("Cache insertion failed: {:?}", err);
panic!("Cache insertion failed: {err:?}");
}
Ok(_) => {
lru.push(key, page);
@@ -1051,7 +1046,7 @@ mod tests {
}
cache.verify_list_integrity();
for (key, page) in &lru {
println!("getting page {:?}", key);
println!("getting page {key:?}");
cache.peek(key, false).unwrap();
assert_eq!(page.get().id, key.pgno);
}
@@ -1214,11 +1209,10 @@ mod tests {
let final_memory = memory_stats::memory_stats().unwrap().physical_mem;

let growth = final_memory.saturating_sub(initial_memory);
println!("Growth: {}", growth);
println!("Growth: {growth}");
assert!(
growth < 10_000_000,
"Memory grew by {} bytes over 10 cycles",
growth
"Memory grew by {growth} bytes over 10 cycles"
);
}
}

@@ -344,8 +344,7 @@ impl Pager {
Some(content) => content,
None => {
return Err(LimboError::InternalError(format!(
"Ptrmap page {} content not loaded",
ptrmap_pg_no
"Ptrmap page {ptrmap_pg_no} content not loaded"
)))
}
};
@@ -367,8 +366,7 @@ impl Pager {
// Check if the calculated offset for the entry is within the bounds of the actual page data length.
if offset_in_ptrmap_page + PTRMAP_ENTRY_SIZE > actual_data_length {
return Err(LimboError::InternalError(format!(
"Ptrmap offset {} + entry size {} out of bounds for page {} (actual data len {})",
offset_in_ptrmap_page, PTRMAP_ENTRY_SIZE, ptrmap_pg_no, actual_data_length
"Ptrmap offset {offset_in_ptrmap_page} + entry size {PTRMAP_ENTRY_SIZE} out of bounds for page {ptrmap_pg_no} (actual data len {actual_data_length})"
)));
}

@@ -377,8 +375,7 @@ impl Pager {
match PtrmapEntry::deserialize(entry_slice) {
Some(entry) => Ok(CursorResult::Ok(Some(entry))),
None => Err(LimboError::Corrupt(format!(
"Failed to deserialize ptrmap entry for page {} from ptrmap page {}",
target_page_num, ptrmap_pg_no
"Failed to deserialize ptrmap entry for page {target_page_num} from ptrmap page {ptrmap_pg_no}"
))),
}
}
@@ -406,8 +403,7 @@ impl Pager {
|| is_ptrmap_page(db_page_no_to_update, page_size)
{
return Err(LimboError::InternalError(format!(
"Cannot set ptrmap entry for page {}: it's a header/ptrmap page or invalid.",
db_page_no_to_update
"Cannot set ptrmap entry for page {db_page_no_to_update}: it's a header/ptrmap page or invalid."
)));
}

@@ -436,8 +432,7 @@ impl Pager {
Some(content) => content,
None => {
return Err(LimboError::InternalError(format!(
"Ptrmap page {} content not loaded",
ptrmap_pg_no
"Ptrmap page {ptrmap_pg_no} content not loaded"
)))
}
};
@@ -525,7 +520,7 @@ impl Pager {

// For now map allocated_page_id since we are not swapping it with root_page_num
match self.ptrmap_put(allocated_page_id, PtrmapType::RootPage, 0)? {
CursorResult::Ok(_) => Ok(CursorResult::Ok(allocated_page_id as u32)),
CursorResult::Ok(_) => Ok(CursorResult::Ok(allocated_page_id)),
CursorResult::IO => Ok(CursorResult::IO),
}
}
@@ -707,8 +702,7 @@ impl Pager {
}
Err(e) => {
return Err(LimboError::InternalError(format!(
"Failed to insert page into cache: {:?}",
e
"Failed to insert page into cache: {e:?}"
)))
}
}
@@ -729,8 +723,7 @@ impl Pager {
}
Err(e) => {
return Err(LimboError::InternalError(format!(
"Failed to insert page into cache: {:?}",
e
"Failed to insert page into cache: {e:?}"
)))
}
}
@@ -960,13 +953,14 @@ impl Pager {
checkpoint_result = res;
break;
}
Err(err) => panic!("error while clearing cache {}", err),
Err(err) => panic!("error while clearing cache {err}"),
}
}
// TODO: only clear cache of things that are really invalidated
self.page_cache.write().clear().map_err(|e| {
LimboError::InternalError(format!("Failed to clear page cache: {:?}", e))
})?;
self.page_cache
.write()
.clear()
.map_err(|e| LimboError::InternalError(format!("Failed to clear page cache: {e:?}")))?;
Ok(checkpoint_result)
}

@@ -984,8 +978,7 @@ impl Pager {

if page_id < 2 || page_id > header_accessor::get_database_size(self)? as usize {
return Err(LimboError::Corrupt(format!(
"Invalid page number {} for free operation",
page_id
"Invalid page number {page_id} for free operation"
)));
}

@@ -1094,10 +1087,7 @@ impl Pager {
let page_key = PageCacheKey::new(page1_ref.get().id);
let mut cache = self.page_cache.write();
cache.insert(page_key, page1_ref.clone()).map_err(|e| {
LimboError::InternalError(format!(
"Failed to insert page 1 into cache: {:?}",
e
))
LimboError::InternalError(format!("Failed to insert page 1 into cache: {e:?}"))
})?;
self.is_empty.store(DB_STATE_INITIALIZED, Ordering::SeqCst);
self.allocate_page1_state.replace(AllocatePage1State::Done);
@@ -1191,8 +1181,7 @@ impl Pager {
.insert_ignore_existing(page_key, page.clone())
.map_err(|e| {
|
||||
LimboError::InternalError(format!(
|
||||
"Failed to insert loaded page {} into cache: {:?}",
|
||||
id, e
|
||||
"Failed to insert loaded page {id} into cache: {e:?}"
|
||||
))
|
||||
})?;
|
||||
page.set_loaded();
|
||||
@@ -1427,14 +1416,12 @@ mod ptrmap {
|
||||
|| db_page_no_to_query > last_data_page_mapped
|
||||
{
|
||||
return Err(LimboError::InternalError(format!(
|
||||
"Page {} is not mapped by the data page range [{}, {}] of ptrmap page {}",
|
||||
db_page_no_to_query, first_data_page_mapped, last_data_page_mapped, ptrmap_page_no
|
||||
"Page {db_page_no_to_query} is not mapped by the data page range [{first_data_page_mapped}, {last_data_page_mapped}] of ptrmap page {ptrmap_page_no}"
|
||||
)));
|
||||
}
|
||||
if is_ptrmap_page(db_page_no_to_query, page_size) {
|
||||
return Err(LimboError::InternalError(format!(
|
||||
"Page {} is a pointer map page and should not have an entry calculated this way.",
|
||||
db_page_no_to_query
|
||||
"Page {db_page_no_to_query} is a pointer map page and should not have an entry calculated this way."
|
||||
)));
|
||||
}
|
||||
|
||||
@@ -1551,12 +1538,12 @@ mod ptrmap_tests {
|
||||
panic!("test_pager_setup: btree_create returned CursorResult::IO unexpectedly");
|
||||
}
|
||||
Err(e) => {
|
||||
panic!("test_pager_setup: btree_create failed: {:?}", e);
|
||||
panic!("test_pager_setup: btree_create failed: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return pager;
|
||||
pager
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1636,7 +1623,7 @@ mod ptrmap_tests {
|
||||
assert_eq!(get_ptrmap_offset_in_page(3, 2, page_size).unwrap(), 0);
|
||||
assert_eq!(
|
||||
get_ptrmap_offset_in_page(4, 2, page_size).unwrap(),
|
||||
1 * PTRMAP_ENTRY_SIZE
|
||||
PTRMAP_ENTRY_SIZE
|
||||
);
|
||||
assert_eq!(
|
||||
get_ptrmap_offset_in_page(5, 2, page_size).unwrap(),
|
||||
@@ -1650,7 +1637,7 @@ mod ptrmap_tests {
|
||||
assert_eq!(get_ptrmap_offset_in_page(106, 105, page_size).unwrap(), 0);
|
||||
assert_eq!(
|
||||
get_ptrmap_offset_in_page(107, 105, page_size).unwrap(),
|
||||
1 * PTRMAP_ENTRY_SIZE
|
||||
PTRMAP_ENTRY_SIZE
|
||||
);
|
||||
assert_eq!(
|
||||
get_ptrmap_offset_in_page(108, 105, page_size).unwrap(),
|
||||
|
||||
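Most hunks in this commit follow one mechanical pattern: migrating to Rust's inline format arguments (stabilized in Rust 1.58 and flagged by clippy's uninlined_format_args lint). A minimal before/after sketch of the pattern — the values here are illustrative, not from the crate:

    fn main() {
        let ptrmap_pg_no = 7;
        // Positional form: the argument list repeats each name after the string.
        let old = format!("Ptrmap page {} content not loaded", ptrmap_pg_no);
        // Captured form: the identifier is written directly inside the braces.
        let new = format!("Ptrmap page {ptrmap_pg_no} content not loaded");
        assert_eq!(old, new);
        // Format specifiers move inside the braces too: {e:?} for Debug, {fl:.14e} for precision.
    }
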
@@ -95,6 +95,11 @@ pub const DATABASE_HEADER_PAGE_ID: usize = 1;
 /// The minimum size of a cell in bytes.
 pub const MINIMUM_CELL_SIZE: usize = 4;

+pub const CELL_PTR_SIZE_BYTES: usize = 2;
+pub const INTERIOR_PAGE_HEADER_SIZE_BYTES: usize = 12;
+pub const LEAF_PAGE_HEADER_SIZE_BYTES: usize = 8;
+pub const LEFT_CHILD_PTR_SIZE_BYTES: usize = 4;
+
 /// The database header.
 /// The first 100 bytes of the database file comprise the database file header.
 /// The database file header is divided into fields as shown by the table below.
@@ -351,7 +356,7 @@ impl TryFrom<u8> for PageType {
 5 => Ok(Self::TableInterior),
 10 => Ok(Self::IndexLeaf),
 13 => Ok(Self::TableLeaf),
-_ => Err(LimboError::Corrupt(format!("Invalid page type: {}", value))),
+_ => Err(LimboError::Corrupt(format!("Invalid page type: {value}"))),
 }
 }
 }
@@ -382,7 +387,6 @@ impl Clone for PageContent {
 }
 }

-const CELL_POINTER_SIZE_BYTES: usize = 2;
 impl PageContent {
 pub fn new(offset: usize, buffer: Arc<RefCell<Buffer>>) -> Self {
 Self {
@@ -397,10 +401,7 @@ impl PageContent {
 }

 pub fn maybe_page_type(&self) -> Option<PageType> {
-match self.read_u8(0).try_into() {
-Ok(v) => Some(v),
-Err(_) => None, // this could be an overflow page
-}
+self.read_u8(0).try_into().ok() // this could be an overflow page
 }

 #[allow(clippy::mut_from_ref)]
@@ -475,8 +476,7 @@ impl PageContent {
 /// The size of the cell pointer array in bytes.
 /// 2 bytes per cell pointer
 pub fn cell_pointer_array_size(&self) -> usize {
-const CELL_POINTER_SIZE_BYTES: usize = 2;
-self.cell_count() * CELL_POINTER_SIZE_BYTES
+self.cell_count() * CELL_PTR_SIZE_BYTES
 }

 /// The start of the unallocated region.
@@ -504,10 +504,8 @@ impl PageContent {
 /// 8 bytes for leaf pages, 12 bytes for interior pages (due to storing rightmost child pointer)
 pub fn header_size(&self) -> usize {
 match self.page_type() {
-PageType::IndexInterior => 12,
-PageType::TableInterior => 12,
-PageType::IndexLeaf => 8,
-PageType::TableLeaf => 8,
+PageType::IndexInterior | PageType::TableInterior => INTERIOR_PAGE_HEADER_SIZE_BYTES,
+PageType::IndexLeaf | PageType::TableLeaf => LEAF_PAGE_HEADER_SIZE_BYTES,
 }
 }

@@ -544,12 +542,10 @@ impl PageContent {
 let ncells = self.cell_count();
 assert!(
 idx < ncells,
-"cell_get: idx out of bounds: idx={}, ncells={}",
-idx,
-ncells
+"cell_get: idx out of bounds: idx={idx}, ncells={ncells}"
 );
 let cell_pointer_array_start = self.header_size();
-let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
+let cell_pointer = cell_pointer_array_start + (idx * CELL_PTR_SIZE_BYTES);
 let cell_pointer = self.read_u16(cell_pointer) as usize;

 // SAFETY: this buffer is valid as long as the page is alive. We could store the page in the cell and do some lifetime magic
@@ -564,7 +560,7 @@ impl PageContent {
 debug_assert!(self.page_type() == PageType::TableInterior);
 let buf = self.as_ptr();
 let cell_pointer_array_start = self.header_size();
-let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
+let cell_pointer = cell_pointer_array_start + (idx * CELL_PTR_SIZE_BYTES);
 let cell_pointer = self.read_u16(cell_pointer) as usize;
 const LEFT_CHILD_PAGE_SIZE_BYTES: usize = 4;
 let (rowid, _) = read_varint(&buf[cell_pointer + LEFT_CHILD_PAGE_SIZE_BYTES..])?;
@@ -580,7 +576,7 @@ impl PageContent {
 );
 let buf = self.as_ptr();
 let cell_pointer_array_start = self.header_size();
-let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
+let cell_pointer = cell_pointer_array_start + (idx * CELL_PTR_SIZE_BYTES);
 let cell_pointer = self.read_u16(cell_pointer) as usize;
 u32::from_be_bytes([
 buf[cell_pointer],
@@ -596,7 +592,7 @@ impl PageContent {
 debug_assert!(self.page_type() == PageType::TableLeaf);
 let buf = self.as_ptr();
 let cell_pointer_array_start = self.header_size();
-let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
+let cell_pointer = cell_pointer_array_start + (idx * CELL_PTR_SIZE_BYTES);
 let cell_pointer = self.read_u16(cell_pointer) as usize;
 let mut pos = cell_pointer;
 let (_, nr) = read_varint(&buf[pos..])?;
@@ -622,7 +618,7 @@ impl PageContent {
 let ncells = self.cell_count();
 let (cell_pointer_array_start, _) = self.cell_pointer_array_offset_and_size();
 assert!(idx < ncells, "cell_get: idx out of bounds");
-let cell_pointer = cell_pointer_array_start + (idx * CELL_POINTER_SIZE_BYTES);
+let cell_pointer = cell_pointer_array_start + (idx * CELL_PTR_SIZE_BYTES);
 let cell_pointer = self.read_u16_no_offset(cell_pointer) as usize;
 let start = cell_pointer;
 let payload_overflow_threshold_max =
@@ -707,7 +703,7 @@ impl PageContent {
 let mut pc = self.first_freeblock() as usize;
 let mut block_num = 0;
 println!("---- Free List Blocks ----");
-println!("first freeblock pointer: {}", pc);
+println!("first freeblock pointer: {pc}");
 println!("cell content area: {}", self.cell_content_area());
 println!("fragmented bytes: {}", self.num_frag_free_bytes());

@@ -715,10 +711,7 @@ impl PageContent {
 let next = self.read_u16_no_offset(pc);
 let size = self.read_u16_no_offset(pc + 2);

-println!(
-"block {}: position={}, size={}, next={}",
-block_num, pc, size, next
-);
+println!("block {block_num}: position={pc}, size={size}, next={next}");
 pc = next as usize;
 block_num += 1;
 }
@@ -1378,7 +1371,7 @@ pub fn read_entire_wal_dumb(file: &Arc<dyn File>) -> Result<Arc<UnsafeCell<WalFi
 if !(MIN_PAGE_SIZE..=MAX_PAGE_SIZE).contains(&page_size_u32)
 || page_size_u32.count_ones() != 1
 {
-panic!("Invalid page size in WAL header: {}", page_size_u32);
+panic!("Invalid page size in WAL header: {page_size_u32}");
 }
 let page_size = page_size_u32 as usize;

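The maybe_page_type hunk above collapses a match over TryInto's Result into Result::ok(), which maps Ok(v) to Some(v) and any Err to None. A self-contained sketch of the same idiom — the byte values match the page-type arms shown in the diff, the error type is simplified:

    #[derive(Debug, PartialEq)]
    enum PageType { IndexInterior, TableInterior, IndexLeaf, TableLeaf }

    impl TryFrom<u8> for PageType {
        type Error = String;
        fn try_from(value: u8) -> Result<Self, Self::Error> {
            match value {
                2 => Ok(Self::IndexInterior),
                5 => Ok(Self::TableInterior),
                10 => Ok(Self::IndexLeaf),
                13 => Ok(Self::TableLeaf),
                _ => Err(format!("Invalid page type: {value}")),
            }
        }
    }

    // An overflow page has no valid type byte, so the failure becomes None.
    fn maybe_page_type(first_byte: u8) -> Option<PageType> {
        first_byte.try_into().ok()
    }

    fn main() {
        assert_eq!(maybe_page_type(13), Some(PageType::TableLeaf));
        assert_eq!(maybe_page_type(99), None);
    }
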
@@ -24,7 +24,7 @@ pub enum CollationSeq {
 impl CollationSeq {
 pub fn new(collation: &str) -> crate::Result<Self> {
 CollationSeq::from_str(collation).map_err(|_| {
-crate::LimboError::ParseError(format!("no such collation sequence: {}", collation))
+crate::LimboError::ParseError(format!("no such collation sequence: {collation}"))
 })
 }

@@ -38,14 +38,14 @@ impl Display for Plan {
 } => {
 for (plan, operator) in left {
 plan.fmt(f)?;
-writeln!(f, "{}", operator)?;
+writeln!(f, "{operator}")?;
 }
 right_most.fmt(f)?;
 if let Some(limit) = limit {
-writeln!(f, "LIMIT: {}", limit)?;
+writeln!(f, "LIMIT: {limit}")?;
 }
 if let Some(offset) = offset {
-writeln!(f, "OFFSET: {}", offset)?;
+writeln!(f, "OFFSET: {offset}")?;
 }
 if let Some(order_by) = order_by {
 writeln!(f, "ORDER BY:")?;
@@ -95,7 +95,7 @@ impl Display for SelectPlan {
 format!("{} AS {}", reference.table.get_name(), reference.identifier)
 };

-writeln!(f, "{}SCAN {}", indent, table_name)?;
+writeln!(f, "{indent}SCAN {table_name}")?;
 }
 Operation::Search(search) => match search {
 Search::RowidEq { .. } | Search::Seek { index: None, .. } => {
@@ -137,7 +137,7 @@ impl Display for DeletePlan {
 format!("{} AS {}", reference.table.get_name(), reference.identifier)
 };

-writeln!(f, "{}DELETE FROM {}", indent, table_name)?;
+writeln!(f, "{indent}DELETE FROM {table_name}")?;
 }
 Operation::Search { .. } => {
 panic!("DELETE plans should not contain search operations");
@@ -173,9 +173,9 @@ impl fmt::Display for UpdatePlan {
 };

 if i == 0 {
-writeln!(f, "{}UPDATE {}", indent, table_name)?;
+writeln!(f, "{indent}UPDATE {table_name}")?;
 } else {
-writeln!(f, "{}SCAN {}", indent, table_name)?;
+writeln!(f, "{indent}SCAN {table_name}")?;
 }
 }
 Operation::Search(search) => match search {
@@ -214,7 +214,7 @@ impl fmt::Display for UpdatePlan {
 }
 }
 if let Some(limit) = self.limit {
-writeln!(f, "LIMIT: {}", limit)?;
+writeln!(f, "LIMIT: {limit}")?;
 }
 if let Some(ret) = &self.returning {
 writeln!(f, "RETURNING:")?;
@@ -301,10 +301,10 @@ impl ToSqlString for Plan {
 ));
 }
 if let Some(limit) = &limit {
-ret.push(format!("LIMIT {}", limit));
+ret.push(format!("LIMIT {limit}"));
 }
 if let Some(offset) = &offset {
-ret.push(format!("OFFSET {}", offset));
+ret.push(format!("OFFSET {offset}"));
 }
 ret.join(" ")
 }
@@ -364,7 +364,7 @@ impl ToSqlString for SelectPlan {
 .map(|e| e.to_sql_string(context))
 .collect::<Vec<_>>()
 .join(", ");
-format!("({})", joined_value)
+format!("({joined_value})")
 })
 .collect::<Vec<_>>()
 .join(", ")
@@ -384,7 +384,7 @@ impl ToSqlString for SelectPlan {
 cols.expr.to_sql_string(context),
 cols.alias
 .as_ref()
-.map_or("".to_string(), |alias| format!(" AS {}", alias))
+.map_or("".to_string(), |alias| format!(" AS {alias}"))
 )
 })
 .collect::<Vec<_>>()
@@ -450,10 +450,10 @@ impl ToSqlString for SelectPlan {
 ));
 }
 if let Some(limit) = &self.limit {
-ret.push(format!("LIMIT {}", limit));
+ret.push(format!("LIMIT {limit}"));
 }
 if let Some(offset) = &self.offset {
-ret.push(format!("OFFSET {}", offset));
+ret.push(format!("OFFSET {offset}"));
 }
 ret.join(" ")
 }
@@ -493,10 +493,10 @@ impl ToSqlString for DeletePlan {
 ));
 }
 if let Some(limit) = &self.limit {
-ret.push(format!("LIMIT {}", limit));
+ret.push(format!("LIMIT {limit}"));
 }
 if let Some(offset) = &self.offset {
-ret.push(format!("OFFSET {}", offset));
+ret.push(format!("OFFSET {offset}"));
 }
 ret.join(" ")
 }
@@ -560,10 +560,10 @@ impl ToSqlString for UpdatePlan {
 ));
 }
 if let Some(limit) = &self.limit {
-ret.push(format!("LIMIT {}", limit));
+ret.push(format!("LIMIT {limit}"));
 }
 if let Some(offset) = &self.offset {
-ret.push(format!("OFFSET {}", offset));
+ret.push(format!("OFFSET {offset}"));
 }
 ret.join(" ")
 }

@@ -226,7 +226,7 @@ pub fn translate_create_index(
 p5: 0,
 });
 // Parse the schema table to get the index root page and add new index to Schema
-let parse_schema_where_clause = format!("name = '{}' AND type = 'index'", idx_name);
+let parse_schema_where_clause = format!("name = '{idx_name}' AND type = 'index'");
 program.emit_insn(Insn::ParseSchema {
 db: sqlite_schema_cursor_id,
 where_clause: Some(parse_schema_where_clause),

@@ -682,7 +682,7 @@ fn resolve_columns_for_insert<'a>(
 let table_index = table_columns.iter().position(|c| {
 c.name
 .as_ref()
-.map_or(false, |name| name.eq_ignore_ascii_case(&column_name))
+.is_some_and(|name| name.eq_ignore_ascii_case(&column_name))
 });

 let Some(table_index) = table_index else {
@@ -743,7 +743,7 @@ fn resolve_indicies_for_insert(
 .column
 .name
 .as_ref()
-.map_or(false, |name| name.eq_ignore_ascii_case(&target_name))
+.is_some_and(|name| name.eq_ignore_ascii_case(&target_name))
 }) {
 idx_map.columns.push((i, idx_col.clone()));
 idx_map.value_indicies.push(col_mapping.value_index);

@@ -98,7 +98,7 @@ pub fn find_best_access_method_for_join_order<'a>(
 match &candidate.index {
 Some(index) => index.columns[i].pos_in_table == order_target.0[i].column_no,
 None => {
-rowid_column_idx.map_or(false, |idx| idx == order_target.0[i].column_no)
+rowid_column_idx.is_some_and(|idx| idx == order_target.0[i].column_no)
 }
 }
 };
@@ -293,7 +293,7 @@ pub fn constraints_from_where_clause(

 // For each constraint we found, add a reference to it for each index that may be able to use it.
 for (i, constraint) in cs.constraints.iter().enumerate() {
-if rowid_alias_column.map_or(false, |idx| constraint.table_col_pos == idx) {
+if rowid_alias_column == Some(constraint.table_col_pos) {
 let rowid_candidate = cs
 .candidates
 .iter_mut()
@@ -325,7 +325,7 @@ pub fn constraints_from_where_clause(
 if candidate
 .index
 .as_ref()
-.map_or(false, |i| Arc::ptr_eq(index, i))
+.is_some_and(|i| Arc::ptr_eq(index, i))
 {
 Some(candidate)
 } else {
@@ -409,6 +409,6 @@ fn opposite_cmp_op(op: ast::Operator) -> ast::Operator {
 ast::Operator::GreaterEquals => ast::Operator::LessEquals,
 ast::Operator::Less => ast::Operator::Greater,
 ast::Operator::LessEquals => ast::Operator::GreaterEquals,
-_ => panic!("unexpected operator: {:?}", op),
+_ => panic!("unexpected operator: {op:?}"),
 }
 }

@@ -217,7 +217,7 @@ pub fn compute_best_join_order<'a>(
 let left_join_illegal_map = {
 let left_join_count = joined_tables
 .iter()
-.filter(|t| t.join_info.as_ref().map_or(false, |j| j.outer))
+.filter(|t| t.join_info.as_ref().is_some_and(|j| j.outer))
 .count();
 if left_join_count == 0 {
 None
@@ -227,7 +227,7 @@ pub fn compute_best_join_order<'a>(
 HashMap::with_capacity(left_join_count);
 for (i, _) in joined_tables.iter().enumerate() {
 for (j, joined_table) in joined_tables.iter().enumerate().skip(i + 1) {
-if joined_table.join_info.as_ref().map_or(false, |j| j.outer) {
+if joined_table.join_info.as_ref().is_some_and(|j| j.outer) {
 // bitwise OR the masks
 if let Some(illegal_lhs) = left_join_illegal_map.get_mut(&i) {
 illegal_lhs.add_table(j);
@@ -296,7 +296,7 @@ pub fn compute_best_join_order<'a>(
 is_outer: joined_tables[table_no]
 .join_info
 .as_ref()
-.map_or(false, |j| j.outer),
+.is_some_and(|j| j.outer),
 });
 }
 join_order.push(JoinOrderMember {
@@ -305,7 +305,7 @@ pub fn compute_best_join_order<'a>(
 is_outer: joined_tables[rhs_idx]
 .join_info
 .as_ref()
-.map_or(false, |j| j.outer),
+.is_some_and(|j| j.outer),
 });
 assert!(join_order.len() == subset_size);

@@ -406,7 +406,7 @@ pub fn compute_naive_left_deep_plan<'a>(
 .map(|(i, t)| JoinOrderMember {
 table_id: t.internal_id,
 original_idx: i,
-is_outer: t.join_info.as_ref().map_or(false, |j| j.outer),
+is_outer: t.join_info.as_ref().is_some_and(|j| j.outer),
 })
 .collect::<Vec<_>>();

@@ -828,7 +828,7 @@ mod tests {
 .iter()
 .for_each(|table_name| {
 // add primary key index called sqlite_autoindex_<tablename>_1
-let index_name = format!("sqlite_autoindex_{}_1", table_name);
+let index_name = format!("sqlite_autoindex_{table_name}_1");
 let index = Arc::new(Index {
 name: index_name,
 table_name: table_name.to_string(),
@@ -1063,10 +1063,7 @@ mod tests {
 // Create fact table with foreign keys to all dimension tables
 let mut fact_columns = vec![_create_column_rowid_alias("id")];
 for i in 0..NUM_DIM_TABLES {
-fact_columns.push(_create_column_of_type(
-&format!("dim{}_id", i),
-Type::Integer,
-));
+fact_columns.push(_create_column_of_type(&format!("dim{i}_id"), Type::Integer));
 }
 let fact_table = _create_btree_table("fact", fact_columns);

@@ -1074,7 +1071,7 @@ mod tests {
 let dim_tables: Vec<_> = (0..NUM_DIM_TABLES)
 .map(|i| {
 _create_btree_table(
-&format!("dim{}", i),
+&format!("dim{i}"),
 vec![
 _create_column_rowid_alias("id"),
 _create_column_of_type("value", Type::Integer),

@@ -231,7 +231,7 @@ fn optimize_table_access(
 is_outer: joined_tables[table_number]
 .join_info
 .as_ref()
-.map_or(false, |join_info| join_info.outer),
+.is_some_and(|join_info| join_info.outer),
 })
 .collect();

@@ -334,8 +334,7 @@ fn optimize_table_access(
 }
 assert!(
 constraint_refs.len() == 1,
-"expected exactly one constraint for rowid seek, got {:?}",
-constraint_refs
+"expected exactly one constraint for rowid seek, got {constraint_refs:?}"
 );
 let constraint = &constraints_per_table[table_idx].constraints
 [constraint_refs[0].constraint_vec_pos];
@@ -467,14 +466,10 @@ pub trait Optimizable {
 // return a [ConstantPredicate].
 fn check_always_true_or_false(&self) -> Result<Option<AlwaysTrueOrFalse>>;
 fn is_always_true(&self) -> Result<bool> {
-Ok(self
-.check_always_true_or_false()?
-.map_or(false, |c| c == AlwaysTrueOrFalse::AlwaysTrue))
+Ok(self.check_always_true_or_false()? == Some(AlwaysTrueOrFalse::AlwaysTrue))
 }
 fn is_always_false(&self) -> Result<bool> {
-Ok(self
-.check_always_true_or_false()?
-.map_or(false, |c| c == AlwaysTrueOrFalse::AlwaysFalse))
+Ok(self.check_always_true_or_false()? == Some(AlwaysTrueOrFalse::AlwaysFalse))
 }
 fn is_constant(&self, resolver: &Resolver<'_>) -> bool;
 fn is_nonnull(&self, tables: &TableReferences) -> bool;
@@ -499,13 +494,13 @@ impl Optimizable for ast::Expr {
 else_expr,
 ..
 } => {
-base.as_ref().map_or(true, |base| base.is_nonnull(tables))
+base.as_ref().is_none_or(|base| base.is_nonnull(tables))
 && when_then_pairs
 .iter()
 .all(|(_, then)| then.is_nonnull(tables))
 && else_expr
 .as_ref()
-.map_or(true, |else_expr| else_expr.is_nonnull(tables))
+.is_none_or(|else_expr| else_expr.is_nonnull(tables))
 }
 Expr::Cast { expr, .. } => expr.is_nonnull(tables),
 Expr::Collate(expr, _) => expr.is_nonnull(tables),
@@ -536,7 +531,7 @@ impl Optimizable for ast::Expr {
 lhs.is_nonnull(tables)
 && rhs
 .as_ref()
-.map_or(true, |rhs| rhs.iter().all(|rhs| rhs.is_nonnull(tables)))
+.is_none_or(|rhs| rhs.iter().all(|rhs| rhs.is_nonnull(tables)))
 }
 Expr::InSelect { .. } => false,
 Expr::InTable { .. } => false,
@@ -582,14 +577,13 @@ impl Optimizable for ast::Expr {
 when_then_pairs,
 else_expr,
 } => {
-base.as_ref()
-.map_or(true, |base| base.is_constant(resolver))
+base.as_ref().is_none_or(|base| base.is_constant(resolver))
 && when_then_pairs.iter().all(|(when, then)| {
 when.is_constant(resolver) && then.is_constant(resolver)
 })
 && else_expr
 .as_ref()
-.map_or(true, |else_expr| else_expr.is_constant(resolver))
+.is_none_or(|else_expr| else_expr.is_constant(resolver))
 }
 Expr::Cast { expr, .. } => expr.is_constant(resolver),
 Expr::Collate(expr, _) => expr.is_constant(resolver),
@@ -604,9 +598,9 @@ impl Optimizable for ast::Expr {
 return false;
 };
 func.is_deterministic()
-&& args.as_ref().map_or(true, |args| {
-args.iter().all(|arg| arg.is_constant(resolver))
-})
+&& args
+.as_ref()
+.is_none_or(|args| args.iter().all(|arg| arg.is_constant(resolver)))
 }
 Expr::FunctionCallStar { .. } => false,
 Expr::Id(_) => panic!("Id should have been rewritten as Column"),
@@ -616,7 +610,7 @@ impl Optimizable for ast::Expr {
 lhs.is_constant(resolver)
 && rhs
 .as_ref()
-.map_or(true, |rhs| rhs.iter().all(|rhs| rhs.is_constant(resolver)))
+.is_none_or(|rhs| rhs.iter().all(|rhs| rhs.is_constant(resolver)))
 }
 Expr::InSelect { .. } => {
 false // might be constant, too annoying to check subqueries etc. implement later
@@ -630,7 +624,7 @@ impl Optimizable for ast::Expr {
 && rhs.is_constant(resolver)
 && escape
 .as_ref()
-.map_or(true, |escape| escape.is_constant(resolver))
+.is_none_or(|escape| escape.is_constant(resolver))
 }
 Expr::Literal(_) => true,
 Expr::Name(_) => false,
@@ -639,9 +633,7 @@ impl Optimizable for ast::Expr {
 Expr::Qualified(_, _) => {
 panic!("Qualified should have been rewritten as Column")
 }
-Expr::Raise(_, expr) => expr
-.as_ref()
-.map_or(true, |expr| expr.is_constant(resolver)),
+Expr::Raise(_, expr) => expr.as_ref().is_none_or(|expr| expr.is_constant(resolver)),
 Expr::Subquery(_) => false,
 Expr::Unary(_, expr) => expr.is_constant(resolver),
 Expr::Variable(_) => false,
@@ -816,7 +808,7 @@ fn ephemeral_index_build(
 has_rowid: table_reference
 .table
 .btree()
-.map_or(false, |btree| btree.has_rowid),
+.is_some_and(|btree| btree.has_rowid),
 };

 ephemeral_index
@@ -1322,7 +1314,7 @@ pub fn rewrite_expr(top_level_expr: &mut ast::Expr, param_idx: &mut usize) -> Re
 if var.is_empty() {
 // rewrite anonymous variables only, ensure that the `param_idx` starts at 1 and
 // all the expressions are rewritten in the order they come in the statement
-*expr = ast::Expr::Variable(format!("{}{param_idx}", PARAM_PREFIX));
+*expr = ast::Expr::Variable(format!("{PARAM_PREFIX}{param_idx}"));
 *param_idx += 1;
 }
 }

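The other recurring cleanup, visible throughout the optimizer and planner hunks, swaps Option::map_or(false, f) for is_some_and(f) and map_or(true, f) for is_none_or(f) (the latter stabilized in Rust 1.82). The pairs are exact equivalents; a minimal sketch with illustrative values:

    fn main() {
        let name: Option<&str> = Some("rowid");

        // map_or with a boolean default...
        let matches = name.map_or(false, |n| n.eq_ignore_ascii_case("ROWID"));
        let nonempty_or_absent = name.map_or(true, |n| !n.is_empty());

        // ...reads as the named predicate it actually is.
        assert_eq!(matches, name.is_some_and(|n| n.eq_ignore_ascii_case("ROWID")));
        assert_eq!(nonempty_or_absent, name.is_none_or(|n| !n.is_empty()));
    }
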
@@ -562,7 +562,7 @@ pub fn select_star(tables: &[JoinedTable], out_columns: &mut Vec<ResultSetColumn
 !using_cols.iter().any(|using_col| {
 col.name
 .as_ref()
-.map_or(false, |name| name.eq_ignore_ascii_case(&using_col.0))
+.is_some_and(|name| name.eq_ignore_ascii_case(&using_col.0))
 })
 } else {
 true
@@ -811,10 +811,7 @@ impl TableReferences {
 {
 outer_query_ref.mark_column_used(column_index);
 } else {
-panic!(
-"table with internal id {} not found in table references",
-internal_id
-);
+panic!("table with internal id {internal_id} not found in table references");
 }
 }

@@ -964,7 +961,7 @@ impl JoinedTable {
 match &self.table {
 Table::BTree(btree) => {
 let use_covering_index = self.utilizes_covering_index();
-let index_is_ephemeral = index.map_or(false, |index| index.ephemeral);
+let index_is_ephemeral = index.is_some_and(|index| index.ephemeral);
 let table_not_required =
 OperationMode::SELECT == mode && use_covering_index && !index_is_ephemeral;
 let table_cursor_id = if table_not_required {

@@ -135,7 +135,7 @@ pub fn bind_column_references(
 let col_idx = joined_table.table.columns().iter().position(|c| {
 c.name
 .as_ref()
-.map_or(false, |name| name.eq_ignore_ascii_case(&normalized_id))
+.is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id))
 });
 if col_idx.is_some() {
 if match_result.is_some() {
@@ -163,7 +163,7 @@ pub fn bind_column_references(
 let col_idx = outer_ref.table.columns().iter().position(|c| {
 c.name
 .as_ref()
-.map_or(false, |name| name.eq_ignore_ascii_case(&normalized_id))
+.is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id))
 });
 if col_idx.is_some() {
 if match_result.is_some() {
@@ -191,7 +191,7 @@ pub fn bind_column_references(
 for result_column in result_columns.iter() {
 if result_column
 .name(referenced_tables)
-.map_or(false, |name| name.eq_ignore_ascii_case(&normalized_id))
+.is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id))
 {
 *expr = result_column.expr.clone();
 return Ok(());
@@ -218,7 +218,7 @@ pub fn bind_column_references(
 let col_idx = tbl.columns().iter().position(|c| {
 c.name
 .as_ref()
-.map_or(false, |name| name.eq_ignore_ascii_case(&normalized_id))
+.is_some_and(|name| name.eq_ignore_ascii_case(&normalized_id))
 });
 let Some(col_idx) = col_idx else {
 crate::bail_parse_error!("Column {} not found", normalized_id);
@@ -340,7 +340,7 @@ fn parse_from_clause_table(
 ast::As::As(id) => id.0.clone(),
 ast::As::Elided(id) => id.0.clone(),
 })
-.unwrap_or(format!("subquery_{}", cur_table_index));
+.unwrap_or(format!("subquery_{cur_table_index}"));
 table_references.add_joined_table(JoinedTable::new_subquery(
 identifier,
 subplan,
@@ -808,7 +808,7 @@ fn parse_join(
 .find(|(_, col)| {
 col.name
 .as_ref()
-.map_or(false, |name| *name == name_normalized)
+.is_some_and(|name| *name == name_normalized)
 })
 .map(|(idx, col)| (left_table_idx, left_table.internal_id, idx, col));
 if left_col.is_some() {
@@ -824,7 +824,7 @@ fn parse_join(
 let right_col = right_table.columns().iter().enumerate().find(|(_, col)| {
 col.name
 .as_ref()
-.map_or(false, |name| *name == name_normalized)
+.is_some_and(|name| *name == name_normalized)
 });
 if right_col.is_none() {
 crate::bail_parse_error!(

@@ -263,7 +263,7 @@ fn query_pragma(
 Some(ast::Expr::Name(name)) => {
 let mode_name = normalize_ident(&name.0);
 CheckpointMode::from_str(&mode_name).map_err(|e| {
-LimboError::ParseError(format!("Unknown Checkpoint Mode: {}", e))
+LimboError::ParseError(format!("Unknown Checkpoint Mode: {e}"))
 })?
 }
 _ => CheckpointMode::Passive,
@@ -458,9 +458,7 @@ fn update_cache_size(

 pager
 .change_page_cache_size(final_cache_size)
-.map_err(|e| {
-LimboError::InternalError(format!("Failed to update page cache size: {}", e))
-})?;
+.map_err(|e| LimboError::InternalError(format!("Failed to update page cache size: {e}")))?;

 Ok(())
 }

@@ -152,7 +152,7 @@ pub fn translate_create_table(
 p5: 0,
 });
 // TODO: remove format, it sucks for performance but is convenient
-let parse_schema_where_clause = format!("tbl_name = '{}' AND type != 'trigger'", tbl_name);
+let parse_schema_where_clause = format!("tbl_name = '{tbl_name}' AND type != 'trigger'");
 program.emit_insn(Insn::ParseSchema {
 db: sqlite_schema_cursor_id,
 where_clause: Some(parse_schema_where_clause),
@@ -506,7 +506,7 @@ fn create_vtable_body_to_str(vtab: &CreateVirtualTable, module: Rc<VTabImpl>) ->
 if args.is_empty() {
 String::new()
 } else {
-format!("({})", args)
+format!("({args})")
 },
 vtab.tbl_name.name.0,
 vtab_args
@@ -602,7 +602,7 @@ pub fn translate_create_virtual_table(
 value: schema.schema_version as i32 + 1,
 p5: 0,
 });
-let parse_schema_where_clause = format!("tbl_name = '{}' AND type != 'trigger'", table_name);
+let parse_schema_where_clause = format!("tbl_name = '{table_name}' AND type != 'trigger'");
 program.emit_insn(Insn::ParseSchema {
 db: sqlite_schema_cursor_id,
 where_clause: Some(parse_schema_where_clause),

@@ -74,7 +74,7 @@ pub fn translate_select(
 .sum::<usize>(),
 }
 }
-other => panic!("plan is not a SelectPlan: {:?}", other),
+other => panic!("plan is not a SelectPlan: {other:?}"),
 };

 program.extend(&opts);
@@ -148,7 +148,7 @@ pub fn prepare_select_plan(
 let (limit, offset) = select.limit.map_or(Ok((None, None)), |l| parse_limit(&l))?;

 // FIXME: handle OFFSET for compound selects
-if offset.map_or(false, |o| o > 0) {
+if offset.is_some_and(|o| o > 0) {
 crate::bail_parse_error!("OFFSET is not supported for compound SELECTs yet");
 }
 // FIXME: handle ORDER BY for compound selects
@@ -257,7 +257,7 @@ fn prepare_one_select_plan(
 .map(|(i, t)| JoinOrderMember {
 table_id: t.internal_id,
 original_idx: i,
-is_outer: t.join_info.as_ref().map_or(false, |j| j.outer),
+is_outer: t.join_info.as_ref().is_some_and(|j| j.outer),
 })
 .collect(),
 table_references,

@@ -39,7 +39,7 @@ impl Display for ValueType {
 Self::Text => "TEXT",
 Self::Error => "ERROR",
 };
-write!(f, "{}", value)
+write!(f, "{value}")
 }
 }

@@ -132,7 +132,7 @@ fn float_to_string<S>(float: &f64, serializer: S) -> Result<S::Ok, S::Error>
 where
 S: serde::Serializer,
 {
-serializer.serialize_str(&format!("{}", float))
+serializer.serialize_str(&format!("{float}"))
 }

 #[cfg(feature = "serde")]
@@ -317,7 +317,7 @@ impl Display for Value {
 match self {
 Self::Null => write!(f, ""),
 Self::Integer(i) => {
-write!(f, "{}", i)
+write!(f, "{i}")
 }
 Self::Float(fl) => {
 let fl = *fl;
@@ -337,7 +337,7 @@ impl Display for Value {

 // handle scientific notation without trailing zeros
 if (fl.abs() < 1e-4 || fl.abs() >= 1e15) && fl != 0.0 {
-let sci_notation = format!("{:.14e}", fl);
+let sci_notation = format!("{fl:.14e}");
 let parts: Vec<&str> = sci_notation.split('e').collect();

 if parts.len() == 2 {
@@ -358,7 +358,7 @@ impl Display for Value {
 let trimmed_mantissa = if fraction.is_empty() {
 whole.to_string()
 } else {
-format!("{}.{}", whole, fraction)
+format!("{whole}.{fraction}")
 };
 let (prefix, exponent) =
 if let Some(stripped_exponent) = exponent.strip_prefix('-') {
@@ -366,12 +366,12 @@ impl Display for Value {
 } else {
 ("+", exponent)
 };
-return write!(f, "{}e{}{}", trimmed_mantissa, prefix, exponent);
+return write!(f, "{trimmed_mantissa}e{prefix}{exponent}");
 }
 }

 // fallback
-return write!(f, "{}", sci_notation);
+return write!(f, "{sci_notation}");
 }

 // handle floating point max size is 15.
@@ -381,15 +381,15 @@ impl Display for Value {
 if (fl - rounded).abs() < 1e-14 {
 // if we very close to integer trim decimal part to 1 digit
 if rounded == rounded as i64 as f64 {
-return write!(f, "{:.1}", fl);
+return write!(f, "{fl:.1}");
 }
 }

-let fl_str = format!("{}", fl);
+let fl_str = format!("{fl}");
 let splitted = fl_str.split('.').collect::<Vec<&str>>();
 // fallback
 if splitted.len() != 2 {
-return write!(f, "{:.14e}", fl);
+return write!(f, "{fl:.14e}");
 }

 let first_part = if fl < 0.0 {
@@ -411,7 +411,7 @@ impl Display for Value {
 };
 // float that have integer part > 15 converted to sci notation
 if reminder < 0 {
-return write!(f, "{:.14e}", fl);
+return write!(f, "{fl:.14e}");
 }
 // trim decimal part to reminder or self len so total digits is 15;
 let mut fl = format!("{:.*}", second.len().min(reminder as usize), fl);
@@ -419,7 +419,7 @@ impl Display for Value {
 while fl.ends_with('0') {
 fl.pop();
 }
-write!(f, "{}", fl)
+write!(f, "{fl}")
 }
 Self::Text(s) => {
 write!(f, "{}", s.as_str())
@@ -969,7 +969,7 @@ impl ImmutableRecord {
 SerialTypeKind::I32 => writer.extend_from_slice(&(*i as i32).to_be_bytes()),
 SerialTypeKind::I48 => writer.extend_from_slice(&i.to_be_bytes()[2..]), // remove 2 most significant bytes
 SerialTypeKind::I64 => writer.extend_from_slice(&i.to_be_bytes()),
-other => panic!("Serial type is not an integer: {:?}", other),
+other => panic!("Serial type is not an integer: {other:?}"),
 }
 }
 Value::Float(f) => {
@@ -1134,8 +1134,8 @@ impl Display for RefValue {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 match self {
 Self::Null => write!(f, "NULL"),
-Self::Integer(i) => write!(f, "{}", i),
-Self::Float(fl) => write!(f, "{:?}", fl),
+Self::Integer(i) => write!(f, "{i}"),
+Self::Float(fl) => write!(f, "{fl:?}"),
 Self::Text(s) => write!(f, "{}", s.as_str()),
 Self::Blob(b) => write!(f, "{}", String::from_utf8_lossy(b.to_slice())),
 }
@@ -1198,7 +1198,7 @@ pub struct IndexKeySortOrder(u64);

 impl IndexKeySortOrder {
 pub fn get_sort_order_for_col(&self, column_idx: usize) -> SortOrder {
-assert!(column_idx < 64, "column index out of range: {}", column_idx);
+assert!(column_idx < 64, "column index out of range: {column_idx}");
 match self.0 & (1 << column_idx) {
 0 => SortOrder::Asc,
 _ => SortOrder::Desc,
@@ -1442,10 +1442,7 @@ impl TryFrom<u64> for SerialType {

 fn try_from(uint: u64) -> Result<Self> {
 if uint == 10 || uint == 11 {
-return Err(LimboError::Corrupt(format!(
-"Invalid serial type: {}",
-uint
-)));
+return Err(LimboError::Corrupt(format!("Invalid serial type: {uint}")));
 }
 Ok(SerialType(uint))
 }
@@ -1505,7 +1502,7 @@ impl Record {
 // if( nVarint<sqlite3VarintLen(nHdr) ) nHdr++;
 }
 assert!(header_size <= 126);
-header_bytes_buf.extend(std::iter::repeat(0).take(9));
+header_bytes_buf.extend(std::iter::repeat_n(0, 9));
 let n = write_varint(header_bytes_buf.as_mut_slice(), header_size as u64);
 header_bytes_buf.truncate(n);
 buf.splice(initial_i..initial_i, header_bytes_buf.iter().cloned());
@@ -1513,7 +1510,7 @@ impl Record {
 }

 pub enum Cursor {
-BTree(BTreeCursor),
+BTree(Box<BTreeCursor>),
 Pseudo(PseudoCursor),
 Sorter(Sorter),
 Virtual(VirtualTableCursor),
@@ -1521,7 +1518,7 @@ pub enum Cursor {

 impl Cursor {
 pub fn new_btree(cursor: BTreeCursor) -> Self {
-Self::BTree(cursor)
+Self::BTree(Box::new(cursor))
 }

 pub fn new_pseudo(cursor: PseudoCursor) -> Self {

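Boxing BTreeCursor inside Cursor::BTree (the last hunk above) is the standard fix for clippy's large_enum_variant: an enum is as big as its biggest variant, so one oversized payload inflates every Cursor, including the cheap Pseudo and Sorter ones. A sketch with stand-in types of assumed sizes:

    // Stand-ins: the real BTreeCursor is assumed much larger than its siblings.
    struct BigCursor { state: [u64; 32] }
    struct SmallCursor { pos: usize }

    enum Unboxed { Big(BigCursor), Small(SmallCursor) }
    enum Boxed { Big(Box<BigCursor>), Small(SmallCursor) }

    fn main() {
        // The boxed variant costs one pointer, so the whole enum shrinks.
        assert!(std::mem::size_of::<Boxed>() < std::mem::size_of::<Unboxed>());
    }

The trade-off is one extra heap allocation and pointer chase per B-tree cursor, which is cheap next to the work a cursor actually does.
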
21
core/util.rs
@@ -42,10 +42,10 @@ pub const PRIMARY_KEY_AUTOMATIC_INDEX_NAME_PREFIX: &str = "sqlite_autoindex_";
 /// Unparsed index that comes from a sql query, i.e not an automatic index
 ///
 /// CREATE INDEX idx ON table_name(sql)
-struct UnparsedFromSqlIndex {
-table_name: String,
-root_page: usize,
-sql: String,
+pub struct UnparsedFromSqlIndex {
+pub table_name: String,
+pub root_page: usize,
+pub sql: String,
 }

 pub fn parse_schema_rows(
@@ -188,7 +188,7 @@ pub fn check_ident_equivalency(ident1: &str, ident2: &str) -> bool {
 strip_quotes(ident1).eq_ignore_ascii_case(strip_quotes(ident2))
 }

-fn module_name_from_sql(sql: &str) -> Result<&str> {
+pub fn module_name_from_sql(sql: &str) -> Result<&str> {
 if let Some(start) = sql.find("USING") {
 let start = start + 6;
 // stop at the first space, semicolon, or parenthesis
@@ -206,7 +206,7 @@ fn module_name_from_sql(sql: &str) -> Result<&str> {

 // CREATE VIRTUAL TABLE table_name USING module_name(arg1, arg2, ...);
 // CREATE VIRTUAL TABLE table_name USING module_name;
-fn module_args_from_sql(sql: &str) -> Result<Vec<turso_ext::Value>> {
+pub fn module_args_from_sql(sql: &str) -> Result<Vec<turso_ext::Value>> {
 if !sql.contains('(') {
 return Ok(vec![]);
 }
@@ -666,8 +666,7 @@ impl OpenMode {
 "memory" => Ok(OpenMode::Memory),
 "rwc" => Ok(OpenMode::ReadWriteCreate),
 _ => Err(LimboError::InvalidArgument(format!(
-"Invalid mode: '{}'. Expected one of 'ro', 'rw', 'memory', 'rwc'",
-s
+"Invalid mode: '{s}'. Expected one of 'ro', 'rw', 'memory', 'rwc'"
 ))),
 }
 }
@@ -728,8 +727,7 @@ impl<'a> OpenOptions<'a> {
 // sqlite allows only `localhost` or empty authority.
 if !(authority.is_empty() || authority == "localhost") {
 return Err(LimboError::InvalidArgument(format!(
-"Invalid authority '{}'. Only '' or 'localhost' allowed.",
-authority
+"Invalid authority '{authority}'. Only '' or 'localhost' allowed."
 )));
 }
 opts.authority = if authority.is_empty() {
@@ -1049,8 +1047,7 @@ pub fn parse_string(expr: &Expr) -> Result<String> {
 Ok(s[1..s.len() - 1].to_string())
 }
 _ => Err(LimboError::InvalidArgument(format!(
-"string parameter expected, got {:?} instead",
-expr
+"string parameter expected, got {expr:?} instead"
 ))),
 }
 }

@@ -225,7 +225,7 @@ impl ProgramBuilder {
 pub fn constant_span_is_open(&self) -> bool {
 self.constant_spans
 .last()
-.map_or(false, |(_, end)| *end == usize::MAX)
+.is_some_and(|(_, end)| *end == usize::MAX)
 }

 /// Get the index of the next constant span.
@@ -272,7 +272,7 @@ impl ProgramBuilder {
 !self
 .cursor_ref
 .iter()
-.any(|(k, _)| k.as_ref().map_or(false, |k| k.equals(&key))),
+.any(|(k, _)| k.as_ref().is_some_and(|k| k.equals(&key))),
 "duplicate cursor key"
 );
 self._alloc_cursor_id(Some(key), cursor_type)
@@ -456,7 +456,7 @@ impl ProgramBuilder {
 /// reordering the emitted instructions.
 #[inline]
 pub fn preassign_label_to_next_insn(&mut self, label: BranchOffset) {
-assert!(label.is_label(), "BranchOffset {:?} is not a label", label);
+assert!(label.is_label(), "BranchOffset {label:?} is not a label");
 self._resolve_label(label, self.offset().sub(1u32), JumpTarget::AfterThisInsn);
 }

@@ -492,10 +492,7 @@ impl ProgramBuilder {
 let Some(Some((to_offset, target))) =
 self.label_to_resolved_offset.get(*label as usize)
 else {
-panic!(
-"Reference to undefined or unresolved label in {}: {}",
-insn_name, label
-);
+panic!("Reference to undefined or unresolved label in {insn_name}: {label}");
 };
 *pc = BranchOffset::Offset(
 to_offset
@@ -707,12 +704,12 @@ impl ProgramBuilder {
 pub fn resolve_cursor_id_safe(&self, key: &CursorKey) -> Option<CursorID> {
 self.cursor_ref
 .iter()
-.position(|(k, _)| k.as_ref().map_or(false, |k| k.equals(key)))
+.position(|(k, _)| k.as_ref().is_some_and(|k| k.equals(key)))
 }

 pub fn resolve_cursor_id(&self, key: &CursorKey) -> CursorID {
 self.resolve_cursor_id_safe(key)
-.unwrap_or_else(|| panic!("Cursor not found: {:?}", key))
+.unwrap_or_else(|| panic!("Cursor not found: {key:?}"))
 }

 pub fn set_collation(&mut self, c: Option<(CollationSeq, bool)>) {

@@ -1129,8 +1129,7 @@ pub fn op_vupdate(
 Err(e) => {
 // virtual table update failed
 return Err(LimboError::ExtensionError(format!(
-"Virtual table update failed: {}",
-e
+"Virtual table update failed: {e}"
 )));
 }
 }
@@ -1572,20 +1571,17 @@ pub fn halt(
 0 => {}
 SQLITE_CONSTRAINT_PRIMARYKEY => {
 return Err(LimboError::Constraint(format!(
-"UNIQUE constraint failed: {} (19)",
-description
+"UNIQUE constraint failed: {description} (19)"
 )));
 }
 SQLITE_CONSTRAINT_NOTNULL => {
 return Err(LimboError::Constraint(format!(
-"NOT NULL constraint failed: {} (19)",
-description
+"NOT NULL constraint failed: {description} (19)"
 )));
 }
 _ => {
 return Err(LimboError::Constraint(format!(
-"undocumented halt error code {}",
-description
+"undocumented halt error code {description}"
 )));
 }
 }
@@ -1620,20 +1616,17 @@ pub fn op_halt(
 0 => {}
 SQLITE_CONSTRAINT_PRIMARYKEY => {
 return Err(LimboError::Constraint(format!(
-"UNIQUE constraint failed: {} (19)",
-description
+"UNIQUE constraint failed: {description} (19)"
 )));
 }
 SQLITE_CONSTRAINT_NOTNULL => {
 return Err(LimboError::Constraint(format!(
-"NOTNULL constraint failed: {} (19)",
-description
+"NOTNULL constraint failed: {description} (19)"
 )));
 }
 _ => {
 return Err(LimboError::Constraint(format!(
-"undocumented halt error code {}",
-description
+"undocumented halt error code {description}"
 )));
 }
 }
@@ -1855,7 +1848,7 @@ pub fn op_return(
 if let Value::Integer(pc) = state.registers[*return_reg].get_owned_value() {
 let pc: u32 = (*pc)
 .try_into()
-.unwrap_or_else(|_| panic!("Return register is negative: {}", pc));
+.unwrap_or_else(|_| panic!("Return register is negative: {pc}"));
 state.pc = pc;
 } else {
 if !*can_fallthrough {
@@ -2178,8 +2171,7 @@ pub fn op_seek(
 };
 assert!(
 target_pc.is_offset(),
-"target_pc should be an offset, is: {:?}",
-target_pc
+"target_pc should be an offset, is: {target_pc:?}"
 );
 let eq_only = match insn {
 Insn::SeekGE { eq_only, .. } | Insn::SeekLE { eq_only, .. } => *eq_only,
@@ -2567,28 +2559,8 @@ pub fn op_agg_step(
 AggFunc::Count | AggFunc::Count0 => {
 Register::Aggregate(AggContext::Count(Value::Integer(0)))
 }
-AggFunc::Max => {
-let col = state.registers[*col].get_owned_value();
-match col {
-Value::Integer(_) => Register::Aggregate(AggContext::Max(None)),
-Value::Float(_) => Register::Aggregate(AggContext::Max(None)),
-Value::Text(_) => Register::Aggregate(AggContext::Max(None)),
-_ => {
-unreachable!();
-}
-}
-}
-AggFunc::Min => {
-let col = state.registers[*col].get_owned_value();
-match col {
-Value::Integer(_) => Register::Aggregate(AggContext::Min(None)),
-Value::Float(_) => Register::Aggregate(AggContext::Min(None)),
-Value::Text(_) => Register::Aggregate(AggContext::Min(None)),
-_ => {
-unreachable!();
-}
-}
-}
+AggFunc::Max => Register::Aggregate(AggContext::Max(None)),
+AggFunc::Min => Register::Aggregate(AggContext::Min(None)),
 AggFunc::GroupConcat | AggFunc::StringAgg => {
 Register::Aggregate(AggContext::GroupConcat(Value::build_text("")))
 }
@@ -2682,28 +2654,9 @@ pub fn op_agg_step(
 unreachable!();
 };

-match (acc.as_mut(), col.get_owned_value()) {
-(None, value) => {
-*acc = Some(value.clone());
-}
-(Some(Value::Integer(ref mut current_max)), Value::Integer(value)) => {
-if *value > *current_max {
-*current_max = *value;
-}
-}
-(Some(Value::Float(ref mut current_max)), Value::Float(value)) => {
-if *value > *current_max {
-*current_max = *value;
-}
-}
-(Some(Value::Text(ref mut current_max)), Value::Text(value)) => {
-if value.value > current_max.value {
-*current_max = value.clone();
-}
-}
-_ => {
-eprintln!("Unexpected types in max aggregation");
-}
-}
+let new_value = col.get_owned_value();
+if *new_value != Value::Null && acc.as_ref().is_none_or(|acc| new_value > acc) {
+*acc = Some(new_value.clone());
+}
 }
 AggFunc::Min => {
@@ -2718,28 +2671,10 @@ pub fn op_agg_step(
 unreachable!();
 };

-match (acc.as_mut(), col.get_owned_value()) {
-(None, value) => {
-*acc.borrow_mut() = Some(value.clone());
-}
-(Some(Value::Integer(ref mut current_min)), Value::Integer(value)) => {
-if *value < *current_min {
-*current_min = *value;
-}
-}
-(Some(Value::Float(ref mut current_min)), Value::Float(value)) => {
-if *value < *current_min {
-*current_min = *value;
-}
-}
-(Some(Value::Text(ref mut current_min)), Value::Text(text)) => {
-if text.value < current_min.value {
-*current_min = text.clone();
-}
-}
-_ => {
-eprintln!("Unexpected types in min aggregation");
-}
-}
+let new_value = col.get_owned_value();
+
+if *new_value != Value::Null && acc.as_ref().is_none_or(|acc| new_value < acc) {
+*acc = Some(new_value.clone());
+}
 }
 AggFunc::GroupConcat | AggFunc::StringAgg => {
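The rewritten Max/Min accumulators above replace four per-type match arms with a single ordered comparison: skip NULLs, then take the new value whenever the accumulator is still empty (is_none_or on None) or compares greater (less, for Min). A reduced sketch of the same step — the Value enum here is a stand-in with derived ordering, not the engine's real comparison rules:

    #[derive(Clone, Debug, PartialEq, PartialOrd)]
    enum Value { Null, Integer(i64), Text(String) }

    // Fold one input into a running max, ignoring NULLs; the is_none_or
    // branch seeds the accumulator on the first non-NULL value.
    fn agg_max_step(acc: &mut Option<Value>, new_value: &Value) {
        if *new_value != Value::Null && acc.as_ref().is_none_or(|acc| new_value > acc) {
            *acc = Some(new_value.clone());
        }
    }

    fn main() {
        let mut acc = None;
        for v in [Value::Null, Value::Integer(3), Value::Integer(7), Value::Integer(5)] {
            agg_max_step(&mut acc, &v);
        }
        assert_eq!(acc, Some(Value::Integer(7)));
    }
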
@@ -2961,7 +2896,7 @@ pub fn op_agg_final(
 }
 }
 other => {
-panic!("Unexpected value {:?} in AggFinal", other);
+panic!("Unexpected value {other:?} in AggFinal");
 }
 };
 state.pc += 1;
@@ -3506,12 +3441,9 @@ pub fn op_function(

 let result = match (pattern, match_expression) {
 (Value::Text(pattern), Value::Text(match_expression)) if arg_count == 3 => {
-let escape = match construct_like_escape_arg(
+let escape = construct_like_escape_arg(
 state.registers[*start_reg + 2].get_owned_value(),
-) {
-Ok(x) => x,
-Err(e) => return Err(e),
-};
+)?;

 Value::Integer(exec_like_with_escape(
 pattern.as_str(),
@@ -3720,8 +3652,7 @@ pub fn op_function(
 }
 Err(e) => {
 return Err(LimboError::ParseError(format!(
-"Error encountered while parsing datetime value: {}",
-e
+"Error encountered while parsing datetime value: {e}"
 )));
 }
 }
@@ -4206,7 +4137,7 @@ pub fn op_end_coroutine(
 state.ended_coroutine.set(*yield_reg);
 let pc: u32 = (*pc)
 .try_into()
-.unwrap_or_else(|_| panic!("EndCoroutine: pc overflow: {}", pc));
+.unwrap_or_else(|_| panic!("EndCoroutine: pc overflow: {pc}"));
 state.pc = pc - 1; // yield jump is always next to yield. Here we subtract 1 to go back to yield instruction
 } else {
 unreachable!();
@@ -4234,7 +4165,7 @@ pub fn op_yield(
 } else {
 let pc: u32 = (*pc)
 .try_into()
-.unwrap_or_else(|_| panic!("Yield: pc overflow: {}", pc));
+.unwrap_or_else(|_| panic!("Yield: pc overflow: {pc}"));
 // swap the program counter with the value in the yield register
 // this is the mechanism that allows jumping back and forth between the coroutine and the caller
 (state.pc, state.registers[*yield_reg]) =
@@ -4425,8 +4356,7 @@ pub fn op_idx_delete(
 // Also, do not raise this (self-correcting and non-critical) error if in writable_schema mode.
 if *raise_error_if_no_matching_entry {
 return Err(LimboError::Corrupt(format!(
-"IdxDelete: no matching index entry found for record {:?}",
-record
+"IdxDelete: no matching index entry found for record {record:?}"
 )));
 }
 state.pc += 1;
@@ -4502,8 +4432,7 @@ pub fn op_idx_insert(
 Register::Record(ref r) => r,
 o => {
 return Err(LimboError::InternalError(format!(
-"expected record, got {:?}",
-o
+"expected record, got {o:?}"
 )));
 }
 };
@@ -5064,10 +4993,7 @@ pub fn op_parse_schema(
 conn.auto_commit.set(false);

 if let Some(where_clause) = where_clause {
-let stmt = conn.prepare(format!(
-"SELECT * FROM sqlite_schema WHERE {}",
-where_clause
-))?;
+let stmt = conn.prepare(format!("SELECT * FROM sqlite_schema WHERE {where_clause}"))?;

 let mut new_schema = conn.schema.borrow().clone();

@@ -6515,7 +6441,7 @@ fn exec_concat_strings(registers: &[Register]) -> Value {
 match reg.get_owned_value() {
 Value::Null => continue,
 Value::Blob(_) => todo!("TODO concat blob"),
-v => result.push_str(&format!("{}", v)),
+v => result.push_str(&format!("{v}")),
 }
 }
 Value::build_text(result)
@@ -6528,7 +6454,7 @@ fn exec_concat_ws(registers: &[Register]) -> Value {

 let separator = match &registers[0].get_owned_value() {
 Value::Null | Value::Blob(_) => return Value::Null,
-v => format!("{}", v),
+v => format!("{v}"),
 };

 let mut result = String::new();
@@ -6538,7 +6464,7 @@ fn exec_concat_ws(registers: &[Register]) -> Value {
 }
 match reg.get_owned_value() {
 v if matches!(v, Value::Text(_) | Value::Integer(_) | Value::Float(_)) => {
-result.push_str(&format!("{}", v))
+result.push_str(&format!("{v}"))
 }
 _ => continue,
 }
@@ -6705,7 +6631,7 @@ fn execute_sqlite_version(version_integer: i64) -> String {
 let minor = (version_integer % 1_000_000) / 1_000;
 let release = version_integer % 1_000;

-format!("{}.{}.{}", major, minor, release)
+format!("{major}.{minor}.{release}")
 }

 pub fn extract_int_value(value: &Value) -> i64 {
@@ -7172,9 +7098,7 @@ mod tests {
 assert_eq!(
 lhs.exec_add(rhs),
 outputs[i],
-"Wrong ADD for lhs: {}, rhs: {}",
-lhs,
-rhs
+"Wrong ADD for lhs: {lhs}, rhs: {rhs}"
 );
 }
 }
@@ -7230,9 +7154,7 @@ mod tests {
 assert_eq!(
 lhs.exec_subtract(rhs),
 outputs[i],
-"Wrong subtract for lhs: {}, rhs: {}",
-lhs,
-rhs
+"Wrong subtract for lhs: {lhs}, rhs: {rhs}"
 );
 }
 }
@@ -7288,9 +7210,7 @@ mod tests {
 assert_eq!(
 lhs.exec_multiply(rhs),
 outputs[i],
-"Wrong multiply for lhs: {}, rhs: {}",
-lhs,
-rhs
+"Wrong multiply for lhs: {lhs}, rhs: {rhs}"
 );
 }
 }
@@ -7334,9 +7254,7 @@ mod tests {
 assert_eq!(
 lhs.exec_divide(rhs),
 outputs[i],
-"Wrong divide for lhs: {}, rhs: {}",
-lhs,
-rhs
+"Wrong divide for lhs: {lhs}, rhs: {rhs}"
 );
 }
 }
@@ -7402,9 +7320,7 @@ mod tests {
 assert_eq!(
 lhs.exec_remainder(rhs),
 outputs[i],
-"Wrong remainder for lhs: {}, rhs: {}",
-lhs,
-rhs
+"Wrong remainder for lhs: {lhs}, rhs: {rhs}"
 );
 }
 }
@@ -7441,9 +7357,7 @@ mod tests {
 assert_eq!(
 lhs.exec_and(rhs),
 outputs[i],
-"Wrong AND for lhs: {}, rhs: {}",
-lhs,
-rhs
+"Wrong AND for lhs: {lhs}, rhs: {rhs}"
 );
 }
 }
@@ -7482,9 +7396,7 @@ mod tests {
 assert_eq!(
 lhs.exec_or(rhs),
 outputs[i],
-"Wrong OR for lhs: {}, rhs: {}",
-lhs,
-rhs
+"Wrong OR for lhs: {lhs}, rhs: {rhs}"
 );
 }
 }

@@ -40,7 +40,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}]+r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}]+r[{rhs}]"),
),
Insn::Subtract { lhs, rhs, dest } => (
"Subtract",
@@ -49,7 +49,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}]-r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}]-r[{rhs}]"),
),
Insn::Multiply { lhs, rhs, dest } => (
"Multiply",
@@ -58,7 +58,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}]*r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}]*r[{rhs}]"),
),
Insn::Divide { lhs, rhs, dest } => (
"Divide",
@@ -67,7 +67,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}]/r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}]/r[{rhs}]"),
),
Insn::BitAnd { lhs, rhs, dest } => (
"BitAnd",
@@ -76,7 +76,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}]&r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}]&r[{rhs}]"),
),
Insn::BitOr { lhs, rhs, dest } => (
"BitOr",
@@ -85,7 +85,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}]|r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}]|r[{rhs}]"),
),
Insn::BitNot { reg, dest } => (
"BitNot",
@@ -94,7 +94,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("r[{}]=~r[{}]", dest, reg),
format!("r[{dest}]=~r[{reg}]"),
),
Insn::Checkpoint {
database,
@@ -107,7 +107,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("r[{}]=~r[{}]", dest, database),
format!("r[{dest}]=~r[{database}]"),
),
Insn::Remainder { lhs, rhs, dest } => (
"Remainder",
@@ -116,7 +116,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}]%r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}]%r[{rhs}]"),
),
Insn::Null { dest, dest_end } => (
"Null",
@@ -125,8 +125,8 @@ pub fn insn_to_str(
dest_end.map_or(0, |end| end as i32),
Value::build_text(""),
0,
dest_end.map_or(format!("r[{}]=NULL", dest), |end| {
format!("r[{}..{}]=NULL", dest, end)
dest_end.map_or(format!("r[{dest}]=NULL"), |end| {
format!("r[{dest}..{end}]=NULL")
}),
),
Insn::NullRow { cursor_id } => (
@@ -136,7 +136,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("Set cursor {} to a (pseudo) NULL row", cursor_id),
format!("Set cursor {cursor_id} to a (pseudo) NULL row"),
),
Insn::NotNull { reg, target_pc } => (
"NotNull",
@@ -417,7 +417,7 @@ pub fn insn_to_str(
args_reg.unwrap_or(0) as i32,
Value::build_text(""),
0,
format!("table={}, module={}", table_name, module_name),
format!("table={table_name}, module={module_name}"),
),
Insn::VFilter {
cursor_id,
@@ -492,7 +492,7 @@ pub fn insn_to_str(
*num_fields as i32,
Value::build_text(""),
0,
format!("{} columns in r[{}]", num_fields, content_reg),
format!("{num_fields} columns in r[{content_reg}]"),
),
Insn::Rewind {
cursor_id,
@@ -578,7 +578,7 @@ pub fn insn_to_str(
dest_reg,
index_name,
} => {
let for_index = index_name.as_ref().map(|name| format!("; for {}", name));
let for_index = index_name.as_ref().map(|name| format!("; for {name}"));
(
"MakeRecord",
*start_reg as i32,
@@ -603,7 +603,7 @@ pub fn insn_to_str(
Value::build_text(""),
0,
if *count == 1 {
format!("output=r[{}]", start_reg)
format!("output=r[{start_reg}]")
} else {
format!("output=r[{}..{}]", start_reg, start_reg + count - 1)
},
@@ -652,7 +652,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("write={}", write),
format!("write={write}"),
),
Insn::Goto { target_pc } => (
"Goto",
@@ -694,7 +694,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("r[{}]={}", dest, value),
format!("r[{dest}]={value}"),
),
Insn::Real { value, dest } => (
"Real",
@@ -703,7 +703,7 @@ pub fn insn_to_str(
0,
Value::Float(*value),
0,
format!("r[{}]={}", dest, value),
format!("r[{dest}]={value}"),
),
Insn::RealAffinity { register } => (
"RealAffinity",
@@ -721,7 +721,7 @@ pub fn insn_to_str(
0,
Value::build_text(value),
0,
format!("r[{}]='{}'", dest, value),
format!("r[{dest}]='{value}'"),
),
Insn::Blob { value, dest } => (
"Blob",
@@ -765,7 +765,7 @@ pub fn insn_to_str(
if k.index.is_some() { "index" } else { "table" },
get_table_or_index_name(*cursor_id),
))
.unwrap_or(format!("cursor {}", cursor_id))
.unwrap_or(format!("cursor {cursor_id}"))
),
),
Insn::SeekRowid {
@@ -791,7 +791,7 @@ pub fn insn_to_str(
if k.index.is_some() { "index" } else { "table" },
get_table_or_index_name(*cursor_id),
))
.unwrap_or(format!("cursor {}", cursor_id)),
.unwrap_or(format!("cursor {cursor_id}")),
target_pc.as_debug_int()
),
),
@@ -873,7 +873,7 @@ pub fn insn_to_str(
unpacked_start.unwrap_or(0) as i32,
Value::build_text(""),
flags.0 as u16,
format!("key=r[{}]", record_reg),
format!("key=r[{record_reg}]"),
),
Insn::IdxGT {
cursor_id,
@@ -974,7 +974,7 @@ pub fn insn_to_str(
0,
Value::build_text(format!("k({},{})", order.len(), to_print.join(","))),
0,
format!("cursor={}", cursor_id),
format!("cursor={cursor_id}"),
)
}
Insn::SorterData {
@@ -988,7 +988,7 @@ pub fn insn_to_str(
*pseudo_cursor as i32,
Value::build_text(""),
0,
format!("r[{}]=data", dest_reg),
format!("r[{dest_reg}]=data"),
),
Insn::SorterInsert {
cursor_id,
@@ -1000,7 +1000,7 @@ pub fn insn_to_str(
0,
Value::Integer(0),
0,
format!("key=r[{}]", record_reg),
format!("key=r[{record_reg}]"),
),
Insn::SorterSort {
cursor_id,
@@ -1046,9 +1046,9 @@ pub fn insn_to_str(
},
0,
if func.arg_count == 0 {
format!("r[{}]=func()", dest)
format!("r[{dest}]=func()")
} else if *start_reg == *start_reg + func.arg_count - 1 {
format!("r[{}]=func(r[{}])", dest, start_reg)
format!("r[{dest}]=func(r[{start_reg}])")
} else {
format!(
"r[{}]=func(r[{}..{}])",
@@ -1105,7 +1105,7 @@ pub fn insn_to_str(
*key_reg as i32,
Value::build_text(table_name),
flag.0 as u16,
format!("intkey=r[{}] data=r[{}]", key_reg, record_reg),
format!("intkey=r[{key_reg}] data=r[{record_reg}]"),
),
Insn::Delete { cursor_id } => (
"Delete",
@@ -1141,7 +1141,7 @@ pub fn insn_to_str(
*prev_largest_reg as i32,
Value::build_text(""),
0,
format!("r[{}]=rowid", rowid_reg),
format!("r[{rowid_reg}]=rowid"),
),
Insn::MustBeInt { reg } => (
"MustBeInt",
@@ -1170,7 +1170,7 @@ pub fn insn_to_str(
let key = if *num_regs > 0 {
format!("key=r[{}..{}]", record_reg, record_reg + num_regs - 1)
} else {
format!("key=r[{}]", record_reg)
format!("key=r[{record_reg}]")
};
(
"NoConflict",
@@ -1207,8 +1207,7 @@ pub fn insn_to_str(
Value::build_text(""),
0,
format!(
"if r[{}]>0 then r[{}]=r[{}]+max(0,r[{}]) else r[{}]=(-1)",
limit_reg, combined_reg, limit_reg, offset_reg, combined_reg
"if r[{limit_reg}]>0 then r[{combined_reg}]=r[{limit_reg}]+max(0,r[{offset_reg}]) else r[{combined_reg}]=(-1)"
),
),
Insn::OpenWrite {
@@ -1226,7 +1225,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("root={}; {}", root_page, name),
format!("root={root_page}; {name}"),
),
Insn::Copy {
src_reg,
@@ -1239,7 +1238,7 @@ pub fn insn_to_str(
*amount as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}]", dst_reg, src_reg),
format!("r[{dst_reg}]=r[{src_reg}]"),
),
Insn::CreateBtree { db, root, flags } => (
"CreateBtree",
@@ -1262,8 +1261,7 @@ pub fn insn_to_str(
Value::build_text(""),
0,
format!(
"root iDb={} former_root={} is_temp={}",
root, former_root_reg, is_temp
"root iDb={root} former_root={former_root_reg} is_temp={is_temp}"
),
),
Insn::DropTable {
@@ -1278,7 +1276,7 @@ pub fn insn_to_str(
0,
Value::build_text(table_name),
0,
format!("DROP TABLE {}", table_name),
format!("DROP TABLE {table_name}"),
),
Insn::DropIndex { db: _, index } => (
"DropIndex",
@@ -1347,7 +1345,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}] >> r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}] >> r[{rhs}]"),
),
Insn::ShiftLeft { lhs, rhs, dest } => (
"ShiftLeft",
@@ -1356,7 +1354,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}] << r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}] << r[{rhs}]"),
),
Insn::Variable { index, dest } => (
"Variable",
@@ -1375,8 +1373,7 @@ pub fn insn_to_str(
Value::build_text(""),
0,
format!(
"((r[{}]=NULL)|(r[{}]=NULL)) ? r[{}]=NULL : r[{}]=0",
rg1, rg2, dest, dest
"((r[{rg1}]=NULL)|(r[{rg2}]=NULL)) ? r[{dest}]=NULL : r[{dest}]=0"
),
),
Insn::Not { reg, dest } => (
@@ -1386,7 +1383,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("r[{}]=!r[{}]", dest, reg),
format!("r[{dest}]=!r[{reg}]"),
),
Insn::Concat { lhs, rhs, dest } => (
"Concat",
@@ -1395,7 +1392,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=r[{}] + r[{}]", dest, lhs, rhs),
format!("r[{dest}]=r[{lhs}] + r[{rhs}]"),
),
Insn::And { lhs, rhs, dest } => (
"And",
@@ -1404,7 +1401,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=(r[{}] && r[{}])", dest, lhs, rhs),
format!("r[{dest}]=(r[{lhs}] && r[{rhs}])"),
),
Insn::Or { lhs, rhs, dest } => (
"Or",
@@ -1413,7 +1410,7 @@ pub fn insn_to_str(
*dest as i32,
Value::build_text(""),
0,
format!("r[{}]=(r[{}] || r[{}])", dest, lhs, rhs),
format!("r[{dest}]=(r[{lhs}] || r[{rhs}])"),
),
Insn::Noop => ("Noop", 0, 0, 0, Value::build_text(""), 0, String::new()),
Insn::PageCount { db, dest } => (
@@ -1458,7 +1455,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("auto_commit={}, rollback={}", auto_commit, rollback),
format!("auto_commit={auto_commit}, rollback={rollback}"),
),
Insn::OpenEphemeral {
cursor_id,
@@ -1483,7 +1480,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("cursor={}", cursor_id),
format!("cursor={cursor_id}"),
),
Insn::Once {
target_pc_when_reentered,
@@ -1503,8 +1500,8 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
dest_end.map_or(format!("r[{}]=NULL", dest), |end| {
format!("r[{}..{}]=NULL", dest, end)
dest_end.map_or(format!("r[{dest}]=NULL"), |end| {
format!("r[{dest}..{end}]=NULL")
}),
),
Insn::NotFound {
@@ -1599,7 +1596,7 @@ pub fn insn_to_str(
0,
Value::build_text(""),
0,
format!("roots={:?} message_register={}", roots, message_register),
format!("roots={roots:?} message_register={message_register}"),
),
Insn::RowData { cursor_id, dest } => (
"RowData",
@@ -1620,6 +1617,6 @@ pub fn insn_to_str(
p3,
p4.to_string(),
p5,
manual_comment.map_or(comment.to_string(), |mc| format!("{}; {}", comment, mc))
manual_comment.map_or(comment.to_string(), |mc| format!("{comment}; {mc}"))
)
}

@@ -329,9 +329,9 @@ impl ProgramState {
let cursors = self.cursors.borrow_mut();
std::cell::RefMut::map(cursors, |c| {
c.get_mut(cursor_id)
.unwrap_or_else(|| panic!("cursor id {} out of bounds", cursor_id))
.unwrap_or_else(|| panic!("cursor id {cursor_id} out of bounds"))
.as_mut()
.unwrap_or_else(|| panic!("cursor id {} is None", cursor_id))
.unwrap_or_else(|| panic!("cursor id {cursor_id} is None"))
})
}
}
@@ -344,7 +344,7 @@ impl Register {
assert!(!r.is_invalidated());
r.as_blob_value()
}
_ => panic!("register holds unexpected value: {:?}", self),
_ => panic!("register holds unexpected value: {self:?}"),
}
}
}

15
core/vtab.rs
@@ -44,8 +44,7 @@ impl VirtualTable {
.map(|(vtab, columns)| (VirtualTableType::Pragma(vtab), columns))?
} else {
return Err(LimboError::ParseError(format!(
"No such table-valued function: {}",
name
"No such table-valued function: {name}"
)));
};

@@ -93,7 +92,9 @@ impl VirtualTable {

pub(crate) fn open(&self, conn: Arc<Connection>) -> crate::Result<VirtualTableCursor> {
match &self.vtab_type {
VirtualTableType::Pragma(table) => Ok(VirtualTableCursor::Pragma(table.open(conn)?)),
VirtualTableType::Pragma(table) => {
Ok(VirtualTableCursor::Pragma(Box::new(table.open(conn)?)))
}
VirtualTableType::External(table) => {
Ok(VirtualTableCursor::External(table.open(conn)?))
}
@@ -132,7 +133,7 @@ impl VirtualTable {
}

pub enum VirtualTableCursor {
Pragma(PragmaVirtualTableCursor),
Pragma(Box<PragmaVirtualTableCursor>),
External(ExtVirtualTableCursor),
}

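Boxing the Pragma variant here is the usual fix for clippy's large_enum_variant warning: an enum is as big as its largest variant, so one oversized payload inflates every value of the type. A sketch with made-up types (sizes are illustrative, not Limbo's real cursor layout):

struct BigCursor {
    buf: [u8; 1024], // stands in for a large pragma cursor
}

enum Direct {
    Big(BigCursor),
    Small(u8),
}

enum Boxed {
    Big(Box<BigCursor>), // payload moves to the heap
    Small(u8),
}

fn main() {
    // Direct must reserve space for BigCursor; Boxed only stores a pointer.
    println!("direct: {} bytes", std::mem::size_of::<Direct>());
    println!("boxed:  {} bytes", std::mem::size_of::<Boxed>());
}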
@@ -216,8 +217,7 @@ impl ExtVirtualTable {
kind: VTabKind,
) -> crate::Result<(Self, String)> {
let module = module.ok_or(LimboError::ExtensionError(format!(
"Virtual table module not found: {}",
module_name
"Virtual table module not found: {module_name}"
)))?;
if kind != module.module_kind {
let expected = match kind {
@@ -225,8 +225,7 @@ impl ExtVirtualTable {
VTabKind::TableValuedFunction => "table-valued function",
};
return Err(LimboError::ExtensionError(format!(
"{} is not a {} module",
module_name, expected
"{module_name} is not a {expected} module"
)));
}
let (schema, table_ptr) = module.implementation.create(args)?;

@@ -17,7 +17,7 @@
//! - `filename` — path to the CSV file (mutually exclusive with `data=`)
//! - `data` — inline CSV content as a string
//! - `header` — whether the first row contains column names;
//! accepts `yes`/`no`, `on`/`off`, `true`/`false`, or `1`/`0`
//! accepts `yes`/`no`, `on`/`off`, `true`/`false`, or `1`/`0`
//! - `columns` — number of columns
//! - `schema` — optional custom SQL `CREATE TABLE` schema
use std::fs::File;
@@ -398,7 +398,7 @@ mod tests {

fn write_csv(content: &str) -> NamedTempFile {
let mut tmp = NamedTempFile::new().expect("Failed to create temp file");
write!(tmp, "{}", content).unwrap();
write!(tmp, "{content}").unwrap();
tmp
}

@@ -681,19 +681,19 @@ mod tests {
for &val in &true_values {
let result = try_new_table(vec![
"data=id,name\n1,Alice\n2,Bob\n",
&format!("header={}", val),
&format!("header={val}"),
]);
assert!(result.is_ok(), "Expected Ok for header='{}'", val);
assert!(result.unwrap().1.header, "Expected true for '{}'", val);
assert!(result.is_ok(), "Expected Ok for header='{val}'");
assert!(result.unwrap().1.header, "Expected true for '{val}'");
}

for &val in &false_values {
let result = try_new_table(vec![
"data=id,name\n1,Alice\n2,Bob\n",
&format!("header={}", val),
&format!("header={val}"),
]);
assert!(result.is_ok(), "Expected Ok for header='{}'", val);
assert!(!result.unwrap().1.header, "Expected false for '{}'", val);
assert!(result.is_ok(), "Expected Ok for header='{val}'");
assert!(!result.unwrap().1.header, "Expected false for '{val}'");
}
}

@@ -704,7 +704,7 @@ mod tests {
for &val in &invalid_values {
let result = try_new_table(vec![
"data=id,name\n1,Alice\n2,Bob\n",
&format!("header={}", val),
&format!("header={val}"),
]);
assert!(matches!(result, Err(ResultCode::InvalidArgs)));
}
@@ -747,13 +747,10 @@ mod tests {
let quotes = ["'", "\""];

for &quote in &quotes {
let table = new_table(vec![&format!(
"data={}aa{}{}bb{}",
quote, quote, quote, quote
)]);
let table = new_table(vec![&format!("data={quote}aa{quote}{quote}bb{quote}")]);
let cursor = table.open(None).unwrap();
let rows = read_rows(cursor, 1);
assert_eq!(rows, vec![vec![cell!(format!("aa{}bb", quote))]]);
assert_eq!(rows, vec![vec![cell!(format!("aa{quote}bb"))]]);
}
}

@@ -763,13 +760,10 @@ mod tests {

for &case in &cases {
let (outer, inner) = case;
let table = new_table(vec![&format!(
"data={}aa{}{}bb{}",
outer, inner, inner, outer
)]);
let table = new_table(vec![&format!("data={outer}aa{inner}{inner}bb{outer}")]);
let cursor = table.open(None).unwrap();
let rows = read_rows(cursor, 1);
assert_eq!(rows, vec![vec![cell!(format!("aa{}{}bb", inner, inner))]]);
assert_eq!(rows, vec![vec![cell!(format!("aa{inner}{inner}bb"))]]);
}
}

@@ -786,7 +780,7 @@ mod tests {
for &val in &invalid_values {
let result = try_new_table(vec![
"data=id,name\n1,Alice\n2,Bob\n",
&format!("columns={}", val),
&format!("columns={val}"),
]);
assert!(matches!(result, Err(ResultCode::InvalidArgs)));
}

@@ -59,7 +59,7 @@ impl VTabCursor for KVStoreCursor {
.first()
.and_then(|v| v.to_text())
.map(|s| s.to_string());
log::debug!("idx_str found: key_eq\n value: {:?}", key);
log::debug!("idx_str found: key_eq\n value: {key:?}");
if let Some(key) = key {
let rowid = hash_key(&key);
let store = GLOBAL_STORE.lock().unwrap();
@@ -251,7 +251,7 @@ impl VfsExtension for TestFS {
type File = TestFile;
fn open_file(&self, path: &str, flags: i32, _direct: bool) -> ExtResult<Self::File> {
let _ = env_logger::try_init();
log::debug!("opening file with testing VFS: {} flags: {}", path, flags);
log::debug!("opening file with testing VFS: {path} flags: {flags}");
let file = OpenOptions::new()
.read(true)
.write(true)
@@ -372,7 +372,7 @@ impl VTabCursor for StatsCursor {
master.close();
for tbl in tables {
// count rows for each table
if let Ok(mut count_stmt) = conn.prepare(&format!("SELECT COUNT(*) FROM {};", tbl)) {
if let Ok(mut count_stmt) = conn.prepare(&format!("SELECT COUNT(*) FROM {tbl};")) {
let count = match count_stmt.step() {
StepResult::Row => count_stmt.get_row()[0].to_integer().unwrap_or(0),
_ => 0,

@@ -23,8 +23,7 @@ pub fn register_extension(input: TokenStream) -> TokenStream {
} = input_ast;

let scalar_calls = scalars.iter().map(|scalar_ident| {
let register_fn =
syn::Ident::new(&format!("register_{}", scalar_ident), scalar_ident.span());
let register_fn = syn::Ident::new(&format!("register_{scalar_ident}"), scalar_ident.span());
quote! {
{
let result = unsafe { #register_fn(api)};
@@ -36,7 +35,7 @@ pub fn register_extension(input: TokenStream) -> TokenStream {
});

let aggregate_calls = aggregates.iter().map(|agg_ident| {
let register_fn = syn::Ident::new(&format!("register_{}", agg_ident), agg_ident.span());
let register_fn = syn::Ident::new(&format!("register_{agg_ident}"), agg_ident.span());
quote! {
{
let result = unsafe{ #agg_ident::#register_fn(api)};
@@ -47,7 +46,7 @@ pub fn register_extension(input: TokenStream) -> TokenStream {
}
});
let vtab_calls = vtabs.iter().map(|vtab_ident| {
let register_fn = syn::Ident::new(&format!("register_{}", vtab_ident), vtab_ident.span());
let register_fn = syn::Ident::new(&format!("register_{vtab_ident}"), vtab_ident.span());
quote! {
{
let result = unsafe{ #vtab_ident::#register_fn(api)};
@@ -58,7 +57,7 @@ pub fn register_extension(input: TokenStream) -> TokenStream {
}
});
let vfs_calls = vfs.iter().map(|vfs_ident| {
let register_fn = syn::Ident::new(&format!("register_{}", vfs_ident), vfs_ident.span());
let register_fn = syn::Ident::new(&format!("register_{vfs_ident}"), vfs_ident.span());
quote! {
{
let result = unsafe { #register_fn(api) };
@@ -70,7 +69,7 @@ pub fn register_extension(input: TokenStream) -> TokenStream {
});
let static_vfs = vfs.iter().map(|vfs_ident| {
let static_register =
syn::Ident::new(&format!("register_static_{}", vfs_ident), vfs_ident.span());
syn::Ident::new(&format!("register_static_{vfs_ident}"), vfs_ident.span());
quote! {
{
let result = api.add_builtin_vfs(unsafe { #static_register()});

@@ -87,7 +87,7 @@ fn process_payload(payload_group: Group) -> String {
match token {
TokenTree::Ident(ident) => {
if is_variable_name {
variable_name_list.push_str(&format!("{},", ident));
variable_name_list.push_str(&format!("{ident},"));
}
is_variable_name = false;
}
@@ -99,7 +99,7 @@ fn process_payload(payload_group: Group) -> String {
_ => {}
}
}
format!("{{ {} }}", variable_name_list).to_string()
format!("{{ {variable_name_list} }}").to_string()
}
/// Generates the `get_description` implementation for the processed enum.
fn generate_get_description(
@@ -112,25 +112,21 @@ fn generate_get_description(
let payload = payload.unwrap_or("".to_string());
let desc;
if let Some(description) = variant_description_map.get(&variant) {
desc = format!("Some({})", description);
desc = format!("Some({description})");
} else {
desc = "None".to_string();
}
all_enum_arms.push_str(&format!(
"{}::{} {} => {},\n",
enum_name, variant, payload, desc
));
all_enum_arms.push_str(&format!("{enum_name}::{variant} {payload} => {desc},\n"));
}

let enum_impl = format!(
"impl {} {{
"impl {enum_name} {{
pub fn get_description(&self) -> Option<&str> {{
match self {{
{}
{all_enum_arms}
}}
}}
}}",
enum_name, all_enum_arms
}}"
);
enum_impl.parse().unwrap()
}

@@ -475,7 +475,9 @@ impl Interaction {
StepResult::Done => {
break;
}
StepResult::Busy => {}
StepResult::Busy => {
return Err(turso_core::LimboError::Busy);
}
}
}

@@ -608,7 +610,10 @@ impl Interaction {
StepResult::Done => {
break;
}
StepResult::Interrupt | StepResult::Busy => {}
StepResult::Busy => {
return Err(turso_core::LimboError::Busy);
}
StepResult::Interrupt => {}
}
}

@@ -675,7 +680,10 @@ impl Interaction {
StepResult::Done => {
break;
}
StepResult::Interrupt | StepResult::Busy => {}
StepResult::Busy => {
return Err(turso_core::LimboError::Busy);
}
StepResult::Interrupt => {}
}
}

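The three Interaction hunks above change the Busy arm from a silent no-op, which could spin indefinitely on a locked database, into a returned error. A condensed sketch of the before/after control flow, with stand-in types rather than the crate's real ones:

#[derive(Debug)]
enum StepResult { Row, Done, Busy, Interrupt }

#[derive(Debug)]
enum LimboError { Busy }

fn drain(steps: Vec<StepResult>) -> Result<u32, LimboError> {
    let mut rows = 0;
    for step in steps {
        match step {
            StepResult::Row => rows += 1,
            StepResult::Done => break,
            // was `StepResult::Busy => {}`; now contention is surfaced to the caller
            StepResult::Busy => return Err(LimboError::Busy),
            StepResult::Interrupt => {}
        }
    }
    Ok(rows)
}

fn main() {
    assert_eq!(drain(vec![StepResult::Row, StepResult::Done]).unwrap(), 1);
    assert!(drain(vec![StepResult::Busy]).is_err());
}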
@@ -14,7 +14,7 @@ use runner::{differential, watch};
use std::any::Any;
use std::backtrace::Backtrace;
use std::fs::OpenOptions;
use std::io::Write;
use std::io::{IsTerminal, Write};
use std::sync::{mpsc, Arc, Mutex};
use tracing_subscriber::field::MakeExt;
use tracing_subscriber::fmt::format;
@@ -604,7 +604,6 @@ fn run_simulation_default(
result
}

#[allow(deprecated)]
fn init_logger() {
let file = OpenOptions::new()
.create(true)
@@ -612,16 +611,20 @@ fn init_logger() {
.truncate(true)
.open("simulator.log")
.unwrap();

let requires_ansi = std::io::stdout().is_terminal();

let _ = tracing_subscriber::registry()
.with(
tracing_subscriber::fmt::layer()
.with_ansi(true)
.with_ansi(requires_ansi)
.with_line_number(true)
.without_time()
.with_thread_ids(false),
)
.with(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")))
.with(
#[allow(deprecated)]
tracing_subscriber::fmt::layer()
.with_writer(file)
.with_ansi(false)

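The init_logger hunk gates ANSI escape codes on whether stdout is actually a terminal, so piped or redirected output stays free of color codes (the simulator.log file layer keeps with_ansi(false) unconditionally). The check comes from std::io::IsTerminal, stable since Rust 1.70:

use std::io::IsTerminal;

fn main() {
    let requires_ansi = std::io::stdout().is_terminal();
    println!("color output enabled: {requires_ansi}");
}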
@@ -267,7 +267,9 @@ fn limbo_integrity_check(conn: &Arc<Connection>) -> Result<()> {
StepResult::Done => {
break;
}
StepResult::Busy => {}
StepResult::Busy => {
return Err(LimboError::Busy);
}
}
}

@@ -370,7 +370,7 @@ mod tests {
let mut stmt = ptr::null_mut();
for i in 1..2000 {
let sql =
std::ffi::CString::new(format!("INSERT INTO test (id) VALUES ({})", i))
std::ffi::CString::new(format!("INSERT INTO test (id) VALUES ({i})"))
.unwrap();
assert_eq!(
sqlite3_prepare_v2(db, sql.as_ptr(), -1, &mut stmt, ptr::null_mut()),
@@ -473,7 +473,7 @@ mod tests {
let mut stmt = ptr::null_mut();
for i in 1..=2000 {
let sql =
std::ffi::CString::new(format!("INSERT INTO test (id) VALUES ({})", i))
std::ffi::CString::new(format!("INSERT INTO test (id) VALUES ({i})"))
.unwrap();
assert_eq!(
sqlite3_prepare_v2(db, sql.as_ptr(), -1, &mut stmt, ptr::null_mut()),

@@ -349,7 +349,7 @@ fn generate_plan(opts: &Opts) -> Result<Plan, Box<dyn std::error::Error + Send +
writeln!(log_file, "{}", opts.nr_iterations)?;
writeln!(log_file, "{}", ddl_statements.len())?;
for stmt in &ddl_statements {
writeln!(log_file, "{}", stmt)?;
writeln!(log_file, "{stmt}")?;
}
}
plan.ddl_statements = ddl_statements;
@@ -373,7 +373,7 @@ fn generate_plan(opts: &Opts) -> Result<Plan, Box<dyn std::error::Error + Send +
}
let sql = generate_random_statement(&schema);
if !opts.skip_log {
writeln!(log_file, "{}", sql)?;
writeln!(log_file, "{sql}")?;
}
queries.push(sql);
if tx.is_some() {
@@ -443,7 +443,7 @@ pub fn init_tracing() -> Result<WorkerGuard, std::io::Error> {
.with(EnvFilter::from_default_env())
.try_init()
{
println!("Unable to setup tracing appender: {:?}", e);
println!("Unable to setup tracing appender: {e:?}");
}
Ok(guard)
}
@@ -486,7 +486,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Apply each DDL statement individually
for stmt in &plan.ddl_statements {
if opts.verbose {
println!("executing ddl {}", stmt);
println!("executing ddl {stmt}");
}
if let Err(e) = conn.execute(stmt, ()).await {
match e {
@@ -494,7 +494,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
if e.contains("Corrupt database") {
panic!("Error creating table: {}", e);
} else {
println!("Error creating table: {}", e);
println!("Error creating table: {e}");
}
}
_ => panic!("Error creating table: {}", e),
@@ -528,7 +528,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let sql = &plan.queries_per_thread[thread][query_index];
if !opts.silent {
if opts.verbose {
println!("executing query {}", sql);
println!("executing query {sql}");
} else if query_index % 100 == 0 {
print!(
"\r{:.2} %",
@@ -544,10 +544,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
panic!("Error executing query: {}", e);
} else if e.contains("UNIQUE constraint failed") {
if opts.verbose {
println!("Skipping UNIQUE constraint violation: {}", e);
println!("Skipping UNIQUE constraint violation: {e}");
}
} else if opts.verbose {
println!("Error executing query: {}", e);
println!("Error executing query: {e}");
}
}
_ => panic!("Error executing query: {}", e),
@@ -575,6 +575,6 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
handle.await??;
}
println!("Done. SQL statements written to {}", opts.log_file);
println!("Database file: {}", db_file);
println!("Database file: {db_file}");
Ok(())
}

@@ -55,6 +55,19 @@ do_execsql_test select-min {
SELECT min(age) FROM users;
} {1}

do_execsql_test_on_specific_db {:memory:} min-null-regression-test {
CREATE TABLE t(a);
INSERT INTO t VALUES ('abc'), (NULL);
SELECT min(a) FROM t;
} {abc}

do_execsql_test_on_specific_db {:memory:} max-null-regression-test {
CREATE TABLE t(a);
INSERT INTO t VALUES ('abc'), (NULL);
SELECT max(a) FROM t;
} {abc}


do_execsql_test select-max-text {
SELECT max(first_name) FROM users;
} {Zoe}

@@ -172,7 +172,7 @@ pub(crate) fn sqlite_exec_rows(
let column: rusqlite::types::Value = match row.get(i) {
Ok(column) => column,
Err(rusqlite::Error::InvalidColumnIndex(_)) => break,
Err(err) => panic!("unexpected rusqlite error: {}", err),
Err(err) => panic!("unexpected rusqlite error: {err}"),
};
result.push(column);
}
@@ -201,8 +201,9 @@ pub(crate) fn limbo_exec_rows(
stmt.run_once().unwrap();
continue;
}

turso_core::StepResult::Done => break 'outer,
r => panic!("unexpected result {:?}: expecting single row", r),
r => panic!("unexpected result {r:?}: expecting single row"),
}
};
let row = row
@@ -234,7 +235,7 @@ pub(crate) fn limbo_exec_rows_error(
continue;
}
turso_core::StepResult::Done => return Ok(()),
r => panic!("unexpected result {:?}: expecting single row", r),
r => panic!("unexpected result {r:?}: expecting single row"),
}
}
}
@@ -297,7 +298,7 @@ mod tests {
);
let conn = db.connect_limbo();
let ret = limbo_exec_rows(&db, &conn, "CREATE table t(a)");
assert!(ret.is_empty(), "{:?}", ret);
assert!(ret.is_empty(), "{ret:?}");
limbo_exec_rows(&db, &conn, "INSERT INTO t values (1)");
conn.close().unwrap()
}
@@ -313,7 +314,7 @@ mod tests {
assert_eq!(ret, vec![vec![Value::Integer(1)]]);

let err = limbo_exec_rows_error(&db, &conn, "INSERT INTO t values (1)").unwrap_err();
assert!(matches!(err, turso_core::LimboError::ReadOnly), "{:?}", err);
assert!(matches!(err, turso_core::LimboError::ReadOnly), "{err:?}");
}
Ok(())
}
@@ -338,8 +339,8 @@ mod tests {
}
i += 1;
expected.push(val);
let ret = limbo_exec_rows(&db, &conn, &format!("INSERT INTO t VALUES ({})", val));
assert!(ret.is_empty(), "Insert failed for value {}: {:?}", val, ret);
let ret = limbo_exec_rows(&db, &conn, &format!("INSERT INTO t VALUES ({val})"));
assert!(ret.is_empty(), "Insert failed for value {val}: {ret:?}");
}

// Sort expected values to match index order
@@ -370,9 +371,9 @@ mod tests {

// Insert 11 unique 1MB blobs
for i in 0..11 {
println!("Inserting blob #{}", i);
println!("Inserting blob #{i}");
let ret = limbo_exec_rows(&db, &conn, "INSERT INTO t VALUES (randomblob(1024*1024))");
assert!(ret.is_empty(), "Insert #{} failed: {:?}", i, ret);
assert!(ret.is_empty(), "Insert #{i} failed: {ret:?}");
}

// Verify we have 11 rows
@@ -380,8 +381,7 @@ mod tests {
assert_eq!(
ret,
vec![vec![Value::Integer(11)]],
"Expected 11 rows but got {:?}",
ret
"Expected 11 rows but got {ret:?}",
);

Ok(())

@@ -57,7 +57,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
}
},
Ok(None) => {}
Err(err) => eprintln!("{}", err),
Err(err) => eprintln!("{err}"),
};

// Check last_insert_rowid after explicit id
@@ -80,7 +80,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
}
},
Ok(None) => {}
Err(err) => eprintln!("{}", err),
Err(err) => eprintln!("{err}"),
};
assert_eq!(last_id, 5, "Explicit insert should have rowid 5");
do_flush(&conn, &tmp_db)?;

@@ -36,8 +36,7 @@ mod tests {
let sqlite_result = sqlite_exec_rows(&sqlite_conn, offending_query);
assert_eq!(
limbo_result, sqlite_result,
"query: {}, limbo: {:?}, sqlite: {:?}",
offending_query, limbo_result, sqlite_result
"query: {offending_query}, limbo: {limbo_result:?}, sqlite: {sqlite_result:?}"
);
}

@@ -55,8 +54,7 @@ mod tests {
let sqlite = sqlite_exec_rows(&sqlite_conn, query);
assert_eq!(
limbo, sqlite,
"query: {}, limbo: {:?}, sqlite: {:?}",
query, limbo, sqlite
"query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?}"
);
}
}
@@ -69,7 +67,7 @@ mod tests {
let insert = format!(
"INSERT INTO t VALUES {}",
(1..100)
.map(|x| format!("({})", x))
.map(|x| format!("({x})"))
.collect::<Vec<_>>()
.join(", ")
);
@@ -104,13 +102,12 @@ mod tests {
order_by.unwrap_or("")
);

log::trace!("query: {}", query);
log::trace!("query: {query}");
let limbo_result = limbo_exec_rows(&db, &limbo_conn, &query);
let sqlite_result = sqlite_exec_rows(&sqlite_conn, &query);
assert_eq!(
limbo_result, sqlite_result,
"query: {}, limbo: {:?}, sqlite: {:?}, seed: {}",
query, limbo_result, sqlite_result, seed
"query: {query}, limbo: {limbo_result:?}, sqlite: {sqlite_result:?}, seed: {seed}"
);
}
}
@@ -189,7 +186,7 @@ mod tests {
let insert = format!(
"INSERT INTO t VALUES {}",
(0..10000)
.map(|x| format!("({})", x))
.map(|x| format!("({x})"))
.collect::<Vec<_>>()
.join(", ")
);
@@ -220,8 +217,7 @@ mod tests {
let sqlite = sqlite_exec_rows(&sqlite_conn, &query);
assert_eq!(
limbo, sqlite,
"query: {}, limbo: {:?}, sqlite: {:?}",
query, limbo, sqlite
"query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?}",
);
}
}

@@ -409,9 +405,9 @@ mod tests {

// Generate ORDER BY string
let order_by_components = vec![
order_by1.map(|x| format!("x {}", x)),
order_by2.map(|x| format!("y {}", x)),
order_by3.map(|x| format!("z {}", x)),
order_by1.map(|x| format!("x {x}")),
order_by2.map(|x| format!("y {x}")),
order_by3.map(|x| format!("z {x}")),
]
.into_iter()
.flatten()
@@ -430,7 +426,7 @@ mod tests {
order_by,
limit
);
log::debug!("query: {}", query);
log::debug!("query: {query}");

// Execute the query on all databases and compare the results
for (i, sqlite_conn) in sqlite_conns.iter().enumerate() {
@@ -445,11 +441,11 @@ mod tests {
let order_by_only_equalities = !order_by_components.is_empty()
&& order_by_components.iter().all(|o: &String| {
if o.starts_with("x ") {
comp1.map_or(false, |c| c == "=")
comp1 == Some("=")
} else if o.starts_with("y ") {
comp2.map_or(false, |c| c == "=")
comp2 == Some("=")
} else {
comp3.map_or(false, |c| c == "=")
comp3 == Some("=")
}
});

@@ -511,7 +507,7 @@ mod tests {
pub fn compound_select_fuzz() {
let _ = env_logger::try_init();
let (mut rng, seed) = rng_from_time();
log::info!("compound_select_fuzz seed: {}", seed);
log::info!("compound_select_fuzz seed: {seed}");

// Constants for fuzzing parameters
const MAX_TABLES: usize = 7;
@@ -532,12 +528,12 @@ mod tests {

const COLS: [&str; 3] = ["c1", "c2", "c3"];
for i in 0..num_tables {
let table_name = format!("t{}", i);
let table_name = format!("t{i}");
let create_table_sql = format!(
"CREATE TABLE {} ({})",
table_name,
COLS.iter()
.map(|c| format!("{} INTEGER", c))
.map(|c| format!("{c} INTEGER"))
.collect::<Vec<_>>()
.join(", ")
);
@@ -551,10 +547,8 @@ mod tests {
let c2_val: i64 = rng.random_range(-3..3);
let c3_val: i64 = rng.random_range(-3..3);

let insert_sql = format!(
"INSERT INTO {} VALUES ({}, {}, {})",
table_name, c1_val, c2_val, c3_val
);
let insert_sql =
format!("INSERT INTO {table_name} VALUES ({c1_val}, {c2_val}, {c3_val})",);
limbo_exec_rows(&db, &limbo_conn, &insert_sql);
sqlite_exec_rows(&sqlite_conn, &insert_sql);
}
@@ -597,7 +591,7 @@ mod tests {

if rng.random_bool(0.8) {
let limit_val = rng.random_range(0..=MAX_LIMIT_VALUE); // LIMIT 0 is valid
query = format!("{} LIMIT {}", query, limit_val);
query = format!("{query} LIMIT {limit_val}");
}

log::debug!(
@@ -673,15 +667,14 @@ mod tests {
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();

let (mut rng, seed) = rng_from_time();
log::info!("seed: {}", seed);
log::info!("seed: {seed}");
for _ in 0..1024 {
let query = g.generate(&mut rng, sql, 50);
let limbo = limbo_exec_rows(&db, &limbo_conn, &query);
let sqlite = sqlite_exec_rows(&sqlite_conn, &query);
assert_eq!(
limbo, sqlite,
"query: {}, limbo: {:?}, sqlite: {:?} seed: {}",
query, limbo, sqlite, seed
"query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?} seed: {seed}"
);
}
}

@@ -708,8 +701,7 @@ mod tests {
let sqlite = sqlite_exec_rows(&sqlite_conn, query);
assert_eq!(
limbo, sqlite,
"query: {}, limbo: {:?}, sqlite: {:?}",
query, limbo, sqlite
"query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?}"
);
}
}
@@ -794,10 +786,10 @@ mod tests {
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();

let (mut rng, seed) = rng_from_time();
log::info!("seed: {}", seed);
log::info!("seed: {seed}");
for _ in 0..1024 {
let query = g.generate(&mut rng, sql, 50);
log::info!("query: {}", query);
log::info!("query: {query}");
let limbo = limbo_exec_rows(&db, &limbo_conn, &query);
let sqlite = sqlite_exec_rows(&sqlite_conn, &query);
match (&limbo[0][0], &sqlite[0][0]) {
@@ -808,11 +800,7 @@ mod tests {
assert!(
(limbo - sqlite).abs() < 1e-9
|| (limbo - sqlite) / (limbo.abs().max(sqlite.abs())) < 1e-9,
"query: {}, limbo: {:?}, sqlite: {:?} seed: {}",
query,
limbo,
sqlite,
seed
"query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?} seed: {seed}"
)
}
_ => {}
@@ -958,16 +946,15 @@ mod tests {
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();

let (mut rng, seed) = rng_from_time();
log::info!("seed: {}", seed);
log::info!("seed: {seed}");
for _ in 0..1024 {
let query = g.generate(&mut rng, sql, 50);
log::info!("query: {}", query);
log::info!("query: {query}");
let limbo = limbo_exec_rows(&db, &limbo_conn, &query);
let sqlite = sqlite_exec_rows(&sqlite_conn, &query);
assert_eq!(
limbo, sqlite,
"query: {}, limbo: {:?}, sqlite: {:?} seed: {}",
query, limbo, sqlite, seed
"query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?} seed: {seed}"
);
}
}

@@ -1328,16 +1315,15 @@ mod tests {
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();

let (mut rng, seed) = rng_from_time();
log::info!("seed: {}", seed);
log::info!("seed: {seed}");
for _ in 0..1024 {
let query = g.generate(&mut rng, sql, 50);
log::info!("query: {}", query);
log::info!("query: {query}");
let limbo = limbo_exec_rows(&db, &limbo_conn, &query);
let sqlite = sqlite_exec_rows(&sqlite_conn, &query);
assert_eq!(
limbo, sqlite,
"query: {}, limbo: {:?}, sqlite: {:?} seed: {}",
query, limbo, sqlite, seed
"query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?} seed: {seed}"
);
}
}
@@ -1366,8 +1352,66 @@ mod tests {
let sqlite = sqlite_exec_rows(&sqlite_conn, query);
assert_eq!(
limbo, sqlite,
"queries: {:?}, query: {}, limbo: {:?}, sqlite: {:?}",
queries, query, limbo, sqlite
"queries: {queries:?}, query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?}"
);
}
}
}

#[test]
#[ignore]
/// Ignored because of https://github.com/tursodatabase/turso/issues/2040, https://github.com/tursodatabase/turso/issues/2041
/// TODO: add fuzzing for other aggregate functions
pub fn min_max_agg_fuzz() {
let _ = env_logger::try_init();

let datatypes = ["INTEGER", "TEXT", "REAL", "BLOB"];
let (mut rng, seed) = rng_from_time();
log::info!("seed: {seed}");

for _ in 0..1000 {
// Create table with random datatype
let datatype = datatypes[rng.random_range(0..datatypes.len())];
let create_table = format!("CREATE TABLE t(x {datatype})");

let db = TempDatabase::new_empty(false);
let limbo_conn = db.connect_limbo();
let sqlite_conn = rusqlite::Connection::open_in_memory().unwrap();

limbo_exec_rows(&db, &limbo_conn, &create_table);
sqlite_exec_rows(&sqlite_conn, &create_table);

// Insert 5 random values of random types
let mut values = Vec::new();
for _ in 0..5 {
let value = match rng.random_range(0..4) {
0 => rng.random_range(-1000..1000).to_string(), // Integer
1 => format!(
"'{}'",
(0..10)
.map(|_| rng.random_range(b'a'..=b'z') as char)
.collect::<String>()
), // Text
2 => format!("{:.2}", rng.random_range(-100..100) as f64 / 10.0), // Real
3 => "NULL".to_string(), // NULL
_ => unreachable!(),
};
values.push(format!("({value})"));
}

let insert = format!("INSERT INTO t VALUES {}", values.join(","));
limbo_exec_rows(&db, &limbo_conn, &insert);
sqlite_exec_rows(&sqlite_conn, &insert);

// Test min and max
for agg in ["min(x)", "max(x)"] {
let query = format!("SELECT {agg} FROM t");
let limbo = limbo_exec_rows(&db, &limbo_conn, &query);
let sqlite = sqlite_exec_rows(&sqlite_conn, &query);

assert_eq!(
limbo, sqlite,
"query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?}, seed: {seed}, values: {values:?}, schema: {create_table}"
);
}
}

@@ -1405,13 +1449,12 @@ mod tests {

assert_eq!(
limbo, sqlite,
"query: {}, limbo: {:?}, sqlite: {:?}",
query, limbo, sqlite
"query: {query}, limbo: {limbo:?}, sqlite: {sqlite:?}",
);
}

let (mut rng, seed) = rng_from_time();
log::info!("seed: {}", seed);
log::info!("seed: {seed}");

let mut i = 0;
let mut primary_key_set = HashSet::with_capacity(100);
@@ -1425,14 +1468,13 @@ mod tests {
g.generate(&mut rng, builders.number, 1),
g.generate(&mut rng, builders.number, 1),
);
let query = format!("INSERT INTO t VALUES ({}, {}, {})", x, y, z);
log::info!("insert: {}", query);
let query = format!("INSERT INTO t VALUES ({x}, {y}, {z})");
log::info!("insert: {query}");
dbg!(&query);
assert_eq!(
limbo_exec_rows(&db, &limbo_conn, &query),
sqlite_exec_rows(&sqlite_conn, &query),
"seed: {}",
seed,
"seed: {seed}",
);
i += 1;
}
@@ -1440,7 +1482,7 @@ mod tests {
let query = "SELECT COUNT(*) FROM t".to_string();
let limbo = limbo_exec_rows(&db, &limbo_conn, &query);
let sqlite = sqlite_exec_rows(&sqlite_conn, &query);
assert_eq!(limbo, sqlite, "seed: {}", seed);
assert_eq!(limbo, sqlite, "seed: {seed}");

let sql = g
.create()
@@ -1451,7 +1493,7 @@ mod tests {

for _ in 0..1024 {
let query = g.generate(&mut rng, sql, 50);
log::info!("query: {}", query);
log::info!("query: {query}");
let limbo = limbo_exec_rows(&db, &limbo_conn, &query);
let sqlite = sqlite_exec_rows(&sqlite_conn, &query);

@@ -50,7 +50,7 @@ fn test_simple_overflow_page() -> anyhow::Result<()> {
},
Ok(None) => {}
Err(err) => {
eprintln!("{}", err);
eprintln!("{err}");
}
};

@@ -77,7 +77,7 @@ fn test_simple_overflow_page() -> anyhow::Result<()> {
},
Ok(None) => {}
Err(err) => {
eprintln!("{}", err);
eprintln!("{err}");
}
}
do_flush(&conn, &tmp_db)?;
@@ -118,7 +118,7 @@ fn test_sequential_overflow_page() -> anyhow::Result<()> {
},
Ok(None) => {}
Err(err) => {
eprintln!("{}", err);
eprintln!("{err}");
}
};
}
@@ -147,7 +147,7 @@ fn test_sequential_overflow_page() -> anyhow::Result<()> {
},
Ok(None) => {}
Err(err) => {
eprintln!("{}", err);
eprintln!("{err}");
}
}
do_flush(&conn, &tmp_db)?;
@@ -167,12 +167,12 @@ fn test_sequential_write() -> anyhow::Result<()> {
let list_query = "SELECT * FROM test";
let max_iterations = 10000;
for i in 0..max_iterations {
println!("inserting {} ", i);
println!("inserting {i} ");
if (i % 100) == 0 {
let progress = (i as f64 / max_iterations as f64) * 100.0;
println!("progress {:.1}%", progress);
println!("progress {progress:.1}%");
}
let insert_query = format!("INSERT INTO test VALUES ({})", i);
let insert_query = format!("INSERT INTO test VALUES ({i})");
run_query(&tmp_db, &conn, &insert_query)?;

let mut current_read_index = 0;
@@ -283,7 +283,7 @@ fn test_wal_checkpoint() -> anyhow::Result<()> {
let conn = tmp_db.connect_limbo();

for i in 0..iterations {
let insert_query = format!("INSERT INTO test VALUES ({})", i);
let insert_query = format!("INSERT INTO test VALUES ({i})");
do_flush(&conn, &tmp_db)?;
conn.checkpoint()?;
run_query(&tmp_db, &conn, &insert_query)?;
@@ -309,10 +309,10 @@ fn test_wal_restart() -> anyhow::Result<()> {
// threshold is 1000 by default

fn insert(i: usize, conn: &Arc<Connection>, tmp_db: &TempDatabase) -> anyhow::Result<()> {
debug!("inserting {}", i);
let insert_query = format!("INSERT INTO test VALUES ({})", i);
debug!("inserting {i}");
let insert_query = format!("INSERT INTO test VALUES ({i})");
run_query(tmp_db, conn, &insert_query)?;
debug!("inserted {}", i);
debug!("inserted {i}");
tmp_db.io.run_once()?;
Ok(())
}

@@ -324,7 +324,7 @@ fn test_wal_restart() -> anyhow::Result<()> {
run_query_on_row(tmp_db, conn, list_query, |row: &Row| {
assert!(count.is_none());
count = Some(row.get::<i64>(0).unwrap() as usize);
debug!("counted {:?}", count);
debug!("counted {count:?}");
})?;
Ok(count.unwrap())
}
@@ -372,15 +372,15 @@ fn test_write_delete_with_index() -> anyhow::Result<()> {
let list_query = "SELECT * FROM test";
let max_iterations = 1000;
for i in 0..max_iterations {
println!("inserting {} ", i);
let insert_query = format!("INSERT INTO test VALUES ({})", i);
println!("inserting {i} ");
let insert_query = format!("INSERT INTO test VALUES ({i})");
run_query(&tmp_db, &conn, &insert_query)?;
}
for i in 0..max_iterations {
println!("deleting {} ", i);
let delete_query = format!("delete from test where x={}", i);
println!("deleting {i} ");
let delete_query = format!("delete from test where x={i}");
run_query(&tmp_db, &conn, &delete_query)?;
println!("listing after deleting {} ", i);
println!("listing after deleting {i} ");
let mut current_read_index = i + 1;
run_query_on_row(&tmp_db, &conn, list_query, |row: &Row| {
let first_value = row.get::<&Value>(0).expect("missing id");
@@ -397,7 +397,7 @@ fn test_write_delete_with_index() -> anyhow::Result<()> {
run_query_on_row(
&tmp_db,
&conn,
&format!("select * from test where x = {}", i),
&format!("select * from test where x = {i}"),
|row| {
let first_value = row.get::<&Value>(0).expect("missing id");
let id = match first_value {
@@ -530,8 +530,13 @@ fn check_integrity_is_ok(tmp_db: TempDatabase, conn: Arc<Connection>) -> Result<
}

enum ConnectionState {
PrepareQuery { query_idx: usize },
ExecuteQuery { query_idx: usize, stmt: Statement },
PrepareQuery {
query_idx: usize,
},
ExecuteQuery {
query_idx: usize,
stmt: Box<Statement>,
},
Done,
}

@@ -552,7 +557,7 @@ impl ConnectionPlan {
}
let query = &self.queries[*query_idx];
tracing::info!("preparing {}", query);
let stmt = self.conn.query(query)?.unwrap();
let stmt = Box::new(self.conn.query(query)?.unwrap());
self.state = ConnectionState::ExecuteQuery {
query_idx: *query_idx,
stmt,
@@ -723,8 +728,7 @@ fn test_wal_bad_frame() -> anyhow::Result<()> {

assert!(
panic_msg.contains("WAL frame checksum mismatch."),
"Expected panic message not found. Got: {}",
panic_msg
"Expected panic message not found. Got: {panic_msg}"
);
}
Ok(_) => panic!("Expected query to panic, but it succeeded"),
@@ -798,7 +802,7 @@ fn run_query_core(
},
Ok(None) => {}
Err(err) => {
eprintln!("{}", err);
eprintln!("{err}");
}
};
Ok(())

@@ -64,7 +64,7 @@ fn test_wal_1_writer_1_reader() -> Result<()> {
let writer_thread = std::thread::spawn(move || {
let conn = tmp_db_w.connect().unwrap();
for i in 0..ROWS_WRITE {
conn.execute(format!("INSERT INTO t values({})", i).as_str())
conn.execute(format!("INSERT INTO t values({i})").as_str())
.unwrap();
let mut rows = rows_.lock().unwrap();
*rows += 1;
@@ -95,7 +95,7 @@ fn test_wal_1_writer_1_reader() -> Result<()> {
},
Ok(None) => {}
Err(err) => {
eprintln!("{}", err);
eprintln!("{err}");
}
}
if rows == ROWS_WRITE {

@@ -78,7 +78,7 @@ fn build_keyword_map(
fn write_entry(writer: &mut impl Write, entry: &PathEntry) -> Result<()> {
if let Some(result) = entry.result {
writeln!(writer, "if idx == buf.len() {{")?;
writeln!(writer, "return Some(TokenType::{});", result)?;
writeln!(writer, "return Some(TokenType::{result});")?;
writeln!(writer, "}}")?;
}

@@ -96,7 +96,7 @@ fn build_keyword_map(
if b.is_ascii_alphabetic() {
writeln!(writer, "{} | {} => {{", b, b.to_ascii_lowercase())?;
} else {
writeln!(writer, "{} => {{", b)?;
writeln!(writer, "{b} => {{")?;
}
writeln!(writer, "idx += 1;")?;
write_entry(writer, sub_entry)?;
@@ -110,19 +110,16 @@ fn build_keyword_map(

writeln!(
writer,
"pub(crate) const MAX_KEYWORD_LEN: usize = {};",
max_len
"pub(crate) const MAX_KEYWORD_LEN: usize = {max_len};"
|
||||
)?;
writeln!(
writer,
"pub(crate) const MIN_KEYWORD_LEN: usize = {};",
min_len
"pub(crate) const MIN_KEYWORD_LEN: usize = {min_len};"
)?;
writeln!(writer, "/// Check if `word` is a keyword")?;
writeln!(
writer,
"pub fn {}(buf: &[u8]) -> Option<TokenType> {{",
func_name
"pub fn {func_name}(buf: &[u8]) -> Option<TokenType> {{"
)?;
writeln!(
writer,

@@ -690,8 +690,7 @@ fn find_end_of_number(
if test(&b) {
continue;
} else if b == b'_' {
if j >= 1 && data.get(j - 1).map_or(false, test) && data.get(j + 1).map_or(false, test)
{
if j >= 1 && data.get(j - 1).is_some_and(test) && data.get(j + 1).is_some_and(test) {
continue;
}
return Err(Error::BadNumber(None, None, Some(j), unsafe {

@@ -1100,7 +1100,6 @@ impl TryFrom<&[u8]> for JoinType {
|
||||
/// `JOIN` constraint
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
|
||||
pub enum JoinConstraint {
|
||||
/// `ON`
|
||||
On(Expr),
|
||||
|
||||
@@ -41,7 +41,7 @@
trace!(target: TARGET, "incomplete input");
self.ctx.error = Some(ParserError::UnexpectedEof);
} else {
trace!(target: TARGET, "near \"{:?}\": syntax error", yyminor);
trace!(target: TARGET, "near \"{yyminor:?}\": syntax error");
self.ctx.error = Some(ParserError::SyntaxError(from_bytes(yyminor.1)));
}
}

@@ -299,7 +299,7 @@ impl Display for ast::Operator {
Self::RightShift => ">>",
Self::Subtract => "-",
};
write!(f, "{}", value)
write!(f, "{value}")
}
}

@@ -94,7 +94,7 @@ impl ToSqlString for ast::ColumnConstraint {
// nullable should always be true here
format!(
"NOT NULL{}",
conflict_clause.map_or("".to_string(), |conflict| format!(" {}", conflict))
conflict_clause.map_or("".to_string(), |conflict| format!(" {conflict}"))
)
}
Self::PrimaryKey {

@@ -104,15 +104,15 @@ impl ToSqlString for ast::ColumnConstraint {
} => {
format!(
"PRIMARY KEY{}{}{}",
order.map_or("".to_string(), |order| format!(" {}", order)),
conflict_clause.map_or("".to_string(), |conflict| format!(" {}", conflict)),
order.map_or("".to_string(), |order| format!(" {order}")),
conflict_clause.map_or("".to_string(), |conflict| format!(" {conflict}")),
auto_increment.then_some(" AUTOINCREMENT").unwrap_or("")
)
}
Self::Unique(conflict_clause) => {
format!(
"UNIQUE{}",
conflict_clause.map_or("".to_string(), |conflict| format!(" {}", conflict))
conflict_clause.map_or("".to_string(), |conflict| format!(" {conflict}"))
)
}
}

@@ -149,7 +149,7 @@ impl Display for ast::ForeignKeyClause {
"".to_string()
}
);
write!(f, "{}", value)
write!(f, "{value}")
}
}

@@ -157,13 +157,13 @@ impl Display for ast::RefArg {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let value = match self {
Self::Match(name) => format!("MATCH {}", name.0),
Self::OnDelete(act) => format!("ON DELETE {}", act),
Self::OnUpdate(act) => format!("ON UPDATE {}", act),
Self::OnDelete(act) => format!("ON DELETE {act}"),
Self::OnUpdate(act) => format!("ON UPDATE {act}"),
Self::OnInsert(..) => unimplemented!(
"On Insert does not exist in SQLite: https://www.sqlite.org/lang_altertable.html"
),
};
write!(f, "{}", value)
write!(f, "{value}")
}
}

@@ -176,7 +176,7 @@ impl Display for ast::RefAct {
Self::SetDefault => "SET DEFAULT",
Self::SetNull => "SET NULL",
};
write!(f, "{}", value)
write!(f, "{value}")
}
}

@@ -198,7 +198,7 @@ impl Display for ast::DeferSubclause {
""
}
);
write!(f, "{}", value)
write!(f, "{value}")
}
}
#[cfg(test)]

@@ -82,7 +82,7 @@ impl ToSqlString for ast::TableConstraint {
.map(|col| col.to_sql_string(context))
.collect::<Vec<_>>()
.join(", "),
conflict_clause.map_or("".to_string(), |conflict| format!(" {}", conflict)),
conflict_clause.map_or("".to_string(), |conflict| format!(" {conflict}")),
auto_increment.then_some(" AUTOINCREMENT").unwrap_or("")
),
Self::Unique {

@@ -95,7 +95,7 @@ impl ToSqlString for ast::TableConstraint {
.map(|col| col.to_sql_string(context))
.collect::<Vec<_>>()
.join(", "),
conflict_clause.map_or("".to_string(), |conflict| format!(" {}", conflict))
conflict_clause.map_or("".to_string(), |conflict| format!(" {conflict}"))
),
}
}

@@ -16,8 +16,7 @@ impl ToSqlString for ast::CreateTrigger {
""
},
self.trigger_name.to_sql_string(context),
self.time
.map_or("".to_string(), |time| format!(" {}", time)),
self.time.map_or("".to_string(), |time| format!(" {time}")),
self.event,
self.tbl_name.to_sql_string(context),
if self.for_each_row {

@@ -106,7 +105,7 @@ impl ToSqlString for ast::TriggerCmdInsert {
format!(
"INSERT {}INTO {} {}{}{}{}",
self.or_conflict
.map_or("".to_string(), |conflict| format!("OR {} ", conflict)),
.map_or("".to_string(), |conflict| format!("OR {conflict} ")),
self.tbl_name.0,
self.col_names
.as_ref()

@@ -223,7 +222,7 @@ impl ToSqlString for ast::TriggerCmdUpdate {
format!(
"UPDATE {}{} SET {}{}{}",
self.or_conflict
.map_or("".to_string(), |conflict| format!("OR {}", conflict)),
.map_or("".to_string(), |conflict| format!("OR {conflict}")),
self.tbl_name.0, // TODO: should be a qualified table name,
self.sets
.iter()

@@ -11,7 +11,7 @@ impl ToSqlString for ast::Delete {
self.tbl_name.to_sql_string(context),
self.indexed
.as_ref()
.map_or("".to_string(), |indexed| format!(" {}", indexed)),
.map_or("".to_string(), |indexed| format!(" {indexed}")),
self.where_clause
.as_ref()
.map_or("".to_string(), |expr| format!(

@@ -9,7 +9,7 @@ impl ToSqlString for ast::Insert {
with.to_sql_string(context)
)),
self.or_conflict
.map_or("".to_string(), |conflict| format!("OR {} ", conflict)),
.map_or("".to_string(), |conflict| format!("OR {conflict} ")),
self.tbl_name.to_sql_string(context),
self.columns
.as_ref()

@@ -49,7 +49,7 @@ impl ToSqlString for ast::Stmt {
ast::TransactionType::Exclusive => " EXCLUSIVE",
ast::TransactionType::Immediate => " IMMEDIATE",
});
format!("BEGIN{};", t_type)
format!("BEGIN{t_type};")
}
// END or COMMIT are equivalent here, so just defaulting to COMMIT
// TODO: again there are no names in the docs

@@ -21,7 +21,7 @@ impl ToSqlString for ast::Select {
.map(|col| col.to_sql_string(context))
.collect::<Vec<_>>()
.join(", ");
ret.push(format!("ORDER BY {}", joined_cols));
ret.push(format!("ORDER BY {joined_cols}"));
}
if let Some(limit) = &self.limit {
ret.push(limit.to_sql_string(context));

@@ -65,11 +65,11 @@ impl ToSqlString for ast::OneSelect {
.map(|e| e.to_sql_string(context))
.collect::<Vec<_>>()
.join(", ");
format!("({})", joined_value)
format!("({joined_value})")
})
.collect::<Vec<_>>()
.join(", ");
format!("VALUES {}", joined_values)
format!("VALUES {joined_values}")
}
}
}

@@ -239,7 +239,7 @@ impl ToSqlString for ast::CommonTableExpr {
.collect::<Vec<_>>()
.join(", ");

ret.push(format!("({})", joined_cols));
ret.push(format!("({joined_cols})"));
}
ret.push(format!(
"AS {}({})",

@@ -296,7 +296,7 @@ impl Display for ast::Materialized {
Self::No => "NOT MATERIALIZED",
Self::Yes => "MATERIALIZED",
};
write!(f, "{}", value)
write!(f, "{value}")
}
}

@@ -360,7 +360,7 @@ impl Display for ast::JoinOperator {
Self::TypedJoin(join) => {
let join_keyword = "JOIN";
if let Some(join) = join {
format!("{} {}", join, join_keyword)
format!("{join} {join_keyword}")
} else {
join_keyword.to_string()
}

@@ -400,7 +400,7 @@ impl Display for ast::JoinType {
}
modifiers.join(" ")
};
write!(f, "{}", value)
write!(f, "{value}")
}
}

@@ -416,7 +416,7 @@ impl ToSqlString for ast::JoinConstraint {
.map(|col| col.0.clone())
.collect::<Vec<_>>()
.join(",");
format!("USING ({})", joined_names)
format!("USING ({joined_names})")
}
}
}

@@ -457,7 +457,7 @@ impl ToSqlString for ast::Window {
.map(|e| e.to_sql_string(context))
.collect::<Vec<_>>()
.join(",");
ret.push(format!("PARTITION BY {}", joined_exprs));
ret.push(format!("PARTITION BY {joined_exprs}"));
}
if let Some(order_by) = &self.order_by {
let joined_cols = order_by

@@ -465,7 +465,7 @@ impl ToSqlString for ast::Window {
.map(|col| col.to_sql_string(context))
.collect::<Vec<_>>()
.join(", ");
ret.push(format!("ORDER BY {}", joined_cols));
ret.push(format!("ORDER BY {joined_cols}"));
}
if let Some(frame_claue) = &self.frame_clause {
ret.push(frame_claue.to_sql_string(context));

@@ -523,7 +523,7 @@ impl Display for ast::FrameExclude {
Self::NoOthers => "NO OTHERS",
Self::Ties => "TIES",
};
format!("EXCLUDE {}", clause)
format!("EXCLUDE {clause}")
})
}
}

@@ -9,11 +9,11 @@ impl ToSqlString for ast::Update {
with.to_sql_string(context)
)),
self.or_conflict
.map_or("".to_string(), |conflict| format!("OR {} ", conflict)),
.map_or("".to_string(), |conflict| format!("OR {conflict} ")),
self.tbl_name.to_sql_string(context),
self.indexed
.as_ref()
.map_or("".to_string(), |indexed| format!(" {}", indexed)),
.map_or("".to_string(), |indexed| format!(" {indexed}")),
self.sets
.iter()
.map(|set| set.to_sql_string(context))