From 28cde537a883f977296dfdaedaee7e8ee944a68b Mon Sep 17 00:00:00 2001 From: alpaylan Date: Fri, 17 Jan 2025 01:28:37 +0300 Subject: [PATCH 01/34] this commit; - makes interaction plans serializable - fixes the shadowing bug where non-created tables were assumed to be created in the shadow tables map - makes small changes to make clippy happy - reorganizes simulation running flow to remove unnecessary plan regenerations while shrinking and double checking --- Cargo.lock | 14 ++-- simulator/Cargo.toml | 2 + simulator/generation/plan.rs | 48 +++++++------ simulator/generation/property.rs | 7 +- simulator/main.rs | 119 +++++++++++++------------------ simulator/model/query.rs | 57 +++++++++++++-- simulator/model/table.rs | 12 ++-- simulator/runner/cli.rs | 1 + simulator/runner/env.rs | 1 + simulator/runner/execution.rs | 5 +- 10 files changed, 154 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9cbce1207..03266c68f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1284,6 +1284,8 @@ dependencies = [ "log", "rand", "rand_chacha", + "serde", + "serde_json", "tempfile", ] @@ -2128,18 +2130,18 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", @@ -2148,9 +2150,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "indexmap", "itoa", diff --git a/simulator/Cargo.toml b/simulator/Cargo.toml index 31a54f1e6..9967c96ee 100644 --- a/simulator/Cargo.toml +++ b/simulator/Cargo.toml @@ -23,3 +23,5 @@ tempfile = "3.0.7" env_logger = "0.10.1" anarchist-readable-name-generator-lib = "0.1.2" clap = { version = "4.5", features = ["derive"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = { version = "1.0" } diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 9acef25ad..135cdfee3 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -1,6 +1,7 @@ use std::{fmt::Display, rc::Rc, vec}; use limbo_core::{Connection, Result, StepResult}; +use serde::{Deserialize, Serialize}; use crate::{ model::{ @@ -19,7 +20,7 @@ use super::{ pub(crate) type ResultSet = Result>>; -#[derive(Clone)] +#[derive(Clone, Serialize, Deserialize)] pub(crate) struct InteractionPlan { pub(crate) plan: Vec, } @@ -30,7 +31,7 @@ pub(crate) struct InteractionPlanState { pub(crate) secondary_pointer: usize, } -#[derive(Clone)] +#[derive(Clone, Serialize, Deserialize)] pub(crate) enum Interactions { Property(Property), Query(Query), @@ -178,7 +179,7 @@ pub(crate) struct Assertion { pub(crate) message: String, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) 
enum Fault { Disconnect, } @@ -195,6 +196,29 @@ impl Interactions { pub(crate) fn shadow(&self, env: &mut SimulatorEnv) { match self { Interactions::Property(property) => { + match property { + Property::InsertSelect { + insert, + row_index: _, + queries, + select, + } => { + insert.shadow(env); + for query in queries { + query.shadow(env); + } + select.shadow(env); + } + Property::DoubleCreateFailure { create, queries } => { + if env.tables.iter().any(|t| t.name == create.table.name) { + return; + } + create.shadow(env); + for query in queries { + query.shadow(env); + } + } + } for interaction in property.interactions() { match interaction { Interaction::Query(query) => match query { @@ -220,23 +244,7 @@ impl Interactions { } } } - Interactions::Query(query) => match query { - Query::Create(create) => { - if !env.tables.iter().any(|t| t.name == create.table.name) { - env.tables.push(create.table.clone()); - } - } - Query::Insert(insert) => { - let table = env - .tables - .iter_mut() - .find(|t| t.name == insert.table) - .unwrap(); - table.rows.extend(insert.values.clone()); - } - Query::Delete(_) => todo!(), - Query::Select(_) => {} - }, + Interactions::Query(query) => query.shadow(env), Interactions::Fault(_) => {} } } diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index cae2a4145..2a7f57cb4 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -1,4 +1,5 @@ use limbo_core::LimboError; +use serde::{Deserialize, Serialize}; use crate::{ model::{ @@ -16,7 +17,7 @@ use super::{ /// Properties are representations of executable specifications /// about the database behavior. -#[derive(Clone)] +#[derive(Clone, Serialize, Deserialize)] pub(crate) enum Property { /// Insert-Select is a property in which the inserted row /// must be in the resulting rows of a select query that has a @@ -205,7 +206,7 @@ fn property_insert_select( .collect::>(); // Pick a random row to select - let row_index = pick_index(rows.len(), rng).clone(); + let row_index = pick_index(rows.len(), rng); let row = rows[row_index].clone(); // Insert the rows @@ -228,7 +229,7 @@ fn property_insert_select( predicate, }) => { // The inserted row will not be deleted. 
- if t == &table.name && predicate.test(&row, &table) { + if t == &table.name && predicate.test(&row, table) { continue; } } diff --git a/simulator/main.rs b/simulator/main.rs index 680249d6a..c53552386 100644 --- a/simulator/main.rs +++ b/simulator/main.rs @@ -97,32 +97,26 @@ fn main() -> Result<(), String> { log::error!("captured backtrace:\n{}", bt); })); + let (env, plans) = setup_simulation(seed, &cli_opts, &paths.db, &paths.plan); + let env = Arc::new(Mutex::new(env)); let result = SandboxedResult::from( std::panic::catch_unwind(|| { - run_simulation( - seed, - &cli_opts, - &paths.db, - &paths.plan, - last_execution.clone(), - None, - ) + run_simulation(env.clone(), &mut plans.clone(), last_execution.clone()) }), last_execution.clone(), ); if cli_opts.doublecheck { + { + let mut env_ = env.lock().unwrap(); + env_.db = Database::open_file(env_.io.clone(), paths.doublecheck_db.to_str().unwrap()) + .unwrap(); + } + // Run the simulation again let result2 = SandboxedResult::from( std::panic::catch_unwind(|| { - run_simulation( - seed, - &cli_opts, - &paths.doublecheck_db, - &paths.plan, - last_execution.clone(), - None, - ) + run_simulation(env.clone(), &mut plans.clone(), last_execution.clone()) }), last_execution.clone(), ); @@ -202,18 +196,24 @@ fn main() -> Result<(), String> { if cli_opts.shrink { log::info!("Starting to shrink"); - let shrink = Some(last_execution); + + let shrunk_plans = plans + .iter() + .map(|plan| { + let shrunk = plan.shrink_interaction_plan(last_execution); + log::info!("{}", shrunk.stats()); + shrunk + }) + .collect::>(); + let last_execution = Arc::new(Mutex::new(*last_execution)); let shrunk = SandboxedResult::from( std::panic::catch_unwind(|| { run_simulation( - seed, - &cli_opts, - &paths.shrunk_db, - &paths.shrunk_plan, + env.clone(), + &mut shrunk_plans.clone(), last_execution.clone(), - shrink, ) }), last_execution, @@ -270,28 +270,6 @@ fn main() -> Result<(), String> { Ok(()) } -fn move_db_and_plan_files(output_dir: &Path) { - let old_db_path = output_dir.join("simulator.db"); - let old_plan_path = output_dir.join("simulator.plan"); - - let new_db_path = output_dir.join("simulator_double.db"); - let new_plan_path = output_dir.join("simulator_double.plan"); - - std::fs::rename(&old_db_path, &new_db_path).unwrap(); - std::fs::rename(&old_plan_path, &new_plan_path).unwrap(); -} - -fn revert_db_and_plan_files(output_dir: &Path) { - let old_db_path = output_dir.join("simulator.db"); - let old_plan_path = output_dir.join("simulator.plan"); - - let new_db_path = output_dir.join("simulator_double.db"); - let new_plan_path = output_dir.join("simulator_double.plan"); - - std::fs::rename(&new_db_path, &old_db_path).unwrap(); - std::fs::rename(&new_plan_path, &old_plan_path).unwrap(); -} - #[derive(Debug)] enum SandboxedResult { Panicked { @@ -345,14 +323,12 @@ impl SandboxedResult { } } -fn run_simulation( +fn setup_simulation( seed: u64, cli_opts: &SimulatorCLI, db_path: &Path, plan_path: &Path, - last_execution: Arc>, - shrink: Option<&Execution>, -) -> ExecutionResult { +) -> (SimulatorEnv, Vec) { let mut rng = ChaCha8Rng::seed_from_u64(seed); let (create_percent, read_percent, write_percent, delete_percent) = { @@ -403,9 +379,32 @@ fn run_simulation( }; log::info!("Generating database interaction plan..."); - let mut plans = (1..=env.opts.max_connections) + + let plans = (1..=env.opts.max_connections) .map(|_| InteractionPlan::arbitrary_from(&mut env.rng.clone(), &mut env)) .collect::>(); + + // todo: for now, we only use 1 connection, so it's safe to use 
the first plan. + let plan = plans[0].clone(); + + let mut f = std::fs::File::create(plan_path).unwrap(); + // todo: create a detailed plan file with all the plans. for now, we only use 1 connection, so it's safe to use the first plan. + f.write_all(plan.to_string().as_bytes()).unwrap(); + let mut f = std::fs::File::create(plan_path.with_extension(".json")).unwrap(); + f.write_all(serde_json::to_string(&plan).unwrap().as_bytes()) + .unwrap(); + + log::info!("{}", plan.stats()); + + log::info!("Executing database interaction plan..."); + (env, plans) +} + +fn run_simulation( + env: Arc>, + plans: &mut [InteractionPlan], + last_execution: Arc>, +) -> ExecutionResult { let mut states = plans .iter() .map(|_| InteractionPlanState { @@ -414,27 +413,9 @@ fn run_simulation( secondary_pointer: 0, }) .collect::>(); + let result = execute_plans(env.clone(), plans, &mut states, last_execution); - let plan = if let Some(failing_execution) = shrink { - // todo: for now, we only use 1 connection, so it's safe to use the first plan. - println!("Interactions Before: {}", plans[0].plan.len()); - let shrunk = plans[0].shrink_interaction_plan(failing_execution); - println!("Interactions After: {}", shrunk.plan.len()); - shrunk - } else { - plans[0].clone() - }; - - let mut f = std::fs::File::create(plan_path).unwrap(); - // todo: create a detailed plan file with all the plans. for now, we only use 1 connection, so it's safe to use the first plan. - f.write_all(plan.to_string().as_bytes()).unwrap(); - - log::info!("{}", plan.stats()); - - log::info!("Executing database interaction plan..."); - - let result = execute_plans(&mut env, &mut plans, &mut states, last_execution); - + let env = env.lock().unwrap(); env.io.print_stats(); log::info!("Simulation completed"); diff --git a/simulator/model/query.rs b/simulator/model/query.rs index 9138b1988..a0b7b33a2 100644 --- a/simulator/model/query.rs +++ b/simulator/model/query.rs @@ -1,8 +1,13 @@ use std::fmt::Display; -use crate::model::table::{Table, Value}; +use serde::{Deserialize, Serialize}; -#[derive(Clone, Debug, PartialEq)] +use crate::{ + model::table::{Table, Value}, + runner::env::SimulatorEnv, +}; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub(crate) enum Predicate { And(Vec), // p1 AND p2 AND p3... AND pn Or(Vec), // p1 OR p2 OR p3... OR pn @@ -83,7 +88,7 @@ impl Display for Predicate { } // This type represents the potential queries on the database. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) enum Query { Create(Create), Select(Select), @@ -108,30 +113,68 @@ impl Query { | Query::Delete(Delete { table, .. 
}) => vec![table.clone()], } } + + pub(crate) fn shadow(&self, env: &mut SimulatorEnv) { + match self { + Query::Create(create) => create.shadow(env), + Query::Insert(insert) => insert.shadow(env), + Query::Delete(delete) => delete.shadow(env), + Query::Select(select) => select.shadow(env), + } + } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct Create { pub(crate) table: Table, } -#[derive(Clone, Debug, PartialEq)] +impl Create { + pub(crate) fn shadow(&self, env: &mut SimulatorEnv) { + if !env.tables.iter().any(|t| t.name == self.table.name) { + env.tables.push(self.table.clone()); + } + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub(crate) struct Select { pub(crate) table: String, pub(crate) predicate: Predicate, } -#[derive(Clone, Debug, PartialEq)] +impl Select { + pub(crate) fn shadow(&self, _env: &mut SimulatorEnv) {} +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub(crate) struct Insert { pub(crate) table: String, pub(crate) values: Vec>, } -#[derive(Clone, Debug, PartialEq)] +impl Insert { + pub(crate) fn shadow(&self, env: &mut SimulatorEnv) { + let table = env + .tables + .iter_mut() + .find(|t| t.name == self.table) + .unwrap(); + table.rows.extend(self.values.clone()); + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub(crate) struct Delete { pub(crate) table: String, pub(crate) predicate: Predicate, } +impl Delete { + pub(crate) fn shadow(&self, _env: &mut SimulatorEnv) { + todo!() + } +} + impl Display for Query { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/simulator/model/table.rs b/simulator/model/table.rs index ab3b003af..10718b7f6 100644 --- a/simulator/model/table.rs +++ b/simulator/model/table.rs @@ -1,5 +1,7 @@ use std::{fmt::Display, ops::Deref}; +use serde::{Deserialize, Serialize}; + pub(crate) struct Name(pub(crate) String); impl Deref for Name { @@ -10,14 +12,14 @@ impl Deref for Name { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct Table { pub(crate) rows: Vec>, pub(crate) name: String, pub(crate) columns: Vec, } -#[allow(dead_code)] -#[derive(Debug, Clone)] + +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct Column { pub(crate) name: String, pub(crate) column_type: ColumnType, @@ -25,7 +27,7 @@ pub(crate) struct Column { pub(crate) unique: bool, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) enum ColumnType { Integer, Float, @@ -44,7 +46,7 @@ impl Display for ColumnType { } } -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub(crate) enum Value { Null, Integer(i64), diff --git a/simulator/runner/cli.rs b/simulator/runner/cli.rs index b4a6d94f1..a36fbaaaf 100644 --- a/simulator/runner/cli.rs +++ b/simulator/runner/cli.rs @@ -51,6 +51,7 @@ impl SimulatorCLI { if self.maximum_size < 1 { return Err("maximum size must be at least 1".to_string()); } + // todo: fix an issue here where if minimum size is not defined, it prevents setting low maximum sizes. 
if self.minimum_size > self.maximum_size { return Err("Minimum size cannot be greater than maximum size".to_string()); } diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index 7edad025f..90e7b446c 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -8,6 +8,7 @@ use crate::model::table::Table; use crate::runner::io::SimulatorIO; +#[derive(Clone)] pub(crate) struct SimulatorEnv { pub(crate) opts: SimulatorOpts, pub(crate) tables: Vec, diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs index 3ac44e894..71765a60a 100644 --- a/simulator/runner/execution.rs +++ b/simulator/runner/execution.rs @@ -55,13 +55,14 @@ impl ExecutionResult { } pub(crate) fn execute_plans( - env: &mut SimulatorEnv, + env: Arc>, plans: &mut [InteractionPlan], states: &mut [InteractionPlanState], last_execution: Arc>, ) -> ExecutionResult { let mut history = ExecutionHistory::new(); let now = std::time::Instant::now(); + let mut env = env.lock().unwrap(); for _tick in 0..env.opts.ticks { // Pick the connection to interact with let connection_index = pick_index(env.connections.len(), &mut env.rng); @@ -77,7 +78,7 @@ pub(crate) fn execute_plans( last_execution.interaction_index = state.interaction_pointer; last_execution.secondary_index = state.secondary_pointer; // Execute the interaction for the selected connection - match execute_plan(env, connection_index, plans, states) { + match execute_plan(&mut env, connection_index, plans, states) { Ok(_) => {} Err(err) => { return ExecutionResult::new(history, Some(err)); From c30e2757b4f8e53b35cd6301575c113970900751 Mon Sep 17 00:00:00 2001 From: alpaylan Date: Fri, 17 Jan 2025 22:04:55 +0300 Subject: [PATCH 02/34] - implement '--load ' flag that loads an interaction plan and executes it instead of generating one from scratch - save a json serialization of the generated plans to `/simulator.plan.json` --- simulator/generation/plan.rs | 3 +- simulator/main.rs | 82 +++++++++++------------------------- simulator/runner/cli.rs | 8 ++++ simulator/runner/env.rs | 58 +++++++++++++++++++++++++ 4 files changed, 93 insertions(+), 58 deletions(-) diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 135cdfee3..0554b7ed0 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -8,7 +8,8 @@ use crate::{ query::{Create, Insert, Query, Select}, table::Value, }, - SimConnection, SimulatorEnv, + runner::env::SimConnection, + SimulatorEnv, }; use crate::generation::{frequency, Arbitrary, ArbitraryFrom}; diff --git a/simulator/main.rs b/simulator/main.rs index c53552386..d73f69185 100644 --- a/simulator/main.rs +++ b/simulator/main.rs @@ -1,15 +1,12 @@ #![allow(clippy::arc_with_non_send_sync, dead_code)] use clap::Parser; -use core::panic; use generation::plan::{InteractionPlan, InteractionPlanState}; use generation::ArbitraryFrom; use limbo_core::Database; use rand::prelude::*; -use rand_chacha::ChaCha8Rng; use runner::cli::SimulatorCLI; -use runner::env::{SimConnection, SimulatorEnv, SimulatorOpts}; +use runner::env::SimulatorEnv; use runner::execution::{execute_plans, Execution, ExecutionHistory, ExecutionResult}; -use runner::io::SimulatorIO; use std::any::Any; use std::backtrace::Backtrace; use std::io::Write; @@ -98,6 +95,7 @@ fn main() -> Result<(), String> { })); let (env, plans) = setup_simulation(seed, &cli_opts, &paths.db, &paths.plan); + let env = Arc::new(Mutex::new(env)); let result = SandboxedResult::from( std::panic::catch_unwind(|| { @@ -261,6 +259,10 @@ fn main() -> Result<(), 
String> { println!("shrunk database path: {:?}", paths.shrunk_db); } println!("simulator plan path: {:?}", paths.plan); + println!( + "simulator plan serialized path: {:?}", + paths.plan.with_extension("plan.json") + ); if cli_opts.shrink { println!("shrunk plan path: {:?}", paths.shrunk_plan); } @@ -329,68 +331,34 @@ fn setup_simulation( db_path: &Path, plan_path: &Path, ) -> (SimulatorEnv, Vec) { - let mut rng = ChaCha8Rng::seed_from_u64(seed); + let mut env = SimulatorEnv::new(seed, cli_opts, db_path); - let (create_percent, read_percent, write_percent, delete_percent) = { - let mut remaining = 100.0; - let read_percent = rng.gen_range(0.0..=remaining); - remaining -= read_percent; - let write_percent = rng.gen_range(0.0..=remaining); - remaining -= write_percent; - let delete_percent = remaining; - - let create_percent = write_percent / 10.0; - let write_percent = write_percent - create_percent; - - (create_percent, read_percent, write_percent, delete_percent) + // todo: the loading works correctly because of a hacky decision + // Rigth now, the plan generation is the only point we use the rng, so the environment doesn't + // even need it. In the future, especially with multi-connections and multi-threading, we might + // use the RNG for more things such as scheduling, so this assumption will fail. When that happens, + // we'll need to reachitect this logic by saving and loading RNG state. + let plans = if let Some(load) = &cli_opts.load { + log::info!("Loading database interaction plan..."); + let plan = std::fs::read_to_string(load).unwrap(); + let plan: InteractionPlan = serde_json::from_str(&plan).unwrap(); + vec![plan] + } else { + log::info!("Generating database interaction plan..."); + (1..=env.opts.max_connections) + .map(|_| InteractionPlan::arbitrary_from(&mut env.rng.clone(), &mut env)) + .collect::>() }; - let opts = SimulatorOpts { - ticks: rng.gen_range(cli_opts.minimum_size..=cli_opts.maximum_size), - max_connections: 1, // TODO: for now let's use one connection as we didn't implement - // correct transactions procesing - max_tables: rng.gen_range(0..128), - create_percent, - read_percent, - write_percent, - delete_percent, - page_size: 4096, // TODO: randomize this too - max_interactions: rng.gen_range(cli_opts.minimum_size..=cli_opts.maximum_size), - max_time_simulation: cli_opts.maximum_time, - }; - let io = Arc::new(SimulatorIO::new(seed, opts.page_size).unwrap()); - - let db = match Database::open_file(io.clone(), db_path.to_str().unwrap()) { - Ok(db) => db, - Err(e) => { - panic!("error opening simulator test file {:?}: {:?}", db_path, e); - } - }; - - let connections = vec![SimConnection::Disconnected; opts.max_connections]; - - let mut env = SimulatorEnv { - opts, - tables: Vec::new(), - connections, - rng, - io, - db, - }; - - log::info!("Generating database interaction plan..."); - - let plans = (1..=env.opts.max_connections) - .map(|_| InteractionPlan::arbitrary_from(&mut env.rng.clone(), &mut env)) - .collect::>(); - // todo: for now, we only use 1 connection, so it's safe to use the first plan. let plan = plans[0].clone(); let mut f = std::fs::File::create(plan_path).unwrap(); // todo: create a detailed plan file with all the plans. for now, we only use 1 connection, so it's safe to use the first plan. 
f.write_all(plan.to_string().as_bytes()).unwrap(); - let mut f = std::fs::File::create(plan_path.with_extension(".json")).unwrap(); + + let json_path = plan_path.with_extension("plan.json"); + let mut f = std::fs::File::create(&json_path).unwrap(); f.write_all(serde_json::to_string(&plan).unwrap().as_bytes()) .unwrap(); diff --git a/simulator/runner/cli.rs b/simulator/runner/cli.rs index a36fbaaaf..d7402d5e2 100644 --- a/simulator/runner/cli.rs +++ b/simulator/runner/cli.rs @@ -41,6 +41,8 @@ pub struct SimulatorCLI { help = "minimize(shrink) the failing counterexample" )] pub shrink: bool, + #[clap(short = 'l', long, help = "load plan from a file")] + pub load: Option, } impl SimulatorCLI { @@ -55,6 +57,12 @@ impl SimulatorCLI { if self.minimum_size > self.maximum_size { return Err("Minimum size cannot be greater than maximum size".to_string()); } + + if let Some(plan_path) = &self.load { + std::fs::File::open(plan_path) + .map_err(|_| format!("Plan file '{}' could not be opened", plan_path))?; + } + Ok(()) } } diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index 90e7b446c..e087cb5c5 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -1,13 +1,17 @@ +use std::path::Path; use std::rc::Rc; use std::sync::Arc; use limbo_core::{Connection, Database}; +use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; use crate::model::table::Table; use crate::runner::io::SimulatorIO; +use super::cli::SimulatorCLI; + #[derive(Clone)] pub(crate) struct SimulatorEnv { pub(crate) opts: SimulatorOpts, @@ -18,6 +22,60 @@ pub(crate) struct SimulatorEnv { pub(crate) rng: ChaCha8Rng, } +impl SimulatorEnv { + pub(crate) fn new(seed: u64, cli_opts: &SimulatorCLI, db_path: &Path) -> Self { + let mut rng = ChaCha8Rng::seed_from_u64(seed); + + let (create_percent, read_percent, write_percent, delete_percent) = { + let mut remaining = 100.0; + let read_percent = rng.gen_range(0.0..=remaining); + remaining -= read_percent; + let write_percent = rng.gen_range(0.0..=remaining); + remaining -= write_percent; + let delete_percent = remaining; + + let create_percent = write_percent / 10.0; + let write_percent = write_percent - create_percent; + + (create_percent, read_percent, write_percent, delete_percent) + }; + + let opts = SimulatorOpts { + ticks: rng.gen_range(cli_opts.minimum_size..=cli_opts.maximum_size), + max_connections: 1, // TODO: for now let's use one connection as we didn't implement + // correct transactions procesing + max_tables: rng.gen_range(0..128), + create_percent, + read_percent, + write_percent, + delete_percent, + page_size: 4096, // TODO: randomize this too + max_interactions: rng.gen_range(cli_opts.minimum_size..=cli_opts.maximum_size), + max_time_simulation: cli_opts.maximum_time, + }; + + let io = Arc::new(SimulatorIO::new(seed, opts.page_size).unwrap()); + + let db = match Database::open_file(io.clone(), db_path.to_str().unwrap()) { + Ok(db) => db, + Err(e) => { + panic!("error opening simulator test file {:?}: {:?}", db_path, e); + } + }; + + let connections = vec![SimConnection::Disconnected; opts.max_connections]; + + SimulatorEnv { + opts, + tables: Vec::new(), + connections, + rng, + io, + db, + } + } +} + #[derive(Clone)] pub(crate) enum SimConnection { Connected(Rc), From e476b9f6970e91206e2aec046a21cf5529dd9a2d Mon Sep 17 00:00:00 2001 From: alpaylan Date: Sat, 18 Jan 2025 23:54:03 +0300 Subject: [PATCH 03/34] implement watch mode - add `--watch` flag - start saving seeds in persistent storage - make a separate version of execution functions that use 
`vector of interaction` instead of `InteractionPlan` --- Cargo.lock | 107 ++++++++++++-- simulator/Cargo.toml | 1 + simulator/generation/plan.rs | 85 +++++++++++- simulator/generation/property.rs | 1 - simulator/main.rs | 231 ++++++++++++++++++++++--------- simulator/model/query.rs | 9 +- simulator/model/table.rs | 20 +++ simulator/runner/cli.rs | 11 ++ simulator/runner/env.rs | 5 + simulator/runner/execution.rs | 8 +- simulator/runner/mod.rs | 1 + simulator/runner/watch.rs | 133 ++++++++++++++++++ 12 files changed, 521 insertions(+), 91 deletions(-) create mode 100644 simulator/runner/watch.rs diff --git a/Cargo.lock b/Cargo.lock index 03266c68f..ab8bf70a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -183,9 +183,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "block-buffer" @@ -735,6 +735,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "filetime" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.59.0", +] + [[package]] name = "findshlibs" version = "0.10.2" @@ -753,6 +765,15 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" +[[package]] +name = "fsevent-sys" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" +dependencies = [ + "libc", +] + [[package]] name = "futures" version = "0.3.31" @@ -1009,6 +1030,26 @@ dependencies = [ "str_stack", ] +[[package]] +name = "inotify" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3" +dependencies = [ + "bitflags 2.8.0", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", +] + [[package]] name = "io-uring" version = "0.6.4" @@ -1125,6 +1166,26 @@ dependencies = [ "chrono", ] +[[package]] +name = "kqueue" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7447f1ca1b7b563588a205fe93dea8df60fd981423a768bc1c0ded35ed147d0c" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -1163,8 +1224,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "libc", + "redox_syscall", ] [[package]] @@ -1282,6 +1344,7 @@ dependencies = [ "env_logger 0.10.2", "limbo_core", "log", + 
"notify", "rand", "rand_chacha", "serde", @@ -1416,6 +1479,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", + "log", "wasi", "windows-sys 0.52.0", ] @@ -1472,7 +1536,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if", "cfg_aliases", "libc", @@ -1488,6 +1552,31 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "notify" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fee8403b3d66ac7b26aee6e40a897d85dc5ce26f44da36b8b73e987cc52e943" +dependencies = [ + "bitflags 2.8.0", + "filetime", + "fsevent-sys", + "inotify", + "kqueue", + "libc", + "log", + "mio", + "notify-types", + "walkdir", + "windows-sys 0.59.0", +] + +[[package]] +name = "notify-types" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d" + [[package]] name = "num-format" version = "0.4.4" @@ -1949,7 +2038,7 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", ] [[package]] @@ -2042,7 +2131,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "fallible-iterator 0.2.0", "fallible-streaming-iterator", "hashlink", @@ -2071,7 +2160,7 @@ version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "errno", "libc", "linux-raw-sys", @@ -2084,7 +2173,7 @@ version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "994eca4bca05c87e86e15d90fc7a91d1be64b4482b38cb2d27474568fe7c9db9" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if", "clipboard-win", "fd-lock", @@ -2228,7 +2317,7 @@ dependencies = [ name = "sqlite3-parser" version = "0.13.0" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cc", "env_logger 0.11.5", "fallible-iterator 0.3.0", diff --git a/simulator/Cargo.toml b/simulator/Cargo.toml index 9967c96ee..43956e5e2 100644 --- a/simulator/Cargo.toml +++ b/simulator/Cargo.toml @@ -25,3 +25,4 @@ anarchist-readable-name-generator-lib = "0.1.2" clap = { version = "4.5", features = ["derive"] } serde = { version = "1.0", features = ["derive"] } serde_json = { version = "1.0" } +notify = "8.0.0" diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 0554b7ed0..8ac7970a9 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -1,4 +1,4 @@ -use std::{fmt::Display, rc::Rc, vec}; +use std::{fmt::Display, path::Path, rc::Rc, vec}; use limbo_core::{Connection, Result, StepResult}; use serde::{Deserialize, Serialize}; @@ -26,6 +26,69 @@ pub(crate) struct InteractionPlan { pub(crate) plan: Vec, } +impl InteractionPlan { + /// Compute via diff computes a a plan from a given `.plan` file without the need to parse + /// sql. 
This is possible because there are two versions of the plan file, one that is human + /// readable and one that is serialized as JSON. Under watch mode, the users will be able to + /// delete interactions from the human readable file, and this function uses the JSON file as + /// a baseline to detect with interactions were deleted and constructs the plan from the + /// remaining interactions. + pub(crate) fn compute_via_diff(plan_path: &Path) -> Vec> { + let interactions = std::fs::read_to_string(plan_path).unwrap(); + let interactions = interactions.lines().collect::>(); + + let plan: InteractionPlan = serde_json::from_str( + std::fs::read_to_string(plan_path.with_extension("plan.json")) + .unwrap() + .as_str(), + ) + .unwrap(); + + let mut plan = plan + .plan + .into_iter() + .map(|i| i.interactions()) + .collect::>(); + + let (mut i, mut j1, mut j2) = (0, 0, 0); + + while i < interactions.len() && j1 < plan.len() { + if interactions[i].starts_with("-- begin") + || interactions[i].starts_with("-- end") + || interactions[i].is_empty() + { + i += 1; + continue; + } + + if interactions[i].contains(plan[j1][j2].to_string().as_str()) { + i += 1; + if j2 + 1 < plan[j1].len() { + j2 += 1; + } else { + j1 += 1; + j2 = 0; + } + } else { + plan[j1].remove(j2); + + if plan[j1].is_empty() { + plan.remove(j1); + j2 = 0; + } + } + } + if j1 < plan.len() { + if j2 < plan[j1].len() { + let _ = plan[j1].split_off(j2); + } + let _ = plan.split_off(j1); + } + + plan + } +} + pub(crate) struct InteractionPlanState { pub(crate) stack: Vec, pub(crate) interaction_pointer: usize, @@ -110,12 +173,12 @@ impl Display for InteractionPlan { match interaction { Interaction::Query(query) => writeln!(f, "{};", query)?, Interaction::Assumption(assumption) => { - writeln!(f, "-- ASSUME: {};", assumption.message)? + writeln!(f, "-- ASSUME {};", assumption.message)? } Interaction::Assertion(assertion) => { - writeln!(f, "-- ASSERT: {};", assertion.message)? + writeln!(f, "-- ASSERT {};", assertion.message)? 
} - Interaction::Fault(fault) => writeln!(f, "-- FAULT: {};", fault)?, + Interaction::Fault(fault) => writeln!(f, "-- FAULT '{}';", fault)?, } } writeln!(f, "-- end testing '{}'", name)?; @@ -162,9 +225,9 @@ impl Display for Interaction { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Query(query) => write!(f, "{}", query), - Self::Assumption(assumption) => write!(f, "ASSUME: {}", assumption.message), - Self::Assertion(assertion) => write!(f, "ASSERT: {}", assertion.message), - Self::Fault(fault) => write!(f, "FAULT: {}", fault), + Self::Assumption(assumption) => write!(f, "ASSUME {}", assumption.message), + Self::Assertion(assertion) => write!(f, "ASSERT {}", assertion.message), + Self::Fault(fault) => write!(f, "FAULT '{}'", fault), } } } @@ -326,6 +389,14 @@ impl ArbitraryFrom<&mut SimulatorEnv> for InteractionPlan { } impl Interaction { + pub(crate) fn shadow(&self, env: &mut SimulatorEnv) { + match self { + Self::Query(query) => query.shadow(env), + Self::Assumption(_) => {} + Self::Assertion(_) => {} + Self::Fault(_) => {} + } + } pub(crate) fn execute_query(&self, conn: &mut Rc) -> ResultSet { if let Self::Query(query) = self { let query_str = query.to_string(); diff --git a/simulator/generation/property.rs b/simulator/generation/property.rs index 2a7f57cb4..bfa1e1ed5 100644 --- a/simulator/generation/property.rs +++ b/simulator/generation/property.rs @@ -104,7 +104,6 @@ impl Property { let assertion = Interaction::Assertion(Assertion { message: format!( - // todo: add the part inserting ({} = {})", "row [{:?}] not found in table {}", row.iter().map(|v| v.to_string()).collect::>(), insert.table, diff --git a/simulator/main.rs b/simulator/main.rs index d73f69185..b274c1dce 100644 --- a/simulator/main.rs +++ b/simulator/main.rs @@ -1,17 +1,20 @@ #![allow(clippy::arc_with_non_send_sync, dead_code)] use clap::Parser; -use generation::plan::{InteractionPlan, InteractionPlanState}; +use generation::plan::{Interaction, InteractionPlan, InteractionPlanState}; use generation::ArbitraryFrom; use limbo_core::Database; +use notify::event::{DataChange, ModifyKind}; +use notify::{EventKind, RecursiveMode, Watcher}; use rand::prelude::*; use runner::cli::SimulatorCLI; use runner::env::SimulatorEnv; use runner::execution::{execute_plans, Execution, ExecutionHistory, ExecutionResult}; +use runner::watch; use std::any::Any; use std::backtrace::Backtrace; use std::io::Write; use std::path::{Path, PathBuf}; -use std::sync::{Arc, Mutex}; +use std::sync::{mpsc, Arc, Mutex}; use tempfile::TempDir; mod generation; @@ -46,6 +49,10 @@ impl Paths { log::info!("shrunk database path: {:?}", paths.shrunk_db); } log::info!("simulator plan path: {:?}", paths.plan); + log::info!( + "simulator plan serialized path: {:?}", + paths.plan.with_extension("plan.json") + ); if shrink { log::info!("shrunk plan path: {:?}", paths.shrunk_plan); } @@ -77,7 +84,85 @@ fn main() -> Result<(), String> { log::info!("seed: {}", seed); let last_execution = Arc::new(Mutex::new(Execution::new(0, 0, 0))); + let (env, plans) = setup_simulation(seed, &cli_opts, &paths.db, &paths.plan); + if cli_opts.watch { + watch_mode(seed, &cli_opts, &paths, last_execution.clone()).unwrap(); + } else { + run_simulator(seed, &cli_opts, &paths, env, plans, last_execution.clone()); + } + + Ok(()) +} + +fn watch_mode( + seed: u64, + cli_opts: &SimulatorCLI, + paths: &Paths, + last_execution: Arc>, +) -> notify::Result<()> { + let (tx, rx) = mpsc::channel::>(); + println!("watching {:?}", paths.plan); + // Use 
recommended_watcher() to automatically select the best implementation + // for your platform. The `EventHandler` passed to this constructor can be a + // closure, a `std::sync::mpsc::Sender`, a `crossbeam_channel::Sender`, or + // another type the trait is implemented for. + let mut watcher = notify::recommended_watcher(tx)?; + + // Add a path to be watched. All files and directories at that path and + // below will be monitored for changes. + watcher.watch(&paths.plan, RecursiveMode::NonRecursive)?; + // Block forever, printing out events as they come in + for res in rx { + match res { + Ok(event) => { + if let EventKind::Modify(ModifyKind::Data(DataChange::Content)) = event.kind { + log::info!("plan file modified, rerunning simulation"); + + let result = SandboxedResult::from( + std::panic::catch_unwind(|| { + let plan: Vec> = + InteractionPlan::compute_via_diff(&paths.plan); + + let mut env = SimulatorEnv::new(seed, cli_opts, &paths.db); + plan.iter().for_each(|is| { + is.iter().for_each(|i| { + i.shadow(&mut env); + }); + }); + let env = Arc::new(Mutex::new(env.clone())); + watch::run_simulation(env, &mut [plan], last_execution.clone()) + }), + last_execution.clone(), + ); + match result { + SandboxedResult::Correct => { + log::info!("simulation succeeded"); + println!("simulation succeeded"); + } + SandboxedResult::Panicked { error, .. } + | SandboxedResult::FoundBug { error, .. } => { + log::error!("simulation failed: '{}'", error); + println!("simulation failed: '{}'", error); + } + } + } + } + Err(e) => println!("watch error: {:?}", e), + } + } + + Ok(()) +} + +fn run_simulator( + seed: u64, + cli_opts: &SimulatorCLI, + paths: &Paths, + env: SimulatorEnv, + plans: Vec, + last_execution: Arc>, +) { std::panic::set_hook(Box::new(move |info| { log::error!("panic occurred"); @@ -94,8 +179,6 @@ fn main() -> Result<(), String> { log::error!("captured backtrace:\n{}", bt); })); - let (env, plans) = setup_simulation(seed, &cli_opts, &paths.db, &paths.plan); - let env = Arc::new(Mutex::new(env)); let result = SandboxedResult::from( std::panic::catch_unwind(|| { @@ -105,65 +188,13 @@ fn main() -> Result<(), String> { ); if cli_opts.doublecheck { - { - let mut env_ = env.lock().unwrap(); - env_.db = Database::open_file(env_.io.clone(), paths.doublecheck_db.to_str().unwrap()) - .unwrap(); - } - - // Run the simulation again - let result2 = SandboxedResult::from( - std::panic::catch_unwind(|| { - run_simulation(env.clone(), &mut plans.clone(), last_execution.clone()) - }), - last_execution.clone(), - ); - - match (result, result2) { - (SandboxedResult::Correct, SandboxedResult::Panicked { .. }) => { - log::error!("doublecheck failed! first run succeeded, but second run panicked."); - } - (SandboxedResult::FoundBug { .. }, SandboxedResult::Panicked { .. }) => { - log::error!( - "doublecheck failed! first run failed an assertion, but second run panicked." - ); - } - (SandboxedResult::Panicked { .. }, SandboxedResult::Correct) => { - log::error!("doublecheck failed! first run panicked, but second run succeeded."); - } - (SandboxedResult::Panicked { .. }, SandboxedResult::FoundBug { .. }) => { - log::error!( - "doublecheck failed! first run panicked, but second run failed an assertion." - ); - } - (SandboxedResult::Correct, SandboxedResult::FoundBug { .. }) => { - log::error!( - "doublecheck failed! first run succeeded, but second run failed an assertion." - ); - } - (SandboxedResult::FoundBug { .. }, SandboxedResult::Correct) => { - log::error!( - "doublecheck failed! 
first run failed an assertion, but second run succeeded." - ); - } - (SandboxedResult::Correct, SandboxedResult::Correct) - | (SandboxedResult::FoundBug { .. }, SandboxedResult::FoundBug { .. }) - | (SandboxedResult::Panicked { .. }, SandboxedResult::Panicked { .. }) => { - // Compare the two database files byte by byte - let db_bytes = std::fs::read(&paths.db).unwrap(); - let doublecheck_db_bytes = std::fs::read(&paths.doublecheck_db).unwrap(); - if db_bytes != doublecheck_db_bytes { - log::error!("doublecheck failed! database files are different."); - } else { - log::info!("doublecheck succeeded! database files are the same."); - } - } - } + doublecheck(env.clone(), paths, &plans, last_execution.clone(), result); } else { // No doublecheck, run shrinking if panicking or found a bug. match &result { SandboxedResult::Correct => { log::info!("simulation succeeded"); + println!("simulation succeeded"); } SandboxedResult::Panicked { error, @@ -191,6 +222,7 @@ fn main() -> Result<(), String> { } log::error!("simulation failed: '{}'", error); + println!("simulation failed: '{}'", error); if cli_opts.shrink { log::info!("Starting to shrink"); @@ -268,8 +300,69 @@ fn main() -> Result<(), String> { } println!("simulator history path: {:?}", paths.history); println!("seed: {}", seed); +} - Ok(()) +fn doublecheck( + env: Arc>, + paths: &Paths, + plans: &[InteractionPlan], + last_execution: Arc>, + result: SandboxedResult, +) { + { + let mut env_ = env.lock().unwrap(); + env_.db = + Database::open_file(env_.io.clone(), paths.doublecheck_db.to_str().unwrap()).unwrap(); + } + + // Run the simulation again + let result2 = SandboxedResult::from( + std::panic::catch_unwind(|| { + run_simulation(env.clone(), &mut plans.to_owned(), last_execution.clone()) + }), + last_execution.clone(), + ); + + match (result, result2) { + (SandboxedResult::Correct, SandboxedResult::Panicked { .. }) => { + log::error!("doublecheck failed! first run succeeded, but second run panicked."); + } + (SandboxedResult::FoundBug { .. }, SandboxedResult::Panicked { .. }) => { + log::error!( + "doublecheck failed! first run failed an assertion, but second run panicked." + ); + } + (SandboxedResult::Panicked { .. }, SandboxedResult::Correct) => { + log::error!("doublecheck failed! first run panicked, but second run succeeded."); + } + (SandboxedResult::Panicked { .. }, SandboxedResult::FoundBug { .. }) => { + log::error!( + "doublecheck failed! first run panicked, but second run failed an assertion." + ); + } + (SandboxedResult::Correct, SandboxedResult::FoundBug { .. }) => { + log::error!( + "doublecheck failed! first run succeeded, but second run failed an assertion." + ); + } + (SandboxedResult::FoundBug { .. }, SandboxedResult::Correct) => { + log::error!( + "doublecheck failed! first run failed an assertion, but second run succeeded." + ); + } + (SandboxedResult::Correct, SandboxedResult::Correct) + | (SandboxedResult::FoundBug { .. }, SandboxedResult::FoundBug { .. }) + | (SandboxedResult::Panicked { .. }, SandboxedResult::Panicked { .. }) => { + // Compare the two database files byte by byte + let db_bytes = std::fs::read(&paths.db).unwrap(); + let doublecheck_db_bytes = std::fs::read(&paths.doublecheck_db).unwrap(); + if db_bytes != doublecheck_db_bytes { + log::error!("doublecheck failed! database files are different."); + } else { + log::info!("doublecheck succeeded! 
database files are the same."); + } + } + } } #[derive(Debug)] @@ -326,11 +419,17 @@ impl SandboxedResult { } fn setup_simulation( - seed: u64, + mut seed: u64, cli_opts: &SimulatorCLI, db_path: &Path, plan_path: &Path, ) -> (SimulatorEnv, Vec) { + if let Some(load) = &cli_opts.load { + let seed_path = PathBuf::from(load).with_extension("seed"); + let seed_str = std::fs::read_to_string(&seed_path).unwrap(); + seed = seed_str.parse().unwrap(); + } + let mut env = SimulatorEnv::new(seed, cli_opts, db_path); // todo: the loading works correctly because of a hacky decision @@ -357,14 +456,16 @@ fn setup_simulation( // todo: create a detailed plan file with all the plans. for now, we only use 1 connection, so it's safe to use the first plan. f.write_all(plan.to_string().as_bytes()).unwrap(); - let json_path = plan_path.with_extension("plan.json"); - let mut f = std::fs::File::create(&json_path).unwrap(); + let serialized_plan_path = plan_path.with_extension("plan.json"); + let mut f = std::fs::File::create(&serialized_plan_path).unwrap(); f.write_all(serde_json::to_string(&plan).unwrap().as_bytes()) .unwrap(); - log::info!("{}", plan.stats()); + let seed_path = plan_path.with_extension("seed"); + let mut f = std::fs::File::create(&seed_path).unwrap(); + f.write_all(seed.to_string().as_bytes()).unwrap(); - log::info!("Executing database interaction plan..."); + log::info!("{}", plan.stats()); (env, plans) } @@ -373,6 +474,8 @@ fn run_simulation( plans: &mut [InteractionPlan], last_execution: Arc>, ) -> ExecutionResult { + log::info!("Executing database interaction plan..."); + let mut states = plans .iter() .map(|_| InteractionPlanState { diff --git a/simulator/model/query.rs b/simulator/model/query.rs index a0b7b33a2..f03bbde6f 100644 --- a/simulator/model/query.rs +++ b/simulator/model/query.rs @@ -154,12 +154,9 @@ pub(crate) struct Insert { impl Insert { pub(crate) fn shadow(&self, env: &mut SimulatorEnv) { - let table = env - .tables - .iter_mut() - .find(|t| t.name == self.table) - .unwrap(); - table.rows.extend(self.values.clone()); + if let Some(t) = env.tables.iter_mut().find(|t| t.name == self.table) { + t.rows.extend(self.values.clone()); + } } } diff --git a/simulator/model/table.rs b/simulator/model/table.rs index 10718b7f6..ff3e2e5bf 100644 --- a/simulator/model/table.rs +++ b/simulator/model/table.rs @@ -46,10 +46,30 @@ impl Display for ColumnType { } } +fn float_to_string(float: &f64, serializer: S) -> Result +where + S: serde::Serializer, +{ + serializer.serialize_str(&format!("{}", float)) +} + +fn string_to_float<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + s.parse().map_err(serde::de::Error::custom) +} + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub(crate) enum Value { Null, Integer(i64), + // we use custom serialization to preserve float precision + #[serde( + serialize_with = "float_to_string", + deserialize_with = "string_to_float" + )] Float(f64), Text(String), Blob(Vec), diff --git a/simulator/runner/cli.rs b/simulator/runner/cli.rs index d7402d5e2..93a14849f 100644 --- a/simulator/runner/cli.rs +++ b/simulator/runner/cli.rs @@ -43,6 +43,12 @@ pub struct SimulatorCLI { pub shrink: bool, #[clap(short = 'l', long, help = "load plan from a file")] pub load: Option, + #[clap( + short = 'w', + long, + help = "enable watch mode that reruns the simulation on file changes" + )] + pub watch: bool, } impl SimulatorCLI { @@ -58,6 +64,11 @@ impl SimulatorCLI { return Err("Minimum size 
cannot be greater than maximum size".to_string()); } + // Make sure uncompatible options are not set + if self.shrink && self.doublecheck { + return Err("Cannot use shrink and doublecheck at the same time".to_string()); + } + if let Some(plan_path) = &self.load { std::fs::File::open(plan_path) .map_err(|_| format!("Plan file '{}' could not be opened", plan_path))?; diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index e087cb5c5..2813b80e8 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -56,6 +56,11 @@ impl SimulatorEnv { let io = Arc::new(SimulatorIO::new(seed, opts.page_size).unwrap()); + // Remove existing database file if it exists + if db_path.exists() { + std::fs::remove_file(db_path).unwrap(); + } + let db = match Database::open_file(io.clone(), db_path.to_str().unwrap()) { Ok(db) => db, Err(e) => { diff --git a/simulator/runner/execution.rs b/simulator/runner/execution.rs index 71765a60a..0be7d58bc 100644 --- a/simulator/runner/execution.rs +++ b/simulator/runner/execution.rs @@ -36,7 +36,7 @@ pub(crate) struct ExecutionHistory { } impl ExecutionHistory { - fn new() -> Self { + pub(crate) fn new() -> Self { Self { history: Vec::new(), } @@ -49,7 +49,7 @@ pub(crate) struct ExecutionResult { } impl ExecutionResult { - fn new(history: ExecutionHistory, error: Option) -> Self { + pub(crate) fn new(history: ExecutionHistory, error: Option) -> Self { Self { history, error } } } @@ -156,14 +156,14 @@ fn execute_plan( /// `execute_interaction` uses this type in conjunction with a result, where /// the `Err` case indicates a full-stop due to a bug, and the `Ok` case /// indicates the next step in the plan. -enum ExecutionContinuation { +pub(crate) enum ExecutionContinuation { /// Default continuation, execute the next interaction. NextInteraction, /// Typically used in the case of preconditions failures, skip to the next property. 
NextProperty, } -fn execute_interaction( +pub(crate) fn execute_interaction( env: &mut SimulatorEnv, connection_index: usize, interaction: &Interaction, diff --git a/simulator/runner/mod.rs b/simulator/runner/mod.rs index 3f014bef0..2eabaef8b 100644 --- a/simulator/runner/mod.rs +++ b/simulator/runner/mod.rs @@ -4,3 +4,4 @@ pub mod execution; #[allow(dead_code)] pub mod file; pub mod io; +pub mod watch; diff --git a/simulator/runner/watch.rs b/simulator/runner/watch.rs new file mode 100644 index 000000000..75ecb1801 --- /dev/null +++ b/simulator/runner/watch.rs @@ -0,0 +1,133 @@ +use std::sync::{Arc, Mutex}; + +use crate::{ + generation::{ + pick_index, + plan::{Interaction, InteractionPlanState}, + }, + runner::execution::ExecutionContinuation, +}; + +use super::{ + env::{SimConnection, SimulatorEnv}, + execution::{execute_interaction, Execution, ExecutionHistory, ExecutionResult}, +}; + +pub(crate) fn run_simulation( + env: Arc>, + plans: &mut [Vec>], + last_execution: Arc>, +) -> ExecutionResult { + let mut states = plans + .iter() + .map(|_| InteractionPlanState { + stack: vec![], + interaction_pointer: 0, + secondary_pointer: 0, + }) + .collect::>(); + let result = execute_plans(env.clone(), plans, &mut states, last_execution); + + let env = env.lock().unwrap(); + env.io.print_stats(); + + log::info!("Simulation completed"); + + result +} + +pub(crate) fn execute_plans( + env: Arc>, + plans: &mut [Vec>], + states: &mut [InteractionPlanState], + last_execution: Arc>, +) -> ExecutionResult { + let mut history = ExecutionHistory::new(); + let now = std::time::Instant::now(); + let mut env = env.lock().unwrap(); + for _tick in 0..env.opts.ticks { + // Pick the connection to interact with + let connection_index = pick_index(env.connections.len(), &mut env.rng); + let state = &mut states[connection_index]; + + history.history.push(Execution::new( + connection_index, + state.interaction_pointer, + state.secondary_pointer, + )); + let mut last_execution = last_execution.lock().unwrap(); + last_execution.connection_index = connection_index; + last_execution.interaction_index = state.interaction_pointer; + last_execution.secondary_index = state.secondary_pointer; + // Execute the interaction for the selected connection + match execute_plan(&mut env, connection_index, plans, states) { + Ok(_) => {} + Err(err) => { + return ExecutionResult::new(history, Some(err)); + } + } + // Check if the maximum time for the simulation has been reached + if now.elapsed().as_secs() >= env.opts.max_time_simulation as u64 { + return ExecutionResult::new( + history, + Some(limbo_core::LimboError::InternalError( + "maximum time for simulation reached".into(), + )), + ); + } + } + + ExecutionResult::new(history, None) +} + +fn execute_plan( + env: &mut SimulatorEnv, + connection_index: usize, + plans: &mut [Vec>], + states: &mut [InteractionPlanState], +) -> limbo_core::Result<()> { + let connection = &env.connections[connection_index]; + let plan = &mut plans[connection_index]; + let state = &mut states[connection_index]; + + if state.interaction_pointer >= plan.len() { + return Ok(()); + } + + let interaction = &plan[state.interaction_pointer][state.secondary_pointer]; + + if let SimConnection::Disconnected = connection { + log::info!("connecting {}", connection_index); + env.connections[connection_index] = SimConnection::Connected(env.db.connect()); + } else { + match execute_interaction(env, connection_index, interaction, &mut state.stack) { + Ok(next_execution) => { + log::debug!("connection {} processed", 
connection_index); + // Move to the next interaction or property + match next_execution { + ExecutionContinuation::NextInteraction => { + if state.secondary_pointer + 1 >= plan[state.interaction_pointer].len() { + // If we have reached the end of the interactions for this property, move to the next property + state.interaction_pointer += 1; + state.secondary_pointer = 0; + } else { + // Otherwise, move to the next interaction + state.secondary_pointer += 1; + } + } + ExecutionContinuation::NextProperty => { + // Skip to the next property + state.interaction_pointer += 1; + state.secondary_pointer = 0; + } + } + } + Err(err) => { + log::error!("error {}", err); + return Err(err); + } + } + } + + Ok(()) +} From c3542196758fed71ac3a975995b58247fca736ca Mon Sep 17 00:00:00 2001 From: Henrik Ingo Date: Mon, 20 Jan 2025 04:49:52 +0200 Subject: [PATCH 04/34] =?UTF-8?q?Add=20Nyrki=C3=B6=20change=20point=20dete?= =?UTF-8?q?ction=20to=20'cargo=20bench'=20workflow?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a separate push-only.yml workflow. For now pull request API wasn't integrated yet, so shouldn't run on PRs. disable cargo color --- .github/workflows/push_only.yml | 42 +++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 .github/workflows/push_only.yml diff --git a/.github/workflows/push_only.yml b/.github/workflows/push_only.yml new file mode 100644 index 000000000..1c10bbb62 --- /dev/null +++ b/.github/workflows/push_only.yml @@ -0,0 +1,42 @@ +name: Benchmarks+Nyrkiö + +# Pull request support isn't integrated to the github-action-benchmark so run only post-merge +on: + push: + branches: [ "main", "master", "notmain", "add-nyrkio" ] + +env: + CARGO_TERM_COLOR: never + +jobs: + bench: + runs-on: ubuntu-latest + environment: test + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-node@v4 + with: + node-version: 20 + # cache: 'npm' + # - name: Install dependencies + # run: npm install && npm run build + + - name: Bench + run: cargo bench 2>&1 | tee output.txt + + - name: Analyze benchmark result with Nyrkiö + uses: nyrkio/github-action-benchmark@HEAD + with: + name: turso + tool: criterion + output-file-path: output.txt + fail-on-alert: true + # Nyrkiö configuration + nyrkio-enable: true + # Get yours from https://nyrkio.com/docs/getting-started + nyrkio-token: ${{ secrets.NYRKIO_JWT_TOKEN }} + + # Old way... + # Explicitly set this to null. We don't want threshold based alerts today. + external-data-json-path: null + gh-repository: null From 667a3f594e4faea8d20e1d006fb2d79680f8a863 Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Wed, 22 Jan 2025 15:02:25 -0300 Subject: [PATCH 05/34] change foreign discord link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5b4b7bd30..85fb2f1ea 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@

-  Chat on Discord
+  Chat on Discord

--- From fcd893284b6a61f55766d4cd779a5c38c1f34819 Mon Sep 17 00:00:00 2001 From: sonhmai <> Date: Thu, 23 Jan 2025 09:51:46 +0700 Subject: [PATCH 06/34] chore: fix typos --- core/storage/page_cache.rs | 2 +- core/storage/wal.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/storage/page_cache.rs b/core/storage/page_cache.rs index e21433af1..27ea8244e 100644 --- a/core/storage/page_cache.rs +++ b/core/storage/page_cache.rs @@ -6,7 +6,7 @@ use super::pager::PageRef; // In limbo, page cache is shared by default, meaning that multiple frames from WAL can reside in // the cache, meaning, we need a way to differentiate between pages cached in different -// connections. For this we include the max_frame that will read a connection from so that if two +// connections. For this we include the max_frame that a connection will read from so that if two // connections have different max_frames, they might or not have different frame read from WAL. // // WAL was introduced after Shared cache in SQLite, so this is why these two features don't work diff --git a/core/storage/wal.rs b/core/storage/wal.rs index 5d40d5a1d..0de3b7590 100644 --- a/core/storage/wal.rs +++ b/core/storage/wal.rs @@ -187,7 +187,7 @@ pub enum CheckpointStatus { // min_frame and max_frame is the range of frames that can be safely transferred from WAL to db // file. // current_page is a helper to iterate through all the pages that might have a frame in the safe -// range. This is inneficient for now. +// range. This is inefficient for now. struct OngoingCheckpoint { page: PageRef, state: CheckpointState, @@ -228,13 +228,13 @@ pub struct WalFileShared { max_frame: u64, nbackfills: u64, // Frame cache maps a Page to all the frames it has stored in WAL in ascending order. - // This is do to easily find the frame it must checkpoint each connection if a checkpoint is + // This is to easily find the frame it must checkpoint each connection if a checkpoint is // necessary. // One difference between SQLite and limbo is that we will never support multi process, meaning // we don't need WAL's index file. So we can do stuff like this without shared memory. - // TODO: this will need refactoring because this is incredible memory inneficient. + // TODO: this will need refactoring because this is incredible memory inefficient. frame_cache: HashMap>, - // Another memory inneficient array made to just keep track of pages that are in frame_cache. + // Another memory inefficient array made to just keep track of pages that are in frame_cache. 
pages_in_frames: Vec, last_checksum: (u32, u32), // Check of last frame in WAL, this is a cumulative checksum over all frames in the WAL file: Rc, From 545990f80633e77f9f9451b540b92a2d7455a206 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Wed, 22 Jan 2025 10:17:59 -0500 Subject: [PATCH 07/34] Support returning column names from prepared statement --- core/lib.rs | 9 +++++++- core/translate/emitter.rs | 12 +++++++++-- core/vdbe/builder.rs | 3 +++ core/vdbe/mod.rs | 1 + tests/integration/common.rs | 42 +++++++++++++++++++++++++++++++++++++ 5 files changed, 64 insertions(+), 3 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index f093762fb..d75510f7c 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -295,7 +295,6 @@ impl Connection { pub(crate) fn run_cmd(self: &Rc, cmd: Cmd) -> Result> { let db = self.db.clone(); let syms: &SymbolTable = &db.syms.borrow(); - match cmd { Cmd::Stmt(stmt) => { let program = Rc::new(translate::translate( @@ -466,6 +465,10 @@ impl Statement { Ok(Rows::new(stmt)) } + pub fn columns(&self) -> &[String] { + &self.program.columns + } + pub fn parameters(&self) -> ¶meters::Parameters { &self.program.parameters } @@ -513,6 +516,10 @@ impl Rows { pub fn next_row(&mut self) -> Result> { self.stmt.step() } + + pub fn columns(&self) -> &[String] { + self.stmt.columns() + } } pub(crate) struct SymbolTable { diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs index 13daa85ed..939a287f0 100644 --- a/core/translate/emitter.rs +++ b/core/translate/emitter.rs @@ -175,7 +175,11 @@ fn emit_program_for_select( // Finalize program epilogue(program, init_label, start_offset)?; - + program.columns = plan + .result_columns + .iter() + .map(|rc| rc.name.clone()) + .collect::>(); Ok(()) } @@ -286,7 +290,11 @@ fn emit_program_for_delete( // Finalize program epilogue(program, init_label, start_offset)?; - + program.columns = plan + .result_columns + .iter() + .map(|rc| rc.name.clone()) + .collect::>(); Ok(()) } diff --git a/core/vdbe/builder.rs b/core/vdbe/builder.rs index 08b35d9e3..0af4d1182 100644 --- a/core/vdbe/builder.rs +++ b/core/vdbe/builder.rs @@ -30,6 +30,7 @@ pub struct ProgramBuilder { // map of instruction index to manual comment (used in EXPLAIN) comments: HashMap, pub parameters: Parameters, + pub columns: Vec, } #[derive(Debug, Clone)] @@ -60,6 +61,7 @@ impl ProgramBuilder { seekrowid_emitted_bitmask: 0, comments: HashMap::new(), parameters: Parameters::new(), + columns: Vec::new(), } } @@ -352,6 +354,7 @@ impl ProgramBuilder { parameters: self.parameters, n_change: Cell::new(0), change_cnt_on, + columns: self.columns, } } } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 98b328b71..27b39fcab 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -284,6 +284,7 @@ pub struct Program { pub auto_commit: bool, pub n_change: Cell, pub change_cnt_on: bool, + pub columns: Vec, } impl Program { diff --git a/tests/integration/common.rs b/tests/integration/common.rs index 86f4b7b3f..07c840b23 100644 --- a/tests/integration/common.rs +++ b/tests/integration/common.rs @@ -67,3 +67,45 @@ pub(crate) fn compare_string(a: &String, b: &String) { } } } + +#[cfg(test)] +mod tests { + use super::TempDatabase; + + #[test] + fn test_statement_columns() -> anyhow::Result<()> { + let _ = env_logger::try_init(); + let tmp_db = + TempDatabase::new("create table test (foo integer, bar integer, baz integer);"); + let conn = tmp_db.connect_limbo(); + + let stmt = conn.prepare("select * from test;")?; + + let columns = stmt.columns(); + assert_eq!(columns.len(), 3); + 
assert_eq!(&columns[0], "foo"); + assert_eq!(&columns[1], "bar"); + assert_eq!(&columns[2], "baz"); + + let stmt = conn.prepare("select foo, bar from test;")?; + + let columns = stmt.columns(); + assert_eq!(columns.len(), 2); + assert_eq!(&columns[0], "foo"); + assert_eq!(&columns[1], "bar"); + + let stmt = conn.prepare("delete from test;")?; + let columns = stmt.columns(); + assert_eq!(columns.len(), 0); + + let stmt = conn.prepare("insert into test (foo, bar, baz) values (1, 2, 3);")?; + let columns = stmt.columns(); + assert_eq!(columns.len(), 0); + + let stmt = conn.prepare("delete from test where foo = 1")?; + let columns = stmt.columns(); + assert_eq!(columns.len(), 0); + + Ok(()) + } +} From f7a8d1b4289d6d6afbf8200d40070dce5e02c720 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Tue, 21 Jan 2025 19:00:25 +0900 Subject: [PATCH 08/34] Change `Java_org_github_tursodatabase_core_LimboStatement_step` to run in loop to handle `StepResult::IO` --- bindings/java/rs_src/limbo_statement.rs | 34 +++++++++++-------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/bindings/java/rs_src/limbo_statement.rs b/bindings/java/rs_src/limbo_statement.rs index cdd8a5c75..54b1fbb27 100644 --- a/bindings/java/rs_src/limbo_statement.rs +++ b/bindings/java/rs_src/limbo_statement.rs @@ -55,25 +55,21 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboStatement_step<'l } }; - match stmt.stmt.step() { - Ok(StepResult::Row(row)) => match row_to_obj_array(&mut env, &row) { - Ok(row) => to_limbo_step_result(&mut env, STEP_RESULT_ID_ROW, Some(row)), - Err(e) => { - set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); - to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None) - } - }, - Ok(StepResult::IO) => match env.new_object_array(0, "java/lang/Object", JObject::null()) { - Ok(row) => to_limbo_step_result(&mut env, STEP_RESULT_ID_IO, Some(row.into())), - Err(e) => { - set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); - to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None) - } - }, - Ok(StepResult::Done) => to_limbo_step_result(&mut env, STEP_RESULT_ID_DONE, None), - Ok(StepResult::Interrupt) => to_limbo_step_result(&mut env, STEP_RESULT_ID_INTERRUPT, None), - Ok(StepResult::Busy) => to_limbo_step_result(&mut env, STEP_RESULT_ID_BUSY, None), - _ => to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None), + loop { + match stmt.stmt.step() { + Ok(StepResult::Row(row)) => match row_to_obj_array(&mut env, &row) { + Ok(row) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_ROW, Some(row)), + Err(e) => { + set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); + return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None) + } + }, + Ok(StepResult::IO) => {}, + Ok(StepResult::Done) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_DONE, None), + Ok(StepResult::Interrupt) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_INTERRUPT, None), + Ok(StepResult::Busy) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_BUSY, None), + _ => return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None), + } } } From 82e9fe02190c2dfbda7208a6eaa496640b9863e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Tue, 21 Jan 2025 19:00:38 +0900 Subject: [PATCH 09/34] Handle invalid step results --- .../org/github/tursodatabase/core/LimboResultSet.java | 5 +++++ .../org/github/tursodatabase/core/LimboStepResult.java | 9 +++++++++ 
.../github/tursodatabase/jdbc4/JDBC4ResultSetTest.java | 3 --- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboResultSet.java b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboResultSet.java index 19d730727..882d2b78b 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboResultSet.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboResultSet.java @@ -64,6 +64,11 @@ public class LimboResultSet { row++; } + if (lastStepResult.isInInvalidState()) { + open = false; + throw new SQLException("step() returned invalid result: " + lastStepResult); + } + pastLastRow = lastStepResult.isDone(); if (pastLastRow) { open = false; diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStepResult.java b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStepResult.java index 7870cbeab..27a8dfc05 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStepResult.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStepResult.java @@ -13,6 +13,7 @@ public class LimboStepResult { private static final int STEP_RESULT_ID_IO = 20; private static final int STEP_RESULT_ID_DONE = 30; private static final int STEP_RESULT_ID_INTERRUPT = 40; + // Indicates that the database file could not be written because of concurrent activity by some other connection private static final int STEP_RESULT_ID_BUSY = 50; private static final int STEP_RESULT_ID_ERROR = 60; @@ -41,6 +42,14 @@ public class LimboStepResult { return stepResultId == STEP_RESULT_ID_DONE; } + public boolean isInInvalidState() { + // current implementation doesn't allow STEP_RESULT_ID_IO to be returned + return stepResultId == STEP_RESULT_ID_IO || + stepResultId == STEP_RESULT_ID_INTERRUPT || + stepResultId == STEP_RESULT_ID_BUSY || + stepResultId == STEP_RESULT_ID_ERROR; + } + @Override public String toString() { return "LimboStepResult{" + diff --git a/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ResultSetTest.java b/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ResultSetTest.java index e717232a8..88a499b9d 100644 --- a/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ResultSetTest.java +++ b/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ResultSetTest.java @@ -9,7 +9,6 @@ import java.util.Properties; import org.github.tursodatabase.TestUtils; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; class JDBC4ResultSetTest { @@ -27,7 +26,6 @@ class JDBC4ResultSetTest { } @Test - @Disabled("https://github.com/tursodatabase/limbo/pull/743#issuecomment-2600746904") void invoking_next_before_the_last_row_should_return_true() throws Exception { stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');"); @@ -41,7 +39,6 @@ class JDBC4ResultSetTest { } @Test - @Disabled("https://github.com/tursodatabase/limbo/pull/743#issuecomment-2600746904") void invoking_next_after_the_last_row_should_return_false() throws Exception { stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');"); From d05ffce613b94995bf678e86ccf95c151346269a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Tue, 21 Jan 2025 19:08:35 +0900 Subject: [PATCH 10/34] Apply fmt --- 
bindings/java/rs_src/limbo_statement.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/bindings/java/rs_src/limbo_statement.rs b/bindings/java/rs_src/limbo_statement.rs index 54b1fbb27..92080f314 100644 --- a/bindings/java/rs_src/limbo_statement.rs +++ b/bindings/java/rs_src/limbo_statement.rs @@ -7,6 +7,7 @@ use jni::JNIEnv; use limbo_core::{Statement, StepResult}; pub const STEP_RESULT_ID_ROW: i32 = 10; +#[allow(dead_code)] pub const STEP_RESULT_ID_IO: i32 = 20; pub const STEP_RESULT_ID_DONE: i32 = 30; pub const STEP_RESULT_ID_INTERRUPT: i32 = 40; @@ -61,13 +62,19 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboStatement_step<'l Ok(row) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_ROW, Some(row)), Err(e) => { set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); - return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None) + return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None); } }, - Ok(StepResult::IO) => {}, - Ok(StepResult::Done) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_DONE, None), - Ok(StepResult::Interrupt) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_INTERRUPT, None), - Ok(StepResult::Busy) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_BUSY, None), + Ok(StepResult::IO) => {} + Ok(StepResult::Done) => { + return to_limbo_step_result(&mut env, STEP_RESULT_ID_DONE, None) + } + Ok(StepResult::Interrupt) => { + return to_limbo_step_result(&mut env, STEP_RESULT_ID_INTERRUPT, None) + } + Ok(StepResult::Busy) => { + return to_limbo_step_result(&mut env, STEP_RESULT_ID_BUSY, None) + } _ => return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None), } } From 36dff168b3f2641a230bc46055d027bb6b071a78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Wed, 22 Jan 2025 08:50:47 +0900 Subject: [PATCH 11/34] Execute io.run_once when receiving StepResult::IO --- bindings/java/rs_src/limbo_connection.rs | 2 +- bindings/java/rs_src/limbo_statement.rs | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/bindings/java/rs_src/limbo_connection.rs b/bindings/java/rs_src/limbo_connection.rs index 1399d8b42..5441acee5 100644 --- a/bindings/java/rs_src/limbo_connection.rs +++ b/bindings/java/rs_src/limbo_connection.rs @@ -69,7 +69,7 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboConnection_prepar }; match connection.conn.prepare(sql) { - Ok(stmt) => LimboStatement::new(stmt).to_ptr(), + Ok(stmt) => LimboStatement::new(stmt, connection.clone()).to_ptr(), Err(e) => { set_err_msg_and_throw_exception( &mut env, diff --git a/bindings/java/rs_src/limbo_statement.rs b/bindings/java/rs_src/limbo_statement.rs index 92080f314..29ecb7cd1 100644 --- a/bindings/java/rs_src/limbo_statement.rs +++ b/bindings/java/rs_src/limbo_statement.rs @@ -1,5 +1,6 @@ use crate::errors::Result; use crate::errors::{LimboError, LIMBO_ETC}; +use crate::limbo_connection::LimboConnection; use crate::utils::set_err_msg_and_throw_exception; use jni::objects::{JObject, JValue}; use jni::sys::jlong; @@ -16,11 +17,12 @@ pub const STEP_RESULT_ID_ERROR: i32 = 60; pub struct LimboStatement { pub(crate) stmt: Statement, + pub(crate) connection: LimboConnection, } impl LimboStatement { - pub fn new(stmt: Statement) -> Self { - LimboStatement { stmt } + pub fn new(stmt: Statement, connection: LimboConnection) -> Self { + LimboStatement { stmt, connection } } pub fn to_ptr(self) -> jlong { @@ -65,7 +67,9 @@ pub extern "system" fn 
Java_org_github_tursodatabase_core_LimboStatement_step<'l return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None); } }, - Ok(StepResult::IO) => {} + Ok(StepResult::IO) => { + stmt.connection.io.run_once().unwrap(); + } Ok(StepResult::Done) => { return to_limbo_step_result(&mut env, STEP_RESULT_ID_DONE, None) } From 0481e692172be53f3f549df72851ff71c7f635d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Fri, 24 Jan 2025 14:07:52 +0900 Subject: [PATCH 12/34] Handle Err case from `connection.io` --- bindings/java/rs_src/limbo_statement.rs | 35 ++++++++++++++----------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/bindings/java/rs_src/limbo_statement.rs b/bindings/java/rs_src/limbo_statement.rs index 29ecb7cd1..4277b599e 100644 --- a/bindings/java/rs_src/limbo_statement.rs +++ b/bindings/java/rs_src/limbo_statement.rs @@ -59,27 +59,32 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboStatement_step<'l }; loop { - match stmt.stmt.step() { - Ok(StepResult::Row(row)) => match row_to_obj_array(&mut env, &row) { - Ok(row) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_ROW, Some(row)), - Err(e) => { + match stmt + .stmt + .step() + .map_err(|_e| to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None)) + .unwrap() + { + StepResult::Row(row) => { + return match row_to_obj_array(&mut env, &row) { + Ok(row) => to_limbo_step_result(&mut env, STEP_RESULT_ID_ROW, Some(row)), + Err(e) => { + set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); + to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None) + } + } + } + StepResult::IO => { + if let Err(e) = stmt.connection.io.run_once() { set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None); } - }, - Ok(StepResult::IO) => { - stmt.connection.io.run_once().unwrap(); } - Ok(StepResult::Done) => { - return to_limbo_step_result(&mut env, STEP_RESULT_ID_DONE, None) - } - Ok(StepResult::Interrupt) => { + StepResult::Done => return to_limbo_step_result(&mut env, STEP_RESULT_ID_DONE, None), + StepResult::Interrupt => { return to_limbo_step_result(&mut env, STEP_RESULT_ID_INTERRUPT, None) } - Ok(StepResult::Busy) => { - return to_limbo_step_result(&mut env, STEP_RESULT_ID_BUSY, None) - } - _ => return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None), + StepResult::Busy => return to_limbo_step_result(&mut env, STEP_RESULT_ID_BUSY, None), } } } From 53586b9d009aba63fac6d08535dd811dd8cedf40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Fri, 24 Jan 2025 15:06:10 +0900 Subject: [PATCH 13/34] Break the loop when step() returns Err --- bindings/java/rs_src/limbo_statement.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/bindings/java/rs_src/limbo_statement.rs b/bindings/java/rs_src/limbo_statement.rs index 4277b599e..7de4b2c19 100644 --- a/bindings/java/rs_src/limbo_statement.rs +++ b/bindings/java/rs_src/limbo_statement.rs @@ -53,18 +53,17 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboStatement_step<'l Ok(stmt) => stmt, Err(e) => { set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); - - return JObject::null(); + return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None); } }; loop { - match stmt - .stmt - .step() - .map_err(|_e| to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None)) - .unwrap() - { + let step_result = match stmt.stmt.step() { + Ok(result) 
=> result, + Err(_) => return to_limbo_step_result(&mut env, STEP_RESULT_ID_ERROR, None), + }; + + match step_result { StepResult::Row(row) => { return match row_to_obj_array(&mut env, &row) { Ok(row) => to_limbo_step_result(&mut env, STEP_RESULT_ID_ROW, Some(row)), From f10b41c5b57585c49dbd019c7181bd207c2fc4e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Fri, 24 Jan 2025 15:43:44 +0900 Subject: [PATCH 14/34] Pass io to statement from db --- bindings/java/rs_src/limbo_connection.rs | 7 ++- bindings/java/rs_src/limbo_db.rs | 45 +++---------------- .../github/tursodatabase/core/LimboDB.java | 28 +++++------- 3 files changed, 22 insertions(+), 58 deletions(-) diff --git a/bindings/java/rs_src/limbo_connection.rs b/bindings/java/rs_src/limbo_connection.rs index 5441acee5..dd54e9087 100644 --- a/bindings/java/rs_src/limbo_connection.rs +++ b/bindings/java/rs_src/limbo_connection.rs @@ -9,16 +9,19 @@ use jni::sys::jlong; use jni::JNIEnv; use limbo_core::Connection; use std::rc::Rc; +use std::sync::Arc; #[derive(Clone)] #[allow(dead_code)] pub struct LimboConnection { + // Because java's LimboConnection is 1:1 mapped to limbo connection, we can use Rc pub(crate) conn: Rc, - pub(crate) io: Rc, + // Because io is shared across multiple `LimboConnection`s, wrap it with Arc + pub(crate) io: Arc, } impl LimboConnection { - pub fn new(conn: Rc, io: Rc) -> Self { + pub fn new(conn: Rc, io: Arc) -> Self { LimboConnection { conn, io } } diff --git a/bindings/java/rs_src/limbo_db.rs b/bindings/java/rs_src/limbo_db.rs index 09d8afa75..16cb3d66b 100644 --- a/bindings/java/rs_src/limbo_db.rs +++ b/bindings/java/rs_src/limbo_db.rs @@ -5,16 +5,16 @@ use jni::objects::{JByteArray, JObject}; use jni::sys::{jint, jlong}; use jni::JNIEnv; use limbo_core::Database; -use std::rc::Rc; use std::sync::Arc; struct LimboDB { db: Arc, + io: Arc, } impl LimboDB { - pub fn new(db: Arc) -> Self { - LimboDB { db } + pub fn new(db: Arc, io: Arc) -> Self { + LimboDB { db, io } } pub fn to_ptr(self) -> jlong { @@ -76,14 +76,13 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboDB_openUtf8<'loca } }; - LimboDB::new(db).to_ptr() + LimboDB::new(db, io).to_ptr() } #[no_mangle] pub extern "system" fn Java_org_github_tursodatabase_core_LimboDB_connect0<'local>( mut env: JNIEnv<'local>, obj: JObject<'local>, - file_path_byte_arr: JByteArray<'local>, db_pointer: jlong, ) -> jlong { let db = match to_limbo_db(db_pointer) { @@ -94,41 +93,7 @@ pub extern "system" fn Java_org_github_tursodatabase_core_LimboDB_connect0<'loca } }; - let path = match env - .convert_byte_array(file_path_byte_arr) - .map_err(|e| e.to_string()) - { - Ok(bytes) => match String::from_utf8(bytes) { - Ok(s) => s, - Err(e) => { - set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); - return 0; - } - }, - Err(e) => { - set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); - return 0; - } - }; - - let io: Rc = match path.as_str() { - ":memory:" => match limbo_core::MemoryIO::new() { - Ok(io) => Rc::new(io), - Err(e) => { - set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); - return 0; - } - }, - _ => match limbo_core::PlatformIO::new() { - Ok(io) => Rc::new(io), - Err(e) => { - set_err_msg_and_throw_exception(&mut env, obj, LIMBO_ETC, e.to_string()); - return 0; - } - }, - }; - let conn = LimboConnection::new(db.db.connect(), io); - + let conn = LimboConnection::new(db.db.connect(), db.io.clone()); conn.to_ptr() } diff --git 
a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDB.java b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDB.java index 89d13b8cf..ad6ee68a0 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDB.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDB.java @@ -1,5 +1,9 @@ package org.github.tursodatabase.core; +import static org.github.tursodatabase.utils.ByteArrayUtils.stringToUtf8ByteArray; + +import java.sql.SQLException; +import java.util.concurrent.locks.ReentrantLock; import org.github.tursodatabase.LimboErrorCode; import org.github.tursodatabase.annotations.NativeInvocation; @@ -8,12 +12,6 @@ import org.github.tursodatabase.utils.LimboExceptionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; -import java.util.concurrent.locks.ReentrantLock; - -import static org.github.tursodatabase.utils.ByteArrayUtils.stringToUtf8ByteArray; - /** * This class provides a thin JNI layer over the SQLite3 C API. */ @@ -39,7 +37,7 @@ public final class LimboDB extends AbstractDB { * Loads the SQLite interface backend. */ public static void load() { - if (isLoaded) return; + if (isLoaded) {return;} try { System.loadLibrary("_limbo_java"); @@ -49,7 +47,7 @@ public final class LimboDB extends AbstractDB { } /** - * @param url e.g. "jdbc:sqlite:fileName + * @param url e.g. "jdbc:sqlite:fileName * @param filePath e.g. path to file */ public static LimboDB create(String url, String filePath) throws SQLException { @@ -86,7 +84,9 @@ public final class LimboDB extends AbstractDB { byte[] filePathBytes = stringToUtf8ByteArray(filePath); if (filePathBytes == null) { - throw LimboExceptionUtils.buildLimboException(LimboErrorCode.LIMBO_ETC.code, "File path cannot be converted to byteArray. File name: " + filePath); + throw LimboExceptionUtils.buildLimboException( + LimboErrorCode.LIMBO_ETC.code, + "File path cannot be converted to byteArray. File name: " + filePath); } dbPointer = openUtf8(filePathBytes, openFlags); @@ -95,14 +95,10 @@ public final class LimboDB extends AbstractDB { @Override public long connect() throws SQLException { - byte[] filePathBytes = stringToUtf8ByteArray(filePath); - if (filePathBytes == null) { - throw LimboExceptionUtils.buildLimboException(LimboErrorCode.LIMBO_ETC.code, "File path cannot be converted to byteArray. File name: " + filePath); - } - return connect0(filePathBytes, dbPointer); + return connect0(dbPointer); } - private native long connect0(byte[] path, long databasePtr) throws SQLException; + private native long connect0(long databasePtr) throws SQLException; @VisibleForTesting native void throwJavaException(int errorCode) throws SQLException; @@ -110,7 +106,7 @@ public final class LimboDB extends AbstractDB { /** * Throws formatted SQLException with error code and message. * - * @param errorCode Error code. + * @param errorCode Error code. * @param errorMessageBytes Error message. */ @NativeInvocation(invokedFrom = "limbo_db.rs") From aff454b5f6318b874b0f5366b6b2e0387d50828b Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Sat, 25 Jan 2025 02:12:50 -0300 Subject: [PATCH 15/34] Implement And bytecode Take the logical AND of the values in registers P1 and P2 and write the result into register P3. If either P1 or P2 is 0 (false) then the result is 0 even if the other input is NULL. A NULL and true or two NULLs give a NULL output. 
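For quick reference, a minimal truth-table sketch of the intended three-valued semantics, written against the exec_and helper this patch adds. It mirrors the unit test included below and assumes the same scope (core/vdbe/insn.rs, where OwnedValue and exec_and are visible); the test name and the specific values are illustrative only:

    // Sketch only: assumes it sits next to exec_and in core/vdbe/insn.rs.
    #[test]
    fn and_truth_table_sketch() {
        use crate::types::OwnedValue;
        // false AND x is false, even when x is NULL
        assert_eq!(exec_and(&OwnedValue::Integer(0), &OwnedValue::Null), OwnedValue::Integer(0));
        // NULL AND true is NULL
        assert_eq!(exec_and(&OwnedValue::Null, &OwnedValue::Integer(1)), OwnedValue::Null);
        // two non-zero operands yield 1 (true)
        assert_eq!(exec_and(&OwnedValue::Integer(1), &OwnedValue::Integer(1)), OwnedValue::Integer(1));
    }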
--- COMPAT.md | 2 +- core/translate/expr.rs | 7 +++++ core/vdbe/explain.rs | 9 +++++++ core/vdbe/insn.rs | 58 ++++++++++++++++++++++++++++++++++++++++++ core/vdbe/mod.rs | 9 +++++-- 5 files changed, 82 insertions(+), 3 deletions(-) diff --git a/COMPAT.md b/COMPAT.md index 17ce24568..b476bc3f0 100644 --- a/COMPAT.md +++ b/COMPAT.md @@ -400,7 +400,7 @@ Modifiers: | AggFinal | Yes | | AggStep | Yes | | AggStep | Yes | -| And | No | +| And | Yes | | AutoCommit | No | | BitAnd | Yes | | BitNot | Yes | diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 476b78d77..d522ddec2 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -605,6 +605,13 @@ pub fn translate_expr( dest: target_register, }); } + ast::Operator::And => { + program.emit_insn(Insn::And { + lhs: e1_reg, + rhs: e2_reg, + dest: target_register, + }); + } ast::Operator::BitwiseAnd => { program.emit_insn(Insn::BitAnd { lhs: e1_reg, diff --git a/core/vdbe/explain.rs b/core/vdbe/explain.rs index 80f419ed7..40d6f25e9 100644 --- a/core/vdbe/explain.rs +++ b/core/vdbe/explain.rs @@ -1120,6 +1120,15 @@ pub fn insn_to_str( 0, format!("r[{}]=r[{}] + r[{}]", dest, lhs, rhs), ), + Insn::And { lhs, rhs, dest } => ( + "And", + *rhs as i32, + *lhs as i32, + *dest as i32, + OwnedValue::build_text(Rc::new("".to_string())), + 0, + format!("r[{}]=(r[{}] && r[{}])", dest, lhs, rhs), + ), }; format!( "{:<4} {:<17} {:<4} {:<4} {:<4} {:<13} {:<2} {}", diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index f3f5e36a7..4e393116c 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -551,6 +551,12 @@ pub enum Insn { rhs: usize, dest: usize, }, + /// Take the logical AND of the values in registers P1 and P2 and write the result into register P3. + And { + lhs: usize, + rhs: usize, + dest: usize, + }, } fn cast_text_to_numerical(value: &str) -> OwnedValue { @@ -955,3 +961,55 @@ pub fn exec_concat(lhs: &OwnedValue, rhs: &OwnedValue) -> OwnedValue { (OwnedValue::Record(_), _) | (_, OwnedValue::Record(_)) => unreachable!(), } } + +pub fn exec_and(mut lhs: &OwnedValue, mut rhs: &OwnedValue) -> OwnedValue { + if let OwnedValue::Agg(agg) = lhs { + lhs = agg.final_value(); + } + if let OwnedValue::Agg(agg) = rhs { + rhs = agg.final_value(); + } + + match (lhs, rhs) { + (_, OwnedValue::Integer(0)) + | (OwnedValue::Integer(0), _) + | (_, OwnedValue::Float(0.0)) + | (OwnedValue::Float(0.0), _) => OwnedValue::Integer(0), + (OwnedValue::Null, _) | (_, OwnedValue::Null) => OwnedValue::Null, + _ => OwnedValue::Integer(1), + } +} + +#[cfg(test)] +mod tests { + use crate::types::OwnedValue; + + use super::exec_and; + + #[test] + fn test_exec_and() { + let inputs = vec![ + (OwnedValue::Integer(0), OwnedValue::Null), + (OwnedValue::Null, OwnedValue::Integer(1)), + (OwnedValue::Null, OwnedValue::Null), + (OwnedValue::Float(0.0), OwnedValue::Null), + (OwnedValue::Integer(1), OwnedValue::Float(2.2)), + ]; + let outpus = vec![ + OwnedValue::Integer(0), + OwnedValue::Null, + OwnedValue::Null, + OwnedValue::Integer(0), + OwnedValue::Integer(1), + ]; + + assert_eq!( + inputs.len(), + outpus.len(), + "Inputs and Outputs should have same size" + ); + for (i, (lhs, rhs)) in inputs.iter().enumerate() { + assert_eq!(exec_and(lhs, rhs), outpus[i]); + } + } +} diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 98b328b71..24eb7657c 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -46,8 +46,8 @@ use crate::{ use crate::{resolve_ext_path, Connection, Result, Rows, TransactionState, DATABASE_VERSION}; use datetime::{exec_date, exec_datetime_full, 
exec_julianday, exec_time, exec_unixepoch}; use insn::{ - exec_add, exec_bit_and, exec_bit_not, exec_bit_or, exec_boolean_not, exec_concat, exec_divide, - exec_multiply, exec_remainder, exec_shift_left, exec_shift_right, exec_subtract, + exec_add, exec_and, exec_bit_and, exec_bit_not, exec_bit_or, exec_boolean_not, exec_concat, + exec_divide, exec_multiply, exec_remainder, exec_shift_left, exec_shift_right, exec_subtract, }; use likeop::{construct_like_escape_arg, exec_glob, exec_like_with_escape}; use rand::distributions::{Distribution, Uniform}; @@ -2346,6 +2346,11 @@ impl Program { exec_concat(&state.registers[*lhs], &state.registers[*rhs]); state.pc += 1; } + Insn::And { lhs, rhs, dest } => { + state.registers[*dest] = + exec_and(&state.registers[*lhs], &state.registers[*rhs]); + state.pc += 1; + } } } } From e7d95399e347dbe115cd43f346fcd6f2c02c200e Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Sat, 25 Jan 2025 02:54:14 -0300 Subject: [PATCH 16/34] Add Or bytecode Take the logical OR of the values in register P1 and P2 and store the answer in register P3. If either P1 or P2 is nonzero (true) then the result is 1 (true) even if the other input is NULL. A NULL and false or two NULLs give a NULL output. --- COMPAT.md | 2 +- core/translate/expr.rs | 7 ++++ core/vdbe/explain.rs | 9 +++++ core/vdbe/insn.rs | 92 +++++++++++++++++++++++++++++++++++++++++- core/vdbe/mod.rs | 8 +++- 5 files changed, 115 insertions(+), 3 deletions(-) diff --git a/COMPAT.md b/COMPAT.md index b476bc3f0..f7bbb963a 100644 --- a/COMPAT.md +++ b/COMPAT.md @@ -493,7 +493,7 @@ Modifiers: | OpenWrite | No | | OpenWriteAsync | Yes | | OpenWriteAwait | Yes | -| Or | No | +| Or | Yes | | Pagecount | No | | Param | No | | ParseSchema | No | diff --git a/core/translate/expr.rs b/core/translate/expr.rs index d522ddec2..abec8b9a8 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -612,6 +612,13 @@ pub fn translate_expr( dest: target_register, }); } + ast::Operator::Or => { + program.emit_insn(Insn::Or { + lhs: e1_reg, + rhs: e2_reg, + dest: target_register, + }); + } ast::Operator::BitwiseAnd => { program.emit_insn(Insn::BitAnd { lhs: e1_reg, diff --git a/core/vdbe/explain.rs b/core/vdbe/explain.rs index 40d6f25e9..89967608a 100644 --- a/core/vdbe/explain.rs +++ b/core/vdbe/explain.rs @@ -1129,6 +1129,15 @@ pub fn insn_to_str( 0, format!("r[{}]=(r[{}] && r[{}])", dest, lhs, rhs), ), + Insn::Or { lhs, rhs, dest } => ( + "Or", + *rhs as i32, + *lhs as i32, + *dest as i32, + OwnedValue::build_text(Rc::new("".to_string())), + 0, + format!("r[{}]=(r[{}] || r[{}])", dest, lhs, rhs), + ), }; format!( "{:<4} {:<17} {:<4} {:<4} {:<4} {:<13} {:<2} {}", diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index 4e393116c..3fdd02a14 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -557,6 +557,12 @@ pub enum Insn { rhs: usize, dest: usize, }, + /// Take the logical OR of the values in register P1 and P2 and store the answer in register P3. 
+ Or { + lhs: usize, + rhs: usize, + dest: usize, + }, } fn cast_text_to_numerical(value: &str) -> OwnedValue { @@ -980,9 +986,43 @@ pub fn exec_and(mut lhs: &OwnedValue, mut rhs: &OwnedValue) -> OwnedValue { } } +pub fn exec_or(mut lhs: &OwnedValue, mut rhs: &OwnedValue) -> OwnedValue { + if let OwnedValue::Agg(agg) = lhs { + lhs = agg.final_value(); + } + if let OwnedValue::Agg(agg) = rhs { + rhs = agg.final_value(); + } + + match (lhs, rhs) { + (OwnedValue::Null, OwnedValue::Null) + | (OwnedValue::Null, OwnedValue::Float(0.0)) + | (OwnedValue::Float(0.0), OwnedValue::Null) + | (OwnedValue::Null, OwnedValue::Integer(0)) + | (OwnedValue::Integer(0), OwnedValue::Null) => OwnedValue::Null, + (OwnedValue::Float(0.0), OwnedValue::Integer(0)) + | (OwnedValue::Integer(0), OwnedValue::Float(0.0)) + | (OwnedValue::Float(0.0), OwnedValue::Float(0.0)) + | (OwnedValue::Integer(0), OwnedValue::Integer(0)) => OwnedValue::Integer(0), + (OwnedValue::Text(lhs), OwnedValue::Text(rhs)) => exec_or( + &cast_text_to_numerical(&lhs.value), + &cast_text_to_numerical(&rhs.value), + ), + (OwnedValue::Text(text), other) | (other, OwnedValue::Text(text)) => { + exec_or(&cast_text_to_numerical(&text.value), other) + } + _ => OwnedValue::Integer(1), + } +} + #[cfg(test)] mod tests { - use crate::types::OwnedValue; + use std::rc::Rc; + + use crate::{ + types::{LimboText, OwnedValue}, + vdbe::insn::exec_or, + }; use super::exec_and; @@ -1012,4 +1052,54 @@ mod tests { assert_eq!(exec_and(lhs, rhs), outpus[i]); } } + + #[test] + fn test_exec_or() { + let inputs = vec![ + (OwnedValue::Integer(0), OwnedValue::Null), + (OwnedValue::Null, OwnedValue::Integer(1)), + (OwnedValue::Null, OwnedValue::Null), + (OwnedValue::Float(0.0), OwnedValue::Null), + (OwnedValue::Integer(1), OwnedValue::Float(2.2)), + (OwnedValue::Float(0.0), OwnedValue::Integer(0)), + ( + OwnedValue::Integer(0), + OwnedValue::Text(LimboText::new(Rc::new("string".to_string()))), + ), + ( + OwnedValue::Integer(0), + OwnedValue::Text(LimboText::new(Rc::new("1".to_string()))), + ), + ( + OwnedValue::Integer(0), + OwnedValue::Text(LimboText::new(Rc::new("".to_string()))), + ), + ]; + let outpus = vec![ + OwnedValue::Null, + OwnedValue::Integer(1), + OwnedValue::Null, + OwnedValue::Null, + OwnedValue::Integer(1), + OwnedValue::Integer(0), + OwnedValue::Integer(0), + OwnedValue::Integer(1), + OwnedValue::Integer(0), + ]; + + assert_eq!( + inputs.len(), + outpus.len(), + "Inputs and Outputs should have same size" + ); + for (i, (lhs, rhs)) in inputs.iter().enumerate() { + assert_eq!( + exec_or(lhs, rhs), + outpus[i], + "Wrong OR for lhs: {}, rhs: {}", + lhs, + rhs + ); + } + } } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 24eb7657c..2c4dd89fb 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -47,7 +47,8 @@ use crate::{resolve_ext_path, Connection, Result, Rows, TransactionState, DATABA use datetime::{exec_date, exec_datetime_full, exec_julianday, exec_time, exec_unixepoch}; use insn::{ exec_add, exec_and, exec_bit_and, exec_bit_not, exec_bit_or, exec_boolean_not, exec_concat, - exec_divide, exec_multiply, exec_remainder, exec_shift_left, exec_shift_right, exec_subtract, + exec_divide, exec_multiply, exec_or, exec_remainder, exec_shift_left, exec_shift_right, + exec_subtract, }; use likeop::{construct_like_escape_arg, exec_glob, exec_like_with_escape}; use rand::distributions::{Distribution, Uniform}; @@ -2351,6 +2352,11 @@ impl Program { exec_and(&state.registers[*lhs], &state.registers[*rhs]); state.pc += 1; } + Insn::Or { lhs, rhs, dest } => { + 
state.registers[*dest] = + exec_or(&state.registers[*lhs], &state.registers[*rhs]); + state.pc += 1; + } } } } From 7d81c32261cfb8587e7496274b4a6b2c8e5c2369 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Sat, 25 Jan 2025 15:25:13 +0900 Subject: [PATCH 17/34] Add spotless support for bindings/java formatting --- NOTICE.md | 5 + bindings/java/build.gradle.kts | 12 ++ licenses/bindings/java/spotless-license.md | 191 +++++++++++++++++++++ 3 files changed, 208 insertions(+) create mode 100644 licenses/bindings/java/spotless-license.md diff --git a/NOTICE.md b/NOTICE.md index 5c654dfbf..c2bba2ba7 100644 --- a/NOTICE.md +++ b/NOTICE.md @@ -23,6 +23,11 @@ This product depends on logback, distributed by the logback authors: * License: licenses/bindings/java/logback-license.md (Apache License v2.0) * Homepage: https://github.com/qos-ch/logback?tab=License-1-ov-file +This product depends on spotless, distributed by the diffplug authors: + +* License: licenses/bindings/java/spotless-license.md (Apache License v2.0) +* Homepage: https://github.com/diffplug/spotless + This product depends on serde, distributed by the serde-rs project: * License: licenses/core/serde-apache-license.md (Apache License v2.0) diff --git a/bindings/java/build.gradle.kts b/bindings/java/build.gradle.kts index c20fa561b..a9137c888 100644 --- a/bindings/java/build.gradle.kts +++ b/bindings/java/build.gradle.kts @@ -7,6 +7,9 @@ plugins { java application id("net.ltgt.errorprone") version "3.1.0" + + // If you're stuck on JRE 8, use id 'com.diffplug.spotless' version '6.13.0' or older. + id("com.diffplug.spotless") version "6.13.0" } group = "org.github.tursodatabase" @@ -111,3 +114,12 @@ tasks.withType { } } } + +spotless { + java { + target("**/*.java") + targetExclude(layout.buildDirectory.dir("**/*.java").get().asFile) + removeUnusedImports() + googleJavaFormat("1.7") // or use eclipse().configFile("path/to/eclipse-format.xml") + } +} diff --git a/licenses/bindings/java/spotless-license.md b/licenses/bindings/java/spotless-license.md new file mode 100644 index 000000000..635a39306 --- /dev/null +++ b/licenses/bindings/java/spotless-license.md @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "{}" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + +Copyright {yyyy} {name of copyright owner} + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
From 7902d5f2005dff0440e5f5a21de50adce7b96bd9 Mon Sep 17 00:00:00 2001 From: Diego Reis Date: Sat, 25 Jan 2025 03:08:51 -0300 Subject: [PATCH 18/34] Fix Text handling of And bytecode --- core/vdbe/insn.rs | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/core/vdbe/insn.rs b/core/vdbe/insn.rs index 3fdd02a14..f17a1a354 100644 --- a/core/vdbe/insn.rs +++ b/core/vdbe/insn.rs @@ -982,6 +982,13 @@ pub fn exec_and(mut lhs: &OwnedValue, mut rhs: &OwnedValue) -> OwnedValue { | (_, OwnedValue::Float(0.0)) | (OwnedValue::Float(0.0), _) => OwnedValue::Integer(0), (OwnedValue::Null, _) | (_, OwnedValue::Null) => OwnedValue::Null, + (OwnedValue::Text(lhs), OwnedValue::Text(rhs)) => exec_and( + &cast_text_to_numerical(&lhs.value), + &cast_text_to_numerical(&rhs.value), + ), + (OwnedValue::Text(text), other) | (other, OwnedValue::Text(text)) => { + exec_and(&cast_text_to_numerical(&text.value), other) + } _ => OwnedValue::Integer(1), } } @@ -1034,13 +1041,28 @@ mod tests { (OwnedValue::Null, OwnedValue::Null), (OwnedValue::Float(0.0), OwnedValue::Null), (OwnedValue::Integer(1), OwnedValue::Float(2.2)), + ( + OwnedValue::Integer(0), + OwnedValue::Text(LimboText::new(Rc::new("string".to_string()))), + ), + ( + OwnedValue::Integer(0), + OwnedValue::Text(LimboText::new(Rc::new("1".to_string()))), + ), + ( + OwnedValue::Integer(1), + OwnedValue::Text(LimboText::new(Rc::new("1".to_string()))), + ), ]; - let outpus = vec![ + let outpus = [ OwnedValue::Integer(0), OwnedValue::Null, OwnedValue::Null, OwnedValue::Integer(0), OwnedValue::Integer(1), + OwnedValue::Integer(0), + OwnedValue::Integer(0), + OwnedValue::Integer(1), ]; assert_eq!( @@ -1049,7 +1071,13 @@ mod tests { "Inputs and Outputs should have same size" ); for (i, (lhs, rhs)) in inputs.iter().enumerate() { - assert_eq!(exec_and(lhs, rhs), outpus[i]); + assert_eq!( + exec_and(lhs, rhs), + outpus[i], + "Wrong AND for lhs: {}, rhs: {}", + lhs, + rhs + ); } } @@ -1075,7 +1103,7 @@ mod tests { OwnedValue::Text(LimboText::new(Rc::new("".to_string()))), ), ]; - let outpus = vec![ + let outpus = [ OwnedValue::Null, OwnedValue::Integer(1), OwnedValue::Null, From 4be1f9c3cc4bf8d0fe3310503085ac0edcc19721 Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Tue, 21 Jan 2025 10:21:55 -0500 Subject: [PATCH 19/34] Begin work on Go bindings (purego) --- Cargo.lock | 7 ++ Cargo.toml | 1 + bindings/go/Cargo.toml | 23 ++++ bindings/go/cmd/main.go | 18 +++ bindings/go/go.mod | 5 + bindings/go/go.sum | 2 + bindings/go/rs_src/lib.rs | 210 ++++++++++++++++++++++++++++++++ bindings/go/rs_src/statement.rs | 160 ++++++++++++++++++++++++ bindings/go/rs_src/types.rs | 14 +++ bindings/go/stmt.go | 79 ++++++++++++ bindings/go/turso.go | 127 +++++++++++++++++++ bindings/go/types.go | 28 +++++ 12 files changed, 674 insertions(+) create mode 100644 bindings/go/Cargo.toml create mode 100644 bindings/go/cmd/main.go create mode 100644 bindings/go/go.mod create mode 100644 bindings/go/go.sum create mode 100644 bindings/go/rs_src/lib.rs create mode 100644 bindings/go/rs_src/statement.rs create mode 100644 bindings/go/rs_src/types.rs create mode 100644 bindings/go/stmt.go create mode 100644 bindings/go/turso.go create mode 100644 bindings/go/types.go diff --git a/Cargo.lock b/Cargo.lock index 540fb77d2..e7365d1e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2572,6 +2572,13 @@ version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" 
+[[package]] +name = "turso-go" +version = "0.0.13" +dependencies = [ + "limbo_core", +] + [[package]] name = "typenum" version = "1.17.0" diff --git a/Cargo.toml b/Cargo.toml index 5e243c98b..570e5b638 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,7 @@ members = [ "bindings/python", "bindings/rust", "bindings/wasm", + "bindings/go", "cli", "core", "extensions/core", diff --git a/bindings/go/Cargo.toml b/bindings/go/Cargo.toml new file mode 100644 index 000000000..98056cbe6 --- /dev/null +++ b/bindings/go/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "turso-go" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true + +[lib] +name = "_turso_go" +crate-type = ["cdylib"] +path = "rs_src/lib.rs" + +[features] +default = ["io_uring"] +io_uring = ["limbo_core/io_uring"] + + +[dependencies] +limbo_core = { path = "../../core/" } + +[target.'cfg(target_os = "linux")'.dependencies] +limbo_core = { path = "../../core/", features = ["io_uring"] } diff --git a/bindings/go/cmd/main.go b/bindings/go/cmd/main.go new file mode 100644 index 000000000..32fcbea23 --- /dev/null +++ b/bindings/go/cmd/main.go @@ -0,0 +1,18 @@ +// package main +// +// import ( +// "fmt" +// ) +// +// func main() { +// conn, err := lc.Open("new.db") +// if err != nil { +// panic(err) +// } +// fmt.Println("Connected to database") +// sql := "select c from t;" +// conn.Query(sql) +// +// conn.Close() +// fmt.Println("Connection closed") +// } diff --git a/bindings/go/go.mod b/bindings/go/go.mod new file mode 100644 index 000000000..fa1d99d3e --- /dev/null +++ b/bindings/go/go.mod @@ -0,0 +1,5 @@ +module turso + +go 1.23.4 + +require github.com/ebitengine/purego v0.8.2 // indirect diff --git a/bindings/go/go.sum b/bindings/go/go.sum new file mode 100644 index 000000000..38eca3dfd --- /dev/null +++ b/bindings/go/go.sum @@ -0,0 +1,2 @@ +github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I= +github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= diff --git a/bindings/go/rs_src/lib.rs b/bindings/go/rs_src/lib.rs new file mode 100644 index 000000000..862c8c191 --- /dev/null +++ b/bindings/go/rs_src/lib.rs @@ -0,0 +1,210 @@ +mod statement; +mod types; +use limbo_core::{Connection, Database, LimboError, Value}; +use std::{ + ffi::{c_char, c_void}, + rc::Rc, + str::FromStr, + sync::Arc, +}; + +#[no_mangle] +pub unsafe extern "C" fn db_open(path: *const c_char) -> *mut c_void { + if path.is_null() { + println!("Path is null"); + return std::ptr::null_mut(); + } + let path = unsafe { std::ffi::CStr::from_ptr(path) }; + let path = path.to_str().unwrap(); + let db_options = parse_query_str(path); + if let Ok(io) = get_io(&db_options.path) { + let db = Database::open_file(io.clone(), &db_options.path.to_string()); + match db { + Ok(db) => { + println!("Opened database: {}", path); + let conn = db.connect(); + return TursoConn::new(conn, io).to_ptr(); + } + Err(e) => { + println!("Error opening database: {}", e); + return std::ptr::null_mut(); + } + }; + } + std::ptr::null_mut() +} + +struct TursoConn<'a> { + conn: Rc, + io: Arc, + cursor_idx: usize, + cursor: Option>>, +} + +impl<'a> TursoConn<'_> { + fn new(conn: Rc, io: Arc) -> Self { + TursoConn { + conn, + io, + cursor_idx: 0, + cursor: None, + } + } + fn to_ptr(self) -> *mut c_void { + Box::into_raw(Box::new(self)) as *mut c_void + } + + fn from_ptr(ptr: *mut c_void) -> &'static mut TursoConn<'a> { + if ptr.is_null() { + panic!("Null 
pointer"); + } + unsafe { &mut *(ptr as *mut TursoConn) } + } +} + +/// Close the database connection +/// # Safety +/// safely frees the connection's memory +#[no_mangle] +pub unsafe extern "C" fn db_close(db: *mut c_void) { + if !db.is_null() { + let _ = unsafe { Box::from_raw(db) }; + } +} + +#[allow(clippy::arc_with_non_send_sync)] +fn get_io(db_location: &DbType) -> Result, LimboError> { + Ok(match db_location { + DbType::Memory => Arc::new(limbo_core::MemoryIO::new()?), + _ => { + #[cfg(target_family = "unix")] + if cfg!(all(target_os = "linux", feature = "io_uring")) { + Arc::new(limbo_core::UringIO::new()?) + } else { + Arc::new(limbo_core::UnixIO::new()?) + } + + #[cfg(target_family = "windows")] + Arc::new(limbo_core::WindowsIO::new()?); + } + }) +} + +struct DbOptions { + path: DbType, + params: Parameters, +} + +#[derive(Default, Debug, Clone)] +enum DbType { + File(String), + #[default] + Memory, +} + +impl std::fmt::Display for DbType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + DbType::File(path) => write!(f, "{}", path), + DbType::Memory => write!(f, ":memory:"), + } + } +} + +#[derive(Debug, Clone, Default)] +struct Parameters { + mode: Mode, + cache: Option, + vfs: Option, + nolock: bool, + immutable: bool, + modeof: Option, +} + +impl FromStr for Parameters { + type Err = (); + fn from_str(s: &str) -> Result { + if !s.contains('?') { + return Ok(Parameters::default()); + } + let mut params = Parameters::default(); + for param in s.split('?').nth(1).unwrap().split('&') { + let mut kv = param.split('='); + match kv.next() { + Some("mode") => params.mode = kv.next().unwrap().parse().unwrap(), + Some("cache") => params.cache = Some(kv.next().unwrap().parse().unwrap()), + Some("vfs") => params.vfs = Some(kv.next().unwrap().to_string()), + Some("nolock") => params.nolock = true, + Some("immutable") => params.immutable = true, + Some("modeof") => params.modeof = Some(kv.next().unwrap().to_string()), + _ => {} + } + } + Ok(params) + } +} + +#[derive(Default, Debug, Clone, Copy)] +enum Cache { + Shared, + #[default] + Private, +} + +impl FromStr for Cache { + type Err = (); + fn from_str(s: &str) -> Result { + match s { + "shared" => Ok(Cache::Shared), + _ => Ok(Cache::Private), + } + } +} + +#[allow(clippy::enum_variant_names)] +#[derive(Default, Debug, Clone, Copy)] +enum Mode { + ReadOnly, + ReadWrite, + #[default] + ReadWriteCreate, +} + +impl FromStr for Mode { + type Err = (); + fn from_str(s: &str) -> Result { + match s { + "readonly" | "ro" => Ok(Mode::ReadOnly), + "readwrite" | "rw" => Ok(Mode::ReadWrite), + "readwritecreate" | "rwc" => Ok(Mode::ReadWriteCreate), + _ => Ok(Mode::default()), + } + } +} + +// At this point we don't have configurable parameters but many +// DSN's are going to have query parameters +fn parse_query_str(mut path: &str) -> DbOptions { + if path == ":memory:" { + return DbOptions { + path: DbType::Memory, + params: Parameters::default(), + }; + } + if path.starts_with("sqlite://") { + path = &path[10..]; + } + if path.contains('?') { + let parameters = Parameters::from_str(path).unwrap(); + let path = &path[..path.find('?').unwrap()]; + DbOptions { + path: DbType::File(path.to_string()), + params: parameters, + } + } else { + DbOptions { + path: DbType::File(path.to_string()), + params: Parameters::default(), + } + } +} diff --git a/bindings/go/rs_src/statement.rs b/bindings/go/rs_src/statement.rs new file mode 100644 index 000000000..99a45d692 --- /dev/null +++ b/bindings/go/rs_src/statement.rs @@ 
-0,0 +1,160 @@ +use crate::types::ResultCode; +use crate::TursoConn; +use limbo_core::{Rows, Statement, StepResult, Value}; +use std::ffi::{c_char, c_void}; + +#[no_mangle] +pub extern "C" fn db_prepare(ctx: *mut c_void, query: *const c_char) -> *mut c_void { + if ctx.is_null() || query.is_null() { + return std::ptr::null_mut(); + } + let query_str = unsafe { std::ffi::CStr::from_ptr(query) }.to_str().unwrap(); + + let db = TursoConn::from_ptr(ctx); + + let stmt = db.conn.prepare(query_str.to_string()); + match stmt { + Ok(stmt) => TursoStatement::new(stmt, db).to_ptr(), + Err(_) => std::ptr::null_mut(), + } +} + +struct TursoStatement<'a> { + statement: Statement, + conn: &'a TursoConn<'a>, +} + +impl<'a> TursoStatement<'a> { + fn new(statement: Statement, conn: &'a TursoConn<'a>) -> Self { + TursoStatement { statement, conn } + } + fn to_ptr(self) -> *mut c_void { + Box::into_raw(Box::new(self)) as *mut c_void + } + fn from_ptr(ptr: *mut c_void) -> &'static mut TursoStatement<'a> { + if ptr.is_null() { + panic!("Null pointer"); + } + unsafe { &mut *(ptr as *mut TursoStatement) } + } +} + +#[no_mangle] +pub extern "C" fn db_get_columns(ctx: *mut c_void) -> *const c_void { + if ctx.is_null() { + return std::ptr::null(); + } + let stmt = TursoStatement::from_ptr(ctx); + let columns = stmt.statement.columns(); + let mut column_names = Vec::new(); + for column in columns { + column_names.push(column.name().to_string()); + } + let c_string = std::ffi::CString::new(column_names.join(",")).unwrap(); + c_string.into_raw() as *const c_void +} + +struct TursoRows<'a> { + rows: Rows<'a>, + conn: &'a mut TursoConn<'a>, +} + +impl<'a> TursoRows<'a> { + fn new(rows: Rows<'a>, conn: &'a mut TursoConn<'a>) -> Self { + TursoRows { rows, conn } + } + + fn to_ptr(self) -> *mut c_void { + Box::into_raw(Box::new(self)) as *mut c_void + } + + fn from_ptr(ptr: *mut c_void) -> &'static mut TursoRows<'a> { + if ptr.is_null() { + panic!("Null pointer"); + } + unsafe { &mut *(ptr as *mut TursoRows) } + } +} + +#[no_mangle] +pub extern "C" fn rows_next(ctx: *mut c_void, rows_ptr: *mut c_void) -> ResultCode { + if rows_ptr.is_null() || ctx.is_null() { + return ResultCode::Error; + } + let rows = unsafe { &mut *(rows_ptr as *mut Rows) }; + let conn = TursoConn::from_ptr(ctx); + + match rows.next_row() { + Ok(StepResult::Row(row)) => { + conn.cursor = Some(row.values); + ResultCode::Row + } + Ok(StepResult::Done) => { + // No more rows + ResultCode::Done + } + Ok(StepResult::IO) => { + let _ = conn.io.run_once(); + ResultCode::Io + } + Ok(StepResult::Busy) => ResultCode::Busy, + Ok(StepResult::Interrupt) => ResultCode::Interrupt, + Err(_) => ResultCode::Error, + } +} + +#[no_mangle] +pub extern "C" fn rows_get_value(ctx: *mut c_void, col_idx: usize) -> *const c_char { + if ctx.is_null() { + return std::ptr::null(); + } + let conn = TursoConn::from_ptr(ctx); + + if let Some(ref cursor) = conn.cursor { + if let Some(value) = cursor.get(col_idx) { + let c_string = std::ffi::CString::new(value.to_string()).unwrap(); + return c_string.into_raw(); // Caller must free this pointer + } + } + std::ptr::null() // No data or invalid index +} + +// Free the returned string +#[no_mangle] +pub extern "C" fn free_c_string(s: *mut c_char) { + if !s.is_null() { + unsafe { drop(std::ffi::CString::from_raw(s)) }; + } +} +#[no_mangle] +pub extern "C" fn rows_get_string( + ctx: *mut c_void, + rows_ptr: *mut c_void, + col_idx: i32, +) -> *const c_char { + if rows_ptr.is_null() || ctx.is_null() { + return std::ptr::null(); + } + let _rows = 
unsafe { &mut *(rows_ptr as *mut Rows) }; + let conn = TursoConn::from_ptr(ctx); + if col_idx > conn.cursor_idx as i32 || conn.cursor.is_none() { + return std::ptr::null(); + } + if let Some(values) = &conn.cursor { + let value = &values[col_idx as usize]; + match value { + Value::Text(s) => { + return s.as_ptr() as *const i8; + } + _ => return std::ptr::null(), + } + }; + std::ptr::null() +} + +#[no_mangle] +pub extern "C" fn rows_close(rows_ptr: *mut c_void) { + if !rows_ptr.is_null() { + let _ = unsafe { Box::from_raw(rows_ptr as *mut Rows) }; + } +} diff --git a/bindings/go/rs_src/types.rs b/bindings/go/rs_src/types.rs new file mode 100644 index 000000000..711887229 --- /dev/null +++ b/bindings/go/rs_src/types.rs @@ -0,0 +1,14 @@ +#[repr(C)] +pub enum ResultCode { + Error = -1, + Ok = 0, + Row = 1, + Busy = 2, + Done = 3, + Io = 4, + Interrupt = 5, + Invalid = 6, + Null = 7, + NoMem = 8, + ReadOnly = 9, +} diff --git a/bindings/go/stmt.go b/bindings/go/stmt.go new file mode 100644 index 000000000..20e1a5774 --- /dev/null +++ b/bindings/go/stmt.go @@ -0,0 +1,79 @@ +package turso + +import ( + "database/sql/driver" + "fmt" + "io" +) + +type stmt struct { + ctx uintptr + sql string +} + +type rows struct { + ctx uintptr + rowsPtr uintptr + columns []string + err error +} + +func (ls *stmt) Query(args []driver.Value) (driver.Rows, error) { + var dbPrepare func(uintptr, uintptr) uintptr + getExtFunc(&dbPrepare, "db_prepare") + + queryPtr := toCString(ls.sql) + defer freeCString(queryPtr) + + rowsPtr := dbPrepare(ls.ctx, queryPtr) + if rowsPtr == 0 { + return nil, fmt.Errorf("failed to prepare query") + } + var colFunc func(uintptr, uintptr) uintptr + + getExtFunc(&colFunc, "columns") + + rows := &rows{ + ctx: ls.ctx, + rowsPtr: rowsPtr, + } + return rows, nil +} + +func (lr *rows) Columns() []string { + return lr.columns +} + +func (lr *rows) Close() error { + var rowsClose func(uintptr) + getExtFunc(&rowsClose, "rows_close") + rowsClose(lr.rowsPtr) + return nil +} + +func (lr *rows) Next(dest []driver.Value) error { + var rowsNext func(uintptr, uintptr) int32 + getExtFunc(&rowsNext, "rows_next") + + status := rowsNext(lr.ctx, lr.rowsPtr) + switch ResultCode(status) { + case Row: + for i := range dest { + getExtFunc(&rowsGetValue, "rows_get_value") + + valPtr := rowsGetValue(lr.ctx, int32(i)) + if valPtr != 0 { + val := cStringToGoString(valPtr) + dest[i] = val + freeCString(valPtr) + } else { + dest[i] = nil + } + } + return nil + case 0: // No more rows + return io.EOF + default: + return fmt.Errorf("unexpected status: %d", status) + } +} diff --git a/bindings/go/turso.go b/bindings/go/turso.go new file mode 100644 index 000000000..0c095ac80 --- /dev/null +++ b/bindings/go/turso.go @@ -0,0 +1,127 @@ +package turso + +import ( + "database/sql" + "database/sql/driver" + "errors" + "log/slog" + "os" + "sync" + "unsafe" + + "github.com/ebitengine/purego" +) + +const ( + turso = "../../target/debug/lib_turso_go.so" +) + +func toGoStr(ptr uintptr, length int) string { + if ptr == 0 { + return "" + } + uptr := unsafe.Pointer(ptr) + s := (*string)(uptr) + if s == nil { + // redundant + return "" + } + return *s +} + +func init() { + slib, err := purego.Dlopen(turso, purego.RTLD_LAZY) + if err != nil { + slog.Error("Error opening turso library: ", err) + os.Exit(1) + } + lib = slib + sql.Register("turso", &tursoDriver{}) +} + +type tursoDriver struct { + tursoCtx +} + +func toCString(s string) uintptr { + b := append([]byte(s), 0) + return uintptr(unsafe.Pointer(&b[0])) +} + +func getExtFunc(ptr 
interface{}, name string) { + purego.RegisterLibFunc(ptr, lib, name) +} + +type conn struct { + ctx uintptr + sync.Mutex + writeTimeFmt string + lastInsertID int64 + lastAffected int64 +} + +func newConn() *conn { + return &conn{ + 0, + sync.Mutex{}, + "2006-01-02 15:04:05", + 0, + 0, + } +} + +func open(dsn string) (*conn, error) { + var open func(uintptr) uintptr + getExtFunc(&open, ExtDBOpen) + c := newConn() + path := toCString(dsn) + ctx := open(path) + c.ctx = ctx + return c, nil +} + +type tursoCtx struct { + conn *conn + tx *sql.Tx + err error + rows *sql.Rows + stmt *sql.Stmt +} + +func (lc tursoCtx) Open(dsn string) (driver.Conn, error) { + conn, err := open(dsn) + if err != nil { + return nil, err + } + nc := tursoCtx{conn: conn} + return nc, nil +} + +func (lc tursoCtx) Close() error { + var closedb func(uintptr) uintptr + getExtFunc(&closedb, ExtDBClose) + closedb(lc.conn.ctx) + return nil +} + +// TODO: Begin not implemented +func (lc tursoCtx) Begin() (driver.Tx, error) { + return nil, nil +} + +func (ls tursoCtx) Prepare(sql string) (driver.Stmt, error) { + var prepare func(uintptr, uintptr) uintptr + getExtFunc(&prepare, ExtDBPrepare) + s := toCString(sql) + statement := prepare(ls.conn.ctx, s) + if statement == 0 { + return nil, errors.New("no rows") + } + ls.stmt = stmt{ + ctx: statement, + + } + + } + return nil, nil +} diff --git a/bindings/go/types.go b/bindings/go/types.go new file mode 100644 index 000000000..0569b317a --- /dev/null +++ b/bindings/go/types.go @@ -0,0 +1,28 @@ +package turso + +type ResultCode int + +const ( + Error ResultCode = -1 + Ok ResultCode = 0 + Row ResultCode = 1 + Busy ResultCode = 2 + Done ResultCode = 3 + Io ResultCode = 4 + Interrupt ResultCode = 5 + Invalid ResultCode = 6 + Null ResultCode = 7 + NoMem ResultCode = 8 + ReadOnly ResultCode = 9 + ExtDBOpen string = "db_open" + ExtDBClose string = "db_close" + ExtDBPrepare string = "db_prepare" +) + +var ( + lib uintptr + dbPrepare func(uintptr, uintptr) uintptr + rowsNext func(rowsPtr uintptr) int32 + rowsGetValue func(rowsPtr uintptr, colIdx uint) uintptr + freeCString func(strPtr uintptr) +) From a316ab51acc10922aeb24019a2cd1a2a05fc083e Mon Sep 17 00:00:00 2001 From: pedrocarlo Date: Sat, 25 Jan 2025 16:22:53 -0300 Subject: [PATCH 20/34] feature: implement strftime function --- COMPAT.md | 24 +++-- core/function.rs | 3 + core/translate/expr.rs | 20 ++++ core/vdbe/datetime.rs | 68 ++++++++++-- core/vdbe/mod.rs | 10 +- testing/scalar-functions-datetime.test | 137 +++++++++++++++++++++++++ 6 files changed, 243 insertions(+), 19 deletions(-) diff --git a/COMPAT.md b/COMPAT.md index 17ce24568..fcd72b633 100644 --- a/COMPAT.md +++ b/COMPAT.md @@ -4,16 +4,24 @@ This document describes the compatibility of Limbo with SQLite. 
## Table of contents: -- [Features](#features) -- [SQLite query language](#sqlite-query-language) +- [Compatibility with SQLite](#compatibility-with-sqlite) + - [Table of contents:](#table-of-contents) + - [Features](#features) + - [SQLite query language](#sqlite-query-language) - [Statements](#statements) - - [PRAGMA Statements](#pragma) + - [PRAGMA](#pragma) - [Expressions](#expressions) - - [Functions](#functions) -- [SQLite C API](#sqlite-c-api) -- [SQLite VDBE opcodes](#sqlite-vdbe-opcodes) -- [Extensions](#extensions) + - [SQL functions](#sql-functions) + - [Scalar functions](#scalar-functions) + - [Mathematical functions](#mathematical-functions) + - [Aggregate functions](#aggregate-functions) + - [Date and time functions](#date-and-time-functions) + - [JSON functions](#json-functions) + - [SQLite C API](#sqlite-c-api) + - [SQLite VDBE opcodes](#sqlite-vdbe-opcodes) + - [Extensions](#extensions) - [UUID](#uuid) + - [regexp](#regexp) ## Features @@ -308,7 +316,7 @@ Feature support of [sqlite expr syntax](https://www.sqlite.org/lang_expr.html). | datetime() | Yes | partially supports modifiers | | julianday() | Partial | does not support modifiers | | unixepoch() | Partial | does not support modifiers | -| strftime() | No | | +| strftime() | Yes | partially supports modifiers | | timediff() | No | | Modifiers: diff --git a/core/function.rs b/core/function.rs index 5023c3c94..6b9ca800e 100644 --- a/core/function.rs +++ b/core/function.rs @@ -213,6 +213,7 @@ pub enum ScalarFunc { Replace, #[cfg(not(target_family = "wasm"))] LoadExtension, + StrfTime, } impl Display for ScalarFunc { @@ -264,6 +265,7 @@ impl Display for ScalarFunc { Self::DateTime => "datetime".to_string(), #[cfg(not(target_family = "wasm"))] Self::LoadExtension => "load_extension".to_string(), + Self::StrfTime => "strftime".to_string(), }; write!(f, "{}", str) } @@ -554,6 +556,7 @@ impl Func { "trunc" => Ok(Self::Math(MathFunc::Trunc)), #[cfg(not(target_family = "wasm"))] "load_extension" => Ok(Self::Scalar(ScalarFunc::LoadExtension)), + "strftime" => Ok(Self::Scalar(ScalarFunc::StrfTime)), _ => crate::bail_parse_error!("no such function: {}", name), } } diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 476b78d77..598a6f858 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -1517,6 +1517,26 @@ pub fn translate_expr( }); Ok(target_register) } + ScalarFunc::StrfTime => { + if let Some(args) = args { + for arg in args.iter() { + // register containing result of each argument expression + let _ = translate_and_mark( + program, + referenced_tables, + arg, + resolver, + )?; + } + } + program.emit_insn(Insn::Function { + constant_mask: 0, + start_reg: target_register + 1, + dest: target_register, + func: func_ctx, + }); + Ok(target_register) + } } } Func::Math(math_func) => match math_func.arity() { diff --git a/core/vdbe/datetime.rs b/core/vdbe/datetime.rs index 2f9cdc601..4cb89a65d 100644 --- a/core/vdbe/datetime.rs +++ b/core/vdbe/datetime.rs @@ -22,24 +22,44 @@ pub fn exec_datetime_full(values: &[OwnedValue]) -> OwnedValue { exec_datetime(values, DateTimeOutput::DateTime) } +#[inline(always)] +pub fn exec_strftime(values: &[OwnedValue]) -> OwnedValue { + if values.is_empty() { + return OwnedValue::Null; + } + + let format_str = match &values[0] { + OwnedValue::Text(text) => text.value.to_string(), + OwnedValue::Integer(num) => num.to_string(), + OwnedValue::Float(num) => format!("{:.14}", num), + _ => return OwnedValue::Null, + }; + + exec_datetime(&values[1..], 
DateTimeOutput::StrfTime(format_str)) +} + enum DateTimeOutput { Date, Time, DateTime, + // Holds the format string + StrfTime(String), } fn exec_datetime(values: &[OwnedValue], output_type: DateTimeOutput) -> OwnedValue { if values.is_empty() { - return OwnedValue::build_text(Rc::new( - parse_naive_date_time(&OwnedValue::build_text(Rc::new("now".to_string()))) - .unwrap() - .format(match output_type { - DateTimeOutput::DateTime => "%Y-%m-%d %H:%M:%S", - DateTimeOutput::Time => "%H:%M:%S", - DateTimeOutput::Date => "%Y-%m-%d", - }) - .to_string(), - )); + let now = + parse_naive_date_time(&OwnedValue::build_text(Rc::new("now".to_string()))).unwrap(); + + let formatted_str = match output_type { + DateTimeOutput::DateTime => now.format("%Y-%m-%d %H:%M:%S").to_string(), + DateTimeOutput::Time => now.format("%H:%M:%S").to_string(), + DateTimeOutput::Date => now.format("%Y-%m-%d").to_string(), + DateTimeOutput::StrfTime(ref format_str) => strftime_format(&now, format_str), + }; + + // Parse here + return OwnedValue::build_text(Rc::new(formatted_str)); } if let Some(mut dt) = parse_naive_date_time(&values[0]) { // if successful, treat subsequent entries as modifiers @@ -95,6 +115,31 @@ fn format_dt(dt: NaiveDateTime, output_type: DateTimeOutput, subsec: bool) -> St dt.format("%Y-%m-%d %H:%M:%S").to_string() } } + DateTimeOutput::StrfTime(format_str) => strftime_format(&dt, &format_str), + } +} + +// Not as fast as if the formatting was native to chrono, but a good enough +// for now, just to have the feature implemented +fn strftime_format(dt: &NaiveDateTime, format_str: &str) -> String { + use std::fmt::Write; + // Necessary to remove %f and %J that are exclusive formatters to sqlite + // Chrono does not support them, so it is necessary to replace the modifiers manually + + // Sqlite uses 9 decimal places for julianday in strftime + let copy_format = format_str + .to_string() + .replace("%J", &format!("{:.9}", to_julian_day_exact(dt))); + // Just change the formatting here to have fractional seconds using chrono builtin modifier + let copy_format = copy_format.replace("%f", "%S.%3f"); + + // The write! macro is used here as chrono's format can panic if the formatting string contains + // unknown specifiers. 
By using a writer, we can catch the panic and handle the error
+ let mut formatted = String::new();
+ match write!(formatted, "{}", dt.format(&copy_format)) {
+ Ok(_) => formatted,
+ // On sqlite when the formatting fails nothing is printed
+ Err(_) => "".to_string(),
 }
 }
@@ -1729,4 +1774,7 @@ mod tests {
 .naive_utc();
 assert!(is_leap_second(&dt));
 }
+
+ #[test]
+ fn test_strftime() {}
 }
diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs
index 98b328b71..55f009314 100644
--- a/core/vdbe/mod.rs
+++ b/core/vdbe/mod.rs
@@ -44,7 +44,9 @@ use crate::{
 json::json_extract, json::json_object, json::json_type,
 };
 use crate::{resolve_ext_path, Connection, Result, Rows, TransactionState, DATABASE_VERSION};
-use datetime::{exec_date, exec_datetime_full, exec_julianday, exec_time, exec_unixepoch};
+use datetime::{
+ exec_date, exec_datetime_full, exec_julianday, exec_strftime, exec_time, exec_unixepoch,
+};
 use insn::{
 exec_add, exec_bit_and, exec_bit_not, exec_bit_or, exec_boolean_not, exec_concat, exec_divide,
 exec_multiply, exec_remainder, exec_shift_left, exec_shift_right, exec_subtract,
@@ -2005,6 +2007,12 @@ impl Program {
 conn.load_extension(ext)?;
 }
 }
+ ScalarFunc::StrfTime => {
+ let result = exec_strftime(
+ &state.registers[*start_reg..*start_reg + arg_count],
+ );
+ state.registers[*dest] = result;
+ }
 },
 crate::function::Func::External(f) => match f.func {
 ExtFunc::Scalar(f) => {
diff --git a/testing/scalar-functions-datetime.test b/testing/scalar-functions-datetime.test
index 8831f723e..2917167ac 100755
--- a/testing/scalar-functions-datetime.test
+++ b/testing/scalar-functions-datetime.test
@@ -443,3 +443,140 @@ do_execsql_test julianday-time-only {
 # SELECT julianday('2023-05-18');
 #} {2460082.5}
+
+
+
+# Strftime tests
+
+
+set FMT {%d,%e,%f,%F,%G,%g,%H,%I,%j,%J,%k,%l,%i,%m,%M,%p,%P,%R,%s,%S,%T,%U,%u,%V,%w,%W,%Y,%%}
+
+do_execsql_test strftime-day {
+ SELECT strftime('%d', '2025-01-23T13:10:30.567');
+} {23}
+
+do_execsql_test strftime-day-without-leading-zero-1 {
+ SELECT strftime('%e', '2025-01-23T13:10:30.567');
+} {23}
+
+do_execsql_test strftime-day-without-leading-zero-2 {
+ SELECT strftime('%e', '2025-01-02T13:10:30.567');
+} {" 2"}
+# TODO not a typo in sqlite there is also a space
+
+do_execsql_test strftime-fractional-seconds {
+ SELECT strftime('%f', '2025-01-02T13:10:30.567');
+} {30.567}
+
+do_execsql_test strftime-iso-8601-date {
+ SELECT strftime('%F', '2025-01-23T13:10:30.567');
+} {2025-01-23}
+
+do_execsql_test strftime-iso-8601-year {
+ SELECT strftime('%G', '2025-01-23T13:10:30.567');
+} {2025}
+
+do_execsql_test strftime-iso-8601-year-2_digit {
+ SELECT strftime('%g', '2025-01-23T13:10:30.567');
+} {25}
+
+do_execsql_test strftime-hour {
+ SELECT strftime('%H', '2025-01-23T13:10:30.567');
+} {13}
+
+do_execsql_test strftime-hour-12-hour-clock {
+ SELECT strftime('%I', '2025-01-23T13:10:30.567');
+} {01}
+
+do_execsql_test strftime-day-of-year {
+ SELECT strftime('%j', '2025-01-23T13:10:30.567');
+} {023}
+
+do_execsql_test strftime-julianday {
+ SELECT strftime('%J', '2025-01-23T13:10:30.567');
+} {2460699.048964896}
+
+do_execsql_test strftime-hour-without-leading-zero-1 {
+ SELECT strftime('%k', '2025-01-23T13:10:30.567');
+} {13}
+
+do_execsql_test strftime-hour-without-leading-zero-2 {
+ SELECT strftime('%k', '2025-01-23T02:10:30.567');
+} {" 2"}
+
+do_execsql_test strftime-hour-12-hour-clock-without-leading-zero-2 {
+ SELECT strftime('%l', '2025-01-23T13:10:30.567');
+} {" 1"}
+
+do_execsql_test strftime-month {
+ SELECT strftime('%m', '2025-01-23T13:10:30.567');
+}
{01} + +do_execsql_test strftime-minute { + SELECT strftime('%M', '2025-01-23T13:14:30.567'); +} {14} + +do_execsql_test strftime-am-pm=1 { + SELECT strftime('%p', '2025-01-23T11:14:30.567'); +} {AM} + +do_execsql_test strftime-am-pm-2 { + SELECT strftime('%p', '2025-01-23T13:14:30.567'); +} {PM} + +do_execsql_test strftime-am-pm-lower-1 { + SELECT strftime('%P', '2025-01-23T11:14:30.567'); +} {am} + +do_execsql_test strftime-am-pm-lower-2 { + SELECT strftime('%P', '2025-01-23T13:14:30.567'); +} {pm} + +do_execsql_test strftime-iso8601-time { + SELECT strftime('%R', '2025-01-23T13:14:30.567'); +} {13:14} + +do_execsql_test strftime-seconds-since-epoch { + SELECT strftime('%s', '2025-01-23T13:14:30.567'); +} {1737638070} + +do_execsql_test strftime-seconds { + SELECT strftime('%S', '2025-01-23T13:14:30.567'); +} {30} + +do_execsql_test strftime-iso8601-with-seconds { + SELECT strftime('%T', '2025-01-23T13:14:30.567'); +} {13:14:30} + +do_execsql_test strftime-week-year-start-sunday { + SELECT strftime('%U', '2025-01-23T13:14:30.567'); +} {03} + +do_execsql_test strftime-day-week-start-monday { + SELECT strftime('%u', '2025-01-23T13:14:30.567'); +} {4} + +do_execsql_test strftime-iso8601-week-year { + SELECT strftime('%V', '2025-01-23T13:14:30.567'); +} {04} + +do_execsql_test strftime-day-week-start-sunday { + SELECT strftime('%w', '2025-01-23T13:14:30.567'); +} {4} + +do_execsql_test strftime-day-week-start-sunday { + SELECT strftime('%w', '2025-01-23T13:14:30.567'); +} {4} + +do_execsql_test strftime-week-year-start-sunday { + SELECT strftime('%W', '2025-01-23T13:14:30.567'); +} {03} + +do_execsql_test strftime-year { + SELECT strftime('%Y', '2025-01-23T13:14:30.567'); +} {2025} + +do_execsql_test strftime-percent { + SELECT strftime('%%', '2025-01-23T13:14:30.567'); +} {%} + From 32c985f9a810be0ac995a67cce16acdd9ef08e3d Mon Sep 17 00:00:00 2001 From: PThorpe92 Date: Fri, 24 Jan 2025 22:19:37 -0500 Subject: [PATCH 21/34] Progress on Go bindings, add prepare + query statement --- bindings/go/cmd/main.go | 18 --- bindings/go/go.mod | 5 +- bindings/go/go.sum | 2 + bindings/go/rs_src/lib.rs | 37 ++--- bindings/go/rs_src/rows.rs | 138 +++++++++++++++++ bindings/go/rs_src/statement.rs | 237 +++++++++++++---------------- bindings/go/rs_src/types.rs | 190 ++++++++++++++++++++++- bindings/go/stmt.go | 207 +++++++++++++++++++------ bindings/go/turso.go | 162 +++++++++++--------- bindings/go/types.go | 260 +++++++++++++++++++++++++++++--- core/lib.rs | 8 + 11 files changed, 947 insertions(+), 317 deletions(-) delete mode 100644 bindings/go/cmd/main.go create mode 100644 bindings/go/rs_src/rows.rs diff --git a/bindings/go/cmd/main.go b/bindings/go/cmd/main.go deleted file mode 100644 index 32fcbea23..000000000 --- a/bindings/go/cmd/main.go +++ /dev/null @@ -1,18 +0,0 @@ -// package main -// -// import ( -// "fmt" -// ) -// -// func main() { -// conn, err := lc.Open("new.db") -// if err != nil { -// panic(err) -// } -// fmt.Println("Connected to database") -// sql := "select c from t;" -// conn.Query(sql) -// -// conn.Close() -// fmt.Println("Connection closed") -// } diff --git a/bindings/go/go.mod b/bindings/go/go.mod index fa1d99d3e..c108e721d 100644 --- a/bindings/go/go.mod +++ b/bindings/go/go.mod @@ -2,4 +2,7 @@ module turso go 1.23.4 -require github.com/ebitengine/purego v0.8.2 // indirect +require ( + github.com/ebitengine/purego v0.8.2 + golang.org/x/sys/windows v0.29.0 +) diff --git a/bindings/go/go.sum b/bindings/go/go.sum index 38eca3dfd..16a0ba53f 100644 --- a/bindings/go/go.sum +++ 
b/bindings/go/go.sum @@ -1,2 +1,4 @@ github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I= github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/bindings/go/rs_src/lib.rs b/bindings/go/rs_src/lib.rs index 862c8c191..36b5a6db1 100644 --- a/bindings/go/rs_src/lib.rs +++ b/bindings/go/rs_src/lib.rs @@ -1,6 +1,8 @@ +mod rows; +#[allow(dead_code)] mod statement; mod types; -use limbo_core::{Connection, Database, LimboError, Value}; +use limbo_core::{Connection, Database, LimboError}; use std::{ ffi::{c_char, c_void}, rc::Rc, @@ -8,6 +10,9 @@ use std::{ sync::Arc, }; +/// # Safety +/// Safe to be called from Go with null terminated DSN string. +/// performs null check on the path. #[no_mangle] pub unsafe extern "C" fn db_open(path: *const c_char) -> *mut c_void { if path.is_null() { @@ -34,27 +39,22 @@ pub unsafe extern "C" fn db_open(path: *const c_char) -> *mut c_void { std::ptr::null_mut() } -struct TursoConn<'a> { +#[allow(dead_code)] +struct TursoConn { conn: Rc, io: Arc, - cursor_idx: usize, - cursor: Option>>, } -impl<'a> TursoConn<'_> { +impl TursoConn { fn new(conn: Rc, io: Arc) -> Self { - TursoConn { - conn, - io, - cursor_idx: 0, - cursor: None, - } + TursoConn { conn, io } } + #[allow(clippy::wrong_self_convention)] fn to_ptr(self) -> *mut c_void { Box::into_raw(Box::new(self)) as *mut c_void } - fn from_ptr(ptr: *mut c_void) -> &'static mut TursoConn<'a> { + fn from_ptr(ptr: *mut c_void) -> &'static mut TursoConn { if ptr.is_null() { panic!("Null pointer"); } @@ -68,7 +68,7 @@ impl<'a> TursoConn<'_> { #[no_mangle] pub unsafe extern "C" fn db_close(db: *mut c_void) { if !db.is_null() { - let _ = unsafe { Box::from_raw(db) }; + let _ = unsafe { Box::from_raw(db as *mut TursoConn) }; } } @@ -77,19 +77,12 @@ fn get_io(db_location: &DbType) -> Result, LimboError> { Ok(match db_location { DbType::Memory => Arc::new(limbo_core::MemoryIO::new()?), _ => { - #[cfg(target_family = "unix")] - if cfg!(all(target_os = "linux", feature = "io_uring")) { - Arc::new(limbo_core::UringIO::new()?) - } else { - Arc::new(limbo_core::UnixIO::new()?) 
- } - - #[cfg(target_family = "windows")] - Arc::new(limbo_core::WindowsIO::new()?); + return Ok(Arc::new(limbo_core::PlatformIO::new()?)); } }) } +#[allow(dead_code)] struct DbOptions { path: DbType, params: Parameters, diff --git a/bindings/go/rs_src/rows.rs b/bindings/go/rs_src/rows.rs new file mode 100644 index 000000000..c505924bc --- /dev/null +++ b/bindings/go/rs_src/rows.rs @@ -0,0 +1,138 @@ +use crate::{ + statement::TursoStatement, + types::{ResultCode, TursoValue}, +}; +use limbo_core::{Rows, StepResult, Value}; +use std::ffi::{c_char, c_void}; + +pub struct TursoRows<'a> { + rows: Rows, + cursor: Option>>, + stmt: Box>, +} + +impl<'a> TursoRows<'a> { + pub fn new(rows: Rows, stmt: Box>) -> Self { + TursoRows { + rows, + stmt, + cursor: None, + } + } + + #[allow(clippy::wrong_self_convention)] + pub fn to_ptr(self) -> *mut c_void { + Box::into_raw(Box::new(self)) as *mut c_void + } + + pub fn from_ptr(ptr: *mut c_void) -> &'static mut TursoRows<'a> { + if ptr.is_null() { + panic!("Null pointer"); + } + unsafe { &mut *(ptr as *mut TursoRows) } + } +} + +#[no_mangle] +pub extern "C" fn rows_next(ctx: *mut c_void) -> ResultCode { + if ctx.is_null() { + return ResultCode::Error; + } + let ctx = TursoRows::from_ptr(ctx); + + match ctx.rows.next_row() { + Ok(StepResult::Row(row)) => { + ctx.cursor = Some(row.values); + ResultCode::Row + } + Ok(StepResult::Done) => ResultCode::Done, + Ok(StepResult::IO) => { + let _ = ctx.stmt.conn.io.run_once(); + ResultCode::Io + } + Ok(StepResult::Busy) => ResultCode::Busy, + Ok(StepResult::Interrupt) => ResultCode::Interrupt, + Err(_) => ResultCode::Error, + } +} + +#[no_mangle] +pub extern "C" fn rows_get_value(ctx: *mut c_void, col_idx: usize) -> *const c_void { + if ctx.is_null() { + return std::ptr::null(); + } + let ctx = TursoRows::from_ptr(ctx); + + if let Some(ref cursor) = ctx.cursor { + if let Some(value) = cursor.get(col_idx) { + let val = TursoValue::from_value(value); + return val.to_ptr(); + } + } + std::ptr::null() +} + +#[no_mangle] +pub extern "C" fn free_string(s: *mut c_char) { + if !s.is_null() { + unsafe { drop(std::ffi::CString::from_raw(s)) }; + } +} + +#[no_mangle] +pub extern "C" fn rows_get_columns( + rows_ptr: *mut c_void, + out_length: *mut usize, +) -> *mut *const c_char { + if rows_ptr.is_null() || out_length.is_null() { + return std::ptr::null_mut(); + } + let rows = TursoRows::from_ptr(rows_ptr); + let c_strings: Vec = rows + .rows + .columns() + .iter() + .map(|name| std::ffi::CString::new(name.as_str()).unwrap()) + .collect(); + + let c_ptrs: Vec<*const c_char> = c_strings.iter().map(|s| s.as_ptr()).collect(); + unsafe { + *out_length = c_ptrs.len(); + } + let ptr = c_ptrs.as_ptr(); + std::mem::forget(c_strings); + std::mem::forget(c_ptrs); + ptr as *mut *const c_char +} + +#[no_mangle] +pub extern "C" fn rows_close(rows_ptr: *mut c_void) { + if !rows_ptr.is_null() { + let _ = unsafe { Box::from_raw(rows_ptr as *mut TursoRows) }; + } +} + +#[no_mangle] +pub extern "C" fn free_columns(columns: *mut *const c_char) { + if columns.is_null() { + return; + } + unsafe { + let mut idx = 0; + while !(*columns.add(idx)).is_null() { + let _ = std::ffi::CString::from_raw(*columns.add(idx) as *mut c_char); + idx += 1; + } + let _ = Box::from_raw(columns); + } +} + +#[no_mangle] +pub extern "C" fn free_rows(rows: *mut c_void) { + if rows.is_null() { + return; + } + unsafe { + let _ = Box::from_raw(rows as *mut Rows); + } +} diff --git a/bindings/go/rs_src/statement.rs b/bindings/go/rs_src/statement.rs index 99a45d692..4a4e29e34 
100644 --- a/bindings/go/rs_src/statement.rs +++ b/bindings/go/rs_src/statement.rs @@ -1,7 +1,9 @@ -use crate::types::ResultCode; +use crate::rows::TursoRows; +use crate::types::{AllocPool, ResultCode, TursoValue}; use crate::TursoConn; -use limbo_core::{Rows, Statement, StepResult, Value}; +use limbo_core::{Statement, StepResult}; use std::ffi::{c_char, c_void}; +use std::num::NonZero; #[no_mangle] pub extern "C" fn db_prepare(ctx: *mut c_void, query: *const c_char) -> *mut c_void { @@ -19,142 +21,119 @@ pub extern "C" fn db_prepare(ctx: *mut c_void, query: *const c_char) -> *mut c_v } } -struct TursoStatement<'a> { - statement: Statement, - conn: &'a TursoConn<'a>, +#[no_mangle] +pub extern "C" fn stmt_execute( + ctx: *mut c_void, + args_ptr: *mut TursoValue, + arg_count: usize, + changes: *mut i64, +) -> ResultCode { + if ctx.is_null() { + return ResultCode::Error; + } + let stmt = TursoStatement::from_ptr(ctx); + + let args = if !args_ptr.is_null() && arg_count > 0 { + unsafe { std::slice::from_raw_parts(args_ptr, arg_count) } + } else { + &[] + }; + for (i, arg) in args.iter().enumerate() { + let val = arg.to_value(&mut stmt.pool); + stmt.statement.bind_at(NonZero::new(i + 1).unwrap(), val); + } + loop { + match stmt.statement.step() { + Ok(StepResult::Row(_)) => { + // unexpected row during execution, error out. + return ResultCode::Error; + } + Ok(StepResult::Done) => { + stmt.conn.conn.total_changes(); + if !changes.is_null() { + unsafe { + *changes = stmt.conn.conn.total_changes(); + } + } + return ResultCode::Done; + } + Ok(StepResult::IO) => { + let _ = stmt.conn.io.run_once(); + } + Ok(StepResult::Busy) => { + return ResultCode::Busy; + } + Ok(StepResult::Interrupt) => { + return ResultCode::Interrupt; + } + Err(_) => { + return ResultCode::Error; + } + } + } } -impl<'a> TursoStatement<'a> { - fn new(statement: Statement, conn: &'a TursoConn<'a>) -> Self { - TursoStatement { statement, conn } +#[no_mangle] +pub extern "C" fn stmt_parameter_count(ctx: *mut c_void) -> i32 { + if ctx.is_null() { + return -1; } + let stmt = TursoStatement::from_ptr(ctx); + stmt.statement.parameters_count() as i32 +} + +#[no_mangle] +pub extern "C" fn stmt_query( + ctx: *mut c_void, + args_ptr: *mut TursoValue, + args_count: usize, +) -> *mut c_void { + if ctx.is_null() { + return std::ptr::null_mut(); + } + let stmt = TursoStatement::from_ptr(ctx); + let args = if !args_ptr.is_null() && args_count > 0 { + unsafe { std::slice::from_raw_parts(args_ptr, args_count) } + } else { + &[] + }; + for (i, arg) in args.iter().enumerate() { + let val = arg.to_value(&mut stmt.pool); + stmt.statement.bind_at(NonZero::new(i + 1).unwrap(), val); + } + match stmt.statement.query() { + Ok(rows) => { + let stmt = unsafe { Box::from_raw(stmt) }; + TursoRows::new(rows, stmt).to_ptr() + } + Err(_) => std::ptr::null_mut(), + } +} + +pub struct TursoStatement<'conn> { + pub statement: Statement, + pub conn: &'conn mut TursoConn, + pub pool: AllocPool, +} + +impl<'conn> TursoStatement<'conn> { + pub fn new(statement: Statement, conn: &'conn mut TursoConn) -> Self { + TursoStatement { + statement, + conn, + pool: AllocPool::new(), + } + } + + #[allow(clippy::wrong_self_convention)] fn to_ptr(self) -> *mut c_void { Box::into_raw(Box::new(self)) as *mut c_void } - fn from_ptr(ptr: *mut c_void) -> &'static mut TursoStatement<'a> { + + fn from_ptr(ptr: *mut c_void) -> &'static mut TursoStatement<'conn> { if ptr.is_null() { panic!("Null pointer"); } unsafe { &mut *(ptr as *mut TursoStatement) } } } - -#[no_mangle] -pub extern 
"C" fn db_get_columns(ctx: *mut c_void) -> *const c_void { - if ctx.is_null() { - return std::ptr::null(); - } - let stmt = TursoStatement::from_ptr(ctx); - let columns = stmt.statement.columns(); - let mut column_names = Vec::new(); - for column in columns { - column_names.push(column.name().to_string()); - } - let c_string = std::ffi::CString::new(column_names.join(",")).unwrap(); - c_string.into_raw() as *const c_void -} - -struct TursoRows<'a> { - rows: Rows<'a>, - conn: &'a mut TursoConn<'a>, -} - -impl<'a> TursoRows<'a> { - fn new(rows: Rows<'a>, conn: &'a mut TursoConn<'a>) -> Self { - TursoRows { rows, conn } - } - - fn to_ptr(self) -> *mut c_void { - Box::into_raw(Box::new(self)) as *mut c_void - } - - fn from_ptr(ptr: *mut c_void) -> &'static mut TursoRows<'a> { - if ptr.is_null() { - panic!("Null pointer"); - } - unsafe { &mut *(ptr as *mut TursoRows) } - } -} - -#[no_mangle] -pub extern "C" fn rows_next(ctx: *mut c_void, rows_ptr: *mut c_void) -> ResultCode { - if rows_ptr.is_null() || ctx.is_null() { - return ResultCode::Error; - } - let rows = unsafe { &mut *(rows_ptr as *mut Rows) }; - let conn = TursoConn::from_ptr(ctx); - - match rows.next_row() { - Ok(StepResult::Row(row)) => { - conn.cursor = Some(row.values); - ResultCode::Row - } - Ok(StepResult::Done) => { - // No more rows - ResultCode::Done - } - Ok(StepResult::IO) => { - let _ = conn.io.run_once(); - ResultCode::Io - } - Ok(StepResult::Busy) => ResultCode::Busy, - Ok(StepResult::Interrupt) => ResultCode::Interrupt, - Err(_) => ResultCode::Error, - } -} - -#[no_mangle] -pub extern "C" fn rows_get_value(ctx: *mut c_void, col_idx: usize) -> *const c_char { - if ctx.is_null() { - return std::ptr::null(); - } - let conn = TursoConn::from_ptr(ctx); - - if let Some(ref cursor) = conn.cursor { - if let Some(value) = cursor.get(col_idx) { - let c_string = std::ffi::CString::new(value.to_string()).unwrap(); - return c_string.into_raw(); // Caller must free this pointer - } - } - std::ptr::null() // No data or invalid index -} - -// Free the returned string -#[no_mangle] -pub extern "C" fn free_c_string(s: *mut c_char) { - if !s.is_null() { - unsafe { drop(std::ffi::CString::from_raw(s)) }; - } -} -#[no_mangle] -pub extern "C" fn rows_get_string( - ctx: *mut c_void, - rows_ptr: *mut c_void, - col_idx: i32, -) -> *const c_char { - if rows_ptr.is_null() || ctx.is_null() { - return std::ptr::null(); - } - let _rows = unsafe { &mut *(rows_ptr as *mut Rows) }; - let conn = TursoConn::from_ptr(ctx); - if col_idx > conn.cursor_idx as i32 || conn.cursor.is_none() { - return std::ptr::null(); - } - if let Some(values) = &conn.cursor { - let value = &values[col_idx as usize]; - match value { - Value::Text(s) => { - return s.as_ptr() as *const i8; - } - _ => return std::ptr::null(), - } - }; - std::ptr::null() -} - -#[no_mangle] -pub extern "C" fn rows_close(rows_ptr: *mut c_void) { - if !rows_ptr.is_null() { - let _ = unsafe { Box::from_raw(rows_ptr as *mut Rows) }; - } -} diff --git a/bindings/go/rs_src/types.rs b/bindings/go/rs_src/types.rs index 711887229..b8fc3ac75 100644 --- a/bindings/go/rs_src/types.rs +++ b/bindings/go/rs_src/types.rs @@ -1,14 +1,190 @@ +use std::ffi::{c_char, c_void}; +#[allow(dead_code)] #[repr(C)] pub enum ResultCode { Error = -1, Ok = 0, Row = 1, Busy = 2, - Done = 3, - Io = 4, - Interrupt = 5, - Invalid = 6, - Null = 7, - NoMem = 8, - ReadOnly = 9, + Io = 3, + Interrupt = 4, + Invalid = 5, + Null = 6, + NoMem = 7, + ReadOnly = 8, + NoData = 9, + Done = 10, +} + +#[repr(C)] +pub enum ValueType { + Integer = 
0, + Text = 1, + Blob = 2, + Real = 3, + Null = 4, +} + +#[repr(C)] +pub struct TursoValue { + pub value_type: ValueType, + pub value: ValueUnion, +} + +#[repr(C)] +pub union ValueUnion { + pub int_val: i64, + pub real_val: f64, + pub text_ptr: *const c_char, + pub blob_ptr: *const c_void, +} + +#[repr(C)] +pub struct Blob { + pub data: *const u8, + pub len: usize, +} + +impl Blob { + pub fn to_ptr(&self) -> *const c_void { + self as *const Blob as *const c_void + } +} + +pub struct AllocPool { + strings: Vec, + blobs: Vec>, +} +impl AllocPool { + pub fn new() -> Self { + AllocPool { + strings: Vec::new(), + blobs: Vec::new(), + } + } + pub fn add_string(&mut self, s: String) -> &String { + self.strings.push(s); + self.strings.last().unwrap() + } + + pub fn add_blob(&mut self, b: Vec) -> &Vec { + self.blobs.push(b); + self.blobs.last().unwrap() + } +} + +#[no_mangle] +pub extern "C" fn free_blob(blob_ptr: *mut c_void) { + if blob_ptr.is_null() { + return; + } + unsafe { + let _ = Box::from_raw(blob_ptr as *mut Blob); + } +} +#[allow(dead_code)] +impl ValueUnion { + fn from_str(s: &str) -> Self { + ValueUnion { + text_ptr: s.as_ptr() as *const c_char, + } + } + + fn from_bytes(b: &[u8]) -> Self { + ValueUnion { + blob_ptr: Blob { + data: b.as_ptr(), + len: b.len(), + } + .to_ptr(), + } + } + + fn from_int(i: i64) -> Self { + ValueUnion { int_val: i } + } + + fn from_real(r: f64) -> Self { + ValueUnion { real_val: r } + } + + fn from_null() -> Self { + ValueUnion { int_val: 0 } + } + + pub fn to_int(&self) -> i64 { + unsafe { self.int_val } + } + + pub fn to_real(&self) -> f64 { + unsafe { self.real_val } + } + + pub fn to_str(&self) -> &str { + unsafe { std::ffi::CStr::from_ptr(self.text_ptr).to_str().unwrap() } + } + + pub fn to_bytes(&self) -> &[u8] { + let blob = unsafe { self.blob_ptr as *const Blob }; + let blob = unsafe { &*blob }; + unsafe { std::slice::from_raw_parts(blob.data, blob.len) } + } +} + +impl TursoValue { + pub fn new(value_type: ValueType, value: ValueUnion) -> Self { + TursoValue { value_type, value } + } + + #[allow(clippy::wrong_self_convention)] + pub fn to_ptr(self) -> *const c_void { + Box::into_raw(Box::new(self)) as *const c_void + } + + pub fn from_value(value: &limbo_core::Value<'_>) -> Self { + match value { + limbo_core::Value::Integer(i) => { + TursoValue::new(ValueType::Integer, ValueUnion::from_int(*i)) + } + limbo_core::Value::Float(r) => { + TursoValue::new(ValueType::Real, ValueUnion::from_real(*r)) + } + limbo_core::Value::Text(s) => TursoValue::new(ValueType::Text, ValueUnion::from_str(s)), + limbo_core::Value::Blob(b) => { + TursoValue::new(ValueType::Blob, ValueUnion::from_bytes(b)) + } + limbo_core::Value::Null => TursoValue::new(ValueType::Null, ValueUnion::from_null()), + } + } + + pub fn to_value<'pool>(&self, pool: &'pool mut AllocPool) -> limbo_core::Value<'pool> { + match self.value_type { + ValueType::Integer => limbo_core::Value::Integer(unsafe { self.value.int_val }), + ValueType::Real => limbo_core::Value::Float(unsafe { self.value.real_val }), + ValueType::Text => { + let cstr = unsafe { std::ffi::CStr::from_ptr(self.value.text_ptr) }; + match cstr.to_str() { + Ok(utf8_str) => { + let owned = utf8_str.to_owned(); + // statement needs to own these strings, will free when closed + let borrowed = pool.add_string(owned); + limbo_core::Value::Text(borrowed) + } + Err(_) => limbo_core::Value::Null, + } + } + ValueType::Blob => { + let blob_ptr = unsafe { self.value.blob_ptr as *const Blob }; + if blob_ptr.is_null() { + limbo_core::Value::Null 
+ } else { + let blob = unsafe { &*blob_ptr }; + let data = unsafe { std::slice::from_raw_parts(blob.data, blob.len) }; + let borrowed = pool.add_blob(data.to_vec()); + limbo_core::Value::Blob(borrowed) + } + } + ValueType::Null => limbo_core::Value::Null, + } + } } diff --git a/bindings/go/stmt.go b/bindings/go/stmt.go index 20e1a5774..2b7895fe2 100644 --- a/bindings/go/stmt.go +++ b/bindings/go/stmt.go @@ -1,77 +1,192 @@ package turso import ( + "context" "database/sql/driver" + "errors" "fmt" "io" + "unsafe" ) -type stmt struct { - ctx uintptr - sql string +// only construct tursoStmt with initStmt function to ensure proper initialization +type tursoStmt struct { + ctx uintptr + sql string + query stmtQueryFn + execute stmtExecuteFn + getParamCount func(uintptr) int32 } -type rows struct { - ctx uintptr - rowsPtr uintptr - columns []string - err error +// Initialize/register the FFI function pointers for the statement methods +func initStmt(ctx uintptr, sql string) *tursoStmt { + var query stmtQueryFn + var execute stmtExecuteFn + var getParamCount func(uintptr) int32 + methods := []ExtFunc{{query, FfiStmtQuery}, {execute, FfiStmtExec}, {getParamCount, FfiStmtParameterCount}} + for i := range methods { + methods[i].initFunc() + } + return &tursoStmt{ + ctx: uintptr(ctx), + sql: sql, + } } -func (ls *stmt) Query(args []driver.Value) (driver.Rows, error) { - var dbPrepare func(uintptr, uintptr) uintptr - getExtFunc(&dbPrepare, "db_prepare") +func (st *tursoStmt) NumInput() int { + return int(st.getParamCount(st.ctx)) +} - queryPtr := toCString(ls.sql) - defer freeCString(queryPtr) +func (st *tursoStmt) Exec(args []driver.Value) (driver.Result, error) { + argArray, err := buildArgs(args) + if err != nil { + return nil, err + } + argPtr := uintptr(0) + argCount := uint64(len(argArray)) + if argCount > 0 { + argPtr = uintptr(unsafe.Pointer(&argArray[0])) + } + var changes uint64 + rc := st.execute(st.ctx, argPtr, argCount, uintptr(unsafe.Pointer(&changes))) + switch ResultCode(rc) { + case Ok: + return driver.RowsAffected(changes), nil + case Error: + return nil, errors.New("error executing statement") + case Busy: + return nil, errors.New("busy") + case Interrupt: + return nil, errors.New("interrupted") + case Invalid: + return nil, errors.New("invalid statement") + default: + return nil, fmt.Errorf("unexpected status: %d", rc) + } +} - rowsPtr := dbPrepare(ls.ctx, queryPtr) +func (st *tursoStmt) Query(args []driver.Value) (driver.Rows, error) { + queryArgs, err := buildArgs(args) + if err != nil { + return nil, err + } + rowsPtr := st.query(st.ctx, uintptr(unsafe.Pointer(&queryArgs[0])), uint64(len(queryArgs))) if rowsPtr == 0 { - return nil, fmt.Errorf("failed to prepare query") + return nil, fmt.Errorf("query failed for: %q", st.sql) } - var colFunc func(uintptr, uintptr) uintptr - - getExtFunc(&colFunc, "columns") - - rows := &rows{ - ctx: ls.ctx, - rowsPtr: rowsPtr, - } - return rows, nil + return initRows(rowsPtr), nil } -func (lr *rows) Columns() []string { - return lr.columns +func (ts *tursoStmt) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + stripped := namedValueToValue(args) + argArray, err := getArgsPtr(stripped) + if err != nil { + return nil, err + } + var changes uintptr + res := ts.execute(ts.ctx, argArray, uint64(len(args)), changes) + switch ResultCode(res) { + case Ok: + return driver.RowsAffected(changes), nil + case Error: + return nil, errors.New("error executing statement") + case Busy: + return nil, 
errors.New("busy") + case Interrupt: + return nil, errors.New("interrupted") + default: + return nil, fmt.Errorf("unexpected status: %d", res) + } } -func (lr *rows) Close() error { - var rowsClose func(uintptr) - getExtFunc(&rowsClose, "rows_close") - rowsClose(lr.rowsPtr) +func (st *tursoStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + queryArgs, err := buildNamedArgs(args) + if err != nil { + return nil, err + } + rowsPtr := st.query(st.ctx, uintptr(unsafe.Pointer(&queryArgs[0])), uint64(len(queryArgs))) + if rowsPtr == 0 { + return nil, fmt.Errorf("query failed for: %q", st.sql) + } + return initRows(rowsPtr), nil +} + +// only construct tursoRows with initRows function to ensure proper initialization +type tursoRows struct { + ctx uintptr + columns []string + closed bool + getCols func(uintptr, *uint) uintptr + next func(uintptr) uintptr + getValue func(uintptr, int32) uintptr + closeRows func(uintptr) uintptr + freeCols func(uintptr) uintptr +} + +// Initialize/register the FFI function pointers for the rows methods +// DO NOT construct 'tursoRows' without this function +func initRows(ctx uintptr) *tursoRows { + var getCols func(uintptr, *uint) uintptr + var getValue func(uintptr, int32) uintptr + var closeRows func(uintptr) uintptr + var freeCols func(uintptr) uintptr + var next func(uintptr) uintptr + methods := []ExtFunc{ + {getCols, FfiRowsGetColumns}, + {getValue, FfiRowsGetValue}, + {closeRows, FfiRowsClose}, + {freeCols, FfiFreeColumns}, + {next, FfiRowsNext}} + for i := range methods { + methods[i].initFunc() + } + + return &tursoRows{ + ctx: ctx, + getCols: getCols, + getValue: getValue, + closeRows: closeRows, + freeCols: freeCols, + next: next, + } +} + +func (r *tursoRows) Columns() []string { + if r.columns == nil { + var columnCount uint + colArrayPtr := r.getCols(r.ctx, &columnCount) + if colArrayPtr != 0 && columnCount > 0 { + r.columns = cArrayToGoStrings(colArrayPtr, columnCount) + if r.freeCols == nil { + getFfiFunc(&r.freeCols, FfiFreeColumns) + } + defer r.freeCols(colArrayPtr) + } + } + return r.columns +} + +func (r *tursoRows) Close() error { + if r.closed { + return nil + } + r.closed = true + r.closeRows(r.ctx) + r.ctx = 0 return nil } -func (lr *rows) Next(dest []driver.Value) error { - var rowsNext func(uintptr, uintptr) int32 - getExtFunc(&rowsNext, "rows_next") - - status := rowsNext(lr.ctx, lr.rowsPtr) +func (r *tursoRows) Next(dest []driver.Value) error { + status := r.next(r.ctx) switch ResultCode(status) { case Row: for i := range dest { - getExtFunc(&rowsGetValue, "rows_get_value") - - valPtr := rowsGetValue(lr.ctx, int32(i)) - if valPtr != 0 { - val := cStringToGoString(valPtr) - dest[i] = val - freeCString(valPtr) - } else { - dest[i] = nil - } + valPtr := r.getValue(r.ctx, int32(i)) + val := toGoValue(valPtr) + dest[i] = val } return nil - case 0: // No more rows + case Done: return io.EOF default: return fmt.Errorf("unexpected status: %d", status) diff --git a/bindings/go/turso.go b/bindings/go/turso.go index 0c095ac80..dcafb3a64 100644 --- a/bindings/go/turso.go +++ b/bindings/go/turso.go @@ -4,43 +4,61 @@ import ( "database/sql" "database/sql/driver" "errors" + "fmt" "log/slog" "os" + "runtime" "sync" "unsafe" "github.com/ebitengine/purego" + "golang.org/x/sys/windows" ) -const ( - turso = "../../target/debug/lib_turso_go.so" -) +const turso = "../../target/debug/lib_turso_go" +const driverName = "turso" -func toGoStr(ptr uintptr, length int) string { - if ptr == 0 { - return "" +var tursoLib uintptr + 
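+// Note: the relative path above assumes the Rust cdylib has already been
+// built (e.g. `cargo build`), so that the lib_turso_go shared library
+// exists under ../../target/debug. getSystemLibrary appends the extension
+// for the current GOOS, loads the library once at init time, and stores
+// the handle in tursoLib so getFfiFunc / purego.RegisterLibFunc can
+// resolve the exported db_* / stmt_* / rows_* symbols against it.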
+func getSystemLibrary() error { + switch runtime.GOOS { + case "darwin": + slib, err := purego.Dlopen(fmt.Sprintf("%s.dylib", turso), purego.RTLD_LAZY) + if err != nil { + return err + } + tursoLib = slib + case "linux": + slib, err := purego.Dlopen(fmt.Sprintf("%s.so", turso), purego.RTLD_LAZY) + if err != nil { + return err + } + tursoLib = slib + case "windows": + slib, err := windows.LoadLibrary(fmt.Sprintf("%s.dll", turso)) + if err != nil { + return err + } + tursoLib = slib + default: + panic(fmt.Errorf("GOOS=%s is not supported", runtime.GOOS)) } - uptr := unsafe.Pointer(ptr) - s := (*string)(uptr) - if s == nil { - // redundant - return "" - } - return *s + return nil } func init() { - slib, err := purego.Dlopen(turso, purego.RTLD_LAZY) + err := getSystemLibrary() if err != nil { slog.Error("Error opening turso library: ", err) os.Exit(1) } - lib = slib - sql.Register("turso", &tursoDriver{}) + sql.Register(driverName, &tursoDriver{}) } -type tursoDriver struct { - tursoCtx +type tursoDriver struct{} + +func (d tursoDriver) Open(name string) (driver.Conn, error) { + return openConn(name) } func toCString(s string) uintptr { @@ -48,80 +66,76 @@ func toCString(s string) uintptr { return uintptr(unsafe.Pointer(&b[0])) } -func getExtFunc(ptr interface{}, name string) { - purego.RegisterLibFunc(ptr, lib, name) +// helper to register an FFI function in the lib_turso_go library +func getFfiFunc(ptr interface{}, name string) { + purego.RegisterLibFunc(&ptr, tursoLib, name) } -type conn struct { +type tursoConn struct { ctx uintptr sync.Mutex - writeTimeFmt string - lastInsertID int64 - lastAffected int64 + prepare func(uintptr, uintptr) uintptr } -func newConn() *conn { - return &conn{ - 0, +func newConn(ctx uintptr) *tursoConn { + var prepare func(uintptr, uintptr) uintptr + getFfiFunc(&prepare, FfiDbPrepare) + return &tursoConn{ + ctx, sync.Mutex{}, - "2006-01-02 15:04:05", - 0, - 0, + prepare, } } -func open(dsn string) (*conn, error) { - var open func(uintptr) uintptr - getExtFunc(&open, ExtDBOpen) - c := newConn() - path := toCString(dsn) - ctx := open(path) - c.ctx = ctx - return c, nil -} +func openConn(dsn string) (*tursoConn, error) { + var dbOpen func(uintptr) uintptr + getFfiFunc(&dbOpen, FfiDbOpen) -type tursoCtx struct { - conn *conn - tx *sql.Tx - err error - rows *sql.Rows - stmt *sql.Stmt -} + cStr := toCString(dsn) + defer freeCString(cStr) -func (lc tursoCtx) Open(dsn string) (driver.Conn, error) { - conn, err := open(dsn) - if err != nil { - return nil, err + ctx := dbOpen(cStr) + if ctx == 0 { + return nil, fmt.Errorf("failed to open database for dsn=%q", dsn) } - nc := tursoCtx{conn: conn} - return nc, nil + return &tursoConn{ctx: ctx}, nil } -func (lc tursoCtx) Close() error { - var closedb func(uintptr) uintptr - getExtFunc(&closedb, ExtDBClose) - closedb(lc.conn.ctx) +func (c *tursoConn) Close() error { + if c.ctx == 0 { + return nil + } + var dbClose func(uintptr) uintptr + getFfiFunc(&dbClose, FfiDbClose) + + dbClose(c.ctx) + c.ctx = 0 return nil } -// TODO: Begin not implemented -func (lc tursoCtx) Begin() (driver.Tx, error) { - return nil, nil +func (c *tursoConn) Prepare(query string) (driver.Stmt, error) { + if c.ctx == 0 { + return nil, errors.New("connection closed") + } + if c.prepare == nil { + var dbPrepare func(uintptr, uintptr) uintptr + getFfiFunc(&dbPrepare, FfiDbPrepare) + c.prepare = dbPrepare + } + qPtr := toCString(query) + stmtPtr := c.prepare(c.ctx, qPtr) + freeCString(qPtr) + + if stmtPtr == 0 { + return nil, fmt.Errorf("prepare failed: %q", 
query) + } + return &tursoStmt{ + ctx: stmtPtr, + sql: query, + }, nil } -func (ls tursoCtx) Prepare(sql string) (driver.Stmt, error) { - var prepare func(uintptr, uintptr) uintptr - getExtFunc(&prepare, ExtDBPrepare) - s := toCString(sql) - statement := prepare(ls.conn.ctx, s) - if statement == 0 { - return nil, errors.New("no rows") - } - ls.stmt = stmt{ - ctx: statement, - - } - - } - return nil, nil +// begin is needed to implement driver.Conn.. for now not implemented +func (c *tursoConn) Begin() (driver.Tx, error) { + return nil, errors.New("transactions not implemented") } diff --git a/bindings/go/types.go b/bindings/go/types.go index 0569b317a..e24b2f168 100644 --- a/bindings/go/types.go +++ b/bindings/go/types.go @@ -1,28 +1,248 @@ package turso +import ( + "database/sql/driver" + "fmt" + "unsafe" +) + type ResultCode int const ( - Error ResultCode = -1 - Ok ResultCode = 0 - Row ResultCode = 1 - Busy ResultCode = 2 - Done ResultCode = 3 - Io ResultCode = 4 - Interrupt ResultCode = 5 - Invalid ResultCode = 6 - Null ResultCode = 7 - NoMem ResultCode = 8 - ReadOnly ResultCode = 9 - ExtDBOpen string = "db_open" - ExtDBClose string = "db_close" - ExtDBPrepare string = "db_prepare" + Error ResultCode = -1 + Ok ResultCode = 0 + Row ResultCode = 1 + Busy ResultCode = 2 + Io ResultCode = 3 + Interrupt ResultCode = 4 + Invalid ResultCode = 5 + Null ResultCode = 6 + NoMem ResultCode = 7 + ReadOnly ResultCode = 8 + NoData ResultCode = 9 + Done ResultCode = 10 ) -var ( - lib uintptr - dbPrepare func(uintptr, uintptr) uintptr - rowsNext func(rowsPtr uintptr) int32 - rowsGetValue func(rowsPtr uintptr, colIdx uint) uintptr - freeCString func(strPtr uintptr) +const ( + FfiDbOpen string = "db_open" + FfiDbClose string = "db_close" + FfiDbPrepare string = "db_prepare" + FfiStmtExec string = "stmt_execute" + FfiStmtQuery string = "stmt_query" + FfiStmtParameterCount string = "stmt_parameter_count" + FfiRowsClose string = "rows_close" + FfiRowsGetColumns string = "rows_get_columns" + FfiRowsNext string = "rows_next" + FfiRowsGetValue string = "rows_get_value" + FfiFreeColumns string = "free_columns" + FfiFreeCString string = "free_string" ) + +// convert a namedValue slice into normal values until named parameters are supported +func namedValueToValue(named []driver.NamedValue) []driver.Value { + out := make([]driver.Value, len(named)) + for i, nv := range named { + out[i] = nv.Value + } + return out +} + +func buildNamedArgs(named []driver.NamedValue) ([]tursoValue, error) { + args := make([]driver.Value, len(named)) + for i, nv := range named { + args[i] = nv.Value + } + return buildArgs(args) +} + +type ExtFunc struct { + funcPtr interface{} + funcName string +} + +func (ef *ExtFunc) initFunc() { + getFfiFunc(&ef.funcPtr, ef.funcName) +} + +type valueType int + +const ( + intVal valueType = iota + textVal + blobVal + realVal + nullVal +) + +// struct to pass Go values over FFI +type tursoValue struct { + Type valueType + Value [8]byte +} + +// struct to pass byte slices over FFI +type Blob struct { + Data uintptr + Len uint +} + +// convert a tursoValue to a native Go value +func toGoValue(valPtr uintptr) interface{} { + val := (*tursoValue)(unsafe.Pointer(valPtr)) + switch val.Type { + case intVal: + return *(*int64)(unsafe.Pointer(&val.Value)) + case realVal: + return *(*float64)(unsafe.Pointer(&val.Value)) + case textVal: + textPtr := *(*uintptr)(unsafe.Pointer(&val.Value)) + return GoString(textPtr) + case blobVal: + blobPtr := *(*uintptr)(unsafe.Pointer(&val.Value)) + return toGoBlob(blobPtr) 
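+	// (for the blobVal case above, the 8-byte Value field carries a pointer
+	// to a Blob{Data, Len} header written by the Rust side; toGoBlob turns
+	// it into a []byte view of that memory without copying, so the bytes
+	// alias storage owned by the native library)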
+ case nullVal: + return nil + default: + return nil + } +} + +func getArgsPtr(args []driver.Value) (uintptr, error) { + if len(args) == 0 { + return 0, nil + } + argSlice, err := buildArgs(args) + if err != nil { + return 0, err + } + return uintptr(unsafe.Pointer(&argSlice[0])), nil +} + +// convert a byte slice to a Blob type that can be sent over FFI +func makeBlob(b []byte) *Blob { + if len(b) == 0 { + return nil + } + blob := &Blob{ + Data: uintptr(unsafe.Pointer(&b[0])), + Len: uint(len(b)), + } + return blob +} + +// converts a blob received via FFI to a native Go byte slice +func toGoBlob(blobPtr uintptr) []byte { + if blobPtr == 0 { + return nil + } + blob := (*Blob)(unsafe.Pointer(blobPtr)) + return unsafe.Slice((*byte)(unsafe.Pointer(blob.Data)), blob.Len) +} + +var freeString func(*byte) + +// free a C style string allocated via FFI +func freeCString(cstr uintptr) { + if cstr == 0 { + return + } + if freeString == nil { + getFfiFunc(&freeString, FfiFreeCString) + } + freeString((*byte)(unsafe.Pointer(cstr))) +} + +func cArrayToGoStrings(arrayPtr uintptr, length uint) []string { + if arrayPtr == 0 || length == 0 { + return nil + } + + ptrSlice := unsafe.Slice( + (**byte)(unsafe.Pointer(arrayPtr)), + length, + ) + + out := make([]string, 0, length) + for _, cstr := range ptrSlice { + out = append(out, GoString(uintptr(unsafe.Pointer(cstr)))) + } + return out +} + +// convert a Go slice of driver.Value to a slice of tursoValue that can be sent over FFI +func buildArgs(args []driver.Value) ([]tursoValue, error) { + argSlice := make([]tursoValue, len(args)) + + for i, v := range args { + switch val := v.(type) { + case nil: + argSlice[i].Type = nullVal + + case int64: + argSlice[i].Type = intVal + storeInt64(&argSlice[i].Value, val) + + case float64: + argSlice[i].Type = realVal + storeFloat64(&argSlice[i].Value, val) + case string: + argSlice[i].Type = textVal + cstr := CString(val) + storePointer(&argSlice[i].Value, cstr) + case []byte: + argSlice[i].Type = blobVal + blob := makeBlob(val) + *(*uintptr)(unsafe.Pointer(&argSlice[i].Value)) = uintptr(unsafe.Pointer(blob)) + default: + return nil, fmt.Errorf("unsupported type: %T", v) + } + } + return argSlice, nil +} + +func storeInt64(data *[8]byte, val int64) { + *(*int64)(unsafe.Pointer(data)) = val +} + +func storeFloat64(data *[8]byte, val float64) { + *(*float64)(unsafe.Pointer(data)) = val +} + +func storePointer(data *[8]byte, ptr *byte) { + *(*uintptr)(unsafe.Pointer(data)) = uintptr(unsafe.Pointer(ptr)) +} + +type stmtExecuteFn func(stmtPtr uintptr, argsPtr uintptr, argCount uint64, changes uintptr) int32 +type stmtQueryFn func(stmtPtr uintptr, argsPtr uintptr, argCount uint64) uintptr + +/* Credit below (Apache2 License) to: +https://github.com/ebitengine/purego/blob/main/internal/strings/strings.go +*/ + +func hasSuffix(s, suffix string) bool { + return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix +} + +func CString(name string) *byte { + if hasSuffix(name, "\x00") { + return &(*(*[]byte)(unsafe.Pointer(&name)))[0] + } + b := make([]byte, len(name)+1) + copy(b, name) + return &b[0] +} + +func GoString(c uintptr) string { + ptr := *(*unsafe.Pointer)(unsafe.Pointer(&c)) + if ptr == nil { + return "" + } + var length int + for { + if *(*byte)(unsafe.Add(ptr, uintptr(length))) == '\x00' { + break + } + length++ + } + return string(unsafe.Slice((*byte)(ptr), length)) +} diff --git a/core/lib.rs b/core/lib.rs index d75510f7c..f03c73e00 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -427,6 +427,10 @@ impl Connection 
{ let prev_total_changes = self.total_changes.get(); self.total_changes.set(prev_total_changes + nchange); } + + pub fn total_changes(&self) -> i64 { + self.total_changes.get() + } } pub struct Statement { @@ -473,6 +477,10 @@ impl Statement { &self.program.parameters } + pub fn parameters_count(&self) -> usize { + self.program.parameters.count() + } + pub fn bind_at(&mut self, index: NonZero, value: Value) { self.state.bind_at(index, value.into()); } From 17e7e03423d408491040d9174d0517a7d55211f7 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 26 Jan 2025 10:28:23 +0200 Subject: [PATCH 22/34] Revert "cargo: Disable LTO.." This reverts commit 4943217045beb468897d1af0014472a9fe22d11b to get 5% of performance back. Before: ``` limbo/Execute prepared statement: 'SELECT 1' time: [111.45 ns 111.52 ns 111.61 ns] thrpt: [8.9594 Melem/s 8.9667 Melem/s 8.9727 Melem/s] Found 8 outliers among 100 measurements (8.00%) 2 (2.00%) low mild 2 (2.00%) high mild 4 (4.00%) high severe ``` ``` limbo/Execute prepared statement: 'SELECT 1' time: [106.22 ns 106.48 ns 106.85 ns] thrpt: [9.3587 Melem/s 9.3911 Melem/s 9.4142 Melem/s] change: time: [-4.8335% -4.6750% -4.5222%] (p = 0.00 < 0.05) thrpt: [+4.7364% +4.9043% +5.0790%] Performance has improved. ``` --- Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 570e5b638..0ffbdf6ac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 +58,7 @@ github-attestations = true debug = "line-tables-only" codegen-units = 1 panic = "abort" -lto = "off" +lto = true [profile.bench-profile] inherits = "release" @@ -66,3 +66,4 @@ debug = true [profile.dist] inherits = "release" +lto = "thin" From 8942c38bda724a75f6be217b50188eb63bed9504 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 26 Jan 2025 10:52:58 +0200 Subject: [PATCH 23/34] core: Fix Statement::reset() The first rule of writing fast programs: don't use dynamic memory allocation! Brings back some performance for the `SELECT 1` micro-benchmark, although we're still not where we need to be. 
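The gist: instead of dropping the old `ProgramState` and allocating a fresh one on every `reset()`, clear the existing buffers in place so their heap allocations get reused. A minimal sketch of that pattern, with made-up names (`ExecState` and its fields are illustrative, not the actual limbo types):

```rust
// Illustrative only: contrasts "reset by reallocating" with "reset in place".
struct ExecState {
    pc: usize,
    registers: Vec<i64>,
}

impl ExecState {
    fn new(max_registers: usize) -> Self {
        Self { pc: 0, registers: vec![0; max_registers] }
    }

    // Old style: every reset pays for a brand-new Vec allocation.
    fn reset_by_realloc(&mut self) {
        *self = ExecState::new(self.registers.len());
    }

    // New style: keep the Vec's capacity, just overwrite its contents.
    fn reset_in_place(&mut self) {
        self.pc = 0;
        let n = self.registers.len();
        self.registers.clear();      // length -> 0, capacity retained
        self.registers.resize(n, 0); // refill without a new allocation
    }
}

fn main() {
    let mut state = ExecState::new(4);
    state.pc = 7;
    state.registers[2] = 42;
    state.reset_in_place();
    assert_eq!(state.pc, 0);
    assert_eq!(state.registers, vec![0; 4]);
}
```

The `ProgramState::reset()` change below applies the same idea to the real fields: the registers are cleared and resized rather than reallocated, and the cursor maps and caches are cleared in place.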
--- core/lib.rs | 3 +-- core/vdbe/mod.rs | 13 +++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index f03c73e00..628d21150 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -486,8 +486,7 @@ impl Statement { } pub fn reset(&mut self) { - let state = vdbe::ProgramState::new(self.program.max_registers); - self.state = state + self.state.reset(); } } diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index dae7f8806..a06412aa3 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -258,6 +258,19 @@ impl ProgramState { } pub fn reset(&mut self) { + self.pc = 0; + self.btree_table_cursors.borrow_mut().clear(); + self.btree_index_cursors.borrow_mut().clear(); + self.pseudo_cursors.borrow_mut().clear(); + self.sorter_cursors.borrow_mut().clear(); + let max_registers = self.registers.len(); + self.registers.clear(); + self.registers.resize(max_registers, OwnedValue::Null); + self.last_compare = None; + self.deferred_seek = None; + self.ended_coroutine.clear(); + self.regex_cache.like.clear(); + self.interrupted = false; self.parameters.clear(); } } From 9e32ce6c774e292da78c83bded75dbb0e57943e0 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Sun, 26 Jan 2025 12:32:22 +0200 Subject: [PATCH 24/34] Add Cursor enum and store a single BTreeMap of cursors in ProgramState --- core/types.rs | 57 +++++++++- core/vdbe/mod.rs | 256 +++++++++++++++++++------------------------- core/vdbe/sorter.rs | 7 +- 3 files changed, 173 insertions(+), 147 deletions(-) diff --git a/core/types.rs b/core/types.rs index a3c31e1d9..92e4714c7 100644 --- a/core/types.rs +++ b/core/types.rs @@ -2,7 +2,10 @@ use limbo_ext::{AggCtx, FinalizeFunction, StepFunction}; use crate::error::LimboError; use crate::ext::{ExtValue, ExtValueType}; +use crate::pseudo::PseudoCursor; +use crate::storage::btree::BTreeCursor; use crate::storage::sqlite3_ondisk::write_varint; +use crate::vdbe::sorter::Sorter; use crate::Result; use std::fmt::Display; use std::rc::Rc; @@ -604,7 +607,59 @@ impl OwnedRecord { } } -#[derive(PartialEq, Debug)] +pub enum Cursor { + Table(BTreeCursor), + Index(BTreeCursor), + Pseudo(PseudoCursor), + Sorter(Sorter), +} + +impl Cursor { + pub fn new_table(cursor: BTreeCursor) -> Self { + Self::Table(cursor) + } + + pub fn new_index(cursor: BTreeCursor) -> Self { + Self::Index(cursor) + } + + pub fn new_pseudo(cursor: PseudoCursor) -> Self { + Self::Pseudo(cursor) + } + + pub fn new_sorter(cursor: Sorter) -> Self { + Self::Sorter(cursor) + } + + pub fn as_table_mut(&mut self) -> &mut BTreeCursor { + match self { + Self::Table(cursor) => cursor, + _ => panic!("Cursor is not a table"), + } + } + + pub fn as_index_mut(&mut self) -> &mut BTreeCursor { + match self { + Self::Index(cursor) => cursor, + _ => panic!("Cursor is not an index"), + } + } + + pub fn as_pseudo_mut(&mut self) -> &mut PseudoCursor { + match self { + Self::Pseudo(cursor) => cursor, + _ => panic!("Cursor is not a pseudo cursor"), + } + } + + pub fn as_sorter_mut(&mut self) -> &mut Sorter { + match self { + Self::Sorter(cursor) => cursor, + _ => panic!("Cursor is not a sorter cursor"), + } + } +} + pub enum CursorResult { Ok(T), IO, diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index a06412aa3..eb44a9edb 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -32,7 +32,8 @@ use crate::result::LimboResult; use crate::storage::sqlite3_ondisk::DatabaseHeader; use crate::storage::{btree::BTreeCursor, pager::Pager}; use crate::types::{ - AggContext, CursorResult, ExternalAggState, OwnedRecord, OwnedValue, Record, 
SeekKey, SeekOp, + AggContext, Cursor, CursorResult, ExternalAggState, OwnedRecord, OwnedValue, Record, SeekKey, + SeekOp, }; use crate::util::parse_schema_rows; use crate::vdbe::builder::CursorType; @@ -58,7 +59,7 @@ use rand::{thread_rng, Rng}; use regex::{Regex, RegexBuilder}; use sorter::Sorter; use std::borrow::BorrowMut; -use std::cell::{Cell, RefCell}; +use std::cell::{Cell, RefCell, RefMut}; use std::collections::{BTreeMap, HashMap}; use std::num::NonZero; use std::rc::{Rc, Weak}; @@ -193,13 +194,54 @@ impl RegexCache { } } +fn get_cursor_as_table_mut<'long, 'short>( + cursors: &'short mut RefMut<'long, BTreeMap>, + cursor_id: CursorID, +) -> &'short mut BTreeCursor { + let cursor = cursors + .get_mut(&cursor_id) + .expect("cursor not allocated") + .as_table_mut(); + cursor +} + +fn get_cursor_as_index_mut<'long, 'short>( + cursors: &'short mut RefMut<'long, BTreeMap>, + cursor_id: CursorID, +) -> &'short mut BTreeCursor { + let cursor = cursors + .get_mut(&cursor_id) + .expect("cursor not allocated") + .as_index_mut(); + cursor +} + +fn get_cursor_as_pseudo_mut<'long, 'short>( + cursors: &'short mut RefMut<'long, BTreeMap>, + cursor_id: CursorID, +) -> &'short mut PseudoCursor { + let cursor = cursors + .get_mut(&cursor_id) + .expect("cursor not allocated") + .as_pseudo_mut(); + cursor +} + +fn get_cursor_as_sorter_mut<'long, 'short>( + cursors: &'short mut RefMut<'long, BTreeMap>, + cursor_id: CursorID, +) -> &'short mut Sorter { + let cursor = cursors + .get_mut(&cursor_id) + .expect("cursor not allocated") + .as_sorter_mut(); + cursor +} + /// The program state describes the environment in which the program executes. pub struct ProgramState { pub pc: InsnReference, - btree_table_cursors: RefCell>, - btree_index_cursors: RefCell>, - pseudo_cursors: RefCell>, - sorter_cursors: RefCell>, + cursors: RefCell>, registers: Vec, last_compare: Option, deferred_seek: Option<(CursorID, CursorID)>, @@ -211,18 +253,12 @@ pub struct ProgramState { impl ProgramState { pub fn new(max_registers: usize) -> Self { - let btree_table_cursors = RefCell::new(BTreeMap::new()); - let btree_index_cursors = RefCell::new(BTreeMap::new()); - let pseudo_cursors = RefCell::new(BTreeMap::new()); - let sorter_cursors = RefCell::new(BTreeMap::new()); + let cursors: RefCell> = RefCell::new(BTreeMap::new()); let mut registers = Vec::with_capacity(max_registers); registers.resize(max_registers, OwnedValue::Null); Self { pc: 0, - btree_table_cursors, - btree_index_cursors, - pseudo_cursors, - sorter_cursors, + cursors, registers, last_compare: None, deferred_seek: None, @@ -259,10 +295,7 @@ impl ProgramState { pub fn reset(&mut self) { self.pc = 0; - self.btree_table_cursors.borrow_mut().clear(); - self.btree_index_cursors.borrow_mut().clear(); - self.pseudo_cursors.borrow_mut().clear(); - self.sorter_cursors.borrow_mut().clear(); + self.cursors.borrow_mut().clear(); let max_registers = self.registers.len(); self.registers.clear(); self.registers.resize(max_registers, OwnedValue::Null); @@ -276,11 +309,11 @@ impl ProgramState { } macro_rules! 
must_be_btree_cursor { - ($cursor_id:expr, $cursor_ref:expr, $btree_table_cursors:expr, $btree_index_cursors:expr, $insn_name:expr) => {{ + ($cursor_id:expr, $cursor_ref:expr, $cursors:expr, $insn_name:expr) => {{ let (_, cursor_type) = $cursor_ref.get($cursor_id).unwrap(); let cursor = match cursor_type { - CursorType::BTreeTable(_) => $btree_table_cursors.get_mut(&$cursor_id).unwrap(), - CursorType::BTreeIndex(_) => $btree_index_cursors.get_mut(&$cursor_id).unwrap(), + CursorType::BTreeTable(_) => get_cursor_as_table_mut(&mut $cursors, $cursor_id), + CursorType::BTreeIndex(_) => get_cursor_as_index_mut(&mut $cursors, $cursor_id), CursorType::Pseudo(_) => panic!("{} on pseudo cursor", $insn_name), CursorType::Sorter => panic!("{} on sorter cursor", $insn_name), }; @@ -333,10 +366,7 @@ impl Program { } let insn = &self.insns[state.pc as usize]; trace_insn(self, state.pc as InsnReference, insn); - let mut btree_table_cursors = state.btree_table_cursors.borrow_mut(); - let mut btree_index_cursors = state.btree_index_cursors.borrow_mut(); - let mut pseudo_cursors = state.pseudo_cursors.borrow_mut(); - let mut sorter_cursors = state.sorter_cursors.borrow_mut(); + let mut cursors = state.cursors.borrow_mut(); match insn { Insn::Init { target_pc } => { assert!(target_pc.is_offset()); @@ -414,13 +444,8 @@ impl Program { state.pc += 1; } Insn::NullRow { cursor_id } => { - let cursor = must_be_btree_cursor!( - *cursor_id, - self.cursor_ref, - btree_table_cursors, - btree_index_cursors, - "NullRow" - ); + let cursor = + must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "NullRow"); cursor.set_null_flag(true); state.pc += 1; } @@ -720,10 +745,10 @@ impl Program { BTreeCursor::new(pager.clone(), *root_page, self.database_header.clone()); match cursor_type { CursorType::BTreeTable(_) => { - btree_table_cursors.insert(*cursor_id, cursor); + cursors.insert(*cursor_id, Cursor::new_table(cursor)); } CursorType::BTreeIndex(_) => { - btree_index_cursors.insert(*cursor_id, cursor); + cursors.insert(*cursor_id, Cursor::new_index(cursor)); } CursorType::Pseudo(_) => { panic!("OpenReadAsync on pseudo cursor"); @@ -743,28 +768,18 @@ impl Program { num_fields: _, } => { let cursor = PseudoCursor::new(); - pseudo_cursors.insert(*cursor_id, cursor); + cursors.insert(*cursor_id, Cursor::new_pseudo(cursor)); state.pc += 1; } Insn::RewindAsync { cursor_id } => { - let cursor = must_be_btree_cursor!( - *cursor_id, - self.cursor_ref, - btree_table_cursors, - btree_index_cursors, - "RewindAsync" - ); + let cursor = + must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "RewindAsync"); return_if_io!(cursor.rewind()); state.pc += 1; } Insn::LastAsync { cursor_id } => { - let cursor = must_be_btree_cursor!( - *cursor_id, - self.cursor_ref, - btree_table_cursors, - btree_index_cursors, - "LastAsync" - ); + let cursor = + must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "LastAsync"); return_if_io!(cursor.last()); state.pc += 1; } @@ -773,13 +788,8 @@ impl Program { pc_if_empty, } => { assert!(pc_if_empty.is_offset()); - let cursor = must_be_btree_cursor!( - *cursor_id, - self.cursor_ref, - btree_table_cursors, - btree_index_cursors, - "LastAwait" - ); + let cursor = + must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "LastAwait"); cursor.wait_for_completion()?; if cursor.is_empty() { state.pc = pc_if_empty.to_offset_int(); @@ -792,13 +802,8 @@ impl Program { pc_if_empty, } => { assert!(pc_if_empty.is_offset()); - let cursor = must_be_btree_cursor!( - *cursor_id, - self.cursor_ref, - 
btree_table_cursors, - btree_index_cursors, - "RewindAwait" - ); + let cursor = + must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "RewindAwait"); cursor.wait_for_completion()?; if cursor.is_empty() { state.pc = pc_if_empty.to_offset_int(); @@ -812,9 +817,9 @@ impl Program { dest, } => { if let Some((index_cursor_id, table_cursor_id)) = state.deferred_seek.take() { - let index_cursor = btree_index_cursors.get_mut(&index_cursor_id).unwrap(); + let index_cursor = get_cursor_as_index_mut(&mut cursors, index_cursor_id); let rowid = index_cursor.rowid()?; - let table_cursor = btree_table_cursors.get_mut(&table_cursor_id).unwrap(); + let table_cursor = get_cursor_as_table_mut(&mut cursors, table_cursor_id); match table_cursor.seek(SeekKey::TableRowId(rowid.unwrap()), SeekOp::EQ)? { CursorResult::Ok(_) => {} CursorResult::IO => { @@ -829,8 +834,7 @@ impl Program { let cursor = must_be_btree_cursor!( *cursor_id, self.cursor_ref, - btree_table_cursors, - btree_index_cursors, + cursors, "Column" ); let record = cursor.record()?; @@ -845,7 +849,7 @@ impl Program { } } CursorType::Sorter => { - let cursor = sorter_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id); if let Some(record) = cursor.record() { state.registers[*dest] = record.values[*column].clone(); } else { @@ -853,7 +857,7 @@ impl Program { } } CursorType::Pseudo(_) => { - let cursor = pseudo_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_pseudo_mut(&mut cursors, *cursor_id); if let Some(record) = cursor.record() { state.registers[*dest] = record.values[*column].clone(); } else { @@ -879,25 +883,15 @@ impl Program { return Ok(StepResult::Row(record)); } Insn::NextAsync { cursor_id } => { - let cursor = must_be_btree_cursor!( - *cursor_id, - self.cursor_ref, - btree_table_cursors, - btree_index_cursors, - "NextAsync" - ); + let cursor = + must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "NextAsync"); cursor.set_null_flag(false); return_if_io!(cursor.next()); state.pc += 1; } Insn::PrevAsync { cursor_id } => { - let cursor = must_be_btree_cursor!( - *cursor_id, - self.cursor_ref, - btree_table_cursors, - btree_index_cursors, - "PrevAsync" - ); + let cursor = + must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "PrevAsync"); cursor.set_null_flag(false); return_if_io!(cursor.prev()); state.pc += 1; @@ -907,13 +901,8 @@ impl Program { pc_if_next, } => { assert!(pc_if_next.is_offset()); - let cursor = must_be_btree_cursor!( - *cursor_id, - self.cursor_ref, - btree_table_cursors, - btree_index_cursors, - "PrevAwait" - ); + let cursor = + must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "PrevAwait"); cursor.wait_for_completion()?; if !cursor.is_empty() { state.pc = pc_if_next.to_offset_int(); @@ -926,13 +915,8 @@ impl Program { pc_if_next, } => { assert!(pc_if_next.is_offset()); - let cursor = must_be_btree_cursor!( - *cursor_id, - self.cursor_ref, - btree_table_cursors, - btree_index_cursors, - "NextAwait" - ); + let cursor = + must_be_btree_cursor!(*cursor_id, self.cursor_ref, cursors, "NextAwait"); cursor.wait_for_completion()?; if !cursor.is_empty() { state.pc = pc_if_next.to_offset_int(); @@ -1062,9 +1046,9 @@ impl Program { } Insn::RowId { cursor_id, dest } => { if let Some((index_cursor_id, table_cursor_id)) = state.deferred_seek.take() { - let index_cursor = btree_index_cursors.get_mut(&index_cursor_id).unwrap(); + let index_cursor = get_cursor_as_index_mut(&mut cursors, index_cursor_id); let rowid = index_cursor.rowid()?; - 
let table_cursor = btree_table_cursors.get_mut(&table_cursor_id).unwrap(); + let table_cursor = get_cursor_as_table_mut(&mut cursors, table_cursor_id); match table_cursor.seek(SeekKey::TableRowId(rowid.unwrap()), SeekOp::EQ)? { CursorResult::Ok(_) => {} CursorResult::IO => { @@ -1074,7 +1058,7 @@ impl Program { } } - let cursor = btree_table_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id); if let Some(ref rowid) = cursor.rowid()? { state.registers[*dest] = OwnedValue::Integer(*rowid as i64); } else { @@ -1088,7 +1072,7 @@ impl Program { target_pc, } => { assert!(target_pc.is_offset()); - let cursor = btree_table_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id); let rowid = match &state.registers[*src_reg] { OwnedValue::Integer(rowid) => *rowid as u64, OwnedValue::Null => { @@ -1124,7 +1108,7 @@ impl Program { } => { assert!(target_pc.is_offset()); if *is_index { - let cursor = btree_index_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_index_mut(&mut cursors, *cursor_id); let record_from_regs: OwnedRecord = make_owned_record(&state.registers, start_reg, num_regs); let found = return_if_io!( @@ -1136,7 +1120,7 @@ impl Program { state.pc += 1; } } else { - let cursor = btree_table_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id); let rowid = match &state.registers[*start_reg] { OwnedValue::Null => { // All integer values are greater than null so we just rewind the cursor @@ -1169,7 +1153,7 @@ impl Program { } => { assert!(target_pc.is_offset()); if *is_index { - let cursor = btree_index_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_index_mut(&mut cursors, *cursor_id); let record_from_regs: OwnedRecord = make_owned_record(&state.registers, start_reg, num_regs); let found = return_if_io!( @@ -1181,7 +1165,7 @@ impl Program { state.pc += 1; } } else { - let cursor = btree_table_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id); let rowid = match &state.registers[*start_reg] { OwnedValue::Null => { // All integer values are greater than null so we just rewind the cursor @@ -1212,7 +1196,7 @@ impl Program { target_pc, } => { assert!(target_pc.is_offset()); - let cursor = btree_index_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_index_mut(&mut cursors, *cursor_id); let record_from_regs: OwnedRecord = make_owned_record(&state.registers, start_reg, num_regs); if let Some(ref idx_record) = *cursor.record()? { @@ -1235,7 +1219,7 @@ impl Program { target_pc, } => { assert!(target_pc.is_offset()); - let cursor = btree_index_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_index_mut(&mut cursors, *cursor_id); let record_from_regs: OwnedRecord = make_owned_record(&state.registers, start_reg, num_regs); if let Some(ref idx_record) = *cursor.record()? 
{ @@ -1565,7 +1549,7 @@ impl Program { }) .collect(); let cursor = Sorter::new(order); - sorter_cursors.insert(*cursor_id, cursor); + cursors.insert(*cursor_id, Cursor::new_sorter(cursor)); state.pc += 1; } Insn::SorterData { @@ -1573,7 +1557,7 @@ impl Program { dest_reg, pseudo_cursor, } => { - let sorter_cursor = sorter_cursors.get_mut(cursor_id).unwrap(); + let sorter_cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id); let record = match sorter_cursor.record() { Some(record) => record.clone(), None => { @@ -1582,7 +1566,7 @@ impl Program { } }; state.registers[*dest_reg] = OwnedValue::Record(record.clone()); - let pseudo_cursor = pseudo_cursors.get_mut(pseudo_cursor).unwrap(); + let pseudo_cursor = get_cursor_as_pseudo_mut(&mut cursors, *pseudo_cursor); pseudo_cursor.insert(record); state.pc += 1; } @@ -1590,7 +1574,7 @@ impl Program { cursor_id, record_reg, } => { - let cursor = sorter_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id); let record = match &state.registers[*record_reg] { OwnedValue::Record(record) => record, _ => unreachable!("SorterInsert on non-record register"), @@ -1602,11 +1586,12 @@ impl Program { cursor_id, pc_if_empty, } => { - if let Some(cursor) = sorter_cursors.get_mut(cursor_id) { + let cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id); + if cursor.is_empty() { + state.pc = pc_if_empty.to_offset_int(); + } else { cursor.sort(); state.pc += 1; - } else { - state.pc = pc_if_empty.to_offset_int(); } } Insn::SorterNext { @@ -1614,9 +1599,9 @@ impl Program { pc_if_next, } => { assert!(pc_if_next.is_offset()); - let cursor = sorter_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_sorter_mut(&mut cursors, *cursor_id); cursor.next(); - if !cursor.is_empty() { + if cursor.has_more() { state.pc = pc_if_next.to_offset_int(); } else { state.pc += 1; @@ -2153,7 +2138,7 @@ impl Program { record_reg, flag: _, } => { - let cursor = btree_table_cursors.get_mut(cursor).unwrap(); + let cursor = get_cursor_as_table_mut(&mut cursors, *cursor); let record = match &state.registers[*record_reg] { OwnedValue::Record(r) => r, _ => unreachable!("Not a record! Cannot insert a non record value."), @@ -2163,7 +2148,7 @@ impl Program { state.pc += 1; } Insn::InsertAwait { cursor_id } => { - let cursor = btree_table_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id); cursor.wait_for_completion()?; // Only update last_insert_rowid for regular table inserts, not schema modifications if cursor.root_page() != 1 { @@ -2178,12 +2163,12 @@ impl Program { state.pc += 1; } Insn::DeleteAsync { cursor_id } => { - let cursor = btree_table_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id); return_if_io!(cursor.delete()); state.pc += 1; } Insn::DeleteAwait { cursor_id } => { - let cursor = btree_table_cursors.get_mut(cursor_id).unwrap(); + let cursor = get_cursor_as_table_mut(&mut cursors, *cursor_id); cursor.wait_for_completion()?; let prev_changes = self.n_change.get(); self.n_change.set(prev_changes + 1); @@ -2192,7 +2177,7 @@ impl Program { Insn::NewRowid { cursor, rowid_reg, .. 
} => { - let cursor = btree_table_cursors.get_mut(cursor).unwrap(); + let cursor = get_cursor_as_table_mut(&mut cursors, *cursor); // TODO: make io handle rng let rowid = return_if_io!(get_new_rowid(cursor, thread_rng())); state.registers[*rowid_reg] = OwnedValue::Integer(rowid); @@ -2232,13 +2217,8 @@ impl Program { rowid_reg, target_pc, } => { - let cursor = must_be_btree_cursor!( - *cursor, - self.cursor_ref, - btree_table_cursors, - btree_index_cursors, - "NotExists" - ); + let cursor = + must_be_btree_cursor!(*cursor, self.cursor_ref, cursors, "NotExists"); let exists = return_if_io!(cursor.exists(&state.registers[*rowid_reg])); if exists { state.pc += 1; @@ -2258,9 +2238,9 @@ impl Program { let cursor = BTreeCursor::new(pager.clone(), *root_page, self.database_header.clone()); if is_index { - btree_index_cursors.insert(*cursor_id, cursor); + cursors.insert(*cursor_id, Cursor::new_index(cursor)); } else { - btree_table_cursors.insert(*cursor_id, cursor); + cursors.insert(*cursor_id, Cursor::new_table(cursor)); } state.pc += 1; } @@ -2293,21 +2273,7 @@ impl Program { state.pc += 1; } Insn::Close { cursor_id } => { - let (_, cursor_type) = self.cursor_ref.get(*cursor_id).unwrap(); - match cursor_type { - CursorType::BTreeTable(_) => { - let _ = btree_table_cursors.remove(cursor_id); - } - CursorType::BTreeIndex(_) => { - let _ = btree_index_cursors.remove(cursor_id); - } - CursorType::Pseudo(_) => { - let _ = pseudo_cursors.remove(cursor_id); - } - CursorType::Sorter => { - let _ = sorter_cursors.remove(cursor_id); - } - } + let _ = cursors.remove(&*cursor_id); state.pc += 1; } Insn::IsNull { src, target_pc } => { diff --git a/core/vdbe/sorter.rs b/core/vdbe/sorter.rs index d23a3bef0..2682c5f46 100644 --- a/core/vdbe/sorter.rs +++ b/core/vdbe/sorter.rs @@ -16,8 +16,13 @@ impl Sorter { } } pub fn is_empty(&self) -> bool { - self.current.is_none() + self.records.is_empty() } + + pub fn has_more(&self) -> bool { + self.current.is_some() + } + // We do the sorting here since this is what is called by the SorterSort instruction pub fn sort(&mut self) { self.records.sort_by(|a, b| { From 8b6e76149621eb541277576e5978a4f76e09ecb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Sun, 26 Jan 2025 20:04:30 +0900 Subject: [PATCH 25/34] Add lint commands in Makefile --- bindings/java/Makefile | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/bindings/java/Makefile b/bindings/java/Makefile index 6d303c38a..4bcbee2c1 100644 --- a/bindings/java/Makefile +++ b/bindings/java/Makefile @@ -1,6 +1,12 @@ -.PHONY: test build_test +.PHONY: java_lint test build_test -test: build_test +lint: + ./gradlew spotlessCheck + +lint_apply: + ./gradlew spotlessApply + +test: lint build_test ./gradlew test --info build_test: From 2212cc2a09ae3b1eee7344ad90704838f049379e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=84=A0=EC=9A=B0?= Date: Sun, 26 Jan 2025 20:04:57 +0900 Subject: [PATCH 26/34] Apply lints --- .../java/org/github/tursodatabase/JDBC.java | 114 +- .../org/github/tursodatabase/LimboConfig.java | 77 +- .../github/tursodatabase/LimboDataSource.java | 133 +- .../github/tursodatabase/LimboErrorCode.java | 117 +- .../annotations/NativeInvocation.java | 7 +- .../tursodatabase/annotations/Nullable.java | 10 +- .../annotations/SkipNullableCheck.java | 11 +- .../annotations/VisibleForTesting.java | 7 +- .../github/tursodatabase/core/AbstractDB.java | 113 +- .../tursodatabase/core/LimboConnection.java | 229 +- .../github/tursodatabase/core/LimboDB.java | 174 
+- .../tursodatabase/core/LimboDBFactory.java | 70 +- .../tursodatabase/core/LimboResultSet.java | 179 +- .../tursodatabase/core/LimboStatement.java | 116 +- .../tursodatabase/core/LimboStepResult.java | 135 +- .../github/tursodatabase/core/SqliteCode.java | 120 +- .../exceptions/LimboException.java | 19 +- .../tursodatabase/jdbc4/JDBC4Connection.java | 576 ++--- .../tursodatabase/jdbc4/JDBC4ResultSet.java | 2233 +++++++++-------- .../tursodatabase/jdbc4/JDBC4Statement.java | 696 ++--- .../tursodatabase/utils/ByteArrayUtils.java | 29 +- .../utils/LimboExceptionUtils.java | 56 +- .../github/tursodatabase/IntegrationTest.java | 47 +- .../org/github/tursodatabase/JDBCTest.java | 39 +- .../org/github/tursodatabase/TestUtils.java | 10 +- .../core/LimboDBFactoryTest.java | 45 +- .../tursodatabase/core/LimboDBTest.java | 73 +- .../jdbc4/JDBC4ConnectionTest.java | 95 +- .../jdbc4/JDBC4ResultSetTest.java | 82 +- .../jdbc4/JDBC4StatementTest.java | 67 +- 30 files changed, 2841 insertions(+), 2838 deletions(-) diff --git a/bindings/java/src/main/java/org/github/tursodatabase/JDBC.java b/bindings/java/src/main/java/org/github/tursodatabase/JDBC.java index bb46fb05e..63c6e57d7 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/JDBC.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/JDBC.java @@ -1,79 +1,79 @@ package org.github.tursodatabase; -import org.github.tursodatabase.annotations.Nullable; -import org.github.tursodatabase.annotations.SkipNullableCheck; -import org.github.tursodatabase.core.LimboConnection; -import org.github.tursodatabase.jdbc4.JDBC4Connection; - import java.sql.*; import java.util.Locale; import java.util.Properties; import java.util.logging.Logger; +import org.github.tursodatabase.annotations.Nullable; +import org.github.tursodatabase.annotations.SkipNullableCheck; +import org.github.tursodatabase.core.LimboConnection; +import org.github.tursodatabase.jdbc4.JDBC4Connection; public class JDBC implements Driver { - private static final String VALID_URL_PREFIX = "jdbc:sqlite:"; + private static final String VALID_URL_PREFIX = "jdbc:sqlite:"; - static { - try { - DriverManager.registerDriver(new JDBC()); - } catch (Exception e) { - // TODO: log - } + static { + try { + DriverManager.registerDriver(new JDBC()); + } catch (Exception e) { + // TODO: log } + } - @Nullable - public static LimboConnection createConnection(String url, Properties properties) throws SQLException { - if (!isValidURL(url)) return null; + @Nullable + public static LimboConnection createConnection(String url, Properties properties) + throws SQLException { + if (!isValidURL(url)) return null; - url = url.trim(); - return new JDBC4Connection(url, extractAddress(url), properties); - } + url = url.trim(); + return new JDBC4Connection(url, extractAddress(url), properties); + } - private static boolean isValidURL(String url) { - return url != null && url.toLowerCase(Locale.ROOT).startsWith(VALID_URL_PREFIX); - } + private static boolean isValidURL(String url) { + return url != null && url.toLowerCase(Locale.ROOT).startsWith(VALID_URL_PREFIX); + } - private static String extractAddress(String url) { - return url.substring(VALID_URL_PREFIX.length()); - } + private static String extractAddress(String url) { + return url.substring(VALID_URL_PREFIX.length()); + } - @Nullable - @Override - public Connection connect(String url, Properties info) throws SQLException { - return createConnection(url, info); - } + @Nullable + @Override + public Connection connect(String url, Properties info) throws 
SQLException { + return createConnection(url, info); + } - @Override - public boolean acceptsURL(String url) throws SQLException { - return isValidURL(url); - } + @Override + public boolean acceptsURL(String url) throws SQLException { + return isValidURL(url); + } - @Override - public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { - return LimboConfig.getDriverPropertyInfo(); - } + @Override + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { + return LimboConfig.getDriverPropertyInfo(); + } - @Override - public int getMajorVersion() { - // TODO - return 0; - } + @Override + public int getMajorVersion() { + // TODO + return 0; + } - @Override - public int getMinorVersion() { - // TODO - return 0; - } + @Override + public int getMinorVersion() { + // TODO + return 0; + } - @Override - public boolean jdbcCompliant() { - return false; - } + @Override + public boolean jdbcCompliant() { + return false; + } - @Override - @SkipNullableCheck - public Logger getParentLogger() throws SQLFeatureNotSupportedException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + // TODO + return null; + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/LimboConfig.java b/bindings/java/src/main/java/org/github/tursodatabase/LimboConfig.java index 7f2a2cdf0..6627bca1a 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/LimboConfig.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/LimboConfig.java @@ -4,48 +4,47 @@ import java.sql.DriverPropertyInfo; import java.util.Arrays; import java.util.Properties; -/** - * Limbo Configuration. - */ +/** Limbo Configuration. 
*/ public class LimboConfig { - private final Properties pragma; + private final Properties pragma; - public LimboConfig(Properties properties) { - this.pragma = properties; + public LimboConfig(Properties properties) { + this.pragma = properties; + } + + public static DriverPropertyInfo[] getDriverPropertyInfo() { + return Arrays.stream(Pragma.values()) + .map( + p -> { + DriverPropertyInfo info = new DriverPropertyInfo(p.pragmaName, null); + info.description = p.description; + info.choices = p.choices; + info.required = false; + return info; + }) + .toArray(DriverPropertyInfo[]::new); + } + + public Properties toProperties() { + Properties copy = new Properties(); + copy.putAll(pragma); + return copy; + } + + public enum Pragma { + ; + private final String pragmaName; + private final String description; + private final String[] choices; + + Pragma(String pragmaName, String description, String[] choices) { + this.pragmaName = pragmaName; + this.description = description; + this.choices = choices; } - public static DriverPropertyInfo[] getDriverPropertyInfo() { - return Arrays.stream(Pragma.values()) - .map(p -> { - DriverPropertyInfo info = new DriverPropertyInfo(p.pragmaName, null); - info.description = p.description; - info.choices = p.choices; - info.required = false; - return info; - }) - .toArray(DriverPropertyInfo[]::new); - } - - public Properties toProperties() { - Properties copy = new Properties(); - copy.putAll(pragma); - return copy; - } - - public enum Pragma { - ; - private final String pragmaName; - private final String description; - private final String[] choices; - - Pragma(String pragmaName, String description, String[] choices) { - this.pragmaName = pragmaName; - this.description = description; - this.choices = choices; - } - - public String getPragmaName() { - return pragmaName; - } + public String getPragmaName() { + return pragmaName; } + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/LimboDataSource.java b/bindings/java/src/main/java/org/github/tursodatabase/LimboDataSource.java index ff98ec651..134ea30ff 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/LimboDataSource.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/LimboDataSource.java @@ -1,88 +1,87 @@ package org.github.tursodatabase; -import org.github.tursodatabase.annotations.Nullable; -import org.github.tursodatabase.annotations.SkipNullableCheck; - -import javax.sql.DataSource; import java.io.PrintWriter; import java.sql.Connection; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.util.Properties; import java.util.logging.Logger; +import javax.sql.DataSource; +import org.github.tursodatabase.annotations.Nullable; +import org.github.tursodatabase.annotations.SkipNullableCheck; -/** - * Provides {@link DataSource} API for configuring Limbo database connection. - */ +/** Provides {@link DataSource} API for configuring Limbo database connection. */ public class LimboDataSource implements DataSource { - private final LimboConfig limboConfig; - private final String url; + private final LimboConfig limboConfig; + private final String url; - /** - * Creates a datasource based on the provided configuration. - * - * @param limboConfig The configuration for the datasource. - */ - public LimboDataSource(LimboConfig limboConfig, String url) { - this.limboConfig = limboConfig; - this.url = url; - } + /** + * Creates a datasource based on the provided configuration. 
+ * + * @param limboConfig The configuration for the datasource. + */ + public LimboDataSource(LimboConfig limboConfig, String url) { + this.limboConfig = limboConfig; + this.url = url; + } - @Override - @Nullable - public Connection getConnection() throws SQLException { - return getConnection(null, null); - } + @Override + @Nullable + public Connection getConnection() throws SQLException { + return getConnection(null, null); + } - @Override - @Nullable - public Connection getConnection(@Nullable String username, @Nullable String password) throws SQLException { - Properties properties = limboConfig.toProperties(); - if (username != null) properties.put("user", username); - if (password != null) properties.put("pass", password); - return JDBC.createConnection(url, properties); - } - @Override - @SkipNullableCheck - public PrintWriter getLogWriter() throws SQLException { - // TODO - return null; - } + @Override + @Nullable + public Connection getConnection(@Nullable String username, @Nullable String password) + throws SQLException { + Properties properties = limboConfig.toProperties(); + if (username != null) properties.put("user", username); + if (password != null) properties.put("pass", password); + return JDBC.createConnection(url, properties); + } - @Override - public void setLogWriter(PrintWriter out) throws SQLException { - // TODO - } + @Override + @SkipNullableCheck + public PrintWriter getLogWriter() throws SQLException { + // TODO + return null; + } - @Override - public void setLoginTimeout(int seconds) throws SQLException { - // TODO - } + @Override + public void setLogWriter(PrintWriter out) throws SQLException { + // TODO + } - @Override - public int getLoginTimeout() throws SQLException { - // TODO - return 0; - } + @Override + public void setLoginTimeout(int seconds) throws SQLException { + // TODO + } - @Override - @SkipNullableCheck - public Logger getParentLogger() throws SQLFeatureNotSupportedException { - // TODO - return null; - } + @Override + public int getLoginTimeout() throws SQLException { + // TODO + return 0; + } - @Override - @SkipNullableCheck - public T unwrap(Class iface) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + // TODO + return null; + } - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - // TODO - return false; - } + @Override + @SkipNullableCheck + public T unwrap(Class iface) throws SQLException { + // TODO + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + // TODO + return false; + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/LimboErrorCode.java b/bindings/java/src/main/java/org/github/tursodatabase/LimboErrorCode.java index d2450d266..9c9f7f5fe 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/LimboErrorCode.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/LimboErrorCode.java @@ -2,72 +2,67 @@ package org.github.tursodatabase; import org.github.tursodatabase.core.SqliteCode; -/** - * Limbo error code. Superset of SQLite3 error code. - */ +/** Limbo error code. Superset of SQLite3 error code. 
*/ public enum LimboErrorCode { - SQLITE_OK(SqliteCode.SQLITE_OK, "Successful result"), - SQLITE_ERROR(SqliteCode.SQLITE_ERROR, "SQL error or missing database"), - SQLITE_INTERNAL(SqliteCode.SQLITE_INTERNAL, "An internal logic error in SQLite"), - SQLITE_PERM(SqliteCode.SQLITE_PERM, "Access permission denied"), - SQLITE_ABORT(SqliteCode.SQLITE_ABORT, "Callback routine requested an abort"), - SQLITE_BUSY(SqliteCode.SQLITE_BUSY, "The database file is locked"), - SQLITE_LOCKED(SqliteCode.SQLITE_LOCKED, "A table in the database is locked"), - SQLITE_NOMEM(SqliteCode.SQLITE_NOMEM, "A malloc() failed"), - SQLITE_READONLY(SqliteCode.SQLITE_READONLY, "Attempt to write a readonly database"), - SQLITE_INTERRUPT(SqliteCode.SQLITE_INTERRUPT, "Operation terminated by sqlite_interrupt()"), - SQLITE_IOERR(SqliteCode.SQLITE_IOERR, "Some kind of disk I/O error occurred"), - SQLITE_CORRUPT(SqliteCode.SQLITE_CORRUPT, "The database disk image is malformed"), - SQLITE_NOTFOUND(SqliteCode.SQLITE_NOTFOUND, "(Internal Only) Table or record not found"), - SQLITE_FULL(SqliteCode.SQLITE_FULL, "Insertion failed because database is full"), - SQLITE_CANTOPEN(SqliteCode.SQLITE_CANTOPEN, "Unable to open the database file"), - SQLITE_PROTOCOL(SqliteCode.SQLITE_PROTOCOL, "Database lock protocol error"), - SQLITE_EMPTY(SqliteCode.SQLITE_EMPTY, "(Internal Only) Database table is empty"), - SQLITE_SCHEMA(SqliteCode.SQLITE_SCHEMA, "The database schema changed"), - SQLITE_TOOBIG(SqliteCode.SQLITE_TOOBIG, "Too much data for one row of a table"), - SQLITE_CONSTRAINT(SqliteCode.SQLITE_CONSTRAINT, "Abort due to constraint violation"), - SQLITE_MISMATCH(SqliteCode.SQLITE_MISMATCH, "Data type mismatch"), - SQLITE_MISUSE(SqliteCode.SQLITE_MISUSE, "Library used incorrectly"), - SQLITE_NOLFS(SqliteCode.SQLITE_NOLFS, "Uses OS features not supported on host"), - SQLITE_AUTH(SqliteCode.SQLITE_AUTH, "Authorization denied"), - SQLITE_ROW(SqliteCode.SQLITE_ROW, "sqlite_step() has another row ready"), - SQLITE_DONE(SqliteCode.SQLITE_DONE, "sqlite_step() has finished executing"), - SQLITE_INTEGER(SqliteCode.SQLITE_INTEGER, "Integer type"), - SQLITE_FLOAT(SqliteCode.SQLITE_FLOAT, "Float type"), - SQLITE_TEXT(SqliteCode.SQLITE_TEXT, "Text type"), - SQLITE_BLOB(SqliteCode.SQLITE_BLOB, "Blob type"), - SQLITE_NULL(SqliteCode.SQLITE_NULL, "Null type"), + SQLITE_OK(SqliteCode.SQLITE_OK, "Successful result"), + SQLITE_ERROR(SqliteCode.SQLITE_ERROR, "SQL error or missing database"), + SQLITE_INTERNAL(SqliteCode.SQLITE_INTERNAL, "An internal logic error in SQLite"), + SQLITE_PERM(SqliteCode.SQLITE_PERM, "Access permission denied"), + SQLITE_ABORT(SqliteCode.SQLITE_ABORT, "Callback routine requested an abort"), + SQLITE_BUSY(SqliteCode.SQLITE_BUSY, "The database file is locked"), + SQLITE_LOCKED(SqliteCode.SQLITE_LOCKED, "A table in the database is locked"), + SQLITE_NOMEM(SqliteCode.SQLITE_NOMEM, "A malloc() failed"), + SQLITE_READONLY(SqliteCode.SQLITE_READONLY, "Attempt to write a readonly database"), + SQLITE_INTERRUPT(SqliteCode.SQLITE_INTERRUPT, "Operation terminated by sqlite_interrupt()"), + SQLITE_IOERR(SqliteCode.SQLITE_IOERR, "Some kind of disk I/O error occurred"), + SQLITE_CORRUPT(SqliteCode.SQLITE_CORRUPT, "The database disk image is malformed"), + SQLITE_NOTFOUND(SqliteCode.SQLITE_NOTFOUND, "(Internal Only) Table or record not found"), + SQLITE_FULL(SqliteCode.SQLITE_FULL, "Insertion failed because database is full"), + SQLITE_CANTOPEN(SqliteCode.SQLITE_CANTOPEN, "Unable to open the database file"), + 
SQLITE_PROTOCOL(SqliteCode.SQLITE_PROTOCOL, "Database lock protocol error"), + SQLITE_EMPTY(SqliteCode.SQLITE_EMPTY, "(Internal Only) Database table is empty"), + SQLITE_SCHEMA(SqliteCode.SQLITE_SCHEMA, "The database schema changed"), + SQLITE_TOOBIG(SqliteCode.SQLITE_TOOBIG, "Too much data for one row of a table"), + SQLITE_CONSTRAINT(SqliteCode.SQLITE_CONSTRAINT, "Abort due to constraint violation"), + SQLITE_MISMATCH(SqliteCode.SQLITE_MISMATCH, "Data type mismatch"), + SQLITE_MISUSE(SqliteCode.SQLITE_MISUSE, "Library used incorrectly"), + SQLITE_NOLFS(SqliteCode.SQLITE_NOLFS, "Uses OS features not supported on host"), + SQLITE_AUTH(SqliteCode.SQLITE_AUTH, "Authorization denied"), + SQLITE_ROW(SqliteCode.SQLITE_ROW, "sqlite_step() has another row ready"), + SQLITE_DONE(SqliteCode.SQLITE_DONE, "sqlite_step() has finished executing"), + SQLITE_INTEGER(SqliteCode.SQLITE_INTEGER, "Integer type"), + SQLITE_FLOAT(SqliteCode.SQLITE_FLOAT, "Float type"), + SQLITE_TEXT(SqliteCode.SQLITE_TEXT, "Text type"), + SQLITE_BLOB(SqliteCode.SQLITE_BLOB, "Blob type"), + SQLITE_NULL(SqliteCode.SQLITE_NULL, "Null type"), - UNKNOWN_ERROR(-1, "Unknown error"), - LIMBO_FAILED_TO_PARSE_BYTE_ARRAY(1100, "Failed to parse ut8 byte array"), - LIMBO_FAILED_TO_PREPARE_STATEMENT(1200, "Failed to prepare statement"), - LIMBO_ETC(9999, "Unclassified error"); + UNKNOWN_ERROR(-1, "Unknown error"), + LIMBO_FAILED_TO_PARSE_BYTE_ARRAY(1100, "Failed to parse ut8 byte array"), + LIMBO_FAILED_TO_PREPARE_STATEMENT(1200, "Failed to prepare statement"), + LIMBO_ETC(9999, "Unclassified error"); - public final int code; - public final String message; + public final int code; + public final String message; - /** - * @param code Error code - * @param message Message for the error. - */ - LimboErrorCode(int code, String message) { - this.code = code; - this.message = message; + /** + * @param code Error code + * @param message Message for the error. + */ + LimboErrorCode(int code, String message) { + this.code = code; + this.message = message; + } + + public static LimboErrorCode getErrorCode(int errorCode) { + for (LimboErrorCode limboErrorCode : LimboErrorCode.values()) { + if (errorCode == limboErrorCode.code) return limboErrorCode; } - public static LimboErrorCode getErrorCode(int errorCode) { - for (LimboErrorCode limboErrorCode: LimboErrorCode.values()) { - if (errorCode == limboErrorCode.code) return limboErrorCode; - } + return UNKNOWN_ERROR; + } - return UNKNOWN_ERROR; - } - - @Override - public String toString() { - return "LimboErrorCode{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } + @Override + public String toString() { + return "LimboErrorCode{" + "code=" + code + ", message='" + message + '\'' + '}'; + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/annotations/NativeInvocation.java b/bindings/java/src/main/java/org/github/tursodatabase/annotations/NativeInvocation.java index 8f57c1bee..2c9c355a9 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/annotations/NativeInvocation.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/annotations/NativeInvocation.java @@ -1,17 +1,16 @@ package org.github.tursodatabase.annotations; - import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** - * Annotation to mark methods that are called by native functions. - * For example, throwing exceptions or creating java objects. 
+ * Annotation to mark methods that are called by native functions. For example, throwing exceptions + * or creating java objects. */ @Retention(RetentionPolicy.SOURCE) @Target({ElementType.METHOD, ElementType.CONSTRUCTOR}) public @interface NativeInvocation { - String invokedFrom() default ""; + String invokedFrom() default ""; } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/annotations/Nullable.java b/bindings/java/src/main/java/org/github/tursodatabase/annotations/Nullable.java index 88451f8b4..33e0d9dac 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/annotations/Nullable.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/annotations/Nullable.java @@ -1,6 +1,5 @@ package org.github.tursodatabase.annotations; - import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -8,11 +7,10 @@ import java.lang.annotation.Target; /** * Annotation to mark nullable types. - *

- * This annotation is used to indicate that a method, field, or parameter can be null.
- * It helps in identifying potential nullability issues and improving code quality.
+ *

This annotation is used to indicate that a method, field, or parameter can be null. It helps + * in identifying potential nullability issues and improving code quality. */ @Retention(RetentionPolicy.SOURCE) @Target({ElementType.METHOD, ElementType.FIELD, ElementType.PARAMETER}) -public @interface Nullable { -} +public @interface Nullable {} diff --git a/bindings/java/src/main/java/org/github/tursodatabase/annotations/SkipNullableCheck.java b/bindings/java/src/main/java/org/github/tursodatabase/annotations/SkipNullableCheck.java index 69214e7c4..f34d2f7fd 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/annotations/SkipNullableCheck.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/annotations/SkipNullableCheck.java @@ -1,6 +1,5 @@ package org.github.tursodatabase.annotations; - import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -8,11 +7,11 @@ import java.lang.annotation.Target; /** * Marker annotation to skip nullable checks. - *

- * This annotation is used to mark methods, fields, or parameters that should be excluded from nullable checks.
- * It is typically applied to code that is still under development or requires special handling.
+ *

This annotation is used to mark methods, fields, or parameters that should be excluded from + * nullable checks. It is typically applied to code that is still under development or requires + * special handling. */ @Retention(RetentionPolicy.SOURCE) @Target({ElementType.METHOD, ElementType.FIELD, ElementType.PARAMETER}) -public @interface SkipNullableCheck { -} +public @interface SkipNullableCheck {} diff --git a/bindings/java/src/main/java/org/github/tursodatabase/annotations/VisibleForTesting.java b/bindings/java/src/main/java/org/github/tursodatabase/annotations/VisibleForTesting.java index 5f8d30458..79d04392c 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/annotations/VisibleForTesting.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/annotations/VisibleForTesting.java @@ -5,10 +5,7 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; -/** - * Annotation to mark methods that use larger visibility for testing purposes. - */ +/** Annotation to mark methods that use larger visibility for testing purposes. */ @Retention(RetentionPolicy.SOURCE) @Target(ElementType.METHOD) -public @interface VisibleForTesting { -} +public @interface VisibleForTesting {} diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/AbstractDB.java b/bindings/java/src/main/java/org/github/tursodatabase/core/AbstractDB.java index f655e6dcf..4906acd9c 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/AbstractDB.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/AbstractDB.java @@ -5,74 +5,71 @@ import java.sql.SQLFeatureNotSupportedException; import java.util.concurrent.atomic.AtomicBoolean; /** - * Interface to Limbo. It provides some helper functions - * used by other parts of the driver. The goal of the helper functions here - * are not only to provide functionality, but to handle contractual + * Interface to Limbo. It provides some helper functions used by other parts of the driver. The goal + * of the helper functions here are not only to provide functionality, but to handle contractual * differences between the JDBC specification and the Limbo API. */ public abstract class AbstractDB { - protected final String url; - protected final String filePath; - private final AtomicBoolean closed = new AtomicBoolean(true); + protected final String url; + protected final String filePath; + private final AtomicBoolean closed = new AtomicBoolean(true); - public AbstractDB(String url, String filePath) { - this.url = url; - this.filePath = filePath; - } + public AbstractDB(String url, String filePath) { + this.url = url; + this.filePath = filePath; + } - public boolean isClosed() { - return closed.get(); - } + public boolean isClosed() { + return closed.get(); + } - /** - * Aborts any pending operation and returns at its earliest opportunity. - */ - public abstract void interrupt() throws SQLException; + /** Aborts any pending operation and returns at its earliest opportunity. */ + public abstract void interrupt() throws SQLException; - /** - * Creates an SQLite interface to a database for the given connection. - * - * @param openFlags Flags for opening the database. - * @throws SQLException if a database access error occurs. - */ - public final synchronized void open(int openFlags) throws SQLException { - open0(filePath, openFlags); - } + /** + * Creates an SQLite interface to a database for the given connection. + * + * @param openFlags Flags for opening the database. 
+ * @throws SQLException if a database access error occurs. + */ + public final synchronized void open(int openFlags) throws SQLException { + open0(filePath, openFlags); + } - protected abstract void open0(String fileName, int openFlags) throws SQLException; + protected abstract void open0(String fileName, int openFlags) throws SQLException; - /** - * Closes a database connection and finalizes any remaining statements before the closing - * operation. - * - * @throws SQLException if a database access error occurs. - */ - public final synchronized void close() throws SQLException { - // TODO: add implementation - throw new SQLFeatureNotSupportedException(); - } + /** + * Closes a database connection and finalizes any remaining statements before the closing + * operation. + * + * @throws SQLException if a database access error occurs. + */ + public final synchronized void close() throws SQLException { + // TODO: add implementation + throw new SQLFeatureNotSupportedException(); + } - /** - * Connects to a database. - * - * @return Pointer to the connection. - */ - public abstract long connect() throws SQLException; + /** + * Connects to a database. + * + * @return Pointer to the connection. + */ + public abstract long connect() throws SQLException; - /** - * Creates an SQLite interface to a database with the provided open flags. - * - * @param fileName The database to open. - * @param openFlags Flags for opening the database. - * @return pointer to database instance - * @throws SQLException if a database access error occurs. - */ - protected abstract long openUtf8(byte[] fileName, int openFlags) throws SQLException; + /** + * Creates an SQLite interface to a database with the provided open flags. + * + * @param fileName The database to open. + * @param openFlags Flags for opening the database. + * @return pointer to database instance + * @throws SQLException if a database access error occurs. + */ + protected abstract long openUtf8(byte[] fileName, int openFlags) throws SQLException; - /** - * Closes the SQLite interface to a database. - * - * @throws SQLException if a database access error occurs. - */ - protected abstract void close0() throws SQLException; + /** + * Closes the SQLite interface to a database. + * + * @throws SQLException if a database access error occurs. 
+ */ + protected abstract void close0() throws SQLException; } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboConnection.java b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboConnection.java index 8c77424b1..cd200f74f 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboConnection.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboConnection.java @@ -1,140 +1,139 @@ package org.github.tursodatabase.core; -import org.github.tursodatabase.annotations.NativeInvocation; -import org.github.tursodatabase.utils.LimboExceptionUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static org.github.tursodatabase.utils.ByteArrayUtils.stringToUtf8ByteArray; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.util.Properties; - -import static org.github.tursodatabase.utils.ByteArrayUtils.stringToUtf8ByteArray; +import org.github.tursodatabase.annotations.NativeInvocation; +import org.github.tursodatabase.utils.LimboExceptionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public abstract class LimboConnection implements Connection { - private static final Logger logger = LoggerFactory.getLogger(LimboConnection.class); + private static final Logger logger = LoggerFactory.getLogger(LimboConnection.class); - private final long connectionPtr; - private final AbstractDB database; + private final long connectionPtr; + private final AbstractDB database; - public LimboConnection(String url, String filePath) throws SQLException { - this(url, filePath, new Properties()); - } + public LimboConnection(String url, String filePath) throws SQLException { + this(url, filePath, new Properties()); + } - /** - * Creates a connection to limbo database - * - * @param url e.g. "jdbc:sqlite:fileName" - * @param filePath path to file - */ - public LimboConnection(String url, String filePath, Properties properties) throws SQLException { - AbstractDB db = null; + /** + * Creates a connection to limbo database + * + * @param url e.g. "jdbc:sqlite:fileName" + * @param filePath path to file + */ + public LimboConnection(String url, String filePath, Properties properties) throws SQLException { + AbstractDB db = null; - try { - db = open(url, filePath, properties); - } catch (Throwable t) { - try { - if (db != null) { - db.close(); - } - } catch (Throwable t2) { - t.addSuppressed(t2); - } - - throw t; + try { + db = open(url, filePath, properties); + } catch (Throwable t) { + try { + if (db != null) { + db.close(); } + } catch (Throwable t2) { + t.addSuppressed(t2); + } - this.database = db; - this.connectionPtr = db.connect(); + throw t; } - private static AbstractDB open(String url, String filePath, Properties properties) throws SQLException { - return LimboDBFactory.open(url, filePath, properties); + this.database = db; + this.connectionPtr = db.connect(); + } + + private static AbstractDB open(String url, String filePath, Properties properties) + throws SQLException { + return LimboDBFactory.open(url, filePath, properties); + } + + protected void checkOpen() throws SQLException { + if (isClosed()) throw new SQLException("database connection closed"); + } + + @Override + public void close() throws SQLException { + if (isClosed()) return; + database.close(); + } + + @Override + public boolean isClosed() throws SQLException { + return database.isClosed(); + } + + public AbstractDB getDatabase() { + return database; + } + + /** + * Compiles an SQL statement. 
+ * + * @param sql An SQL statement. + * @return Pointer to statement. + * @throws SQLException if a database access error occurs. + */ + public LimboStatement prepare(String sql) throws SQLException { + logger.trace("DriverManager [{}] [SQLite EXEC] {}", Thread.currentThread().getName(), sql); + byte[] sqlBytes = stringToUtf8ByteArray(sql); + if (sqlBytes == null) { + throw new SQLException("Failed to convert " + sql + " into bytes"); } + return new LimboStatement(sql, prepareUtf8(connectionPtr, sqlBytes)); + } - protected void checkOpen() throws SQLException { - if (isClosed()) throw new SQLException("database connection closed"); - } + private native long prepareUtf8(long connectionPtr, byte[] sqlUtf8) throws SQLException; - @Override - public void close() throws SQLException { - if (isClosed()) return; - database.close(); - } + /** @return busy timeout in milliseconds. */ + public int getBusyTimeout() { + // TODO: add support for busyTimeout + return 0; + } - @Override - public boolean isClosed() throws SQLException { - return database.isClosed(); - } + // TODO: check whether this is still valid for limbo - public AbstractDB getDatabase() { - return database; - } + /** + * Checks whether the type, concurrency, and holdability settings for a {@link ResultSet} are + * supported by the SQLite interface. Supported settings are: + * + *

    + *
  • type: {@link ResultSet#TYPE_FORWARD_ONLY} + *
  • concurrency: {@link ResultSet#CONCUR_READ_ONLY} + *
  • holdability: {@link ResultSet#CLOSE_CURSORS_AT_COMMIT} + *
+ * + * @param resultSetType the type setting. + * @param resultSetConcurrency the concurrency setting. + * @param resultSetHoldability the holdability setting. + */ + protected void checkCursor(int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) + throw new SQLException("SQLite only supports TYPE_FORWARD_ONLY cursors"); + if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) + throw new SQLException("SQLite only supports CONCUR_READ_ONLY cursors"); + if (resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) + throw new SQLException("SQLite only supports closing cursors at commit"); + } - /** - * Compiles an SQL statement. - * - * @param sql An SQL statement. - * @return Pointer to statement. - * @throws SQLException if a database access error occurs. - */ - public LimboStatement prepare(String sql) throws SQLException { - logger.trace("DriverManager [{}] [SQLite EXEC] {}", Thread.currentThread().getName(), sql); - byte[] sqlBytes = stringToUtf8ByteArray(sql); - if (sqlBytes == null) { - throw new SQLException("Failed to convert " + sql + " into bytes"); - } - return new LimboStatement(sql, prepareUtf8(connectionPtr, sqlBytes)); - } + public void setBusyTimeout(int busyTimeout) { + // TODO: add support for busy timeout + } - private native long prepareUtf8(long connectionPtr, byte[] sqlUtf8) throws SQLException; - - /** - * @return busy timeout in milliseconds. - */ - public int getBusyTimeout() { - // TODO: add support for busyTimeout - return 0; - } - - // TODO: check whether this is still valid for limbo - - /** - * Checks whether the type, concurrency, and holdability settings for a {@link ResultSet} are - * supported by the SQLite interface. Supported settings are: - * - *
    - *
  • type: {@link ResultSet#TYPE_FORWARD_ONLY} - *
  • concurrency: {@link ResultSet#CONCUR_READ_ONLY}) - *
  • holdability: {@link ResultSet#CLOSE_CURSORS_AT_COMMIT} - *
- * - * @param resultSetType the type setting. - * @param resultSetConcurrency the concurrency setting. - * @param resultSetHoldability the holdability setting. - */ - protected void checkCursor(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) - throw new SQLException("SQLite only supports TYPE_FORWARD_ONLY cursors"); - if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) - throw new SQLException("SQLite only supports CONCUR_READ_ONLY cursors"); - if (resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) - throw new SQLException("SQLite only supports closing cursors at commit"); - } - - public void setBusyTimeout(int busyTimeout) { - // TODO: add support for busy timeout - } - - /** - * Throws formatted SQLException with error code and message. - * - * @param errorCode Error code. - * @param errorMessageBytes Error message. - */ - @NativeInvocation(invokedFrom = "limbo_connection.rs") - private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException { - LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes); - } + /** + * Throws formatted SQLException with error code and message. + * + * @param errorCode Error code. + * @param errorMessageBytes Error message. + */ + @NativeInvocation(invokedFrom = "limbo_connection.rs") + private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException { + LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes); + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDB.java b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDB.java index ad6ee68a0..c32eaadf9 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDB.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDB.java @@ -4,7 +4,6 @@ import static org.github.tursodatabase.utils.ByteArrayUtils.stringToUtf8ByteArra import java.sql.SQLException; import java.util.concurrent.locks.ReentrantLock; - import org.github.tursodatabase.LimboErrorCode; import org.github.tursodatabase.annotations.NativeInvocation; import org.github.tursodatabase.annotations.VisibleForTesting; @@ -12,105 +11,104 @@ import org.github.tursodatabase.utils.LimboExceptionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * This class provides a thin JNI layer over the SQLite3 C API. - */ +/** This class provides a thin JNI layer over the SQLite3 C API. */ public final class LimboDB extends AbstractDB { - private static final Logger logger = LoggerFactory.getLogger(LimboDB.class); - // Pointer to database instance - private long dbPointer; - private boolean isOpen; + private static final Logger logger = LoggerFactory.getLogger(LimboDB.class); + // Pointer to database instance + private long dbPointer; + private boolean isOpen; - private static boolean isLoaded; - private ReentrantLock dbLock = new ReentrantLock(); + private static boolean isLoaded; + private ReentrantLock dbLock = new ReentrantLock(); - static { - if ("The Android Project".equals(System.getProperty("java.vm.vendor"))) { - // TODO - } else { - // continue with non Android execution path - isLoaded = false; - } + static { + if ("The Android Project".equals(System.getProperty("java.vm.vendor"))) { + // TODO + } else { + // continue with non Android execution path + isLoaded = false; + } + } + + /** Loads the SQLite interface backend. 
*/ + public static void load() { + if (isLoaded) { + return; } - /** - * Loads the SQLite interface backend. - */ - public static void load() { - if (isLoaded) {return;} + try { + System.loadLibrary("_limbo_java"); + } finally { + isLoaded = true; + } + } - try { - System.loadLibrary("_limbo_java"); - } finally { - isLoaded = true; - } + /** + * @param url e.g. "jdbc:sqlite:fileName + * @param filePath e.g. path to file + */ + public static LimboDB create(String url, String filePath) throws SQLException { + return new LimboDB(url, filePath); + } + + // TODO: receive config as argument + private LimboDB(String url, String filePath) { + super(url, filePath); + } + + // WRAPPER FUNCTIONS //////////////////////////////////////////// + + // TODO: add support for JNI + @Override + protected native long openUtf8(byte[] file, int openFlags) throws SQLException; + + // TODO: add support for JNI + @Override + protected native void close0() throws SQLException; + + // TODO: add support for JNI + native int execUtf8(byte[] sqlUtf8) throws SQLException; + + // TODO: add support for JNI + @Override + public native void interrupt(); + + @Override + protected void open0(String filePath, int openFlags) throws SQLException { + if (isOpen) { + throw LimboExceptionUtils.buildLimboException( + LimboErrorCode.LIMBO_ETC.code, "Already opened"); } - /** - * @param url e.g. "jdbc:sqlite:fileName - * @param filePath e.g. path to file - */ - public static LimboDB create(String url, String filePath) throws SQLException { - return new LimboDB(url, filePath); + byte[] filePathBytes = stringToUtf8ByteArray(filePath); + if (filePathBytes == null) { + throw LimboExceptionUtils.buildLimboException( + LimboErrorCode.LIMBO_ETC.code, + "File path cannot be converted to byteArray. File name: " + filePath); } - // TODO: receive config as argument - private LimboDB(String url, String filePath) { - super(url, filePath); - } + dbPointer = openUtf8(filePathBytes, openFlags); + isOpen = true; + } - // WRAPPER FUNCTIONS //////////////////////////////////////////// + @Override + public long connect() throws SQLException { + return connect0(dbPointer); + } - // TODO: add support for JNI - @Override - protected native long openUtf8(byte[] file, int openFlags) throws SQLException; + private native long connect0(long databasePtr) throws SQLException; - // TODO: add support for JNI - @Override - protected native void close0() throws SQLException; + @VisibleForTesting + native void throwJavaException(int errorCode) throws SQLException; - // TODO: add support for JNI - native int execUtf8(byte[] sqlUtf8) throws SQLException; - - // TODO: add support for JNI - @Override - public native void interrupt(); - - @Override - protected void open0(String filePath, int openFlags) throws SQLException { - if (isOpen) { - throw LimboExceptionUtils.buildLimboException(LimboErrorCode.LIMBO_ETC.code, "Already opened"); - } - - byte[] filePathBytes = stringToUtf8ByteArray(filePath); - if (filePathBytes == null) { - throw LimboExceptionUtils.buildLimboException( - LimboErrorCode.LIMBO_ETC.code, - "File path cannot be converted to byteArray. 
File name: " + filePath); - } - - dbPointer = openUtf8(filePathBytes, openFlags); - isOpen = true; - } - - @Override - public long connect() throws SQLException { - return connect0(dbPointer); - } - - private native long connect0(long databasePtr) throws SQLException; - - @VisibleForTesting - native void throwJavaException(int errorCode) throws SQLException; - - /** - * Throws formatted SQLException with error code and message. - * - * @param errorCode Error code. - * @param errorMessageBytes Error message. - */ - @NativeInvocation(invokedFrom = "limbo_db.rs") - private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException { - LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes); - } + /** + * Throws formatted SQLException with error code and message. + * + * @param errorCode Error code. + * @param errorMessageBytes Error message. + */ + @NativeInvocation(invokedFrom = "limbo_db.rs") + private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException { + LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes); + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDBFactory.java b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDBFactory.java index f7a81fd04..e1319c29e 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDBFactory.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboDBFactory.java @@ -5,43 +5,45 @@ import java.util.Properties; import java.util.concurrent.ConcurrentHashMap; /** - * Factory class for managing and creating instances of {@link LimboDB}. - * This class ensures that multiple instances of {@link LimboDB} with the same URL are not created. + * Factory class for managing and creating instances of {@link LimboDB}. This class ensures that + * multiple instances of {@link LimboDB} with the same URL are not created. */ public class LimboDBFactory { - private static final ConcurrentHashMap databaseHolder = new ConcurrentHashMap<>(); + private static final ConcurrentHashMap databaseHolder = + new ConcurrentHashMap<>(); - /** - * If a database with the same URL already exists, it returns the existing instance. - * Otherwise, it creates a new instance and stores it in the database holder. - * - * @param url the URL of the database - * @param filePath the path to the database file - * @param properties additional properties for the database connection - * @return an instance of {@link LimboDB} - * @throws SQLException if there is an error opening the connection - * @throws IllegalArgumentException if the fileName is empty - */ - public static LimboDB open(String url, String filePath, Properties properties) throws SQLException { - if (databaseHolder.containsKey(url)) { - return databaseHolder.get(url); - } - - if (filePath.isEmpty()) { - throw new IllegalArgumentException("filePath should not be empty"); - } - - final LimboDB database; - try { - LimboDB.load(); - database = LimboDB.create(url, filePath); - } catch (Exception e) { - throw new SQLException("Error opening connection", e); - } - - database.open(0); - databaseHolder.put(url, database); - return database; + /** + * If a database with the same URL already exists, it returns the existing instance. Otherwise, it + * creates a new instance and stores it in the database holder. 
+ * + * @param url the URL of the database + * @param filePath the path to the database file + * @param properties additional properties for the database connection + * @return an instance of {@link LimboDB} + * @throws SQLException if there is an error opening the connection + * @throws IllegalArgumentException if the fileName is empty + */ + public static LimboDB open(String url, String filePath, Properties properties) + throws SQLException { + if (databaseHolder.containsKey(url)) { + return databaseHolder.get(url); } + + if (filePath.isEmpty()) { + throw new IllegalArgumentException("filePath should not be empty"); + } + + final LimboDB database; + try { + LimboDB.load(); + database = LimboDB.create(url, filePath); + } catch (Exception e) { + throw new SQLException("Error opening connection", e); + } + + database.open(0); + databaseHolder.put(url, database); + return database; + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboResultSet.java b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboResultSet.java index 882d2b78b..c6cb8d00e 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboResultSet.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboResultSet.java @@ -1,116 +1,119 @@ package org.github.tursodatabase.core; import java.sql.SQLException; - import org.github.tursodatabase.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A table of data representing limbo database result set, which is generated by executing a statement that queries the - * database. - *

- * A {@link LimboResultSet} object is automatically closed when the {@link LimboStatement} object that generated it is - * closed or re-executed. + * A table of data representing a limbo database result set, which is generated by executing a + * statement that queries the database. + * + *

A {@link LimboResultSet} object is automatically closed when the {@link LimboStatement} object + * that generated it is closed or re-executed. */ public class LimboResultSet { - private static final Logger log = LoggerFactory.getLogger(LimboResultSet.class); + private static final Logger log = LoggerFactory.getLogger(LimboResultSet.class); - private final LimboStatement statement; + private final LimboStatement statement; - // Whether the result set does not have any rows. - private boolean isEmptyResultSet = false; - // If the result set is open. Doesn't mean it has results. - private boolean open; - // Maximum number of rows as set by the statement - private long maxRows; - // number of current row, starts at 1 (0 is used to represent loading data) - private int row = 0; - private boolean pastLastRow = false; + // Whether the result set does not have any rows. + private boolean isEmptyResultSet = false; + // If the result set is open. Doesn't mean it has results. + private boolean open; + // Maximum number of rows as set by the statement + private long maxRows; + // number of current row, starts at 1 (0 is used to represent loading data) + private int row = 0; + private boolean pastLastRow = false; - @Nullable - private LimboStepResult lastStepResult; + @Nullable private LimboStepResult lastStepResult; - public static LimboResultSet of(LimboStatement statement) { - return new LimboResultSet(statement); + public static LimboResultSet of(LimboStatement statement) { + return new LimboResultSet(statement); + } + + private LimboResultSet(LimboStatement statement) { + this.open = true; + this.statement = statement; + } + + /** + * Moves the cursor forward one row from its current position. A {@link LimboResultSet} cursor is + * initially positioned before the first row; the first call to the method next makes + * the first row the current row; the second call makes the second row the current row, and so on. + * When a call to the next method returns false, the cursor is + * positioned after the last row. + * + *

Note that limbo only supports ResultSet.TYPE_FORWARD_ONLY, which means that the + * cursor can only move forward. + */ + public boolean next() throws SQLException { + if (!open || isEmptyResultSet || pastLastRow) { + return false; // completed ResultSet } - private LimboResultSet(LimboStatement statement) { - this.open = true; - this.statement = statement; + if (maxRows != 0 && row == maxRows) { + return false; } - /** - * Moves the cursor forward one row from its current position. A {@link LimboResultSet} cursor is initially positioned - * before the first fow; the first call to the method next makes the first row the current row; the second call - * makes the second row the current row, and so on. - * When a call to the next method returns false, the cursor is positioned after the last row. - *

- * Note that limbo only supports ResultSet.TYPE_FORWARD_ONLY, which means that the cursor can only move forward. - */ - public boolean next() throws SQLException { - if (!open || isEmptyResultSet || pastLastRow) { - return false; // completed ResultSet - } - - if (maxRows != 0 && row == maxRows) { - return false; - } - - lastStepResult = this.statement.step(); - log.debug("lastStepResult: {}", lastStepResult); - if (lastStepResult.isRow()) { - row++; - } - - if (lastStepResult.isInInvalidState()) { - open = false; - throw new SQLException("step() returned invalid result: " + lastStepResult); - } - - pastLastRow = lastStepResult.isDone(); - if (pastLastRow) { - open = false; - } - return !pastLastRow; + lastStepResult = this.statement.step(); + log.debug("lastStepResult: {}", lastStepResult); + if (lastStepResult.isRow()) { + row++; } - /** - * Checks whether the last step result has returned row result. - */ - public boolean hasLastStepReturnedRow() { - return lastStepResult != null && lastStepResult.isRow(); + if (lastStepResult.isInInvalidState()) { + open = false; + throw new SQLException("step() returned invalid result: " + lastStepResult); } - /** - * Checks the status of the result set. - * - * @return true if it's ready to iterate over the result set; false otherwise. - */ - public boolean isOpen() { - return open; + pastLastRow = lastStepResult.isDone(); + if (pastLastRow) { + open = false; } + return !pastLastRow; + } - /** - * @throws SQLException if not {@link #open} - */ - public void checkOpen() throws SQLException { - if (!open) { - throw new SQLException("ResultSet closed"); - } - } + /** Checks whether the last step result has returned row result. */ + public boolean hasLastStepReturnedRow() { + return lastStepResult != null && lastStepResult.isRow(); + } - @Override - public String toString() { - return "LimboResultSet{" + - "statement=" + statement + - ", isEmptyResultSet=" + isEmptyResultSet + - ", open=" + open + - ", maxRows=" + maxRows + - ", row=" + row + - ", pastLastRow=" + pastLastRow + - ", lastResult=" + lastStepResult + - '}'; + /** + * Checks the status of the result set. + * + * @return true if it's ready to iterate over the result set; false otherwise. 
+ */ + public boolean isOpen() { + return open; + } + + /** @throws SQLException if not {@link #open} */ + public void checkOpen() throws SQLException { + if (!open) { + throw new SQLException("ResultSet closed"); } + } + + @Override + public String toString() { + return "LimboResultSet{" + + "statement=" + + statement + + ", isEmptyResultSet=" + + isEmptyResultSet + + ", open=" + + open + + ", maxRows=" + + maxRows + + ", row=" + + row + + ", pastLastRow=" + + pastLastRow + + ", lastResult=" + + lastStepResult + + '}'; + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStatement.java b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStatement.java index 747c68c2e..c749e27cc 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStatement.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStatement.java @@ -1,7 +1,6 @@ package org.github.tursodatabase.core; import java.sql.SQLException; - import org.github.tursodatabase.annotations.NativeInvocation; import org.github.tursodatabase.annotations.Nullable; import org.github.tursodatabase.utils.LimboExceptionUtils; @@ -9,68 +8,73 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * By default, only one resultSet object per LimboStatement can be open at the same time. - * Therefore, if the reading of one resultSet object is interleaved with the reading of another, each must - * have been generated by different LimboStatement objects. All execution method in the LimboStatement - * implicitly close the current resultSet object of the statement if an open one exists. + * By default, only one resultSet object per LimboStatement can be open at + * the same time. Therefore, if the reading of one resultSet object is interleaved with + * the reading of another, each must have been generated by different LimboStatement + * objects. All execution method in the LimboStatement implicitly close the current + * resultSet object of the statement if an open one exists. */ public class LimboStatement { - private static final Logger log = LoggerFactory.getLogger(LimboStatement.class); + private static final Logger log = LoggerFactory.getLogger(LimboStatement.class); - private final String sql; - private final long statementPointer; - private final LimboResultSet resultSet; + private final String sql; + private final long statementPointer; + private final LimboResultSet resultSet; - // TODO: what if the statement we ran was DDL, update queries and etc. Should we still create a resultSet? - public LimboStatement(String sql, long statementPointer) { - this.sql = sql; - this.statementPointer = statementPointer; - this.resultSet = LimboResultSet.of(this); - log.debug("Creating statement with sql: {}", this.sql); + // TODO: what if the statement we ran was DDL, update queries and etc. Should we still create a + // resultSet? + public LimboStatement(String sql, long statementPointer) { + this.sql = sql; + this.statementPointer = statementPointer; + this.resultSet = LimboResultSet.of(this); + log.debug("Creating statement with sql: {}", this.sql); + } + + public LimboResultSet getResultSet() { + return resultSet; + } + + /** + * Expects a clean statement created right after prepare method is called. + * + * @return true if the ResultSet has at least one row; false otherwise. 
+ */ + public boolean execute() throws SQLException { + resultSet.next(); + return resultSet.hasLastStepReturnedRow(); + } + + LimboStepResult step() throws SQLException { + final LimboStepResult result = step(this.statementPointer); + if (result == null) { + throw new SQLException("step() returned null, which is only returned when an error occurs"); } - public LimboResultSet getResultSet() { - return resultSet; - } + return result; + } - /** - * Expects a clean statement created right after prepare method is called. - * - * @return true if the ResultSet has at least one row; false otherwise. - */ - public boolean execute() throws SQLException { - resultSet.next(); - return resultSet.hasLastStepReturnedRow(); - } + @Nullable + private native LimboStepResult step(long stmtPointer) throws SQLException; - LimboStepResult step() throws SQLException { - final LimboStepResult result = step(this.statementPointer); - if (result == null) { - throw new SQLException("step() returned null, which is only returned when an error occurs"); - } + /** + * Throws formatted SQLException with error code and message. + * + * @param errorCode Error code. + * @param errorMessageBytes Error message. + */ + @NativeInvocation(invokedFrom = "limbo_statement.rs") + private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException { + LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes); + } - return result; - } - - @Nullable - private native LimboStepResult step(long stmtPointer) throws SQLException; - - /** - * Throws formatted SQLException with error code and message. - * - * @param errorCode Error code. - * @param errorMessageBytes Error message. - */ - @NativeInvocation(invokedFrom = "limbo_statement.rs") - private void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException { - LimboExceptionUtils.throwLimboException(errorCode, errorMessageBytes); - } - - @Override - public String toString() { - return "LimboStatement{" + - "statementPointer=" + statementPointer + - ", sql='" + sql + '\'' + - '}'; - } + @Override + public String toString() { + return "LimboStatement{" + + "statementPointer=" + + statementPointer + + ", sql='" + + sql + + '\'' + + '}'; + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStepResult.java b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStepResult.java index 27a8dfc05..93a1878aa 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStepResult.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/LimboStepResult.java @@ -1,79 +1,78 @@ package org.github.tursodatabase.core; import java.util.Arrays; - import org.github.tursodatabase.annotations.NativeInvocation; import org.github.tursodatabase.annotations.Nullable; -/** - * Represents the step result of limbo's statement's step function. - */ +/** Represents the step result of limbo's statement's step function. 
*/ public class LimboStepResult { - private static final int STEP_RESULT_ID_ROW = 10; - private static final int STEP_RESULT_ID_IO = 20; - private static final int STEP_RESULT_ID_DONE = 30; - private static final int STEP_RESULT_ID_INTERRUPT = 40; - // Indicates that the database file could not be written because of concurrent activity by some other connection - private static final int STEP_RESULT_ID_BUSY = 50; - private static final int STEP_RESULT_ID_ERROR = 60; + private static final int STEP_RESULT_ID_ROW = 10; + private static final int STEP_RESULT_ID_IO = 20; + private static final int STEP_RESULT_ID_DONE = 30; + private static final int STEP_RESULT_ID_INTERRUPT = 40; + // Indicates that the database file could not be written because of concurrent activity by some + // other connection + private static final int STEP_RESULT_ID_BUSY = 50; + private static final int STEP_RESULT_ID_ERROR = 60; - // Identifier for limbo's StepResult - private final int stepResultId; - @Nullable - private final Object[] result; + // Identifier for limbo's StepResult + private final int stepResultId; + @Nullable private final Object[] result; - @NativeInvocation(invokedFrom = "limbo_statement.rs") - public LimboStepResult(int stepResultId) { - this.stepResultId = stepResultId; - this.result = null; - } - - @NativeInvocation(invokedFrom = "limbo_statement.rs") - public LimboStepResult(int stepResultId, Object[] result) { - this.stepResultId = stepResultId; - this.result = result; - } - - public boolean isRow() { - return stepResultId == STEP_RESULT_ID_ROW; - } - - public boolean isDone() { - return stepResultId == STEP_RESULT_ID_DONE; - } - - public boolean isInInvalidState() { - // current implementation doesn't allow STEP_RESULT_ID_IO to be returned - return stepResultId == STEP_RESULT_ID_IO || - stepResultId == STEP_RESULT_ID_INTERRUPT || - stepResultId == STEP_RESULT_ID_BUSY || - stepResultId == STEP_RESULT_ID_ERROR; - } - - @Override - public String toString() { - return "LimboStepResult{" + - "stepResultName=" + getStepResultName() + - ", result=" + Arrays.toString(result) + - '}'; - } - - private String getStepResultName() { - switch (stepResultId) { - case STEP_RESULT_ID_ROW: - return "ROW"; - case STEP_RESULT_ID_IO: - return "IO"; - case STEP_RESULT_ID_DONE: - return "DONE"; - case STEP_RESULT_ID_INTERRUPT: - return "INTERRUPT"; - case STEP_RESULT_ID_BUSY: - return "BUSY"; - case STEP_RESULT_ID_ERROR: - return "ERROR"; - default: - return "UNKNOWN"; - } + @NativeInvocation(invokedFrom = "limbo_statement.rs") + public LimboStepResult(int stepResultId) { + this.stepResultId = stepResultId; + this.result = null; + } + + @NativeInvocation(invokedFrom = "limbo_statement.rs") + public LimboStepResult(int stepResultId, Object[] result) { + this.stepResultId = stepResultId; + this.result = result; + } + + public boolean isRow() { + return stepResultId == STEP_RESULT_ID_ROW; + } + + public boolean isDone() { + return stepResultId == STEP_RESULT_ID_DONE; + } + + public boolean isInInvalidState() { + // current implementation doesn't allow STEP_RESULT_ID_IO to be returned + return stepResultId == STEP_RESULT_ID_IO + || stepResultId == STEP_RESULT_ID_INTERRUPT + || stepResultId == STEP_RESULT_ID_BUSY + || stepResultId == STEP_RESULT_ID_ERROR; + } + + @Override + public String toString() { + return "LimboStepResult{" + + "stepResultName=" + + getStepResultName() + + ", result=" + + Arrays.toString(result) + + '}'; + } + + private String getStepResultName() { + switch (stepResultId) { + case STEP_RESULT_ID_ROW: 
+ return "ROW"; + case STEP_RESULT_ID_IO: + return "IO"; + case STEP_RESULT_ID_DONE: + return "DONE"; + case STEP_RESULT_ID_INTERRUPT: + return "INTERRUPT"; + case STEP_RESULT_ID_BUSY: + return "BUSY"; + case STEP_RESULT_ID_ERROR: + return "ERROR"; + default: + return "UNKNOWN"; } + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/core/SqliteCode.java b/bindings/java/src/main/java/org/github/tursodatabase/core/SqliteCode.java index 3a879cb46..632750e10 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/core/SqliteCode.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/core/SqliteCode.java @@ -15,93 +15,91 @@ */ package org.github.tursodatabase.core; -/** - * Sqlite error codes. - */ +/** Sqlite error codes. */ public class SqliteCode { - /** Successful result */ - public static final int SQLITE_OK = 0; + /** Successful result */ + public static final int SQLITE_OK = 0; - /** SQL error or missing database */ - public static final int SQLITE_ERROR = 1; + /** SQL error or missing database */ + public static final int SQLITE_ERROR = 1; - /** An internal logic error in SQLite */ - public static final int SQLITE_INTERNAL = 2; + /** An internal logic error in SQLite */ + public static final int SQLITE_INTERNAL = 2; - /** Access permission denied */ - public static final int SQLITE_PERM = 3; + /** Access permission denied */ + public static final int SQLITE_PERM = 3; - /** Callback routine requested an abort */ - public static final int SQLITE_ABORT = 4; + /** Callback routine requested an abort */ + public static final int SQLITE_ABORT = 4; - /** The database file is locked */ - public static final int SQLITE_BUSY = 5; + /** The database file is locked */ + public static final int SQLITE_BUSY = 5; - /** A table in the database is locked */ - public static final int SQLITE_LOCKED = 6; + /** A table in the database is locked */ + public static final int SQLITE_LOCKED = 6; - /** A malloc() failed */ - public static final int SQLITE_NOMEM = 7; + /** A malloc() failed */ + public static final int SQLITE_NOMEM = 7; - /** Attempt to write a readonly database */ - public static final int SQLITE_READONLY = 8; + /** Attempt to write a readonly database */ + public static final int SQLITE_READONLY = 8; - /** Operation terminated by sqlite_interrupt() */ - public static final int SQLITE_INTERRUPT = 9; + /** Operation terminated by sqlite_interrupt() */ + public static final int SQLITE_INTERRUPT = 9; - /** Some kind of disk I/O error occurred */ - public static final int SQLITE_IOERR = 10; + /** Some kind of disk I/O error occurred */ + public static final int SQLITE_IOERR = 10; - /** The database disk image is malformed */ - public static final int SQLITE_CORRUPT = 11; + /** The database disk image is malformed */ + public static final int SQLITE_CORRUPT = 11; - /** (Internal Only) Table or record not found */ - public static final int SQLITE_NOTFOUND = 12; + /** (Internal Only) Table or record not found */ + public static final int SQLITE_NOTFOUND = 12; - /** Insertion failed because database is full */ - public static final int SQLITE_FULL = 13; + /** Insertion failed because database is full */ + public static final int SQLITE_FULL = 13; - /** Unable to open the database file */ - public static final int SQLITE_CANTOPEN = 14; + /** Unable to open the database file */ + public static final int SQLITE_CANTOPEN = 14; - /** Database lock protocol error */ - public static final int SQLITE_PROTOCOL = 15; + /** Database lock protocol error */ + public static final 
int SQLITE_PROTOCOL = 15; - /** (Internal Only) Database table is empty */ - public static final int SQLITE_EMPTY = 16; + /** (Internal Only) Database table is empty */ + public static final int SQLITE_EMPTY = 16; - /** The database schema changed */ - public static final int SQLITE_SCHEMA = 17; + /** The database schema changed */ + public static final int SQLITE_SCHEMA = 17; - /** Too much data for one row of a table */ - public static final int SQLITE_TOOBIG = 18; + /** Too much data for one row of a table */ + public static final int SQLITE_TOOBIG = 18; - /** Abort due to constraint violation */ - public static final int SQLITE_CONSTRAINT = 19; + /** Abort due to constraint violation */ + public static final int SQLITE_CONSTRAINT = 19; - /** Data type mismatch */ - public static final int SQLITE_MISMATCH = 20; + /** Data type mismatch */ + public static final int SQLITE_MISMATCH = 20; - /** Library used incorrectly */ - public static final int SQLITE_MISUSE = 21; + /** Library used incorrectly */ + public static final int SQLITE_MISUSE = 21; - /** Uses OS features not supported on host */ - public static final int SQLITE_NOLFS = 22; + /** Uses OS features not supported on host */ + public static final int SQLITE_NOLFS = 22; - /** Authorization denied */ - public static final int SQLITE_AUTH = 23; + /** Authorization denied */ + public static final int SQLITE_AUTH = 23; - /** sqlite_step() has another row ready */ - public static final int SQLITE_ROW = 100; + /** sqlite_step() has another row ready */ + public static final int SQLITE_ROW = 100; - /** sqlite_step() has finished executing */ - public static final int SQLITE_DONE = 101; + /** sqlite_step() has finished executing */ + public static final int SQLITE_DONE = 101; - // types returned by sqlite3_column_type() + // types returned by sqlite3_column_type() - public static final int SQLITE_INTEGER = 1; - public static final int SQLITE_FLOAT = 2; - public static final int SQLITE_TEXT = 3; - public static final int SQLITE_BLOB = 4; - public static final int SQLITE_NULL = 5; + public static final int SQLITE_INTEGER = 1; + public static final int SQLITE_FLOAT = 2; + public static final int SQLITE_TEXT = 3; + public static final int SQLITE_BLOB = 4; + public static final int SQLITE_NULL = 5; } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/exceptions/LimboException.java b/bindings/java/src/main/java/org/github/tursodatabase/exceptions/LimboException.java index d4526a818..77d07f89a 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/exceptions/LimboException.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/exceptions/LimboException.java @@ -1,18 +1,17 @@ package org.github.tursodatabase.exceptions; +import java.sql.SQLException; import org.github.tursodatabase.LimboErrorCode; -import java.sql.SQLException; - public class LimboException extends SQLException { - private final LimboErrorCode resultCode; + private final LimboErrorCode resultCode; - public LimboException(String message, LimboErrorCode resultCode) { - super(message, null, resultCode.code & 0xff); - this.resultCode = resultCode; - } + public LimboException(String message, LimboErrorCode resultCode) { + super(message, null, resultCode.code & 0xff); + this.resultCode = resultCode; + } - public LimboErrorCode getResultCode() { - return resultCode; - } + public LimboErrorCode getResultCode() { + return resultCode; + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4Connection.java 
b/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4Connection.java index dc404a9cb..b17c6af36 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4Connection.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4Connection.java @@ -1,353 +1,357 @@ package org.github.tursodatabase.jdbc4; -import org.github.tursodatabase.core.LimboConnection; -import org.github.tursodatabase.annotations.SkipNullableCheck; - import java.sql.*; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.concurrent.Executor; +import org.github.tursodatabase.annotations.SkipNullableCheck; +import org.github.tursodatabase.core.LimboConnection; public class JDBC4Connection extends LimboConnection { - public JDBC4Connection(String url, String filePath) throws SQLException { - super(url, filePath); - } + public JDBC4Connection(String url, String filePath) throws SQLException { + super(url, filePath); + } - public JDBC4Connection(String url, String filePath, Properties properties) throws SQLException { - super(url, filePath, properties); - } + public JDBC4Connection(String url, String filePath, Properties properties) throws SQLException { + super(url, filePath, properties); + } - @Override - public Statement createStatement() throws SQLException { - return createStatement( - ResultSet.TYPE_FORWARD_ONLY, - ResultSet.CONCUR_READ_ONLY, - ResultSet.CLOSE_CURSORS_AT_COMMIT - ); - } + @Override + public Statement createStatement() throws SQLException { + return createStatement( + ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); + } - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { - return createStatement(resultSetType, resultSetConcurrency, ResultSet.CLOSE_CURSORS_AT_COMMIT); - } + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) + throws SQLException { + return createStatement(resultSetType, resultSetConcurrency, ResultSet.CLOSE_CURSORS_AT_COMMIT); + } - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - checkOpen(); - checkCursor(resultSetType, resultSetConcurrency, resultSetHoldability); + @Override + public Statement createStatement( + int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + checkOpen(); + checkCursor(resultSetType, resultSetConcurrency, resultSetHoldability); - return new JDBC4Statement(this); - } + return new JDBC4Statement(this); + } - @Override - @SkipNullableCheck - public PreparedStatement prepareStatement(String sql) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public PreparedStatement prepareStatement(String sql) throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public CallableStatement prepareCall(String sql) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public CallableStatement prepareCall(String sql) throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public String nativeSQL(String sql) throws SQLException { - // TODO - return ""; - } + @Override + @SkipNullableCheck + public String nativeSQL(String sql) throws SQLException { + // TODO + return ""; + } - @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - // TODO - } + @Override + public void 
setAutoCommit(boolean autoCommit) throws SQLException { + // TODO + } - @Override - public boolean getAutoCommit() throws SQLException { - // TODO - return false; - } + @Override + public boolean getAutoCommit() throws SQLException { + // TODO + return false; + } - @Override - public void commit() throws SQLException { - // TODO - } + @Override + public void commit() throws SQLException { + // TODO + } - @Override - public void rollback() throws SQLException { - // TODO - } + @Override + public void rollback() throws SQLException { + // TODO + } - @Override - public void close() throws SQLException { - // TODO - } + @Override + public void close() throws SQLException { + // TODO + } - @Override - public boolean isClosed() throws SQLException { - // TODO - return false; - } + @Override + public boolean isClosed() throws SQLException { + // TODO + return false; + } - @Override - @SkipNullableCheck - public DatabaseMetaData getMetaData() throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public DatabaseMetaData getMetaData() throws SQLException { + // TODO + return null; + } - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - // TODO - } + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + // TODO + } - @Override - public boolean isReadOnly() throws SQLException { - // TODO - return false; - } + @Override + public boolean isReadOnly() throws SQLException { + // TODO + return false; + } - @Override - public void setCatalog(String catalog) throws SQLException { - // TODO - } + @Override + public void setCatalog(String catalog) throws SQLException { + // TODO + } - @Override - public String getCatalog() throws SQLException { - // TODO - return ""; - } + @Override + public String getCatalog() throws SQLException { + // TODO + return ""; + } - @Override - public void setTransactionIsolation(int level) throws SQLException { - // TODO - } + @Override + public void setTransactionIsolation(int level) throws SQLException { + // TODO + } - @Override - public int getTransactionIsolation() throws SQLException { - // TODO - return 0; - } + @Override + public int getTransactionIsolation() throws SQLException { + // TODO + return 0; + } - @Override - @SkipNullableCheck - public SQLWarning getWarnings() throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public SQLWarning getWarnings() throws SQLException { + // TODO + return null; + } - @Override - public void clearWarnings() throws SQLException { - // TODO - } + @Override + public void clearWarnings() throws SQLException { + // TODO + } - @Override - @SkipNullableCheck - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + // TODO + return null; + } - @Override - public Map> getTypeMap() throws SQLException { - // TODO - return new HashMap<>(); - } + @Override + public Map> getTypeMap() throws SQLException { + // TODO + return 
new HashMap<>(); + } - @Override - public void setTypeMap(Map> map) throws SQLException { - // TODO - } + @Override + public void setTypeMap(Map> map) throws SQLException { + // TODO + } - @Override - public void setHoldability(int holdability) throws SQLException { - // TODO - } + @Override + public void setHoldability(int holdability) throws SQLException { + // TODO + } - @Override - public int getHoldability() throws SQLException { - return 0; - } + @Override + public int getHoldability() throws SQLException { + return 0; + } - @Override - @SkipNullableCheck - public Savepoint setSavepoint() throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public Savepoint setSavepoint() throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public Savepoint setSavepoint(String name) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public Savepoint setSavepoint(String name) throws SQLException { + // TODO + return null; + } - @Override - public void rollback(Savepoint savepoint) throws SQLException { - // TODO - } + @Override + public void rollback(Savepoint savepoint) throws SQLException { + // TODO + } - @Override - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - // TODO - } + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + // TODO + } - @Override - @SkipNullableCheck - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public PreparedStatement prepareStatement( + String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public CallableStatement prepareCall( + String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public Clob createClob() throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public Clob createClob() throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public Blob createBlob() throws SQLException { - // TODO - return 
null; - } + @Override + @SkipNullableCheck + public Blob createBlob() throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public NClob createNClob() throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public NClob createNClob() throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public SQLXML createSQLXML() throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public SQLXML createSQLXML() throws SQLException { + // TODO + return null; + } - @Override - public boolean isValid(int timeout) throws SQLException { - // TODO - return false; - } + @Override + public boolean isValid(int timeout) throws SQLException { + // TODO + return false; + } - @Override - public void setClientInfo(String name, String value) throws SQLClientInfoException { - // TODO - } + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + // TODO + } - @Override - public void setClientInfo(Properties properties) throws SQLClientInfoException { - // TODO - } + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + // TODO + } - @Override - public String getClientInfo(String name) throws SQLException { - // TODO - return ""; - } + @Override + public String getClientInfo(String name) throws SQLException { + // TODO + return ""; + } - @Override - @SkipNullableCheck - public Properties getClientInfo() throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public Properties getClientInfo() throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + // TODO + return null; + } - @Override - @SkipNullableCheck - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - // TODO - return null; - } + @Override + @SkipNullableCheck + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + // TODO + return null; + } - @Override - public void setSchema(String schema) throws SQLException { - // TODO - } + @Override + public void setSchema(String schema) throws SQLException { + // TODO + } - @Override - @SkipNullableCheck - public String getSchema() throws SQLException { - // TODO - return ""; - } + @Override + @SkipNullableCheck + public String getSchema() throws SQLException { + // TODO + return ""; + } - @Override - public void abort(Executor executor) throws SQLException { - // TODO - } + @Override + public void abort(Executor executor) throws SQLException { + // TODO + } - @Override - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - // TODO - } + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + // TODO + } - @Override - public int getNetworkTimeout() throws SQLException { - // TODO - return 0; - } + @Override + public int getNetworkTimeout() throws SQLException { + // TODO + return 0; + } - @Override - @SkipNullableCheck - public T unwrap(Class iface) throws SQLException { - return null; - } + @Override + @SkipNullableCheck + public T unwrap(Class iface) throws SQLException { + return null; + } - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - // 
TODO - return false; - } + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + // TODO + return false; + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4ResultSet.java b/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4ResultSet.java index 0d7cce084..867b2688e 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4ResultSet.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4ResultSet.java @@ -1,8 +1,5 @@ package org.github.tursodatabase.jdbc4; -import org.github.tursodatabase.annotations.SkipNullableCheck; -import org.github.tursodatabase.core.LimboResultSet; - import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; @@ -10,1117 +7,1127 @@ import java.net.URL; import java.sql.*; import java.util.Calendar; import java.util.Map; +import org.github.tursodatabase.annotations.SkipNullableCheck; +import org.github.tursodatabase.core.LimboResultSet; public class JDBC4ResultSet implements ResultSet { - private final LimboResultSet resultSet; - - public JDBC4ResultSet(LimboResultSet resultSet) { - this.resultSet = resultSet; - } - - @Override - public boolean next() throws SQLException { - return resultSet.next(); - } - - @Override - public void close() throws SQLException { - // TODO - } - - @Override - public boolean wasNull() throws SQLException { - // TODO - return false; - } - - @Override - public String getString(int columnIndex) throws SQLException { - // TODO - return ""; - } - - @Override - public boolean getBoolean(int columnIndex) throws SQLException { - // TODO - return false; - } - - @Override - public byte getByte(int columnIndex) throws SQLException { - // TODO - return 0; - } - - @Override - public short getShort(int columnIndex) throws SQLException { - // TODO - return 0; - } - - @Override - public int getInt(int columnIndex) throws SQLException { - // TODO - return 0; - } - - @Override - public long getLong(int columnIndex) throws SQLException { - // TODO - return 0; - } - - @Override - public float getFloat(int columnIndex) throws SQLException { - // TODO - return 0; - } - - @Override - public double getDouble(int columnIndex) throws SQLException { - // TODO - return 0; - } - - @Override - @SkipNullableCheck - public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - // TODO - return null; - } - - @Override - public byte[] getBytes(int columnIndex) throws SQLException { - // TODO - return new byte[0]; - } - - @Override - @SkipNullableCheck - public Date getDate(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Time getTime(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Timestamp getTimestamp(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public InputStream getAsciiStream(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public InputStream getBinaryStream(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - public String getString(String columnLabel) throws SQLException { - // TODO - return ""; - } - - @Override - public boolean getBoolean(String columnLabel) throws SQLException { - // TODO - return false; - } - 
- @Override - public byte getByte(String columnLabel) throws SQLException { - // TODO - return 0; - } - - @Override - public short getShort(String columnLabel) throws SQLException { - // TODO - return 0; - } - - @Override - public int getInt(String columnLabel) throws SQLException { - // TODO - return 0; - } - - @Override - public long getLong(String columnLabel) throws SQLException { - // TODO - return 0; - } - - @Override - public float getFloat(String columnLabel) throws SQLException { - // TODO - return 0; - } - - @Override - public double getDouble(String columnLabel) throws SQLException { - // TODO - return 0; - } - - @Override - @SkipNullableCheck - public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { - // TODO - return null; - } - - @Override - public byte[] getBytes(String columnLabel) throws SQLException { - // TODO - return new byte[0]; - } - - @Override - @SkipNullableCheck - public Date getDate(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Time getTime(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Timestamp getTimestamp(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public InputStream getAsciiStream(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public InputStream getUnicodeStream(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public InputStream getBinaryStream(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public SQLWarning getWarnings() throws SQLException { - // TODO - return null; - } - - @Override - public void clearWarnings() throws SQLException { - // TODO - } - - @Override - public String getCursorName() throws SQLException { - // TODO - return ""; - } - - @Override - @SkipNullableCheck - public ResultSetMetaData getMetaData() throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Object getObject(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Object getObject(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - public int findColumn(String columnLabel) throws SQLException { - // TODO - return 0; - } - - @Override - @SkipNullableCheck - public Reader getCharacterStream(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Reader getCharacterStream(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - public boolean isBeforeFirst() throws SQLException { - // TODO - return false; - } - - @Override - public boolean isAfterLast() throws SQLException { - // TODO - return false; - } - - @Override - public boolean isFirst() throws SQLException { - // TODO - return false; - } - - @Override - public boolean isLast() throws SQLException { - // TODO - return false; - } - - @Override - public void beforeFirst() throws SQLException { - // TODO - } - - @Override - public void 
afterLast() throws SQLException { - // TODO - } - - @Override - public boolean first() throws SQLException { - // TODO - return false; - } - - @Override - public boolean last() throws SQLException { - // TODO - return false; - } - - @Override - public int getRow() throws SQLException { - // TODO - return 0; - } - - @Override - public boolean absolute(int row) throws SQLException { - // TODO - return false; - } - - @Override - public boolean relative(int rows) throws SQLException { - return false; - } - - @Override - public boolean previous() throws SQLException { - // TODO - return false; - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - // TODO - } - - @Override - public int getFetchDirection() throws SQLException { - // TODO - return 0; - } - - @Override - public void setFetchSize(int rows) throws SQLException { - // TODO - } - - @Override - public int getFetchSize() throws SQLException { - // TODO - return 0; - } - - @Override - public int getType() throws SQLException { - // TODO - return 0; - } - - @Override - public int getConcurrency() throws SQLException { - // TODO - return 0; - } - - @Override - public boolean rowUpdated() throws SQLException { - // TODO - return false; - } - - @Override - public boolean rowInserted() throws SQLException { - // TODO - return false; - } - - @Override - public boolean rowDeleted() throws SQLException { - // TODO - return false; - } - - @Override - public void updateNull(int columnIndex) throws SQLException { - // TODO - } - - @Override - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - // TODO - } - - @Override - public void updateByte(int columnIndex, byte x) throws SQLException { - // TODO - } - - @Override - public void updateShort(int columnIndex, short x) throws SQLException { - // TODO - } - - @Override - public void updateInt(int columnIndex, int x) throws SQLException { - // TODO - } - - @Override - public void updateLong(int columnIndex, long x) throws SQLException { - // TODO - } - - @Override - public void updateFloat(int columnIndex, float x) throws SQLException { - // TODO - } - - @Override - public void updateDouble(int columnIndex, double x) throws SQLException { - // TODO - } - - @Override - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - // TODO - } - - @Override - public void updateString(int columnIndex, String x) throws SQLException { - // TODO - } - - @Override - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - // TODO - } - - @Override - public void updateDate(int columnIndex, Date x) throws SQLException { - // TODO - } - - @Override - public void updateTime(int columnIndex, Time x) throws SQLException { - // TODO - } - - @Override - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - // TODO - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - // TODO - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - // TODO - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - // TODO - } - - @Override - public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - // TODO - } - - @Override - public void updateObject(int columnIndex, Object x) throws SQLException { - // TODO - } - - @Override - public void updateNull(String columnLabel) throws SQLException { 
- // TODO - } - - @Override - public void updateBoolean(String columnLabel, boolean x) throws SQLException { - // TODO - } - - @Override - public void updateByte(String columnLabel, byte x) throws SQLException { - // TODO - } - - @Override - public void updateShort(String columnLabel, short x) throws SQLException { - // TODO - } - - @Override - public void updateInt(String columnLabel, int x) throws SQLException { - // TODO - } - - @Override - public void updateLong(String columnLabel, long x) throws SQLException { - // TODO - } - - @Override - public void updateFloat(String columnLabel, float x) throws SQLException { - // TODO - } - - @Override - public void updateDouble(String columnLabel, double x) throws SQLException { - // TODO - } - - @Override - public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - // TODO - } - - @Override - public void updateString(String columnLabel, String x) throws SQLException { - // TODO - } - - @Override - public void updateBytes(String columnLabel, byte[] x) throws SQLException { - // TODO - } - - @Override - public void updateDate(String columnLabel, Date x) throws SQLException { - // TODO - } - - @Override - public void updateTime(String columnLabel, Time x) throws SQLException { - // TODO - } - - @Override - public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - // TODO - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - // TODO - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - // TODO - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - // TODO - } - - @Override - public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - // TODO - } - - @Override - public void updateObject(String columnLabel, Object x) throws SQLException { - // TODO - } - - @Override - public void insertRow() throws SQLException { - // TODO - } - - @Override - public void updateRow() throws SQLException { - // TODO - } - - @Override - public void deleteRow() throws SQLException { - // TODO - } - - @Override - public void refreshRow() throws SQLException { - // TODO - } - - @Override - public void cancelRowUpdates() throws SQLException { - // TODO - } - - @Override - public void moveToInsertRow() throws SQLException { - // TODO - } - - @Override - public void moveToCurrentRow() throws SQLException { - // TODO - } - - @Override - @SkipNullableCheck - public Statement getStatement() throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Object getObject(int columnIndex, Map> map) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Ref getRef(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Blob getBlob(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Clob getClob(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Array getArray(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Object getObject(String columnLabel, Map> map) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Ref getRef(String columnLabel) throws 
SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Blob getBlob(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Clob getClob(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Array getArray(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Date getDate(int columnIndex, Calendar cal) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Date getDate(String columnLabel, Calendar cal) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Time getTime(int columnIndex, Calendar cal) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Time getTime(String columnLabel, Calendar cal) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public URL getURL(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public URL getURL(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - public void updateRef(int columnIndex, Ref x) throws SQLException { - // TODO - } - - @Override - public void updateRef(String columnLabel, Ref x) throws SQLException { - // TODO - } - - @Override - public void updateBlob(int columnIndex, Blob x) throws SQLException { - // TODO - } - - @Override - public void updateBlob(String columnLabel, Blob x) throws SQLException { - // TODO - } - - @Override - public void updateClob(int columnIndex, Clob x) throws SQLException { - // TODO - } - - @Override - public void updateClob(String columnLabel, Clob x) throws SQLException { - // TODO - } - - @Override - public void updateArray(int columnIndex, Array x) throws SQLException { - // TODO - } - - @Override - public void updateArray(String columnLabel, Array x) throws SQLException { - // TODO - } - - @Override - @SkipNullableCheck - public RowId getRowId(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public RowId getRowId(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - public void updateRowId(int columnIndex, RowId x) throws SQLException { - // TODO - } - - @Override - public void updateRowId(String columnLabel, RowId x) throws SQLException { - // TODO - } - - @Override - public int getHoldability() throws SQLException { - // TODO - return 0; - } - - @Override - public boolean isClosed() throws SQLException { - // TODO - return false; - } - - @Override - public void updateNString(int columnIndex, String nString) throws SQLException { - // TODO - } - - @Override - public void updateNString(String columnLabel, String nString) throws SQLException { - // TODO - } - - @Override - public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - // TODO - } - - @Override - public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - // TODO - } - - @Override - @SkipNullableCheck - public NClob getNClob(int columnIndex) throws SQLException { - // TODO - 
return null; - } - - @Override - @SkipNullableCheck - public NClob getNClob(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public SQLXML getSQLXML(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public SQLXML getSQLXML(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - // TODO - } - - @Override - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - // TODO - } - - @Override - public String getNString(int columnIndex) throws SQLException { - // TODO - return ""; - } - - @Override - public String getNString(String columnLabel) throws SQLException { - // TODO - return ""; - } - - @Override - @SkipNullableCheck - public Reader getNCharacterStream(int columnIndex) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public Reader getNCharacterStream(String columnLabel) throws SQLException { - // TODO - return null; - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - // TODO - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - // TODO - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - // TODO - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { - // TODO - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - // TODO - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { - // TODO - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { - // TODO - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - // TODO - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { - // TODO - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { - // TODO - } - - @Override - public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - // TODO - } - - @Override - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - // TODO - } - - @Override - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - // TODO - } - - @Override - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - // TODO - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - // TODO - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - // TODO - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - // TODO - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - // TODO - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - // TODO - } - - @Override - public void updateAsciiStream(String 
columnLabel, InputStream x) throws SQLException { - // TODO - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - // TODO - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - // TODO - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - // TODO - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - // TODO - } - - @Override - public void updateClob(int columnIndex, Reader reader) throws SQLException { - // TODO - } - - @Override - public void updateClob(String columnLabel, Reader reader) throws SQLException { - // TODO - } - - @Override - public void updateNClob(int columnIndex, Reader reader) throws SQLException { - // TODO - } - - @Override - public void updateNClob(String columnLabel, Reader reader) throws SQLException { - // TODO - } - - @Override - @SkipNullableCheck - public T getObject(int columnIndex, Class type) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public T getObject(String columnLabel, Class type) throws SQLException { - // TODO - return null; - } - - @Override - @SkipNullableCheck - public T unwrap(Class iface) throws SQLException { - // TODO - return null; - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - // TODO - return false; - } - - private SQLException throwNotSupportedException() { - return new SQLFeatureNotSupportedException("Not implemented by the driver"); - } + private final LimboResultSet resultSet; + + public JDBC4ResultSet(LimboResultSet resultSet) { + this.resultSet = resultSet; + } + + @Override + public boolean next() throws SQLException { + return resultSet.next(); + } + + @Override + public void close() throws SQLException { + // TODO + } + + @Override + public boolean wasNull() throws SQLException { + // TODO + return false; + } + + @Override + public String getString(int columnIndex) throws SQLException { + // TODO + return ""; + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + // TODO + return false; + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + // TODO + return 0; + } + + @Override + public short getShort(int columnIndex) throws SQLException { + // TODO + return 0; + } + + @Override + public int getInt(int columnIndex) throws SQLException { + // TODO + return 0; + } + + @Override + public long getLong(int columnIndex) throws SQLException { + // TODO + return 0; + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + // TODO + return 0; + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + // TODO + return 0; + } + + @Override + @SkipNullableCheck + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + // TODO + return null; + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + // TODO + return new byte[0]; + } + + @Override + @SkipNullableCheck + public Date getDate(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Time getTime(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Timestamp getTimestamp(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public InputStream getAsciiStream(int 
columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public InputStream getBinaryStream(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + public String getString(String columnLabel) throws SQLException { + // TODO + return ""; + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + // TODO + return false; + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + // TODO + return 0; + } + + @Override + public short getShort(String columnLabel) throws SQLException { + // TODO + return 0; + } + + @Override + public int getInt(String columnLabel) throws SQLException { + // TODO + return 0; + } + + @Override + public long getLong(String columnLabel) throws SQLException { + // TODO + return 0; + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + // TODO + return 0; + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + // TODO + return 0; + } + + @Override + @SkipNullableCheck + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + // TODO + return null; + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + // TODO + return new byte[0]; + } + + @Override + @SkipNullableCheck + public Date getDate(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Time getTime(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Timestamp getTimestamp(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public InputStream getAsciiStream(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public InputStream getBinaryStream(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public SQLWarning getWarnings() throws SQLException { + // TODO + return null; + } + + @Override + public void clearWarnings() throws SQLException { + // TODO + } + + @Override + public String getCursorName() throws SQLException { + // TODO + return ""; + } + + @Override + @SkipNullableCheck + public ResultSetMetaData getMetaData() throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Object getObject(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Object getObject(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + // TODO + return 0; + } + + @Override + @SkipNullableCheck + public Reader getCharacterStream(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Reader getCharacterStream(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public BigDecimal 
getBigDecimal(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + public boolean isBeforeFirst() throws SQLException { + // TODO + return false; + } + + @Override + public boolean isAfterLast() throws SQLException { + // TODO + return false; + } + + @Override + public boolean isFirst() throws SQLException { + // TODO + return false; + } + + @Override + public boolean isLast() throws SQLException { + // TODO + return false; + } + + @Override + public void beforeFirst() throws SQLException { + // TODO + } + + @Override + public void afterLast() throws SQLException { + // TODO + } + + @Override + public boolean first() throws SQLException { + // TODO + return false; + } + + @Override + public boolean last() throws SQLException { + // TODO + return false; + } + + @Override + public int getRow() throws SQLException { + // TODO + return 0; + } + + @Override + public boolean absolute(int row) throws SQLException { + // TODO + return false; + } + + @Override + public boolean relative(int rows) throws SQLException { + return false; + } + + @Override + public boolean previous() throws SQLException { + // TODO + return false; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + // TODO + } + + @Override + public int getFetchDirection() throws SQLException { + // TODO + return 0; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + // TODO + } + + @Override + public int getFetchSize() throws SQLException { + // TODO + return 0; + } + + @Override + public int getType() throws SQLException { + // TODO + return 0; + } + + @Override + public int getConcurrency() throws SQLException { + // TODO + return 0; + } + + @Override + public boolean rowUpdated() throws SQLException { + // TODO + return false; + } + + @Override + public boolean rowInserted() throws SQLException { + // TODO + return false; + } + + @Override + public boolean rowDeleted() throws SQLException { + // TODO + return false; + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + // TODO + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + // TODO + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + // TODO + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + // TODO + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + // TODO + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + // TODO + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + // TODO + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + // TODO + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + // TODO + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + // TODO + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + // TODO + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + // TODO + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + // TODO + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + // TODO + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + 
// TODO + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + // TODO + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + // TODO + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + // TODO + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + // TODO + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + // TODO + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + // TODO + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + // TODO + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + // TODO + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + // TODO + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + // TODO + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + // TODO + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + // TODO + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + // TODO + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + // TODO + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + // TODO + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + // TODO + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + // TODO + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + // TODO + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + // TODO + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) + throws SQLException { + // TODO + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) + throws SQLException { + // TODO + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + // TODO + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + // TODO + } + + @Override + public void insertRow() throws SQLException { + // TODO + } + + @Override + public void updateRow() throws SQLException { + // TODO + } + + @Override + public void deleteRow() throws SQLException { + // TODO + } + + @Override + public void refreshRow() throws SQLException { + // TODO + } + + @Override + public void cancelRowUpdates() throws SQLException { + // TODO + } + + @Override + public void moveToInsertRow() throws SQLException { + // TODO + } + + @Override + public void moveToCurrentRow() throws SQLException { + // TODO + } + + @Override + @SkipNullableCheck + public Statement getStatement() throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Object getObject(int columnIndex, Map> map) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Ref getRef(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + 
@SkipNullableCheck + public Blob getBlob(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Clob getClob(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Array getArray(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Object getObject(String columnLabel, Map> map) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Ref getRef(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Blob getBlob(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Clob getClob(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Array getArray(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public URL getURL(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public URL getURL(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + // TODO + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + // TODO + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + // TODO + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + // TODO + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + // TODO + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + // TODO + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + // TODO + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + // TODO + } + + @Override + @SkipNullableCheck + public RowId getRowId(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public RowId getRowId(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + // TODO + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + // TODO + } + + @Override + public int getHoldability() throws SQLException { + // TODO + return 0; + } + + @Override + public boolean isClosed() throws 
SQLException { + // TODO + return false; + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + // TODO + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + // TODO + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + // TODO + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + // TODO + } + + @Override + @SkipNullableCheck + public NClob getNClob(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public NClob getNClob(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public SQLXML getSQLXML(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public SQLXML getSQLXML(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + // TODO + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + // TODO + } + + @Override + public String getNString(int columnIndex) throws SQLException { + // TODO + return ""; + } + + @Override + public String getNString(String columnLabel) throws SQLException { + // TODO + return ""; + } + + @Override + @SkipNullableCheck + public Reader getNCharacterStream(int columnIndex) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public Reader getNCharacterStream(String columnLabel) throws SQLException { + // TODO + return null; + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + // TODO + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) + throws SQLException { + // TODO + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + // TODO + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + // TODO + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + // TODO + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) + throws SQLException { + // TODO + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) + throws SQLException { + // TODO + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) + throws SQLException { + // TODO + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) + throws SQLException { + // TODO + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) + throws SQLException { + // TODO + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + // TODO + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + // TODO + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + // TODO + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + // TODO + } + + @Override + public 
void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + // TODO + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + // TODO + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + // TODO + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + // TODO + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + // TODO + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + // TODO + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + // TODO + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + // TODO + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + // TODO + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + // TODO + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + // TODO + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + // TODO + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + // TODO + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + // TODO + } + + @Override + @SkipNullableCheck + public T getObject(int columnIndex, Class type) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public T getObject(String columnLabel, Class type) throws SQLException { + // TODO + return null; + } + + @Override + @SkipNullableCheck + public T unwrap(Class iface) throws SQLException { + // TODO + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + // TODO + return false; + } + + private SQLException throwNotSupportedException() { + return new SQLFeatureNotSupportedException("Not implemented by the driver"); + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4Statement.java b/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4Statement.java index 24a751857..eee4c95a3 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4Statement.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/jdbc4/JDBC4Statement.java @@ -8,7 +8,6 @@ import java.sql.SQLException; import java.sql.SQLWarning; import java.sql.Statement; import java.util.concurrent.locks.ReentrantLock; - import org.github.tursodatabase.annotations.Nullable; import org.github.tursodatabase.annotations.SkipNullableCheck; import org.github.tursodatabase.core.LimboConnection; @@ -17,359 +16,366 @@ import org.github.tursodatabase.core.LimboStatement; public class JDBC4Statement implements Statement { - private final LimboConnection connection; - @Nullable - private LimboStatement statement = null; + private final LimboConnection connection; + @Nullable private LimboStatement statement = null; - private boolean closed; - private boolean closeOnCompletion; + private boolean closed; + private boolean closeOnCompletion; - private final int resultSetType; - private final int resultSetConcurrency; - private final int resultSetHoldability; + private final int resultSetType; + private final int 
resultSetConcurrency; + private final int resultSetHoldability; - private int queryTimeoutSeconds; - private long updateCount; - private boolean exhaustedResults = false; + private int queryTimeoutSeconds; + private long updateCount; + private boolean exhaustedResults = false; - private ReentrantLock connectionLock = new ReentrantLock(); + private ReentrantLock connectionLock = new ReentrantLock(); - public JDBC4Statement(LimboConnection connection) { - this(connection, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, - ResultSet.CLOSE_CURSORS_AT_COMMIT); + public JDBC4Statement(LimboConnection connection) { + this( + connection, + ResultSet.TYPE_FORWARD_ONLY, + ResultSet.CONCUR_READ_ONLY, + ResultSet.CLOSE_CURSORS_AT_COMMIT); + } + + public JDBC4Statement( + LimboConnection connection, + int resultSetType, + int resultSetConcurrency, + int resultSetHoldability) { + this.connection = connection; + this.resultSetType = resultSetType; + this.resultSetConcurrency = resultSetConcurrency; + this.resultSetHoldability = resultSetHoldability; + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + execute(sql); + + requireNonNull(statement, "statement should not be null after running execute method"); + return new JDBC4ResultSet(statement.getResultSet()); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + execute(sql); + + requireNonNull(statement, "statement should not be null after running execute method"); + final LimboResultSet resultSet = statement.getResultSet(); + while (resultSet.isOpen()) { + resultSet.next(); } - public JDBC4Statement(LimboConnection connection, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) { - this.connection = connection; - this.resultSetType = resultSetType; - this.resultSetConcurrency = resultSetConcurrency; - this.resultSetHoldability = resultSetHoldability; + // TODO: return update count; + return 0; + } + + @Override + public void close() throws SQLException { + clearGeneratedKeys(); + internalClose(); + closed = true; + } + + @Override + public int getMaxFieldSize() throws SQLException { + // TODO + return 0; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + // TODO + } + + @Override + public int getMaxRows() throws SQLException { + // TODO + return 0; + } + + @Override + public void setMaxRows(int max) throws SQLException { + // TODO + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + // TODO + } + + @Override + public int getQueryTimeout() throws SQLException { + // TODO + return 0; + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + if (seconds < 0) { + throw new SQLException("Query timeout must be greater than 0"); + } + this.queryTimeoutSeconds = seconds; + } + + @Override + public void cancel() throws SQLException { + // TODO + } + + @Override + @SkipNullableCheck + public SQLWarning getWarnings() throws SQLException { + // TODO + return null; + } + + @Override + public void clearWarnings() throws SQLException { + // TODO + } + + @Override + public void setCursorName(String name) throws SQLException { + // TODO + } + + /** + * The execute method executes an SQL statement and indicates the form of the first + * result. You must then use the methods getResultSet or getUpdateCount + * to retrieve the result, and getMoreResults to move to any subsequent result(s). 
+ */ + @Override + public boolean execute(String sql) throws SQLException { + internalClose(); + + return this.withConnectionTimeout( + () -> { + try { + // TODO: if sql is a readOnly query, do we still need the locks? + connectionLock.lock(); + statement = connection.prepare(sql); + final boolean result = statement.execute(); + updateGeneratedKeys(); + exhaustedResults = false; + + return result; + } finally { + connectionLock.unlock(); + } + }); + } + + @Override + public ResultSet getResultSet() throws SQLException { + requireNonNull(statement, "statement is null"); + return new JDBC4ResultSet(statement.getResultSet()); + } + + @Override + public int getUpdateCount() throws SQLException { + // TODO + return 0; + } + + @Override + public boolean getMoreResults() throws SQLException { + // TODO + return false; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + // TODO + } + + @Override + public int getFetchDirection() throws SQLException { + // TODO + return 0; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + // TODO + } + + @Override + public int getFetchSize() throws SQLException { + // TODO + return 0; + } + + @Override + public int getResultSetConcurrency() { + return resultSetConcurrency; + } + + @Override + public int getResultSetType() { + return resultSetType; + } + + @Override + public void addBatch(String sql) throws SQLException { + // TODO + } + + @Override + public void clearBatch() throws SQLException { + // TODO + } + + @Override + public int[] executeBatch() throws SQLException { + // TODO + return new int[0]; + } + + @Override + @SkipNullableCheck + public Connection getConnection() throws SQLException { + // TODO + return null; + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + // TODO + return false; + } + + @Override + @SkipNullableCheck + public ResultSet getGeneratedKeys() throws SQLException { + // TODO + return null; + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + // TODO + return 0; + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + // TODO + return 0; + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + // TODO + return 0; + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + // TODO + return false; + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + // TODO + return false; + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + // TODO + return false; + } + + @Override + public int getResultSetHoldability() { + return resultSetHoldability; + } + + @Override + public boolean isClosed() throws SQLException { + // TODO + return false; + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + // TODO + } + + @Override + public boolean isPoolable() throws SQLException { + // TODO + return false; + } + + @Override + public void closeOnCompletion() throws SQLException { + if (closed) { + throw new SQLException("statement is closed"); + } + closeOnCompletion = true; + } + + /** + * Indicates whether the statement should be closed automatically when all its dependent result + * sets are closed. 
+ */ + @Override + public boolean isCloseOnCompletion() throws SQLException { + if (closed) { + throw new SQLException("statement is closed"); + } + return closeOnCompletion; + } + + @Override + @SkipNullableCheck + public T unwrap(Class iface) throws SQLException { + // TODO + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + // TODO + return false; + } + + protected void internalClose() throws SQLException { + // TODO + } + + protected void clearGeneratedKeys() throws SQLException { + // TODO + } + + protected void updateGeneratedKeys() throws SQLException { + // TODO + } + + private T withConnectionTimeout(SQLCallable callable) throws SQLException { + final int originalBusyTimeoutMillis = connection.getBusyTimeout(); + if (queryTimeoutSeconds > 0) { + // TODO: set busy timeout + connection.setBusyTimeout(1000 * queryTimeoutSeconds); } - @Override - public ResultSet executeQuery(String sql) throws SQLException { - execute(sql); - - requireNonNull(statement, "statement should not be null after running execute method"); - return new JDBC4ResultSet(statement.getResultSet()); + try { + return callable.call(); + } finally { + if (queryTimeoutSeconds > 0) { + connection.setBusyTimeout(originalBusyTimeoutMillis); + } } + } - @Override - public int executeUpdate(String sql) throws SQLException { - execute(sql); - - requireNonNull(statement, "statement should not be null after running execute method"); - final LimboResultSet resultSet = statement.getResultSet(); - while (resultSet.isOpen()) { - resultSet.next(); - } - - // TODO: return update count; - return 0; - } - - @Override - public void close() throws SQLException { - clearGeneratedKeys(); - internalClose(); - closed = true; - } - - @Override - public int getMaxFieldSize() throws SQLException { - // TODO - return 0; - } - - @Override - public void setMaxFieldSize(int max) throws SQLException { - // TODO - } - - @Override - public int getMaxRows() throws SQLException { - // TODO - return 0; - } - - @Override - public void setMaxRows(int max) throws SQLException { - // TODO - } - - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - // TODO - } - - @Override - public int getQueryTimeout() throws SQLException { - // TODO - return 0; - } - - @Override - public void setQueryTimeout(int seconds) throws SQLException { - if (seconds < 0) { - throw new SQLException("Query timeout must be greater than 0"); - } - this.queryTimeoutSeconds = seconds; - } - - @Override - public void cancel() throws SQLException { - // TODO - } - - @Override - @SkipNullableCheck - public SQLWarning getWarnings() throws SQLException { - // TODO - return null; - } - - @Override - public void clearWarnings() throws SQLException { - // TODO - } - - @Override - public void setCursorName(String name) throws SQLException { - // TODO - } - - /** - * The execute method executes an SQL statement and indicates the - * form of the first result. You must then use the methods - * getResultSet or getUpdateCount - * to retrieve the result, and getMoreResults to - * move to any subsequent result(s). - */ - @Override - public boolean execute(String sql) throws SQLException { - internalClose(); - - return this.withConnectionTimeout( - () -> { - try { - // TODO: if sql is a readOnly query, do we still need the locks? 
- connectionLock.lock(); - statement = connection.prepare(sql); - final boolean result = statement.execute(); - updateGeneratedKeys(); - exhaustedResults = false; - - return result; - } finally { - connectionLock.unlock(); - } - } - ); - } - - @Override - public ResultSet getResultSet() throws SQLException { - requireNonNull(statement, "statement is null"); - return new JDBC4ResultSet(statement.getResultSet()); - } - - @Override - public int getUpdateCount() throws SQLException { - // TODO - return 0; - } - - @Override - public boolean getMoreResults() throws SQLException { - // TODO - return false; - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - // TODO - } - - @Override - public int getFetchDirection() throws SQLException { - // TODO - return 0; - } - - @Override - public void setFetchSize(int rows) throws SQLException { - // TODO - } - - @Override - public int getFetchSize() throws SQLException { - // TODO - return 0; - } - - @Override - public int getResultSetConcurrency() { - return resultSetConcurrency; - } - - @Override - public int getResultSetType() { - return resultSetType; - } - - @Override - public void addBatch(String sql) throws SQLException { - // TODO - } - - @Override - public void clearBatch() throws SQLException { - // TODO - } - - @Override - public int[] executeBatch() throws SQLException { - // TODO - return new int[0]; - } - - @Override - @SkipNullableCheck - public Connection getConnection() throws SQLException { - // TODO - return null; - } - - @Override - public boolean getMoreResults(int current) throws SQLException { - // TODO - return false; - } - - @Override - @SkipNullableCheck - public ResultSet getGeneratedKeys() throws SQLException { - // TODO - return null; - } - - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - // TODO - return 0; - } - - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - // TODO - return 0; - } - - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - // TODO - return 0; - } - - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - // TODO - return false; - } - - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - // TODO - return false; - } - - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - // TODO - return false; - } - - @Override - public int getResultSetHoldability() { - return resultSetHoldability; - } - - @Override - public boolean isClosed() throws SQLException { - // TODO - return false; - } - - @Override - public void setPoolable(boolean poolable) throws SQLException { - // TODO - } - - @Override - public boolean isPoolable() throws SQLException { - // TODO - return false; - } - - @Override - public void closeOnCompletion() throws SQLException { - if (closed) {throw new SQLException("statement is closed");} - closeOnCompletion = true; - } - - /** - * Indicates whether the statement should be closed automatically when all its dependent result sets are closed. 
- */ - @Override - public boolean isCloseOnCompletion() throws SQLException { - if (closed) {throw new SQLException("statement is closed");} - return closeOnCompletion; - } - - @Override - @SkipNullableCheck - public T unwrap(Class iface) throws SQLException { - // TODO - return null; - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - // TODO - return false; - } - - protected void internalClose() throws SQLException { - // TODO - } - - protected void clearGeneratedKeys() throws SQLException { - // TODO - } - - protected void updateGeneratedKeys() throws SQLException { - // TODO - } - - private T withConnectionTimeout(SQLCallable callable) throws SQLException { - final int originalBusyTimeoutMillis = connection.getBusyTimeout(); - if (queryTimeoutSeconds > 0) { - // TODO: set busy timeout - connection.setBusyTimeout(1000 * queryTimeoutSeconds); - } - - try { - return callable.call(); - } finally { - if (queryTimeoutSeconds > 0) { - connection.setBusyTimeout(originalBusyTimeoutMillis); - } - } - } - - @FunctionalInterface - protected interface SQLCallable { - T call() throws SQLException; - } + @FunctionalInterface + protected interface SQLCallable { + T call() throws SQLException; + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/utils/ByteArrayUtils.java b/bindings/java/src/main/java/org/github/tursodatabase/utils/ByteArrayUtils.java index a89f05042..515fc4e04 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/utils/ByteArrayUtils.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/utils/ByteArrayUtils.java @@ -1,24 +1,23 @@ package org.github.tursodatabase.utils; +import java.nio.charset.StandardCharsets; import org.github.tursodatabase.annotations.Nullable; -import java.nio.charset.StandardCharsets; - public class ByteArrayUtils { - @Nullable - public static String utf8ByteBufferToString(@Nullable byte[] buffer) { - if (buffer == null) { - return null; - } - - return new String(buffer, StandardCharsets.UTF_8); + @Nullable + public static String utf8ByteBufferToString(@Nullable byte[] buffer) { + if (buffer == null) { + return null; } - @Nullable - public static byte[] stringToUtf8ByteArray(@Nullable String str) { - if (str == null) { - return null; - } - return str.getBytes(StandardCharsets.UTF_8); + return new String(buffer, StandardCharsets.UTF_8); + } + + @Nullable + public static byte[] stringToUtf8ByteArray(@Nullable String str) { + if (str == null) { + return null; } + return str.getBytes(StandardCharsets.UTF_8); + } } diff --git a/bindings/java/src/main/java/org/github/tursodatabase/utils/LimboExceptionUtils.java b/bindings/java/src/main/java/org/github/tursodatabase/utils/LimboExceptionUtils.java index 1525fafc8..88032250e 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/utils/LimboExceptionUtils.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/utils/LimboExceptionUtils.java @@ -3,39 +3,39 @@ package org.github.tursodatabase.utils; import static org.github.tursodatabase.utils.ByteArrayUtils.utf8ByteBufferToString; import java.sql.SQLException; - import org.github.tursodatabase.LimboErrorCode; import org.github.tursodatabase.annotations.Nullable; import org.github.tursodatabase.exceptions.LimboException; public class LimboExceptionUtils { - /** - * Throws formatted SQLException with error code and message. - * - * @param errorCode Error code. - * @param errorMessageBytes Error message. 
- */ - public static void throwLimboException(int errorCode, byte[] errorMessageBytes) throws SQLException { - String errorMessage = utf8ByteBufferToString(errorMessageBytes); - throw buildLimboException(errorCode, errorMessage); + /** + * Throws formatted SQLException with error code and message. + * + * @param errorCode Error code. + * @param errorMessageBytes Error message. + */ + public static void throwLimboException(int errorCode, byte[] errorMessageBytes) + throws SQLException { + String errorMessage = utf8ByteBufferToString(errorMessageBytes); + throw buildLimboException(errorCode, errorMessage); + } + + /** + * Throws formatted SQLException with error code and message. + * + * @param errorCode Error code. + * @param errorMessage Error message. + */ + public static LimboException buildLimboException(int errorCode, @Nullable String errorMessage) + throws SQLException { + LimboErrorCode code = LimboErrorCode.getErrorCode(errorCode); + String msg; + if (code == LimboErrorCode.UNKNOWN_ERROR) { + msg = String.format("%s:%s (%s)", code, errorCode, errorMessage); + } else { + msg = String.format("%s (%s)", code, errorMessage); } - /** - * Throws formatted SQLException with error code and message. - * - * @param errorCode Error code. - * @param errorMessage Error message. - */ - public static LimboException buildLimboException(int errorCode, @Nullable String errorMessage) - throws SQLException { - LimboErrorCode code = LimboErrorCode.getErrorCode(errorCode); - String msg; - if (code == LimboErrorCode.UNKNOWN_ERROR) { - msg = String.format("%s:%s (%s)", code, errorCode, errorMessage); - } else { - msg = String.format("%s (%s)", code, errorMessage); - } - - return new LimboException(msg, code); - } + return new LimboException(msg, code); + } } diff --git a/bindings/java/src/test/java/org/github/tursodatabase/IntegrationTest.java b/bindings/java/src/test/java/org/github/tursodatabase/IntegrationTest.java index be25ffdff..7688a3352 100644 --- a/bindings/java/src/test/java/org/github/tursodatabase/IntegrationTest.java +++ b/bindings/java/src/test/java/org/github/tursodatabase/IntegrationTest.java @@ -1,37 +1,36 @@ package org.github.tursodatabase; -import org.github.tursodatabase.jdbc4.JDBC4Connection; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.Properties; +import org.github.tursodatabase.jdbc4.JDBC4Connection; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; public class IntegrationTest { - private JDBC4Connection connection; + private JDBC4Connection connection; - @BeforeEach - void setUp() throws Exception { - String filePath = TestUtils.createTempFile(); - String url = "jdbc:sqlite:" + filePath; - connection = new JDBC4Connection(url, filePath, new Properties()); - } + @BeforeEach + void setUp() throws Exception { + String filePath = TestUtils.createTempFile(); + String url = "jdbc:sqlite:" + filePath; + connection = new JDBC4Connection(url, filePath, new Properties()); + } - @Test - void create_table_multi_inserts_select() throws Exception { - Statement stmt = createDefaultStatement(); - stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); - stmt.execute("INSERT INTO users VALUES (1, 'seonwoo');"); - stmt.execute("INSERT INTO users VALUES (2, 'seonwoo');"); - stmt.execute("INSERT INTO users VALUES (3, 'seonwoo');"); - stmt.execute("SELECT * FROM users"); - } + @Test 
+ void create_table_multi_inserts_select() throws Exception { + Statement stmt = createDefaultStatement(); + stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); + stmt.execute("INSERT INTO users VALUES (1, 'seonwoo');"); + stmt.execute("INSERT INTO users VALUES (2, 'seonwoo');"); + stmt.execute("INSERT INTO users VALUES (3, 'seonwoo');"); + stmt.execute("SELECT * FROM users"); + } - private Statement createDefaultStatement() throws SQLException { - return connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); - } + private Statement createDefaultStatement() throws SQLException { + return connection.createStatement( + ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); + } } diff --git a/bindings/java/src/test/java/org/github/tursodatabase/JDBCTest.java b/bindings/java/src/test/java/org/github/tursodatabase/JDBCTest.java index 7f28ddb5d..c28ff7cb3 100644 --- a/bindings/java/src/test/java/org/github/tursodatabase/JDBCTest.java +++ b/bindings/java/src/test/java/org/github/tursodatabase/JDBCTest.java @@ -1,34 +1,33 @@ package org.github.tursodatabase; -import org.github.tursodatabase.core.LimboConnection; -import org.junit.jupiter.api.Test; +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.util.Properties; - -import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import org.github.tursodatabase.core.LimboConnection; +import org.junit.jupiter.api.Test; class JDBCTest { - @Test - void null_is_returned_when_invalid_url_is_passed() throws Exception { - LimboConnection connection = JDBC.createConnection("jdbc:invalid:xxx", new Properties()); - assertThat(connection).isNull(); - } + @Test + void null_is_returned_when_invalid_url_is_passed() throws Exception { + LimboConnection connection = JDBC.createConnection("jdbc:invalid:xxx", new Properties()); + assertThat(connection).isNull(); + } - @Test - void non_null_connection_is_returned_when_valid_url_is_passed() throws Exception { - String fileUrl = TestUtils.createTempFile(); - LimboConnection connection = JDBC.createConnection("jdbc:sqlite:" + fileUrl, new Properties()); - assertThat(connection).isNotNull(); - } + @Test + void non_null_connection_is_returned_when_valid_url_is_passed() throws Exception { + String fileUrl = TestUtils.createTempFile(); + LimboConnection connection = JDBC.createConnection("jdbc:sqlite:" + fileUrl, new Properties()); + assertThat(connection).isNotNull(); + } - @Test - void connection_can_be_retrieved_from_DriverManager() throws SQLException { - try (Connection connection = DriverManager.getConnection("jdbc:sqlite:sample.db")) { - assertThat(connection).isNotNull(); - } + @Test + void connection_can_be_retrieved_from_DriverManager() throws SQLException { + try (Connection connection = DriverManager.getConnection("jdbc:sqlite:sample.db")) { + assertThat(connection).isNotNull(); } + } } diff --git a/bindings/java/src/test/java/org/github/tursodatabase/TestUtils.java b/bindings/java/src/test/java/org/github/tursodatabase/TestUtils.java index 0d7e64488..7c39bec6e 100644 --- a/bindings/java/src/test/java/org/github/tursodatabase/TestUtils.java +++ b/bindings/java/src/test/java/org/github/tursodatabase/TestUtils.java @@ -4,10 +4,8 @@ import java.io.IOException; import java.nio.file.Files; public class TestUtils { - /** - * Create temporary file and returns the path. 
- */ - public static String createTempFile() throws IOException { - return Files.createTempFile("limbo_test_db", null).toAbsolutePath().toString(); - } + /** Create temporary file and returns the path. */ + public static String createTempFile() throws IOException { + return Files.createTempFile("limbo_test_db", null).toAbsolutePath().toString(); + } } diff --git a/bindings/java/src/test/java/org/github/tursodatabase/core/LimboDBFactoryTest.java b/bindings/java/src/test/java/org/github/tursodatabase/core/LimboDBFactoryTest.java index bc3150f2c..ba7759247 100644 --- a/bindings/java/src/test/java/org/github/tursodatabase/core/LimboDBFactoryTest.java +++ b/bindings/java/src/test/java/org/github/tursodatabase/core/LimboDBFactoryTest.java @@ -1,32 +1,31 @@ package org.github.tursodatabase.core; -import org.github.tursodatabase.TestUtils; -import org.junit.jupiter.api.Test; - -import java.util.Properties; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import java.util.Properties; +import org.github.tursodatabase.TestUtils; +import org.junit.jupiter.api.Test; + class LimboDBFactoryTest { - @Test - void single_database_should_be_created_when_urls_are_same() throws Exception { - String filePath = TestUtils.createTempFile(); - String url = "jdbc:sqlite:" + filePath; - LimboDB db1 = LimboDBFactory.open(url, filePath, new Properties()); - LimboDB db2 = LimboDBFactory.open(url, filePath, new Properties()); - assertEquals(db1, db2); - } + @Test + void single_database_should_be_created_when_urls_are_same() throws Exception { + String filePath = TestUtils.createTempFile(); + String url = "jdbc:sqlite:" + filePath; + LimboDB db1 = LimboDBFactory.open(url, filePath, new Properties()); + LimboDB db2 = LimboDBFactory.open(url, filePath, new Properties()); + assertEquals(db1, db2); + } - @Test - void multiple_databases_should_be_created_when_urls_differ() throws Exception { - String filePath1 = TestUtils.createTempFile(); - String filePath2 = TestUtils.createTempFile(); - String url1 = "jdbc:sqlite:" + filePath1; - String url2 = "jdbc:sqlite:" + filePath2; - LimboDB db1 = LimboDBFactory.open(url1, filePath1, new Properties()); - LimboDB db2 = LimboDBFactory.open(url2, filePath2, new Properties()); - assertNotEquals(db1, db2); - } + @Test + void multiple_databases_should_be_created_when_urls_differ() throws Exception { + String filePath1 = TestUtils.createTempFile(); + String filePath2 = TestUtils.createTempFile(); + String url1 = "jdbc:sqlite:" + filePath1; + String url2 = "jdbc:sqlite:" + filePath2; + LimboDB db1 = LimboDBFactory.open(url1, filePath1, new Properties()); + LimboDB db2 = LimboDBFactory.open(url2, filePath2, new Properties()); + assertNotEquals(db1, db2); + } } diff --git a/bindings/java/src/test/java/org/github/tursodatabase/core/LimboDBTest.java b/bindings/java/src/test/java/org/github/tursodatabase/core/LimboDBTest.java index 9feb39fb7..ca75ac4c7 100644 --- a/bindings/java/src/test/java/org/github/tursodatabase/core/LimboDBTest.java +++ b/bindings/java/src/test/java/org/github/tursodatabase/core/LimboDBTest.java @@ -1,48 +1,47 @@ package org.github.tursodatabase.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.sql.SQLException; import org.github.tursodatabase.LimboErrorCode; import org.github.tursodatabase.TestUtils; import org.github.tursodatabase.exceptions.LimboException; import org.junit.jupiter.api.Test; -import 
java.sql.SQLException; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - public class LimboDBTest { - @Test - void db_should_open_normally() throws Exception { - String dbPath = TestUtils.createTempFile(); - LimboDB.load(); - LimboDB db = LimboDB.create("jdbc:sqlite" + dbPath, dbPath); - db.open(0); - } - - @Test - void should_throw_exception_when_opened_twice() throws Exception { - String dbPath = TestUtils.createTempFile(); - LimboDB.load(); - LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath); - db.open(0); - - assertThatThrownBy(() -> db.open(0)).isInstanceOf(SQLException.class); - } - - @Test - void throwJavaException_should_throw_appropriate_java_exception() throws Exception { - String dbPath = TestUtils.createTempFile(); - LimboDB.load(); - LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath); - - final int limboExceptionCode = LimboErrorCode.LIMBO_ETC.code; - try { - db.throwJavaException(limboExceptionCode); - } catch (Exception e) { - assertThat(e).isInstanceOf(LimboException.class); - LimboException limboException = (LimboException) e; - assertThat(limboException.getResultCode().code).isEqualTo(limboExceptionCode); - } + @Test + void db_should_open_normally() throws Exception { + String dbPath = TestUtils.createTempFile(); + LimboDB.load(); + LimboDB db = LimboDB.create("jdbc:sqlite" + dbPath, dbPath); + db.open(0); + } + + @Test + void should_throw_exception_when_opened_twice() throws Exception { + String dbPath = TestUtils.createTempFile(); + LimboDB.load(); + LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath); + db.open(0); + + assertThatThrownBy(() -> db.open(0)).isInstanceOf(SQLException.class); + } + + @Test + void throwJavaException_should_throw_appropriate_java_exception() throws Exception { + String dbPath = TestUtils.createTempFile(); + LimboDB.load(); + LimboDB db = LimboDB.create("jdbc:sqlite:" + dbPath, dbPath); + + final int limboExceptionCode = LimboErrorCode.LIMBO_ETC.code; + try { + db.throwJavaException(limboExceptionCode); + } catch (Exception e) { + assertThat(e).isInstanceOf(LimboException.class); + LimboException limboException = (LimboException) e; + assertThat(limboException.getResultCode().code).isEqualTo(limboExceptionCode); } + } } diff --git a/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ConnectionTest.java b/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ConnectionTest.java index c1b9afe56..60f6ee56e 100644 --- a/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ConnectionTest.java +++ b/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ConnectionTest.java @@ -1,63 +1,68 @@ package org.github.tursodatabase.jdbc4; -import org.github.tursodatabase.TestUtils; -import org.github.tursodatabase.core.LimboConnection; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.*; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.Properties; - -import static org.junit.jupiter.api.Assertions.*; +import org.github.tursodatabase.TestUtils; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; class JDBC4ConnectionTest { - private JDBC4Connection connection; + private JDBC4Connection connection; - @BeforeEach - void setUp() throws Exception { - String filePath = TestUtils.createTempFile(); - String url = "jdbc:sqlite:" + filePath; - connection = new 
JDBC4Connection(url, filePath, new Properties()); - } + @BeforeEach + void setUp() throws Exception { + String filePath = TestUtils.createTempFile(); + String url = "jdbc:sqlite:" + filePath; + connection = new JDBC4Connection(url, filePath, new Properties()); + } - @Test - void test_create_statement_valid() throws SQLException { - Statement stmt = connection.createStatement(); - assertNotNull(stmt); - assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType()); - assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency()); - assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, stmt.getResultSetHoldability()); - } + @Test + void test_create_statement_valid() throws SQLException { + Statement stmt = connection.createStatement(); + assertNotNull(stmt); + assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType()); + assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency()); + assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, stmt.getResultSetHoldability()); + } - @Test - void test_create_statement_with_type_and_concurrency_valid() throws SQLException { - Statement stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); - assertNotNull(stmt); - assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType()); - assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency()); - } + @Test + void test_create_statement_with_type_and_concurrency_valid() throws SQLException { + Statement stmt = + connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + assertNotNull(stmt); + assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType()); + assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency()); + } - @Test - void test_create_statement_with_all_params_valid() throws SQLException { - Statement stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); - assertNotNull(stmt); - assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType()); - assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency()); - assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, stmt.getResultSetHoldability()); - } + @Test + void test_create_statement_with_all_params_valid() throws SQLException { + Statement stmt = + connection.createStatement( + ResultSet.TYPE_FORWARD_ONLY, + ResultSet.CONCUR_READ_ONLY, + ResultSet.CLOSE_CURSORS_AT_COMMIT); + assertNotNull(stmt); + assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType()); + assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency()); + assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, stmt.getResultSetHoldability()); + } - @Test - void test_create_statement_invalid() { - assertThrows(SQLException.class, () -> { - connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, -1); + @Test + void test_create_statement_invalid() { + assertThrows( + SQLException.class, + () -> { + connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, -1); }); - } + } - @Test - void prepare_simple_create_table() throws Exception { - connection.prepare("CREATE TABLE users (id INT PRIMARY KEY, username TEXT)"); - } + @Test + void prepare_simple_create_table() throws Exception { + connection.prepare("CREATE TABLE users (id INT PRIMARY KEY, username TEXT)"); + } } diff --git a/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ResultSetTest.java 
b/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ResultSetTest.java index 88a499b9d..f764a9361 100644 --- a/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ResultSetTest.java +++ b/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4ResultSetTest.java @@ -6,53 +6,55 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import java.sql.ResultSet; import java.sql.Statement; import java.util.Properties; - import org.github.tursodatabase.TestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; class JDBC4ResultSetTest { - private Statement stmt; + private Statement stmt; - @BeforeEach - void setUp() throws Exception { - String filePath = TestUtils.createTempFile(); - String url = "jdbc:sqlite:" + filePath; - final JDBC4Connection connection = new JDBC4Connection(url, filePath, new Properties()); - stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, - ResultSet.CONCUR_READ_ONLY, - ResultSet.CLOSE_CURSORS_AT_COMMIT); + @BeforeEach + void setUp() throws Exception { + String filePath = TestUtils.createTempFile(); + String url = "jdbc:sqlite:" + filePath; + final JDBC4Connection connection = new JDBC4Connection(url, filePath, new Properties()); + stmt = + connection.createStatement( + ResultSet.TYPE_FORWARD_ONLY, + ResultSet.CONCUR_READ_ONLY, + ResultSet.CLOSE_CURSORS_AT_COMMIT); + } + + @Test + void invoking_next_before_the_last_row_should_return_true() throws Exception { + stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); + stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');"); + stmt.executeUpdate("INSERT INTO users VALUES (2, 'seonwoo');"); + + // first call to next occur internally + stmt.executeQuery("SELECT * FROM users"); + ResultSet resultSet = stmt.getResultSet(); + + assertTrue(resultSet.next()); + } + + @Test + void invoking_next_after_the_last_row_should_return_false() throws Exception { + stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); + stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');"); + stmt.executeUpdate("INSERT INTO users VALUES (2, 'seonwoo');"); + + // first call to next occur internally + stmt.executeQuery("SELECT * FROM users"); + ResultSet resultSet = stmt.getResultSet(); + + while (resultSet.next()) { + // run until next() returns false } - @Test - void invoking_next_before_the_last_row_should_return_true() throws Exception { - stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); - stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');"); - stmt.executeUpdate("INSERT INTO users VALUES (2, 'seonwoo');"); - - // first call to next occur internally - stmt.executeQuery("SELECT * FROM users"); - ResultSet resultSet = stmt.getResultSet(); - - assertTrue(resultSet.next()); - } - - @Test - void invoking_next_after_the_last_row_should_return_false() throws Exception { - stmt.executeUpdate("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); - stmt.executeUpdate("INSERT INTO users VALUES (1, 'sinwoo');"); - stmt.executeUpdate("INSERT INTO users VALUES (2, 'seonwoo');"); - - // first call to next occur internally - stmt.executeQuery("SELECT * FROM users"); - ResultSet resultSet = stmt.getResultSet(); - - while (resultSet.next()) { - // run until next() returns false - } - - // if the previous call to next() returned false, consecutive call to next() should return false as well - assertFalse(resultSet.next()); - } + // if the previous call to next() returned false, consecutive 
call to next() should return false + // as well + assertFalse(resultSet.next()); + } } diff --git a/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4StatementTest.java b/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4StatementTest.java index f81e9d482..2a837629d 100644 --- a/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4StatementTest.java +++ b/bindings/java/src/test/java/org/github/tursodatabase/jdbc4/JDBC4StatementTest.java @@ -5,7 +5,6 @@ import static org.junit.jupiter.api.Assertions.*; import java.sql.ResultSet; import java.sql.Statement; import java.util.Properties; - import org.github.tursodatabase.TestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; @@ -13,41 +12,43 @@ import org.junit.jupiter.api.Test; class JDBC4StatementTest { - private Statement stmt; + private Statement stmt; - @BeforeEach - void setUp() throws Exception { - String filePath = TestUtils.createTempFile(); - String url = "jdbc:sqlite:" + filePath; - final JDBC4Connection connection = new JDBC4Connection(url, filePath, new Properties()); - stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, - ResultSet.CONCUR_READ_ONLY, - ResultSet.CLOSE_CURSORS_AT_COMMIT); - } + @BeforeEach + void setUp() throws Exception { + String filePath = TestUtils.createTempFile(); + String url = "jdbc:sqlite:" + filePath; + final JDBC4Connection connection = new JDBC4Connection(url, filePath, new Properties()); + stmt = + connection.createStatement( + ResultSet.TYPE_FORWARD_ONLY, + ResultSet.CONCUR_READ_ONLY, + ResultSet.CLOSE_CURSORS_AT_COMMIT); + } - @Test - void execute_ddl_should_return_false() throws Exception{ - assertFalse(stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);")); - } + @Test + void execute_ddl_should_return_false() throws Exception { + assertFalse(stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);")); + } - @Test - void execute_insert_should_return_false() throws Exception { - stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); - assertFalse(stmt.execute("INSERT INTO users VALUES (1, 'limbo');")); - } + @Test + void execute_insert_should_return_false() throws Exception { + stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); + assertFalse(stmt.execute("INSERT INTO users VALUES (1, 'limbo');")); + } - @Test - @Disabled("UPDATE not supported yet") - void execute_update_should_return_false() throws Exception { - stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); - stmt.execute("INSERT INTO users VALUES (1, 'limbo');"); - assertFalse(stmt.execute("UPDATE users SET username = 'seonwoo' WHERE id = 1;")); - } + @Test + @Disabled("UPDATE not supported yet") + void execute_update_should_return_false() throws Exception { + stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); + stmt.execute("INSERT INTO users VALUES (1, 'limbo');"); + assertFalse(stmt.execute("UPDATE users SET username = 'seonwoo' WHERE id = 1;")); + } - @Test - void execute_select_should_return_true() throws Exception { - stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); - stmt.execute("INSERT INTO users VALUES (1, 'limbo');"); - assertTrue(stmt.execute("SELECT * FROM users;")); - } + @Test + void execute_select_should_return_true() throws Exception { + stmt.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT);"); + stmt.execute("INSERT INTO users VALUES (1, 'limbo');"); + assertTrue(stmt.execute("SELECT * FROM 
users;")); + } } From b687cf66ebebeef57cede5799d51862ec802e0b6 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Sun, 26 Jan 2025 13:12:47 +0200 Subject: [PATCH 27/34] use bitfield for ended_coroutine --- core/vdbe/mod.rs | 60 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 49 insertions(+), 11 deletions(-) diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index eb44a9edb..0301e811b 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -238,6 +238,29 @@ fn get_cursor_as_sorter_mut<'long, 'short>( cursor } +struct Bitfield([u64; N]); + +impl Bitfield { + fn new() -> Self { + Self([0; N]) + } + + fn set(&mut self, bit: usize) { + assert!(bit < N * 64, "bit out of bounds"); + self.0[bit / 64] |= 1 << (bit % 64); + } + + fn unset(&mut self, bit: usize) { + assert!(bit < N * 64, "bit out of bounds"); + self.0[bit / 64] &= !(1 << (bit % 64)); + } + + fn get(&self, bit: usize) -> bool { + assert!(bit < N * 64, "bit out of bounds"); + (self.0[bit / 64] & (1 << (bit % 64))) != 0 + } +} + /// The program state describes the environment in which the program executes. pub struct ProgramState { pub pc: InsnReference, @@ -245,7 +268,7 @@ pub struct ProgramState { registers: Vec, last_compare: Option, deferred_seek: Option<(CursorID, CursorID)>, - ended_coroutine: HashMap, // flag to indicate that a coroutine has ended (key is the yield register) + ended_coroutine: Bitfield<4>, // flag to indicate that a coroutine has ended (key is the yield register. currently we assume that the yield register is always between 0-255, YOLO) regex_cache: RegexCache, interrupted: bool, parameters: HashMap, OwnedValue>, @@ -262,7 +285,7 @@ impl ProgramState { registers, last_compare: None, deferred_seek: None, - ended_coroutine: HashMap::new(), + ended_coroutine: Bitfield::new(), regex_cache: RegexCache::new(), interrupted: false, parameters: HashMap::new(), @@ -301,7 +324,7 @@ impl ProgramState { self.registers.resize(max_registers, OwnedValue::Null); self.last_compare = None; self.deferred_seek = None; - self.ended_coroutine.clear(); + self.ended_coroutine.0 = [0; 4]; self.regex_cache.like.clear(); self.interrupted = false; self.parameters.clear(); @@ -2086,7 +2109,7 @@ impl Program { assert!(jump_on_definition.is_offset()); let start_offset = start_offset.to_offset_int(); state.registers[*yield_reg] = OwnedValue::Integer(start_offset as i64); - state.ended_coroutine.insert(*yield_reg, false); + state.ended_coroutine.unset(*yield_reg); let jump_on_definition = jump_on_definition.to_offset_int(); state.pc = if jump_on_definition == 0 { state.pc + 1 @@ -2096,7 +2119,7 @@ impl Program { } Insn::EndCoroutine { yield_reg } => { if let OwnedValue::Integer(pc) = state.registers[*yield_reg] { - state.ended_coroutine.insert(*yield_reg, true); + state.ended_coroutine.set(*yield_reg); let pc: u32 = pc .try_into() .unwrap_or_else(|_| panic!("EndCoroutine: pc overflow: {}", pc)); @@ -2110,11 +2133,7 @@ impl Program { end_offset, } => { if let OwnedValue::Integer(pc) = state.registers[*yield_reg] { - if *state - .ended_coroutine - .get(yield_reg) - .expect("coroutine not initialized") - { + if state.ended_coroutine.get(*yield_reg) { state.pc = end_offset.to_offset_int(); } else { let pc: u32 = pc @@ -3403,7 +3422,7 @@ mod tests { exec_ltrim, exec_max, exec_min, exec_nullif, exec_quote, exec_random, exec_randomblob, exec_round, exec_rtrim, exec_sign, exec_soundex, exec_substring, exec_trim, exec_typeof, exec_unhex, exec_unicode, exec_upper, exec_zeroblob, execute_sqlite_version, AggContext, - OwnedValue, + Bitfield, 
OwnedValue, }; use std::{collections::HashMap, rc::Rc}; @@ -4292,4 +4311,23 @@ mod tests { expected_str ); } + + #[test] + fn test_bitfield() { + let mut bitfield = Bitfield::<4>::new(); + for i in 0..256 { + bitfield.set(i); + assert!(bitfield.get(i)); + for j in 0..i { + assert!(bitfield.get(j)); + } + for j in i + 1..256 { + assert!(!bitfield.get(j)); + } + } + for i in 0..256 { + bitfield.unset(i); + assert!(!bitfield.get(i)); + } + } } From cdafc9033e32611af7d5efaa7569da594908ed62 Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Sun, 26 Jan 2025 13:56:09 +0200 Subject: [PATCH 28/34] ProgramState::registers - no resize --- core/vdbe/mod.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 0301e811b..4c8313259 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -277,8 +277,7 @@ pub struct ProgramState { impl ProgramState { pub fn new(max_registers: usize) -> Self { let cursors: RefCell> = RefCell::new(BTreeMap::new()); - let mut registers = Vec::with_capacity(max_registers); - registers.resize(max_registers, OwnedValue::Null); + let registers = vec![OwnedValue::Null; max_registers]; Self { pc: 0, cursors, @@ -319,9 +318,7 @@ impl ProgramState { pub fn reset(&mut self) { self.pc = 0; self.cursors.borrow_mut().clear(); - let max_registers = self.registers.len(); - self.registers.clear(); - self.registers.resize(max_registers, OwnedValue::Null); + self.registers.iter_mut().for_each(|r| *r = OwnedValue::Null); self.last_compare = None; self.deferred_seek = None; self.ended_coroutine.0 = [0; 4]; From faa6d0c69deb6802967b11696c9ba7d33c9a2d7c Mon Sep 17 00:00:00 2001 From: Jussi Saurio Date: Sun, 26 Jan 2025 14:04:05 +0200 Subject: [PATCH 29/34] use vec for cursors, not map --- core/lib.rs | 5 ++-- core/vdbe/mod.rs | 71 ++++++++++++++++++++++++++++++++++-------------- 2 files changed, 53 insertions(+), 23 deletions(-) diff --git a/core/lib.rs b/core/lib.rs index 628d21150..db59a4785 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -372,7 +372,8 @@ impl Connection { syms, )?; - let mut state = vdbe::ProgramState::new(program.max_registers); + let mut state = + vdbe::ProgramState::new(program.max_registers, program.cursor_ref.len()); program.step(&mut state, self.pager.clone())?; } } @@ -441,7 +442,7 @@ pub struct Statement { impl Statement { pub fn new(program: Rc, pager: Rc) -> Self { - let state = vdbe::ProgramState::new(program.max_registers); + let state = vdbe::ProgramState::new(program.max_registers, program.cursor_ref.len()); Self { program, state, diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 4c8313259..94a02e28a 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -60,7 +60,7 @@ use regex::{Regex, RegexBuilder}; use sorter::Sorter; use std::borrow::BorrowMut; use std::cell::{Cell, RefCell, RefMut}; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use std::num::NonZero; use std::rc::{Rc, Weak}; @@ -195,44 +195,52 @@ impl RegexCache { } fn get_cursor_as_table_mut<'long, 'short>( - cursors: &'short mut RefMut<'long, BTreeMap>, + cursors: &'short mut RefMut<'long, Vec>>, cursor_id: CursorID, ) -> &'short mut BTreeCursor { let cursor = cursors - .get_mut(&cursor_id) + .get_mut(cursor_id) + .expect("cursor id out of bounds") + .as_mut() .expect("cursor not allocated") .as_table_mut(); cursor } fn get_cursor_as_index_mut<'long, 'short>( - cursors: &'short mut RefMut<'long, BTreeMap>, + cursors: &'short mut RefMut<'long, Vec>>, cursor_id: CursorID, ) -> &'short mut BTreeCursor { let cursor 
= cursors - .get_mut(&cursor_id) + .get_mut(cursor_id) + .expect("cursor id out of bounds") + .as_mut() .expect("cursor not allocated") .as_index_mut(); cursor } fn get_cursor_as_pseudo_mut<'long, 'short>( - cursors: &'short mut RefMut<'long, BTreeMap>, + cursors: &'short mut RefMut<'long, Vec>>, cursor_id: CursorID, ) -> &'short mut PseudoCursor { let cursor = cursors - .get_mut(&cursor_id) + .get_mut(cursor_id) + .expect("cursor id out of bounds") + .as_mut() .expect("cursor not allocated") .as_pseudo_mut(); cursor } fn get_cursor_as_sorter_mut<'long, 'short>( - cursors: &'short mut RefMut<'long, BTreeMap>, + cursors: &'short mut RefMut<'long, Vec>>, cursor_id: CursorID, ) -> &'short mut Sorter { let cursor = cursors - .get_mut(&cursor_id) + .get_mut(cursor_id) + .expect("cursor id out of bounds") + .as_mut() .expect("cursor not allocated") .as_sorter_mut(); cursor @@ -264,7 +272,7 @@ impl Bitfield { /// The program state describes the environment in which the program executes. pub struct ProgramState { pub pc: InsnReference, - cursors: RefCell>, + cursors: RefCell>>, registers: Vec, last_compare: Option, deferred_seek: Option<(CursorID, CursorID)>, @@ -275,8 +283,9 @@ pub struct ProgramState { } impl ProgramState { - pub fn new(max_registers: usize) -> Self { - let cursors: RefCell> = RefCell::new(BTreeMap::new()); + pub fn new(max_registers: usize, max_cursors: usize) -> Self { + let cursors: RefCell>> = + RefCell::new((0..max_cursors).map(|_| None).collect()); let registers = vec![OwnedValue::Null; max_registers]; Self { pc: 0, @@ -317,8 +326,10 @@ impl ProgramState { pub fn reset(&mut self) { self.pc = 0; - self.cursors.borrow_mut().clear(); - self.registers.iter_mut().for_each(|r| *r = OwnedValue::Null); + self.cursors.borrow_mut().iter_mut().for_each(|c| *c = None); + self.registers + .iter_mut() + .for_each(|r| *r = OwnedValue::Null); self.last_compare = None; self.deferred_seek = None; self.ended_coroutine.0 = [0; 4]; @@ -765,10 +776,16 @@ impl Program { BTreeCursor::new(pager.clone(), *root_page, self.database_header.clone()); match cursor_type { CursorType::BTreeTable(_) => { - cursors.insert(*cursor_id, Cursor::new_table(cursor)); + cursors + .get_mut(*cursor_id) + .unwrap() + .replace(Cursor::new_table(cursor)); } CursorType::BTreeIndex(_) => { - cursors.insert(*cursor_id, Cursor::new_index(cursor)); + cursors + .get_mut(*cursor_id) + .unwrap() + .replace(Cursor::new_index(cursor)); } CursorType::Pseudo(_) => { panic!("OpenReadAsync on pseudo cursor"); @@ -788,7 +805,10 @@ impl Program { num_fields: _, } => { let cursor = PseudoCursor::new(); - cursors.insert(*cursor_id, Cursor::new_pseudo(cursor)); + cursors + .get_mut(*cursor_id) + .unwrap() + .replace(Cursor::new_pseudo(cursor)); state.pc += 1; } Insn::RewindAsync { cursor_id } => { @@ -1569,7 +1589,10 @@ impl Program { }) .collect(); let cursor = Sorter::new(order); - cursors.insert(*cursor_id, Cursor::new_sorter(cursor)); + cursors + .get_mut(*cursor_id) + .unwrap() + .replace(Cursor::new_sorter(cursor)); state.pc += 1; } Insn::SorterData { @@ -2254,9 +2277,15 @@ impl Program { let cursor = BTreeCursor::new(pager.clone(), *root_page, self.database_header.clone()); if is_index { - cursors.insert(*cursor_id, Cursor::new_index(cursor)); + cursors + .get_mut(*cursor_id) + .unwrap() + .replace(Cursor::new_index(cursor)); } else { - cursors.insert(*cursor_id, Cursor::new_table(cursor)); + cursors + .get_mut(*cursor_id) + .unwrap() + .replace(Cursor::new_table(cursor)); } state.pc += 1; } @@ -2289,7 +2318,7 @@ impl Program { 
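As a standalone illustration of the cursor-storage change in this patch (hypothetical names such as `Slots`, `open`, and `close`; these are not the actual vdbe types), the sketch below shows the pre-sized `Vec<Option<T>>` slot pattern being switched to: the cursor id doubles as the index, opening a cursor fills its slot, and closing it simply `take()`s the slot back.

    // Minimal sketch of the slot pattern, not the real ProgramState code.
    struct Slots<T> {
        slots: Vec<Option<T>>,
    }

    impl<T> Slots<T> {
        // Pre-size the storage; every slot starts empty.
        fn new(capacity: usize) -> Self {
            Self {
                slots: (0..capacity).map(|_| None).collect(),
            }
        }

        // "Open": occupy the slot for this id.
        fn open(&mut self, id: usize, value: T) {
            self.slots
                .get_mut(id)
                .expect("cursor id out of bounds")
                .replace(value);
        }

        // "Close": drop whatever was in the slot, leaving it reusable.
        fn close(&mut self, id: usize) {
            self.slots
                .get_mut(id)
                .expect("cursor id out of bounds")
                .take();
        }

        // Borrow the live value, panicking if the slot was never opened.
        fn get_mut(&mut self, id: usize) -> &mut T {
            self.slots
                .get_mut(id)
                .expect("cursor id out of bounds")
                .as_mut()
                .expect("cursor not allocated")
        }
    }

    fn main() {
        let mut cursors: Slots<String> = Slots::new(4);
        cursors.open(2, String::from("btree cursor"));
        assert_eq!(cursors.get_mut(2).as_str(), "btree cursor");
        cursors.close(2);
        assert!(cursors.slots[2].is_none());
    }

Compared with the previous `BTreeMap` keyed by `CursorID`, a lookup becomes a plain index and `reset()` can clear the slots in place without reallocating.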
state.pc += 1; } Insn::Close { cursor_id } => { - let _ = cursors.remove(&*cursor_id); + cursors.get_mut(*cursor_id).unwrap().take(); state.pc += 1; } Insn::IsNull { src, target_pc } => { From 7967cc5efc6536a83bc7a7d64a11d0d1dc580899 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 26 Jan 2025 16:22:04 +0200 Subject: [PATCH 30/34] core: Kill Rows wrapper struct It's just an useless wrapper, kill it. --- bindings/go/rs_src/rows.rs | 10 +++---- bindings/rust/src/lib.rs | 2 +- cli/app.rs | 12 ++++---- cli/import.rs | 2 +- core/benches/benchmark.rs | 6 ++-- core/lib.rs | 30 ++++--------------- core/util.rs | 10 +++++-- core/vdbe/mod.rs | 5 ++-- perf/latency/limbo/src/main.rs | 2 +- simulator/generation/plan.rs | 2 +- .../functions/test_function_rowid.rs | 8 ++--- .../query_processing/test_write_path.rs | 24 +++++++-------- 12 files changed, 49 insertions(+), 64 deletions(-) diff --git a/bindings/go/rs_src/rows.rs b/bindings/go/rs_src/rows.rs index c505924bc..ccd01b20e 100644 --- a/bindings/go/rs_src/rows.rs +++ b/bindings/go/rs_src/rows.rs @@ -2,17 +2,17 @@ use crate::{ statement::TursoStatement, types::{ResultCode, TursoValue}, }; -use limbo_core::{Rows, StepResult, Value}; +use limbo_core::{Statement, StepResult, Value}; use std::ffi::{c_char, c_void}; pub struct TursoRows<'a> { - rows: Rows, + rows: Statement, cursor: Option>>, stmt: Box>, } impl<'a> TursoRows<'a> { - pub fn new(rows: Rows, stmt: Box>) -> Self { + pub fn new(rows: Statement, stmt: Box>) -> Self { TursoRows { rows, stmt, @@ -40,7 +40,7 @@ pub extern "C" fn rows_next(ctx: *mut c_void) -> ResultCode { } let ctx = TursoRows::from_ptr(ctx); - match ctx.rows.next_row() { + match ctx.rows.step() { Ok(StepResult::Row(row)) => { ctx.cursor = Some(row.values); ResultCode::Row @@ -133,6 +133,6 @@ pub extern "C" fn free_rows(rows: *mut c_void) { return; } unsafe { - let _ = Box::from_raw(rows as *mut Rows); + let _ = Box::from_raw(rows as *mut Statement); } } diff --git a/bindings/rust/src/lib.rs b/bindings/rust/src/lib.rs index b2114045a..f19814c38 100644 --- a/bindings/rust/src/lib.rs +++ b/bindings/rust/src/lib.rs @@ -110,7 +110,7 @@ pub enum Params { pub struct Transaction {} pub struct Rows { - _inner: Rc, + _inner: Rc, } impl Rows { diff --git a/cli/app.rs b/cli/app.rs index 9c7e17d65..278bbca43 100644 --- a/cli/app.rs +++ b/cli/app.rs @@ -3,7 +3,7 @@ use crate::{ opcodes_dictionary::OPCODE_DESCRIPTIONS, }; use cli_table::{Cell, Table}; -use limbo_core::{Database, LimboError, Rows, StepResult, Value}; +use limbo_core::{Database, LimboError, Statement, StepResult, Value}; use clap::{Parser, ValueEnum}; use std::{ @@ -614,7 +614,7 @@ impl Limbo { fn print_query_result( &mut self, sql: &str, - mut output: Result, LimboError>, + mut output: Result, LimboError>, ) -> anyhow::Result<()> { match output { Ok(Some(ref mut rows)) => match self.opts.output_mode { @@ -624,7 +624,7 @@ impl Limbo { return Ok(()); } - match rows.next_row() { + match rows.step() { Ok(StepResult::Row(row)) => { for (i, value) in row.values.iter().enumerate() { if i > 0 { @@ -669,7 +669,7 @@ impl Limbo { } let mut table_rows: Vec> = vec![]; loop { - match rows.next_row() { + match rows.step() { Ok(StepResult::Row(row)) => { table_rows.push( row.values @@ -739,7 +739,7 @@ impl Limbo { Ok(Some(ref mut rows)) => { let mut found = false; loop { - match rows.next_row()? { + match rows.step()? 
{ StepResult::Row(row) => { if let Some(Value::Text(schema)) = row.values.first() { let _ = self.write_fmt(format_args!("{};", schema)); @@ -796,7 +796,7 @@ impl Limbo { Ok(Some(ref mut rows)) => { let mut tables = String::new(); loop { - match rows.next_row()? { + match rows.step()? { StepResult::Row(row) => { if let Some(Value::Text(table)) = row.values.first() { tables.push_str(table); diff --git a/cli/import.rs b/cli/import.rs index a339bd276..b7557d58b 100644 --- a/cli/import.rs +++ b/cli/import.rs @@ -95,7 +95,7 @@ impl<'a> ImportFile<'a> { match self.conn.query(insert_string) { Ok(rows) => { if let Some(mut rows) = rows { - while let Ok(x) = rows.next_row() { + while let Ok(x) = rows.step() { match x { limbo_core::StepResult::IO => { self.io.run_once().unwrap(); diff --git a/core/benches/benchmark.rs b/core/benches/benchmark.rs index 184deac4a..9858a0c56 100644 --- a/core/benches/benchmark.rs +++ b/core/benches/benchmark.rs @@ -46,7 +46,7 @@ fn limbo_bench(criterion: &mut Criterion) { let io = io.clone(); b.iter(|| { let mut rows = stmt.query().unwrap(); - match rows.next_row().unwrap() { + match rows.step().unwrap() { limbo_core::StepResult::Row(row) => { assert_eq!(row.get::(0).unwrap(), 1); } @@ -74,7 +74,7 @@ fn limbo_bench(criterion: &mut Criterion) { let io = io.clone(); b.iter(|| { let mut rows = stmt.query().unwrap(); - match rows.next_row().unwrap() { + match rows.step().unwrap() { limbo_core::StepResult::Row(row) => { assert_eq!(row.get::(0).unwrap(), 1); } @@ -103,7 +103,7 @@ fn limbo_bench(criterion: &mut Criterion) { let io = io.clone(); b.iter(|| { let mut rows = stmt.query().unwrap(); - match rows.next_row().unwrap() { + match rows.step().unwrap() { limbo_core::StepResult::Row(row) => { assert_eq!(row.get::(0).unwrap(), 1); } diff --git a/core/lib.rs b/core/lib.rs index db59a4785..e6c812110 100644 --- a/core/lib.rs +++ b/core/lib.rs @@ -281,7 +281,7 @@ impl Connection { } } - pub fn query(self: &Rc, sql: impl Into) -> Result> { + pub fn query(self: &Rc, sql: impl Into) -> Result> { let sql = sql.into(); trace!("Querying: {}", sql); let mut parser = Parser::new(sql.as_bytes()); @@ -292,7 +292,7 @@ impl Connection { } } - pub(crate) fn run_cmd(self: &Rc, cmd: Cmd) -> Result> { + pub(crate) fn run_cmd(self: &Rc, cmd: Cmd) -> Result> { let db = self.db.clone(); let syms: &SymbolTable = &db.syms.borrow(); match cmd { @@ -306,7 +306,7 @@ impl Connection { syms, )?); let stmt = Statement::new(program, self.pager.clone()); - Ok(Some(Rows { stmt })) + Ok(Some(stmt)) } Cmd::Explain(stmt) => { let program = translate::translate( @@ -465,9 +465,9 @@ impl Statement { } } - pub fn query(&mut self) -> Result { + pub fn query(&mut self) -> Result { let stmt = Statement::new(self.program.clone(), self.pager.clone()); - Ok(Rows::new(stmt)) + Ok(stmt) } pub fn columns(&self) -> &[String] { @@ -512,24 +512,6 @@ impl<'a> Row<'a> { } } -pub struct Rows { - stmt: Statement, -} - -impl Rows { - pub fn new(stmt: Statement) -> Self { - Self { stmt } - } - - pub fn next_row(&mut self) -> Result> { - self.stmt.step() - } - - pub fn columns(&self) -> &[String] { - self.stmt.columns() - } -} - pub(crate) struct SymbolTable { pub functions: HashMap>, #[cfg(not(target_family = "wasm"))] @@ -605,7 +587,7 @@ impl<'a> QueryRunner<'a> { } impl Iterator for QueryRunner<'_> { - type Item = Result>; + type Item = Result>; fn next(&mut self) -> Option { match self.parser.next() { diff --git a/core/util.rs b/core/util.rs index 9e67313ea..c81747887 100644 --- a/core/util.rs +++ b/core/util.rs @@ -4,7 +4,7 @@ 
use sqlite3_parser::ast::{Expr, FunctionTail, Literal}; use crate::{ schema::{self, Schema}, - Result, Rows, StepResult, IO, + Result, Statement, StepResult, IO, }; // https://sqlite.org/lang_keywords.html @@ -25,11 +25,15 @@ pub fn normalize_ident(identifier: &str) -> String { pub const PRIMARY_KEY_AUTOMATIC_INDEX_NAME_PREFIX: &str = "sqlite_autoindex_"; -pub fn parse_schema_rows(rows: Option, schema: &mut Schema, io: Arc) -> Result<()> { +pub fn parse_schema_rows( + rows: Option, + schema: &mut Schema, + io: Arc, +) -> Result<()> { if let Some(mut rows) = rows { let mut automatic_indexes = Vec::new(); loop { - match rows.next_row()? { + match rows.step()? { StepResult::Row(row) => { let ty = row.get::<&str>(0)?; if ty != "table" && ty != "index" { diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 94a02e28a..4345aaa54 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -44,7 +44,7 @@ use crate::{ json::json_arrow_extract, json::json_arrow_shift_extract, json::json_error_position, json::json_extract, json::json_object, json::json_type, }; -use crate::{resolve_ext_path, Connection, Result, Rows, TransactionState, DATABASE_VERSION}; +use crate::{resolve_ext_path, Connection, Result, TransactionState, DATABASE_VERSION}; use datetime::{ exec_date, exec_datetime_full, exec_julianday, exec_strftime, exec_time, exec_unixepoch, }; @@ -2338,10 +2338,9 @@ impl Program { "SELECT * FROM sqlite_schema WHERE {}", where_clause ))?; - let rows = Rows { stmt }; let mut schema = RefCell::borrow_mut(&conn.schema); // TODO: This function below is synchronous, make it not async - parse_schema_rows(Some(rows), &mut schema, conn.pager.io.clone())?; + parse_schema_rows(Some(stmt), &mut schema, conn.pager.io.clone())?; state.pc += 1; } Insn::ShiftRight { lhs, rhs, dest } => { diff --git a/perf/latency/limbo/src/main.rs b/perf/latency/limbo/src/main.rs index b51ffb406..a7302e38a 100644 --- a/perf/latency/limbo/src/main.rs +++ b/perf/latency/limbo/src/main.rs @@ -36,7 +36,7 @@ fn main() { let mut rows = stmt.query().unwrap(); let mut count = 0; loop { - let row = rows.next_row().unwrap(); + let row = rows.step().unwrap(); match row { limbo_core::StepResult::Row(_) => { count += 1; diff --git a/simulator/generation/plan.rs b/simulator/generation/plan.rs index 3b124b8f8..2a794fbbc 100644 --- a/simulator/generation/plan.rs +++ b/simulator/generation/plan.rs @@ -414,7 +414,7 @@ impl Interaction { assert!(rows.is_some()); let mut rows = rows.unwrap(); let mut out = Vec::new(); - while let Ok(row) = rows.next_row() { + while let Ok(row) = rows.step() { match row { StepResult::Row(row) => { let mut r = Vec::new(); diff --git a/tests/integration/functions/test_function_rowid.rs b/tests/integration/functions/test_function_rowid.rs index 6655cee0d..54b72c680 100644 --- a/tests/integration/functions/test_function_rowid.rs +++ b/tests/integration/functions/test_function_rowid.rs @@ -11,7 +11,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> { let mut insert_query = conn.query("INSERT INTO test_rowid (id, val) VALUES (NULL, 'test1')")?; if let Some(ref mut rows) = insert_query { loop { - match rows.next_row()? { + match rows.step()? { StepResult::IO => { tmp_db.io.run_once()?; } @@ -25,7 +25,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> { let mut select_query = conn.query("SELECT last_insert_rowid()")?; if let Some(ref mut rows) = select_query { loop { - match rows.next_row()? { + match rows.step()? 
{ StepResult::Row(row) => { if let Value::Integer(id) = row.values[0] { assert_eq!(id, 1, "First insert should have rowid 1"); @@ -44,7 +44,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> { // Test explicit rowid match conn.query("INSERT INTO test_rowid (id, val) VALUES (5, 'test2')") { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::IO => { tmp_db.io.run_once()?; } @@ -60,7 +60,7 @@ fn test_last_insert_rowid_basic() -> anyhow::Result<()> { let mut last_id = 0; match conn.query("SELECT last_insert_rowid()") { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::Row(row) => { if let Value::Integer(id) = row.values[0] { last_id = id; diff --git a/tests/integration/query_processing/test_write_path.rs b/tests/integration/query_processing/test_write_path.rs index 97b68a804..50d159a96 100644 --- a/tests/integration/query_processing/test_write_path.rs +++ b/tests/integration/query_processing/test_write_path.rs @@ -20,7 +20,7 @@ fn test_simple_overflow_page() -> anyhow::Result<()> { match conn.query(insert_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::IO => { tmp_db.io.run_once()?; } @@ -39,7 +39,7 @@ fn test_simple_overflow_page() -> anyhow::Result<()> { match conn.query(list_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::Row(row) => { let first_value = &row.values[0]; let text = &row.values[1]; @@ -93,7 +93,7 @@ fn test_sequential_overflow_page() -> anyhow::Result<()> { let insert_query = format!("INSERT INTO test VALUES ({}, '{}')", i, huge_text.as_str()); match conn.query(insert_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::IO => { tmp_db.io.run_once()?; } @@ -112,7 +112,7 @@ fn test_sequential_overflow_page() -> anyhow::Result<()> { let mut current_index = 0; match conn.query(list_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::Row(row) => { let first_value = &row.values[0]; let text = &row.values[1]; @@ -166,7 +166,7 @@ fn test_sequential_write() -> anyhow::Result<()> { let insert_query = format!("INSERT INTO test VALUES ({})", i); match conn.query(insert_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::IO => { tmp_db.io.run_once()?; } @@ -183,7 +183,7 @@ fn test_sequential_write() -> anyhow::Result<()> { let mut current_read_index = 0; match conn.query(list_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::Row(row) => { let first_value = row.values.first().expect("missing id"); let id = match first_value { @@ -227,7 +227,7 @@ fn test_regression_multi_row_insert() -> anyhow::Result<()> { match conn.query(insert_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::IO => { tmp_db.io.run_once()?; } @@ -248,7 +248,7 @@ fn test_regression_multi_row_insert() -> anyhow::Result<()> { let mut actual_ids = Vec::new(); match conn.query(list_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::Row(row) => { let first_value = row.values.first().expect("missing id"); let id = match first_value { @@ -334,7 +334,7 @@ fn test_wal_checkpoint() -> anyhow::Result<()> { conn.checkpoint()?; match conn.query(insert_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? 
{ + match rows.step()? { StepResult::IO => { tmp_db.io.run_once()?; } @@ -355,7 +355,7 @@ fn test_wal_checkpoint() -> anyhow::Result<()> { let mut current_index = 0; match conn.query(list_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::Row(row) => { let first_value = &row.values[0]; let id = match first_value { @@ -394,7 +394,7 @@ fn test_wal_restart() -> anyhow::Result<()> { let insert_query = format!("INSERT INTO test VALUES ({})", i); match conn.query(insert_query) { Ok(Some(ref mut rows)) => loop { - match rows.next_row()? { + match rows.step()? { StepResult::IO => { tmp_db.io.run_once()?; } @@ -418,7 +418,7 @@ fn test_wal_restart() -> anyhow::Result<()> { loop { if let Some(ref mut rows) = conn.query(list_query)? { loop { - match rows.next_row()? { + match rows.step()? { StepResult::Row(row) => { let first_value = &row.values[0]; let count = match first_value { From 983875c443af7ae23d8ebbf503c516b9c94c2ad5 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 26 Jan 2025 16:48:12 +0200 Subject: [PATCH 31/34] core: Remove database header from BTreeCursor It's already in the pager so use it from there to reduce the size of the `BTreeCursor` struct. --- core/storage/btree.rs | 24 +++++++++--------------- core/storage/pager.rs | 2 +- core/vdbe/mod.rs | 12 +++--------- 3 files changed, 13 insertions(+), 25 deletions(-) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index c8746702d..7e7ee4289 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -105,7 +105,6 @@ pub struct BTreeCursor { rowid: RefCell>, record: RefCell>, null_flag: bool, - database_header: Rc>, /// Index internal pages are consumed on the way up, so we store going upwards flag in case /// we just moved to a parent page and the parent page is an internal index page which requires /// to be consumed. @@ -137,18 +136,13 @@ struct PageStack { } impl BTreeCursor { - pub fn new( - pager: Rc, - root_page: usize, - database_header: Rc>, - ) -> Self { + pub fn new(pager: Rc, root_page: usize) -> Self { Self { pager, root_page, rowid: RefCell::new(None), record: RefCell::new(None), null_flag: false, - database_header, going_upwards: false, write_info: WriteInfo { state: WriteState::Start, @@ -750,7 +744,7 @@ impl BTreeCursor { /// and the overflow cell count is used to determine if the page overflows, /// i.e. whether we need to balance the btree after the insert. fn insert_into_cell(&self, page: &mut PageContent, payload: &[u8], cell_idx: usize) { - let free = self.compute_free_space(page, RefCell::borrow(&self.database_header)); + let free = self.compute_free_space(page, RefCell::borrow(&self.pager.db_header)); const CELL_POINTER_SIZE_BYTES: usize = 2; let enough_space = payload.len() + CELL_POINTER_SIZE_BYTES <= free as usize; if !enough_space { @@ -832,7 +826,7 @@ impl BTreeCursor { // then we need to do some more calculation to figure out where to insert the freeblock // in the freeblock linked list. 
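To show the header-sharing pattern this patch adopts in isolation, here is a minimal sketch with trimmed-down, hypothetical struct definitions (`DbHeader`, `Pager`, and `Cursor` carry far more state in the real code): the header lives once behind `Rc<RefCell<..>>` owned by the pager, and the cursor borrows it on demand instead of keeping its own clone of the handle.

    use std::cell::RefCell;
    use std::rc::Rc;

    // Hypothetical, stripped-down stand-ins for the real structs.
    struct DbHeader {
        page_size: u16,
        reserved_space: u8,
    }

    struct Pager {
        db_header: Rc<RefCell<DbHeader>>,
    }

    struct Cursor {
        pager: Rc<Pager>,
    }

    impl Cursor {
        // Usable bytes per page, computed from the shared header at call time.
        fn usable_space(&self) -> usize {
            let header = self.pager.db_header.borrow();
            (header.page_size - header.reserved_space as u16) as usize
        }
    }

    fn main() {
        let pager = Rc::new(Pager {
            db_header: Rc::new(RefCell::new(DbHeader {
                page_size: 4096,
                reserved_space: 0,
            })),
        });
        let cursor = Cursor { pager };
        assert_eq!(cursor.usable_space(), 4096);
    }

Dropping the cursor's own `database_header` field shrinks `BTreeCursor`, as the commit message notes, at the cost of reaching the header through the pager on each borrow.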
let maxpc = { - let db_header = self.database_header.borrow(); + let db_header = self.pager.db_header.borrow(); let usable_space = (db_header.page_size - db_header.reserved_space as u16) as usize; usable_space as u16 }; @@ -1063,7 +1057,7 @@ impl BTreeCursor { contents.write_u16(PAGE_HEADER_OFFSET_FIRST_FREEBLOCK, 0); contents.write_u16(PAGE_HEADER_OFFSET_CELL_COUNT, 0); - let db_header = RefCell::borrow(&self.database_header); + let db_header = RefCell::borrow(&self.pager.db_header); let cell_content_area_start = db_header.page_size - db_header.reserved_space as u16; contents.write_u16( @@ -1294,7 +1288,7 @@ impl BTreeCursor { /// This marks the page as dirty and writes the page header. fn allocate_page(&self, page_type: PageType, offset: usize) -> PageRef { let page = self.pager.allocate_page().unwrap(); - btree_init_page(&page, page_type, &self.database_header.borrow(), offset); + btree_init_page(&page, page_type, &self.pager.db_header.borrow(), offset); page } @@ -1322,7 +1316,7 @@ impl BTreeCursor { // there are free blocks and enough space if page_ref.first_freeblock() != 0 && gap + 2 <= top { // find slot - let db_header = RefCell::borrow(&self.database_header); + let db_header = RefCell::borrow(&self.pager.db_header); let pc = find_free_cell(page_ref, db_header, amount); if pc != 0 { return pc as u16; @@ -1332,11 +1326,11 @@ impl BTreeCursor { if gap + 2 + amount > top { // defragment - self.defragment_page(page_ref, RefCell::borrow(&self.database_header)); + self.defragment_page(page_ref, RefCell::borrow(&self.pager.db_header)); top = page_ref.read_u16(PAGE_HEADER_OFFSET_CELL_CONTENT_AREA) as usize; } - let db_header = RefCell::borrow(&self.database_header); + let db_header = RefCell::borrow(&self.pager.db_header); top -= amount; page_ref.write_u16(PAGE_HEADER_OFFSET_CELL_CONTENT_AREA, top as u16); @@ -1656,7 +1650,7 @@ impl BTreeCursor { /// The usable size of a page might be an odd number. However, the usable size is not allowed to be less than 480. /// In other words, if the page size is 512, then the reserved space size cannot exceed 32. fn usable_space(&self) -> usize { - let db_header = RefCell::borrow(&self.database_header); + let db_header = self.pager.db_header.borrow(); (db_header.page_size - db_header.reserved_space as u16) as usize } diff --git a/core/storage/pager.rs b/core/storage/pager.rs index 9cc86b958..559e872ae 100644 --- a/core/storage/pager.rs +++ b/core/storage/pager.rs @@ -157,7 +157,7 @@ pub struct Pager { /// I/O interface for input/output operations. 
pub io: Arc, dirty_pages: Rc>>, - db_header: Rc>, + pub db_header: Rc>, flush_info: RefCell, checkpoint_state: RefCell, diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 4345aaa54..427f7b0d2 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -772,8 +772,7 @@ impl Program { root_page, } => { let (_, cursor_type) = self.cursor_ref.get(*cursor_id).unwrap(); - let cursor = - BTreeCursor::new(pager.clone(), *root_page, self.database_header.clone()); + let cursor = BTreeCursor::new(pager.clone(), *root_page); match cursor_type { CursorType::BTreeTable(_) => { cursors @@ -2274,8 +2273,7 @@ impl Program { } => { let (_, cursor_type) = self.cursor_ref.get(*cursor_id).unwrap(); let is_index = cursor_type.is_index(); - let cursor = - BTreeCursor::new(pager.clone(), *root_page, self.database_header.clone()); + let cursor = BTreeCursor::new(pager.clone(), *root_page); if is_index { cursors .get_mut(*cursor_id) @@ -2307,11 +2305,7 @@ impl Program { // TODO: implement temp datbases todo!("temp databases not implemented yet"); } - let mut cursor = Box::new(BTreeCursor::new( - pager.clone(), - 0, - self.database_header.clone(), - )); + let mut cursor = Box::new(BTreeCursor::new(pager.clone(), 0)); let root_page = cursor.btree_create(*flags); state.registers[*root] = OwnedValue::Integer(root_page as i64); From 0903b9b01972ef70cc7fc2daf6eb2a50626c023b Mon Sep 17 00:00:00 2001 From: Harin Date: Sun, 26 Jan 2025 23:35:47 +0530 Subject: [PATCH 32/34] Implemented JSON valid function --- COMPAT.md | 2 +- core/function.rs | 4 ++++ core/json/mod.rs | 14 ++++++++++++++ core/translate/expr.rs | 8 ++++++++ core/vdbe/mod.rs | 10 +++++++--- testing/json.test | 32 ++++++++++++++++++++++++++++++++ 6 files changed, 66 insertions(+), 4 deletions(-) diff --git a/COMPAT.md b/COMPAT.md index 03798d24c..d7c8b0a2c 100644 --- a/COMPAT.md +++ b/COMPAT.md @@ -375,7 +375,7 @@ Modifiers: | jsonb_set(json,path,value,...) 
| | | | json_type(json) | Yes | | | json_type(json,path) | Yes | | -| json_valid(json) | | | +| json_valid(json) | Yes | | | json_valid(json,flags) | | | | json_quote(value) | | | | json_group_array(value) | | | diff --git a/core/function.rs b/core/function.rs index 6b9ca800e..1e5386696 100644 --- a/core/function.rs +++ b/core/function.rs @@ -79,6 +79,7 @@ pub enum JsonFunc { JsonObject, JsonType, JsonErrorPosition, + JsonValid, } #[cfg(feature = "json")] @@ -97,6 +98,7 @@ impl Display for JsonFunc { Self::JsonObject => "json_object".to_string(), Self::JsonType => "json_type".to_string(), Self::JsonErrorPosition => "json_error_position".to_string(), + Self::JsonValid => "json_valid".to_string(), } ) } @@ -519,6 +521,8 @@ impl Func { "json_type" => Ok(Func::Json(JsonFunc::JsonType)), #[cfg(feature = "json")] "json_error_position" => Ok(Self::Json(JsonFunc::JsonErrorPosition)), + #[cfg(feature = "json")] + "json_valid" => Ok(Self::Json(JsonFunc::JsonValid)), "unixepoch" => Ok(Self::Scalar(ScalarFunc::UnixEpoch)), "julianday" => Ok(Self::Scalar(ScalarFunc::JulianDay)), "hex" => Ok(Self::Scalar(ScalarFunc::Hex)), diff --git a/core/json/mod.rs b/core/json/mod.rs index fee87e3df..10e682148 100644 --- a/core/json/mod.rs +++ b/core/json/mod.rs @@ -984,3 +984,17 @@ mod tests { } } } +pub fn is_json_valid(json_value: &OwnedValue) -> crate::Result { + match json_value { + OwnedValue::Text(ref t) => match from_str::(&t.value) { + Ok(_) => Ok(OwnedValue::Integer(1)), + Err(_) => Ok(OwnedValue::Integer(0)), + }, + OwnedValue::Blob(b) => match jsonb::from_slice(b) { + Ok(_) => Ok(OwnedValue::Integer(1)), + Err(_) => Ok(OwnedValue::Integer(0)), + }, + OwnedValue::Null => Ok(OwnedValue::Null), + _ => Ok(OwnedValue::Integer(1)), + } +} diff --git a/core/translate/expr.rs b/core/translate/expr.rs index d1fea2ee2..9fdf5f9c2 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -929,6 +929,14 @@ pub fn translate_expr( func_ctx, ) } + JsonFunc::JsonValid => translate_function( + program, + args.as_deref().unwrap_or_default(), + referenced_tables, + resolver, + target_register, + func_ctx, + ), }, Func::Scalar(srf) => { match srf { diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 4345aaa54..ec03f0c61 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -40,9 +40,9 @@ use crate::vdbe::builder::CursorType; use crate::vdbe::insn::Insn; #[cfg(feature = "json")] use crate::{ - function::JsonFunc, json::get_json, json::json_array, json::json_array_length, - json::json_arrow_extract, json::json_arrow_shift_extract, json::json_error_position, - json::json_extract, json::json_object, json::json_type, + function::JsonFunc, json::get_json, json::is_json_valid, json::json_array, + json::json_array_length, json::json_arrow_extract, json::json_arrow_shift_extract, + json::json_error_position, json::json_extract, json::json_object, json::json_type, }; use crate::{resolve_ext_path, Connection, Result, TransactionState, DATABASE_VERSION}; use datetime::{ @@ -1743,6 +1743,10 @@ impl Program { Err(e) => return Err(e), } } + JsonFunc::JsonValid => { + let json_value = &state.registers[*start_reg]; + state.registers[*dest] = is_json_valid(json_value)?; + } }, crate::function::Func::Scalar(scalar_func) => match scalar_func { ScalarFunc::Cast => { diff --git a/testing/json.test b/testing/json.test index ea1c9bf0f..5817a8c55 100755 --- a/testing/json.test +++ b/testing/json.test @@ -544,3 +544,35 @@ do_execsql_test json_from_json_object { #do_execsql_test json_object_duplicated_keys { # SELECT json_object('key', 
'value', 'key', 'value2'); #} {{{"key":"value2"}}} +# + +do_execsql_test json_valid_1 { + SELECT json_valid('{"a":55,"b":72}'); +} {1} +do_execsql_test json_valid_2 { + SELECT json_valid('["a",55,"b",72]'); +} {1} +do_execsql_test json_valid_3 { + SELECT json_valid( CAST('{"a":1}' AS BLOB) ); +} {1} +do_execsql_test json_valid_4 { + SELECT json_valid(123); +} {1} +do_execsql_test json_valid_5 { + SELECT json_valid(12.3); +} {1} +do_execsql_test json_valid_6 { + SELECT json_valid('not a valid json'); +} {0} +do_execsql_test json_valid_7 { + SELECT json_valid('{"a":"55,"b":72}'); +} {0} +do_execsql_test json_valid_8 { + SELECT json_valid('{"a":55 "b":72}'); +} {0} +do_execsql_test json_valid_3 { + SELECT json_valid( CAST('{"a":"1}' AS BLOB) ); +} {0} +do_execsql_test json_valid_9 { + SELECT json_valid(NULL); +} {} From 0918fc40d4765b8da2374d962a6565ea8213797f Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 26 Jan 2025 20:52:23 +0200 Subject: [PATCH 33/34] bindings/go: Rename to Limbo ...we'll likely call this Turso eventually, but right now, let's keep the code consistent. --- Cargo.lock | 14 ++++----- bindings/go/Cargo.toml | 4 +-- bindings/go/go.mod | 2 +- bindings/go/{turso.go => limbo.go} | 50 +++++++++++++++--------------- bindings/go/rs_src/lib.rs | 14 ++++----- bindings/go/rs_src/rows.rs | 28 ++++++++--------- bindings/go/rs_src/statement.rs | 36 ++++++++++----------- bindings/go/rs_src/types.rs | 16 +++++----- bindings/go/stmt.go | 36 ++++++++++----------- bindings/go/types.go | 16 +++++----- 10 files changed, 108 insertions(+), 108 deletions(-) rename bindings/go/{turso.go => limbo.go} (65%) diff --git a/Cargo.lock b/Cargo.lock index 0f19f5586..112c3793c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1301,6 +1301,13 @@ dependencies = [ "rustyline", ] +[[package]] +name = "limbo-go" +version = "0.0.13" +dependencies = [ + "limbo_core", +] + [[package]] name = "limbo-wasm" version = "0.0.13" @@ -2663,13 +2670,6 @@ version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" -[[package]] -name = "turso-go" -version = "0.0.13" -dependencies = [ - "limbo_core", -] - [[package]] name = "typenum" version = "1.17.0" diff --git a/bindings/go/Cargo.toml b/bindings/go/Cargo.toml index 98056cbe6..b73902c5f 100644 --- a/bindings/go/Cargo.toml +++ b/bindings/go/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "turso-go" +name = "limbo-go" version.workspace = true authors.workspace = true edition.workspace = true @@ -7,7 +7,7 @@ license.workspace = true repository.workspace = true [lib] -name = "_turso_go" +name = "_limbo_go" crate-type = ["cdylib"] path = "rs_src/lib.rs" diff --git a/bindings/go/go.mod b/bindings/go/go.mod index c108e721d..589b9a0e3 100644 --- a/bindings/go/go.mod +++ b/bindings/go/go.mod @@ -1,4 +1,4 @@ -module turso +module limbo go 1.23.4 diff --git a/bindings/go/turso.go b/bindings/go/limbo.go similarity index 65% rename from bindings/go/turso.go rename to bindings/go/limbo.go index dcafb3a64..4011fb1ac 100644 --- a/bindings/go/turso.go +++ b/bindings/go/limbo.go @@ -1,4 +1,4 @@ -package turso +package limbo import ( "database/sql" @@ -15,31 +15,31 @@ import ( "golang.org/x/sys/windows" ) -const turso = "../../target/debug/lib_turso_go" -const driverName = "turso" +const limbo = "../../target/debug/lib_limbo_go" +const driverName = "limbo" -var tursoLib uintptr +var limboLib uintptr func getSystemLibrary() error { switch runtime.GOOS { case "darwin": - slib, err := 
purego.Dlopen(fmt.Sprintf("%s.dylib", turso), purego.RTLD_LAZY) + slib, err := purego.Dlopen(fmt.Sprintf("%s.dylib", limbo), purego.RTLD_LAZY) if err != nil { return err } - tursoLib = slib + limboLib = slib case "linux": - slib, err := purego.Dlopen(fmt.Sprintf("%s.so", turso), purego.RTLD_LAZY) + slib, err := purego.Dlopen(fmt.Sprintf("%s.so", limbo), purego.RTLD_LAZY) if err != nil { return err } - tursoLib = slib + limboLib = slib case "windows": - slib, err := windows.LoadLibrary(fmt.Sprintf("%s.dll", turso)) + slib, err := windows.LoadLibrary(fmt.Sprintf("%s.dll", limbo)) if err != nil { return err } - tursoLib = slib + limboLib = slib default: panic(fmt.Errorf("GOOS=%s is not supported", runtime.GOOS)) } @@ -49,15 +49,15 @@ func getSystemLibrary() error { func init() { err := getSystemLibrary() if err != nil { - slog.Error("Error opening turso library: ", err) + slog.Error("Error opening limbo library: ", err) os.Exit(1) } - sql.Register(driverName, &tursoDriver{}) + sql.Register(driverName, &limboDriver{}) } -type tursoDriver struct{} +type limboDriver struct{} -func (d tursoDriver) Open(name string) (driver.Conn, error) { +func (d limboDriver) Open(name string) (driver.Conn, error) { return openConn(name) } @@ -66,28 +66,28 @@ func toCString(s string) uintptr { return uintptr(unsafe.Pointer(&b[0])) } -// helper to register an FFI function in the lib_turso_go library +// helper to register an FFI function in the lib_limbo_go library func getFfiFunc(ptr interface{}, name string) { - purego.RegisterLibFunc(&ptr, tursoLib, name) + purego.RegisterLibFunc(&ptr, limboLib, name) } -type tursoConn struct { +type limboConn struct { ctx uintptr sync.Mutex prepare func(uintptr, uintptr) uintptr } -func newConn(ctx uintptr) *tursoConn { +func newConn(ctx uintptr) *limboConn { var prepare func(uintptr, uintptr) uintptr getFfiFunc(&prepare, FfiDbPrepare) - return &tursoConn{ + return &limboConn{ ctx, sync.Mutex{}, prepare, } } -func openConn(dsn string) (*tursoConn, error) { +func openConn(dsn string) (*limboConn, error) { var dbOpen func(uintptr) uintptr getFfiFunc(&dbOpen, FfiDbOpen) @@ -98,10 +98,10 @@ func openConn(dsn string) (*tursoConn, error) { if ctx == 0 { return nil, fmt.Errorf("failed to open database for dsn=%q", dsn) } - return &tursoConn{ctx: ctx}, nil + return &limboConn{ctx: ctx}, nil } -func (c *tursoConn) Close() error { +func (c *limboConn) Close() error { if c.ctx == 0 { return nil } @@ -113,7 +113,7 @@ func (c *tursoConn) Close() error { return nil } -func (c *tursoConn) Prepare(query string) (driver.Stmt, error) { +func (c *limboConn) Prepare(query string) (driver.Stmt, error) { if c.ctx == 0 { return nil, errors.New("connection closed") } @@ -129,13 +129,13 @@ func (c *tursoConn) Prepare(query string) (driver.Stmt, error) { if stmtPtr == 0 { return nil, fmt.Errorf("prepare failed: %q", query) } - return &tursoStmt{ + return &limboStmt{ ctx: stmtPtr, sql: query, }, nil } // begin is needed to implement driver.Conn.. 
for now not implemented -func (c *tursoConn) Begin() (driver.Tx, error) { +func (c *limboConn) Begin() (driver.Tx, error) { return nil, errors.New("transactions not implemented") } diff --git a/bindings/go/rs_src/lib.rs b/bindings/go/rs_src/lib.rs index 36b5a6db1..199ed10c0 100644 --- a/bindings/go/rs_src/lib.rs +++ b/bindings/go/rs_src/lib.rs @@ -28,7 +28,7 @@ pub unsafe extern "C" fn db_open(path: *const c_char) -> *mut c_void { Ok(db) => { println!("Opened database: {}", path); let conn = db.connect(); - return TursoConn::new(conn, io).to_ptr(); + return LimboConn::new(conn, io).to_ptr(); } Err(e) => { println!("Error opening database: {}", e); @@ -40,25 +40,25 @@ pub unsafe extern "C" fn db_open(path: *const c_char) -> *mut c_void { } #[allow(dead_code)] -struct TursoConn { +struct LimboConn { conn: Rc, io: Arc, } -impl TursoConn { +impl LimboConn { fn new(conn: Rc, io: Arc) -> Self { - TursoConn { conn, io } + LimboConn { conn, io } } #[allow(clippy::wrong_self_convention)] fn to_ptr(self) -> *mut c_void { Box::into_raw(Box::new(self)) as *mut c_void } - fn from_ptr(ptr: *mut c_void) -> &'static mut TursoConn { + fn from_ptr(ptr: *mut c_void) -> &'static mut LimboConn { if ptr.is_null() { panic!("Null pointer"); } - unsafe { &mut *(ptr as *mut TursoConn) } + unsafe { &mut *(ptr as *mut LimboConn) } } } @@ -68,7 +68,7 @@ impl TursoConn { #[no_mangle] pub unsafe extern "C" fn db_close(db: *mut c_void) { if !db.is_null() { - let _ = unsafe { Box::from_raw(db as *mut TursoConn) }; + let _ = unsafe { Box::from_raw(db as *mut LimboConn) }; } } diff --git a/bindings/go/rs_src/rows.rs b/bindings/go/rs_src/rows.rs index ccd01b20e..456d57bdc 100644 --- a/bindings/go/rs_src/rows.rs +++ b/bindings/go/rs_src/rows.rs @@ -1,19 +1,19 @@ use crate::{ - statement::TursoStatement, - types::{ResultCode, TursoValue}, + statement::LimboStatement, + types::{LimboValue, ResultCode}, }; use limbo_core::{Statement, StepResult, Value}; use std::ffi::{c_char, c_void}; -pub struct TursoRows<'a> { +pub struct LimboRows<'a> { rows: Statement, cursor: Option>>, - stmt: Box>, + stmt: Box>, } -impl<'a> TursoRows<'a> { - pub fn new(rows: Statement, stmt: Box>) -> Self { - TursoRows { +impl<'a> LimboRows<'a> { + pub fn new(rows: Statement, stmt: Box>) -> Self { + LimboRows { rows, stmt, cursor: None, @@ -25,11 +25,11 @@ impl<'a> TursoRows<'a> { Box::into_raw(Box::new(self)) as *mut c_void } - pub fn from_ptr(ptr: *mut c_void) -> &'static mut TursoRows<'a> { + pub fn from_ptr(ptr: *mut c_void) -> &'static mut LimboRows<'a> { if ptr.is_null() { panic!("Null pointer"); } - unsafe { &mut *(ptr as *mut TursoRows) } + unsafe { &mut *(ptr as *mut LimboRows) } } } @@ -38,7 +38,7 @@ pub extern "C" fn rows_next(ctx: *mut c_void) -> ResultCode { if ctx.is_null() { return ResultCode::Error; } - let ctx = TursoRows::from_ptr(ctx); + let ctx = LimboRows::from_ptr(ctx); match ctx.rows.step() { Ok(StepResult::Row(row)) => { @@ -61,11 +61,11 @@ pub extern "C" fn rows_get_value(ctx: *mut c_void, col_idx: usize) -> *const c_v if ctx.is_null() { return std::ptr::null(); } - let ctx = TursoRows::from_ptr(ctx); + let ctx = LimboRows::from_ptr(ctx); if let Some(ref cursor) = ctx.cursor { if let Some(value) = cursor.get(col_idx) { - let val = TursoValue::from_value(value); + let val = LimboValue::from_value(value); return val.to_ptr(); } } @@ -87,7 +87,7 @@ pub extern "C" fn rows_get_columns( if rows_ptr.is_null() || out_length.is_null() { return std::ptr::null_mut(); } - let rows = TursoRows::from_ptr(rows_ptr); + let rows = 
LimboRows::from_ptr(rows_ptr); let c_strings: Vec = rows .rows .columns() @@ -108,7 +108,7 @@ pub extern "C" fn rows_get_columns( #[no_mangle] pub extern "C" fn rows_close(rows_ptr: *mut c_void) { if !rows_ptr.is_null() { - let _ = unsafe { Box::from_raw(rows_ptr as *mut TursoRows) }; + let _ = unsafe { Box::from_raw(rows_ptr as *mut LimboRows) }; } } diff --git a/bindings/go/rs_src/statement.rs b/bindings/go/rs_src/statement.rs index 4a4e29e34..82fb55648 100644 --- a/bindings/go/rs_src/statement.rs +++ b/bindings/go/rs_src/statement.rs @@ -1,6 +1,6 @@ -use crate::rows::TursoRows; -use crate::types::{AllocPool, ResultCode, TursoValue}; -use crate::TursoConn; +use crate::rows::LimboRows; +use crate::types::{AllocPool, LimboValue, ResultCode}; +use crate::LimboConn; use limbo_core::{Statement, StepResult}; use std::ffi::{c_char, c_void}; use std::num::NonZero; @@ -12,11 +12,11 @@ pub extern "C" fn db_prepare(ctx: *mut c_void, query: *const c_char) -> *mut c_v } let query_str = unsafe { std::ffi::CStr::from_ptr(query) }.to_str().unwrap(); - let db = TursoConn::from_ptr(ctx); + let db = LimboConn::from_ptr(ctx); let stmt = db.conn.prepare(query_str.to_string()); match stmt { - Ok(stmt) => TursoStatement::new(stmt, db).to_ptr(), + Ok(stmt) => LimboStatement::new(stmt, db).to_ptr(), Err(_) => std::ptr::null_mut(), } } @@ -24,14 +24,14 @@ pub extern "C" fn db_prepare(ctx: *mut c_void, query: *const c_char) -> *mut c_v #[no_mangle] pub extern "C" fn stmt_execute( ctx: *mut c_void, - args_ptr: *mut TursoValue, + args_ptr: *mut LimboValue, arg_count: usize, changes: *mut i64, ) -> ResultCode { if ctx.is_null() { return ResultCode::Error; } - let stmt = TursoStatement::from_ptr(ctx); + let stmt = LimboStatement::from_ptr(ctx); let args = if !args_ptr.is_null() && arg_count > 0 { unsafe { std::slice::from_raw_parts(args_ptr, arg_count) } @@ -78,20 +78,20 @@ pub extern "C" fn stmt_parameter_count(ctx: *mut c_void) -> i32 { if ctx.is_null() { return -1; } - let stmt = TursoStatement::from_ptr(ctx); + let stmt = LimboStatement::from_ptr(ctx); stmt.statement.parameters_count() as i32 } #[no_mangle] pub extern "C" fn stmt_query( ctx: *mut c_void, - args_ptr: *mut TursoValue, + args_ptr: *mut LimboValue, args_count: usize, ) -> *mut c_void { if ctx.is_null() { return std::ptr::null_mut(); } - let stmt = TursoStatement::from_ptr(ctx); + let stmt = LimboStatement::from_ptr(ctx); let args = if !args_ptr.is_null() && args_count > 0 { unsafe { std::slice::from_raw_parts(args_ptr, args_count) } } else { @@ -104,21 +104,21 @@ pub extern "C" fn stmt_query( match stmt.statement.query() { Ok(rows) => { let stmt = unsafe { Box::from_raw(stmt) }; - TursoRows::new(rows, stmt).to_ptr() + LimboRows::new(rows, stmt).to_ptr() } Err(_) => std::ptr::null_mut(), } } -pub struct TursoStatement<'conn> { +pub struct LimboStatement<'conn> { pub statement: Statement, - pub conn: &'conn mut TursoConn, + pub conn: &'conn mut LimboConn, pub pool: AllocPool, } -impl<'conn> TursoStatement<'conn> { - pub fn new(statement: Statement, conn: &'conn mut TursoConn) -> Self { - TursoStatement { +impl<'conn> LimboStatement<'conn> { + pub fn new(statement: Statement, conn: &'conn mut LimboConn) -> Self { + LimboStatement { statement, conn, pool: AllocPool::new(), @@ -130,10 +130,10 @@ impl<'conn> TursoStatement<'conn> { Box::into_raw(Box::new(self)) as *mut c_void } - fn from_ptr(ptr: *mut c_void) -> &'static mut TursoStatement<'conn> { + fn from_ptr(ptr: *mut c_void) -> &'static mut LimboStatement<'conn> { if ptr.is_null() { panic!("Null 
pointer"); } - unsafe { &mut *(ptr as *mut TursoStatement) } + unsafe { &mut *(ptr as *mut LimboStatement) } } } diff --git a/bindings/go/rs_src/types.rs b/bindings/go/rs_src/types.rs index b8fc3ac75..851212c65 100644 --- a/bindings/go/rs_src/types.rs +++ b/bindings/go/rs_src/types.rs @@ -26,7 +26,7 @@ pub enum ValueType { } #[repr(C)] -pub struct TursoValue { +pub struct LimboValue { pub value_type: ValueType, pub value: ValueUnion, } @@ -131,9 +131,9 @@ impl ValueUnion { } } -impl TursoValue { +impl LimboValue { pub fn new(value_type: ValueType, value: ValueUnion) -> Self { - TursoValue { value_type, value } + LimboValue { value_type, value } } #[allow(clippy::wrong_self_convention)] @@ -144,16 +144,16 @@ impl TursoValue { pub fn from_value(value: &limbo_core::Value<'_>) -> Self { match value { limbo_core::Value::Integer(i) => { - TursoValue::new(ValueType::Integer, ValueUnion::from_int(*i)) + LimboValue::new(ValueType::Integer, ValueUnion::from_int(*i)) } limbo_core::Value::Float(r) => { - TursoValue::new(ValueType::Real, ValueUnion::from_real(*r)) + LimboValue::new(ValueType::Real, ValueUnion::from_real(*r)) } - limbo_core::Value::Text(s) => TursoValue::new(ValueType::Text, ValueUnion::from_str(s)), + limbo_core::Value::Text(s) => LimboValue::new(ValueType::Text, ValueUnion::from_str(s)), limbo_core::Value::Blob(b) => { - TursoValue::new(ValueType::Blob, ValueUnion::from_bytes(b)) + LimboValue::new(ValueType::Blob, ValueUnion::from_bytes(b)) } - limbo_core::Value::Null => TursoValue::new(ValueType::Null, ValueUnion::from_null()), + limbo_core::Value::Null => LimboValue::new(ValueType::Null, ValueUnion::from_null()), } } diff --git a/bindings/go/stmt.go b/bindings/go/stmt.go index 2b7895fe2..30bceefac 100644 --- a/bindings/go/stmt.go +++ b/bindings/go/stmt.go @@ -1,4 +1,4 @@ -package turso +package limbo import ( "context" @@ -9,8 +9,8 @@ import ( "unsafe" ) -// only construct tursoStmt with initStmt function to ensure proper initialization -type tursoStmt struct { +// only construct limboStmt with initStmt function to ensure proper initialization +type limboStmt struct { ctx uintptr sql string query stmtQueryFn @@ -19,7 +19,7 @@ type tursoStmt struct { } // Initialize/register the FFI function pointers for the statement methods -func initStmt(ctx uintptr, sql string) *tursoStmt { +func initStmt(ctx uintptr, sql string) *limboStmt { var query stmtQueryFn var execute stmtExecuteFn var getParamCount func(uintptr) int32 @@ -27,17 +27,17 @@ func initStmt(ctx uintptr, sql string) *tursoStmt { for i := range methods { methods[i].initFunc() } - return &tursoStmt{ + return &limboStmt{ ctx: uintptr(ctx), sql: sql, } } -func (st *tursoStmt) NumInput() int { +func (st *limboStmt) NumInput() int { return int(st.getParamCount(st.ctx)) } -func (st *tursoStmt) Exec(args []driver.Value) (driver.Result, error) { +func (st *limboStmt) Exec(args []driver.Value) (driver.Result, error) { argArray, err := buildArgs(args) if err != nil { return nil, err @@ -65,7 +65,7 @@ func (st *tursoStmt) Exec(args []driver.Value) (driver.Result, error) { } } -func (st *tursoStmt) Query(args []driver.Value) (driver.Rows, error) { +func (st *limboStmt) Query(args []driver.Value) (driver.Rows, error) { queryArgs, err := buildArgs(args) if err != nil { return nil, err @@ -77,7 +77,7 @@ func (st *tursoStmt) Query(args []driver.Value) (driver.Rows, error) { return initRows(rowsPtr), nil } -func (ts *tursoStmt) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { +func (ts *limboStmt) 
ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { stripped := namedValueToValue(args) argArray, err := getArgsPtr(stripped) if err != nil { @@ -99,7 +99,7 @@ func (ts *tursoStmt) ExecContext(ctx context.Context, query string, args []drive } } -func (st *tursoStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { +func (st *limboStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { queryArgs, err := buildNamedArgs(args) if err != nil { return nil, err @@ -111,8 +111,8 @@ func (st *tursoStmt) QueryContext(ctx context.Context, args []driver.NamedValue) return initRows(rowsPtr), nil } -// only construct tursoRows with initRows function to ensure proper initialization -type tursoRows struct { +// only construct limboRows with initRows function to ensure proper initialization +type limboRows struct { ctx uintptr columns []string closed bool @@ -124,8 +124,8 @@ type tursoRows struct { } // Initialize/register the FFI function pointers for the rows methods -// DO NOT construct 'tursoRows' without this function -func initRows(ctx uintptr) *tursoRows { +// DO NOT construct 'limboRows' without this function +func initRows(ctx uintptr) *limboRows { var getCols func(uintptr, *uint) uintptr var getValue func(uintptr, int32) uintptr var closeRows func(uintptr) uintptr @@ -141,7 +141,7 @@ func initRows(ctx uintptr) *tursoRows { methods[i].initFunc() } - return &tursoRows{ + return &limboRows{ ctx: ctx, getCols: getCols, getValue: getValue, @@ -151,7 +151,7 @@ func initRows(ctx uintptr) *tursoRows { } } -func (r *tursoRows) Columns() []string { +func (r *limboRows) Columns() []string { if r.columns == nil { var columnCount uint colArrayPtr := r.getCols(r.ctx, &columnCount) @@ -166,7 +166,7 @@ func (r *tursoRows) Columns() []string { return r.columns } -func (r *tursoRows) Close() error { +func (r *limboRows) Close() error { if r.closed { return nil } @@ -176,7 +176,7 @@ func (r *tursoRows) Close() error { return nil } -func (r *tursoRows) Next(dest []driver.Value) error { +func (r *limboRows) Next(dest []driver.Value) error { status := r.next(r.ctx) switch ResultCode(status) { case Row: diff --git a/bindings/go/types.go b/bindings/go/types.go index e24b2f168..c27832f43 100644 --- a/bindings/go/types.go +++ b/bindings/go/types.go @@ -1,4 +1,4 @@ -package turso +package limbo import ( "database/sql/driver" @@ -47,7 +47,7 @@ func namedValueToValue(named []driver.NamedValue) []driver.Value { return out } -func buildNamedArgs(named []driver.NamedValue) ([]tursoValue, error) { +func buildNamedArgs(named []driver.NamedValue) ([]limboValue, error) { args := make([]driver.Value, len(named)) for i, nv := range named { args[i] = nv.Value @@ -75,7 +75,7 @@ const ( ) // struct to pass Go values over FFI -type tursoValue struct { +type limboValue struct { Type valueType Value [8]byte } @@ -86,9 +86,9 @@ type Blob struct { Len uint } -// convert a tursoValue to a native Go value +// convert a limboValue to a native Go value func toGoValue(valPtr uintptr) interface{} { - val := (*tursoValue)(unsafe.Pointer(valPtr)) + val := (*limboValue)(unsafe.Pointer(valPtr)) switch val.Type { case intVal: return *(*int64)(unsafe.Pointer(&val.Value)) @@ -169,9 +169,9 @@ func cArrayToGoStrings(arrayPtr uintptr, length uint) []string { return out } -// convert a Go slice of driver.Value to a slice of tursoValue that can be sent over FFI -func buildArgs(args []driver.Value) ([]tursoValue, error) { - argSlice := 
make([]tursoValue, len(args)) +// convert a Go slice of driver.Value to a slice of limboValue that can be sent over FFI +func buildArgs(args []driver.Value) ([]limboValue, error) { + argSlice := make([]limboValue, len(args)) for i, v := range args { switch val := v.(type) { From 6f93f290e134b09d4d52cb13cbc7d6bc82647b06 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 26 Jan 2025 20:57:00 +0200 Subject: [PATCH 34/34] bindings/java: Log driver loading error --- .../src/main/java/org/github/tursodatabase/JDBC.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/bindings/java/src/main/java/org/github/tursodatabase/JDBC.java b/bindings/java/src/main/java/org/github/tursodatabase/JDBC.java index 63c6e57d7..6928c32cc 100644 --- a/bindings/java/src/main/java/org/github/tursodatabase/JDBC.java +++ b/bindings/java/src/main/java/org/github/tursodatabase/JDBC.java @@ -3,20 +3,23 @@ package org.github.tursodatabase; import java.sql.*; import java.util.Locale; import java.util.Properties; -import java.util.logging.Logger; import org.github.tursodatabase.annotations.Nullable; import org.github.tursodatabase.annotations.SkipNullableCheck; import org.github.tursodatabase.core.LimboConnection; import org.github.tursodatabase.jdbc4.JDBC4Connection; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class JDBC implements Driver { + private static final Logger logger = LoggerFactory.getLogger(JDBC.class); + private static final String VALID_URL_PREFIX = "jdbc:sqlite:"; static { try { DriverManager.registerDriver(new JDBC()); } catch (Exception e) { - // TODO: log + logger.error("Failed to register driver", e); } } @@ -72,7 +75,7 @@ public class JDBC implements Driver { @Override @SkipNullableCheck - public Logger getParentLogger() throws SQLFeatureNotSupportedException { + public java.util.logging.Logger getParentLogger() throws SQLFeatureNotSupportedException { // TODO return null; }
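
Usage sketch for the renamed Go bindings (a minimal sketch, not part of the patches above): the driver registers itself under the name "limbo" (driverName in bindings/go/limbo.go) and the module is named limbo in go.mod, so callers go through database/sql as below. The import path "limbo", the ":memory:" DSN, and a prebuilt lib_limbo_go shared library reachable from the path the init code dlopens are assumptions here; the query exercises the json_valid scalar added earlier in the series.

package main

import (
    "database/sql"
    "fmt"
    "log"

    _ "limbo" // assumed import path; the blank import runs init(), which registers the "limbo" driver
)

func main() {
    // ":memory:" as the DSN is an assumption; any path accepted by db_open should work.
    db, err := sql.Open("limbo", ":memory:")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Exercise the json_valid scalar added earlier in the series through the renamed driver.
    var valid int
    if err := db.QueryRow(`SELECT json_valid('{"a":55,"b":72}')`).Scan(&valid); err != nil {
        log.Fatal(err)
    }
    fmt.Println(valid) // expected: 1, matching the json_valid_1 test above
}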
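
The statement path in these bindings (limboConn.Prepare -> db_prepare, buildArgs -> stmt_query) also carries bind parameters over FFI. The sketch below shows how that surfaces through database/sql; it assumes positional ? placeholders are bound end to end and reuses the assumed "limbo" import path and ":memory:" DSN from the previous example.

package main

import (
    "database/sql"
    "fmt"
    "log"

    _ "limbo" // assumed import path for the renamed bindings
)

func main() {
    db, err := sql.Open("limbo", ":memory:")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Prepare goes through limboConn.Prepare -> db_prepare over FFI.
    stmt, err := db.Prepare(`SELECT json_valid(?)`)
    if err != nil {
        log.Fatal(err)
    }
    defer stmt.Close()

    // The bound text argument is marshalled by buildArgs into limboValue structs.
    var valid int
    if err := stmt.QueryRow("not a valid json").Scan(&valid); err != nil {
        log.Fatal(err)
    }
    fmt.Println(valid) // expected: 0, matching the json_valid_6 test above
}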