diff --git a/CHANGELOG.md b/CHANGELOG.md index 847deec42..e8189a2e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -218,7 +218,7 @@ * Add support for iif() function (Alex Miller) -* Add suport for last_insert_rowid() function (Krishna Vishal) +* Add support for last_insert_rowid() function (Krishna Vishal) * Add support JOIN USING and NATURAL JOIN (Jussi Saurio) diff --git a/README.md b/README.md index 480933581..8f2665ef0 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ Limbo is a _work-in-progress_, in-process OLTP database engine library written i * **Language bindings** for JavaScript/WebAssembly, Rust, Go, Python, and [Java](bindings/java) * **OS support** for Linux, macOS, and Windows -In the future, we will be also workin on: +In the future, we will also be working on: * **Integrated vector search** for embeddings and vector similarity. * **`BEGIN CONCURRENT`** for improved write throughput. diff --git a/bindings/go/limbo_test.go b/bindings/go/limbo_test.go index 31b1fc3b4..9527faa5f 100644 --- a/bindings/go/limbo_test.go +++ b/bindings/go/limbo_test.go @@ -89,7 +89,7 @@ func TestFunctions(t *testing.T) { } _, err = stmt.Exec(60, "TestFunction", 400) if err != nil { - t.Fatalf("Error executing statment with arguments: %v", err) + t.Fatalf("Error executing statement with arguments: %v", err) } stmt.Close() stmt, err = conn.Prepare("SELECT baz FROM test where foo = ?") diff --git a/bindings/go/rs_src/statement.rs b/bindings/go/rs_src/statement.rs index e8cec0618..d068f01c6 100644 --- a/bindings/go/rs_src/statement.rs +++ b/bindings/go/rs_src/statement.rs @@ -116,12 +116,12 @@ pub extern "C" fn stmt_query( let val = arg.to_value(&mut pool); statement.bind_at(NonZero::new(i + 1).unwrap(), val); } - // ownership of the statement is transfered to the LimboRows object. + // ownership of the statement is transferred to the LimboRows object. 
LimboRows::new(statement, stmt.conn).to_ptr() } pub struct LimboStatement<'conn> { - /// If 'query' is ran on the statement, ownership is transfered to the LimboRows object + /// If 'query' is run on the statement, ownership is transferred to the LimboRows object pub statement: Option, pub conn: &'conn mut LimboConn, pub err: Option, diff --git a/bindings/rust/src/params.rs b/bindings/rust/src/params.rs index c15b6adb5..a627cdd3f 100644 --- a/bindings/rust/src/params.rs +++ b/bindings/rust/src/params.rs @@ -18,7 +18,7 @@ use sealed::Sealed; /// /// Many functions in this library let you pass parameters to libsql. Doing this /// lets you avoid any risk of SQL injection, and is simpler than escaping -/// things manually. These functions generally contain some paramter that generically +/// things manually. These functions generally contain some parameter that generically /// accepts some implementation this trait. /// /// # Positional parameters /// @@ -29,7 +29,7 @@ use sealed::Sealed; /// by doing `(1, "foo")`. /// - For hetergeneous parameter lists of 16 or greater, the [`limbo_libsql::params!`] is supported /// by doing `limbo_libsql::params![1, "foo"]`. -/// - For homogeneous paramter types (where they are all the same type), const arrays are +/// - For homogeneous parameter types (where they are all the same type), const arrays are /// supported by doing `[1, 2, 3]`. /// /// # Example (positional) /// @@ -58,13 +58,13 @@ use sealed::Sealed; /// # } /// ``` /// -/// # Named paramters +/// # Named parameters /// /// - For heterogeneous parameter lists of 16 or less items a tuple syntax is supported /// by doing `(("key1", 1), ("key2", "foo"))`. /// - For hetergeneous parameter lists of 16 or greater, the [`limbo_libsql::params!`] is supported /// by doing `limbo_libsql::named_params!["key1": 1, "key2": "foo"]`. 
-/// - For homogeneous paramter types (where they are all the same type), const arrays are +/// - For homogeneous parameter types (where they are all the same type), const arrays are /// supported by doing `[("key1", 1), ("key2, 2), ("key3", 3)]`. /// /// # Example (named) diff --git a/core/storage/btree.rs b/core/storage/btree.rs index 4a963581b..c5417a871 100644 --- a/core/storage/btree.rs +++ b/core/storage/btree.rs @@ -156,7 +156,7 @@ pub struct BTreeCursor { /// current_page represents the current page being used in the tree and current_page - 1 would be /// the parent. Using current_page + 1 or higher is undefined behaviour. struct PageStack { - /// Pointer to the currenet page being consumed + /// Pointer to the current page being consumed current_page: RefCell, /// List of pages in the stack. Root page will be in index 0 stack: RefCell<[Option; BTCURSOR_MAX_DEPTH + 1]>, @@ -983,7 +983,7 @@ impl BTreeCursor { db_header: Ref, ) -> Result { // NOTE: freelist is in ascending order of keys and pc - // unused_space is reserved bytes at the end of page, therefore we must substract from maxpc + // unused_space is reserved bytes at the end of page, therefore we must subtract from maxpc let mut free_list_pointer_addr = 1; let mut pc = page_ref.first_freeblock() as usize; @@ -1115,7 +1115,7 @@ impl BTreeCursor { debug!("balance_non_root(page={})", current_page.get().id); // Copy of page used to reference cell bytes. 
- // This needs to be saved somewhere safe so taht references still point to here, + // This needs to be saved somewhere safe so that references still point to here, // this will be store in write_info below let page_copy = current_page.get().contents.as_ref().unwrap().clone(); @@ -1420,7 +1420,7 @@ impl BTreeCursor { new_root_page_contents.write_u16(PAGE_HEADER_OFFSET_CELL_COUNT, 0); } - /* swap splitted page buffer with new root buffer so we don't have to update page idx */ + /* swap split page buffer with new root buffer so we don't have to update page idx */ { let (root_id, child_id, child) = { let page_ref = self.stack.top(); @@ -2137,7 +2137,7 @@ impl BTreeCursor { 1 => PageType::TableLeaf, 2 => PageType::IndexLeaf, _ => unreachable!( - "wrong create table falgs, should be 1 for table and 2 for index, got {}", + "wrong create table flags, should be 1 for table and 2 for index, got {}", flags, ), }; diff --git a/core/translate/expr.rs b/core/translate/expr.rs index 36acfa935..63734b794 100644 --- a/core/translate/expr.rs +++ b/core/translate/expr.rs @@ -173,7 +173,7 @@ macro_rules! 
expect_arguments_even { ); }; // The only function right now that requires an even number is `json_object` and it allows - // to have no arguments, so thats why in this macro we do not bail with teh `function with no arguments` error + // to have no arguments, so thats why in this macro we do not bail with the `function with no arguments` error args }}; } @@ -476,7 +476,7 @@ pub fn translate_condition_expr( ); } else { crate::bail_parse_error!( - "parenthesized condtional should have exactly one expression" + "parenthesized conditional should have exactly one expression" ); } } @@ -1517,7 +1517,7 @@ pub fn translate_expr( ScalarFunc::TotalChanges => { if args.is_some() { crate::bail_parse_error!( - "{} fucntion with more than 0 arguments", + "{} function with more than 0 arguments", srf.to_string() ); } diff --git a/core/translate/main_loop.rs b/core/translate/main_loop.rs index 3b918e9ea..32c2a2d2d 100644 --- a/core/translate/main_loop.rs +++ b/core/translate/main_loop.rs @@ -401,10 +401,10 @@ pub fn open_loop( program.resolve_label(loop_start, program.offset()); // TODO: We are currently only handling ascending indexes. - // For conditions like index_key > 10, we have already seeked to the first key greater than 10, and can just scan forward. + // For conditions like index_key > 10, we have already sought to the first key greater than 10, and can just scan forward. // For conditions like index_key < 10, we are at the beginning of the index, and will scan forward and emit IdxGE(10) with a conditional jump to the end. - // For conditions like index_key = 10, we have already seeked to the first key greater than or equal to 10, and can just scan forward and emit IdxGT(10) with a conditional jump to the end. - // For conditions like index_key >= 10, we have already seeked to the first key greater than or equal to 10, and can just scan forward. 
+ // For conditions like index_key = 10, we have already sought to the first key greater than or equal to 10, and can just scan forward and emit IdxGT(10) with a conditional jump to the end. + // For conditions like index_key >= 10, we have already sought to the first key greater than or equal to 10, and can just scan forward. // For conditions like index_key <= 10, we are at the beginning of the index, and will scan forward and emit IdxGT(10) with a conditional jump to the end. // For conditions like index_key != 10, TODO. probably the optimal way is not to use an index at all. // diff --git a/core/translate/mod.rs b/core/translate/mod.rs index 7ff780e2b..5f9e19866 100644 --- a/core/translate/mod.rs +++ b/core/translate/mod.rs @@ -496,7 +496,7 @@ fn translate_create_table( program.resolve_label(parse_schema_label, program.offset()); // TODO: SetCookie // - // TODO: remove format, it sucks for performance but is convinient + // TODO: remove format, it sucks for performance but is convenient let parse_schema_where_clause = format!("tbl_name = '{}' AND type != 'trigger'", tbl_name); program.emit_insn(Insn::ParseSchema { db: sqlite_schema_cursor_id, diff --git a/core/util.rs b/core/util.rs index 654951700..0d16f7794 100644 --- a/core/util.rs +++ b/core/util.rs @@ -499,7 +499,7 @@ pub mod tests { } #[test] - fn test_expressions_equivalent_multiplicaiton() { + fn test_expressions_equivalent_multiplication() { let expr1 = Expr::Binary( Box::new(Expr::Literal(Literal::Numeric("42.0".to_string()))), Multiply, diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs index 3ccfdc1e3..44b38564b 100644 --- a/core/vdbe/mod.rs +++ b/core/vdbe/mod.rs @@ -2425,7 +2425,7 @@ impl Program { let pc: u32 = pc .try_into() .unwrap_or_else(|_| panic!("EndCoroutine: pc overflow: {}", pc)); - state.pc = pc - 1; // yield jump is always next to yield. Here we substract 1 to go back to yield instruction + state.pc = pc - 1; // yield jump is always next to yield. 
Here we subtract 1 to go back to yield instruction } else { unreachable!(); } @@ -2652,7 +2652,7 @@ impl Program { todo!("temp databases not implemented yet"); } // SQLite returns "0" on an empty database, and 2 on the first insertion, - // so we'll mimick that behavior. + // so we'll mimic that behavior. let mut pages = pager.db_header.borrow().database_size.into(); if pages == 1 { pages = 0; diff --git a/extensions/time/src/lib.rs b/extensions/time/src/lib.rs index 5c4d5a383..2e1b8efd2 100644 --- a/extensions/time/src/lib.rs +++ b/extensions/time/src/lib.rs @@ -816,7 +816,7 @@ fn time_until(args: &[Value]) -> Value { time_sub_internal(t, now) } -// Rouding +// Rounding #[scalar(name = "time_trunc", alias = "date_trunc")] fn time_trunc(args: &[Value]) -> Value { diff --git a/simulator/generation/mod.rs b/simulator/generation/mod.rs index ac7defd54..565f65297 100644 --- a/simulator/generation/mod.rs +++ b/simulator/generation/mod.rs @@ -33,11 +33,11 @@ pub trait ArbitraryFromMaybe { } /// Frequency is a helper function for composing different generators with different frequency -/// of occurences. +/// of occurrences. /// The type signature for the `N` parameter is a bit complex, but it /// roughly corresponds to a type that can be summed, compared, subtracted and sampled, which are /// the operations we require for the implementation. -// todo: switch to a simpler type signature that can accomodate all integer and float types, which +// todo: switch to a simpler type signature that can accommodate all integer and float types, which // should be enough for our purposes. pub(crate) fn frequency< 'a, @@ -61,7 +61,7 @@ pub(crate) fn frequency< unreachable!() } -/// one_of is a helper function for composing different generators with equal probability of occurence. +/// one_of is a helper function for composing different generators with equal probability of occurrence. 
pub(crate) fn one_of<'a, T, R: Rng>(choices: Vec T + 'a>>, rng: &mut R) -> T { let index = rng.gen_range(0..choices.len()); choices[index](rng) diff --git a/simulator/main.rs b/simulator/main.rs index 2eb463529..e8a5b34cf 100644 --- a/simulator/main.rs +++ b/simulator/main.rs @@ -429,7 +429,7 @@ fn setup_simulation( let mut env = SimulatorEnv::new(seed, cli_opts, db_path); // todo: the loading works correctly because of a hacky decision - // Rigth now, the plan generation is the only point we use the rng, so the environment doesn't + // Right now, the plan generation is the only point we use the rng, so the environment doesn't // even need it. In the future, especially with multi-connections and multi-threading, we might // use the RNG for more things such as scheduling, so this assumption will fail. When that happens, // we'll need to reachitect this logic by saving and loading RNG state. diff --git a/simulator/runner/cli.rs b/simulator/runner/cli.rs index 93a14849f..aa3697b27 100644 --- a/simulator/runner/cli.rs +++ b/simulator/runner/cli.rs @@ -64,7 +64,7 @@ impl SimulatorCLI { return Err("Minimum size cannot be greater than maximum size".to_string()); } - // Make sure uncompatible options are not set + // Make sure incompatible options are not set if self.shrink && self.doublecheck { return Err("Cannot use shrink and doublecheck at the same time".to_string()); } diff --git a/simulator/runner/env.rs b/simulator/runner/env.rs index 2813b80e8..83f5180d3 100644 --- a/simulator/runner/env.rs +++ b/simulator/runner/env.rs @@ -43,7 +43,7 @@ impl SimulatorEnv { let opts = SimulatorOpts { ticks: rng.gen_range(cli_opts.minimum_size..=cli_opts.maximum_size), max_connections: 1, // TODO: for now let's use one connection as we didn't implement - // correct transactions procesing + // correct transactions processing max_tables: rng.gen_range(0..128), create_percent, read_percent, diff --git a/testing/json.test b/testing/json.test index 0bfec6247..7a1890369 100755 --- 
a/testing/json.test +++ b/testing/json.test @@ -759,7 +759,7 @@ do_execsql_test json-patch-add-all-dup-keys-from-patch { '{"z":{}, "z":5, "z":100}' ); } {{{"x":100,"x":200,"z":100}}} -do_execsql_test json-patch-first-occurance-patch { +do_execsql_test json-patch-first-occurrence-patch { select json_patch('{"x":100,"x":200}','{"x":{}, "x":5, "x":100}'); } {{{"x":100,"x":200}}} do_execsql_test json-patch-complex-nested-dup-keys { diff --git a/testing/scalar-functions.test b/testing/scalar-functions.test index f04fa1765..77694863c 100755 --- a/testing/scalar-functions.test +++ b/testing/scalar-functions.test @@ -407,7 +407,7 @@ do_execsql_test length-text { SELECT length('limbo'); } {5} -do_execsql_test lenght-text-utf8-chars { +do_execsql_test length-text-utf8-chars { SELECT length('ąłóżźć'); } {6} @@ -431,7 +431,7 @@ do_execsql_test octet-length-text { SELECT length('limbo'); } {5} -do_execsql_test octet-lenght-text-utf8-chars { +do_execsql_test octet-length-text-utf8-chars { SELECT octet_length('ąłóżźć'); } {12} diff --git a/tests/integration/fuzz/mod.rs b/tests/integration/fuzz/mod.rs index 8179dfde4..af7ae61e9 100644 --- a/tests/integration/fuzz/mod.rs +++ b/tests/integration/fuzz/mod.rs @@ -251,7 +251,7 @@ mod tests { .option_w(bin_op, 1.0) .option_w(paren, 1.0) .option_w(scalar, 1.0) - // unfortunatelly, sqlite behaves weirdly when IS operator is used with TRUE/FALSE constants + // unfortunately, sqlite behaves weirdly when IS operator is used with TRUE/FALSE constants // e.g. 
8 IS TRUE == 1 (although 8 = TRUE == 0) // so, we do not use TRUE/FALSE constants as they will produce diff with sqlite results .options_str(["1", "0", "NULL", "2.0", "1.5", "-0.5", "-2.0", "(1 / 0)"]) diff --git a/vendored/sqlite3-parser/src/parser/ast/mod.rs b/vendored/sqlite3-parser/src/parser/ast/mod.rs index 3ea9d5992..3db759eed 100644 --- a/vendored/sqlite3-parser/src/parser/ast/mod.rs +++ b/vendored/sqlite3-parser/src/parser/ast/mod.rs @@ -1691,7 +1691,7 @@ pub struct TriggerCmdInsert { pub col_names: Option, /// `SELECT` or `VALUES` pub select: Box