Add clippy CI, fix or ignore warnings where appropriate

This commit is contained in:
PThorpe92
2024-12-27 18:17:06 -05:00
parent f87dc7cacc
commit f6cd707544
27 changed files with 596 additions and 585 deletions

View File

@@ -39,6 +39,12 @@ jobs:
run: cargo test --verbose
timeout-minutes: 5
clippy:
runs-on: ubuntu-latest
steps:
- name: Clippy
run: cargo clippy -- -A clippy::all -W clippy::correctness -W clippy::perf -W clippy::suspicious --deny=warnings
build-wasm:
runs-on: ubuntu-latest
steps:

View File

@@ -5,7 +5,7 @@ use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
use wasm_bindgen::prelude::*;
#[allow(dead_code)]
#[wasm_bindgen]
pub struct Database {
db: Arc<limbo_core::Database>,

View File

@@ -160,7 +160,7 @@ impl From<&Opts> for Settings {
null_value: String::new(),
output_mode: opts.output_mode,
echo: false,
is_stdout: opts.output == "",
is_stdout: opts.output.is_empty(),
output_filename: opts.output.clone(),
db_file: opts
.database
@@ -192,7 +192,6 @@ impl std::fmt::Display for Settings {
}
impl Limbo {
#[allow(clippy::arc_with_non_send_sync)]
pub fn new() -> anyhow::Result<Self> {
let opts = Opts::parse();
let db_file = opts
@@ -229,13 +228,13 @@ impl Limbo {
app.writeln("Enter \".help\" for usage hints.")?;
app.display_in_memory()?;
}
return Ok(app);
Ok(app)
}
fn handle_first_input(&mut self, cmd: &str) {
if cmd.trim().starts_with('.') {
self.handle_dot_command(&cmd);
} else if let Err(e) = self.query(&cmd) {
self.handle_dot_command(cmd);
} else if let Err(e) = self.query(cmd) {
eprintln!("{}", e);
}
std::process::exit(0);
@@ -293,7 +292,7 @@ impl Limbo {
let db = Database::open_file(self.io.clone(), path)?;
self.conn = db.connect();
self.opts.db_file = ":memory:".to_string();
return Ok(());
Ok(())
}
path => {
let io: Arc<dyn limbo_core::IO> = Arc::new(limbo_core::PlatformIO::new()?);
@@ -301,7 +300,7 @@ impl Limbo {
let db = Database::open_file(self.io.clone(), path)?;
self.conn = db.connect();
self.opts.db_file = path.to_string();
return Ok(());
Ok(())
}
}
}
@@ -317,11 +316,9 @@ impl Limbo {
self.opts.is_stdout = false;
self.opts.output_mode = OutputMode::Raw;
self.opts.output_filename = path.to_string();
return Ok(());
}
Err(e) => {
return Err(e.to_string());
Ok(())
}
Err(e) => Err(e.to_string()),
}
}
@@ -333,7 +330,7 @@ impl Limbo {
fn set_mode(&mut self, mode: OutputMode) -> Result<(), String> {
if mode == OutputMode::Pretty && !self.opts.is_stdout {
return Err("pretty output can only be written to a tty".to_string());
Err("pretty output can only be written to a tty".to_string())
} else {
self.opts.output_mode = mode;
Ok(())
@@ -682,10 +679,9 @@ impl Limbo {
}
}
if tables.len() > 0 {
if !tables.is_empty() {
let _ = self.writeln(tables.trim_end());
} else {
if let Some(pattern) = pattern {
} else if let Some(pattern) = pattern {
let _ = self.write_fmt(format_args!(
"Error: Tables with pattern '{}' not found.",
pattern
@@ -694,7 +690,6 @@ impl Limbo {
let _ = self.writeln("No tables found in the database.");
}
}
}
Ok(None) => {
let _ = self.writeln("No results returned from the query.");
}

View File

@@ -1,10 +1,10 @@
#![allow(clippy::arc_with_non_send_sync)]
mod app;
mod opcodes_dictionary;
use rustyline::{error::ReadlineError, DefaultEditor};
use std::sync::atomic::Ordering;
#[allow(clippy::arc_with_non_send_sync)]
fn main() -> anyhow::Result<()> {
env_logger::init();
let mut app = app::Limbo::new()?;

View File

@@ -9,6 +9,7 @@ pub enum ExtFunc {
Uuid(UuidFunc),
}
#[allow(unreachable_patterns)] // TODO: remove when more extension funcs added
impl std::fmt::Display for ExtFunc {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
@@ -19,6 +20,7 @@ impl std::fmt::Display for ExtFunc {
}
}
#[allow(unreachable_patterns)]
impl ExtFunc {
pub fn resolve_function(name: &str, num_args: usize) -> Option<ExtFunc> {
match name {

View File

@@ -69,8 +69,8 @@ pub struct Database {
header: Rc<RefCell<DatabaseHeader>>,
// Shared structures of a Database are the parts that are common to multiple threads that might
// create DB connections.
shared_page_cache: Arc<RwLock<DumbLruPageCache>>,
shared_wal: Arc<RwLock<WalFileShared>>,
_shared_page_cache: Arc<RwLock<DumbLruPageCache>>,
_shared_wal: Arc<RwLock<WalFileShared>>,
}
impl Database {
@@ -96,6 +96,7 @@ impl Database {
Self::open(io, page_io, wal, wal_shared, buffer_pool)
}
#[allow(clippy::arc_with_non_send_sync)]
pub fn open(
io: Arc<dyn IO>,
page_io: Rc<dyn DatabaseStorage>,
@@ -109,13 +110,13 @@ impl Database {
let version = db_header.borrow().version_number;
version.to_string()
});
let shared_page_cache = Arc::new(RwLock::new(DumbLruPageCache::new(10)));
let _shared_page_cache = Arc::new(RwLock::new(DumbLruPageCache::new(10)));
let pager = Rc::new(Pager::finish_open(
db_header.clone(),
page_io,
wal,
io.clone(),
shared_page_cache.clone(),
_shared_page_cache.clone(),
buffer_pool,
)?);
let bootstrap_schema = Rc::new(RefCell::new(Schema::new()));
@@ -124,7 +125,7 @@ impl Database {
schema: bootstrap_schema.clone(),
header: db_header.clone(),
transaction_state: RefCell::new(TransactionState::None),
db: Weak::new(),
_db: Weak::new(),
last_insert_rowid: Cell::new(0),
});
let mut schema = Schema::new();
@@ -136,8 +137,8 @@ impl Database {
pager,
schema,
header,
shared_page_cache,
shared_wal,
_shared_page_cache,
_shared_wal: shared_wal,
}))
}
@@ -147,7 +148,7 @@ impl Database {
schema: self.schema.clone(),
header: self.header.clone(),
last_insert_rowid: Cell::new(0),
db: Arc::downgrade(self),
_db: Arc::downgrade(self),
transaction_state: RefCell::new(TransactionState::None),
})
}
@@ -206,7 +207,7 @@ pub struct Connection {
pager: Rc<Pager>,
schema: Rc<RefCell<Schema>>,
header: Rc<RefCell<DatabaseHeader>>,
db: Weak<Database>, // backpointer to the database holding this connection
_db: Weak<Database>, // backpointer to the database holding this connection
transaction_state: RefCell<TransactionState>,
last_insert_rowid: Cell<u64>,
}
@@ -269,7 +270,7 @@ impl Connection {
Cmd::ExplainQueryPlan(stmt) => {
match stmt {
ast::Stmt::Select(select) => {
let plan = prepare_select_plan(&*self.schema.borrow(), select)?;
let plan = prepare_select_plan(&self.schema.borrow(), select)?;
let plan = optimize_plan(plan)?;
println!("{}", plan);
}

View File

@@ -10,11 +10,12 @@
//! for reading and writing pages to the database file, either local or
//! remote. The `Wal` struct is responsible for managing the write-ahead log
//! for the database, also either local or remote.
pub(crate) mod btree;
pub(crate) mod buffer_pool;
pub(crate) mod database;
pub(crate) mod page_cache;
#[allow(clippy::arc_with_non_send_sync)]
pub(crate) mod pager;
pub(crate) mod sqlite3_ondisk;
#[allow(clippy::arc_with_non_send_sync)]
pub(crate) mod wal;

View File

@@ -50,6 +50,7 @@ impl Page {
}
}
#[allow(clippy::mut_from_ref)]
pub fn get(&self) -> &mut PageInner {
unsafe { &mut *self.inner.get() }
}
@@ -423,7 +424,7 @@ impl Pager {
CheckpointMode::Passive,
) {
Ok(CheckpointStatus::IO) => {
self.io.run_once();
let _ = self.io.run_once();
}
Ok(CheckpointStatus::Done) => {
break;

View File

@@ -1329,7 +1329,7 @@ pub fn payload_overflows(
/// The 32-bit integers are big-endian if the magic number in the first 4 bytes of the WAL header is 0x377f0683
/// and the integers are little-endian if the magic number is 0x377f0682.
/// The checksum values are always stored in the frame header in a big-endian format regardless of which byte order is used to compute the checksum.
///
/// The checksum algorithm only works for content which is a multiple of 8 bytes in length.
/// In other words, if the inputs are x(0) through x(N) then N must be odd.
/// The checksum algorithm is as follows:

View File

@@ -195,6 +195,7 @@ struct OngoingCheckpoint {
current_page: u64,
}
#[allow(dead_code)]
pub struct WalFile {
io: Arc<dyn crate::io::IO>,
buffer_pool: Rc<BufferPool>,
@@ -219,6 +220,7 @@ pub struct WalFile {
// TODO(pere): lock only important parts + pin WalFileShared
/// WalFileShared is the part of a WAL that will be shared between threads. A wal has information
/// that needs to be communicated between threads so this struct does the job.
#[allow(dead_code)]
pub struct WalFileShared {
wal_header: Arc<RwLock<sqlite3_ondisk::WalHeader>>,
min_frame: u64,

View File

@@ -13,7 +13,7 @@ use crate::translate::plan::{DeletePlan, IterationDirection, Plan, Search};
use crate::types::{OwnedRecord, OwnedValue};
use crate::util::exprs_are_equivalent;
use crate::vdbe::builder::ProgramBuilder;
use crate::vdbe::{BranchOffset, Insn, Program};
use crate::vdbe::{insn::Insn, BranchOffset, Program};
use crate::{Connection, Result};
use super::expr::{
@@ -102,6 +102,7 @@ pub struct Metadata {
}
/// Used to distinguish database operations
#[allow(clippy::upper_case_acronyms, dead_code)]
#[derive(Debug, Clone)]
pub enum OperationMode {
SELECT,
@@ -173,7 +174,7 @@ fn epilogue(
/// Takes a query plan and generates the corresponding bytecode program
pub fn emit_program(
database_header: Rc<RefCell<DatabaseHeader>>,
mut plan: Plan,
plan: Plan,
connection: Weak<Connection>,
) -> Result<Program> {
match plan {

View File

@@ -7,7 +7,7 @@ use crate::function::JsonFunc;
use crate::function::{AggFunc, Func, FuncCtx, MathFuncArity, ScalarFunc};
use crate::schema::Type;
use crate::util::{exprs_are_equivalent, normalize_ident};
use crate::vdbe::{builder::ProgramBuilder, BranchOffset, Insn};
use crate::vdbe::{builder::ProgramBuilder, insn::Insn, BranchOffset};
use crate::Result;
use super::plan::{Aggregate, BTreeTableReference};
@@ -1748,6 +1748,7 @@ pub fn translate_expr(
Ok(target_register)
}
},
#[allow(unreachable_patterns)]
_ => unreachable!("{ext_func} not implemented yet"),
},
Func::Math(math_func) => match math_func.arity() {

View File

@@ -11,7 +11,7 @@ use crate::{
schema::{Column, Schema, Table},
storage::sqlite3_ondisk::DatabaseHeader,
translate::expr::translate_expr,
vdbe::{builder::ProgramBuilder, Insn, Program},
vdbe::{builder::ProgramBuilder, insn::Insn, Program},
};
use crate::{Connection, Result};

View File

@@ -16,21 +16,20 @@ pub(crate) mod plan;
pub(crate) mod planner;
pub(crate) mod select;
use std::cell::RefCell;
use std::fmt::Display;
use std::rc::{Rc, Weak};
use std::str::FromStr;
use crate::schema::Schema;
use crate::storage::pager::Pager;
use crate::storage::sqlite3_ondisk::{DatabaseHeader, MIN_PAGE_CACHE_SIZE};
use crate::translate::delete::translate_delete;
use crate::vdbe::{builder::ProgramBuilder, Insn, Program};
use crate::vdbe::{builder::ProgramBuilder, insn::Insn, Program};
use crate::{bail_parse_error, Connection, Result};
use insert::translate_insert;
use select::translate_select;
use sqlite3_parser::ast::fmt::ToTokens;
use sqlite3_parser::ast::{self, PragmaName};
use std::cell::RefCell;
use std::fmt::Display;
use std::rc::{Rc, Weak};
use std::str::FromStr;
/// Translate SQL statement into bytecode program.
pub fn translate(
@@ -71,13 +70,10 @@ pub fn translate(
bail_parse_error!("CREATE VIRTUAL TABLE not supported yet")
}
ast::Stmt::Delete {
with,
tbl_name,
indexed,
where_clause,
returning,
order_by,
limit,
..
} => translate_delete(
schema,
&tbl_name,

View File

@@ -9,7 +9,7 @@ use super::plan::{
DeletePlan, Direction, IterationDirection, Plan, Search, SelectPlan, SourceOperator,
};
pub fn optimize_plan(mut plan: Plan) -> Result<Plan> {
pub fn optimize_plan(plan: Plan) -> Result<Plan> {
match plan {
Plan::Select(plan) => optimize_select_plan(plan).map(Plan::Select),
Plan::Delete(plan) => optimize_delete_plan(plan).map(Plan::Delete),

View File

@@ -56,6 +56,7 @@ pub struct SelectPlan {
pub contains_constant_false_condition: bool,
}
#[allow(dead_code)]
#[derive(Debug)]
pub struct DeletePlan {
/// A tree of sources (tables).
@@ -205,6 +206,7 @@ pub struct BTreeTableReference {
/// An enum that represents a search operation that can be used to search for a row in a table using an index
/// (i.e. a primary key or a secondary index)
#[allow(clippy::enum_variant_names)]
#[derive(Clone, Debug)]
pub enum Search {
/// A rowid equality point lookup. This is a special case that uses the SeekRowid bytecode instruction and does not loop.
@@ -395,7 +397,7 @@ pub fn get_table_ref_bitmask_for_operator<'a>(
table_refs_mask |= 1
<< tables
.iter()
.position(|t| &t.table_identifier == &table_reference.table_identifier)
.position(|t| t.table_identifier == table_reference.table_identifier)
.unwrap();
}
SourceOperator::Search {
@@ -404,7 +406,7 @@ pub fn get_table_ref_bitmask_for_operator<'a>(
table_refs_mask |= 1
<< tables
.iter()
.position(|t| &t.table_identifier == &table_reference.table_identifier)
.position(|t| t.table_identifier == table_reference.table_identifier)
.unwrap();
}
SourceOperator::Nothing => {}
@@ -420,6 +422,7 @@ pub fn get_table_ref_bitmask_for_operator<'a>(
and predicate = "t1.a = t2.b"
then the return value will be (in bits): 011
*/
#[allow(clippy::only_used_in_recursion)]
pub fn get_table_ref_bitmask_for_ast_expr<'a>(
tables: &'a Vec<BTreeTableReference>,
predicate: &'a ast::Expr,

View File

@@ -15,7 +15,7 @@ pub fn normalize_ident(identifier: &str) -> String {
.iter()
.find(|&(start, end)| identifier.starts_with(*start) && identifier.ends_with(*end));
if let Some(&(start, end)) = quote_pair {
if let Some(&(_, _)) = quote_pair {
&identifier[1..identifier.len() - 1]
} else {
identifier

490
core/vdbe/insn.rs Normal file
View File

@@ -0,0 +1,490 @@
use super::{AggFunc, BranchOffset, CursorID, FuncCtx, PageIdx};
use crate::types::OwnedRecord;
use limbo_macros::Description;
/// A single virtual-machine instruction (bytecode opcode) for the VDBE.
///
/// Register/operand naming follows SQLite's opcode documentation
/// (https://www.sqlite.org/opcode.html): P1..P4 comments map fields to
/// SQLite's operand slots. `*Async`/`*Await` pairs split a potentially
/// I/O-bound operation into an initiation step and a completion step.
#[derive(Description, Debug)]
pub enum Insn {
    // Initialize the program state and jump to the given PC.
    Init {
        target_pc: BranchOffset,
    },
    // Write a NULL into register dest. If dest_end is Some, then also write NULL into register dest_end and every register in between dest and dest_end. If dest_end is not set, then only register dest is set to NULL.
    Null {
        dest: usize,
        dest_end: Option<usize>,
    },
    // Move the cursor P1 to a null row. Any Column operations that occur while the cursor is on the null row will always write a NULL.
    NullRow {
        cursor_id: CursorID,
    },
    // Add two registers and store the result in a third register.
    Add {
        lhs: usize,
        rhs: usize,
        dest: usize,
    },
    // Subtract rhs from lhs and store in dest
    Subtract {
        lhs: usize,
        rhs: usize,
        dest: usize,
    },
    // Multiply two registers and store the result in a third register.
    Multiply {
        lhs: usize,
        rhs: usize,
        dest: usize,
    },
    // Divide lhs by rhs and store the result in a third register.
    Divide {
        lhs: usize,
        rhs: usize,
        dest: usize,
    },
    // Compare two vectors of registers in reg(P1)..reg(P1+P3-1) (call this vector "A") and in reg(P2)..reg(P2+P3-1) ("B"). Save the result of the comparison for use by the next Jump instruct.
    Compare {
        start_reg_a: usize,
        start_reg_b: usize,
        count: usize,
    },
    // Place the result of rhs bitwise AND lhs in third register.
    BitAnd {
        lhs: usize,
        rhs: usize,
        dest: usize,
    },
    // Place the result of rhs bitwise OR lhs in third register.
    BitOr {
        lhs: usize,
        rhs: usize,
        dest: usize,
    },
    // Place the result of bitwise NOT register P1 in dest register.
    BitNot {
        reg: usize,
        dest: usize,
    },
    // Divide lhs by rhs and place the remainder in dest register.
    Remainder {
        lhs: usize,
        rhs: usize,
        dest: usize,
    },
    // Jump to the instruction at address P1, P2, or P3 depending on whether in the most recent Compare instruction the P1 vector was less than, equal to, or greater than the P2 vector, respectively.
    Jump {
        target_pc_lt: BranchOffset,
        target_pc_eq: BranchOffset,
        target_pc_gt: BranchOffset,
    },
    // Move the P3 values in register P1..P1+P3-1 over into registers P2..P2+P3-1. Registers P1..P1+P3-1 are left holding a NULL. It is an error for register ranges P1..P1+P3-1 and P2..P2+P3-1 to overlap. It is an error for P3 to be less than 1.
    Move {
        source_reg: usize,
        dest_reg: usize,
        count: usize,
    },
    // If the given register is a positive integer, decrement it by decrement_by and jump to the given PC.
    IfPos {
        reg: usize,
        target_pc: BranchOffset,
        decrement_by: usize,
    },
    // If the given register is not NULL, jump to the given PC.
    NotNull {
        reg: usize,
        target_pc: BranchOffset,
    },
    // Compare two registers and jump to the given PC if they are equal.
    Eq {
        lhs: usize,
        rhs: usize,
        target_pc: BranchOffset,
    },
    // Compare two registers and jump to the given PC if they are not equal.
    Ne {
        lhs: usize,
        rhs: usize,
        target_pc: BranchOffset,
    },
    // Compare two registers and jump to the given PC if the left-hand side is less than the right-hand side.
    Lt {
        lhs: usize,
        rhs: usize,
        target_pc: BranchOffset,
    },
    // Compare two registers and jump to the given PC if the left-hand side is less than or equal to the right-hand side.
    Le {
        lhs: usize,
        rhs: usize,
        target_pc: BranchOffset,
    },
    // Compare two registers and jump to the given PC if the left-hand side is greater than the right-hand side.
    Gt {
        lhs: usize,
        rhs: usize,
        target_pc: BranchOffset,
    },
    // Compare two registers and jump to the given PC if the left-hand side is greater than or equal to the right-hand side.
    Ge {
        lhs: usize,
        rhs: usize,
        target_pc: BranchOffset,
    },
    /// Jump to target_pc if r\[reg\] != 0 or (r\[reg\] == NULL && r\[null_reg\] != 0)
    If {
        reg: usize,              // P1
        target_pc: BranchOffset, // P2
        /// P3. If r\[reg\] is null, jump iff r\[null_reg\] != 0
        null_reg: usize,
    },
    /// Jump to target_pc if r\[reg\] == 0 or (r\[reg\] == NULL && r\[null_reg\] != 0)
    IfNot {
        reg: usize,              // P1
        target_pc: BranchOffset, // P2
        /// P3. If r\[reg\] is null, jump iff r\[null_reg\] != 0
        null_reg: usize,
    },
    // Open a cursor for reading.
    OpenReadAsync {
        cursor_id: CursorID,
        root_page: PageIdx,
    },
    // Await for the completion of open cursor.
    OpenReadAwait,
    // Open a cursor for a pseudo-table that contains a single row.
    OpenPseudo {
        cursor_id: CursorID,
        content_reg: usize,
        num_fields: usize,
    },
    // Rewind the cursor to the beginning of the B-Tree.
    RewindAsync {
        cursor_id: CursorID,
    },
    // Await for the completion of cursor rewind.
    RewindAwait {
        cursor_id: CursorID,
        pc_if_empty: BranchOffset,
    },
    // Move the cursor to the last row of the B-Tree.
    LastAsync {
        cursor_id: CursorID,
    },
    // Await for the completion of the cursor's seek-to-last; jump if the B-Tree is empty.
    LastAwait {
        cursor_id: CursorID,
        pc_if_empty: BranchOffset,
    },
    // Read a column from the current row of the cursor.
    Column {
        cursor_id: CursorID,
        column: usize,
        dest: usize,
    },
    // Make a record and write it to destination register.
    MakeRecord {
        start_reg: usize, // P1
        count: usize,     // P2
        dest_reg: usize,  // P3
    },
    // Emit a row of results.
    ResultRow {
        start_reg: usize, // P1
        count: usize,     // P2
    },
    // Advance the cursor to the next row.
    NextAsync {
        cursor_id: CursorID,
    },
    // Await for the completion of cursor advance.
    NextAwait {
        cursor_id: CursorID,
        pc_if_next: BranchOffset,
    },
    // Move the cursor to the previous row.
    PrevAsync {
        cursor_id: CursorID,
    },
    // Await for the completion of the cursor's move-to-previous; jump if a row is available.
    PrevAwait {
        cursor_id: CursorID,
        pc_if_next: BranchOffset,
    },
    // Halt the program.
    Halt {
        err_code: usize,
        description: String,
    },
    // Start a transaction.
    Transaction {
        write: bool,
    },
    // Branch to the given PC.
    Goto {
        target_pc: BranchOffset,
    },
    // Stores the current program counter into register 'return_reg' then jumps to address target_pc.
    Gosub {
        target_pc: BranchOffset,
        return_reg: usize,
    },
    // Returns to the program counter stored in register 'return_reg'.
    Return {
        return_reg: usize,
    },
    // Write an integer value into a register.
    Integer {
        value: i64,
        dest: usize,
    },
    // Write a float value into a register
    Real {
        value: f64,
        dest: usize,
    },
    // If register holds an integer, transform it to a float
    RealAffinity {
        register: usize,
    },
    // Write a string value into a register.
    String8 {
        value: String,
        dest: usize,
    },
    // Write a blob value into a register.
    Blob {
        value: Vec<u8>,
        dest: usize,
    },
    // Read the rowid of the current row.
    RowId {
        cursor_id: CursorID,
        dest: usize,
    },
    // Seek to a rowid in the cursor. If not found, jump to the given PC. Otherwise, continue to the next instruction.
    SeekRowid {
        cursor_id: CursorID,
        src_reg: usize,
        target_pc: BranchOffset,
    },
    // P1 is an open index cursor and P3 is a cursor on the corresponding table. This opcode does a deferred seek of the P3 table cursor to the row that corresponds to the current row of P1.
    // This is a deferred seek. Nothing actually happens until the cursor is used to read a record. That way, if no reads occur, no unnecessary I/O happens.
    DeferredSeek {
        index_cursor_id: CursorID,
        table_cursor_id: CursorID,
    },
    // If cursor_id refers to an SQL table (B-Tree that uses integer keys), use the value in start_reg as the key.
    // If cursor_id refers to an SQL index, then start_reg is the first in an array of num_regs registers that are used as an unpacked index key.
    // Seek to the first index entry that is greater than or equal to the given key. If not found, jump to the given PC. Otherwise, continue to the next instruction.
    SeekGE {
        is_index: bool,
        cursor_id: CursorID,
        start_reg: usize,
        num_regs: usize,
        target_pc: BranchOffset,
    },
    // If cursor_id refers to an SQL table (B-Tree that uses integer keys), use the value in start_reg as the key.
    // If cursor_id refers to an SQL index, then start_reg is the first in an array of num_regs registers that are used as an unpacked index key.
    // Seek to the first index entry that is greater than the given key. If not found, jump to the given PC. Otherwise, continue to the next instruction.
    SeekGT {
        is_index: bool,
        cursor_id: CursorID,
        start_reg: usize,
        num_regs: usize,
        target_pc: BranchOffset,
    },
    // The P4 register values beginning with P3 form an unpacked index key that omits the PRIMARY KEY. Compare this key value against the index that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID fields at the end.
    // If the P1 index entry is greater or equal than the key value then jump to P2. Otherwise fall through to the next instruction.
    IdxGE {
        cursor_id: CursorID,
        start_reg: usize,
        num_regs: usize,
        target_pc: BranchOffset,
    },
    // The P4 register values beginning with P3 form an unpacked index key that omits the PRIMARY KEY. Compare this key value against the index that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID fields at the end.
    // If the P1 index entry is greater than the key value then jump to P2. Otherwise fall through to the next instruction.
    IdxGT {
        cursor_id: CursorID,
        start_reg: usize,
        num_regs: usize,
        target_pc: BranchOffset,
    },
    // Decrement the given register and jump to the given PC if the result is zero.
    DecrJumpZero {
        reg: usize,
        target_pc: BranchOffset,
    },
    // Feed one row's value into an aggregate accumulator register.
    // NOTE(review): `col` and `delimiter` usage depends on the aggregate (e.g. group_concat) — confirm against the executor.
    AggStep {
        acc_reg: usize,
        col: usize,
        delimiter: usize,
        func: AggFunc,
    },
    // Finalize an aggregate: replace the accumulator in `register` with the aggregate's final value.
    AggFinal {
        register: usize,
        func: AggFunc,
    },
    // Open a sorter.
    SorterOpen {
        cursor_id: CursorID, // P1
        columns: usize,      // P2
        order: OwnedRecord,  // P4. 0 if ASC and 1 if DESC
    },
    // Insert a row into the sorter.
    SorterInsert {
        cursor_id: CursorID,
        record_reg: usize,
    },
    // Sort the rows in the sorter.
    SorterSort {
        cursor_id: CursorID,
        pc_if_empty: BranchOffset,
    },
    // Retrieve the next row from the sorter.
    SorterData {
        cursor_id: CursorID,  // P1
        dest_reg: usize,      // P2
        pseudo_cursor: usize, // P3
    },
    // Advance to the next row in the sorter.
    SorterNext {
        cursor_id: CursorID,
        pc_if_next: BranchOffset,
    },
    // Function
    Function {
        constant_mask: i32, // P1
        start_reg: usize,   // P2, start of argument registers
        dest: usize,        // P3
        func: FuncCtx,      // P4
    },
    // Set up a coroutine: record its entry point and jump past its body.
    InitCoroutine {
        yield_reg: usize,
        jump_on_definition: BranchOffset,
        start_offset: BranchOffset,
    },
    // Mark the coroutine identified by yield_reg as finished.
    EndCoroutine {
        yield_reg: usize,
    },
    // Swap the program counter with the value in yield_reg, suspending/resuming the coroutine.
    Yield {
        yield_reg: usize,
        end_offset: BranchOffset,
    },
    // Insert a record into the table pointed to by the cursor.
    InsertAsync {
        cursor: CursorID,
        key_reg: usize,    // Must be int.
        record_reg: usize, // Blob of record data.
        flag: usize,       // Flags used by insert, for now not used.
    },
    // Await for the completion of an insert.
    InsertAwait {
        cursor_id: usize,
    },
    // Delete the current row of the cursor.
    DeleteAsync {
        cursor_id: CursorID,
    },
    // Await for the completion of a delete.
    DeleteAwait {
        cursor_id: CursorID,
    },
    // Allocate a fresh rowid for the cursor's table and store it in rowid_reg.
    NewRowid {
        cursor: CursorID,        // P1
        rowid_reg: usize,        // P2 Destination register to store the new rowid
        prev_largest_reg: usize, // P3 Previous largest rowid in the table (Not used for now)
    },
    // Require that the value in `reg` is (or is converted to) an integer.
    // NOTE(review): failure behavior (halt vs. error code) defined by the executor — confirm.
    MustBeInt {
        reg: usize,
    },
    // Set register `reg` to NULL (a "soft" NULL, as in SQLite's SoftNull — confirm exact semantics).
    SoftNull {
        reg: usize,
    },
    // Jump to target_pc if no row with rowid r[rowid_reg] exists in the cursor's table.
    NotExists {
        cursor: CursorID,
        rowid_reg: usize,
        target_pc: BranchOffset,
    },
    // Open a cursor for writing.
    OpenWriteAsync {
        cursor_id: CursorID,
        root_page: PageIdx,
    },
    // Await for the completion of open write cursor.
    OpenWriteAwait {},
    // Copy a range of registers from src_reg to dst_reg.
    Copy {
        src_reg: usize,
        dst_reg: usize,
        amount: usize, // 0 amount means we include src_reg, dst_reg..=dst_reg+amount = src_reg..=src_reg+amount
    },
    /// Allocate a new b-tree.
    CreateBtree {
        /// Allocate b-tree in main database if zero or in temp database if non-zero (P1).
        db: usize,
        /// The root page of the new b-tree (P2).
        root: usize,
        /// Flags (P3).
        flags: usize,
    },
    /// Close a cursor.
    Close {
        cursor_id: CursorID,
    },
    /// Check if the register is null.
    IsNull {
        /// Source register (P1).
        src: usize,
        /// Jump to this PC if the register is null (P2).
        target_pc: BranchOffset,
    },
    // Re-read schema rows (filtered by where_clause) to refresh the in-memory schema.
    // NOTE(review): exact refresh scope is defined by the executor — confirm.
    ParseSchema {
        db: usize,
        where_clause: String,
    },
}

View File

@@ -18,12 +18,12 @@
//! https://www.sqlite.org/opcode.html
pub mod builder;
mod datetime;
pub mod explain;
pub mod insn;
pub mod likeop;
pub mod sorter;
mod datetime;
use crate::error::{LimboError, SQLITE_CONSTRAINT_PRIMARYKEY};
#[cfg(feature = "uuid")]
use crate::ext::{exec_ts_from_uuid7, exec_uuid, exec_uuidblob, exec_uuidstr, ExtFunc, UuidFunc};
@@ -37,15 +37,12 @@ use crate::types::{
AggContext, Cursor, CursorResult, OwnedRecord, OwnedValue, Record, SeekKey, SeekOp,
};
use crate::util::parse_schema_rows;
use crate::vdbe::insn::Insn;
#[cfg(feature = "json")]
use crate::{function::JsonFunc, json::get_json, json::json_array, json::json_array_length};
use crate::{Connection, Result, TransactionState};
use crate::{Rows, DATABASE_VERSION};
use likeop::{construct_like_escape_arg, exec_like_with_escape};
use limbo_macros::Description;
use crate::{Connection, Result, Rows, TransactionState, DATABASE_VERSION};
use datetime::{exec_date, exec_time, exec_unixepoch};
use likeop::{construct_like_escape_arg, exec_like_with_escape};
use rand::distributions::{Distribution, Uniform};
use rand::{thread_rng, Rng};
use regex::{Regex, RegexBuilder};
@@ -59,493 +56,6 @@ pub type CursorID = usize;
pub type PageIdx = usize;
#[derive(Description, Debug)]
pub enum Insn {
// Initialize the program state and jump to the given PC.
Init {
target_pc: BranchOffset,
},
// Write a NULL into register dest. If dest_end is Some, then also write NULL into register dest_end and every register in between dest and dest_end. If dest_end is not set, then only register dest is set to NULL.
Null {
dest: usize,
dest_end: Option<usize>,
},
// Move the cursor P1 to a null row. Any Column operations that occur while the cursor is on the null row will always write a NULL.
NullRow {
cursor_id: CursorID,
},
// Add two registers and store the result in a third register.
Add {
lhs: usize,
rhs: usize,
dest: usize,
},
// Subtract rhs from lhs and store in dest
Subtract {
lhs: usize,
rhs: usize,
dest: usize,
},
// Multiply two registers and store the result in a third register.
Multiply {
lhs: usize,
rhs: usize,
dest: usize,
},
// Divide lhs by rhs and store the result in a third register.
Divide {
lhs: usize,
rhs: usize,
dest: usize,
},
// Compare two vectors of registers in reg(P1)..reg(P1+P3-1) (call this vector "A") and in reg(P2)..reg(P2+P3-1) ("B"). Save the result of the comparison for use by the next Jump instruct.
Compare {
start_reg_a: usize,
start_reg_b: usize,
count: usize,
},
// Place the result of rhs bitwise AND lhs in third register.
BitAnd {
lhs: usize,
rhs: usize,
dest: usize,
},
// Place the result of rhs bitwise OR lhs in third register.
BitOr {
lhs: usize,
rhs: usize,
dest: usize,
},
// Place the result of bitwise NOT register P1 in dest register.
BitNot {
reg: usize,
dest: usize,
},
// Divide lhs by rhs and place the remainder in dest register.
Remainder {
lhs: usize,
rhs: usize,
dest: usize,
},
// Jump to the instruction at address P1, P2, or P3 depending on whether in the most recent Compare instruction the P1 vector was less than, equal to, or greater than the P2 vector, respectively.
Jump {
target_pc_lt: BranchOffset,
target_pc_eq: BranchOffset,
target_pc_gt: BranchOffset,
},
// Move the P3 values in register P1..P1+P3-1 over into registers P2..P2+P3-1. Registers P1..P1+P3-1 are left holding a NULL. It is an error for register ranges P1..P1+P3-1 and P2..P2+P3-1 to overlap. It is an error for P3 to be less than 1.
Move {
source_reg: usize,
dest_reg: usize,
count: usize,
},
// If the given register is a positive integer, decrement it by decrement_by and jump to the given PC.
IfPos {
reg: usize,
target_pc: BranchOffset,
decrement_by: usize,
},
// If the given register is not NULL, jump to the given PC.
NotNull {
reg: usize,
target_pc: BranchOffset,
},
// Compare two registers and jump to the given PC if they are equal.
Eq {
lhs: usize,
rhs: usize,
target_pc: BranchOffset,
},
// Compare two registers and jump to the given PC if they are not equal.
Ne {
lhs: usize,
rhs: usize,
target_pc: BranchOffset,
},
// Compare two registers and jump to the given PC if the left-hand side is less than the right-hand side.
Lt {
lhs: usize,
rhs: usize,
target_pc: BranchOffset,
},
// Compare two registers and jump to the given PC if the left-hand side is less than or equal to the right-hand side.
Le {
lhs: usize,
rhs: usize,
target_pc: BranchOffset,
},
// Compare two registers and jump to the given PC if the left-hand side is greater than the right-hand side.
Gt {
lhs: usize,
rhs: usize,
target_pc: BranchOffset,
},
// Compare two registers and jump to the given PC if the left-hand side is greater than or equal to the right-hand side.
Ge {
lhs: usize,
rhs: usize,
target_pc: BranchOffset,
},
/// Jump to target_pc if r\[reg\] != 0 or (r\[reg\] == NULL && r\[null_reg\] != 0)
If {
reg: usize, // P1
target_pc: BranchOffset, // P2
/// P3. If r\[reg\] is null, jump iff r\[null_reg\] != 0
null_reg: usize,
},
/// Jump to target_pc if r\[reg\] != 0 or (r\[reg\] == NULL && r\[null_reg\] != 0)
IfNot {
reg: usize, // P1
target_pc: BranchOffset, // P2
/// P3. If r\[reg\] is null, jump iff r\[null_reg\] != 0
null_reg: usize,
},
// Open a cursor for reading.
OpenReadAsync {
cursor_id: CursorID,
root_page: PageIdx,
},
// Await for the completion of open cursor.
OpenReadAwait,
// Open a cursor for a pseudo-table that contains a single row.
OpenPseudo {
cursor_id: CursorID,
content_reg: usize,
num_fields: usize,
},
// Rewind the cursor to the beginning of the B-Tree.
RewindAsync {
cursor_id: CursorID,
},
// Await for the completion of cursor rewind.
RewindAwait {
cursor_id: CursorID,
pc_if_empty: BranchOffset,
},
LastAsync {
cursor_id: CursorID,
},
LastAwait {
cursor_id: CursorID,
pc_if_empty: BranchOffset,
},
// Read a column from the current row of the cursor.
Column {
cursor_id: CursorID,
column: usize,
dest: usize,
},
// Make a record and write it to destination register.
MakeRecord {
start_reg: usize, // P1
count: usize, // P2
dest_reg: usize, // P3
},
// Emit a row of results.
ResultRow {
start_reg: usize, // P1
count: usize, // P2
},
// Advance the cursor to the next row.
NextAsync {
cursor_id: CursorID,
},
// Await for the completion of cursor advance.
NextAwait {
cursor_id: CursorID,
pc_if_next: BranchOffset,
},
PrevAsync {
cursor_id: CursorID,
},
PrevAwait {
cursor_id: CursorID,
pc_if_next: BranchOffset,
},
// Halt the program.
Halt {
err_code: usize,
description: String,
},
// Start a transaction.
Transaction {
write: bool,
},
// Branch to the given PC.
Goto {
target_pc: BranchOffset,
},
// Stores the current program counter into register 'return_reg' then jumps to address target_pc.
Gosub {
target_pc: BranchOffset,
return_reg: usize,
},
// Returns to the program counter stored in register 'return_reg'.
Return {
return_reg: usize,
},
// Write an integer value into a register.
Integer {
value: i64,
dest: usize,
},
// Write a float value into a register
Real {
value: f64,
dest: usize,
},
// If register holds an integer, transform it to a float
RealAffinity {
register: usize,
},
// Write a string value into a register.
String8 {
value: String,
dest: usize,
},
// Write a blob value into a register.
Blob {
value: Vec<u8>,
dest: usize,
},
// Read the rowid of the current row.
RowId {
cursor_id: CursorID,
dest: usize,
},
// Seek to a rowid in the cursor. If not found, jump to the given PC. Otherwise, continue to the next instruction.
SeekRowid {
cursor_id: CursorID,
src_reg: usize,
target_pc: BranchOffset,
},
// P1 is an open index cursor and P3 is a cursor on the corresponding table. This opcode does a deferred seek of the P3 table cursor to the row that corresponds to the current row of P1.
// This is a deferred seek. Nothing actually happens until the cursor is used to read a record. That way, if no reads occur, no unnecessary I/O happens.
DeferredSeek {
index_cursor_id: CursorID,
table_cursor_id: CursorID,
},
// If cursor_id refers to an SQL table (B-Tree that uses integer keys), use the value in start_reg as the key.
// If cursor_id refers to an SQL index, then start_reg is the first in an array of num_regs registers that are used as an unpacked index key.
// Seek to the first index entry that is greater than or equal to the given key. If not found, jump to the given PC. Otherwise, continue to the next instruction.
SeekGE {
is_index: bool,
cursor_id: CursorID,
start_reg: usize,
num_regs: usize,
target_pc: BranchOffset,
},
// If cursor_id refers to an SQL table (B-Tree that uses integer keys), use the value in start_reg as the key.
// If cursor_id refers to an SQL index, then start_reg is the first in an array of num_regs registers that are used as an unpacked index key.
// Seek to the first index entry that is greater than the given key. If not found, jump to the given PC. Otherwise, continue to the next instruction.
SeekGT {
is_index: bool,
cursor_id: CursorID,
start_reg: usize,
num_regs: usize,
target_pc: BranchOffset,
},
// The P4 register values beginning with P3 form an unpacked index key that omits the PRIMARY KEY. Compare this key value against the index that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID fields at the end.
// If the P1 index entry is greater or equal than the key value then jump to P2. Otherwise fall through to the next instruction.
IdxGE {
cursor_id: CursorID,
start_reg: usize,
num_regs: usize,
target_pc: BranchOffset,
},
// The P4 register values beginning with P3 form an unpacked index key that omits the PRIMARY KEY. Compare this key value against the index that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID fields at the end.
// If the P1 index entry is greater than the key value then jump to P2. Otherwise fall through to the next instruction.
IdxGT {
cursor_id: CursorID,
start_reg: usize,
num_regs: usize,
target_pc: BranchOffset,
},
// Decrement the given register and jump to the given PC if the result is zero.
DecrJumpZero {
reg: usize,
target_pc: BranchOffset,
},
AggStep {
acc_reg: usize,
col: usize,
delimiter: usize,
func: AggFunc,
},
AggFinal {
register: usize,
func: AggFunc,
},
// Open a sorter.
SorterOpen {
cursor_id: CursorID, // P1
columns: usize, // P2
order: OwnedRecord, // P4. 0 if ASC and 1 if DESC
},
// Insert a row into the sorter.
SorterInsert {
cursor_id: CursorID,
record_reg: usize,
},
// Sort the rows in the sorter.
SorterSort {
cursor_id: CursorID,
pc_if_empty: BranchOffset,
},
// Retrieve the next row from the sorter.
SorterData {
cursor_id: CursorID, // P1
dest_reg: usize, // P2
pseudo_cursor: usize, // P3
},
// Advance to the next row in the sorter.
SorterNext {
cursor_id: CursorID,
pc_if_next: BranchOffset,
},
// Function
Function {
constant_mask: i32, // P1
start_reg: usize, // P2, start of argument registers
dest: usize, // P3
func: FuncCtx, // P4
},
InitCoroutine {
yield_reg: usize,
jump_on_definition: BranchOffset,
start_offset: BranchOffset,
},
EndCoroutine {
yield_reg: usize,
},
Yield {
yield_reg: usize,
end_offset: BranchOffset,
},
InsertAsync {
cursor: CursorID,
key_reg: usize, // Must be int.
record_reg: usize, // Blob of record data.
flag: usize, // Flags used by insert, for now not used.
},
InsertAwait {
cursor_id: usize,
},
DeleteAsync {
cursor_id: CursorID,
},
DeleteAwait {
cursor_id: CursorID,
},
NewRowid {
cursor: CursorID, // P1
rowid_reg: usize, // P2 Destination register to store the new rowid
prev_largest_reg: usize, // P3 Previous largest rowid in the table (Not used for now)
},
MustBeInt {
reg: usize,
},
SoftNull {
reg: usize,
},
NotExists {
cursor: CursorID,
rowid_reg: usize,
target_pc: BranchOffset,
},
OpenWriteAsync {
cursor_id: CursorID,
root_page: PageIdx,
},
OpenWriteAwait {},
Copy {
src_reg: usize,
dst_reg: usize,
amount: usize, // 0 amount means we include src_reg, dst_reg..=dst_reg+amount = src_reg..=src_reg+amount
},
/// Allocate a new b-tree.
CreateBtree {
/// Allocate b-tree in main database if zero or in temp database if non-zero (P1).
db: usize,
/// The root page of the new b-tree (P2).
root: usize,
/// Flags (P3).
flags: usize,
},
/// Close a cursor.
Close {
cursor_id: CursorID,
},
/// Check if the register is null.
IsNull {
/// Source register (P1).
src: usize,
/// Jump to this PC if the register is null (P2).
target_pc: BranchOffset,
},
ParseSchema {
db: usize,
where_clause: String,
},
}
// Index of insn in list of insns
type InsnReference = usize;
@@ -2669,6 +2179,7 @@ impl Program {
state.registers[*dest] = exec_replace(source, pattern, replacement);
}
},
#[allow(unreachable_patterns)]
crate::function::Func::Extension(extfn) => match extfn {
#[cfg(feature = "uuid")]
ExtFunc::Uuid(uuidfn) => match uuidfn {

View File

@@ -65,8 +65,7 @@ pub fn derive_description_from_doc(item: TokenStream) -> TokenStream {
/// Processes a Rust docs to extract the description string.
fn process_description(token_iter: &mut IntoIter) -> Option<String> {
if let Some(doc_token_tree) = token_iter.next() {
if let TokenTree::Group(doc_group) = doc_token_tree {
if let Some(TokenTree::Group(doc_group)) = token_iter.next() {
let mut doc_group_iter = doc_group.stream().into_iter();
// Skip the `desc` and `(` tokens to reach the actual description
doc_group_iter.next();
@@ -75,20 +74,19 @@ fn process_description(token_iter: &mut IntoIter) -> Option<String> {
return Some(description.to_string());
}
}
}
None
}
/// Processes the payload of an enum variant to extract variable names (ignoring types).
fn process_payload(payload_group: Group) -> String {
let mut payload_group_iter = payload_group.stream().into_iter();
let payload_group_iter = payload_group.stream().into_iter();
let mut variable_name_list = String::from("");
let mut is_variable_name = true;
while let Some(token) = payload_group_iter.next() {
for token in payload_group_iter {
match token {
TokenTree::Ident(ident) => {
if is_variable_name {
variable_name_list.push_str(&format!("{},", ident.to_string()));
variable_name_list.push_str(&format!("{},", ident));
}
is_variable_name = false;
}

View File

@@ -9,7 +9,7 @@ use crate::{
query::{Create, Insert, Predicate, Query, Select},
table::Value,
},
SimConnection, SimulatorEnv, SimulatorOpts,
SimConnection, SimulatorEnv,
};
use crate::generation::{frequency, Arbitrary, ArbitraryFrom};
@@ -28,11 +28,11 @@ impl Display for InteractionPlan {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for interaction in &self.plan {
match interaction {
Interaction::Query(query) => write!(f, "{};\n", query)?,
Interaction::Query(query) => writeln!(f, "{};", query)?,
Interaction::Assertion(assertion) => {
write!(f, "-- ASSERT: {};\n", assertion.message)?
writeln!(f, "-- ASSERT: {};", assertion.message)?
}
Interaction::Fault(fault) => write!(f, "-- FAULT: {};\n", fault)?,
Interaction::Fault(fault) => writeln!(f, "-- FAULT: {};", fault)?,
}
}
@@ -73,8 +73,10 @@ impl Display for Interaction {
}
}
type AssertionFunc = dyn Fn(&Vec<ResultSet>) -> bool;
pub(crate) struct Assertion {
pub(crate) func: Box<dyn Fn(&Vec<ResultSet>) -> bool>,
pub(crate) func: Box<AssertionFunc>,
pub(crate) message: String,
}
@@ -244,7 +246,7 @@ impl Interaction {
Self::Assertion(_) => {
unreachable!("unexpected: this function should only be called on queries")
}
Self::Fault(fault) => {
Interaction::Fault(_) => {
unreachable!("unexpected: this function should only be called on queries")
}
}
@@ -347,7 +349,7 @@ fn property_insert_select<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Inte
Interactions(vec![insert_query, select_query, assertion])
}
fn create_table<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
fn create_table<R: rand::Rng>(rng: &mut R, _env: &SimulatorEnv) -> Interactions {
let create_query = Interaction::Query(Query::Create(Create::arbitrary(rng)));
Interactions(vec![create_query])
}
@@ -363,7 +365,7 @@ fn random_write<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
Interactions(vec![insert_query])
}
fn random_fault<R: rand::Rng>(rng: &mut R, env: &SimulatorEnv) -> Interactions {
fn random_fault<R: rand::Rng>(_rng: &mut R, _env: &SimulatorEnv) -> Interactions {
let fault = Interaction::Fault(Fault::Disconnect);
Interactions(vec![fault])
}

View File

@@ -227,7 +227,7 @@ impl ArbitraryFrom<(&str, &Value)> for Predicate {
fn arbitrary_from<R: Rng>(rng: &mut R, (column_name, value): &(&str, &Value)) -> Self {
one_of(
vec![
Box::new(|rng| Self::Eq(column_name.to_string(), (*value).clone())),
Box::new(|_| Predicate::Eq(column_name.to_string(), (*value).clone())),
Box::new(|rng| {
Self::Gt(
column_name.to_string(),

View File

@@ -1,7 +1,8 @@
#![allow(clippy::arc_with_non_send_sync, dead_code)]
use clap::Parser;
use generation::plan::{Interaction, InteractionPlan, ResultSet};
use generation::{pick_index, ArbitraryFrom};
use limbo_core::{Connection, Database, Result, StepResult, IO};
use limbo_core::{Database, Result};
use model::table::Value;
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
@@ -11,7 +12,6 @@ use runner::io::SimulatorIO;
use std::backtrace::Backtrace;
use std::io::Write;
use std::path::Path;
use std::rc::Rc;
use std::sync::Arc;
use tempfile::TempDir;
@@ -19,7 +19,6 @@ mod generation;
mod model;
mod runner;
#[allow(clippy::arc_with_non_send_sync)]
fn main() {
let _ = env_logger::try_init();
@@ -189,7 +188,7 @@ fn run_simulation(
let mut f = std::fs::File::create(plan_path).unwrap();
// todo: create a detailed plan file with all the plans. for now, we only use 1 connection, so it's safe to use the first plan.
f.write(plans[0].to_string().as_bytes()).unwrap();
f.write_all(plans[0].to_string().as_bytes()).unwrap();
log::info!("{}", plans[0].stats());
@@ -207,7 +206,7 @@ fn run_simulation(
result
}
fn execute_plans(env: &mut SimulatorEnv, plans: &mut Vec<InteractionPlan>) -> Result<()> {
fn execute_plans(env: &mut SimulatorEnv, plans: &mut [InteractionPlan]) -> Result<()> {
// todo: add history here by recording which interaction was executed at which tick
for _tick in 0..env.opts.ticks {
// Pick the connection to interact with
@@ -222,7 +221,7 @@ fn execute_plans(env: &mut SimulatorEnv, plans: &mut Vec<InteractionPlan>) -> Re
fn execute_plan(
env: &mut SimulatorEnv,
connection_index: usize,
plans: &mut Vec<InteractionPlan>,
plans: &mut [InteractionPlan],
) -> Result<()> {
let connection = &env.connections[connection_index];
let plan = &mut plans[connection_index];

View File

@@ -16,7 +16,7 @@ pub(crate) struct Table {
pub(crate) name: String,
pub(crate) columns: Vec<Column>,
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub(crate) struct Column {
pub(crate) name: String,
@@ -54,8 +54,12 @@ pub(crate) enum Value {
}
fn to_sqlite_blob(bytes: &[u8]) -> String {
let hex: String = bytes.iter().map(|b| format!("{:02X}", b)).collect();
format!("X'{}'", hex)
format!(
"X'{}'",
bytes
.iter()
.fold(String::new(), |acc, b| acc + &format!("{:02X}", b))
)
}
impl Display for Value {

View File

@@ -1,7 +1,6 @@
use std::{cell::RefCell, rc::Rc};
use limbo_core::{File, Result};
pub(crate) struct SimulatorFile {
pub(crate) inner: Rc<dyn File>,
pub(crate) fault: RefCell<bool>,

View File

@@ -1,4 +1,5 @@
pub mod cli;
pub mod env;
#[allow(dead_code)]
pub mod file;
pub mod io;

View File

@@ -67,7 +67,7 @@ pub struct sqlite3_stmt<'a> {
pub(crate) row: RefCell<Option<limbo_core::Row<'a>>>,
}
impl<'a> sqlite3_stmt<'a> {
impl sqlite3_stmt<'_> {
pub fn new(stmt: limbo_core::Statement) -> Self {
let row = RefCell::new(None);
Self { stmt, row }
@@ -998,9 +998,7 @@ pub unsafe extern "C" fn sqlite3_threadsafe() -> ffi::c_int {
#[no_mangle]
pub unsafe extern "C" fn sqlite3_libversion() -> *const std::ffi::c_char {
ffi::CStr::from_bytes_with_nul(b"3.42.0\0")
.unwrap()
.as_ptr()
c"3.42.0".as_ptr()
}
#[no_mangle]
@@ -1094,7 +1092,7 @@ pub unsafe extern "C" fn sqlite3_wal_checkpoint_v2(
}
let db: &mut sqlite3 = &mut *db;
// TODO: Checkpointing modes and reporting back log size and checkpoint count to caller.
if let Err(e) = db.conn.checkpoint() {
if db.conn.checkpoint().is_err() {
return SQLITE_ERROR;
}
SQLITE_OK