diff --git a/bindings/python/src/lib.rs b/bindings/python/src/lib.rs
index ba66adbad..4f8241351 100644
--- a/bindings/python/src/lib.rs
+++ b/bindings/python/src/lib.rs
@@ -83,6 +83,7 @@ pub struct Cursor {
 // SAFETY: The limbo_core crate guarantees that `Cursor` is thread-safe.
 unsafe impl Send for Cursor {}
 
+#[allow(unused_variables, clippy::arc_with_non_send_sync)]
 #[pymethods]
 impl Cursor {
     #[pyo3(signature = (sql, parameters=None))]
@@ -229,6 +230,7 @@ impl Connection {
     }
 }
 
+#[allow(clippy::arc_with_non_send_sync)]
 #[pyfunction]
 pub fn connect(path: &str) -> Result<Connection> {
     let io = Arc::new(limbo_core::PlatformIO::new().map_err(|e| {
diff --git a/bindings/wasm/lib.rs b/bindings/wasm/lib.rs
index 0bf4fae5c..8c0fb8cd4 100644
--- a/bindings/wasm/lib.rs
+++ b/bindings/wasm/lib.rs
@@ -8,6 +8,7 @@ pub struct Database {
     _inner: limbo_core::Database,
 }
 
+#[allow(clippy::arc_with_non_send_sync)]
 #[wasm_bindgen]
 impl Database {
     #[wasm_bindgen(constructor)]
@@ -29,6 +30,7 @@ pub struct File {
     fd: i32,
 }
 
+#[allow(dead_code)]
 impl File {
     fn new(vfs: VFS, fd: i32) -> Self {
         File { vfs, fd }
diff --git a/cli/main.rs b/cli/main.rs
index 8ce18dfc0..78d8eaf10 100644
--- a/cli/main.rs
+++ b/cli/main.rs
@@ -33,6 +33,7 @@ struct Opts {
     output_mode: OutputMode,
 }
 
+#[allow(clippy::arc_with_non_send_sync)]
 fn main() -> anyhow::Result<()> {
     env_logger::init();
     let opts = Opts::parse();
diff --git a/core/function.rs b/core/function.rs
index 6a5a0036c..8ce7a3a11 100644
--- a/core/function.rs
+++ b/core/function.rs
@@ -1,13 +1,20 @@
+use std::fmt;
+use std::fmt::Display;
+
 #[derive(Debug, Clone, PartialEq)]
 pub enum JsonFunc {
-    JSON,
+    Json,
 }
 
-impl ToString for JsonFunc {
-    fn to_string(&self) -> String {
-        match self {
-            JsonFunc::JSON => "json".to_string(),
-        }
+impl Display for JsonFunc {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "{}",
+            match self {
+                JsonFunc::Json => "json".to_string(),
+            }
+        )
     }
 }
@@ -67,9 +74,9 @@ pub enum ScalarFunc {
     UnixEpoch,
 }
 
-impl ToString for ScalarFunc {
-    fn to_string(&self) -> String {
-        match self {
+impl Display for ScalarFunc {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let str = match self {
             ScalarFunc::Char => "char".to_string(),
             ScalarFunc::Coalesce => "coalesce".to_string(),
             ScalarFunc::Concat => "concat".to_string(),
@@ -95,7 +102,8 @@ impl ToString for ScalarFunc {
             ScalarFunc::Unicode => "unicode".to_string(),
             ScalarFunc::Quote => "quote".to_string(),
             ScalarFunc::UnixEpoch => "unixepoch".to_string(),
-        }
+        };
+        write!(f, "{}", str)
     }
 }
@@ -106,12 +114,12 @@ pub enum Func {
     Json(JsonFunc),
 }
 
-impl Func {
-    pub fn to_string(&self) -> String {
+impl Display for Func {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
-            Func::Agg(agg_func) => agg_func.to_string().to_string(),
-            Func::Scalar(scalar_func) => scalar_func.to_string(),
-            Func::Json(json_func) => json_func.to_string(),
+            Func::Agg(agg_func) => write!(f, "{}", agg_func.to_string()),
+            Func::Scalar(scalar_func) => write!(f, "{}", scalar_func),
+            Func::Json(json_func) => write!(f, "{}", json_func),
         }
     }
 }
@@ -157,7 +165,7 @@ impl Func {
             "time" => Ok(Func::Scalar(ScalarFunc::Time)),
             "unicode" => Ok(Func::Scalar(ScalarFunc::Unicode)),
             "quote" => Ok(Func::Scalar(ScalarFunc::Quote)),
-            "json" => Ok(Func::Json(JsonFunc::JSON)),
+            "json" => Ok(Func::Json(JsonFunc::Json)),
             "unixepoch" => Ok(Func::Scalar(ScalarFunc::UnixEpoch)),
             _ => Err(()),
         }
diff --git a/core/schema.rs b/core/schema.rs
index f3d769297..f6d5c0434 100644
--- a/core/schema.rs
+++ b/core/schema.rs
@@ -403,6 +403,7 @@ pub fn sqlite_schema_table() -> BTreeTable {
     }
 }
 
+#[allow(dead_code)]
 #[derive(Debug)]
 pub struct Index {
     pub name: String,
@@ -412,6 +413,7 @@ pub struct Index {
     pub unique: bool,
 }
 
+#[allow(dead_code)]
 #[derive(Debug, Clone)]
 pub struct IndexColumn {
     pub name: String,
diff --git a/core/storage/btree.rs b/core/storage/btree.rs
index f531220ee..930581c2a 100644
--- a/core/storage/btree.rs
+++ b/core/storage/btree.rs
@@ -638,7 +638,7 @@ impl BTreeCursor {
             _ => unreachable!("Parent should always be a "),
         };
         if found {
-            let (start, len) = parent.cell_get_raw_region(
+            let (start, _len) = parent.cell_get_raw_region(
                 cell_idx,
                 self.max_local(page_type.clone()),
                 self.min_local(page_type.clone()),
@@ -678,11 +678,11 @@ impl BTreeCursor {
                 cells_per_page
             };
 
-            let mut i = 0;
-            for cell_idx in current_cell_index..current_cell_index + cells_to_copy {
+            let cell_index_range =
+                current_cell_index..current_cell_index + cells_to_copy;
+            for (j, cell_idx) in cell_index_range.enumerate() {
                 let cell = scratch_cells[cell_idx];
-                self.insert_into_cell(page, cell, i);
-                i += 1;
+                self.insert_into_cell(page, cell, j);
             }
             divider_cells_index.push(current_cell_index + cells_to_copy - 1);
             current_cell_index += cells_to_copy;
@@ -999,6 +999,7 @@ impl BTreeCursor {
 
     // Free blocks can be zero, meaning the "real free space" that can be used to allocate is expected to be between first cell byte
     // and end of cell pointer area.
+    #[allow(unused_assignments)]
    fn compute_free_space(&self, page: &PageContent, db_header: Ref<DatabaseHeader>) -> u16 {
         let buf = page.as_ptr();
@@ -1019,13 +1020,13 @@ impl BTreeCursor {
         let mut pc = free_block_pointer as usize;
 
         if pc > 0 {
-            let mut next = 0;
-            let mut size = 0;
             if pc < first_byte_in_cell_content as usize {
                 // corrupt
                 todo!("corrupted page");
             }
 
+            let mut next = 0;
+            let mut size = 0;
             loop {
                 // TODO: check corruption icellast
                 next = u16::from_be_bytes(buf[pc..pc + 2].try_into().unwrap()) as usize;
diff --git a/core/storage/pager.rs b/core/storage/pager.rs
index f1882c2bb..2a463514f 100644
--- a/core/storage/pager.rs
+++ b/core/storage/pager.rs
@@ -92,6 +92,7 @@ impl Page {
     }
 }
 
+#[allow(dead_code)]
 struct PageCacheEntry {
     key: usize,
     page: Rc<RefCell<Page>>,
@@ -100,7 +101,7 @@ struct PageCacheEntry {
 }
 
 impl PageCacheEntry {
-    fn into_non_null(&mut self) -> NonNull<PageCacheEntry> {
+    fn as_non_null(&mut self) -> NonNull<PageCacheEntry> {
         NonNull::new(&mut *self).unwrap()
     }
 }
@@ -175,7 +176,7 @@ impl DumbLruPageCache {
     }
 
     fn detach(&mut self, entry: &mut PageCacheEntry) {
-        let mut current = entry.into_non_null();
+        let mut current = entry.as_non_null();
 
         let (next, prev) = unsafe {
             let c = current.as_mut();
@@ -203,7 +204,7 @@ impl DumbLruPageCache {
     }
 
     fn touch(&mut self, entry: &mut PageCacheEntry) {
-        let mut current = entry.into_non_null();
+        let mut current = entry.as_non_null();
         unsafe {
             let c = current.as_mut();
             c.next = *self.head.borrow();
@@ -231,10 +232,12 @@ impl DumbLruPageCache {
     }
 }
 
+#[allow(dead_code)]
 pub struct PageCache {
     cache: SieveCache<usize, Rc<RefCell<Page>>>,
 }
 
+#[allow(dead_code)]
 impl PageCache {
     pub fn new(cache: SieveCache<usize, Rc<RefCell<Page>>>) -> Self {
         Self { cache }
@@ -372,6 +375,7 @@ impl Pager {
         Get's a new page that increasing the size of the page or uses a free page.
         Currently free list pages are not yet supported.
     */
+    #[allow(clippy::readonly_write_lock)]
     pub fn allocate_page(&self) -> Result<Rc<RefCell<Page>>> {
         let header = &self.db_header;
         let mut header = RefCell::borrow_mut(header);
diff --git a/core/storage/sqlite3_ondisk.rs b/core/storage/sqlite3_ondisk.rs
index 4f4cd2f1a..58d204f46 100644
--- a/core/storage/sqlite3_ondisk.rs
+++ b/core/storage/sqlite3_ondisk.rs
@@ -99,6 +99,7 @@ pub struct WalHeader {
     checksum_2: u32,
 }
 
+#[allow(dead_code)]
 #[derive(Debug, Default)]
 pub struct WalFrameHeader {
     page_number: u32,
@@ -288,6 +289,7 @@ impl PageContent {
         self.read_u8(self.offset).try_into().unwrap()
     }
 
+    #[allow(clippy::mut_from_ref)]
     pub fn as_ptr(&self) -> &mut [u8] {
         unsafe {
             // unsafe trick to borrow twice
@@ -552,6 +554,7 @@ pub fn begin_write_btree_page(pager: &Pager, page: &Rc<RefCell<Page>>) -> Result<()> {
     Ok(())
 }
 
+#[allow(clippy::enum_variant_names)]
 #[derive(Debug, Clone)]
 pub enum BTreeCell {
     TableInteriorCell(TableInteriorCell),
@@ -667,6 +670,7 @@ pub fn read_btree_cell(
 
 /// read_payload takes in the unread bytearray with the payload size
 /// and returns the payload on the page, and optionally the first overflow page number.
+#[allow(clippy::readonly_write_lock)]
 fn read_payload(unread: &[u8], payload_size: usize, pager: Rc<Pager>) -> (Vec<u8>, Option<u32>) {
     let cell_len = unread.len();
     if payload_size <= cell_len {
@@ -924,8 +928,7 @@ pub fn write_varint(buf: &mut [u8], value: u64) -> usize {
 }
 
 pub fn write_varint_to_vec(value: u64, payload: &mut Vec<u8>) {
-    let mut varint: Vec<u8> = Vec::new();
-    varint.extend(std::iter::repeat(0).take(9));
+    let mut varint: Vec<u8> = vec![0; 9];
     let n = write_varint(&mut varint.as_mut_slice()[0..9], value);
     write_varint(&mut varint, value);
     varint.truncate(n);
@@ -961,8 +964,9 @@ fn finish_read_wal_header(buf: Rc<RefCell<Buffer>>, header: Rc<WalHeader>) -> Result<()> {
 
 pub fn begin_read_wal_frame_header(
+    io: &dyn File,
     offset: usize,
 ) -> Result<Rc<RefCell<WalFrameHeader>>> {
     let drop_fn = Rc::new(|_buf| {});
@@ -978,6 +982,7 @@ pub fn begin_read_wal_frame_header(
     Ok(result)
 }
 
+#[allow(dead_code)]
 fn finish_read_wal_frame_header(
     buf: Rc<RefCell<Buffer>>,
     frame: Rc<RefCell<WalFrameHeader>>,
diff --git a/core/translate/emitter.rs b/core/translate/emitter.rs
index 378e1b7c3..82f42a2e1 100644
--- a/core/translate/emitter.rs
+++ b/core/translate/emitter.rs
@@ -1,7 +1,6 @@
 use std::cell::RefCell;
 use std::collections::HashMap;
 use std::rc::Rc;
-use std::usize;
 
 use sqlite3_parser::ast;
 
@@ -25,7 +24,7 @@ use super::plan::{Operator, ProjectionColumn};
  * The Emitter trait is used to emit bytecode instructions for a given operator in the query plan.
  *
  *  - step: perform a single step of the operator, emitting bytecode instructions as needed,
-      and returning a result indicating whether the operator is ready to emit a result row
+      and returning a result indicating whether the operator is ready to emit a result row
 */
 pub trait Emitter {
     fn step(
@@ -143,9 +142,9 @@ pub struct Metadata {
 /// - Continue: the operator is not yet ready to emit a result row
 /// - ReadyToEmit: the operator is ready to emit a result row
 /// - Done: the operator has completed execution
-/// For example, a Scan operator will return Continue until it has opened a cursor, rewound it and applied any predicates.
-/// At that point, it will return ReadyToEmit.
-/// Finally, when the Scan operator has emitted a Next instruction, it will return Done.
+/// For example, a Scan operator will return Continue until it has opened a cursor, rewound it and applied any predicates.
+/// At that point, it will return ReadyToEmit.
+/// Finally, when the Scan operator has emitted a Next instruction, it will return Done.
 ///
 /// Parent operators are free to make decisions based on the result a child operator's step() method.
 ///
@@ -614,6 +613,7 @@ impl Emitter for Operator {
                     return Ok(OpStepResult::Continue);
                 }
 
+                #[allow(clippy::never_loop)]
                 GROUP_BY_SORT_AND_COMPARE => {
                     loop {
                         match source.step(program, m, referenced_tables)? {
@@ -1065,6 +1065,7 @@ impl Emitter for Operator {
                 Ok(OpStepResult::Continue)
             }
 
+            #[allow(clippy::never_loop)]
             ORDER_SORT_AND_OPEN_LOOP => {
                 loop {
                     match source.step(program, m, referenced_tables)? {
diff --git a/core/translate/expr.rs b/core/translate/expr.rs
index 54695f5a6..3899de85d 100644
--- a/core/translate/expr.rs
+++ b/core/translate/expr.rs
@@ -451,10 +451,7 @@ pub fn translate_condition_expr(
             escape: _,
         } => {
             let cur_reg = program.alloc_register();
-            assert!(match rhs.as_ref() {
-                ast::Expr::Literal(_) => true,
-                _ => false,
-            });
+            assert!(matches!(rhs.as_ref(), ast::Expr::Literal(_)));
             match op {
                 ast::LikeOperator::Like => {
                     let pattern_reg = program.alloc_register();
@@ -732,7 +729,7 @@ pub fn translate_expr(
                     crate::bail_parse_error!("aggregation function in non-aggregation context")
                 }
                 Func::Json(j) => match j {
-                    JsonFunc::JSON => {
+                    JsonFunc::Json => {
                         let args = if let Some(args) = args {
                             if args.len() != 1 {
                                 crate::bail_parse_error!(
@@ -1115,10 +1112,11 @@ pub fn translate_expr(
                     }
                     ScalarFunc::UnixEpoch => {
                         let mut start_reg = 0;
-                        if let Some(args) = args {
-                            if args.len() > 1 {
+                        match args {
+                            Some(args) if args.len() > 1 => {
                                 crate::bail_parse_error!("epoch function with > 1 arguments. Modifiers are not yet supported.");
-                            } else if args.len() == 1 {
+                            }
+                            Some(args) if args.len() == 1 => {
                                 let arg_reg = program.alloc_register();
                                 let _ = translate_expr(
                                     program,
@@ -1130,6 +1128,7 @@ pub fn translate_expr(
                                 )?;
                                 start_reg = arg_reg;
                             }
+                            _ => {}
                         }
                         program.emit_insn(Insn::Function {
                             constant_mask: 0,
@@ -1446,8 +1445,8 @@ fn wrap_eval_jump_expr(
 
 pub fn resolve_ident_qualified(
     program: &ProgramBuilder,
-    table_name: &String,
-    ident: &String,
+    table_name: &str,
+    ident: &str,
     referenced_tables: &[(Rc<BTreeTable>, String)],
     cursor_hint: Option<usize>,
 ) -> Result<(usize, Type, usize, bool)> {
@@ -1494,7 +1493,7 @@ pub fn resolve_ident_qualified(
 
 pub fn resolve_ident_table(
     program: &ProgramBuilder,
-    ident: &String,
+    ident: &str,
     referenced_tables: Option<&[(Rc<BTreeTable>, String)]>,
     cursor_hint: Option<usize>,
 ) -> Result<(usize, Type, usize, bool)> {
diff --git a/core/translate/insert.rs b/core/translate/insert.rs
index 7e04f4901..d79445042 100644
--- a/core/translate/insert.rs
+++ b/core/translate/insert.rs
@@ -12,6 +12,7 @@ use crate::{
     vdbe::{builder::ProgramBuilder, Insn, Program},
 };
 
+#[allow(clippy::too_many_arguments)]
 pub fn translate_insert(
     schema: &Schema,
     with: &Option<With>,
diff --git a/core/translate/optimizer.rs b/core/translate/optimizer.rs
index fe15ecb42..54164ac60 100644
--- a/core/translate/optimizer.rs
+++ b/core/translate/optimizer.rs
@@ -668,19 +668,17 @@ fn find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_o
         Operator::Join { .. } => 0,
         Operator::Order { .. } => 0,
         Operator::Projection { expressions, .. } => {
-            let mut idx = 0;
             let mut mask = 0;
-            for e in expressions.iter() {
+            for (idx, e) in expressions.iter().enumerate() {
                 match e {
-                    super::plan::ProjectionColumn::Column(c) => {
+                    ProjectionColumn::Column(c) => {
                         if c == expr {
                             mask |= 1 << idx;
                         }
                     }
-                    super::plan::ProjectionColumn::Star => {}
-                    super::plan::ProjectionColumn::TableStar(_, _) => {}
+                    ProjectionColumn::Star => {}
+                    ProjectionColumn::TableStar(_, _) => {}
                 }
-                idx += 1;
             }
             mask
@@ -696,7 +694,7 @@ fn find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_o
     match expr {
         ast::Expr::Between {
             lhs,
-            not,
+            not: _,
             start,
             end,
         } => {
@@ -706,7 +704,7 @@ fn find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_o
             mask |= find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(end, operator);
             mask
         }
-        ast::Expr::Binary(lhs, op, rhs) => {
+        ast::Expr::Binary(lhs, _op, rhs) => {
             let mut mask = 0;
             mask |= find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(lhs, operator);
             mask |= find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(rhs, operator);
@@ -730,24 +728,24 @@ fn find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_o
             }
             mask
         }
-        ast::Expr::Cast { expr, type_name } => {
+        ast::Expr::Cast { expr, type_name: _ } => {
             find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(
                 expr, operator,
             )
         }
-        ast::Expr::Collate(expr, collation) => {
+        ast::Expr::Collate(expr, _collation) => {
             find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(
                 expr, operator,
             )
         }
-        ast::Expr::DoublyQualified(schema, tbl, ident) => 0,
+        ast::Expr::DoublyQualified(_schema, _tbl, _ident) => 0,
         ast::Expr::Exists(_) => 0,
         ast::Expr::FunctionCall {
-            name,
-            distinctness,
+            name: _,
+            distinctness: _,
             args,
-            order_by,
-            filter_over,
+            order_by: _,
+            filter_over: _,
         } => {
             let mut mask = 0;
             if let Some(args) = args {
@@ -757,9 +755,12 @@ fn find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_o
             }
             mask
         }
-        ast::Expr::FunctionCallStar { name, filter_over } => 0,
+        ast::Expr::FunctionCallStar {
+            name: _,
+            filter_over: _,
+        } => 0,
         ast::Expr::Id(_) => 0,
-        ast::Expr::InList { lhs, not, rhs } => {
+        ast::Expr::InList { lhs, not: _, rhs } => {
             let mut mask = 0;
             mask |= find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(lhs, operator);
             if let Some(rhs) = rhs {
@@ -769,16 +770,20 @@ fn find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_o
             }
             mask
         }
-        ast::Expr::InSelect { lhs, not, rhs } => {
+        ast::Expr::InSelect {
+            lhs,
+            not: _,
+            rhs: _,
+        } => {
             find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(
                 lhs, operator,
             )
         }
         ast::Expr::InTable {
-            lhs,
-            not,
-            rhs,
-            args,
+            lhs: _,
+            not: _,
+            rhs: _,
+            args: _,
         } => 0,
         ast::Expr::IsNull(expr) => {
             find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(
@@ -787,10 +792,10 @@ fn find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_o
         }
         ast::Expr::Like {
             lhs,
-            not,
-            op,
+            not: _,
+            op: _,
             rhs,
-            escape,
+            escape: _,
         } => {
             let mut mask = 0;
             mask |= find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(lhs, operator);
@@ -814,7 +819,7 @@ fn find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_o
         ast::Expr::Qualified(_, _) => 0,
         ast::Expr::Raise(_, _) => 0,
         ast::Expr::Subquery(_) => 0,
-        ast::Expr::Unary(op, expr) => {
+        ast::Expr::Unary(_op, expr) => {
             find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(
                 expr, operator,
             )
@@ -848,9 +853,7 @@ fn find_shared_expressions_in_child_operators_and_mark_them_so_that_the_parent_o
         }
         Operator::Join { .. } => {}
         Operator::Order { source, key, .. } => {
-            let mut idx = 0;
-
-            for (expr, _) in key.iter() {
+            for (idx, (expr, _)) in key.iter().enumerate() {
                 let result = find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(expr, source);
                 if result != 0 {
                     expr_result_cache.set_precomputation_key(
@@ -860,13 +863,11 @@ fn find_shared_expressions_in_child_operators_and_mark_them_so_that_the_parent_o
                         result,
                     );
                 }
-                idx += 1;
             }
             find_shared_expressions_in_child_operators_and_mark_them_so_that_the_parent_operator_doesnt_recompute_them(source, expr_result_cache)
         }
         Operator::Projection { source, expressions, .. } => {
-            let mut idx = 0;
-            for expr in expressions.iter() {
+            for (idx, expr) in expressions.iter().enumerate() {
                 if let ProjectionColumn::Column(expr) = expr {
                     let result = find_indexes_of_all_result_columns_in_operator_that_match_expr_either_fully_or_partially(expr, source);
                     if result != 0 {
@@ -878,7 +879,6 @@ fn find_shared_expressions_in_child_operators_and_mark_them_so_that_the_parent_o
                         );
                     }
                 }
-                idx += 1;
             }
             find_shared_expressions_in_child_operators_and_mark_them_so_that_the_parent_operator_doesnt_recompute_them(source, expr_result_cache)
         }
diff --git a/core/translate/planner.rs b/core/translate/planner.rs
index 8cd27324b..cc5151c13 100644
--- a/core/translate/planner.rs
+++ b/core/translate/planner.rs
@@ -65,6 +65,7 @@ fn resolve_aggregates(expr: &ast::Expr, aggs: &mut Vec<Aggregate>) {
     }
 }
 
+#[allow(clippy::extra_unused_lifetimes)]
 pub fn prepare_select_plan<'a>(schema: &Schema, select: ast::Select) -> Result<Plan> {
     match select.body.select {
         ast::OneSelect::Select {
@@ -112,7 +113,7 @@ pub fn prepare_select_plan<'a>(schema: &Schema, select: ast::Select) -> Result<Plan> {
                     let args_count = if let Some(args) = &args {
                         args.len()
@@ -155,7 +156,10 @@ pub fn prepare_select_plan<'a>(schema: &Schema, select: ast::Select) -> Result<Plan> {
                     }
                 }
             }
-                ast::Expr::FunctionCallStar { name, filter_over } => {
+                ast::Expr::FunctionCallStar {
+                    name,
+                    filter_over: _,
+                } => {
                     if let Ok(Func::Agg(f)) = Func::resolve_function(
                         normalize_ident(name.0.as_str()).as_str(),
                         0,
@@ -176,7 +180,7 @@ pub fn prepare_select_plan<'a>(schema: &Schema, select: ast::Select) -> Result<Plan> {
diff --git a/core/types.rs b/core/types.rs
index 415457b76..bec50fdb6 100644
--- a/core/types.rs
+++ b/core/types.rs
@@ -84,7 +84,8 @@ impl AggContext {
     }
 }
 
-impl std::cmp::PartialOrd for OwnedValue {
+#[allow(clippy::non_canonical_partial_ord_impl)]
+impl PartialOrd for OwnedValue {
     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
         match (self, other) {
             (OwnedValue::Integer(int_left), OwnedValue::Integer(int_right)) => {
diff --git a/core/vdbe/builder.rs b/core/vdbe/builder.rs
index 681d06119..4a0161e07 100644
--- a/core/vdbe/builder.rs
+++ b/core/vdbe/builder.rs
@@ -4,6 +4,7 @@ use crate::storage::sqlite3_ondisk::DatabaseHeader;
 
 use super::{BranchOffset, CursorID, Insn, InsnReference, Program, Table};
 
+#[allow(dead_code)]
 pub struct ProgramBuilder {
     next_free_register: usize,
     next_free_label: BranchOffset,
diff --git a/core/vdbe/datetime.rs b/core/vdbe/datetime.rs
index 4fc2d2d5b..86ad57677 100644
--- a/core/vdbe/datetime.rs
+++ b/core/vdbe/datetime.rs
@@ -67,8 +67,8 @@ fn apply_modifier(dt: &mut NaiveDateTime, modifier: &str) -> Result<()> {
         Modifier::Hours(hours) => *dt += TimeDelta::hours(hours),
         Modifier::Minutes(minutes) => *dt += TimeDelta::minutes(minutes),
         Modifier::Seconds(seconds) => *dt += TimeDelta::seconds(seconds),
-        Modifier::Months(months) => todo!(),
-        Modifier::Years(years) => todo!(),
+        Modifier::Months(_months) => todo!(),
+        Modifier::Years(_years) => todo!(),
         Modifier::TimeOffset(offset) => *dt += offset,
         Modifier::DateOffset {
             years,
@@ -80,7 +80,7 @@ fn apply_modifier(dt: &mut NaiveDateTime, modifier: &str) -> Result<()> {
                 .ok_or_else(|| InvalidModifier("Invalid date offset".to_string()))?;
             *dt += TimeDelta::days(days as i64);
         }
-        Modifier::DateTimeOffset { date, time } => todo!(),
+        Modifier::DateTimeOffset { date: _, time: _ } => todo!(),
         Modifier::Ceiling => todo!(),
         Modifier::Floor => todo!(),
         Modifier::StartOfMonth => todo!(),
@@ -93,12 +93,12 @@ fn apply_modifier(dt: &mut NaiveDateTime, modifier: &str) -> Result<()> {
         Modifier::StartOfDay => {
             *dt = dt.date().and_hms_opt(0, 0, 0).unwrap();
         }
-        Modifier::Weekday(day) => todo!(),
+        Modifier::Weekday(_day) => todo!(),
         Modifier::UnixEpoch => todo!(),
         Modifier::JulianDay => todo!(),
         Modifier::Auto => todo!(),
         Modifier::Localtime => {
-            let utc_dt = DateTime::<Utc>::from_utc(*dt, Utc);
+            let utc_dt = DateTime::<Utc>::from_naive_utc_and_offset(*dt, Utc);
             *dt = utc_dt.with_timezone(&chrono::Local).naive_local();
         }
         Modifier::Utc => {
@@ -249,12 +249,13 @@ fn get_time_from_naive_datetime(value: NaiveDateTime) -> String {
 
 fn get_max_datetime_exclusive() -> NaiveDateTime {
     // The maximum date in SQLite is 9999-12-31
     NaiveDateTime::new(
-        NaiveDate::from_ymd_opt(10000, 01, 01).unwrap(),
+        NaiveDate::from_ymd_opt(10000, 1, 1).unwrap(),
         NaiveTime::from_hms_milli_opt(00, 00, 00, 000).unwrap(),
     )
 }
 
 /// Modifier doc https://www.sqlite.org/lang_datefunc.html#modifiers
+#[allow(dead_code)]
 #[derive(Debug, PartialEq)]
 enum Modifier {
     Days(i64),
@@ -340,23 +341,17 @@ fn parse_modifier(modifier: &str) -> Result<Modifier> {
         1 => {
             // first part can be either date ±YYYY-MM-DD or 3 types of time modifiers
             let date = parse_modifier_date(parts[0]);
-            if date.is_ok() {
-                Ok(Modifier::DateTimeOffset {
-                    date: date.unwrap(),
-                    time: None,
-                })
+            if let Ok(date) = date {
+                Ok(Modifier::DateTimeOffset { date, time: None })
             } else {
                 // try to parse time if error parsing date
                 let time = parse_modifier_time(parts[0])?;
                 // TODO handle nanoseconds
-                let time_delta;
-                if s.starts_with('-') {
-                    time_delta =
-                        TimeDelta::seconds(-(time.num_seconds_from_midnight() as i64));
+                let time_delta = if s.starts_with('-') {
+                    TimeDelta::seconds(-(time.num_seconds_from_midnight() as i64))
                 } else {
-                    time_delta =
-                        TimeDelta::seconds(time.num_seconds_from_midnight() as i64);
-                }
+                    TimeDelta::seconds(time.num_seconds_from_midnight() as i64)
+                };
                 Ok(Modifier::TimeOffset(time_delta))
             }
         }
diff --git a/core/vdbe/mod.rs b/core/vdbe/mod.rs
index be3856fdc..a6e5a4dff 100644
--- a/core/vdbe/mod.rs
+++ b/core/vdbe/mod.rs
@@ -41,6 +41,7 @@ use regex::Regex;
 use std::borrow::BorrowMut;
 use std::cell::RefCell;
 use std::collections::{BTreeMap, HashMap};
+use std::fmt::Display;
 use std::rc::Rc;
 
 pub type BranchOffset = i64;
@@ -49,18 +50,20 @@ pub type CursorID = usize;
 
 pub type PageIdx = usize;
 
+#[allow(dead_code)]
 #[derive(Debug)]
 pub enum Func {
     Scalar(ScalarFunc),
     Json(JsonFunc),
 }
 
-impl ToString for Func {
-    fn to_string(&self) -> String {
-        match self {
+impl Display for Func {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let str = match self {
             Func::Scalar(scalar_func) => scalar_func.to_string(),
             Func::Json(json_func) => json_func.to_string(),
-        }
+        };
+        write!(f, "{}", str)
     }
 }
@@ -739,7 +742,7 @@ impl Program {
                         state.pc = target_pc;
                     }
                     _ => {
-                        if &state.registers[lhs] == &state.registers[rhs] {
+                        if state.registers[lhs] == state.registers[rhs] {
                             state.pc = target_pc;
                         } else {
                             state.pc += 1;
@@ -761,7 +764,7 @@ impl Program {
                         state.pc = target_pc;
                     }
                     _ => {
-                        if &state.registers[lhs] != &state.registers[rhs] {
+                        if state.registers[lhs] != state.registers[rhs] {
                             state.pc = target_pc;
                         } else {
                             state.pc += 1;
@@ -783,7 +786,7 @@ impl Program {
                         state.pc = target_pc;
                     }
                     _ => {
-                        if &state.registers[lhs] < &state.registers[rhs] {
+                        if state.registers[lhs] < state.registers[rhs] {
                             state.pc = target_pc;
                         } else {
                             state.pc += 1;
@@ -805,7 +808,7 @@ impl Program {
                         state.pc = target_pc;
                     }
                     _ => {
-                        if &state.registers[lhs] <= &state.registers[rhs] {
+                        if state.registers[lhs] <= state.registers[rhs] {
                             state.pc = target_pc;
                         } else {
                             state.pc += 1;
@@ -827,7 +830,7 @@ impl Program {
                         state.pc = target_pc;
                     }
                     _ => {
-                        if &state.registers[lhs] > &state.registers[rhs] {
+                        if state.registers[lhs] > state.registers[rhs] {
                             state.pc = target_pc;
                         } else {
                             state.pc += 1;
@@ -849,7 +852,7 @@ impl Program {
                         state.pc = target_pc;
                     }
                     _ => {
-                        if &state.registers[lhs] >= &state.registers[rhs] {
+                        if state.registers[lhs] >= state.registers[rhs] {
                             state.pc = target_pc;
                         } else {
                             state.pc += 1;
@@ -1413,7 +1416,7 @@ impl Program {
                 } => {
                     let arg_count = func.arg_count;
                     match &func.func {
-                        crate::function::Func::Json(JsonFunc::JSON) => {
+                        crate::function::Func::Json(JsonFunc::Json) => {
                             let json_value = &state.registers[*start_reg];
                             let json_str = get_json(json_value);
                             match json_str {
@@ -1714,8 +1717,8 @@ impl Program {
 fn get_new_rowid<R: Rng>(cursor: &mut Box<dyn Cursor>, mut rng: R) -> Result<Option<u64>> {
     cursor.seek_to_last()?;
     let mut rowid = cursor.rowid()?.unwrap_or(0) + 1;
-    if rowid > std::i64::MAX.try_into().unwrap() {
-        let distribution = Uniform::from(1..=std::i64::MAX);
+    if rowid > i64::MAX.try_into().unwrap() {
+        let distribution = Uniform::from(1..=i64::MAX);
         let max_attempts = 100;
         for count in 0..max_attempts {
             rowid = distribution.sample(&mut rng).try_into().unwrap();
diff --git a/simulator/main.rs b/simulator/main.rs
index 6e623b890..25760af53 100644
--- a/simulator/main.rs
+++ b/simulator/main.rs
@@ -5,6 +5,7 @@ use std::cell::RefCell;
 use std::rc::Rc;
 use std::sync::Arc;
 
+#[allow(clippy::arc_with_non_send_sync)]
 fn main() {
     let seed = match std::env::var("SEED") {
         Ok(seed) => seed.parse::<u64>().unwrap(),
diff --git a/sqlite3/src/lib.rs b/sqlite3/src/lib.rs
index c0495e2c1..e79dc1f09 100644
--- a/sqlite3/src/lib.rs
+++ b/sqlite3/src/lib.rs
@@ -83,6 +83,7 @@ pub unsafe extern "C" fn sqlite3_shutdown() -> ffi::c_int {
 }
 
 #[no_mangle]
+#[allow(clippy::arc_with_non_send_sync)]
 pub unsafe extern "C" fn sqlite3_open(
     filename: *const ffi::c_char,
     db_out: *mut *mut sqlite3,
@@ -883,7 +884,7 @@ fn sqlite3_errstr_impl(rc: i32) -> *const std::ffi::c_char {
         "datatype mismatch",                // SQLITE_MISMATCH
         "bad parameter or other API misuse", // SQLITE_MISUSE
         #[cfg(feature = "lfs")]
-        "",                                 // SQLITE_NOLFS
+        "", // SQLITE_NOLFS
         #[cfg(not(feature = "lfs"))]
         "large file support is disabled",   // SQLITE_NOLFS
         "authorization denied",             // SQLITE_AUTH
diff --git a/test/src/lib.rs b/test/src/lib.rs
index c36f6ce7c..46ad3e879 100644
--- a/test/src/lib.rs
+++ b/test/src/lib.rs
@@ -4,11 +4,13 @@ use std::fs;
 use std::path::PathBuf;
 use std::sync::Arc;
 
+#[allow(dead_code)]
 struct TempDatabase {
     pub path: PathBuf,
     pub io: Arc<dyn limbo_core::IO>,
 }
 
+#[allow(dead_code, clippy::arc_with_non_send_sync)]
 impl TempDatabase {
     pub fn new(table_sql: &str) -> Self {
         let mut path = env::current_dir().unwrap();
@@ -174,7 +176,7 @@ mod tests {
         let mut huge_texts = Vec::new();
         for i in 0..iterations {
             let mut huge_text = String::new();
-            for j in 0..8192 {
+            for _j in 0..8192 {
                 huge_text.push((b'A' + i as u8) as char);
             }
             huge_texts.push(huge_text);