Working version of the incremental column and "optimal" record compare functions. Now we optimize them.
This commit is contained in:
Krishna Vishal
2025-06-27 22:13:40 +05:30
parent 692f0413eb
commit ef147181c2
4 changed files with 688 additions and 468 deletions

View File

@@ -14,8 +14,8 @@ use crate::{
translate::{collate::CollationSeq, plan::IterationDirection},
turso_assert,
types::{
compare_records_generic, find_compare, IndexKeyInfo, IndexKeySortOrder, ParseRecordState,
RecordCompare, RecordCursor,
compare_records_generic, find_compare, get_tie_breaker_from_seek_op, IndexKeyInfo,
IndexKeySortOrder, ParseRecordState, RecordCompare, RecordCursor,
},
MvCursor,
};
@@ -803,6 +803,7 @@ impl BTreeCursor {
std::mem::swap(payload, &mut payload_swap);
let mut reuse_immutable = self.get_immutable_record_or_create();
reuse_immutable.as_mut().unwrap().invalidate();
reuse_immutable
.as_mut()
@@ -1456,7 +1457,7 @@ impl BTreeCursor {
let record = record.as_ref().unwrap();
let key_values = key.get_values();
let tie_breaker = get_tie_breaker_from_seek_op(seek_op);
// OLD WAY - compare_immutable
let old_cmp = {
let record_values = record.get_values();
@@ -1470,30 +1471,37 @@ impl BTreeCursor {
};
// NEW WAY - optimized comparison
let new_cmp =
match record_comparer.compare(record, &key_values, index_info, &self.collations, 0) {
Ok(ordering) => ordering,
Err(e) => {
println!("Optimized record comparison failed: {:?}", e);
// For debugging, let's also try generic comparison
match compare_records_generic(
record,
&key_values,
index_info,
&self.collations,
0,
) {
Ok(ordering) => {
println!("Generic comparison succeeded where optimized failed");
ordering
}
Err(fallback_err) => {
println!("Generic comparison also failed: {:?}", fallback_err);
old_cmp // Use old comparison as final fallback
}
let new_cmp = match record_comparer.compare(
record,
&key_values,
index_info,
&self.collations,
0,
tie_breaker,
) {
Ok(ordering) => ordering,
Err(e) => {
println!("Optimized record comparison failed: {:?}", e);
// For debugging, let's also try generic comparison
match compare_records_generic(
record,
&key_values,
index_info,
&self.collations,
0,
tie_breaker,
) {
Ok(ordering) => {
println!("Generic comparison succeeded where optimized failed");
ordering
}
Err(fallback_err) => {
println!("Generic comparison also failed: {:?}", fallback_err);
old_cmp // Use old comparison as final fallback
}
}
};
}
};
// COMPARE RESULTS
if old_cmp != new_cmp {
@@ -1828,25 +1836,21 @@ impl BTreeCursor {
let iter_dir = cmp.iteration_direction();
let key_values = index_key.get_values();
let index_info = &self.index_key_info.unwrap();
let index_info_default = IndexKeyInfo::default();
let index_info = self
.index_key_info
.as_ref()
.unwrap_or(&index_info_default)
.clone();
let record_comparer = find_compare(&key_values, &index_info, &self.collations);
println!("=== INDEXBTREE_MOVE_TO START ===");
println!("Searching for: {:?}", key_values);
println!("record_comparer: {:?}", record_comparer);
tracing::debug!("Using record comparison strategy: {:?}", record_comparer);
let tie_breaker = get_tie_breaker_from_seek_op(cmp);
println!("tie_breaker = {:?}", tie_breaker);
'outer: loop {
let page = self.stack.top();
return_if_locked_maybe_load!(self.pager, page);
let page = page.get();
let contents = page.get().contents.as_ref().unwrap();
println!(
"Current page: {}, is_leaf: {}",
page.get().id,
contents.is_leaf()
);
if contents.is_leaf() {
let eq_seen = match &self.seek_state {
CursorSeekState::MovingBetweenPages { eq_seen } => eq_seen.get(),
@@ -1855,15 +1859,9 @@ impl BTreeCursor {
self.seek_state = CursorSeekState::FoundLeaf {
eq_seen: Cell::new(eq_seen),
};
println!("=== REACHED LEAF PAGE ===");
return Ok(CursorResult::Ok(()));
}
println!(
"Processing interior page with {} cells",
contents.cell_count()
);
if matches!(
self.seek_state,
CursorSeekState::Start | CursorSeekState::MovingBetweenPages { .. }
@@ -1901,16 +1899,11 @@ impl BTreeCursor {
loop {
let min = min_cell_idx.get();
let max = max_cell_idx.get();
println!("Binary search: min={}, max={}", min, max);
if min > max {
println!("Binary search complete");
let Some(leftmost_matching_cell) = nearest_matching_cell.get() else {
println!("No matching cell found, going to rightmost child");
self.stack.set_cell_index(contents.cell_count() as i32 + 1);
match contents.rightmost_pointer() {
Some(right_most_pointer) => {
println!("Going to rightmost child page: {}", right_most_pointer);
let mem_page = self.read_page(right_most_pointer as usize)?;
self.stack.push(mem_page);
self.seek_state = CursorSeekState::MovingBetweenPages {
@@ -1928,7 +1921,12 @@ impl BTreeCursor {
let matching_cell =
contents.cell_get(leftmost_matching_cell, self.usable_space())?;
self.stack.set_cell_index(leftmost_matching_cell as i32);
// we don't advance in case of forward iteration and index tree internal nodes because we will visit this node going up.
// in backwards iteration, we must retreat because otherwise we would unnecessarily visit this node again.
// Example:
// this parent: key 666, and we found the target key in the left child.
// left child has: key 663, key 664, key 665
// we need to move to the previous parent (with e.g. key 662) when iterating backwards so that we don't end up back here again.
if iter_dir == IterationDirection::Backwards {
self.stack.retreat();
}
@@ -1939,7 +1937,6 @@ impl BTreeCursor {
unreachable!("unexpected cell type: {:?}", matching_cell);
};
println!("Going to left child page: {}", left_child_page);
let mem_page = self.read_page(*left_child_page as usize)?;
self.stack.push(mem_page);
self.seek_state = CursorSeekState::MovingBetweenPages {
@@ -1948,8 +1945,7 @@ impl BTreeCursor {
continue 'outer;
}
let cur_cell_idx = (min + max) >> 1;
println!("Examining cell at index: {}", cur_cell_idx);
let cur_cell_idx = (min + max) >> 1; // rustc generates extra insns for (min+max)/2 due to them being isize. we know min&max are >=0 here.
self.stack.set_cell_index(cur_cell_idx as i32);
let cell = contents.cell_get(cur_cell_idx as usize, self.usable_space())?;
let BTreeCell::IndexInteriorCell(IndexInteriorCell {
@@ -1965,19 +1961,22 @@ impl BTreeCursor {
if let Some(next_page) = first_overflow_page {
return_if_io!(self.process_overflow_read(payload, *next_page, *payload_size))
} else {
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
.invalidate();
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
.start_serialization(payload);
self.record_cursor.borrow_mut().invalidate();
};
let (target_leaf_page_is_in_left_subtree, is_eq) = {
let record = self.get_immutable_record();
let record = record.as_ref().unwrap();
// USE OLD COMPARISON ONLY (temporarily disable optimization)
let interior_cell_vs_index_key = {
// DEBUG COMPARISON - Compare old vs new method
let old_cmp = {
let record_values = record.get_values();
let record_slice_equal_number_of_cols = &record_values[..key_values.len()];
compare_immutable(
@@ -1988,26 +1987,100 @@ impl BTreeCursor {
)
};
println!(
"Interior cell comparison: key={:?} vs record={:?} => {:?}",
key_values,
record.get_values(),
interior_cell_vs_index_key
);
let target_is_left = match cmp {
SeekOp::GT => interior_cell_vs_index_key.is_gt(),
SeekOp::GE { .. } => interior_cell_vs_index_key.is_ge(),
SeekOp::LE { .. } => interior_cell_vs_index_key.is_gt(),
SeekOp::LT => interior_cell_vs_index_key.is_ge(),
let new_cmp = match record_comparer.compare(
record,
&key_values,
&index_info,
&self.collations,
0,
tie_breaker,
) {
Ok(ordering) => ordering,
Err(e) => {
println!("Optimized record comparison failed: {:?}", e);
match compare_records_generic(
record,
&key_values,
&index_info,
&self.collations,
0,
tie_breaker,
) {
Ok(ordering) => {
println!("Generic comparison succeeded where optimized failed");
ordering
}
Err(fallback_err) => {
println!("Generic comparison also failed: {:?}", fallback_err);
old_cmp // Use old comparison as final fallback
}
}
}
};
println!(
"Navigation decision: seek_op={:?}, cell_vs_key={:?}, go_left={}",
cmp, interior_cell_vs_index_key, target_is_left
);
println!("old_cmp: {:?}, new_cmp: {:?}", old_cmp, new_cmp);
(target_is_left, interior_cell_vs_index_key.is_eq())
// Check for mismatches
if old_cmp != new_cmp {
println!(
"COMPARISON MISMATCH in indexbtree_move_to! Old: {:?}, New: {:?}, Strategy: {:?}",
old_cmp, new_cmp, record_comparer);
let record_values = record.get_values();
println!(
"Record values: {:?}, Key values: {:?}, Index info: {:?}",
record_values, key_values, index_info
);
// Use old comparison for correctness while debugging
let interior_cell_vs_index_key = old_cmp;
} else {
// Use new comparison if they match
let interior_cell_vs_index_key = new_cmp;
}
let interior_cell_vs_index_key =
if old_cmp != new_cmp { old_cmp } else { new_cmp };
// in sqlite btrees left child pages have <= keys.
// in general, in forwards iteration we want to find the first key that matches the seek condition.
// in backwards iteration we want to find the last key that matches the seek condition.
//
// Logic table for determining if target leaf page is in left subtree.
// For index b-trees this is a bit more complicated since the interior cells contain payloads (the key is the payload).
// and for non-unique indexes there might be several cells with the same key.
//
// Forwards iteration (looking for first match in tree):
// OP | Current Cell vs Seek Key | Action? | Explanation
// GT | > | go left | First > key could be exactly this one, or in left subtree
// GT | = or < | go right | First > key must be in right subtree
// GE | > | go left | First >= key could be exactly this one, or in left subtree
// GE | = | go left | First >= key could be exactly this one, or in left subtree
// GE | < | go right | First >= key must be in right subtree
//
// Backwards iteration (looking for last match in tree):
// OP | Current Cell vs Seek Key | Action? | Explanation
// LE | > | go left | Last <= key must be in left subtree
// LE | = | go right | Last <= key is either this one, or somewhere to the right of this one. So we need to go right to make sure
// LE | < | go right | Last <= key must be in right subtree
// LT | > | go left | Last < key must be in left subtree
// LT | = | go left | Last < key must be in left subtree since we want strictly less than
// LT | < | go right | Last < key could be exactly this one, or in right subtree
//
// No iteration (point query):
// EQ | > | go left | First = key must be in left subtree
// EQ | = | go left | First = key could be exactly this one, or in left subtree
// EQ | < | go right | First = key must be in right subtree
(
match cmp {
SeekOp::GT => interior_cell_vs_index_key.is_gt(),
SeekOp::GE { .. } => interior_cell_vs_index_key.is_ge(),
SeekOp::LE { .. } => interior_cell_vs_index_key.is_gt(),
SeekOp::LT => interior_cell_vs_index_key.is_ge(),
},
interior_cell_vs_index_key.is_eq(),
)
};
if is_eq {
@@ -2015,21 +2088,259 @@ impl BTreeCursor {
}
if target_leaf_page_is_in_left_subtree {
println!(
"Going left: setting nearest_matching_cell={}, max={}",
cur_cell_idx,
cur_cell_idx - 1
);
nearest_matching_cell.set(Some(cur_cell_idx as usize));
max_cell_idx.set(cur_cell_idx - 1);
} else {
println!("Going right: setting min={}", cur_cell_idx + 1);
min_cell_idx.set(cur_cell_idx + 1);
}
}
}
}
// /// Specialized version of move_to() for index btrees.
// #[instrument(skip(self, index_key), level = Level::TRACE)]
// fn indexbtree_move_to(
// &mut self,
// index_key: &ImmutableRecord,
// cmp: SeekOp,
// ) -> Result<CursorResult<()>> {
// let iter_dir = cmp.iteration_direction();
// let key_values = index_key.get_values();
// let index_info_default = IndexKeyInfo::default();
// let index_info = self
// .index_key_info
// .as_ref()
// .unwrap_or(&index_info_default)
// .clone();
// let record_comparer = find_compare(&key_values, &index_info, &self.collations);
// println!("=== INDEXBTREE_MOVE_TO START ===");
// println!("Searching for: {:?}", key_values);
// println!("record_comparer: {:?}", record_comparer);
// tracing::debug!("Using record comparison strategy: {:?}", record_comparer);
// 'outer: loop {
// let page = self.stack.top();
// return_if_locked_maybe_load!(self.pager, page);
// let page = page.get();
// let contents = page.get().contents.as_ref().unwrap();
// println!(
// "Current page: {}, is_leaf: {}",
// page.get().id,
// contents.is_leaf()
// );
// if contents.is_leaf() {
// let eq_seen = match &self.seek_state {
// CursorSeekState::MovingBetweenPages { eq_seen } => eq_seen.get(),
// _ => false,
// };
// self.seek_state = CursorSeekState::FoundLeaf {
// eq_seen: Cell::new(eq_seen),
// };
// println!("=== REACHED LEAF PAGE ===");
// return Ok(CursorResult::Ok(()));
// }
// println!(
// "Processing interior page with {} cells",
// contents.cell_count()
// );
// if matches!(
// self.seek_state,
// CursorSeekState::Start | CursorSeekState::MovingBetweenPages { .. }
// ) {
// let eq_seen = match &self.seek_state {
// CursorSeekState::MovingBetweenPages { eq_seen } => eq_seen.get(),
// _ => false,
// };
// let cell_count = contents.cell_count();
// let min_cell_idx = Cell::new(0);
// let max_cell_idx = Cell::new(cell_count as isize - 1);
// let nearest_matching_cell = Cell::new(None);
// self.seek_state = CursorSeekState::InteriorPageBinarySearch {
// min_cell_idx,
// max_cell_idx,
// nearest_matching_cell,
// eq_seen: Cell::new(eq_seen),
// };
// }
// let CursorSeekState::InteriorPageBinarySearch {
// min_cell_idx,
// max_cell_idx,
// nearest_matching_cell,
// eq_seen,
// } = &self.seek_state
// else {
// unreachable!(
// "we must be in an interior binary search state, got {:?}",
// self.seek_state
// );
// };
// loop {
// let min = min_cell_idx.get();
// let max = max_cell_idx.get();
// println!("Binary search: min={}, max={}", min, max);
// if min > max {
// println!("Binary search complete");
// let Some(leftmost_matching_cell) = nearest_matching_cell.get() else {
// println!("No matching cell found, going to rightmost child");
// self.stack.set_cell_index(contents.cell_count() as i32 + 1);
// match contents.rightmost_pointer() {
// Some(right_most_pointer) => {
// println!("Going to rightmost child page: {}", right_most_pointer);
// let mem_page = self.read_page(right_most_pointer as usize)?;
// self.stack.push(mem_page);
// self.seek_state = CursorSeekState::MovingBetweenPages {
// eq_seen: Cell::new(eq_seen.get()),
// };
// continue 'outer;
// }
// None => {
// unreachable!(
// "we shall not go back up! The only way is down the slope"
// );
// }
// }
// };
// println!("Found matching cell at index: {}", leftmost_matching_cell);
// let matching_cell = contents.cell_get(
// leftmost_matching_cell,
// payload_overflow_threshold_max(
// contents.page_type(),
// self.usable_space() as u16,
// ),
// payload_overflow_threshold_min(
// contents.page_type(),
// self.usable_space() as u16,
// ),
// self.usable_space(),
// )?;
// self.stack.set_cell_index(leftmost_matching_cell as i32);
// if iter_dir == IterationDirection::Backwards {
// self.stack.retreat();
// }
// let BTreeCell::IndexInteriorCell(IndexInteriorCell {
// left_child_page, ..
// }) = &matching_cell
// else {
// unreachable!("unexpected cell type: {:?}", matching_cell);
// };
// println!("Going to left child page: {}", left_child_page);
// let mem_page = self.read_page(*left_child_page as usize)?;
// self.stack.push(mem_page);
// self.seek_state = CursorSeekState::MovingBetweenPages {
// eq_seen: Cell::new(eq_seen.get()),
// };
// continue 'outer;
// }
// let cur_cell_idx = (min + max) >> 1;
// println!("Examining cell at index: {}", cur_cell_idx);
// self.stack.set_cell_index(cur_cell_idx as i32);
// let cell = contents.cell_get(
// cur_cell_idx as usize,
// payload_overflow_threshold_max(
// contents.page_type(),
// self.usable_space() as u16,
// ),
// payload_overflow_threshold_min(
// contents.page_type(),
// self.usable_space() as u16,
// ),
// self.usable_space(),
// )?;
// let BTreeCell::IndexInteriorCell(IndexInteriorCell {
// payload,
// payload_size,
// first_overflow_page,
// ..
// }) = &cell
// else {
// unreachable!("unexpected cell type: {:?}", cell);
// };
// if let Some(next_page) = first_overflow_page {
// return_if_io!(self.process_overflow_read(payload, *next_page, *payload_size))
// } else {
// self.get_immutable_record_or_create()
// .as_mut()
// .unwrap()
// .invalidate();
// self.get_immutable_record_or_create()
// .as_mut()
// .unwrap()
// .start_serialization(payload);
// self.record_cursor.borrow_mut().invalidate();
// };
// let (target_leaf_page_is_in_left_subtree, is_eq) = {
// let record = self.get_immutable_record();
// let record = record.as_ref().unwrap();
// // USE OLD COMPARISON ONLY (temporarily disable optimization)
// let interior_cell_vs_index_key = {
// let record_values = record.get_values();
// let record_slice_equal_number_of_cols = &record_values[..key_values.len()];
// compare_immutable(
// record_slice_equal_number_of_cols,
// key_values.as_slice(),
// self.key_sort_order(),
// &self.collations,
// )
// };
// println!(
// "Interior cell comparison: key={:?} vs record={:?} => {:?}",
// key_values,
// record.get_values(),
// interior_cell_vs_index_key
// );
// let target_is_left = match cmp {
// SeekOp::GT => interior_cell_vs_index_key.is_gt(),
// SeekOp::GE { .. } => interior_cell_vs_index_key.is_ge(),
// SeekOp::LE { .. } => interior_cell_vs_index_key.is_gt(),
// SeekOp::LT => interior_cell_vs_index_key.is_ge(),
// };
// println!(
// "Navigation decision: seek_op={:?}, cell_vs_key={:?}, go_left={}",
// cmp, interior_cell_vs_index_key, target_is_left
// );
// (target_is_left, interior_cell_vs_index_key.is_eq())
// };
// if is_eq {
// eq_seen.set(true);
// }
// if target_leaf_page_is_in_left_subtree {
// println!(
// "Going left: setting nearest_matching_cell={}, max={}",
// cur_cell_idx,
// cur_cell_idx - 1
// );
// nearest_matching_cell.set(Some(cur_cell_idx as usize));
// max_cell_idx.set(cur_cell_idx - 1);
// } else {
// println!("Going right: setting min={}", cur_cell_idx + 1);
// min_cell_idx.set(cur_cell_idx + 1);
// }
// }
// }
// }
/// Specialized version of do_seek() for table btrees that uses binary search instead
/// of iterating cells in order.
#[instrument(skip_all, level = Level::INFO)]
@@ -2158,8 +2469,14 @@ impl BTreeCursor {
key: &ImmutableRecord,
seek_op: SeekOp,
) -> Result<CursorResult<bool>> {
println!("###we are in index btree seek###");
let key_values = key.get_values();
let index_info = &self.index_key_info.unwrap();
let index_info_default = IndexKeyInfo::default();
let index_info = self
.index_key_info
.as_ref()
.unwrap_or(&index_info_default)
.clone();
let record_comparer = find_compare(&key_values, &index_info, &self.collations);
tracing::debug!(
@@ -2250,6 +2567,10 @@ impl BTreeCursor {
if let Some(next_page) = first_overflow_page {
return_if_io!(self.process_overflow_read(payload, *next_page, *payload_size))
} else {
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
.invalidate();
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
@@ -2257,8 +2578,12 @@ impl BTreeCursor {
self.record_cursor.borrow_mut().invalidate();
};
let (_, found) =
self.compare_with_current_record(key, seek_op, &record_comparer, &index_info);
let (_, found) = self.compare_with_current_record(
key_values.as_slice(),
seek_op,
&record_comparer,
&index_info,
);
moving_up_to_parent.set(false);
return Ok(CursorResult::Ok(found));
}
@@ -2346,6 +2671,10 @@ impl BTreeCursor {
if let Some(next_page) = first_overflow_page {
return_if_io!(self.process_overflow_read(payload, *next_page, *payload_size))
} else {
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
.invalidate();
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
@@ -2353,8 +2682,12 @@ impl BTreeCursor {
self.record_cursor.borrow_mut().invalidate();
};
let (cmp, found) =
self.compare_with_current_record(key, seek_op, &record_comparer, &index_info);
let (cmp, found) = self.compare_with_current_record(
key_values.as_slice(),
seek_op,
&record_comparer,
&index_info,
);
if found {
nearest_matching_cell.set(Some(cur_cell_idx as usize));
match iter_dir {
@@ -2382,174 +2715,38 @@ impl BTreeCursor {
}
}
// fn compare_with_current_record(
// &self,
// key: &ImmutableRecord,
// seek_op: SeekOp,
// record_comparer: &RecordCompare,
// index_info: &IndexKeyInfo,
// ) -> (Ordering, bool) {
// let cmp = {
// let record = self.get_immutable_record();
// let record = record.as_ref().unwrap();
// tracing::debug!(?record);
// let key_values = key.get_values();
// record_comparer
// .compare(record, &key_values, index_info, &self.collations, 0)
// .unwrap()
// };
// let old_cmp = {
// let record = self.get_immutable_record();
// let record = record.as_ref().unwrap();
// let record_values = record.get_values();
// let key_values = key.get_values();
// let record_slice_equal_number_of_cols = &record_values[..key_values.len()];
// compare_immutable(
// record_slice_equal_number_of_cols,
// key_values.as_slice(),
// self.key_sort_order(),
// &self.collations,
// )
// };
// println!("oldcmp: {:?}, newcmp: {:?}", old_cmp, cmp);
// let found = match seek_op {
// SeekOp::GT => cmp.is_gt(),
// SeekOp::GE { eq_only: true } => old_cmp.is_eq(),
// SeekOp::GE { eq_only: false } => old_cmp.is_ge(),
// SeekOp::LE { eq_only: true } => old_cmp.is_eq(),
// SeekOp::LE { eq_only: false } => old_cmp.is_le(),
// SeekOp::LT => old_cmp.is_lt(),
// };
// (old_cmp, found)
// }
fn compare_with_current_record(
&self,
key: &ImmutableRecord,
key_unpacked: &[RefValue],
seek_op: SeekOp,
record_comparer: &RecordCompare,
index_info: &IndexKeyInfo,
) -> (Ordering, bool) {
let record = self.get_immutable_record();
let record = record.as_ref().unwrap();
tracing::debug!(?record);
let key_values = key.get_values();
let tie_breaker = get_tie_breaker_from_seek_op(seek_op);
// Debug: Print the actual values being compared
let record_values = record.get_values();
let record_slice_equal_number_of_cols = &record_values[..key_values.len()];
let cmp = record_comparer
.compare(
record,
key_unpacked,
index_info,
&self.collations,
0,
tie_breaker,
)
.unwrap();
println!("=== COMPARISON DEBUG ===");
println!("Key values: {:?}", key_values);
println!("Record values (full): {:?}", record_values);
println!(
"Record values (slice): {:?}",
record_slice_equal_number_of_cols
);
// If we're dealing with integers, show the actual integer values
if !key_values.is_empty() && !record_slice_equal_number_of_cols.is_empty() {
match (&key_values[0], &record_slice_equal_number_of_cols[0]) {
(RefValue::Integer(key_int), RefValue::Integer(record_int)) => {
println!("Integer comparison: key={}, record={}", key_int, record_int);
}
_ => {
println!(
"Non-integer comparison: key={:?}, record={:?}",
key_values[0], record_slice_equal_number_of_cols[0]
);
}
}
}
// NEW WAY - optimized comparison
let new_cmp =
match record_comparer.compare(record, &key_values, index_info, &self.collations, 0) {
Ok(ordering) => ordering,
Err(e) => {
println!("Optimized record comparison failed: {:?}", e);
// For debugging, let's also try generic comparison
match compare_records_generic(
record,
&key_values,
index_info,
&self.collations,
0,
) {
Ok(ordering) => {
println!("Generic comparison succeeded where optimized failed");
ordering
}
Err(fallback_err) => {
println!("Generic comparison also failed: {:?}", fallback_err);
// Final fallback to compare_immutable
compare_immutable(
record_slice_equal_number_of_cols,
key_values.as_slice(),
self.key_sort_order(),
&self.collations,
)
}
}
}
};
// OLD WAY - compare_immutable (for debugging)
let old_cmp = compare_immutable(
record_slice_equal_number_of_cols,
key_values.as_slice(),
self.key_sort_order(),
&self.collations,
);
// Debug output
if old_cmp != new_cmp {
println!(
"COMPARISON MISMATCH! oldcmp: {:?}, newcmp: {:?}",
old_cmp, new_cmp
);
// Use old comparison for correctness while debugging
let cmp = old_cmp;
let found = match seek_op {
SeekOp::GT => cmp.is_gt(),
SeekOp::GE { eq_only: true } => cmp.is_eq(),
SeekOp::GE { eq_only: false } => cmp.is_ge(),
SeekOp::LE { eq_only: true } => cmp.is_eq(),
SeekOp::LE { eq_only: false } => cmp.is_le(),
SeekOp::LT => cmp.is_lt(),
};
println!("Using old comparison - found: {}", found);
return (cmp, found);
} else {
println!("oldcmp: {:?}, newcmp: {:?}", old_cmp, new_cmp);
}
// Use the new comparison consistently
let cmp = new_cmp;
let found = match seek_op {
SeekOp::GT => cmp.is_gt(),
SeekOp::GE { eq_only: true } => cmp.is_eq(),
SeekOp::GE { eq_only: false } => cmp.is_ge(),
SeekOp::LE { eq_only: true } => cmp.is_eq(),
SeekOp::LE { eq_only: false } => cmp.is_le(),
SeekOp::GE { eq_only: true } => cmp.is_eq(),
SeekOp::LT => cmp.is_lt(),
SeekOp::LE { eq_only: false } => cmp.is_le(),
SeekOp::LE { eq_only: true } => cmp.is_eq(),
};
println!(
"Final result - cmp: {:?}, found: {}, seek_op: {:?}",
cmp, found, seek_op
);
println!("========================");
(cmp, found)
}
@@ -2562,6 +2759,10 @@ impl BTreeCursor {
if let Some(next_page) = next_page {
self.process_overflow_read(payload, next_page, payload_size)
} else {
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
.invalidate();
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
@@ -4677,6 +4878,10 @@ impl BTreeCursor {
if let Some(next_page) = first_overflow_page {
return_if_io!(self.process_overflow_read(payload, next_page, payload_size))
} else {
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
.invalidate();
self.get_immutable_record_or_create()
.as_mut()
.unwrap()
@@ -7288,46 +7493,6 @@ mod tests {
(pager, page2.get().get().id, db, conn)
}
/// Debug-only test: runs the optimized comparison strategy chosen by
/// `find_compare` against the legacy `compare_immutable` path for a
/// single-column integer key that should compare equal, printing both
/// results for manual inspection.
/// NOTE(review): this test only prints — it contains no assertions, so it
/// can never fail; consider asserting that both paths report `Equal`.
#[test]
fn debug_single_comparison() {
// Create a simple test case to isolate the comparison issue
// NOTE(review): `key` is constructed but never used below — presumably
// leftover from an earlier version of this test.
let key = Value::Integer(0);
let record_values = vec![RefValue::Integer(0)]; // This should be equal
// Test the comparison directly
// Key metadata for a single ascending column with a rowid — matches
// the shape produced by `IndexKeyInfo::default()`.
let index_info = IndexKeyInfo {
sort_order: IndexKeySortOrder::default(),
has_rowid: true,
num_cols: 1,
};
let collations = vec![CollationSeq::Binary];
// Pick the specialized comparison strategy for this key shape.
let comparer = find_compare(&record_values, &index_info, &collations);
println!("Strategy selected: {:?}", comparer);
// Create an ImmutableRecord from the value
let register = Register::Value(Value::Integer(0));
let immutable_record = ImmutableRecord::from_registers(&[register], 1);
// Optimized path: compare serialized record vs unpacked key values,
// starting at column 0.
let result = comparer.compare(
&immutable_record,
&record_values,
&index_info,
&collations,
0,
);
println!("Comparison result: {:?}", result);
// Also test with compare_immutable for comparison
// Legacy path compared against itself — expected to be Equal.
let old_result = compare_immutable(
&record_values,
&record_values,
index_info.sort_order,
&collations,
);
println!("Old comparison result: {:?}", old_result);
}
#[test]
#[ignore]
pub fn btree_insert_fuzz_ex() {

View File

@@ -1053,49 +1053,49 @@ impl<T: Default + Copy, const N: usize> Iterator for SmallVecIter<'_, T, N> {
}
}
pub fn read_record(payload: &[u8], reuse_immutable: &mut ImmutableRecord) -> Result<()> {
// Let's clear previous use
reuse_immutable.invalidate();
// Copy payload to ImmutableRecord in order to make RefValue that point to this new buffer.
// By reusing this immutable record we make it less allocation expensive.
reuse_immutable.start_serialization(payload);
// pub fn read_record(payload: &[u8], reuse_immutable: &mut ImmutableRecord) -> Result<()> {
// // Let's clear previous use
// reuse_immutable.invalidate();
// // Copy payload to ImmutableRecord in order to make RefValue that point to this new buffer.
// // By reusing this immutable record we make it less allocation expensive.
// reuse_immutable.start_serialization(payload);
let mut pos = 0;
let (header_size, nr) = read_varint(payload)?;
assert!((header_size as usize) >= nr);
let mut header_size = (header_size as usize) - nr;
pos += nr;
// let mut pos = 0;
// let (header_size, nr) = read_varint(payload)?;
// assert!((header_size as usize) >= nr);
// let mut header_size = (header_size as usize) - nr;
// pos += nr;
let mut serial_types = SmallVec::<u64, 64>::new();
while header_size > 0 {
let (serial_type, nr) = read_varint(&reuse_immutable.get_payload()[pos..])?;
validate_serial_type(serial_type)?;
serial_types.push(serial_type);
pos += nr;
assert!(header_size >= nr);
header_size -= nr;
}
// let mut serial_types = SmallVec::<u64, 64>::new();
// while header_size > 0 {
// let (serial_type, nr) = read_varint(&reuse_immutable.get_payload()[pos..])?;
// validate_serial_type(serial_type)?;
// serial_types.push(serial_type);
// pos += nr;
// assert!(header_size >= nr);
// header_size -= nr;
// }
for &serial_type in &serial_types.data[..serial_types.len.min(serial_types.data.len())] {
let (value, n) = read_value(&reuse_immutable.get_payload()[pos..], unsafe {
serial_type.assume_init().try_into()?
})?;
pos += n;
reuse_immutable.add_value(value);
}
if let Some(extra) = serial_types.extra_data.as_ref() {
for serial_type in extra {
let (value, n) = read_value(
&reuse_immutable.get_payload()[pos..],
(*serial_type).try_into()?,
)?;
pos += n;
reuse_immutable.add_value(value);
}
}
// for &serial_type in &serial_types.data[..serial_types.len.min(serial_types.data.len())] {
// let (value, n) = read_value(&reuse_immutable.get_payload()[pos..], unsafe {
// serial_type.assume_init().try_into()?
// })?;
// pos += n;
// reuse_immutable.add_value(value);
// }
// if let Some(extra) = serial_types.extra_data.as_ref() {
// for serial_type in extra {
// let (value, n) = read_value(
// &reuse_immutable.get_payload()[pos..],
// (*serial_type).try_into()?,
// )?;
// pos += n;
// reuse_immutable.add_value(value);
// }
// }
Ok(())
}
// Ok(())
// }
/// Reads a value that might reference the buffer it is reading from. Be sure to store RefValue with the buffer
/// always.

View File

@@ -978,10 +978,6 @@ impl ImmutableRecord {
self.payload.as_blob_mut().extend_from_slice(payload);
}
pub fn end_serialization(&mut self) {}
pub fn add_value(&mut self, value: RefValue) {}
pub fn invalidate(&mut self) {
self.payload.as_blob_mut().clear(); }
@@ -1113,9 +1109,15 @@ impl RecordCursor {
let serial_type_obj = SerialType::try_from(serial_type as u64)?;
match serial_type_obj.kind() {
SerialTypeKind::Null => return Ok(RefValue::Null),
SerialTypeKind::ConstInt0 => return Ok(RefValue::Integer(0)),
SerialTypeKind::ConstInt1 => return Ok(RefValue::Integer(1)),
SerialTypeKind::Null => {
return Ok(RefValue::Null);
}
SerialTypeKind::ConstInt0 => {
return Ok(RefValue::Integer(0));
}
SerialTypeKind::ConstInt1 => {
return Ok(RefValue::Integer(1));
}
_ => {} // Continue to read from payload
}
@@ -1129,11 +1131,23 @@ impl RecordCursor {
let end_offset = self.header_offsets[idx + 2] as usize;
let payload = record.get_payload();
if start_offset >= payload.len() || end_offset > payload.len() {
if start_offset >= payload.len() {
return Ok(RefValue::Null);
}
let column_data = &payload[start_offset..end_offset];
// If end_offset is beyond payload, we might still be able to read the column
// But we need at least enough bytes for the serial type
let expected_size = serial_type_obj.size();
let available_bytes = payload.len().saturating_sub(start_offset);
if available_bytes < expected_size {
return Ok(RefValue::Null);
}
// Use the minimum of end_offset and what's actually available
let actual_end_offset = end_offset.min(payload.len());
let column_data = &payload[start_offset..actual_end_offset];
let (value, _) = crate::storage::sqlite3_ondisk::read_value(column_data, serial_type_obj)?;
Ok(value)
}
@@ -1326,7 +1340,7 @@ fn sqlite_int_float_compare(int_val: i64, float_val: f64) -> std::cmp::Ordering
/// be compared differently.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct IndexKeySortOrder(u64);
pub struct IndexKeySortOrder(pub u64);
impl IndexKeySortOrder {
pub fn get_sort_order_for_col(&self, column_idx: usize) -> SortOrder {
@@ -1369,6 +1383,16 @@ pub struct IndexKeyInfo {
pub num_cols: usize,
}
/// Fallback key metadata used when a cursor has no `index_key_info`:
/// a single key column (default, i.e. ascending, sort order) plus a rowid.
/// NOTE(review): these defaults mirror the `IndexKeyInfo` literal built
/// inline in the debug test above — confirm the two stay in sync.
impl Default for IndexKeyInfo {
fn default() -> Self {
Self {
sort_order: IndexKeySortOrder::default(),
has_rowid: true,
num_cols: 1,
}
}
}
impl IndexKeyInfo {
pub fn new_from_index(index: &Index) -> Self {
Self {
@@ -1410,6 +1434,7 @@ pub fn compare_immutable_for_testing(
r: &[RefValue],
index_key_sort_order: IndexKeySortOrder,
collations: &[CollationSeq],
tie_breaker: std::cmp::Ordering,
) -> std::cmp::Ordering {
let min_len = l.len().min(r.len());
@@ -1432,19 +1457,7 @@ pub fn compare_immutable_for_testing(
}
}
// All common fields are equal; resolve by field count difference
let len_cmp = l.len().cmp(&r.len());
if len_cmp == std::cmp::Ordering::Equal {
std::cmp::Ordering::Equal
} else {
// Use sort order of the last compared column, or default to Asc
let last_index = min_len.saturating_sub(1);
match index_key_sort_order.get_sort_order_for_col(last_index) {
SortOrder::Asc => len_cmp,
SortOrder::Desc => len_cmp.reverse(),
}
}
tie_breaker
}
#[derive(Debug, Clone, Copy)]
@@ -1462,15 +1475,23 @@ impl RecordCompare {
index_info: &IndexKeyInfo,
collations: &[CollationSeq],
skip: usize,
tie_breaker: std::cmp::Ordering,
) -> Result<std::cmp::Ordering> {
match self {
RecordCompare::Int => compare_records_int(serialized, unpacked, index_info, collations),
RecordCompare::Int => {
compare_records_int(serialized, unpacked, index_info, collations, tie_breaker)
}
RecordCompare::String => {
compare_records_string(serialized, unpacked, index_info, collations)
}
RecordCompare::Generic => {
compare_records_generic(serialized, unpacked, index_info, collations, skip)
compare_records_string(serialized, unpacked, index_info, collations, tie_breaker)
}
RecordCompare::Generic => compare_records_generic(
serialized,
unpacked,
index_info,
collations,
skip,
tie_breaker,
),
}
}
}
@@ -1491,34 +1512,77 @@ pub fn find_compare(
}
}
/// Map a seek opcode to the tie-breaker `Ordering` returned when a serialized
/// record compares equal to the unpacked key prefix.
///
/// This mirrors SQLite's `default_rc`: on an equal prefix, a forward seek
/// (GE/GT) must land on the first qualifying entry and a backward seek
/// (LE/LT) on the last, so "equal" is resolved as Greater or Less depending
/// on the direction and strictness of the comparison.
pub fn get_tie_breaker_from_seek_op(seek_op: SeekOp) -> std::cmp::Ordering {
    use std::cmp::Ordering;
    match seek_op {
        // Exact-match lookups ("key == X"): an equal prefix really is a hit.
        SeekOp::GE { eq_only: true } | SeekOp::LE { eq_only: true } => Ordering::Equal,
        // Forward seeks: find the *first* key that is >= (resp. >) the probe.
        SeekOp::GE { eq_only: false } => Ordering::Greater,
        SeekOp::GT => Ordering::Less,
        // Backward seeks: find the *last* key that is <= (resp. <) the probe.
        SeekOp::LE { eq_only: false } => Ordering::Less,
        SeekOp::LT => Ordering::Greater,
    }
}
fn compare_records_int(
serialized: &ImmutableRecord,
unpacked: &[RefValue],
index_info: &IndexKeyInfo,
collations: &[CollationSeq],
tie_breaker: std::cmp::Ordering,
) -> Result<std::cmp::Ordering> {
let payload = serialized.get_payload();
if payload.len() < 2 || payload[0] > 63 {
return compare_records_generic(serialized, unpacked, index_info, collations, 0);
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
0,
tie_breaker,
);
}
let header_size = payload[0] as usize;
let first_serial_type = payload[1];
if !matches!(first_serial_type, 1..=6 | 8 | 9) {
return compare_records_generic(serialized, unpacked, index_info, collations, 0);
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
0,
tie_breaker,
);
}
let data_start = header_size;
if data_start >= payload.len() {
return compare_records_generic(serialized, unpacked, index_info, collations, 0);
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
0,
tie_breaker,
);
}
let lhs_int = read_integer(&payload[data_start..], first_serial_type)?;
let RefValue::Integer(rhs_int) = unpacked[0] else {
return compare_records_generic(serialized, unpacked, index_info, collations, 0);
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
0,
tie_breaker,
);
};
let comparison = match index_info.sort_order.get_sort_order_for_col(0) {
SortOrder::Asc => lhs_int.cmp(&rhs_int),
SortOrder::Desc => lhs_int.cmp(&rhs_int).reverse(),
@@ -1527,17 +1591,16 @@ fn compare_records_int(
std::cmp::Ordering::Equal => {
// First fields equal, compare remaining fields if any
if unpacked.len() > 1 {
return compare_records_generic(serialized, unpacked, index_info, collations, 1);
} else {
let mut record_cursor = RecordCursor::new();
let serial_type_len = record_cursor.len(serialized);
if serial_type_len > unpacked.len() {
Ok(std::cmp::Ordering::Greater)
} else {
Ok(std::cmp::Ordering::Equal)
}
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
1,
tie_breaker,
);
}
Ok(tie_breaker)
}
other => Ok(other),
}
@@ -1548,10 +1611,18 @@ fn compare_records_string(
unpacked: &[RefValue],
index_info: &IndexKeyInfo,
collations: &[CollationSeq],
tie_breaker: std::cmp::Ordering,
) -> Result<std::cmp::Ordering> {
let payload = serialized.get_payload();
if payload.len() < 2 {
return compare_records_generic(serialized, unpacked, index_info, collations, 0);
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
0,
tie_breaker,
);
}
let header_size = payload[0] as usize;
@@ -1559,11 +1630,25 @@ fn compare_records_string(
// Check if serial type is not a string or if its a blob
if first_serial_type < 13 || (first_serial_type & 1) == 0 {
return compare_records_generic(serialized, unpacked, index_info, collations, 0);
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
0,
tie_breaker,
);
}
let RefValue::Text(rhs_text) = &unpacked[0] else {
return compare_records_generic(serialized, unpacked, index_info, collations, 0);
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
0,
tie_breaker,
);
};
let string_len = (first_serial_type as usize - 13) / 2;
@@ -1575,7 +1660,14 @@ fn compare_records_string(
let (lhs_value, _) = read_value(&payload[data_start..], serial_type)?;
let RefValue::Text(lhs_text) = lhs_value else {
return compare_records_generic(serialized, unpacked, index_info, collations, 0);
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
0,
tie_breaker,
);
};
let comparison = if let Some(collation) = collations.get(0) {
@@ -1602,17 +1694,16 @@ fn compare_records_string(
}
if unpacked.len() > 1 {
compare_records_generic(serialized, unpacked, index_info, collations, 1)
} else {
let mut record_cursor = RecordCursor::new();
let serial_type_len = record_cursor.len(serialized);
if serial_type_len > unpacked.len() {
Ok(std::cmp::Ordering::Greater)
} else {
Ok(std::cmp::Ordering::Equal)
}
return compare_records_generic(
serialized,
unpacked,
index_info,
collations,
1,
tie_breaker,
);
}
Ok(tie_breaker)
}
other => Ok(other),
}
@@ -1624,21 +1715,18 @@ pub fn compare_records_generic(
index_info: &IndexKeyInfo,
collations: &[CollationSeq],
skip: usize,
tie_breaker: std::cmp::Ordering,
) -> Result<std::cmp::Ordering> {
// println!("hitting compare_records_generic");
let payload = serialized.get_payload();
// println!("payload: {:?}", payload);
if payload.is_empty() {
return Ok(std::cmp::Ordering::Less);
}
let (header_size, mut pos) = read_varint(payload)?;
let header_end = header_size as usize;
debug_assert!(header_end <= payload.len());
let mut serial_types = Vec::new();
while pos < header_end {
let (serial_type, bytes_read) = read_varint(&payload[pos..])?;
serial_types.push(serial_type);
@@ -1647,110 +1735,55 @@ pub fn compare_records_generic(
let mut data_pos = header_size as usize;
// Skip over `skip` fields
for i in 0..skip {
let serial_type = SerialType::try_from(serial_types[i])?;
if !matches!(
serial_type.kind(),
SerialTypeKind::ConstInt0 | SerialTypeKind::ConstInt1 | SerialTypeKind::Null
) {
let len = serial_type.size();
data_pos += len;
data_pos += serial_type.size();
}
}
// println!("skip: {}", skip);
// println!(
// "unpacked.len(): {}, serial_types.len(): {}",
// unpacked.len(),
// serial_types.len()
// );
for i in skip..unpacked.len().min(serial_types.len()) {
let serial_type = SerialType::try_from(serial_types[i])?;
// println!("i = {}", i);
// println!("serial type kind = {:?}", serial_type.kind());
let rhs_value = &unpacked[i];
let rhs_value = &unpacked[i]; // key value
let lhs_value = match serial_type.kind() {
// record value
SerialTypeKind::ConstInt0 => RefValue::Integer(0),
SerialTypeKind::ConstInt1 => RefValue::Integer(1),
SerialTypeKind::Null => RefValue::Null,
_ => {
// Use existing read_value function for all other types
let (value, field_size) = read_value(&payload[data_pos..], serial_type)?;
data_pos += field_size;
value
}
};
// println!("lhs_value: {:?}, rhs_value: {:?}", lhs_value, rhs_value);
let comparison = match rhs_value {
RefValue::Integer(rhs_int) => {
match &lhs_value {
RefValue::Null => std::cmp::Ordering::Less,
RefValue::Integer(lhs_int) => lhs_int.cmp(rhs_int),
RefValue::Float(lhs_float) => {
sqlite_int_float_compare(*rhs_int, *lhs_float).reverse()
}
RefValue::Text(_) | RefValue::Blob(_) => std::cmp::Ordering::Less, // Numbers < Text/Blob
// Use the existing partial_cmp implementation for type ordering and basic comparison
let comparison = match (&lhs_value, rhs_value) {
// Special case: Text comparison with collation support
(RefValue::Text(lhs_text), RefValue::Text(rhs_text)) => {
if let Some(collation) = collations.get(i) {
collation.compare_strings(lhs_text.as_str(), rhs_text.as_str())
} else {
// Use existing partial_cmp for binary text comparison
lhs_value.partial_cmp(rhs_value).unwrap()
}
}
RefValue::Float(rhs_float) => {
match &lhs_value {
RefValue::Null => std::cmp::Ordering::Less,
RefValue::Integer(lhs_int) => sqlite_int_float_compare(*lhs_int, *rhs_float),
RefValue::Float(lhs_float) => {
if lhs_float.is_nan() && rhs_float.is_nan() {
std::cmp::Ordering::Equal
} else if lhs_float.is_nan() {
std::cmp::Ordering::Less // NaN is NULL
} else if rhs_float.is_nan() {
std::cmp::Ordering::Greater
} else {
lhs_float
.partial_cmp(rhs_float)
.unwrap_or(std::cmp::Ordering::Equal)
}
}
RefValue::Text(_) | RefValue::Blob(_) => std::cmp::Ordering::Less, // Numbers < Text/Blob
}
// Special case: Integer vs Float comparison using sqlite_int_float_compare
(RefValue::Integer(lhs_int), RefValue::Float(rhs_float)) => {
sqlite_int_float_compare(*lhs_int, *rhs_float)
}
RefValue::Text(rhs_text) => {
match &lhs_value {
RefValue::Null | RefValue::Integer(_) | RefValue::Float(_) => {
std::cmp::Ordering::Less
}
RefValue::Text(lhs_text) => {
if let Some(collation) = collations.get(i) {
collation.compare_strings(lhs_text.as_str(), rhs_text.as_str())
} else {
// Binary comparison (no collation)
lhs_text.value.to_slice().cmp(rhs_text.value.to_slice())
}
}
RefValue::Blob(_) => std::cmp::Ordering::Less, // Text < Blob
}
(RefValue::Float(lhs_float), RefValue::Integer(rhs_int)) => {
sqlite_int_float_compare(*rhs_int, *lhs_float).reverse()
}
// RHS is a blob
RefValue::Blob(rhs_blob) => match &lhs_value {
RefValue::Null | RefValue::Integer(_) | RefValue::Float(_) | RefValue::Text(_) => {
std::cmp::Ordering::Less
}
RefValue::Blob(lhs_blob) => lhs_blob.to_slice().cmp(rhs_blob.to_slice()),
},
// RHS is null
RefValue::Null => {
match &lhs_value {
RefValue::Null => std::cmp::Ordering::Equal,
RefValue::Float(f) if f.is_nan() => std::cmp::Ordering::Equal, // NaN treated as NULL
_ => std::cmp::Ordering::Less, // Non-NULL > NULL
}
}
_ => lhs_value.partial_cmp(rhs_value).unwrap(),
};
let final_comparison = match index_info.sort_order.get_sort_order_for_col(i) {
@@ -1764,7 +1797,8 @@ pub fn compare_records_generic(
}
}
Ok(serial_types.len().cmp(&unpacked.len()))
// All compared fields equal: use caller-provided tie_breaker
Ok(tie_breaker)
}
#[inline(always)]
@@ -2137,7 +2171,6 @@ impl RawSlice {
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::sqlite3_ondisk::read_value;
use crate::translate::collate::CollationSeq;
fn create_record(values: Vec<Value>) -> ImmutableRecord {
@@ -2198,16 +2231,26 @@ mod tests {
.map(|v| value_to_ref_value(v))
.collect();
let tie_breaker = std::cmp::Ordering::Equal;
let gold_result = compare_immutable_for_testing(
&serialized_ref_values,
&unpacked_values,
index_info.sort_order.clone(),
collations,
tie_breaker,
);
let comparer = find_compare(&unpacked_values, index_info, collations);
let optimized_result = comparer
.compare(&serialized, &unpacked_values, index_info, collations, 0)
.compare(
&serialized,
&unpacked_values,
index_info,
collations,
0,
tie_breaker,
)
.unwrap();
assert_eq!(
@@ -2216,9 +2259,15 @@ mod tests {
test_name, gold_result, optimized_result, comparer
);
let generic_result =
compare_records_generic(&serialized, &unpacked_values, index_info, collations, 0)
.unwrap();
let generic_result = compare_records_generic(
&serialized,
&unpacked_values,
index_info,
collations,
0,
tie_breaker,
)
.unwrap();
assert_eq!(
gold_result, generic_result,
"Test '{}' failed with generic: Full Comparison: {:?}, Generic: {:?}",
@@ -2561,10 +2610,25 @@ mod tests {
RefValue::Integer(3),
];
let result_skip_0 =
compare_records_generic(&serialized, &unpacked, &index_info, &collations, 0).unwrap();
let result_skip_1 =
compare_records_generic(&serialized, &unpacked, &index_info, &collations, 1).unwrap();
let tie_breaker = std::cmp::Ordering::Equal;
let result_skip_0 = compare_records_generic(
&serialized,
&unpacked,
&index_info,
&collations,
0,
tie_breaker,
)
.unwrap();
let result_skip_1 = compare_records_generic(
&serialized,
&unpacked,
&index_info,
&collations,
1,
tie_breaker,
)
.unwrap();
assert_eq!(result_skip_0, std::cmp::Ordering::Less);

View File

@@ -8,7 +8,7 @@ use crate::storage::pager::CreateBTreeFlags;
use crate::storage::wal::DummyWAL;
use crate::storage::{self, header_accessor};
use crate::translate::collate::CollationSeq;
use crate::types::{compare_immutable_for_testing, ImmutableRecord, Text};
use crate::types::{ImmutableRecord, Text};
use crate::util::normalize_ident;
use crate::{
error::{
@@ -2455,21 +2455,12 @@ pub fn op_idx_gt(
cursor.collations(),
);
let ord_debug = compare_immutable_for_testing(
idx_values,
record_values.as_slice(),
cursor.key_sort_order(),
cursor.collations(),
);
println!("idx_gt== old={:?}, debug={:?}", ord, ord_debug);
if ord.is_gt() {
target_pc.as_offset_int()
} else {
state.pc + 1
}
} else {
println!("we are taking the else branch");
target_pc.as_offset_int()
};
pc