Safer API for ImmutableRecord recreation

This commit is contained in:
Pere Diaz Bou
2025-03-30 11:00:13 +02:00
parent f2f6173670
commit 6ccb2e16d1
3 changed files with 49 additions and 57 deletions

View File

@@ -260,7 +260,7 @@ pub struct BTreeCursor {
stack: PageStack,
/// Reusable immutable record, used to enable a better allocation strategy.
reusable_immutable_record: RefCell<Option<ImmutableRecord>>,
empty_record: RefCell<bool>,
empty_record: Cell<bool>,
}
/// Stack of pages representing the tree traversal order.
@@ -308,7 +308,7 @@ impl BTreeCursor {
stack: RefCell::new([const { None }; BTCURSOR_MAX_DEPTH + 1]),
},
reusable_immutable_record: RefCell::new(None),
empty_record: RefCell::new(true),
empty_record: Cell::new(true),
}
}
@@ -1912,7 +1912,7 @@ impl BTreeCursor {
}
pub fn is_empty(&self) -> bool {
*self.empty_record.borrow()
self.empty_record.get()
}
pub fn root_page(&self) -> usize {
@@ -2000,7 +2000,7 @@ impl BTreeCursor {
Some(mv_cursor) => {
let row_id =
crate::mvcc::database::RowID::new(self.table_id() as u64, *int_key as u64);
let record_buf = record.payload.to_vec();
let record_buf = record.get_payload().to_vec();
let row = crate::mvcc::database::Row::new(row_id, record_buf);
mv_cursor.borrow_mut().insert(row).unwrap();
}
@@ -2739,10 +2739,7 @@ impl BTreeCursor {
fn get_lazy_immutable_record(&self) -> std::cell::RefMut<'_, Option<ImmutableRecord>> {
if self.reusable_immutable_record.borrow().is_none() {
let record = ImmutableRecord {
payload: Vec::with_capacity(4096),
values: Vec::with_capacity(10),
};
let record = ImmutableRecord::new(4096, 10);
self.reusable_immutable_record.replace(Some(record));
}
self.reusable_immutable_record.borrow_mut()
@@ -3423,7 +3420,7 @@ fn fill_cell_payload(
PageType::TableLeaf | PageType::IndexLeaf
));
// TODO: build the record from raw bytes from the start; having to serialize here is wasteful
let record_buf = record.payload.to_vec();
let record_buf = record.get_payload().to_vec();
// fill in header
if matches!(page_type, PageType::TableLeaf) {

View File

@@ -1085,11 +1085,10 @@ impl<T: Default + Copy> SmallVec<T> {
pub fn read_record(payload: &[u8], reuse_immutable: &mut ImmutableRecord) -> Result<()> {
// Let's clear previous use
reuse_immutable.payload.clear();
reuse_immutable.values.clear();
reuse_immutable.invalidate();
// Copy payload to ImmutableRecord in order to make RefValue that point to this new buffer.
// By reusing this immutable record we make it less allocation expensive.
reuse_immutable.payload.extend_from_slice(payload);
reuse_immutable.start_serialization(payload);
let mut pos = 0;
let (header_size, nr) = read_varint(payload)?;
@@ -1099,7 +1098,7 @@ pub fn read_record(payload: &[u8], reuse_immutable: &mut ImmutableRecord) -> Res
let mut serial_types = SmallVec::new();
while header_size > 0 {
let (serial_type, nr) = read_varint(&reuse_immutable.payload[pos..])?;
let (serial_type, nr) = read_varint(&reuse_immutable.get_payload()[pos..])?;
let serial_type = validate_serial_type(serial_type)?;
serial_types.push(serial_type);
pos += nr;
@@ -1108,17 +1107,17 @@ pub fn read_record(payload: &[u8], reuse_immutable: &mut ImmutableRecord) -> Res
}
for &serial_type in &serial_types.data[..serial_types.len.min(serial_types.data.len())] {
let (value, n) = read_value(&reuse_immutable.payload[pos..], unsafe {
let (value, n) = read_value(&reuse_immutable.get_payload()[pos..], unsafe {
*serial_type.as_ptr()
})?;
pos += n;
reuse_immutable.values.push(value);
reuse_immutable.add_value(value);
}
if let Some(extra) = serial_types.extra_data.as_ref() {
for serial_type in extra {
let (value, n) = read_value(&reuse_immutable.payload[pos..], *serial_type)?;
let (value, n) = read_value(&reuse_immutable.get_payload()[pos..], *serial_type)?;
pos += n;
reuse_immutable.values.push(value);
reuse_immutable.add_value(value);
}
}

View File

@@ -8,7 +8,6 @@ use crate::storage::sqlite3_ondisk::write_varint;
use crate::vdbe::sorter::Sorter;
use crate::vdbe::{Register, VTabOpaqueCursor};
use crate::Result;
use std::cmp::Ordering;
use std::fmt::Display;
const MAX_REAL_SIZE: u8 = 15;
@@ -631,8 +630,9 @@ pub struct ImmutableRecord {
// We have to be super careful with this buffer: values point into the payload, so we must ensure reallocations
// happen in a controlled manner. If we reallocate while values are still live, they will point to undefined data.
// We don't use Pin here because it would make it impossible to reuse the buffer when we need to push a new record into the same struct.
pub payload: Vec<u8>,
payload: Vec<u8>,
pub values: Vec<RefValue>,
recreating: bool,
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
@@ -699,6 +699,14 @@ impl<'a> AppendWriter<'a> {
}
impl ImmutableRecord {
pub fn new(payload_capacity: usize, value_capacity: usize) -> Self {
Self {
payload: Vec::with_capacity(payload_capacity),
values: Vec::with_capacity(value_capacity),
recreating: false,
}
}
pub fn get<'a, T: FromValue<'a> + 'a>(&'a self, idx: usize) -> Result<T> {
let value = self
.values
@@ -835,8 +843,32 @@ impl ImmutableRecord {
Self {
payload: buf,
values,
recreating: false,
}
}
/// Begins rebuilding this record from a raw on-disk payload.
///
/// Sets the `recreating` flag (enabling `add_value`) and appends
/// `payload` to the internal buffer.
/// NOTE(review): this appends rather than replaces — callers appear
/// expected to call `invalidate` first; confirm all call sites do.
pub fn start_serialization(&mut self, payload: &[u8]) {
self.recreating = true;
self.payload.extend_from_slice(payload);
}
/// Finishes a recreation started by `start_serialization`.
///
/// Panics if no recreation is in progress, catching mismatched
/// start/end pairs early.
pub fn end_serialization(&mut self) {
assert!(self.recreating);
self.recreating = false;
}
/// Appends a decoded value during recreation.
///
/// Panics unless `start_serialization` was called first: values may
/// point into the payload buffer, so they must only be pushed while
/// the record is being rebuilt in a controlled manner.
pub fn add_value(&mut self, value: RefValue) {
assert!(self.recreating);
self.values.push(value);
}
/// Discards the record's contents — both decoded values and the raw
/// payload — while keeping the allocated capacity for reuse.
/// NOTE(review): the `recreating` flag is intentionally left untouched
/// here; confirm callers pair this with `start_serialization`.
pub fn invalidate(&mut self) {
    self.values.clear();
    self.payload.clear();
}
/// Read-only view of the raw serialized payload bytes.
pub fn get_payload(&self) -> &[u8] {
    self.payload.as_slice()
}
}
impl Clone for ImmutableRecord {
@@ -872,6 +904,7 @@ impl Clone for ImmutableRecord {
Self {
payload: new_payload,
values: new_values,
recreating: self.recreating,
}
}
}
@@ -1090,45 +1123,8 @@ impl PartialEq<OwnedValue> for RefValue {
}
}
/// Compares owned values against ref values pairwise, in order.
///
/// Returns the first non-equal ordering; if every compared pair is
/// equal (including when either slice is empty), returns
/// `Ordering::Equal`. Elements beyond the shorter slice are ignored.
///
/// Panics if any pair is incomparable (`partial_cmp` returns `None`).
pub fn compare_record_to_immutable(
    record: &[OwnedValue],
    immutable: &[RefValue],
) -> std::cmp::Ordering {
    record
        .iter()
        .zip(immutable.iter())
        .map(|(a, b)| a.partial_cmp(b).unwrap())
        .find(|&ord| ord != Ordering::Equal)
        .unwrap_or(Ordering::Equal)
}
/// Compares ref values against owned values pairwise, in order.
///
/// Mirror of `compare_record_to_immutable` with the operand roles
/// swapped: returns the first non-equal ordering, or `Ordering::Equal`
/// when every compared pair matches. Elements beyond the shorter slice
/// are ignored.
///
/// Panics if any pair is incomparable (`partial_cmp` returns `None`).
pub fn compare_immutable_to_record(
    immutable: &[RefValue],
    record: &[OwnedValue],
) -> std::cmp::Ordering {
    immutable
        .iter()
        .zip(record.iter())
        .map(|(a, b)| a.partial_cmp(b).unwrap())
        .find(|&ord| ord != Ordering::Equal)
        .unwrap_or(Ordering::Equal)
}
pub fn compare_immutable(l: &[RefValue], r: &[RefValue]) -> std::cmp::Ordering {
for (a, b) in l.iter().zip(r.iter()) {
match a.partial_cmp(b).unwrap() {
Ordering::Equal => {}
order => {
return order;
}
}
}
Ordering::Equal
l.partial_cmp(r).unwrap()
}
const I8_LOW: i64 = -128;